From d50dc45cd4ddc20f0be0cd9b0225778a40116c14 Mon Sep 17 00:00:00 2001 From: Lorin Hochstein Date: Tue, 29 Apr 2014 11:46:14 -0400 Subject: [PATCH 001/813] Document static groups of dynamic groups Document how to create static groups with dynamic child groups --- docsite/rst/intro_dynamic_inventory.rst | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/docsite/rst/intro_dynamic_inventory.rst b/docsite/rst/intro_dynamic_inventory.rst index 6ca7062935..a9e5d21a5d 100644 --- a/docsite/rst/intro_dynamic_inventory.rst +++ b/docsite/rst/intro_dynamic_inventory.rst @@ -223,6 +223,26 @@ If the location given to -i in Ansible is a directory (or as so configured in an at the same time. When doing so, it is possible to mix both dynamic and statically managed inventory sources in the same ansible run. Instant hybrid cloud! +.. _static_groups_of_dynamic: + +Static Groups of Dynamic Groups +``````````````````````````````` + +When defining groups of groups in the static inventory file, the child groups +must also be defined in the static inventory file, or ansible will return an +error. If you want to define a static group of dynamic child groups, define +the dynamic groups as empty in the static inventory file. For example:: + + [tag_Name_staging_foo] + + [tag_Name_staging_bar] + + [staging:children] + tag_Name_staging_foo + tag_Name_staging_bar + + + .. 
seealso:: :doc:`intro_inventory` From 6a6060ac5591a34386cdd03d1148d7063db77372 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 6 May 2014 11:19:41 -0700 Subject: [PATCH 002/813] Teach env-setup how to create egg-info for ansible so that pkg_resources works --- hacking/env-setup | 26 +++++++++++++++++++++++++- hacking/env-setup.fish | 10 ++++++++++ 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/hacking/env-setup b/hacking/env-setup index 6e4de1af72..032611b2ae 100755 --- a/hacking/env-setup +++ b/hacking/env-setup @@ -25,6 +25,30 @@ unset ANSIBLE_LIBRARY export ANSIBLE_LIBRARY="$ANSIBLE_HOME/library:`python $HACKING_DIR/get_library.py`" [[ $MANPATH != ${PREFIX_MANPATH}* ]] && export MANPATH=$PREFIX_MANPATH:$MANPATH +# +# Generate egg_info so that pkg_resources works +# + +# Do the work in a function so we don't repeat ourselves later +gen_egg_info() +{ + python setup.py egg_info + if [ -e $PREFIX_PYTHONPATH/ansible*.egg-info ] ; then + rm -r $PREFIX_PYTHONPATH/ansible*.egg-info + fi + mv ansible*.egg-info $PREFIX_PYTHONPATH +} + +# In some shells if pushd is a no-op then popd sends you to a previous +# directory in history +if [ "$ANSIBLE_HOME" != "$PWD" ] ; then + pushd "$ANSIBLE_HOME" + gen_egg_info + popd +else + gen_egg_info +fi + # Print out values unless -q is set if [ $# -eq 0 -o "$1" != "-q" ] ; then @@ -36,7 +60,7 @@ if [ $# -eq 0 -o "$1" != "-q" ] ; then echo "ANSIBLE_LIBRARY=$ANSIBLE_LIBRARY" echo "MANPATH=$MANPATH" echo "" - + echo "Remember, you may wish to specify your host file with -i" echo "" echo "Done!" 
diff --git a/hacking/env-setup.fish b/hacking/env-setup.fish index 1613baeb14..0caa055efa 100644 --- a/hacking/env-setup.fish +++ b/hacking/env-setup.fish @@ -36,6 +36,16 @@ end set -gx ANSIBLE_LIBRARY $ANSIBLE_HOME/library +# Generate egg_info so that pkg_resources works +pushd $ANSIBLE_HOME +python setup.py egg_info +if test -e $PREFIX_PYTHONPATH/ansible*.egg-info + rm -r $PREFIX_PYTHONPATH/ansible*.egg-info +end +mv ansible*egg-info $PREFIX_PYTHONPATH +popd + + if set -q argv switch $argv case '-q' '--quiet' From f7b76e0394ad2eb59f4aef6497fd89ce664e8718 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 6 May 2014 11:21:31 -0700 Subject: [PATCH 003/813] Restore the pkg_resources calls so that we pull in the correct pycrypto on RHEL6 --- bin/ansible-playbook | 4 ++-- bin/ansible-vault | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/bin/ansible-playbook b/bin/ansible-playbook index 21635ea573..6c023fc8cd 100755 --- a/bin/ansible-playbook +++ b/bin/ansible-playbook @@ -18,8 +18,8 @@ ####################################################### -#__requires__ = ['ansible'] -#import pkg_resources +__requires__ = ['ansible'] +import pkg_resources import sys import os diff --git a/bin/ansible-vault b/bin/ansible-vault index 4929e1c2a1..07092376c4 100755 --- a/bin/ansible-vault +++ b/bin/ansible-vault @@ -20,8 +20,8 @@ # example playbook to bootstrap this script in the examples/ dir which # installs ansible and sets it up to run on cron. 
-#__requires__ = ['ansible'] -#import pkg_resources +__requires__ = ['ansible'] +import pkg_resources import os import sys From 76814089ae213796030debb9a548686122d409ce Mon Sep 17 00:00:00 2001 From: Adam Miller Date: Tue, 20 May 2014 20:28:14 -0500 Subject: [PATCH 004/813] add async fire-forget/check functionality --- docsite/rst/playbooks_async.rst | 22 ++++++++++++++++++++++ library/internal/async_status | 2 +- 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_async.rst b/docsite/rst/playbooks_async.rst index 5a82189b65..62b5c944a8 100644 --- a/docsite/rst/playbooks_async.rst +++ b/docsite/rst/playbooks_async.rst @@ -56,6 +56,28 @@ Alternatively, if you do not need to wait on the task to complete, you may Using a higher value for ``--forks`` will result in kicking off asynchronous tasks even faster. This also increases the efficiency of polling. +If you would like to perform a variation of the "fire and forget" where you +"fire and forget, check on it later" you can perform a task similar to the +following:: + + --- + # Requires ansible 1.7+ + - name: 'YUM - fire and forget task' + yum: name=docker-io state=installed + async: 1000 + poll: 0 + register: yum_sleeper + + - name: 'YUM - check on fire and forget task' + async_status: jid={{ yum_sleeper.ansible_job_id }} + register: job_result + until: job_result.finished + retries: 30 + +.. note:: + If the value of ``async:`` is not high enough, this will cause the + "check on it later" task to fail because the temporary status file that + the ``async_status:`` is looking for will not have been written .. seealso:: diff --git a/library/internal/async_status b/library/internal/async_status index 1605f877a4..f991b50064 100644 --- a/library/internal/async_status +++ b/library/internal/async_status @@ -80,7 +80,7 @@ def main(): except Exception, e: if data == '': # file not written yet? 
That means it is running - module.exit_json(results_file=log_path, ansible_job_id=jid, started=1) + module.exit_json(results_file=log_path, ansible_job_id=jid, started=1, finished=0) else: module.fail_json(ansible_job_id=jid, results_file=log_path, msg="Could not parse job output: %s" % data) From 5ce366c6a2cd16baa69143d7246dc209f06c2249 Mon Sep 17 00:00:00 2001 From: Eric Brown Date: Tue, 12 Aug 2014 16:40:01 -0700 Subject: [PATCH 005/813] Fix the docs for glance_image.endpoint_type The endpoint_type option was added in version 1.7, so the docs need to state this. Also the the description is too brief. --- library/cloud/glance_image | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/library/cloud/glance_image b/library/cloud/glance_image index d8b02602fe..3bbc6f0ebc 100644 --- a/library/cloud/glance_image +++ b/library/cloud/glance_image @@ -106,10 +106,11 @@ options: default: None endpoint_type: description: - - endpoint URL type + - The name of the glance service's endpoint URL type choices: [publicURL, internalURL] required: false default: publicURL + version_added: "1.7" requirements: ["glanceclient", "keystoneclient"] ''' From a7d38702d95c63d883def6904e37d38093584ce3 Mon Sep 17 00:00:00 2001 From: Oleg Bulatov Date: Mon, 1 Sep 2014 23:23:36 +0400 Subject: [PATCH 006/813] Fix #7711 Fix relative symlinks creation in file module Signed-off-by: Oleg Bulatov --- library/files/file | 4 +- .../roles/test_file/tasks/main.yml | 37 +++++++++++++++++++ 2 files changed, 39 insertions(+), 2 deletions(-) diff --git a/library/files/file b/library/files/file index 82f4d5016d..4541d24d85 100644 --- a/library/files/file +++ b/library/files/file @@ -251,8 +251,8 @@ def main(): else: relpath = os.path.dirname(path) - absrc = os.path.normpath('%s/%s' % (relpath, os.path.basename(src))) - if not os.path.exists(src) and not os.path.exists(absrc) and not force: + absrc = os.path.join(relpath, src) + if not os.path.exists(absrc) and not force: 
module.fail_json(path=path, src=src, msg='src file does not exist, use "force=yes" if you really want to create the link: %s' % absrc) if state == 'hard': diff --git a/test/integration/roles/test_file/tasks/main.yml b/test/integration/roles/test_file/tasks/main.yml index 7c8262c27d..6db48a582c 100644 --- a/test/integration/roles/test_file/tasks/main.yml +++ b/test/integration/roles/test_file/tasks/main.yml @@ -229,6 +229,43 @@ - 'file17_result.failed == true' - 'file17_result.state == "directory"' +- name: create soft link to directory using absolute path + file: src=/ dest={{output_dir}}/root state=link + register: file18_result + +- name: verify that the result was marked as changed + assert: + that: + - "file18_result.changed == true" + +- name: create another test sub-directory + file: dest={{output_dir}}/sub2 state=directory + register: file19_result + +- name: verify that the new directory was created + assert: + that: + - 'file19_result.changed == true' + - 'file19_result.state == "directory"' + +- name: create soft link to relative file + file: src=../sub1/file1 dest={{output_dir}}/sub2/link1 state=link + register: file20_result + +- name: verify that the result was marked as changed + assert: + that: + - "file20_result.changed == true" + +- name: create soft link to relative directory + file: src=sub1 dest={{output_dir}}/sub1-link state=link + register: file21_result + +- name: verify that the result was marked as changed + assert: + that: + - "file21_result.changed == true" + - name: test file creation with symbolic mode file: dest={{output_dir}}/test_symbolic state=touch mode=u=rwx,g=rwx,o=rwx register: result From acd37c87a9e5a9115f8a575c9d79426285efea1b Mon Sep 17 00:00:00 2001 From: Kristofor Varhus Date: Tue, 2 Sep 2014 09:38:30 -0400 Subject: [PATCH 007/813] zypper: handle lists of packages efficiently --- lib/ansible/runner/__init__.py | 2 +- library/packaging/zypper | 105 ++++++++++++++++++++------------- 2 files changed, 65 insertions(+), 42 
deletions(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index b7bfc7bd3c..15df88d42f 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -667,7 +667,7 @@ class Runner(object): if type(items) != list: raise errors.AnsibleError("lookup plugins have to return a list: %r" % items) - if len(items) and utils.is_list_of_strings(items) and self.module_name in [ 'apt', 'yum', 'pkgng' ]: + if len(items) and utils.is_list_of_strings(items) and self.module_name in [ 'apt', 'yum', 'pkgng', 'zypper' ]: # hack for apt, yum, and pkgng so that with_items maps back into a single module call use_these_items = [] for x in items: diff --git a/library/packaging/zypper b/library/packaging/zypper index 2a673eb0ca..2f42baac48 100644 --- a/library/packaging/zypper +++ b/library/packaging/zypper @@ -84,36 +84,58 @@ EXAMPLES = ''' - zypper: name=nmap state=absent ''' -# Function used for getting the name of a currently installed package. -def get_current_name(m, name): - cmd = '/bin/rpm -q --qf \'%{NAME}-%{VERSION}\'' - (rc, stdout, stderr) = m.run_command("%s %s" % (cmd, name)) +# Function used for getting versions of currently installed packages. +def get_current_version(m, name): + cmd = ['/bin/rpm', '-q', '--qf', '%{NAME} %{VERSION}-%{RELEASE}\n'] + cmd.extend(name) + (rc, stdout, stderr) = m.run_command(cmd) - if rc != 0: - return (rc, stdout, stderr) + current_version = {} + rpmoutput_re = re.compile('^(\S+) (\S+)$') + for stdoutline, package in zip(stdout.splitlines(), name): + m = rpmoutput_re.match(stdoutline) + if m == None: + return None + rpmpackage = m.group(1) + rpmversion = m.group(2) + if package != rpmpackage: + return None + current_version[package] = rpmversion - syntax = "%s" - - for line in stdout.splitlines(): - if syntax % name in line: - current_name = line.split()[0] - - return current_name + return current_version # Function used to find out if a package is currently installed. 
-def get_package_state(m, name): - cmd = ['/bin/rpm', '--query', '--info', name] +def get_package_state(m, packages): + cmd = ['/bin/rpm', '--query', '--qf', 'package %{NAME} is installed\n'] + cmd.extend(packages) rc, stdout, stderr = m.run_command(cmd, check_rc=False) - if rc == 0: - return True - else: - return False + installed_state = {} + rpmoutput_re = re.compile('^package (\S+) (.*)$') + for stdoutline, name in zip(stdout.splitlines(), packages): + m = rpmoutput_re.match(stdoutline) + if m == None: + return None + package = m.group(1) + result = m.group(2) + if not name.startswith(package): + print name + ':' + package + ':' + stdoutline + '\n' + return None + if result == 'is installed': + installed_state[name] = True + else: + installed_state[name] = False + + return installed_state # Function used to make sure a package is present. def package_present(m, name, installed_state, disable_gpg_check, disable_recommends): - if installed_state is False: + packages = [] + for package in name: + if installed_state[package] is False: + packages.append(package) + if len(packages) != 0: cmd = ['/usr/bin/zypper', '--non-interactive'] # add global options before zypper command if disable_gpg_check: @@ -123,7 +145,7 @@ def package_present(m, name, installed_state, disable_gpg_check, disable_recomme # add install parameter if disable_recommends: cmd.append('--no-recommends') - cmd.append(name) + cmd.extend(packages) rc, stdout, stderr = m.run_command(cmd, check_rc=False) if rc == 0: @@ -141,33 +163,34 @@ def package_present(m, name, installed_state, disable_gpg_check, disable_recomme # Function used to make sure a package is the latest available version. 
def package_latest(m, name, installed_state, disable_gpg_check, disable_recommends): - if installed_state is True: - cmd = ['/usr/bin/zypper', '--non-interactive', 'update', '--auto-agree-with-licenses', name] - pre_upgrade_name = '' - post_upgrade_name = '' + # first of all, make sure all the packages are installed + (rc, stdout, stderr, changed) = package_present(m, name, installed_state, disable_gpg_check) - # Compare the installed package before and after to know if we changed anything. - pre_upgrade_name = get_current_name(m, name) + # if we've already made a change, we don't have to check whether a version changed + if not changed: + pre_upgrade_versions = get_current_version(m, name) - rc, stdout, stderr = m.run_command(cmd, check_rc=False) + cmd = ['/usr/bin/zypper', '--non-interactive', 'update', '--auto-agree-with-licenses'] + cmd.extend(name) + rc, stdout, stderr = m.run_command(cmd, check_rc=False) - post_upgrade_name = get_current_name(m, name) - - if pre_upgrade_name == post_upgrade_name: - changed = False - else: + # if we've already made a change, we don't have to check whether a version changed + if not changed: + post_upgrade_versions = get_current_version(m, name) + if pre_upgrade_versions != post_upgrade_versions: changed = True - return (rc, stdout, stderr, changed) - - else: - # If package was not installed at all just make it present. - return package_present(m, name, installed_state, disable_gpg_check, disable_recommends) + return (rc, stdout, stderr, changed) # Function used to make sure a package is not installed. 
def package_absent(m, name, installed_state): - if installed_state is True: - cmd = ['/usr/bin/zypper', '--non-interactive', 'remove', name] + packages = [] + for package in name: + if installed_state[package] is True: + packages.append(package) + if len(packages) != 0: + cmd = ['/usr/bin/zypper', '--non-interactive', 'remove'] + cmd.extend(packages) rc, stdout, stderr = m.run_command(cmd) if rc == 0: @@ -188,7 +211,7 @@ def package_absent(m, name, installed_state): def main(): module = AnsibleModule( argument_spec = dict( - name = dict(required=True, aliases=['pkg']), + name = dict(required=True, aliases=['pkg'], type='list'), state = dict(required=False, default='present', choices=['absent', 'installed', 'latest', 'present', 'removed']), disable_gpg_check = dict(required=False, default='no', type='bool'), disable_recommends = dict(requiered=False, default='yes', type='bool'), From 5b5103e6b4efc91f8749787c49c6ddfdfb9f9139 Mon Sep 17 00:00:00 2001 From: Michal Gasek Date: Wed, 3 Sep 2014 00:50:28 +0200 Subject: [PATCH 008/813] Add support for filtering EC2 instances in dynamic inventory This allows filtering out EC2 instances based on various different filters including tags. As requested in 7480 it supports logical "OR" instead of "AND" on the provided list of filters. --- plugins/inventory/ec2.ini | 20 ++++++++++++++++++++ plugins/inventory/ec2.py | 16 +++++++++++++++- 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/plugins/inventory/ec2.ini b/plugins/inventory/ec2.ini index a0c8672394..c66bf309b1 100644 --- a/plugins/inventory/ec2.ini +++ b/plugins/inventory/ec2.ini @@ -73,3 +73,23 @@ nested_groups = False # If you want to exclude any hosts that match a certain regular expression # pattern_exclude = stage-* + +# Instance filters can be used to control which instances are retrieved for +# inventory. 
For the full list of possible filters, please read the EC2 API +# docs: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInstances.html#query-DescribeInstances-filters +# Filters are key/value pairs separated by '=', to list multiple filters use +# a list separated by commas. See examples below. + +# Retrieve only instances with (key=value) env=stage tag +# instance_filters = tag:env=stage + +# Retrieve only instances with role=webservers OR role=dbservers tag +# instance_filters = tag:role=webservers,tag:role=dbservers + +# Retrieve only t1.micro instances OR instances with tag env=stage +# instance_filters = instance-type=t1.micro,tag:env=stage + +# You can use wildcards in filter values also. Below will list instances which +# tag Name value matches webservers1* +# (ex. webservers15, webservers1a, webservers123 etc) +# instance_filters = tag:Name=webservers1* diff --git a/plugins/inventory/ec2.py b/plugins/inventory/ec2.py index f4e98f6dd7..aec6473be6 100755 --- a/plugins/inventory/ec2.py +++ b/plugins/inventory/ec2.py @@ -123,6 +123,7 @@ from boto import ec2 from boto import rds from boto import route53 import ConfigParser +from collections import defaultdict try: import json @@ -272,6 +273,13 @@ class Ec2Inventory(object): except ConfigParser.NoOptionError, e: self.pattern_exclude = None + # Instance filters (see boto and EC2 API docs) + self.ec2_instance_filters = defaultdict(list) + if config.has_option('ec2', 'instance_filters'): + for x in config.get('ec2', 'instance_filters', '').split(','): + filter_key, filter_value = x.split('=') + self.ec2_instance_filters[filter_key].append(filter_value) + def parse_cli_args(self): ''' Command line argument processing ''' @@ -316,7 +324,13 @@ class Ec2Inventory(object): print("region name: %s likely not supported, or AWS is down. connection to region failed." 
% region) sys.exit(1) - reservations = conn.get_all_instances() + reservations = [] + if self.ec2_instance_filters: + for filter_key, filter_values in self.ec2_instance_filters.iteritems(): + reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values })) + else: + reservations = conn.get_all_instances() + for reservation in reservations: for instance in reservation.instances: self.add_instance(instance, region) From 2474ce58198e1f1b2c58c1b99cb6f4547a8c5fd2 Mon Sep 17 00:00:00 2001 From: Serge van Ginderachter Date: Tue, 9 Sep 2014 21:57:23 +0200 Subject: [PATCH 009/813] Exit with rc 1 if role doesn't exist/is not found fixes #8823 modified: bin/ansible-galaxy --- bin/ansible-galaxy | 1 + 1 file changed, 1 insertion(+) diff --git a/bin/ansible-galaxy b/bin/ansible-galaxy index 146361da93..dd349901eb 100755 --- a/bin/ansible-galaxy +++ b/bin/ansible-galaxy @@ -759,6 +759,7 @@ def execute_install(args, options, parser): role_data = api_lookup_role_by_name(api_server, role_src) if not role_data: print "- sorry, %s was not found on %s." 
% (role_src, api_server) + exit_without_ignore(options) continue role_versions = api_fetch_role_related(api_server, 'versions', role_data['id']) From 537472f42cbd2a380fa090f1d09a21ed3e5b44e5 Mon Sep 17 00:00:00 2001 From: Will Thames Date: Wed, 10 Sep 2014 09:07:50 +1000 Subject: [PATCH 010/813] Make ansible-galaxy work as expected This change fixes hg galaxy roles Roles also get installed if roles path is missing, which the tests currently require (fixes #8950) --- bin/ansible-galaxy | 8 -------- lib/ansible/utils/__init__.py | 7 +++---- 2 files changed, 3 insertions(+), 12 deletions(-) diff --git a/bin/ansible-galaxy b/bin/ansible-galaxy index 146361da93..7598580d82 100755 --- a/bin/ansible-galaxy +++ b/bin/ansible-galaxy @@ -704,14 +704,6 @@ def execute_install(args, options, parser): print "- please specify a user/role name, or a roles file, but not both" sys.exit(1) - # error checking to ensure the specified roles path exists and is a directory - if not os.path.exists(roles_path): - print "- the specified role path %s does not exist" % roles_path - sys.exit(1) - elif not os.path.isdir(roles_path): - print "- the specified role path %s is not a directory" % roles_path - sys.exit(1) - roles_done = [] if role_file: f = open(role_file, 'r') diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index 98b7f2f36b..fa11291776 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -387,15 +387,12 @@ def role_spec_parse(role_spec): role_spec = role_spec.strip() role_version = '' + default_role_versions = dict(git='master', hg='tip') if role_spec == "" or role_spec.startswith("#"): return (None, None, None, None) tokens = [s.strip() for s in role_spec.split(',')] - if not tokens[0].endswith('.tar.gz'): - # pick a reasonable default branch - role_version = 'master' - # assume https://github.com URLs are git+https:// URLs and not # tarballs unless they end in '.zip' if 'github.com/' in tokens[0] and not 
tokens[0].startswith("git+") and not tokens[0].endswith('.tar.gz'): @@ -412,6 +409,8 @@ def role_spec_parse(role_spec): role_name = tokens[2] else: role_name = repo_url_to_role_name(tokens[0]) + if scm and not role_version: + role_version = default_role_versions.get(scm, '') return dict(scm=scm, src=role_url, version=role_version, name=role_name) From d6a725659eae52e3afa6b8d8eabdcdbc857c91bd Mon Sep 17 00:00:00 2001 From: Will Thames Date: Wed, 10 Sep 2014 21:55:27 +1000 Subject: [PATCH 011/813] Allow github style ssh repo names --- lib/ansible/utils/__init__.py | 2 +- test/integration/galaxy_roles.yml | 5 +++++ test/integration/galaxy_rolesfile | 1 + 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index fa11291776..e7166347de 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -362,7 +362,7 @@ def repo_url_to_role_name(repo_url): # gets the role name out of a repo like # http://git.example.com/repos/repo.git" => "repo" - if '://' not in repo_url: + if '://' not in repo_url and '@' not in repo_url: return repo_url trailing_path = repo_url.split('/')[-1] if trailing_path.endswith('.git'): diff --git a/test/integration/galaxy_roles.yml b/test/integration/galaxy_roles.yml index cd61006840..76b385191c 100644 --- a/test/integration/galaxy_roles.yml +++ b/test/integration/galaxy_roles.yml @@ -6,3 +6,8 @@ - src: https://bitbucket.org/willthames/http-ansible-galaxy/get/master.tar.gz name: http-role + +- src: git@github.com:geerlingguy/ansible-role-php.git + scm: git + name: php + diff --git a/test/integration/galaxy_rolesfile b/test/integration/galaxy_rolesfile index a1374925ba..31596d4914 100644 --- a/test/integration/galaxy_rolesfile +++ b/test/integration/galaxy_rolesfile @@ -1,3 +1,4 @@ git+http://bitbucket.org/willthames/git-ansible-galaxy,v1.4 hg+http://bitbucket.org/willthames/hg-ansible-galaxy 
https://bitbucket.org/willthames/http-ansible-galaxy/get/master.tar.gz,,http-role +git+git@github.com:geerlingguy/ansible-role-php.git From c3614853ee98f5802df9ef009a8340cb1bdd55fc Mon Sep 17 00:00:00 2001 From: Serge van Ginderachter Date: Thu, 11 Sep 2014 15:47:22 +0200 Subject: [PATCH 012/813] vsphere_guest module doc fix --- library/cloud/vsphere_guest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/cloud/vsphere_guest b/library/cloud/vsphere_guest index e3dcc4d28e..a91a8199dd 100644 --- a/library/cloud/vsphere_guest +++ b/library/cloud/vsphere_guest @@ -67,7 +67,7 @@ options: description: - Indicate desired state of the vm. default: present - choices: ['present', 'powered_on', 'absent', 'powered_on', 'restarted', 'reconfigured'] + choices: ['present', 'powered_on', 'absent', 'powered_off', 'restarted', 'reconfigured'] vm_disk: description: - A key, value list of disks and their sizes and which datastore to keep it in. From dc9482d54868f7abb1b453f77845a7ab927def00 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 11 Sep 2014 16:27:10 -0500 Subject: [PATCH 013/813] Fix quoting issues in lineinfile for individual single-quotes Fixes #8806 --- library/files/lineinfile | 9 +++++++-- .../roles/test_lineinfile/tasks/main.yml | 18 ++++++++++++++++++ 2 files changed, 25 insertions(+), 2 deletions(-) diff --git a/library/files/lineinfile b/library/files/lineinfile index ba842e15e2..12f8dc89a7 100644 --- a/library/files/lineinfile +++ b/library/files/lineinfile @@ -369,14 +369,19 @@ def main(): # so we need to know if we should specifically unquote it. should_unquote = not is_quoted(line) + # always add one layer of quotes + line = "'%s'" % line + # Replace escape sequences like '\n' while being sure # not to replace octal escape sequences (\ooo) since they # match the backref syntax. 
if backrefs: line = re.sub(r'(\\[0-9]{1,3})', r'\\\1', line) - line = module.safe_eval(pipes.quote(line)) + line = module.safe_eval(line) - # Now remove quotes around the string, if needed + # Now remove quotes around the string, if needed after + # removing the layer we added above + line = unquote(line) if should_unquote: line = unquote(line) diff --git a/test/integration/roles/test_lineinfile/tasks/main.yml b/test/integration/roles/test_lineinfile/tasks/main.yml index 34c9db6f4f..8d58cbba6f 100644 --- a/test/integration/roles/test_lineinfile/tasks/main.yml +++ b/test/integration/roles/test_lineinfile/tasks/main.yml @@ -337,4 +337,22 @@ that: - "result.stat.md5 == '29f349baf1b9c6703beeb346fe8dc669'" +- name: insert a line into the quoted file with a single quote + lineinfile: dest={{output_dir}}/test_quoting.txt line="import g'" + register: result + +- name: assert that the quoted file was changed + assert: + that: + - result.changed + +- name: stat the quote test file + stat: path={{output_dir}}/test_quoting.txt + register: result + +- name: assert test md5 matches after backref line was replaced + assert: + that: + - "result.stat.md5 == 'fbe9c4ba2490f70eb1974ce31ec4a39f'" + ################################################################### From 49aa7ee523cf96ff1eb7b17b3add2f9b6e974d1e Mon Sep 17 00:00:00 2001 From: renning bruns Date: Thu, 11 Sep 2014 14:41:51 -0700 Subject: [PATCH 014/813] Update playbooks_variables.rst very small and minor addition, but I couldn't find anywhere that answered this (now obvious) question I had of whether or not facts could also be yaml files. 
--- docsite/rst/playbooks_variables.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index e20e1d6dbe..c5eeff0135 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -667,7 +667,7 @@ For instance, what if you want users to be able to control some aspect about how .. note:: Perhaps "local facts" is a bit of a misnomer, it means "locally supplied user values" as opposed to "centrally supplied user values", or what facts are -- "locally dynamically determined values". If a remotely managed system has an "/etc/ansible/facts.d" directory, any files in this directory -ending in ".fact", can be JSON, INI, or executable files returning JSON, and these can supply local facts in Ansible. +ending in ".fact", can be YAML, JSON, INI, or executable files returning JSON, and these can supply local facts in Ansible. For instance assume a /etc/ansible/facts.d/preferences.fact:: From 580968ca772cbc95f5d9e560c4c2b6389417b0e0 Mon Sep 17 00:00:00 2001 From: Bruno BAILLUET Date: Fri, 12 Sep 2014 18:19:00 +0200 Subject: [PATCH 015/813] Fix a typo on is_update call inside latest function --- library/packaging/yum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/packaging/yum b/library/packaging/yum index 245d3b8020..c3158077d1 100644 --- a/library/packaging/yum +++ b/library/packaging/yum @@ -672,7 +672,7 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): nothing_to_do = False break - if basecmd == 'update' and is_update(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=en_repos): + if basecmd == 'update' and is_update(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos): nothing_to_do = False break From b9acaccc1d59a4efb70903fc3aa3e00a6c62749b Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Sat, 13 Sep 2014 12:27:47 +0200 Subject: [PATCH 016/813] Do not remove 
local role file if given on the commandline using this (for testing purpose) : $ ansible-galaxy install COPYING - error: the file downloaded was not a tar.gz - COPYING was NOT installed successfully. - you can use --ignore-errors to skip failed roles. this result in COPYING being erased, which is surprising for the user. This also prevent erasing requirements.yml if someone use the wrong flag. --- bin/ansible-galaxy | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/bin/ansible-galaxy b/bin/ansible-galaxy index 146361da93..81be7b111a 100755 --- a/bin/ansible-galaxy +++ b/bin/ansible-galaxy @@ -787,7 +787,8 @@ def execute_install(args, options, parser): if tmp_file: installed = install_role(role.get("name"), role.get("version"), tmp_file, options) # we're done with the temp file, clean it up - os.unlink(tmp_file) + if tmp_file != role_src: + os.unlink(tmp_file) # install dependencies, if we want them if not no_deps and installed: if not role_data: From 8fb14db802ae0d8ac44de4001c2c68f6eac721c6 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Sat, 13 Sep 2014 13:25:07 +0200 Subject: [PATCH 017/813] Mention defaults subdirectory in a more visible way All examples do not show it, so only someone reading the doc from end to end would know about it. 
--- docsite/rst/playbooks_best_practices.rst | 2 ++ docsite/rst/playbooks_roles.rst | 2 ++ 2 files changed, 4 insertions(+) diff --git a/docsite/rst/playbooks_best_practices.rst b/docsite/rst/playbooks_best_practices.rst index 7eee9cc68a..473e20db93 100644 --- a/docsite/rst/playbooks_best_practices.rst +++ b/docsite/rst/playbooks_best_practices.rst @@ -51,6 +51,8 @@ The top level of the directory would contain files and directories like so:: foo.sh # <-- script files for use with the script resource vars/ # main.yml # <-- variables associated with this role + defaults/ # + main.yml # <-- default lower priority variables for this role meta/ # main.yml # <-- role dependencies diff --git a/docsite/rst/playbooks_roles.rst b/docsite/rst/playbooks_roles.rst index eb7b812ea7..fc347e39a0 100644 --- a/docsite/rst/playbooks_roles.rst +++ b/docsite/rst/playbooks_roles.rst @@ -172,6 +172,7 @@ Example project structure:: tasks/ handlers/ vars/ + defaults/ meta/ webservers/ files/ @@ -179,6 +180,7 @@ Example project structure:: tasks/ handlers/ vars/ + defaults/ meta/ In a playbook, it would look like this:: From 9fb6cabe218b0bca7635faf718140a7dd9d05745 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 15 Sep 2014 09:00:11 -0700 Subject: [PATCH 018/813] Fix splitting the tag from the image name Fixes #8983 --- library/cloud/docker | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/library/cloud/docker b/library/cloud/docker index b831485e52..807572f866 100644 --- a/library/cloud/docker +++ b/library/cloud/docker @@ -507,12 +507,12 @@ class DockerManager: def get_split_image_tag(self, image): # If image contains a host or org name, omit that from our check - resource = image.rsplit('/', 1)[-1] + registry, resource = image.rsplit('/', 1) # now we can determine if image has a tag if resource.find(':') > 0: - # Use image here so that host and org name are included - return image.split(':', 1) + resource, tag = resource.split(':', 1) + return 
'/'.join((registry, resource)), tag else: tag = "latest" return image, tag From 0c4a7492bf6a62f7d84cb52f4ce9989f928f8b6d Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 15 Sep 2014 11:29:04 -0500 Subject: [PATCH 019/813] Ensure checkmode is honored for templates that haven't changed Fixes #8998 --- lib/ansible/runner/action_plugins/template.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lib/ansible/runner/action_plugins/template.py b/lib/ansible/runner/action_plugins/template.py index b16e5f66e6..0348035bf5 100644 --- a/lib/ansible/runner/action_plugins/template.py +++ b/lib/ansible/runner/action_plugins/template.py @@ -132,5 +132,12 @@ class ActionModule(object): res.diff = dict(before=dest_contents, after=resultant) return res else: + # if we're running in check mode, we still want the file module + # to execute, since we can't know if anything would be changed here, + # so we inject the check mode param into the module args and rely on + # the file module to report its changed status + if self.runner.noop_on_check(inject): + new_module_args = dict(CHECKMODE=True) + module_args = utils.merge_module_args(module_args, new_module_args) return self.runner._execute_module(conn, tmp, 'file', module_args, inject=inject, complex_args=complex_args) From cdf938f2c2fb25032d09f135ecaebfc4c1bb5c5b Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 15 Sep 2014 13:15:46 -0500 Subject: [PATCH 020/813] Ensure remote files are chmod'd properly for su/su_user too Fixes #8594 --- lib/ansible/runner/action_plugins/assemble.py | 2 +- lib/ansible/runner/action_plugins/copy.py | 2 +- lib/ansible/runner/action_plugins/template.py | 2 +- lib/ansible/runner/action_plugins/unarchive.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/ansible/runner/action_plugins/assemble.py b/lib/ansible/runner/action_plugins/assemble.py index fa4b694629..54b5d1985c 100644 --- a/lib/ansible/runner/action_plugins/assemble.py +++ 
b/lib/ansible/runner/action_plugins/assemble.py @@ -124,7 +124,7 @@ class ActionModule(object): xfered = self.runner._transfer_str(conn, tmp, 'src', resultant) # fix file permissions when the copy is done as a different user - if self.runner.sudo and self.runner.sudo_user != 'root': + if self.runner.sudo and self.runner.sudo_user != 'root' or self.runner.su and self.runner.su_user != 'root': self.runner._remote_chmod(conn, 'a+r', xfered, tmp) # run the copy module diff --git a/lib/ansible/runner/action_plugins/copy.py b/lib/ansible/runner/action_plugins/copy.py index 8dfb42ee2d..27b17b9969 100644 --- a/lib/ansible/runner/action_plugins/copy.py +++ b/lib/ansible/runner/action_plugins/copy.py @@ -227,7 +227,7 @@ class ActionModule(object): self._remove_tempfile_if_content_defined(content, content_tempfile) # fix file permissions when the copy is done as a different user - if self.runner.sudo and self.runner.sudo_user != 'root' and not raw: + if (self.runner.sudo and self.runner.sudo_user != 'root' or self.runner.su and self.runner.su_user != 'root') and not raw: self.runner._remote_chmod(conn, 'a+r', tmp_src, tmp_path) if raw: diff --git a/lib/ansible/runner/action_plugins/template.py b/lib/ansible/runner/action_plugins/template.py index 0348035bf5..9bfd66d3ff 100644 --- a/lib/ansible/runner/action_plugins/template.py +++ b/lib/ansible/runner/action_plugins/template.py @@ -113,7 +113,7 @@ class ActionModule(object): xfered = self.runner._transfer_str(conn, tmp, 'source', resultant) # fix file permissions when the copy is done as a different user - if self.runner.sudo and self.runner.sudo_user != 'root': + if self.runner.sudo and self.runner.sudo_user != 'root' or self.runner.su and self.runner.su_user != 'root': self.runner._remote_chmod(conn, 'a+r', xfered, tmp) # run the copy module diff --git a/lib/ansible/runner/action_plugins/unarchive.py b/lib/ansible/runner/action_plugins/unarchive.py index 1e3795955d..40bc5d9149 100644 --- 
a/lib/ansible/runner/action_plugins/unarchive.py +++ b/lib/ansible/runner/action_plugins/unarchive.py @@ -76,7 +76,7 @@ class ActionModule(object): # handle check mode client side # fix file permissions when the copy is done as a different user if copy: - if self.runner.sudo and self.runner.sudo_user != 'root': + if self.runner.sudo and self.runner.sudo_user != 'root' or self.runner.su and self.runner.su_user != 'root': self.runner._remote_chmod(conn, 'a+r', tmp_src, tmp) # Build temporary module_args. new_module_args = dict( From a44ea2d1b7c1be234a898c1edf204602943564d5 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 15 Sep 2014 12:34:16 -0700 Subject: [PATCH 021/813] Do not keep a new file if we fail to set its attributes. --- library/files/file | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/library/files/file b/library/files/file index 82f4d5016d..8bfd94dd98 100644 --- a/library/files/file +++ b/library/files/file @@ -331,8 +331,16 @@ def main(): module.fail_json(path=path, msg='Error while touching existing target: %s' % str(e)) else: module.fail_json(msg='Cannot touch other than files and directories') - - module.set_fs_attributes_if_different(file_args, True) + try: + module.set_fs_attributes_if_different(file_args, True) + except SystemExit as e: + if e.code: + # We take this to mean that fail_json() was called from + # somewhere in basic.py + if prev_state == 'absent': + # If we just created the file we can safely remove it + os.remove(path) + raise e module.exit_json(dest=path, changed=True) From 49a6af1378c6d70c265c93fb610cd10454b2b7e0 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 15 Sep 2014 12:35:49 -0700 Subject: [PATCH 022/813] Test that file touch does not keep a new file if we failed to set its attributes --- .../roles/test_file/tasks/main.yml | 23 +++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/test/integration/roles/test_file/tasks/main.yml 
b/test/integration/roles/test_file/tasks/main.yml index 7c8262c27d..775c173f34 100644 --- a/test/integration/roles/test_file/tasks/main.yml +++ b/test/integration/roles/test_file/tasks/main.yml @@ -65,6 +65,29 @@ - name: change ownership and group file: path={{output_dir}}/baz.txt owner=1234 group=1234 +- name: setup a tmp-like directory for ownership test + file: path=/tmp/worldwritable mode=1777 state=directory + +- name: Ask to create a file without enough perms to change ownership + file: path=/tmp/worldwritable/baz.txt state=touch owner=root + sudo: yes + sudo_user: nobody + register: chown_result + ignore_errors: True + +- name: Ask whether the new file exists + stat: path=/tmp/worldwritable/baz.txt + register: file_exists_result + +- name: Verify that the file doesn't exist on failure + assert: + that: + - "chown_result.failed == True" + - "file_exists_result.stat.exists == False" + +- name: clean up + file: path=/tmp/worldwritable state=absent + - name: create soft link to file file: src={{output_file}} dest={{output_dir}}/soft.txt state=link register: file5_result From c6b26ff7c79cd73c98b161022044f3678ab7378d Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 15 Sep 2014 14:45:57 -0700 Subject: [PATCH 023/813] Remove duplicate call to remove tmp role file --- bin/ansible-galaxy | 2 -- 1 file changed, 2 deletions(-) diff --git a/bin/ansible-galaxy b/bin/ansible-galaxy index 81be7b111a..95d7453e05 100755 --- a/bin/ansible-galaxy +++ b/bin/ansible-galaxy @@ -810,8 +810,6 @@ def execute_install(args, options, parser): else: print '- dependency %s is already installed, skipping.' % dep["name"] if not tmp_file or not installed: - if tmp_file and installed: - os.unlink(tmp_file) print "- %s was NOT installed successfully." 
% role.get("name") exit_without_ignore(options) sys.exit(0) From de77f337968ba78e4c002c7ef4c71b2ae698ef9a Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Fri, 5 Sep 2014 08:42:31 -0500 Subject: [PATCH 024/813] Don't throw away useful stdout and stderr. Fixes #8418 When supplying a sudo password to a server that uses passwordless sudo, we should not throw away useful stdout and stderr. This is particularly important for modules that perform md5 checks as part of the pre module execution. --- lib/ansible/runner/connection_plugins/ssh.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/lib/ansible/runner/connection_plugins/ssh.py b/lib/ansible/runner/connection_plugins/ssh.py index fdf5f0dc6e..cbba765903 100644 --- a/lib/ansible/runner/connection_plugins/ssh.py +++ b/lib/ansible/runner/connection_plugins/ssh.py @@ -301,6 +301,8 @@ class Connection(object): self._send_password() + no_prompt_out = '' + no_prompt_err = '' if (self.runner.sudo and sudoable and self.runner.sudo_pass) or \ (self.runner.su and su and self.runner.su_pass): # several cases are handled for sudo privileges with password @@ -351,6 +353,9 @@ class Connection(object): stdin.write(self.runner.sudo_pass + '\n') elif su: stdin.write(self.runner.su_pass + '\n') + else: + no_prompt_out += sudo_output + no_prompt_err += sudo_errput (returncode, stdout, stderr) = self._communicate(p, stdin, in_data, su=su, sudoable=sudoable, prompt=prompt) @@ -371,7 +376,7 @@ class Connection(object): if p.returncode == 255 and (in_data or self.runner.module_name == 'raw'): raise errors.AnsibleError('SSH Error: data could not be sent to the remote host. 
Make sure this host can be reached over ssh') - return (p.returncode, '', stdout, stderr) + return (p.returncode, '', no_prompt_out + stdout, no_prompt_err + stderr) def put_file(self, in_path, out_path): ''' transfer a file from local to remote ''' From 2303044ffc68f8708529ffe236d41d491d4093ca Mon Sep 17 00:00:00 2001 From: Will Thames Date: Tue, 16 Sep 2014 10:22:36 +1000 Subject: [PATCH 025/813] Applied fix for sudo with no prompt to paramiko Effectively reproduces @sivel's work from #8900 but for the paramiko connection. Fixes #8418 when using paramiko This allows `_remote_md5` to work if a sudo password is passed in when no sudo password is required. --- lib/ansible/runner/connection_plugins/paramiko_ssh.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/lib/ansible/runner/connection_plugins/paramiko_ssh.py b/lib/ansible/runner/connection_plugins/paramiko_ssh.py index 5e3cfc55a9..dc02b047f8 100644 --- a/lib/ansible/runner/connection_plugins/paramiko_ssh.py +++ b/lib/ansible/runner/connection_plugins/paramiko_ssh.py @@ -204,6 +204,8 @@ class Connection(object): msg += ": %s" % str(e) raise errors.AnsibleConnectionFailed(msg) + no_prompt_out = '' + no_prompt_err = '' if not (self.runner.sudo and sudoable) and not (self.runner.su and su): if executable: @@ -259,6 +261,9 @@ class Connection(object): chan.sendall(self.runner.sudo_pass + '\n') elif su: chan.sendall(self.runner.su_pass + '\n') + else: + no_prompt_out += sudo_output + no_prompt_err += sudo_output except socket.timeout: @@ -267,7 +272,7 @@ class Connection(object): stdout = ''.join(chan.makefile('rb', bufsize)) stderr = ''.join(chan.makefile_stderr('rb', bufsize)) - return (chan.recv_exit_status(), '', stdout, stderr) + return (chan.recv_exit_status(), '', no_prompt_out + stdout, no_prompt_out + stderr) def put_file(self, in_path, out_path): ''' transfer a file from local to remote ''' From c1555987d77b19078a6a60e1f5fe5007a31b22c8 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: 
Tue, 16 Sep 2014 10:22:32 -0500 Subject: [PATCH 026/813] Document vault_password_file in intro_configuration --- docsite/rst/intro_configuration.rst | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst index 016faf5e44..5b126dccd9 100644 --- a/docsite/rst/intro_configuration.rst +++ b/docsite/rst/intro_configuration.rst @@ -548,6 +548,20 @@ different locations:: Most users will not need to use this feature. See :doc:`developing_plugins` for more details + +.. _vault_password_file: + +vault_password_file +=================== + +.. versionadded:: 1.7 + +Configures the path to the Vault password file as an alternative to specifying ``--vault-password-file`` on the command line:: + + vault_password_file = /path/to/vault_password_file + +As of 1.7 this file can also be a script. If you are using a script instead of a flat file, ensure that it is marked as executable, and that the password is printed to standard output. If your script needs to prompt for data, prompts can be sent to standard error. + .. _paramiko_settings: Paramiko Specific Settings From c319df78f87d92b1ba8a309d8318436a2454db08 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Tue, 16 Sep 2014 10:31:50 -0500 Subject: [PATCH 027/813] Also document force_color --- docsite/rst/intro_configuration.rst | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst index 5b126dccd9..eb57b2bf18 100644 --- a/docsite/rst/intro_configuration.rst +++ b/docsite/rst/intro_configuration.rst @@ -237,6 +237,15 @@ different locations:: Most users will not need to use this feature. See :doc:`developing_plugins` for more details +.. _force_color: + +force_color +=========== + +This options forces color mode even when running without a TTY:: + + force_color = 1 + .. 
_forks: forks From 61dfeaa43c834867d6310a74555c4c288b6b7d75 Mon Sep 17 00:00:00 2001 From: Ted Timmons Date: Tue, 16 Sep 2014 09:35:20 -0700 Subject: [PATCH 028/813] rearrange description/short_description it's really dorked on [the documentation page](http://docs.ansible.com/ec2_elb_lb_module.html). This puts it in line with other modules. --- library/cloud/ec2_elb_lb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/cloud/ec2_elb_lb b/library/cloud/ec2_elb_lb index f15de26892..be287b0780 100644 --- a/library/cloud/ec2_elb_lb +++ b/library/cloud/ec2_elb_lb @@ -18,9 +18,9 @@ DOCUMENTATION = """ --- module: ec2_elb_lb description: Creates or destroys Amazon ELB. -short_description: Creates or destroys Amazon ELB. - Returns information about the load balancer. - Will be marked changed when called only if state is changed. +short_description: Creates or destroys Amazon ELB. version_added: "1.5" author: Jim Dalton options: From d98bc79f77f6ab86815b274a649cc8e844e13092 Mon Sep 17 00:00:00 2001 From: Ted Timmons Date: Tue, 16 Sep 2014 09:37:01 -0700 Subject: [PATCH 029/813] further cleanup --- library/cloud/ec2_elb_lb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/cloud/ec2_elb_lb b/library/cloud/ec2_elb_lb index be287b0780..462fbbcc79 100644 --- a/library/cloud/ec2_elb_lb +++ b/library/cloud/ec2_elb_lb @@ -17,7 +17,7 @@ DOCUMENTATION = """ --- module: ec2_elb_lb -description: Creates or destroys Amazon ELB. +description: - Returns information about the load balancer. - Will be marked changed when called only if state is changed. short_description: Creates or destroys Amazon ELB. From 79a2e586fed0fc6cfe6ec2d2b4c080885c110b34 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 16 Sep 2014 09:53:13 -0700 Subject: [PATCH 030/813] Make systemd vs rht detection more robust for centos Centos 6.x and below use an old RHT style of configuring hostname. CentOS 7.x and better use systemd. 
Instead of depending on the distribution string which seems to have changed over the course of 6.x we need to explicitly check the version. Fixes #8997 --- library/system/hostname | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/library/system/hostname b/library/system/hostname index 50eaec12ff..a426b59136 100755 --- a/library/system/hostname +++ b/library/system/hostname @@ -367,12 +367,20 @@ class RedHatWorkstationHostname(Hostname): class CentOSHostname(Hostname): platform = 'Linux' distribution = 'Centos' - strategy_class = RedHatStrategy + distribution_version = _get_distribution_version() + if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): + strategy_class = FedoraStrategy + else: + strategy_class = RedHatStrategy class CentOSLinuxHostname(Hostname): platform = 'Linux' distribution = 'Centos linux' - strategy_class = FedoraStrategy + distribution_version = _get_distribution_version() + if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): + strategy_class = FedoraStrategy + else: + strategy_class = RedHatStrategy class ScientificHostname(Hostname): platform = 'Linux' From 5dd92317436b8d488d4dbaf554c1dc98bf0d0332 Mon Sep 17 00:00:00 2001 From: Devin Austin Date: Tue, 16 Sep 2014 11:05:37 -0600 Subject: [PATCH 031/813] updated git module documentation Clarified key_file option for the git module, since it requires the user to specify a private key, not a public key. --- library/source_control/git | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/library/source_control/git b/library/source_control/git index 15ca0fd07a..7cef7788e7 100644 --- a/library/source_control/git +++ b/library/source_control/git @@ -65,8 +65,7 @@ options: default: None version_added: "1.5" description: - - Uses the same wrapper method as ssh_opts to pass - "-i " to the ssh arguments used by git + - Specify an optional private key file to use for the checkout. 
reference: required: false default: null From b376e208c7d9e2fddd19ed964628c5d99bb71150 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 16 Sep 2014 12:03:40 -0500 Subject: [PATCH 032/813] Adding "follow" param for file/copy options Also modifies the template action plugin to use this new param when executing the file/copy modules for templating so that links are preserved correctly. Fixes #8998 --- lib/ansible/module_utils/basic.py | 6 ++++ lib/ansible/runner/action_plugins/template.py | 22 ++++++++------ .../utils/module_docs_fragments/files.py | 8 +++++ library/files/copy | 3 ++ library/files/file | 14 ++++++--- .../roles/test_copy/tasks/main.yml | 30 +++++++++++++++++++ .../roles/test_file/tasks/main.yml | 26 ++++++++++++++++ 7 files changed, 96 insertions(+), 13 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 17e2773e5b..655464d40f 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -151,6 +151,7 @@ FILE_COMMON_ARGUMENTS=dict( serole = dict(), selevel = dict(), setype = dict(), + follow = dict(type='bool', default=False), # not taken by the file module, but other modules call file so it must ignore them. 
content = dict(no_log=True), backup = dict(), @@ -295,6 +296,11 @@ class AnsibleModule(object): else: path = os.path.expanduser(path) + # if the path is a symlink, and we're following links, get + # the target of the link instead for testing + if params.get('follow', False) and os.path.islink(path): + path = os.path.realpath(path) + mode = params.get('mode', None) owner = params.get('owner', None) group = params.get('group', None) diff --git a/lib/ansible/runner/action_plugins/template.py b/lib/ansible/runner/action_plugins/template.py index 9bfd66d3ff..11e37b4815 100644 --- a/lib/ansible/runner/action_plugins/template.py +++ b/lib/ansible/runner/action_plugins/template.py @@ -33,9 +33,6 @@ class ActionModule(object): def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs): ''' handler for template operations ''' - # note: since this module just calls the copy module, the --check mode support - # can be implemented entirely over there - if not self.runner.is_playbook: raise errors.AnsibleError("in current versions of ansible, templates are only usable in playbooks") @@ -121,6 +118,7 @@ class ActionModule(object): src=xfered, dest=dest, original_basename=os.path.basename(source), + follow=True, ) module_args_tmp = utils.merge_module_args(module_args, new_module_args) @@ -132,12 +130,18 @@ class ActionModule(object): res.diff = dict(before=dest_contents, after=resultant) return res else: - # if we're running in check mode, we still want the file module - # to execute, since we can't know if anything would be changed here, - # so we inject the check mode param into the module args and rely on - # the file module to report its changed status + # when running the file module based on the template data, we do + # not want the source filename (the name of the template) to be used, + # since this would mess up links, so we clear the src param and tell + # the module to follow links + new_module_args = dict( + src=None, + follow=True, + ) + # 
be sure to inject the check mode param into the module args and + # rely on the file module to report its changed status if self.runner.noop_on_check(inject): - new_module_args = dict(CHECKMODE=True) - module_args = utils.merge_module_args(module_args, new_module_args) + new_module_args['CHECKMODE'] = True + module_args = utils.merge_module_args(module_args, new_module_args) return self.runner._execute_module(conn, tmp, 'file', module_args, inject=inject, complex_args=complex_args) diff --git a/lib/ansible/utils/module_docs_fragments/files.py b/lib/ansible/utils/module_docs_fragments/files.py index d54d0ece5a..adff1f2f1b 100644 --- a/lib/ansible/utils/module_docs_fragments/files.py +++ b/lib/ansible/utils/module_docs_fragments/files.py @@ -67,4 +67,12 @@ options: - level part of the SELinux file context. This is the MLS/MCS attribute, sometimes known as the C(range). C(_default) feature works as for I(seuser). + follow: + required: false + default: "no" + choices: [ "yes", "no" ] + version_added: "1.8" + description: + - 'This flag indicates that filesystem links, if they exist, should be followed.' 
+ """ diff --git a/library/files/copy b/library/files/copy index 5aef3d7222..eff46dae98 100644 --- a/library/files/copy +++ b/library/files/copy @@ -160,6 +160,7 @@ def main(): force = module.params['force'] original_basename = module.params.get('original_basename',None) validate = module.params.get('validate',None) + follow = module.params['follow'] if not os.path.exists(src): module.fail_json(msg="Source %s failed to transfer" % (src)) @@ -187,6 +188,8 @@ def main(): adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed) if os.path.exists(dest): + if os.path.islink(dest) and follow: + dest = os.path.realpath(dest) if not force: module.exit_json(msg="file already exists", src=src, dest=dest, changed=False) if (os.path.isdir(dest)): diff --git a/library/files/file b/library/files/file index 8bfd94dd98..e25278fded 100644 --- a/library/files/file +++ b/library/files/file @@ -125,6 +125,7 @@ def main(): force = params['force'] diff_peek = params['diff_peek'] src = params['src'] + follow = params['follow'] # modify source as we later reload and pass, specially relevant when used by other modules. 
params['path'] = path = os.path.expanduser(params['path']) @@ -177,15 +178,20 @@ def main(): params['path'] = path = os.path.join(path, basename) else: if state in ['link','hard']: - module.fail_json(msg='src and dest are required for creating links') - - file_args = module.load_file_common_arguments(params) - changed = False + if follow: + # use the current target of the link as the source + src = os.readlink(path) + else: + module.fail_json(msg='src and dest are required for creating links') + # make sure the target path is a directory when we're doing a recursive operation recurse = params['recurse'] if recurse and state != 'directory': module.fail_json(path=path, msg="recurse option requires state to be 'directory'") + file_args = module.load_file_common_arguments(params) + changed = False + if state == 'absent': if state != prev_state: if not module.check_mode: diff --git a/test/integration/roles/test_copy/tasks/main.yml b/test/integration/roles/test_copy/tasks/main.yml index 8c4892bea8..47ed516657 100644 --- a/test/integration/roles/test_copy/tasks/main.yml +++ b/test/integration/roles/test_copy/tasks/main.yml @@ -207,3 +207,33 @@ - name: clean up file: dest=/tmp/worldwritable state=absent +# test overwritting a link using "follow=yes" so that the link +# is preserved and the link target is updated + +- name: create a test file to symlink to + copy: dest={{output_dir}}/follow_test content="this is the follow test file\n" + +- name: create a symlink to the test file + file: path={{output_dir}}/follow_link src='./follow_test' state=link + +- name: update the test file using follow=True to preserve the link + copy: dest={{output_dir}}/follow_link content="this is the new content\n" follow=yes + register: replace_follow_result + +- name: stat the link path + stat: path={{output_dir}}/follow_link + register: stat_link_result + +- name: assert that the link is still a link + assert: + that: + - stat_link_result.stat.islnk + +- name: get the md5 of the link target + 
shell: md5sum {{output_dir}}/follow_test | cut -f1 -sd ' ' + register: target_file_result + +- name: assert that the link target was updated + assert: + that: + - replace_follow_result.md5sum == target_file_result.stdout diff --git a/test/integration/roles/test_file/tasks/main.yml b/test/integration/roles/test_file/tasks/main.yml index 775c173f34..3c3bc81ea1 100644 --- a/test/integration/roles/test_file/tasks/main.yml +++ b/test/integration/roles/test_file/tasks/main.yml @@ -405,3 +405,29 @@ that: - result.mode == '0444' +# test the file module using follow=yes, so that the target of a +# symlink is modified, rather than the link itself + +- name: create a test file + copy: dest={{output_dir}}/test_follow content="this is a test file\n" mode=0666 + +- name: create a symlink to the test file + file: path={{output_dir}}/test_follow_link src="./test_follow" state=link + +- name: modify the permissions on the link using follow=yes + file: path={{output_dir}}/test_follow_link mode=0644 follow=yes + register: result + +- name: assert that the chmod worked + assert: + that: + - result.changed + +- name: stat the link target + stat: path={{output_dir}}/test_follow + register: result + +- name: assert that the link target was modified correctly + assert: + that: + - result.stat.mode == '0644' From fdf126a337cf1bacd24d22697af464534c49bf2c Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 16 Sep 2014 13:20:39 -0500 Subject: [PATCH 033/813] Add note to ec2_vol state param regarding list option added in 1.8 --- library/cloud/ec2_vol | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/cloud/ec2_vol b/library/cloud/ec2_vol index 49d3a60143..0e662a77bd 100644 --- a/library/cloud/ec2_vol +++ b/library/cloud/ec2_vol @@ -94,7 +94,7 @@ options: version_added: "1.5" state: description: - - whether to ensure the volume is present or absent, or to list existing volumes + - whether to ensure the volume is present or absent, or to list existing volumes (The 
C(list) option was added in version 1.8). required: false default: present choices: ['absent', 'present', 'list'] From 733290a7318ad62503bfbb4b663371ef85a72378 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 16 Sep 2014 11:39:15 -0700 Subject: [PATCH 034/813] Fix the case where a docker image is not part of a namespace. Fixes #9016 --- library/cloud/docker | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/library/cloud/docker b/library/cloud/docker index 807572f866..e8f8cc4fa9 100644 --- a/library/cloud/docker +++ b/library/cloud/docker @@ -507,7 +507,10 @@ class DockerManager: def get_split_image_tag(self, image): # If image contains a host or org name, omit that from our check - registry, resource = image.rsplit('/', 1) + if image.find('/') > 0: + registry, resource = image.rsplit('/', 1) + else: + registry, resource = '', image # now we can determine if image has a tag if resource.find(':') > 0: From 9f708305696e685718496b11aa846fa8c6a655fb Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 16 Sep 2014 11:46:09 -0700 Subject: [PATCH 035/813] My fix was wrong again. 
Base this try off of @mantiz's fix in #9016 --- library/cloud/docker | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/library/cloud/docker b/library/cloud/docker index e8f8cc4fa9..57914bb79f 100644 --- a/library/cloud/docker +++ b/library/cloud/docker @@ -507,18 +507,21 @@ class DockerManager: def get_split_image_tag(self, image): # If image contains a host or org name, omit that from our check - if image.find('/') > 0: + if '/' in image: registry, resource = image.rsplit('/', 1) else: - registry, resource = '', image + registry, resource = None, image # now we can determine if image has a tag - if resource.find(':') > 0: + if ':' in resource: resource, tag = resource.split(':', 1) - return '/'.join((registry, resource)), tag + if registry: + resource = '/'.join((registry, resource)) else: tag = "latest" - return image, tag + resource = image + + return resource, tag def get_summary_counters_msg(self): msg = "" From 4a9cf3f3f2924d9543fd17413da255d78c45cc09 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 11 Sep 2014 15:01:34 -0500 Subject: [PATCH 036/813] Allow hostvars variables to be templated Fixes #7844 --- lib/ansible/runner/__init__.py | 62 +++++++++++++++++++++------------- 1 file changed, 38 insertions(+), 24 deletions(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 80263c56fa..7b3e48d90c 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -101,8 +101,8 @@ class HostVars(dict): def __getitem__(self, host): if host not in self.lookup: result = self.inventory.get_variables(host, vault_password=self.vault_password).copy() - result.update(self.vars_cache.get(host, {})) - self.lookup[host] = result + result.update(self.vars_cache) + self.lookup[host] = template.template('.', result, self.vars_cache) return self.lookup[host] @@ -583,24 +583,14 @@ class Runner(object): # ***************************************************** - def _executor_internal(self, 
host, new_stdin): - ''' executes any module one or more times ''' - - host_variables = self.inventory.get_variables(host, vault_password=self.vault_pass) - host_connection = host_variables.get('ansible_connection', self.transport) - if host_connection in [ 'paramiko', 'ssh', 'accelerate' ]: - port = host_variables.get('ansible_ssh_port', self.remote_port) - if port is None: - port = C.DEFAULT_REMOTE_PORT - else: - # fireball, local, etc - port = self.remote_port - + def get_combined_cache(self): # merge the VARS and SETUP caches for this host combined_cache = self.setup_cache.copy() - combined_cache = utils.merge_hash(combined_cache, self.vars_cache) + return utils.merge_hash(combined_cache, self.vars_cache) - hostvars = HostVars(combined_cache, self.inventory, vault_password=self.vault_pass) + def get_inject_vars(self, host): + host_variables = self.inventory.get_variables(host, vault_password=self.vault_pass) + combined_cache = self.get_combined_cache() # use combined_cache and host_variables to template the module_vars # we update the inject variables with the data we're about to template @@ -611,21 +601,45 @@ class Runner(object): inject = {} + # default vars are the lowest priority inject = utils.combine_vars(inject, self.default_vars) + # next come inventory variables for the host inject = utils.combine_vars(inject, host_variables) + # then the setup_cache which contains facts gathered inject = utils.combine_vars(inject, self.setup_cache.get(host, {})) + # then come the module variables inject = utils.combine_vars(inject, module_vars) + # followed by vars (vars, vars_files, vars/main.yml) inject = utils.combine_vars(inject, self.vars_cache.get(host, {})) + # and finally -e vars are the highest priority inject = utils.combine_vars(inject, self.extra_vars) + # and then special vars inject.setdefault('ansible_ssh_user', self.remote_user) - inject['hostvars'] = hostvars - inject['group_names'] = host_variables.get('group_names', []) - inject['groups'] = 
self.inventory.groups_list() - inject['vars'] = self.module_vars - inject['defaults'] = self.default_vars - inject['environment'] = self.environment + inject['group_names'] = host_variables.get('group_names', []) + inject['groups'] = self.inventory.groups_list() + inject['vars'] = self.module_vars + inject['defaults'] = self.default_vars + inject['environment'] = self.environment inject['playbook_dir'] = os.path.abspath(self.basedir) - inject['omit'] = self.omit_token + inject['omit'] = self.omit_token + + return inject + + def _executor_internal(self, host, new_stdin): + ''' executes any module one or more times ''' + + inject = self.get_inject_vars(host) + hostvars = HostVars(inject, self.inventory, vault_password=self.vault_pass) + inject['hostvars'] = hostvars + + host_connection = inject.get('ansible_connection', self.transport) + if host_connection in [ 'paramiko', 'ssh', 'accelerate' ]: + port = hostvars.get('ansible_ssh_port', self.remote_port) + if port is None: + port = C.DEFAULT_REMOTE_PORT + else: + # fireball, local, etc + port = self.remote_port # template this one is available, callbacks use this delegate_to = self.module_vars.get('delegate_to') From 506d4554dcf81fd1783a55f25251470de34e70d3 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Tue, 16 Sep 2014 15:27:40 -0400 Subject: [PATCH 037/813] Fix documentation indent levels. --- docsite/rst/galaxy.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docsite/rst/galaxy.rst b/docsite/rst/galaxy.rst index 0065fd8a8f..7aec0c05d1 100644 --- a/docsite/rst/galaxy.rst +++ b/docsite/rst/galaxy.rst @@ -20,21 +20,21 @@ The ansible-galaxy command line tool The command line ansible-galaxy has many different subcommands. 
Installing Roles -++++++++++++++++ +================ The most obvious is downloading roles from the Ansible Galaxy website:: ansible-galaxy install username.rolename Building out Role Scaffolding -+++++++++++++++++++++++++++++ +============================= It can also be used to initialize the base structure of a new role, saving time on creating the various directories and main.yml files a role requires:: ansible-galaxy init rolename Installing Multiple Roles From A File -+++++++++++++++++++++++++++++++++++++ +===================================== To install multiple roles, the ansible-galaxy CLI can be fed a requirements file. All versions of ansible allow the following syntax for installing roles from the Ansible Galaxy website:: @@ -53,7 +53,7 @@ To request specific versions (tags) of a role, use this syntax in the roles file Available versions will be listed on the Ansible Galaxy webpage for that role. Advanced Control over Role Requirements Files -+++++++++++++++++++++++++++++++++++++++++++++ +============================================= For more advanced control over where to download roles from, including support for remote repositories, Ansible 1.8 and later support a new YAML format for the role requirements file, which must end in a 'yml' extension. It works like this:: From 489574e072e4fd82e2f32298ef3913ccf1a15b8f Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Tue, 16 Sep 2014 15:40:00 -0400 Subject: [PATCH 038/813] Docs indentation - this time with feeling --- docsite/rst/galaxy.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docsite/rst/galaxy.rst b/docsite/rst/galaxy.rst index 7aec0c05d1..e7963a3e7e 100644 --- a/docsite/rst/galaxy.rst +++ b/docsite/rst/galaxy.rst @@ -20,21 +20,21 @@ The ansible-galaxy command line tool The command line ansible-galaxy has many different subcommands. 
Installing Roles -================ +---------------- The most obvious is downloading roles from the Ansible Galaxy website:: ansible-galaxy install username.rolename Building out Role Scaffolding -============================= +----------------------------- It can also be used to initialize the base structure of a new role, saving time on creating the various directories and main.yml files a role requires:: ansible-galaxy init rolename Installing Multiple Roles From A File -===================================== +------------------------------------- To install multiple roles, the ansible-galaxy CLI can be fed a requirements file. All versions of ansible allow the following syntax for installing roles from the Ansible Galaxy website:: @@ -53,7 +53,7 @@ To request specific versions (tags) of a role, use this syntax in the roles file Available versions will be listed on the Ansible Galaxy webpage for that role. Advanced Control over Role Requirements Files -============================================= +--------------------------------------------- For more advanced control over where to download roles from, including support for remote repositories, Ansible 1.8 and later support a new YAML format for the role requirements file, which must end in a 'yml' extension. It works like this:: From 2b0b2f42df4860f39abccfcfc9bae35e7fc23aad Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 16 Sep 2014 15:26:10 -0500 Subject: [PATCH 039/813] CHANGELOG update for ec2 filtering and file "following" param --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 62850280c8..915993a06a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,6 +31,8 @@ New Modules: Some other notable changes: +* added the ability to set "instance filters" in the ec2.ini to limit results from the inventory plugin. 
+* added a new "follow" parameter to the file and copy modules, which allows actions to be taken on the target of a symlink rather than the symlink itself. * if a module should ever traceback, it will return a standard error, catchable by ignore_errors, versus an 'unreachable' * ec2_lc: added support for multiple new parameters like kernel_id, ramdisk_id and ebs_optimized. * ec2_elb_lb: added support for the connection_draining_timeout and cross_az_load_balancing options. From bc583dabbcdc2e3e4d1533590c8cdb2710ffb8e9 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 16 Sep 2014 15:37:36 -0500 Subject: [PATCH 040/813] Removing reference to ansible-pull in bin/ansible-vault --- bin/ansible-vault | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/bin/ansible-vault b/bin/ansible-vault index 09f08d5487..10da6a9ea7 100755 --- a/bin/ansible-vault +++ b/bin/ansible-vault @@ -15,10 +15,8 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # -# ansible-pull is a script that runs ansible in local mode -# after checking out a playbooks directory from source repo. There is an -# example playbook to bootstrap this script in the examples/ dir which -# installs ansible and sets it up to run on cron. +# ansible-vault is a script that encrypts/decrypts YAML files. See +# http://docs.ansible.com/playbooks_vault.html for more details. 
#__requires__ = ['ansible'] #import pkg_resources From 4f03dd65ce182c9b96b1539bc7a2705ec5f51f45 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 16 Sep 2014 21:38:49 -0500 Subject: [PATCH 041/813] Ensure port in docker expose list is a string Fixes #8731 --- library/cloud/docker | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/cloud/docker b/library/cloud/docker index 57914bb79f..a0a52ffc75 100644 --- a/library/cloud/docker +++ b/library/cloud/docker @@ -450,7 +450,7 @@ class DockerManager: if expose_list: exposed = [] for port in expose_list: - port = port.strip() + port = str(port).strip() if port.endswith('/tcp') or port.endswith('/udp'): port_with_proto = tuple(port.split('/')) else: From 28f6a18ef6add2f43b0e9a489f5d914f3b882403 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 17 Sep 2014 02:01:27 -0500 Subject: [PATCH 042/813] Allow git repo queries without a clone when update=no This commit also makes the dest parameter optional, unless update=yes (the default), since it is not required for queries without an update. Fixes #8630 --- library/source_control/git | 42 +++++++++++-------- .../integration/roles/test_git/tasks/main.yml | 13 ++++++ 2 files changed, 38 insertions(+), 17 deletions(-) diff --git a/library/source_control/git b/library/source_control/git index 7cef7788e7..a5d94e3dbb 100644 --- a/library/source_control/git +++ b/library/source_control/git @@ -33,9 +33,12 @@ options: description: - git, SSH, or HTTP protocol address of the git repository. dest: - required: true + required: false description: - Absolute path of where the repository should be checked out to. + This parameter is required, unless C(update) is set to C(no) + This change was made in version 1.8. Prior to this version, the + C(dest) parameter was always required. 
version: required: false default: "HEAD" @@ -474,7 +477,7 @@ def switch_version(git_path, module, dest, remote, version, recursive): def main(): module = AnsibleModule( argument_spec = dict( - dest=dict(required=True), + dest=dict(), repo=dict(required=True, aliases=['name']), version=dict(default='HEAD'), remote=dict(default='origin'), @@ -492,7 +495,7 @@ def main(): supports_check_mode=True ) - dest = os.path.abspath(os.path.expanduser(module.params['dest'])) + dest = module.params['dest'] repo = module.params['repo'] version = module.params['version'] remote = module.params['remote'] @@ -502,9 +505,18 @@ def main(): bare = module.params['bare'] reference = module.params['reference'] git_path = module.params['executable'] or module.get_bin_path('git', True) - - key_file = module.params['key_file'] - ssh_opts = module.params['ssh_opts'] + key_file = module.params['key_file'] + ssh_opts = module.params['ssh_opts'] + + gitconfig = None + if not dest and update: + module.fail_json(msg="the destination directory must be specified unless update=no") + elif dest: + dest = os.path.abspath(os.path.expanduser(dest)) + if bare: + gitconfig = os.path.join(dest, 'config') + else: + gitconfig = os.path.join(dest, '.git', 'config') # create a wrapper script and export # GIT_SSH= as an environment variable @@ -524,23 +536,19 @@ def main(): recursive = module.params['recursive'] - if bare: - gitconfig = os.path.join(dest, 'config') - else: - gitconfig = os.path.join(dest, '.git', 'config') - rc, out, err, status = (0, None, None, None) - # if there is no git configuration, do a clone operation - # else pull and switch the version before = None local_mods = False - if not os.path.exists(gitconfig): - if module.check_mode: + if gitconfig and not os.path.exists(gitconfig) or not gitconfig and not update: + # if there is no git configuration, do a clone operation unless the + # user requested no updates or we're doing a check mode test (in + # which case we do a ls-remote), otherwise 
clone the repo + if module.check_mode or not update: remote_head = get_remote_head(git_path, module, dest, version, repo, bare) module.exit_json(changed=True, before=before, after=remote_head) - clone(git_path, module, repo, dest, remote, depth, version, bare, - reference, recursive) + # there's no git config, so clone + clone(git_path, module, repo, dest, remote, depth, version, bare, reference, recursive) elif not update: # Just return having found a repo already in the dest path # this does no checking that the repo is the actual repo diff --git a/test/integration/roles/test_git/tasks/main.yml b/test/integration/roles/test_git/tasks/main.yml index 34f2879c5b..93774afb46 100644 --- a/test/integration/roles/test_git/tasks/main.yml +++ b/test/integration/roles/test_git/tasks/main.yml @@ -134,3 +134,16 @@ that: - 'git_result.changed' when: not git_result|skipped + +# Test a non-updating repo query with no destination specified + +- name: get info on a repo without updating and with no destination specified + git: + repo: '{{ repo_format1 }}' + update: no + accept_hostkey: yes + register: git_result + +- assert: + that: + - 'git_result.changed' From e9229cfeaa7727bfb66642cc553a51f9add81e6d Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 17 Sep 2014 02:32:09 -0500 Subject: [PATCH 043/813] Revert to using just the combined_cache for HostVars --- lib/ansible/runner/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 7b3e48d90c..aa0ee670f2 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -615,6 +615,7 @@ class Runner(object): inject = utils.combine_vars(inject, self.extra_vars) # and then special vars inject.setdefault('ansible_ssh_user', self.remote_user) + inject['combined_cache'] = combined_cache inject['group_names'] = host_variables.get('group_names', []) inject['groups'] = self.inventory.groups_list() inject['vars'] = 
self.module_vars @@ -629,7 +630,7 @@ class Runner(object): ''' executes any module one or more times ''' inject = self.get_inject_vars(host) - hostvars = HostVars(inject, self.inventory, vault_password=self.vault_pass) + hostvars = HostVars(inject['combined_cache'], self.inventory, vault_password=self.vault_pass) inject['hostvars'] = hostvars host_connection = inject.get('ansible_connection', self.transport) From e294e31fd3e083cbf19e26fd551ed4f7e2f38e37 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 16 Sep 2014 16:12:09 -0700 Subject: [PATCH 044/813] Refactor the Linux service_enable() method * Fix check_mode for initctl systems Fixes #9009 --- library/system/service | 141 ++++++++++++++++++++++++++--------------- 1 file changed, 89 insertions(+), 52 deletions(-) diff --git a/library/system/service b/library/system/service index 2da5e53b01..95ed56cf85 100644 --- a/library/system/service +++ b/library/system/service @@ -587,10 +587,16 @@ class LinuxService(Service): if self.enable_cmd is None: self.module.fail_json(msg='cannot detect command to enable service %s, typo or init system potentially unknown' % self.name) + self.changed = True + action = None + # FIXME: we use chkconfig or systemctl # to decide whether to run the command here but need something # similar for upstart + # + # Upstart's initctl + # if self.enable_cmd.endswith("initctl"): def write_to_override_file(file_name, file_contents, ): override_file = open(file_name, 'w') @@ -611,23 +617,48 @@ class LinuxService(Service): if manreg.search(open(conf_file_name).read()): self.module.fail_json(msg="manual stanza not supported in a .conf file") + self.changed = False if os.path.exists(override_file_name): override_file_contents = open(override_file_name).read() # Remove manual stanza if present and service enabled if self.enable and manreg.search(override_file_contents): - write_to_override_file(override_file_name, manreg.sub('', override_file_contents)) + self.changed = True + override_state = 
manreg.sub('', override_file_contents) # Add manual stanza if not present and service disabled elif not (self.enable) and not (manreg.search(override_file_contents)): - write_to_override_file(override_file_name, override_file_contents + '\n' + config_line) + self.changed = True + override_state = '\n'.join((override_file_contents, config_line)) + # service already in desired state else: - return + pass # Add file with manual stanza if service disabled elif not (self.enable): - write_to_override_file(override_file_name, config_line) + self.changed = True + override_state = config_line else: - return + # service already in desired state + pass + if self.module.check_mode: + self.module.exit_json(changed=self.changed) + + # The initctl method of enabling and disabling services is much + # different than for the other service methods. So actually + # committing the change is done in this conditional and then we + # skip the boilerplate at the bottom of the method + if self.changed: + write_to_override_file(override_file_name, override_state) + return + + # + # SysV's chkconfig + # if self.enable_cmd.endswith("chkconfig"): + if self.enable: + action = 'on' + else: + action = 'off' + (rc, out, err) = self.execute_command("%s --list %s" % (self.enable_cmd, self.name)) if 'chkconfig --add %s' % self.name in err: self.execute_command("%s --add %s" % (self.enable_cmd, self.name)) @@ -635,22 +666,42 @@ class LinuxService(Service): if not self.name in out: self.module.fail_json(msg="service %s does not support chkconfig" % self.name) state = out.split()[-1] - if self.enable and ( "3:on" in out and "5:on" in out ): - return - elif not self.enable and ( "3:off" in out and "5:off" in out ): + + # Check if we're already in the correct state + if "3:%s" % action in out and "5:%s" % action in out: return + # + # Systemd's systemctl + # if self.enable_cmd.endswith("systemctl"): + if self.enable: + action = 'enable' + else: + action = 'disable' + + # Check if we're already in the 
correct state d = self.get_systemd_status_dict() if "UnitFileState" in d: if self.enable and d["UnitFileState"] == "enabled": - return + self.changed = False elif not self.enable and d["UnitFileState"] == "disabled": - return + self.changed = False elif not self.enable: + self.changed = False + + if not self.changed: return + # + # OpenRC's rc-update + # if self.enable_cmd.endswith("rc-update"): + if self.enable: + action = 'add' + else: + action = 'delete' + (rc, out, err) = self.execute_command("%s show" % self.enable_cmd) for line in out.splitlines(): service_name, runlevels = line.split('|') @@ -660,15 +711,18 @@ class LinuxService(Service): runlevels = re.split(r'\s+', runlevels) # service already enabled for the runlevel if self.enable and self.runlevel in runlevels: - return + self.changed = False # service already disabled for the runlevel elif not self.enable and self.runlevel not in runlevels: - return + self.changed = False break else: # service already disabled altogether if not self.enable: - return + self.changed = False + + if not self.changed: + return if self.enable_cmd.endswith("update-rc.d"): if self.enable: @@ -676,6 +730,14 @@ class LinuxService(Service): else: action = 'disable' + if self.enable: + # make sure the init.d symlinks are created + # otherwise enable might not work + (rc, out, err) = self.execute_command("%s %s defaults" \ + % (self.enable_cmd, self.name)) + if rc != 0: + return (rc, out, err) + (rc, out, err) = self.execute_command("%s -n %s %s" \ % (self.enable_cmd, self.name, action)) self.changed = False @@ -696,51 +758,26 @@ class LinuxService(Service): self.changed = True break - if self.module.check_mode: - self.module.exit_json(changed=self.changed) - if not self.changed: return - if self.enable: - # make sure the init.d symlinks are created - # otherwise enable might not work - (rc, out, err) = self.execute_command("%s %s defaults" \ - % (self.enable_cmd, self.name)) - if rc != 0: - return (rc, out, err) - - return 
self.execute_command("%s %s enable" % (self.enable_cmd, self.name)) - else: - return self.execute_command("%s %s disable" % (self.enable_cmd, - self.name)) - - # we change argument depending on real binary used: - # - update-rc.d and systemctl wants enable/disable - # - chkconfig wants on/off - # - rc-update wants add/delete - # also, rc-update and systemctl needs the argument order reversed - if self.enable: - on_off = "on" - enable_disable = "enable" - add_delete = "add" - else: - on_off = "off" - enable_disable = "disable" - add_delete = "delete" - - if self.enable_cmd.endswith("rc-update"): - args = (self.enable_cmd, add_delete, self.name + " " + self.runlevel) - elif self.enable_cmd.endswith("systemctl"): - args = (self.enable_cmd, enable_disable, self.__systemd_unit) - else: - args = (self.enable_cmd, self.name, on_off) - + # If we've gotten to the end, the service needs to be updated self.changed = True - if self.module.check_mode and self.changed: - self.module.exit_json(changed=True) + # we change argument order depending on real binary used: + # rc-update and systemctl need the argument order reversed + if self.enable_cmd.endswith("rc-update"): + args = (self.enable_cmd, action, self.name + " " + self.runlevel) + elif self.enable_cmd.endswith("systemctl"): + args = (self.enable_cmd, action, self.__systemd_unit) + else: + args = (self.enable_cmd, self.name, action) + + if self.module.check_mode: + self.module.exit_json(changed=self.changed) + + self.module.fail_json(msg=self.execute_command("%s %s %s" % args)) return self.execute_command("%s %s %s" % args) From 3180a3745783a281c995e0ae051abdb4ea31cc94 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 17 Sep 2014 10:22:38 -0500 Subject: [PATCH 045/813] Revert "Revert to using just the combined_cache for HostVars" This reverts commit e9229cfeaa7727bfb66642cc553a51f9add81e6d. 
--- lib/ansible/runner/__init__.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index aa0ee670f2..7b3e48d90c 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -615,7 +615,6 @@ class Runner(object): inject = utils.combine_vars(inject, self.extra_vars) # and then special vars inject.setdefault('ansible_ssh_user', self.remote_user) - inject['combined_cache'] = combined_cache inject['group_names'] = host_variables.get('group_names', []) inject['groups'] = self.inventory.groups_list() inject['vars'] = self.module_vars @@ -630,7 +629,7 @@ class Runner(object): ''' executes any module one or more times ''' inject = self.get_inject_vars(host) - hostvars = HostVars(inject['combined_cache'], self.inventory, vault_password=self.vault_pass) + hostvars = HostVars(inject, self.inventory, vault_password=self.vault_pass) inject['hostvars'] = hostvars host_connection = inject.get('ansible_connection', self.transport) From ed3c9c40ca8b52f54e3ae1cf23184e7f5b2df262 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 17 Sep 2014 11:03:36 -0500 Subject: [PATCH 046/813] Redoing patch for hostvars fix, since the previous one broke other tests --- lib/ansible/runner/__init__.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 7b3e48d90c..8ec2d1a7ca 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -101,7 +101,7 @@ class HostVars(dict): def __getitem__(self, host): if host not in self.lookup: result = self.inventory.get_variables(host, vault_password=self.vault_password).copy() - result.update(self.vars_cache) + result.update(self.vars_cache.get(host, {})) self.lookup[host] = template.template('.', result, self.vars_cache) return self.lookup[host] @@ -622,6 +622,7 @@ class Runner(object): inject['environment'] = self.environment 
inject['playbook_dir'] = os.path.abspath(self.basedir) inject['omit'] = self.omit_token + inject['combined_cache'] = combined_cache return inject @@ -629,7 +630,7 @@ class Runner(object): ''' executes any module one or more times ''' inject = self.get_inject_vars(host) - hostvars = HostVars(inject, self.inventory, vault_password=self.vault_pass) + hostvars = HostVars(inject['combined_cache'], self.inventory, vault_password=self.vault_pass) inject['hostvars'] = hostvars host_connection = inject.get('ansible_connection', self.transport) From 24a33d5c10f5a96417641e6001776bf70ea808d1 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 17 Sep 2014 09:16:56 -0700 Subject: [PATCH 047/813] Revert "Refactor the Linux service_enable() method" needs some debugging This reverts commit e294e31fd3e083cbf19e26fd551ed4f7e2f38e37. --- library/system/service | 133 +++++++++++++++-------------------------- 1 file changed, 48 insertions(+), 85 deletions(-) diff --git a/library/system/service b/library/system/service index 95ed56cf85..2da5e53b01 100644 --- a/library/system/service +++ b/library/system/service @@ -587,16 +587,10 @@ class LinuxService(Service): if self.enable_cmd is None: self.module.fail_json(msg='cannot detect command to enable service %s, typo or init system potentially unknown' % self.name) - self.changed = True - action = None - # FIXME: we use chkconfig or systemctl # to decide whether to run the command here but need something # similar for upstart - # - # Upstart's initctl - # if self.enable_cmd.endswith("initctl"): def write_to_override_file(file_name, file_contents, ): override_file = open(file_name, 'w') @@ -617,48 +611,23 @@ class LinuxService(Service): if manreg.search(open(conf_file_name).read()): self.module.fail_json(msg="manual stanza not supported in a .conf file") - self.changed = False if os.path.exists(override_file_name): override_file_contents = open(override_file_name).read() # Remove manual stanza if present and service enabled if 
self.enable and manreg.search(override_file_contents): - self.changed = True - override_state = manreg.sub('', override_file_contents) + write_to_override_file(override_file_name, manreg.sub('', override_file_contents)) # Add manual stanza if not present and service disabled elif not (self.enable) and not (manreg.search(override_file_contents)): - self.changed = True - override_state = '\n'.join((override_file_contents, config_line)) - # service already in desired state + write_to_override_file(override_file_name, override_file_contents + '\n' + config_line) else: - pass + return # Add file with manual stanza if service disabled elif not (self.enable): - self.changed = True - override_state = config_line + write_to_override_file(override_file_name, config_line) else: - # service already in desired state - pass + return - if self.module.check_mode: - self.module.exit_json(changed=self.changed) - - # The initctl method of enabling and disabling services is much - # different than for the other service methods. 
So actually - # committing the change is done in this conditional and then we - # skip the boilerplate at the bottom of the method - if self.changed: - write_to_override_file(override_file_name, override_state) - return - - # - # SysV's chkconfig - # if self.enable_cmd.endswith("chkconfig"): - if self.enable: - action = 'on' - else: - action = 'off' - (rc, out, err) = self.execute_command("%s --list %s" % (self.enable_cmd, self.name)) if 'chkconfig --add %s' % self.name in err: self.execute_command("%s --add %s" % (self.enable_cmd, self.name)) @@ -666,42 +635,22 @@ class LinuxService(Service): if not self.name in out: self.module.fail_json(msg="service %s does not support chkconfig" % self.name) state = out.split()[-1] - - # Check if we're already in the correct state - if "3:%s" % action in out and "5:%s" % action in out: + if self.enable and ( "3:on" in out and "5:on" in out ): + return + elif not self.enable and ( "3:off" in out and "5:off" in out ): return - # - # Systemd's systemctl - # if self.enable_cmd.endswith("systemctl"): - if self.enable: - action = 'enable' - else: - action = 'disable' - - # Check if we're already in the correct state d = self.get_systemd_status_dict() if "UnitFileState" in d: if self.enable and d["UnitFileState"] == "enabled": - self.changed = False + return elif not self.enable and d["UnitFileState"] == "disabled": - self.changed = False + return elif not self.enable: - self.changed = False - - if not self.changed: return - # - # OpenRC's rc-update - # if self.enable_cmd.endswith("rc-update"): - if self.enable: - action = 'add' - else: - action = 'delete' - (rc, out, err) = self.execute_command("%s show" % self.enable_cmd) for line in out.splitlines(): service_name, runlevels = line.split('|') @@ -711,18 +660,15 @@ class LinuxService(Service): runlevels = re.split(r'\s+', runlevels) # service already enabled for the runlevel if self.enable and self.runlevel in runlevels: - self.changed = False + return # service already disabled for 
the runlevel elif not self.enable and self.runlevel not in runlevels: - self.changed = False + return break else: # service already disabled altogether if not self.enable: - self.changed = False - - if not self.changed: - return + return if self.enable_cmd.endswith("update-rc.d"): if self.enable: @@ -730,14 +676,6 @@ class LinuxService(Service): else: action = 'disable' - if self.enable: - # make sure the init.d symlinks are created - # otherwise enable might not work - (rc, out, err) = self.execute_command("%s %s defaults" \ - % (self.enable_cmd, self.name)) - if rc != 0: - return (rc, out, err) - (rc, out, err) = self.execute_command("%s -n %s %s" \ % (self.enable_cmd, self.name, action)) self.changed = False @@ -758,26 +696,51 @@ class LinuxService(Service): self.changed = True break + if self.module.check_mode: + self.module.exit_json(changed=self.changed) + if not self.changed: return - # If we've gotten to the end, the service needs to be updated - self.changed = True + if self.enable: + # make sure the init.d symlinks are created + # otherwise enable might not work + (rc, out, err) = self.execute_command("%s %s defaults" \ + % (self.enable_cmd, self.name)) + if rc != 0: + return (rc, out, err) - # we change argument order depending on real binary used: - # rc-update and systemctl need the argument order reversed + return self.execute_command("%s %s enable" % (self.enable_cmd, self.name)) + else: + return self.execute_command("%s %s disable" % (self.enable_cmd, + self.name)) + + # we change argument depending on real binary used: + # - update-rc.d and systemctl wants enable/disable + # - chkconfig wants on/off + # - rc-update wants add/delete + # also, rc-update and systemctl needs the argument order reversed + if self.enable: + on_off = "on" + enable_disable = "enable" + add_delete = "add" + else: + on_off = "off" + enable_disable = "disable" + add_delete = "delete" if self.enable_cmd.endswith("rc-update"): - args = (self.enable_cmd, action, self.name + " " 
+ self.runlevel) + args = (self.enable_cmd, add_delete, self.name + " " + self.runlevel) elif self.enable_cmd.endswith("systemctl"): - args = (self.enable_cmd, action, self.__systemd_unit) + args = (self.enable_cmd, enable_disable, self.__systemd_unit) else: - args = (self.enable_cmd, self.name, action) + args = (self.enable_cmd, self.name, on_off) - if self.module.check_mode: - self.module.exit_json(changed=self.changed) + self.changed = True + + if self.module.check_mode and self.changed: + self.module.exit_json(changed=True) - self.module.fail_json(msg=self.execute_command("%s %s %s" % args)) return self.execute_command("%s %s %s" % args) From 8dfc54b51780f6c5bb2d9938baead5c42382e60d Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 16 Sep 2014 16:12:09 -0700 Subject: [PATCH 048/813] Refactor the Linux service_enable() method * Fix check_mode for initctl systems Fixes #9009 --- library/system/service | 141 ++++++++++++++++++++++++++--------------- 1 file changed, 89 insertions(+), 52 deletions(-) diff --git a/library/system/service b/library/system/service index 2da5e53b01..95ed56cf85 100644 --- a/library/system/service +++ b/library/system/service @@ -587,10 +587,16 @@ class LinuxService(Service): if self.enable_cmd is None: self.module.fail_json(msg='cannot detect command to enable service %s, typo or init system potentially unknown' % self.name) + self.changed = True + action = None + # FIXME: we use chkconfig or systemctl # to decide whether to run the command here but need something # similar for upstart + # + # Upstart's initctl + # if self.enable_cmd.endswith("initctl"): def write_to_override_file(file_name, file_contents, ): override_file = open(file_name, 'w') @@ -611,23 +617,48 @@ class LinuxService(Service): if manreg.search(open(conf_file_name).read()): self.module.fail_json(msg="manual stanza not supported in a .conf file") + self.changed = False if os.path.exists(override_file_name): override_file_contents = open(override_file_name).read() 
# Remove manual stanza if present and service enabled if self.enable and manreg.search(override_file_contents): - write_to_override_file(override_file_name, manreg.sub('', override_file_contents)) + self.changed = True + override_state = manreg.sub('', override_file_contents) # Add manual stanza if not present and service disabled elif not (self.enable) and not (manreg.search(override_file_contents)): - write_to_override_file(override_file_name, override_file_contents + '\n' + config_line) + self.changed = True + override_state = '\n'.join((override_file_contents, config_line)) + # service already in desired state else: - return + pass # Add file with manual stanza if service disabled elif not (self.enable): - write_to_override_file(override_file_name, config_line) + self.changed = True + override_state = config_line else: - return + # service already in desired state + pass + if self.module.check_mode: + self.module.exit_json(changed=self.changed) + + # The initctl method of enabling and disabling services is much + # different than for the other service methods. 
So actually + # committing the change is done in this conditional and then we + # skip the boilerplate at the bottom of the method + if self.changed: + write_to_override_file(override_file_name, override_state) + return + + # + # SysV's chkconfig + # if self.enable_cmd.endswith("chkconfig"): + if self.enable: + action = 'on' + else: + action = 'off' + (rc, out, err) = self.execute_command("%s --list %s" % (self.enable_cmd, self.name)) if 'chkconfig --add %s' % self.name in err: self.execute_command("%s --add %s" % (self.enable_cmd, self.name)) @@ -635,22 +666,42 @@ class LinuxService(Service): if not self.name in out: self.module.fail_json(msg="service %s does not support chkconfig" % self.name) state = out.split()[-1] - if self.enable and ( "3:on" in out and "5:on" in out ): - return - elif not self.enable and ( "3:off" in out and "5:off" in out ): + + # Check if we're already in the correct state + if "3:%s" % action in out and "5:%s" % action in out: return + # + # Systemd's systemctl + # if self.enable_cmd.endswith("systemctl"): + if self.enable: + action = 'enable' + else: + action = 'disable' + + # Check if we're already in the correct state d = self.get_systemd_status_dict() if "UnitFileState" in d: if self.enable and d["UnitFileState"] == "enabled": - return + self.changed = False elif not self.enable and d["UnitFileState"] == "disabled": - return + self.changed = False elif not self.enable: + self.changed = False + + if not self.changed: return + # + # OpenRC's rc-update + # if self.enable_cmd.endswith("rc-update"): + if self.enable: + action = 'add' + else: + action = 'delete' + (rc, out, err) = self.execute_command("%s show" % self.enable_cmd) for line in out.splitlines(): service_name, runlevels = line.split('|') @@ -660,15 +711,18 @@ class LinuxService(Service): runlevels = re.split(r'\s+', runlevels) # service already enabled for the runlevel if self.enable and self.runlevel in runlevels: - return + self.changed = False # service already disabled for 
the runlevel elif not self.enable and self.runlevel not in runlevels: - return + self.changed = False break else: # service already disabled altogether if not self.enable: - return + self.changed = False + + if not self.changed: + return if self.enable_cmd.endswith("update-rc.d"): if self.enable: @@ -676,6 +730,14 @@ class LinuxService(Service): else: action = 'disable' + if self.enable: + # make sure the init.d symlinks are created + # otherwise enable might not work + (rc, out, err) = self.execute_command("%s %s defaults" \ + % (self.enable_cmd, self.name)) + if rc != 0: + return (rc, out, err) + (rc, out, err) = self.execute_command("%s -n %s %s" \ % (self.enable_cmd, self.name, action)) self.changed = False @@ -696,51 +758,26 @@ class LinuxService(Service): self.changed = True break - if self.module.check_mode: - self.module.exit_json(changed=self.changed) - if not self.changed: return - if self.enable: - # make sure the init.d symlinks are created - # otherwise enable might not work - (rc, out, err) = self.execute_command("%s %s defaults" \ - % (self.enable_cmd, self.name)) - if rc != 0: - return (rc, out, err) - - return self.execute_command("%s %s enable" % (self.enable_cmd, self.name)) - else: - return self.execute_command("%s %s disable" % (self.enable_cmd, - self.name)) - - # we change argument depending on real binary used: - # - update-rc.d and systemctl wants enable/disable - # - chkconfig wants on/off - # - rc-update wants add/delete - # also, rc-update and systemctl needs the argument order reversed - if self.enable: - on_off = "on" - enable_disable = "enable" - add_delete = "add" - else: - on_off = "off" - enable_disable = "disable" - add_delete = "delete" - - if self.enable_cmd.endswith("rc-update"): - args = (self.enable_cmd, add_delete, self.name + " " + self.runlevel) - elif self.enable_cmd.endswith("systemctl"): - args = (self.enable_cmd, enable_disable, self.__systemd_unit) - else: - args = (self.enable_cmd, self.name, on_off) - + # If we've 
gotten to the end, the service needs to be updated self.changed = True - if self.module.check_mode and self.changed: - self.module.exit_json(changed=True) + # we change argument order depending on real binary used: + # rc-update and systemctl need the argument order reversed + if self.enable_cmd.endswith("rc-update"): + args = (self.enable_cmd, action, self.name + " " + self.runlevel) + elif self.enable_cmd.endswith("systemctl"): + args = (self.enable_cmd, action, self.__systemd_unit) + else: + args = (self.enable_cmd, self.name, action) + + if self.module.check_mode: + self.module.exit_json(changed=self.changed) + + self.module.fail_json(msg=self.execute_command("%s %s %s" % args)) return self.execute_command("%s %s %s" % args) From 5ec8c28d2a6311152f8ff3bad807fd23d0597e38 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 17 Sep 2014 10:25:54 -0700 Subject: [PATCH 049/813] remove debugging that was causing failure --- library/system/service | 1 - 1 file changed, 1 deletion(-) diff --git a/library/system/service b/library/system/service index 95ed56cf85..ef78f3d5d5 100644 --- a/library/system/service +++ b/library/system/service @@ -777,7 +777,6 @@ class LinuxService(Service): if self.module.check_mode: self.module.exit_json(changed=self.changed) - self.module.fail_json(msg=self.execute_command("%s %s %s" % args)) return self.execute_command("%s %s %s" % args) From c3612e08f938aad02d1eb379ff8a913e07b435a6 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 16 Sep 2014 20:51:00 -0500 Subject: [PATCH 050/813] Move delegate_to out of runner module_vars to prevent bleeding across runs Previously, the delegate_to value was stored in the module_vars of runner, which could lead to bleeding that value across runs and incorrect hosts being delegated to. This patch moves the value to a local variable in the Runner class with some related refactoring of _compute_delegate() in Runner (since the value is no longer required to be a parameter). 
Fixes #8705 --- lib/ansible/callbacks.py | 20 ++++++--------- lib/ansible/runner/__init__.py | 45 ++++++++++++++-------------------- 2 files changed, 27 insertions(+), 38 deletions(-) diff --git a/lib/ansible/callbacks.py b/lib/ansible/callbacks.py index 2042dbc93a..03b7b8a853 100644 --- a/lib/ansible/callbacks.py +++ b/lib/ansible/callbacks.py @@ -450,9 +450,8 @@ class PlaybookRunnerCallbacks(DefaultRunnerCallbacks): self._async_notified = {} def on_unreachable(self, host, results): - delegate_to = self.runner.module_vars.get('delegate_to') - if delegate_to: - host = '%s -> %s' % (host, delegate_to) + if self.runner.delegate_to: + host = '%s -> %s' % (host, self.runner.delegate_to) item = None if type(results) == dict: @@ -465,9 +464,8 @@ class PlaybookRunnerCallbacks(DefaultRunnerCallbacks): super(PlaybookRunnerCallbacks, self).on_unreachable(host, results) def on_failed(self, host, results, ignore_errors=False): - delegate_to = self.runner.module_vars.get('delegate_to') - if delegate_to: - host = '%s -> %s' % (host, delegate_to) + if self.runner.delegate_to: + host = '%s -> %s' % (host, self.runner.delegate_to) results2 = results.copy() results2.pop('invocation', None) @@ -500,9 +498,8 @@ class PlaybookRunnerCallbacks(DefaultRunnerCallbacks): super(PlaybookRunnerCallbacks, self).on_failed(host, results, ignore_errors=ignore_errors) def on_ok(self, host, host_result): - delegate_to = self.runner.module_vars.get('delegate_to') - if delegate_to: - host = '%s -> %s' % (host, delegate_to) + if self.runner.delegate_to: + host = '%s -> %s' % (host, self.runner.delegate_to) item = host_result.get('item', None) @@ -542,9 +539,8 @@ class PlaybookRunnerCallbacks(DefaultRunnerCallbacks): super(PlaybookRunnerCallbacks, self).on_ok(host, host_result) def on_skipped(self, host, item=None): - delegate_to = self.runner.module_vars.get('delegate_to') - if delegate_to: - host = '%s -> %s' % (host, delegate_to) + if self.runner.delegate_to: + host = '%s -> %s' % (host, 
self.runner.delegate_to) if constants.DISPLAY_SKIPPED_HOSTS: msg = '' diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 8ec2d1a7ca..a1133fdbad 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -181,6 +181,7 @@ class Runner(object): self.always_run = None self.connector = connection.Connector(self) self.conditional = conditional + self.delegate_to = None self.module_name = module_name self.forks = int(forks) self.pattern = pattern @@ -312,16 +313,13 @@ class Runner(object): # ***************************************************** - def _compute_delegate(self, host, password, remote_inject): + def _compute_delegate(self, password, remote_inject): """ Build a dictionary of all attributes for the delegate host """ delegate = {} # allow delegated host to be templated - delegate['host'] = template.template(self.basedir, host, - remote_inject, fail_on_undefined=True) - delegate['inject'] = remote_inject.copy() # set any interpreters @@ -333,36 +331,33 @@ class Runner(object): del delegate['inject'][i] port = C.DEFAULT_REMOTE_PORT - this_host = delegate['host'] - # get the vars for the delegate by its name try: - this_info = delegate['inject']['hostvars'][this_host] + this_info = delegate['inject']['hostvars'][self.delegate_to] except: # make sure the inject is empty for non-inventory hosts this_info = {} # get the real ssh_address for the delegate # and allow ansible_ssh_host to be templated - delegate['ssh_host'] = template.template(self.basedir, - this_info.get('ansible_ssh_host', this_host), - this_info, fail_on_undefined=True) + delegate['ssh_host'] = template.template( + self.basedir, + this_info.get('ansible_ssh_host', self.delegate_to), + this_info, + fail_on_undefined=True + ) delegate['port'] = this_info.get('ansible_ssh_port', port) - - delegate['user'] = self._compute_delegate_user(this_host, delegate['inject']) - + delegate['user'] = self._compute_delegate_user(self.delegate_to, 
delegate['inject']) delegate['pass'] = this_info.get('ansible_ssh_pass', password) - delegate['private_key_file'] = this_info.get('ansible_ssh_private_key_file', - self.private_key_file) + delegate['private_key_file'] = this_info.get('ansible_ssh_private_key_file', self.private_key_file) delegate['transport'] = this_info.get('ansible_connection', self.transport) delegate['sudo_pass'] = this_info.get('ansible_sudo_pass', self.sudo_pass) # Last chance to get private_key_file from global variables. # this is useful if delegated host is not defined in the inventory if delegate['private_key_file'] is None: - delegate['private_key_file'] = remote_inject.get( - 'ansible_ssh_private_key_file', None) + delegate['private_key_file'] = remote_inject.get('ansible_ssh_private_key_file', None) if delegate['private_key_file'] is not None: delegate['private_key_file'] = os.path.expanduser(delegate['private_key_file']) @@ -642,11 +637,6 @@ class Runner(object): # fireball, local, etc port = self.remote_port - # template this one is available, callbacks use this - delegate_to = self.module_vars.get('delegate_to') - if delegate_to: - self.module_vars['delegate_to'] = template.template(self.basedir, delegate_to, inject) - if self.inventory.basedir() is not None: inject['inventory_dir'] = self.inventory.basedir() @@ -846,9 +836,12 @@ class Runner(object): # the delegated host may have different SSH port configured, etc # and we need to transfer those, and only those, variables - delegate_to = inject.get('delegate_to', None) - if delegate_to is not None: - delegate = self._compute_delegate(delegate_to, actual_pass, inject) + self.delegate_to = inject.get('delegate_to', None) + if self.delegate_to: + self.delegate_to = template.template(self.basedir, self.delegate_to, inject) + + if self.delegate_to is not None: + delegate = self._compute_delegate(actual_pass, inject) actual_transport = delegate['transport'] actual_host = delegate['ssh_host'] actual_port = delegate['port'] @@ -880,7 
+873,7 @@ class Runner(object): try: conn = self.connector.connect(actual_host, actual_port, actual_user, actual_pass, actual_transport, actual_private_key_file) - if delegate_to or host != actual_host: + if self.delegate_to or host != actual_host: conn.delegate = host default_shell = getattr(conn, 'default_shell', '') From 6b1ae82accea5e1401fe9ea749f58a33ae697bb4 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 17 Sep 2014 15:20:05 -0500 Subject: [PATCH 051/813] Allow filtering by availability zone for exact_count with ec2 Fixes #8863 --- library/cloud/ec2 | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/library/cloud/ec2 b/library/cloud/ec2 index cb14d9b851..a4776c74b8 100644 --- a/library/cloud/ec2 +++ b/library/cloud/ec2 @@ -483,10 +483,10 @@ except ImportError: print "failed=True msg='boto required for this module'" sys.exit(1) -def find_running_instances_by_count_tag(module, ec2, count_tag): +def find_running_instances_by_count_tag(module, ec2, count_tag, zone=None): # get reservations for instances that match tag(s) and are running - reservations = get_reservations(module, ec2, tags=count_tag, state="running") + reservations = get_reservations(module, ec2, tags=count_tag, state="running", zone=zone) instances = [] for res in reservations: @@ -507,7 +507,7 @@ def _set_none_to_blank(dictionary): return result -def get_reservations(module, ec2, tags=None, state=None): +def get_reservations(module, ec2, tags=None, state=None, zone=None): # TODO: filters do not work with tags that have underscores filters = dict() @@ -542,6 +542,9 @@ def get_reservations(module, ec2, tags=None, state=None): # http://stackoverflow.com/questions/437511/what-are-the-valid-instancestates-for-the-amazon-ec2-api filters.update({'instance-state-name': state}) + if zone: + filters.update({'availability-zone': zone}) + results = ec2.get_all_instances(filters=filters) return results @@ -655,8 +658,9 @@ def enforce_count(module, ec2): exact_count = 
module.params.get('exact_count') count_tag = module.params.get('count_tag') + zone = module.params.get('zone') - reservations, instances = find_running_instances_by_count_tag(module, ec2, count_tag) + reservations, instances = find_running_instances_by_count_tag(module, ec2, count_tag, zone) changed = None checkmode = False From 44cab4bf7550d1fe1f82b7283c44878297078dfd Mon Sep 17 00:00:00 2001 From: Sam Pierson Date: Wed, 17 Sep 2014 14:47:21 -0700 Subject: [PATCH 052/813] [#9048] convert Route53 TTL to integer before comparison boto/Route53 are returning TTL as a string. The route53 module is comparing it with an integer and therfore thinks the DNS entry has always changed. --- library/cloud/route53 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/cloud/route53 b/library/cloud/route53 index 0f511c23a7..b3878e0580 100644 --- a/library/cloud/route53 +++ b/library/cloud/route53 @@ -241,7 +241,7 @@ def main(): record['ttl'] = rset.ttl record['value'] = ','.join(sorted(rset.resource_records)) record['values'] = sorted(rset.resource_records) - if value_list == sorted(rset.resource_records) and record['ttl'] == ttl_in and command_in == 'create': + if value_list == sorted(rset.resource_records) and int(record['ttl']) == ttl_in and command_in == 'create': module.exit_json(changed=False) if command_in == 'get': From f445e3c606fb44f9995a0672973729b305b57938 Mon Sep 17 00:00:00 2001 From: Evan Coury Date: Wed, 17 Sep 2014 18:06:59 -0700 Subject: [PATCH 053/813] Update rax root pubkey example The example was showing how to use the `files` option to pass in a local file as an authorized public key for root. While this works, it's a bit sloppy, given that there's a specific option, `key_name` which will use one of your public keys on your rackspace account and add it as an authorized key for root. In our case, one of our admins didn't notice the `key_name` option because they scrolled straight to the example and saw the `files` strategy. 
I propose that the example still shows `files`, but not using a root public key as an example, and instead also demonstrate the `key_name` option so that it's clear from the example how to get the initial root public key deployed. --- library/cloud/rax | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/cloud/rax b/library/cloud/rax index d7db2c63d7..e01367ed5b 100644 --- a/library/cloud/rax +++ b/library/cloud/rax @@ -164,8 +164,8 @@ EXAMPLES = ''' name: rax-test1 flavor: 5 image: b11d9567-e412-4255-96b9-bd63ab23bcfe + key_name: my_rackspace_key files: - /root/.ssh/authorized_keys: /home/localuser/.ssh/id_rsa.pub /root/test.txt: /home/localuser/test.txt wait: yes state: present From 1d3a8bd39c19914acd9836f9e6df74d397d03880 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 18 Sep 2014 08:01:58 -0500 Subject: [PATCH 054/813] Updating current ansible version on docsite index --- docsite/rst/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/index.rst b/docsite/rst/index.rst index b7bd7cce95..158f8bd1d8 100644 --- a/docsite/rst/index.rst +++ b/docsite/rst/index.rst @@ -16,7 +16,7 @@ We believe simplicity is relevant to all sizes of environments and design for bu Ansible manages machines in an agentless manner. There is never a question of how to upgrade remote daemons or the problem of not being able to manage systems because daemons are uninstalled. As OpenSSH is one of the most peer reviewed open source components, the security exposure of using the tool is greatly reduced. Ansible is decentralized -- it relies on your existing OS credentials to control access to remote machines; if needed it can easily connect with Kerberos, LDAP, and other centralized authentication management systems. -This documentation covers the current released version of Ansible (1.6.10) and also some development version features (1.7). 
For recent features, in each section, the version of Ansible where the feature is added is indicated. Ansible, Inc releases a new major release of Ansible approximately every 2 months. The core application evolves somewhat conservatively, valuing simplicity in language design and setup, while the community around new modules and plugins being developed and contributed moves very very quickly, typically adding 20 or so new modules in each release. +This documentation covers the current released version of Ansible (1.7.1) and also some development version features (1.8). For recent features, in each section, the version of Ansible where the feature is added is indicated. Ansible, Inc releases a new major release of Ansible approximately every 2 months. The core application evolves somewhat conservatively, valuing simplicity in language design and setup, while the community around new modules and plugins being developed and contributed moves very very quickly, typically adding 20 or so new modules in each release. .. _an_introduction: From 9c590b30b258b9a2aedc3d59dd7fa71f86453b67 Mon Sep 17 00:00:00 2001 From: Michael Peters Date: Thu, 18 Sep 2014 14:32:53 -0400 Subject: [PATCH 055/813] minor tweak to required modules for running tests --- docsite/rst/developing_test_pr.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docsite/rst/developing_test_pr.rst b/docsite/rst/developing_test_pr.rst index e966c62c1c..76153b3f36 100644 --- a/docsite/rst/developing_test_pr.rst +++ b/docsite/rst/developing_test_pr.rst @@ -33,7 +33,8 @@ First, you will need to configure your testing environment with the neccessary t suites. 
You will need at least:: git - python-nosetests + python-nosetests (sometimes named python-nose) + python-passlib Second, if you haven't already, clone the Ansible source code from GitHub:: From 7aa6639c139d29d7ef1a27288af62eb10c3a0042 Mon Sep 17 00:00:00 2001 From: Michael Peters Date: Thu, 18 Sep 2014 14:55:03 -0400 Subject: [PATCH 056/813] documenting extra packages that need to be installed to run the full integration tests --- docsite/rst/developing_test_pr.rst | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/docsite/rst/developing_test_pr.rst b/docsite/rst/developing_test_pr.rst index 76153b3f36..b19bf06493 100644 --- a/docsite/rst/developing_test_pr.rst +++ b/docsite/rst/developing_test_pr.rst @@ -36,6 +36,13 @@ suites. You will need at least:: python-nosetests (sometimes named python-nose) python-passlib +If you want to run the full integration test suite you'll also need the following packages installed:: + + svn + hg + python-pip + gem + Second, if you haven't already, clone the Ansible source code from GitHub:: git clone https://github.com/ansible/ansible.git From 98a1fd681eeb593052fbd84d4b76780b5288fd0b Mon Sep 17 00:00:00 2001 From: James Pharaoh Date: Thu, 18 Sep 2014 20:56:10 +0200 Subject: [PATCH 057/813] add LANG=C to apt module so the string matches on the output always match --- library/packaging/apt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/library/packaging/apt b/library/packaging/apt index b4b3f078a8..e5a38e538d 100755 --- a/library/packaging/apt +++ b/library/packaging/apt @@ -148,7 +148,8 @@ import fnmatch # APT related constants APT_ENV_VARS = dict( DEBIAN_FRONTEND = 'noninteractive', - DEBIAN_PRIORITY = 'critical' + DEBIAN_PRIORITY = 'critical', + LANG = 'C' ) DPKG_OPTIONS = 'force-confdef,force-confold' From 6943ec11a5b7a3fc684cea944a430c6aaa7d13aa Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 18 Sep 2014 12:39:54 -0700 Subject: [PATCH 058/813] Failure to enable a service now fails the task 
Fixes: 8855 --- library/system/service | 30 ++++++++++++++++--- .../test_service/files/ansible-broken.upstart | 10 +++++++ .../roles/test_service/tasks/main.yml | 10 +++++++ .../test_service/tasks/systemd_cleanup.yml | 7 +++++ .../test_service/tasks/systemd_setup.yml | 6 ++++ .../test_service/tasks/upstart_cleanup.yml | 7 ++++- .../test_service/tasks/upstart_setup.yml | 9 +++++- 7 files changed, 73 insertions(+), 6 deletions(-) create mode 100644 test/integration/roles/test_service/files/ansible-broken.upstart diff --git a/library/system/service b/library/system/service index ef78f3d5d5..61e56ec83d 100644 --- a/library/system/service +++ b/library/system/service @@ -647,7 +647,11 @@ class LinuxService(Service): # committing the change is done in this conditional and then we # skip the boilerplate at the bottom of the method if self.changed: - write_to_override_file(override_file_name, override_state) + try: + write_to_override_file(override_file_name, override_state) + except: + self.module.fail_json(msg='Could not modify override file') + return # @@ -724,6 +728,9 @@ class LinuxService(Service): if not self.changed: return + # + # update-rc.d style + # if self.enable_cmd.endswith("update-rc.d"): if self.enable: action = 'enable' @@ -736,7 +743,10 @@ class LinuxService(Service): (rc, out, err) = self.execute_command("%s %s defaults" \ % (self.enable_cmd, self.name)) if rc != 0: - return (rc, out, err) + if err: + self.module.fail_json(msg=err) + else: + self.module.fail_json(msg=out) (rc, out, err) = self.execute_command("%s -n %s %s" \ % (self.enable_cmd, self.name, action)) @@ -761,7 +771,9 @@ class LinuxService(Service): if not self.changed: return + # # If we've gotten to the end, the service needs to be updated + # self.changed = True # we change argument order depending on real binary used: @@ -777,7 +789,14 @@ class LinuxService(Service): if self.module.check_mode: self.module.exit_json(changed=self.changed) - return self.execute_command("%s %s %s" % args) + 
(rc, out, err) = self.execute_command("%s %s %s" % args) + if rc != 0: + if err: + self.module.fail_json(msg=err) + else: + self.module.fail_json(msg=out) + + return (rc, out, err) def service_control(self): @@ -900,7 +919,10 @@ class FreeBsdService(Service): if self.rcconf_key is None: self.module.fail_json(msg="unable to determine rcvar", stdout=stdout, stderr=stderr) - return self.service_enable_rcconf() + try: + return self.service_enable_rcconf() + except: + self.module.fail_json(msg='unable to set rcvar') def service_control(self): diff --git a/test/integration/roles/test_service/files/ansible-broken.upstart b/test/integration/roles/test_service/files/ansible-broken.upstart new file mode 100644 index 0000000000..4e9c6694a1 --- /dev/null +++ b/test/integration/roles/test_service/files/ansible-broken.upstart @@ -0,0 +1,10 @@ +description "ansible test daemon" + +start on runlevel [345] +stop on runlevel [!345] + +expect daemon + +exec ansible_test_service + +manual diff --git a/test/integration/roles/test_service/tasks/main.yml b/test/integration/roles/test_service/tasks/main.yml index 749d164724..78aebbc8ac 100644 --- a/test/integration/roles/test_service/tasks/main.yml +++ b/test/integration/roles/test_service/tasks/main.yml @@ -90,6 +90,16 @@ that: - "disable_result.enabled == false" +- name: try to enable a broken service + service: name=ansible_test_broken enabled=yes + register: broken_enable_result + ignore_errors: True + +- name: assert that the broken test failed + assert: + that: + - "broken_enable_result.failed == True" + - name: remove the test daemon script file: path=/usr/sbin/ansible_test_service state=absent register: remove_result diff --git a/test/integration/roles/test_service/tasks/systemd_cleanup.yml b/test/integration/roles/test_service/tasks/systemd_cleanup.yml index 5a3abf46f0..10a60b216c 100644 --- a/test/integration/roles/test_service/tasks/systemd_cleanup.yml +++ b/test/integration/roles/test_service/tasks/systemd_cleanup.yml @@ -2,11 
+2,18 @@ file: path=/usr/lib/systemd/system/ansible_test.service state=absent register: remove_systemd_result +- name: remove the systemd unit file + file: path=/usr/lib/systemd/system/ansible_test_broken.service state=absent + register: remove_systemd_broken_result + +- debug: var=remove_systemd_broken_result - name: assert that the systemd unit file was removed assert: that: - "remove_systemd_result.path == '/usr/lib/systemd/system/ansible_test.service'" - "remove_systemd_result.state == 'absent'" + - "remove_systemd_broken_result.path == '/usr/lib/systemd/system/ansible_test_broken.service'" + - "remove_systemd_broken_result.state == 'absent'" - name: make sure systemd is reloaded shell: systemctl daemon-reload diff --git a/test/integration/roles/test_service/tasks/systemd_setup.yml b/test/integration/roles/test_service/tasks/systemd_setup.yml index ca8d4078e6..e2c1ecfc1c 100644 --- a/test/integration/roles/test_service/tasks/systemd_setup.yml +++ b/test/integration/roles/test_service/tasks/systemd_setup.yml @@ -2,6 +2,10 @@ copy: src=ansible.systemd dest=/usr/lib/systemd/system/ansible_test.service register: install_systemd_result +- name: install a broken systemd unit file + file: src=ansible_test.service path=/usr/lib/systemd/system/ansible_test_broken.service state=link + register: install_broken_systemd_result + - name: assert that the systemd unit file was installed assert: that: @@ -9,4 +13,6 @@ - "install_systemd_result.state == 'file'" - "install_systemd_result.mode == '0644'" - "install_systemd_result.md5sum == 'f634df77d9160ab05bad4ed49d82a0d0'" + - "install_broken_systemd_result.dest == '/usr/lib/systemd/system/ansible_test_broken.service'" + - "install_broken_systemd_result.state == 'link'" diff --git a/test/integration/roles/test_service/tasks/upstart_cleanup.yml b/test/integration/roles/test_service/tasks/upstart_cleanup.yml index c99446bf65..a589d5a986 100644 --- a/test/integration/roles/test_service/tasks/upstart_cleanup.yml +++ 
b/test/integration/roles/test_service/tasks/upstart_cleanup.yml @@ -2,9 +2,14 @@ file: path=/etc/init/ansible_test.conf state=absent register: remove_upstart_result +- name: remove the upstart init file + file: path=/etc/init/ansible_test_broken.conf state=absent + register: remove_upstart_broken_result + - name: assert that the upstart init file was removed assert: that: - "remove_upstart_result.path == '/etc/init/ansible_test.conf'" - "remove_upstart_result.state == 'absent'" - + - "remove_upstart_broken_result.path == '/etc/init/ansible_test_broken.conf'" + - "remove_upstart_broken_result.state == 'absent'" diff --git a/test/integration/roles/test_service/tasks/upstart_setup.yml b/test/integration/roles/test_service/tasks/upstart_setup.yml index e889ef2789..e27e7dca1f 100644 --- a/test/integration/roles/test_service/tasks/upstart_setup.yml +++ b/test/integration/roles/test_service/tasks/upstart_setup.yml @@ -2,6 +2,10 @@ copy: src=ansible.upstart dest=/etc/init/ansible_test.conf mode=0644 register: install_upstart_result +- name: install an upstart init file that will fail (manual in .conf) + copy: src=ansible-broken.upstart dest=/etc/init/ansible_broken_test.conf mode=0644 + register: install_upstart_broken_result + - name: assert that the upstart init file was installed assert: that: @@ -9,4 +13,7 @@ - "install_upstart_result.state == 'file'" - "install_upstart_result.mode == '0644'" - "install_upstart_result.md5sum == 'ab3900ea4de8423add764c12aeb90c01'" - + - "install_upstart_result.dest == '/etc/init/ansible_broken_test.conf'" + - "install_upstart_result.state == 'file'" + - "install_upstart_result.mode == '0644'" + - "install_upstart_result.md5sum == '015e183d10c311276c3e269cbeb309b7'" From d98e32e364eefd153ca18b1dbb15e1e7cff0d202 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 18 Sep 2014 12:59:04 -0700 Subject: [PATCH 059/813] Fix the var we're checking in the upstart test --- .../roles/test_service/tasks/upstart_setup.yml | 8 ++++---- 1 file 
changed, 4 insertions(+), 4 deletions(-) diff --git a/test/integration/roles/test_service/tasks/upstart_setup.yml b/test/integration/roles/test_service/tasks/upstart_setup.yml index e27e7dca1f..118d2da50e 100644 --- a/test/integration/roles/test_service/tasks/upstart_setup.yml +++ b/test/integration/roles/test_service/tasks/upstart_setup.yml @@ -13,7 +13,7 @@ - "install_upstart_result.state == 'file'" - "install_upstart_result.mode == '0644'" - "install_upstart_result.md5sum == 'ab3900ea4de8423add764c12aeb90c01'" - - "install_upstart_result.dest == '/etc/init/ansible_broken_test.conf'" - - "install_upstart_result.state == 'file'" - - "install_upstart_result.mode == '0644'" - - "install_upstart_result.md5sum == '015e183d10c311276c3e269cbeb309b7'" + - "install_upstart_broken_result.dest == '/etc/init/ansible_broken_test.conf'" + - "install_upstart_broken_result.state == 'file'" + - "install_upstart_broken_result.mode == '0644'" + - "install_upstart_broken_result.md5sum == '015e183d10c311276c3e269cbeb309b7'" From f7b5b85743647c817e2479b8f214bbf3073d3ac6 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 18 Sep 2014 13:18:31 -0700 Subject: [PATCH 060/813] Fix the name of the upstart service --- test/integration/roles/test_service/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/roles/test_service/tasks/main.yml b/test/integration/roles/test_service/tasks/main.yml index 78aebbc8ac..ab4335a8a5 100644 --- a/test/integration/roles/test_service/tasks/main.yml +++ b/test/integration/roles/test_service/tasks/main.yml @@ -91,7 +91,7 @@ - "disable_result.enabled == false" - name: try to enable a broken service - service: name=ansible_test_broken enabled=yes + service: name=ansible_broken_test enabled=yes register: broken_enable_result ignore_errors: True From da5de725d767a227f57a485627e34b3c7d687dcf Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 18 Sep 2014 15:53:41 -0500 Subject: [PATCH 061/813] Allow 
with_first_files to search relative to templates and vars in roles Fixes #8879 --- lib/ansible/runner/lookup_plugins/first_found.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/lib/ansible/runner/lookup_plugins/first_found.py b/lib/ansible/runner/lookup_plugins/first_found.py index d394ee3a2f..a48b56a3c2 100644 --- a/lib/ansible/runner/lookup_plugins/first_found.py +++ b/lib/ansible/runner/lookup_plugins/first_found.py @@ -172,14 +172,21 @@ class LookupModule(object): else: total_search = terms - result = None for fn in total_search: + if inject and '_original_file' in inject: + # check the templates and vars directories too, + # if they exist + for roledir in ('templates', 'vars'): + path = utils.path_dwim(os.path.join(self.basedir, '..', roledir), fn) + if os.path.exists(path): + return [path] + # if none of the above were found, just check the + # current filename against the basedir (this will already + # have ../files from runner, if it's a role task path = utils.path_dwim(self.basedir, fn) if os.path.exists(path): return [path] - - - if not result: + else: if skip: return [] else: From cc80b058c1d5e8a41c888e8daae036bda5865934 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 19 Sep 2014 12:01:46 -0700 Subject: [PATCH 062/813] Be more specific in checking if a job restarted successfully. 
Fixes: #9056 --- library/system/service | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/system/service b/library/system/service index 61e56ec83d..1effff382c 100644 --- a/library/system/service +++ b/library/system/service @@ -1287,7 +1287,7 @@ def main(): (rc, out, err) = service.modify_service_state() if rc != 0: - if err and "is already" in err: + if err and "Job is already running" in err: # upstart got confused, one such possibility is MySQL on Ubuntu 12.04 # where status may report it has no start/stop links and we could # not get accurate status From 9d45f3a65e7d94e9d25ee861d5c1a68257b69952 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 19 Sep 2014 15:08:38 -0500 Subject: [PATCH 063/813] Before decrypting check if vault password is set or error early Fixes #8926 --- lib/ansible/utils/__init__.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index 6da1f9a030..647a61d696 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -738,6 +738,11 @@ def parse_yaml_from_file(path, vault_password=None): vault = VaultLib(password=vault_password) if vault.is_encrypted(data): + # if the file is encrypted and no password was specified, + # the decrypt call would throw an error, but we check first + # since the decrypt function doesn't know the file name + if vault_password is None: + raise errors.AnsibleError("A vault password must be specified to decrypt %s" % path) data = vault.decrypt(data) show_content = False From 1bae4e4df1958b7c99e73aa3d48ab83cf901395d Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 19 Sep 2014 15:17:46 -0500 Subject: [PATCH 064/813] Add note on ec2_lc regarding immutability of configs once created Fixes #8927 --- library/cloud/ec2_lc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/library/cloud/ec2_lc b/library/cloud/ec2_lc index b58eabd53e..f75dfe6d93 100755 --- a/library/cloud/ec2_lc +++ 
b/library/cloud/ec2_lc @@ -21,6 +21,10 @@ short_description: Create or delete AWS Autoscaling Launch Configurations description: - Can create or delete AwS Autoscaling Configurations - Works with the ec2_asg module to manage Autoscaling Groups +notes: + - "Amazon ASG Autoscaling Launch Configurations are immutable once created, so modifying the configuration + after it is changed will not modify the launch configuration on AWS. You must create a new config and assign + it to the ASG instead." version_added: "1.6" author: Gareth Rushgrove options: From e6bf57469ed6a39acec3b660aaf908f93bfbc120 Mon Sep 17 00:00:00 2001 From: Jakub Jirutka Date: Fri, 19 Sep 2014 23:48:11 +0200 Subject: [PATCH 065/813] Add Gentoo install notes --- docsite/rst/intro_installation.rst | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/docsite/rst/intro_installation.rst b/docsite/rst/intro_installation.rst index 410284ab7d..795ea9b1ac 100644 --- a/docsite/rst/intro_installation.rst +++ b/docsite/rst/intro_installation.rst @@ -194,6 +194,24 @@ You may also wish to run from source to get the latest, which is covered above. .. _from_pkg: +Latest Releases Via Portage (Gentoo) +++++++++++++++++++++++++++++++++++++ + +.. code-block:: bash + + $ emerge -av app-admin/ansible + +To install the newest version, you may need to unmask the ansible package prior to emerging: + +.. code-block:: bash + + $ echo 'app-admin/ansible' >> /etc/portage/package.accept_keywords + +.. note:: + + If you have Python 3 as a default Python slot on your Gentoo nodes (default setting), then you + must set ``ansible_python_interpreter = /usr/bin/python2`` in your group or inventory variables. + Latest Releases Via pkg (FreeBSD) +++++++++++++++++++++++++++++++++ From 049c9c6b186b156894dd3d63463e6ce46c60231d Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Sun, 21 Sep 2014 01:49:59 +0200 Subject: [PATCH 066/813] Make file module work on python 2.4, fix #9080 Python 2.4 do not support "except ... 
as ..." construct, so revert back to the older syntax. --- library/files/file | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/files/file b/library/files/file index e25278fded..2f26a3bc31 100644 --- a/library/files/file +++ b/library/files/file @@ -339,7 +339,7 @@ def main(): module.fail_json(msg='Cannot touch other than files and directories') try: module.set_fs_attributes_if_different(file_args, True) - except SystemExit as e: + except SystemExit, e: if e.code: # We take this to mean that fail_json() was called from # somewhere in basic.py From 87b0b0e0ce63352c7b500f476f975845f7746bc8 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Sun, 21 Sep 2014 14:55:31 +0200 Subject: [PATCH 067/813] Fix #9704, correctly set changed as false when nothing is done --- library/system/service | 1 + 1 file changed, 1 insertion(+) diff --git a/library/system/service b/library/system/service index 1effff382c..83fc2843c3 100644 --- a/library/system/service +++ b/library/system/service @@ -673,6 +673,7 @@ class LinuxService(Service): # Check if we're already in the correct state if "3:%s" % action in out and "5:%s" % action in out: + self.changed = False return # From de031c84d519d00891418d043ffc7a6e2efa19e4 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Sun, 21 Sep 2014 15:18:53 +0200 Subject: [PATCH 068/813] Remove unused if/else clause, since it doesn't change anything --- docsite/build-site.py | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/docsite/build-site.py b/docsite/build-site.py index 70755b8a28..587a189f07 100755 --- a/docsite/build-site.py +++ b/docsite/build-site.py @@ -88,14 +88,7 @@ if __name__ == '__main__': print " Run 'make viewdocs' to build and then preview in a web browser." sys.exit(0) - # The 'htmldocs' make target will call this scrip twith the 'rst' - # parameter' We don't need to run the 'htmlman' target then. 
- if "rst" in sys.argv: - build_rst_docs() - else: - # By default, preform the rst->html transformation and then - # the asciidoc->html trasnformation - build_rst_docs() + build_rst_docs() if "view" in sys.argv: import webbrowser From dc31585f592edd4d02689fd1bf25731c64b4f035 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Sun, 21 Sep 2014 17:10:59 +0200 Subject: [PATCH 069/813] Fix indentation and bug #8895 --- library/cloud/rds_subnet_group | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/library/cloud/rds_subnet_group b/library/cloud/rds_subnet_group index 1688856719..552c94f188 100644 --- a/library/cloud/rds_subnet_group +++ b/library/cloud/rds_subnet_group @@ -72,13 +72,13 @@ author: Scott Anderson EXAMPLES = ''' # Add or change a subnet group - local_action: - module: rds_subnet_group - state: present - name: norwegian-blue - description: My Fancy Ex Parrot Subnet Group - subnets: - - subnet-aaaaaaaa - - subnet-bbbbbbbb + module: rds_subnet_group + state: present + name: norwegian-blue + description: My Fancy Ex Parrot Subnet Group + subnets: + - subnet-aaaaaaaa + - subnet-bbbbbbbb # Remove a parameter group - rds_param_group: > From cec7dd6666d51dd5625e8520693b345058d9d9a6 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 22 Sep 2014 14:53:07 -0400 Subject: [PATCH 070/813] The fix for gh-7284 causes problems for initscripts which have the standards compliant return codes but return a verbose error message via stdout. Limit the times when we invoke the heuristic to attempt to work around this. 
--- library/system/service | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/library/system/service b/library/system/service index 83fc2843c3..b235ee25c5 100644 --- a/library/system/service +++ b/library/system/service @@ -543,7 +543,10 @@ class LinuxService(Service): self.crashed = "crashed" in openrc_status_stderr # if the job status is still not known check it by status output keywords - if self.running is None: + # Only check keywords if there's only one line of output (some init + # scripts will output verbosely in case of error and those can emit + # keywords that are picked up as false positives + if self.running is None and status_stdout.count('\n') <= 1: # first transform the status output that could irritate keyword matching cleanout = status_stdout.lower().replace(self.name.lower(), '') if "stop" in cleanout: From e1afaf8d05231cd4c070eb6499f34b692179ea50 Mon Sep 17 00:00:00 2001 From: Steven Ringo Date: Tue, 23 Sep 2014 08:48:23 +1000 Subject: [PATCH 071/813] Expand description for ec2_facts module --- library/cloud/ec2_facts | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/library/cloud/ec2_facts b/library/cloud/ec2_facts index 227f714069..7b5c610dc2 100644 --- a/library/cloud/ec2_facts +++ b/library/cloud/ec2_facts @@ -31,9 +31,11 @@ options: choices: ['yes', 'no'] version_added: 1.5.1 description: - - This module fetches data from the metadata servers in ec2 (aws). + - This module fetches data from the metadata servers in ec2 (aws) as per + http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html. + The module must be called from within the EC2 instance itself. Eucalyptus cloud provides a similar service and this module should - work this cloud provider as well. + work with this cloud provider as well. notes: - Parameters to filter on ec2_facts may be added later. 
author: "Silviu Dicu " From 0073d2dd92742d09d206f0ed4325eeb22c651a7d Mon Sep 17 00:00:00 2001 From: Jakub Roztocil Date: Tue, 23 Sep 2014 07:13:59 +0200 Subject: [PATCH 072/813] Remove unreachable code. --- lib/ansible/module_utils/ec2.py | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/ansible/module_utils/ec2.py b/lib/ansible/module_utils/ec2.py index 6400e74f37..b4558ef0a4 100644 --- a/lib/ansible/module_utils/ec2.py +++ b/lib/ansible/module_utils/ec2.py @@ -54,7 +54,6 @@ def aws_common_argument_spec(): security_token=dict(no_log=True), profile=dict(), ) - return spec def ec2_argument_spec(): From 12c0378f78ecc744b9f3b174a8a285f781896e03 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 23 Sep 2014 14:51:56 -0500 Subject: [PATCH 073/813] Skip null id route tables when adding/deleting them in ec2_vpc Fixes #8552 --- library/cloud/ec2_vpc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/library/cloud/ec2_vpc b/library/cloud/ec2_vpc index 2f9840281c..e4dc9a65f7 100644 --- a/library/cloud/ec2_vpc +++ b/library/cloud/ec2_vpc @@ -451,6 +451,7 @@ def create_vpc(module, vpc_conn): old_rt = vpc_conn.get_all_route_tables( filters={'association.subnet_id': rsn.id, 'vpc_id': vpc.id} ) + old_rt = [ x for x in old_rt if x.id != None ] if len(old_rt) == 1: old_rt = old_rt[0] association_id = None @@ -474,6 +475,8 @@ def create_vpc(module, vpc_conn): # table yet. 
all_rts = vpc_conn.get_all_route_tables(filters={'vpc-id': vpc.id}) for rt in all_rts: + if rt.id is None: + continue delete_rt = True for newrt in all_route_tables: if newrt.id == rt.id: From e47f6137e5b897dec4319e7cb7791fb9b2cffb8d Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 23 Sep 2014 15:19:50 -0500 Subject: [PATCH 074/813] Push the expanduser call on the source down to slurp rather than fetch Also moves the calculation of the destination file name until after the slurp of the file contents, since the source as returned by slurp may now be different, so we want to use that expanded path locally. Fixes #8942 --- lib/ansible/runner/action_plugins/fetch.py | 37 +++++++++++++--------- library/network/slurp | 4 +-- 2 files changed, 24 insertions(+), 17 deletions(-) diff --git a/lib/ansible/runner/action_plugins/fetch.py b/lib/ansible/runner/action_plugins/fetch.py index 00622f1282..3ac8b6f634 100644 --- a/lib/ansible/runner/action_plugins/fetch.py +++ b/lib/ansible/runner/action_plugins/fetch.py @@ -56,8 +56,28 @@ class ActionModule(object): results = dict(failed=True, msg="src and dest are required") return ReturnData(conn=conn, result=results) - source = os.path.expanduser(source) source = conn.shell.join_path(source) + + # calculate md5 sum for the remote file + remote_md5 = self.runner._remote_md5(conn, tmp, source) + + # use slurp if sudo and permissions are lacking + remote_data = None + if remote_md5 in ('1', '2') or self.runner.sudo: + slurpres = self.runner._execute_module(conn, tmp, 'slurp', 'src=%s' % source, inject=inject) + if slurpres.is_successful(): + if slurpres.result['encoding'] == 'base64': + remote_data = base64.b64decode(slurpres.result['content']) + if remote_data is not None: + remote_md5 = utils.md5s(remote_data) + # the source path may have been expanded on the + # target system, so we compare it here and use the + # expanded version if it's different + remote_source = slurpres.result.get('source') + if remote_source and 
remote_source != source: + source = remote_source + + # calculate the destination name if os.path.sep not in conn.shell.join_path('a', ''): source_local = source.replace('\\', '/') else: @@ -76,20 +96,7 @@ class ActionModule(object): # files are saved in dest dir, with a subdir for each host, then the filename dest = "%s/%s/%s" % (utils.path_dwim(self.runner.basedir, dest), conn.host, source_local) - dest = os.path.expanduser(dest.replace("//","/")) - - # calculate md5 sum for the remote file - remote_md5 = self.runner._remote_md5(conn, tmp, source) - - # use slurp if sudo and permissions are lacking - remote_data = None - if remote_md5 in ('1', '2') or self.runner.sudo: - slurpres = self.runner._execute_module(conn, tmp, 'slurp', 'src=%s' % source, inject=inject) - if slurpres.is_successful(): - if slurpres.result['encoding'] == 'base64': - remote_data = base64.b64decode(slurpres.result['content']) - if remote_data is not None: - remote_md5 = utils.md5s(remote_data) + dest = dest.replace("//","/") # these don't fail because you may want to transfer a log file that possibly MAY exist # but keep going to fetch other log files diff --git a/library/network/slurp b/library/network/slurp index 744032c2cd..a2130c354b 100644 --- a/library/network/slurp +++ b/library/network/slurp @@ -57,7 +57,7 @@ def main(): ), supports_check_mode=True ) - source = module.params['src'] + source = os.path.expanduser(module.params['src']) if not os.path.exists(source): module.fail_json(msg="file not found: %s" % source) @@ -66,7 +66,7 @@ def main(): data = base64.b64encode(file(source).read()) - module.exit_json(content=data, encoding='base64') + module.exit_json(content=data, source=source, encoding='base64') # import module snippets from ansible.module_utils.basic import * From 4e9c061b356b8ddb111ca2ce3f3b44c2c800e27f Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 23 Sep 2014 15:36:24 -0500 Subject: [PATCH 075/813] Also make sure the dest param is expanded locally before 
recalculating --- lib/ansible/runner/action_plugins/fetch.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ansible/runner/action_plugins/fetch.py b/lib/ansible/runner/action_plugins/fetch.py index 3ac8b6f634..80e8a89936 100644 --- a/lib/ansible/runner/action_plugins/fetch.py +++ b/lib/ansible/runner/action_plugins/fetch.py @@ -83,6 +83,7 @@ class ActionModule(object): else: source_local = source + dest = os.path.expanduser(dest) if flat: if dest.endswith("/"): # if the path ends with "/", we'll use the source filename as the From c4f1785217e0e000181df60847644b1d44163477 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 23 Sep 2014 16:12:01 -0500 Subject: [PATCH 076/813] Use split_args instead of shlex to split include params Fixes #8947 --- lib/ansible/playbook/__init__.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/lib/ansible/playbook/__init__.py b/lib/ansible/playbook/__init__.py index 4acb15a651..58e2bafe18 100644 --- a/lib/ansible/playbook/__init__.py +++ b/lib/ansible/playbook/__init__.py @@ -21,6 +21,7 @@ import ansible.runner from ansible.utils.template import template from ansible import utils from ansible import errors +from ansible.module_utils.splitter import split_args, unquote import ansible.callbacks import ansible.cache import os @@ -209,12 +210,15 @@ class PlayBook(object): name and returns the merged vars along with the path ''' new_vars = existing_vars.copy() - tokens = shlex.split(play_ds.get('include', '')) + tokens = split_args(play_ds.get('include', '')) for t in tokens[1:]: - (k,v) = t.split("=", 1) - new_vars[k] = template(basedir, v, new_vars) + try: + (k,v) = unquote(t).split("=", 1) + new_vars[k] = template(basedir, v, new_vars) + except ValueError, e: + raise errors.AnsibleError('included playbook variables must be in the form k=v, got: %s' % t) - return (new_vars, tokens[0]) + return (new_vars, unquote(tokens[0])) # ***************************************************** From 
86edd0381c8b330fd305b99c7996551d76701b39 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 24 Sep 2014 15:53:36 -0500 Subject: [PATCH 077/813] Adding in docs updates for 1.7.2 release --- CHANGELOG.md | 18 ++++++++++++++++++ RELEASES.txt | 2 ++ docsite/rst/index.rst | 2 +- packaging/debian/changelog | 12 ++++++++++++ packaging/rpm/ansible.spec | 6 ++++++ 5 files changed, 39 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 915993a06a..03564893b0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -59,6 +59,24 @@ Some other notable changes: And various other bug fixes and improvements ... +## 1.7.2 "Summer Nights" - Sep 24, 2014 + +- Fixes a bug in accelerate mode which caused a traceback when trying to use that connection method. +- Fixes a bug in vault where the password file option was not being used correctly internally. +- Improved multi-line parsing when using YAML literal blocks (using > or |). +- Fixed a bug with the file module and the creation of relative symlinks. +- Fixed a bug where checkmode was not being honored during the templating of files. +- Other various bug fixes. + +## 1.7.1 "Summer Nights" - Aug 14, 2014 + +- Security fix to disallow specifying 'args:' as a string, which could allow the insertion of extra module parameters through variables. +- Performance enhancements related to previous security fixes, which could cause slowness when modules returned very large JSON results. This specifically impacted the unarchive module frequently, which returns the details of all unarchived files in the result. 
+- Docker module bug fixes: + * Fixed support for specifying rw/ro bind modes for volumes + * Fixed support for allowing the tag in the image parameter +- Various other bug fixes + ## 1.7 "Summer Nights" - Aug 06, 2014 Major new features: diff --git a/RELEASES.txt b/RELEASES.txt index 01d880dfa5..72323a146f 100644 --- a/RELEASES.txt +++ b/RELEASES.txt @@ -9,6 +9,8 @@ Active Development Released ++++++++ +1.7.2 "Summer Nights" -------- 09-24-2014 +1.7.1 "Summer Nights" -------- 08-14-2014 1.7 "Summer Nights" -------- 08-06-2014 1.6.10 "The Cradle Will Rock" - 07-25-2014 1.6.9 "The Cradle Will Rock" - 07-24-2014 diff --git a/docsite/rst/index.rst b/docsite/rst/index.rst index 158f8bd1d8..8085c509fe 100644 --- a/docsite/rst/index.rst +++ b/docsite/rst/index.rst @@ -16,7 +16,7 @@ We believe simplicity is relevant to all sizes of environments and design for bu Ansible manages machines in an agentless manner. There is never a question of how to upgrade remote daemons or the problem of not being able to manage systems because daemons are uninstalled. As OpenSSH is one of the most peer reviewed open source components, the security exposure of using the tool is greatly reduced. Ansible is decentralized -- it relies on your existing OS credentials to control access to remote machines; if needed it can easily connect with Kerberos, LDAP, and other centralized authentication management systems. -This documentation covers the current released version of Ansible (1.7.1) and also some development version features (1.8). For recent features, in each section, the version of Ansible where the feature is added is indicated. Ansible, Inc releases a new major release of Ansible approximately every 2 months. The core application evolves somewhat conservatively, valuing simplicity in language design and setup, while the community around new modules and plugins being developed and contributed moves very very quickly, typically adding 20 or so new modules in each release. 
+This documentation covers the current released version of Ansible (1.7.2) and also some development version features (1.8). For recent features, in each section, the version of Ansible where the feature is added is indicated. Ansible, Inc releases a new major release of Ansible approximately every 2 months. The core application evolves somewhat conservatively, valuing simplicity in language design and setup, while the community around new modules and plugins being developed and contributed moves very very quickly, typically adding 20 or so new modules in each release. .. _an_introduction: diff --git a/packaging/debian/changelog b/packaging/debian/changelog index d8238612d6..168b519dbc 100644 --- a/packaging/debian/changelog +++ b/packaging/debian/changelog @@ -4,6 +4,18 @@ ansible (1.8) unstable; urgency=low -- Michael DeHaan Wed, 21 Oct 2015 04:29:00 -0500 +ansible (1.7.2) unstable; urgency=low + + * 1.7.2 release + + -- Michael DeHaan Wed, 24 Sep 2014 15:00:00 -0500 + +ansible (1.7.1) unstable; urgency=low + + * 1.7.1 release + + -- Michael DeHaan Thu, 14 Oct 2014 17:00:00 -0500 + ansible (1.7) unstable; urgency=low * 1.7.0 release diff --git a/packaging/rpm/ansible.spec b/packaging/rpm/ansible.spec index 3d1b82bb04..4483e7da5d 100644 --- a/packaging/rpm/ansible.spec +++ b/packaging/rpm/ansible.spec @@ -115,6 +115,12 @@ rm -rf %{buildroot} %changelog +* Wed Sep 24 2014 Michael DeHaan - 1.7.2 +- Release 1.7.2 + +* Thu Aug 14 2014 Michael DeHaan - 1.7.1 +- Release 1.7.1 + * Wed Aug 06 2014 Michael DeHaan - 1.7.0 - Release 1.7.0 From e0015395de0e23c006fd845dacd9be9c0bb7c187 Mon Sep 17 00:00:00 2001 From: Marc Abramowitz Date: Wed, 24 Sep 2014 14:05:31 -0700 Subject: [PATCH 078/813] Expose more facts about user on host system Adds: - `user_uid` - `user_gid` - `user_gecos` - `user_dir` - `user_shell` --- lib/ansible/module_utils/facts.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py 
index f9d2fdbf33..5edad914f2 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -29,6 +29,7 @@ import socket import struct import datetime import getpass +import pwd import ConfigParser import StringIO @@ -476,6 +477,12 @@ class Facts(object): # User def get_user_facts(self): self.facts['user_id'] = getpass.getuser() + pwent = pwd.getpwnam(getpass.getuser()) + self.facts['user_uid'] = pwent.pw_uid + self.facts['user_gid'] = pwent.pw_gid + self.facts['user_gecos'] = pwent.pw_gecos + self.facts['user_dir'] = pwent.pw_dir + self.facts['user_shell'] = pwent.pw_shell def get_env_facts(self): self.facts['env'] = {} From 19703617b8be22fc17d7a74e2959ed38ace4cfb4 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 25 Sep 2014 11:06:01 -0500 Subject: [PATCH 079/813] Fix action_plugins for modules not respecting check mode Fixes #9140 --- lib/ansible/runner/action_plugins/assemble.py | 5 +++++ lib/ansible/runner/action_plugins/unarchive.py | 11 ++++++++++- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/lib/ansible/runner/action_plugins/assemble.py b/lib/ansible/runner/action_plugins/assemble.py index 54b5d1985c..c6f7165d82 100644 --- a/lib/ansible/runner/action_plugins/assemble.py +++ b/lib/ansible/runner/action_plugins/assemble.py @@ -147,6 +147,11 @@ class ActionModule(object): dest=dest, original_basename=os.path.basename(src), ) + + # make sure checkmod is passed on correctly + if self.runner.noop_on_check(inject): + new_module_args['CHECKMODE'] = True + module_args_tmp = utils.merge_module_args(module_args, new_module_args) return self.runner._execute_module(conn, tmp, 'file', module_args_tmp, inject=inject) diff --git a/lib/ansible/runner/action_plugins/unarchive.py b/lib/ansible/runner/action_plugins/unarchive.py index 40bc5d9149..a569403cac 100644 --- a/lib/ansible/runner/action_plugins/unarchive.py +++ b/lib/ansible/runner/action_plugins/unarchive.py @@ -77,13 +77,22 @@ class ActionModule(object): # fix file 
permissions when the copy is done as a different user if copy: if self.runner.sudo and self.runner.sudo_user != 'root' or self.runner.su and self.runner.su_user != 'root': - self.runner._remote_chmod(conn, 'a+r', tmp_src, tmp) + if not self.runner.noop_on_check(inject): + self.runner._remote_chmod(conn, 'a+r', tmp_src, tmp) # Build temporary module_args. new_module_args = dict( src=tmp_src, original_basename=os.path.basename(source), ) + + # make sure checkmod is passed on correctly + if self.runner.noop_on_check(inject): + new_module_args['CHECKMODE'] = True + module_args = utils.merge_module_args(module_args, new_module_args) else: module_args = "%s original_basename=%s" % (module_args, pipes.quote(os.path.basename(source))) + # make sure checkmod is passed on correctly + if self.runner.noop_on_check(inject): + module_args += " CHECKMODE=True" return self.runner._execute_module(conn, tmp, 'unarchive', module_args, inject=inject, complex_args=complex_args) From 128c10b3111b3bb6f0fac8bbbeddc8b2766ecbf7 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 25 Sep 2014 14:46:16 -0500 Subject: [PATCH 080/813] Don't template 'vars' dictionary during templating Fixes #9132 --- lib/ansible/utils/template.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/ansible/utils/template.py b/lib/ansible/utils/template.py index d102659003..9521f2f2ec 100644 --- a/lib/ansible/utils/template.py +++ b/lib/ansible/utils/template.py @@ -166,6 +166,7 @@ class _jinja2_vars(object): return False def __getitem__(self, varname): + from ansible.runner import HostVars if varname not in self.vars: for i in self.extras: if varname in i: @@ -175,8 +176,9 @@ class _jinja2_vars(object): else: raise KeyError("undefined variable: %s" % varname) var = self.vars[varname] - # HostVars is special, return it as-is - if isinstance(var, dict) and type(var) != dict: + # HostVars is special, return it as-is, as is the special variable + # 'vars', which contains the vars 
structure + if isinstance(var, dict) and varname == "vars" or isinstance(var, HostVars): return var else: return template(self.basedir, var, self.vars, fail_on_undefined=self.fail_on_undefined) From 21dcfd7192c2546f5394039edefd27c51e9eb018 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 25 Sep 2014 15:42:45 -0500 Subject: [PATCH 081/813] Adding integration tests for async fire-and-forget checking Also updated the CHANGELOG for the feature --- CHANGELOG.md | 1 + .../roles/test_async/tasks/main.yml | 25 +++++++++++++++++++ 2 files changed, 26 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 03564893b0..0b84708e22 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ Major changes: * command_warnings feature will warn about when usage of the shell/command module can be simplified to use core modules - this can be enabled in ansible.cfg * new omit value can be used to leave off a parameter when not set, like so module_name: a=1 b={{ c | default(omit) }}, would not pass value for b (not even an empty value) if c was not set. * developers: 'baby JSON' in module responses, originally intended for writing modules in bash, is removed as a feature to simplify logic, script module remains available for running bash scripts. +* async jobs started in "fire & forget" mode can now be checked on at a later time. 
New Modules: diff --git a/test/integration/roles/test_async/tasks/main.yml b/test/integration/roles/test_async/tasks/main.yml index 556284770a..0b9991ec04 100644 --- a/test/integration/roles/test_async/tasks/main.yml +++ b/test/integration/roles/test_async/tasks/main.yml @@ -62,3 +62,28 @@ async: 15 poll: 0 when: False + +# test async "fire and forget, but check later" + +- name: 'start a task with "fire-and-forget"' + command: sleep 15 + async: 30 + poll: 0 + register: fnf_task + +- name: assert task was successfully started + assert: + that: + - fnf_task.started + - "'ansible_job_id' in fnf_task" + +- name: 'check on task started as a "fire-and-forget"' + async_status: jid={{ fnf_task.ansible_job_id }} + register: fnf_result + until: fnf_result.finished + retries: 30 + +- name: assert task was successfully checked + assert: + that: + - fnf_result.finished From cdcbde1cffb3dc0dc6712887f0b1a21a73081867 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 25 Sep 2014 19:16:41 -0500 Subject: [PATCH 082/813] Fixing version added for the fire/forget check later feature --- docsite/rst/playbooks_async.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_async.rst b/docsite/rst/playbooks_async.rst index 01003ee3b3..7b72846fd9 100644 --- a/docsite/rst/playbooks_async.rst +++ b/docsite/rst/playbooks_async.rst @@ -61,7 +61,7 @@ If you would like to perform a variation of the "fire and forget" where you following:: --- - # Requires ansible 1.7+ + # Requires ansible 1.8+ - name: 'YUM - fire and forget task' yum: name=docker-io state=installed async: 1000 From 6af5455edcd843a31a0b20eb103f37445b582672 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 25 Sep 2014 19:22:35 -0500 Subject: [PATCH 083/813] Default 'smart' connection to paramiko for OSX platforms Due to the long-standing bug in sshpass, which can crash OSX. 
Fixes #5007 --- lib/ansible/runner/__init__.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index a1133fdbad..7e093f3537 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -214,14 +214,18 @@ class Runner(object): self.run_once = run_once if self.transport == 'smart': - # if the transport is 'smart' see if SSH can support ControlPersist if not use paramiko + # If the transport is 'smart', check to see if certain conditions + # would prevent us from using ssh, and fallback to paramiko. # 'smart' is the default since 1.2.1/1.3 - cmd = subprocess.Popen(['ssh','-o','ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) - (out, err) = cmd.communicate() - if "Bad configuration option" in err: + self.transport = "ssh" + if sys.platform.startswith('darwin'): self.transport = "paramiko" else: - self.transport = "ssh" + # see if SSH can support ControlPersist if not use paramiko + cmd = subprocess.Popen(['ssh','-o','ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + (out, err) = cmd.communicate() + if "Bad configuration option" in err: + self.transport = "paramiko" # save the original transport, in case it gets # changed later via options like accelerate From 9249d1db28d6f106127cfcd16432096de7a35ac0 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Fri, 26 Sep 2014 10:17:25 -0400 Subject: [PATCH 084/813] Add submodules for new core and extras subdirectories of modules, not yet live. 
--- .gitmodules | 6 ++++++ lib/ansible/modules/core | 1 + lib/ansible/modules/extras | 1 + 3 files changed, 8 insertions(+) create mode 100644 .gitmodules create mode 160000 lib/ansible/modules/core create mode 160000 lib/ansible/modules/extras diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000000..92c3072a28 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,6 @@ +[submodule "lib/ansible/modules/core"] + path = lib/ansible/modules/core + url = git://github.com/ansible/ansible-modules-core.git +[submodule "lib/ansible/modules/extras"] + path = lib/ansible/modules/extras + url = git://github.com/ansible/ansible-modules-extras.git diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core new file mode 160000 index 0000000000..417309a626 --- /dev/null +++ b/lib/ansible/modules/core @@ -0,0 +1 @@ +Subproject commit 417309a626e39396196a4abc6208c9f6db158f9d diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras new file mode 160000 index 0000000000..50f105578a --- /dev/null +++ b/lib/ansible/modules/extras @@ -0,0 +1 @@ +Subproject commit 50f105578a07361e60f95a63da6acb5660765871 From 25cc79e2db5a79fd6d8022d916d20f821fab87ba Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Fri, 26 Sep 2014 10:55:00 -0400 Subject: [PATCH 085/813] Modules is a package. 
--- lib/ansible/modules/__init__.py | 0 lib/ansible/utils/plugins.py | 9 ++++++++- 2 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 lib/ansible/modules/__init__.py diff --git a/lib/ansible/modules/__init__.py b/lib/ansible/modules/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/lib/ansible/utils/plugins.py b/lib/ansible/utils/plugins.py index 327bc99cec..f83155412f 100644 --- a/lib/ansible/utils/plugins.py +++ b/lib/ansible/utils/plugins.py @@ -101,7 +101,14 @@ class PluginLoader(object): for basedir in _basedirs: fullpath = os.path.realpath(os.path.join(basedir, self.subdir)) if os.path.isdir(fullpath): + files = glob.glob("%s/*" % fullpath) + + # allow directories to be two levels deep + files2 = glob.glob("%s/*/*" % fullpath) + + files = files.extend(files2) + for file in files: if os.path.isdir(file) and file not in ret: ret.append(file) @@ -232,7 +239,7 @@ shell_loader = PluginLoader( module_finder = PluginLoader( '', - '', + 'ansible.modules', C.DEFAULT_MODULE_PATH, 'library' ) From f35ed8a6c0dc81b86c69348fff543d52f070ee28 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Fri, 26 Sep 2014 10:57:12 -0400 Subject: [PATCH 086/813] Update module test code to avoid pycs (that are not used) --- test/units/TestModules.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/units/TestModules.py b/test/units/TestModules.py index 83c2b7c398..aef2e83ed6 100644 --- a/test/units/TestModules.py +++ b/test/units/TestModules.py @@ -16,7 +16,7 @@ class TestModules(unittest.TestCase): for (dirpath, dirnames, filenames) in os.walk(path): for filename in filenames: (path, ext) = os.path.splitext(filename) - if ext != ".ps1": + if ext == ".py": module_list.append(os.path.join(dirpath, filename)) return module_list From e5116d2f9bd851949ae50e0c9c112750e7cec761 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Fri, 26 Sep 2014 11:25:56 -0400 Subject: [PATCH 087/813] changes for package loading of modules --- 
MANIFEST.in | 1 - examples/ansible.cfg | 2 +- lib/ansible/constants.py | 18 +- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- lib/ansible/runner/__init__.py | 2 +- lib/ansible/utils/plugins.py | 18 +- library/cloud/azure | 484 ----- library/cloud/cloudformation | 313 --- library/cloud/digital_ocean | 434 ----- library/cloud/digital_ocean_domain | 242 --- library/cloud/digital_ocean_sshkey | 178 -- library/cloud/docker | 854 --------- library/cloud/docker_image | 252 --- library/cloud/ec2 | 1199 ------------ library/cloud/ec2_ami | 273 --- library/cloud/ec2_ami_search | 196 -- library/cloud/ec2_asg | 608 ------ library/cloud/ec2_eip | 305 --- library/cloud/ec2_elb | 339 ---- library/cloud/ec2_elb_lb | 698 ------- library/cloud/ec2_facts | 182 -- library/cloud/ec2_group | 386 ---- library/cloud/ec2_key | 238 --- library/cloud/ec2_lc | 278 --- library/cloud/ec2_metric_alarm | 282 --- library/cloud/ec2_scaling_policy | 177 -- library/cloud/ec2_snapshot | 151 -- library/cloud/ec2_tag | 152 -- library/cloud/ec2_vol | 434 ----- library/cloud/ec2_vpc | 626 ------ library/cloud/elasticache | 547 ------ library/cloud/gc_storage | 420 ----- library/cloud/gce | 474 ----- library/cloud/gce_lb | 335 ---- library/cloud/gce_net | 271 --- library/cloud/gce_pd | 285 --- library/cloud/glance_image | 260 --- library/cloud/keystone_user | 394 ---- library/cloud/linode | 493 ----- library/cloud/nova_compute | 585 ------ library/cloud/nova_keypair | 139 -- library/cloud/ovirt | 425 ----- library/cloud/quantum_floating_ip | 266 --- library/cloud/quantum_floating_ip_associate | 218 --- library/cloud/quantum_network | 279 --- library/cloud/quantum_router | 210 --- library/cloud/quantum_router_gateway | 213 --- library/cloud/quantum_router_interface | 249 --- library/cloud/quantum_subnet | 291 --- library/cloud/rax | 711 ------- library/cloud/rax_cbs | 220 --- library/cloud/rax_cbs_attachments | 226 --- library/cloud/rax_cdb | 238 --- library/cloud/rax_cdb_database | 186 
-- library/cloud/rax_cdb_user | 220 --- library/cloud/rax_clb | 303 --- library/cloud/rax_clb_nodes | 303 --- library/cloud/rax_dns | 173 -- library/cloud/rax_dns_record | 335 ---- library/cloud/rax_facts | 144 -- library/cloud/rax_files | 379 ---- library/cloud/rax_files_objects | 603 ------ library/cloud/rax_identity | 110 -- library/cloud/rax_keypair | 174 -- library/cloud/rax_meta | 178 -- library/cloud/rax_network | 145 -- library/cloud/rax_queue | 145 -- library/cloud/rax_scaling_group | 351 ---- library/cloud/rax_scaling_policy | 283 --- library/cloud/rds | 650 ------- library/cloud/rds_param_group | 313 --- library/cloud/rds_subnet_group | 166 -- library/cloud/route53 | 281 --- library/cloud/s3 | 514 ----- library/cloud/virt | 493 ----- library/cloud/vsphere_guest | 1225 ------------ library/commands/command | 275 --- library/commands/raw | 43 - library/commands/script | 47 - library/commands/shell | 78 - library/database/mongodb_user | 242 --- library/database/mysql_db | 363 ---- library/database/mysql_replication | 369 ---- library/database/mysql_user | 476 ----- library/database/mysql_variables | 253 --- library/database/postgresql_db | 301 --- library/database/postgresql_privs | 613 ------ library/database/postgresql_user | 526 ------ library/database/redis | 329 ---- library/database/riak | 255 --- library/files/acl | 295 --- library/files/assemble | 200 -- library/files/copy | 254 --- library/files/fetch | 67 - library/files/file | 358 ---- library/files/ini_file | 207 -- library/files/lineinfile | 400 ---- library/files/replace | 162 -- library/files/stat | 152 -- library/files/synchronize | 345 ---- library/files/template | 66 - library/files/unarchive | 250 --- library/files/xattr | 206 -- library/internal/async_status | 99 - library/internal/async_wrapper | 200 -- library/inventory/add_host | 36 - library/inventory/group_by | 25 - library/messaging/rabbitmq_parameter | 152 -- library/messaging/rabbitmq_plugin | 130 -- 
library/messaging/rabbitmq_policy | 156 -- library/messaging/rabbitmq_user | 249 --- library/messaging/rabbitmq_vhost | 147 -- library/monitoring/airbrake_deployment | 130 -- library/monitoring/bigpanda | 172 -- library/monitoring/boundary_meter | 256 --- library/monitoring/datadog_event | 143 -- library/monitoring/librato_annotation | 169 -- library/monitoring/logentries | 130 -- library/monitoring/monit | 155 -- library/monitoring/nagios | 880 --------- library/monitoring/newrelic_deployment | 145 -- library/monitoring/pagerduty | 232 --- library/monitoring/pingdom | 135 -- library/monitoring/rollbar_deployment | 133 -- library/monitoring/stackdriver | 196 -- library/monitoring/zabbix_maintenance | 371 ---- library/net_infrastructure/a10_server | 269 --- library/net_infrastructure/a10_service_group | 341 ---- library/net_infrastructure/a10_virtual_server | 299 --- library/net_infrastructure/bigip_facts | 1670 ----------------- library/net_infrastructure/bigip_monitor_http | 464 ----- library/net_infrastructure/bigip_monitor_tcp | 489 ----- library/net_infrastructure/bigip_node | 294 --- library/net_infrastructure/bigip_pool | 536 ------ library/net_infrastructure/bigip_pool_member | 378 ---- library/net_infrastructure/dnsimple | 302 --- library/net_infrastructure/dnsmadeeasy | 329 ---- library/net_infrastructure/lldp | 83 - library/net_infrastructure/netscaler | 190 -- library/net_infrastructure/openvswitch_bridge | 135 -- library/net_infrastructure/openvswitch_port | 139 -- library/network/get_url | 313 --- library/network/slurp | 75 - library/network/uri | 445 ----- library/notification/campfire | 143 -- library/notification/flowdock | 192 -- library/notification/grove | 99 - library/notification/hipchat | 149 -- library/notification/irc | 215 --- library/notification/jabber | 146 -- library/notification/mail | 252 --- library/notification/mqtt | 166 -- library/notification/nexmo | 140 -- library/notification/osx_say | 74 - library/notification/slack | 173 -- 
library/notification/sns | 190 -- library/notification/twilio | 135 -- library/notification/typetalk | 116 -- library/packaging/apt | 562 ------ library/packaging/apt_key | 277 --- library/packaging/apt_repository | 446 ----- library/packaging/apt_rpm | 172 -- library/packaging/composer | 164 -- library/packaging/cpanm | 145 -- library/packaging/easy_install | 188 -- library/packaging/gem | 238 --- library/packaging/homebrew | 835 --------- library/packaging/homebrew_cask | 513 ----- library/packaging/homebrew_tap | 215 --- library/packaging/layman | 236 --- library/packaging/macports | 217 --- library/packaging/npm | 263 --- library/packaging/openbsd_pkg | 373 ---- library/packaging/opkg | 150 -- library/packaging/pacman | 234 --- library/packaging/pip | 356 ---- library/packaging/pkgin | 168 -- library/packaging/pkgng | 301 --- library/packaging/pkgutil | 179 -- library/packaging/portage | 405 ---- library/packaging/portinstall | 207 -- library/packaging/redhat_subscription | 396 ---- library/packaging/rhn_channel | 169 -- library/packaging/rhn_register | 336 ---- library/packaging/rpm_key | 206 -- library/packaging/svr4pkg | 234 --- library/packaging/swdepot | 196 -- library/packaging/urpmi | 200 -- library/packaging/yum | 838 --------- library/packaging/zypper | 260 --- library/packaging/zypper_repository | 221 --- library/source_control/bzr | 198 -- library/source_control/git | 607 ------ library/source_control/github_hooks | 178 -- library/source_control/hg | 238 --- library/source_control/subversion | 231 --- library/system/alternatives | 140 -- library/system/at | 200 -- library/system/authorized_key | 421 ----- library/system/capabilities | 187 -- library/system/cron | 524 ------ library/system/debconf | 170 -- library/system/facter | 56 - library/system/filesystem | 119 -- library/system/firewalld | 398 ---- library/system/getent | 143 -- library/system/group | 403 ---- library/system/hostname | 445 ----- library/system/kernel_blacklist | 141 -- 
library/system/locale_gen | 151 -- library/system/lvg | 253 --- library/system/lvol | 235 --- library/system/modprobe | 115 -- library/system/mount | 338 ---- library/system/ohai | 56 - library/system/open_iscsi | 379 ---- library/system/ping | 59 - library/system/seboolean | 212 --- library/system/selinux | 203 -- library/system/service | 1328 ------------- library/system/setup | 146 -- library/system/sysctl | 334 ---- library/system/ufw | 269 --- library/system/user | 1584 ---------------- library/system/zfs | 417 ---- library/utilities/accelerate | 727 ------- library/utilities/assert | 44 - library/utilities/debug | 58 - library/utilities/fail | 44 - library/utilities/fireball | 280 --- library/utilities/include_vars | 39 - library/utilities/pause | 40 - library/utilities/set_fact | 57 - library/utilities/wait_for | 462 ----- library/web_infrastructure/apache2_module | 89 - library/web_infrastructure/django_manage | 281 --- library/web_infrastructure/ejabberd_user | 214 --- library/web_infrastructure/htpasswd | 219 --- library/web_infrastructure/jboss | 140 -- library/web_infrastructure/jira | 347 ---- library/web_infrastructure/supervisorctl | 221 --- library/windows/setup.ps1 | 100 - library/windows/slurp.ps1 | 46 - library/windows/win_feature | 97 - library/windows/win_feature.ps1 | 122 -- library/windows/win_get_url | 57 - library/windows/win_get_url.ps1 | 56 - library/windows/win_group | 67 - library/windows/win_group.ps1 | 70 - library/windows/win_msi | 58 - library/windows/win_msi.ps1 | 63 - library/windows/win_ping | 48 - library/windows/win_ping.ps1 | 29 - library/windows/win_service | 72 - library/windows/win_service.ps1 | 106 -- library/windows/win_stat | 52 - library/windows/win_stat.ps1 | 63 - library/windows/win_user | 71 - library/windows/win_user.ps1 | 116 -- setup.py | 16 +- 261 files changed, 21 insertions(+), 70591 deletions(-) delete mode 100644 library/cloud/azure delete mode 100644 library/cloud/cloudformation delete mode 100644 
library/cloud/digital_ocean delete mode 100644 library/cloud/digital_ocean_domain delete mode 100644 library/cloud/digital_ocean_sshkey delete mode 100644 library/cloud/docker delete mode 100644 library/cloud/docker_image delete mode 100644 library/cloud/ec2 delete mode 100644 library/cloud/ec2_ami delete mode 100644 library/cloud/ec2_ami_search delete mode 100755 library/cloud/ec2_asg delete mode 100644 library/cloud/ec2_eip delete mode 100644 library/cloud/ec2_elb delete mode 100644 library/cloud/ec2_elb_lb delete mode 100644 library/cloud/ec2_facts delete mode 100644 library/cloud/ec2_group delete mode 100644 library/cloud/ec2_key delete mode 100755 library/cloud/ec2_lc delete mode 100644 library/cloud/ec2_metric_alarm delete mode 100755 library/cloud/ec2_scaling_policy delete mode 100644 library/cloud/ec2_snapshot delete mode 100644 library/cloud/ec2_tag delete mode 100644 library/cloud/ec2_vol delete mode 100644 library/cloud/ec2_vpc delete mode 100644 library/cloud/elasticache delete mode 100644 library/cloud/gc_storage delete mode 100755 library/cloud/gce delete mode 100644 library/cloud/gce_lb delete mode 100644 library/cloud/gce_net delete mode 100644 library/cloud/gce_pd delete mode 100644 library/cloud/glance_image delete mode 100644 library/cloud/keystone_user delete mode 100644 library/cloud/linode delete mode 100644 library/cloud/nova_compute delete mode 100644 library/cloud/nova_keypair delete mode 100755 library/cloud/ovirt delete mode 100644 library/cloud/quantum_floating_ip delete mode 100644 library/cloud/quantum_floating_ip_associate delete mode 100644 library/cloud/quantum_network delete mode 100644 library/cloud/quantum_router delete mode 100644 library/cloud/quantum_router_gateway delete mode 100644 library/cloud/quantum_router_interface delete mode 100644 library/cloud/quantum_subnet delete mode 100644 library/cloud/rax delete mode 100644 library/cloud/rax_cbs delete mode 100644 library/cloud/rax_cbs_attachments delete mode 100644 
library/cloud/rax_cdb delete mode 100644 library/cloud/rax_cdb_database delete mode 100644 library/cloud/rax_cdb_user delete mode 100644 library/cloud/rax_clb delete mode 100644 library/cloud/rax_clb_nodes delete mode 100644 library/cloud/rax_dns delete mode 100644 library/cloud/rax_dns_record delete mode 100644 library/cloud/rax_facts delete mode 100644 library/cloud/rax_files delete mode 100644 library/cloud/rax_files_objects delete mode 100644 library/cloud/rax_identity delete mode 100644 library/cloud/rax_keypair delete mode 100644 library/cloud/rax_meta delete mode 100644 library/cloud/rax_network delete mode 100644 library/cloud/rax_queue delete mode 100644 library/cloud/rax_scaling_group delete mode 100644 library/cloud/rax_scaling_policy delete mode 100644 library/cloud/rds delete mode 100644 library/cloud/rds_param_group delete mode 100644 library/cloud/rds_subnet_group delete mode 100644 library/cloud/route53 delete mode 100644 library/cloud/s3 delete mode 100644 library/cloud/virt delete mode 100644 library/cloud/vsphere_guest delete mode 100644 library/commands/command delete mode 100644 library/commands/raw delete mode 100644 library/commands/script delete mode 100644 library/commands/shell delete mode 100644 library/database/mongodb_user delete mode 100644 library/database/mysql_db delete mode 100644 library/database/mysql_replication delete mode 100644 library/database/mysql_user delete mode 100644 library/database/mysql_variables delete mode 100644 library/database/postgresql_db delete mode 100644 library/database/postgresql_privs delete mode 100644 library/database/postgresql_user delete mode 100644 library/database/redis delete mode 100644 library/database/riak delete mode 100644 library/files/acl delete mode 100644 library/files/assemble delete mode 100644 library/files/copy delete mode 100644 library/files/fetch delete mode 100644 library/files/file delete mode 100644 library/files/ini_file delete mode 100644 library/files/lineinfile delete mode 
100644 library/files/replace delete mode 100644 library/files/stat delete mode 100644 library/files/synchronize delete mode 100644 library/files/template delete mode 100644 library/files/unarchive delete mode 100644 library/files/xattr delete mode 100644 library/internal/async_status delete mode 100644 library/internal/async_wrapper delete mode 100644 library/inventory/add_host delete mode 100644 library/inventory/group_by delete mode 100644 library/messaging/rabbitmq_parameter delete mode 100644 library/messaging/rabbitmq_plugin delete mode 100644 library/messaging/rabbitmq_policy delete mode 100644 library/messaging/rabbitmq_user delete mode 100644 library/messaging/rabbitmq_vhost delete mode 100644 library/monitoring/airbrake_deployment delete mode 100644 library/monitoring/bigpanda delete mode 100644 library/monitoring/boundary_meter delete mode 100644 library/monitoring/datadog_event delete mode 100644 library/monitoring/librato_annotation delete mode 100644 library/monitoring/logentries delete mode 100644 library/monitoring/monit delete mode 100644 library/monitoring/nagios delete mode 100644 library/monitoring/newrelic_deployment delete mode 100644 library/monitoring/pagerduty delete mode 100644 library/monitoring/pingdom delete mode 100644 library/monitoring/rollbar_deployment delete mode 100644 library/monitoring/stackdriver delete mode 100644 library/monitoring/zabbix_maintenance delete mode 100644 library/net_infrastructure/a10_server delete mode 100644 library/net_infrastructure/a10_service_group delete mode 100644 library/net_infrastructure/a10_virtual_server delete mode 100755 library/net_infrastructure/bigip_facts delete mode 100644 library/net_infrastructure/bigip_monitor_http delete mode 100644 library/net_infrastructure/bigip_monitor_tcp delete mode 100644 library/net_infrastructure/bigip_node delete mode 100644 library/net_infrastructure/bigip_pool delete mode 100644 library/net_infrastructure/bigip_pool_member delete mode 100755 
library/net_infrastructure/dnsimple delete mode 100644 library/net_infrastructure/dnsmadeeasy delete mode 100755 library/net_infrastructure/lldp delete mode 100644 library/net_infrastructure/netscaler delete mode 100644 library/net_infrastructure/openvswitch_bridge delete mode 100644 library/net_infrastructure/openvswitch_port delete mode 100644 library/network/get_url delete mode 100644 library/network/slurp delete mode 100644 library/network/uri delete mode 100644 library/notification/campfire delete mode 100644 library/notification/flowdock delete mode 100644 library/notification/grove delete mode 100644 library/notification/hipchat delete mode 100644 library/notification/irc delete mode 100644 library/notification/jabber delete mode 100644 library/notification/mail delete mode 100644 library/notification/mqtt delete mode 100644 library/notification/nexmo delete mode 100644 library/notification/osx_say delete mode 100644 library/notification/slack delete mode 100644 library/notification/sns delete mode 100644 library/notification/twilio delete mode 100644 library/notification/typetalk delete mode 100755 library/packaging/apt delete mode 100644 library/packaging/apt_key delete mode 100644 library/packaging/apt_repository delete mode 100755 library/packaging/apt_rpm delete mode 100644 library/packaging/composer delete mode 100644 library/packaging/cpanm delete mode 100644 library/packaging/easy_install delete mode 100644 library/packaging/gem delete mode 100644 library/packaging/homebrew delete mode 100644 library/packaging/homebrew_cask delete mode 100644 library/packaging/homebrew_tap delete mode 100644 library/packaging/layman delete mode 100644 library/packaging/macports delete mode 100644 library/packaging/npm delete mode 100644 library/packaging/openbsd_pkg delete mode 100644 library/packaging/opkg delete mode 100644 library/packaging/pacman delete mode 100644 library/packaging/pip delete mode 100755 library/packaging/pkgin delete mode 100644 
library/packaging/pkgng delete mode 100644 library/packaging/pkgutil delete mode 100644 library/packaging/portage delete mode 100644 library/packaging/portinstall delete mode 100644 library/packaging/redhat_subscription delete mode 100644 library/packaging/rhn_channel delete mode 100644 library/packaging/rhn_register delete mode 100644 library/packaging/rpm_key delete mode 100644 library/packaging/svr4pkg delete mode 100644 library/packaging/swdepot delete mode 100644 library/packaging/urpmi delete mode 100644 library/packaging/yum delete mode 100644 library/packaging/zypper delete mode 100644 library/packaging/zypper_repository delete mode 100644 library/source_control/bzr delete mode 100644 library/source_control/git delete mode 100644 library/source_control/github_hooks delete mode 100644 library/source_control/hg delete mode 100644 library/source_control/subversion delete mode 100755 library/system/alternatives delete mode 100644 library/system/at delete mode 100644 library/system/authorized_key delete mode 100644 library/system/capabilities delete mode 100644 library/system/cron delete mode 100644 library/system/debconf delete mode 100644 library/system/facter delete mode 100644 library/system/filesystem delete mode 100644 library/system/firewalld delete mode 100644 library/system/getent delete mode 100644 library/system/group delete mode 100755 library/system/hostname delete mode 100644 library/system/kernel_blacklist delete mode 100644 library/system/locale_gen delete mode 100644 library/system/lvg delete mode 100644 library/system/lvol delete mode 100644 library/system/modprobe delete mode 100755 library/system/mount delete mode 100644 library/system/ohai delete mode 100644 library/system/open_iscsi delete mode 100644 library/system/ping delete mode 100644 library/system/seboolean delete mode 100644 library/system/selinux delete mode 100644 library/system/service delete mode 100644 library/system/setup delete mode 100644 library/system/sysctl delete mode 
100644 library/system/ufw delete mode 100644 library/system/user delete mode 100644 library/system/zfs delete mode 100644 library/utilities/accelerate delete mode 100644 library/utilities/assert delete mode 100644 library/utilities/debug delete mode 100644 library/utilities/fail delete mode 100644 library/utilities/fireball delete mode 100644 library/utilities/include_vars delete mode 100644 library/utilities/pause delete mode 100644 library/utilities/set_fact delete mode 100644 library/utilities/wait_for delete mode 100644 library/web_infrastructure/apache2_module delete mode 100644 library/web_infrastructure/django_manage delete mode 100755 library/web_infrastructure/ejabberd_user delete mode 100644 library/web_infrastructure/htpasswd delete mode 100644 library/web_infrastructure/jboss delete mode 100644 library/web_infrastructure/jira delete mode 100644 library/web_infrastructure/supervisorctl delete mode 100644 library/windows/setup.ps1 delete mode 100644 library/windows/slurp.ps1 delete mode 100644 library/windows/win_feature delete mode 100644 library/windows/win_feature.ps1 delete mode 100644 library/windows/win_get_url delete mode 100644 library/windows/win_get_url.ps1 delete mode 100644 library/windows/win_group delete mode 100644 library/windows/win_group.ps1 delete mode 100644 library/windows/win_msi delete mode 100644 library/windows/win_msi.ps1 delete mode 100644 library/windows/win_ping delete mode 100644 library/windows/win_ping.ps1 delete mode 100644 library/windows/win_service delete mode 100644 library/windows/win_service.ps1 delete mode 100644 library/windows/win_stat delete mode 100644 library/windows/win_stat.ps1 delete mode 100644 library/windows/win_user delete mode 100644 library/windows/win_user.ps1 diff --git a/MANIFEST.in b/MANIFEST.in index 4fb0c04a4e..ff3a022108 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -5,7 +5,6 @@ graft examples/playbooks include packaging/distutils/setup.py include lib/ansible/module_utils/powershell.ps1 
recursive-include docs * -recursive-include library * include Makefile include VERSION include MANIFEST.in diff --git a/examples/ansible.cfg b/examples/ansible.cfg index 5147cc01e3..b3e862da51 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -12,7 +12,7 @@ # some basic default values... hostfile = /etc/ansible/hosts -library = /usr/share/ansible +# library_path = /usr/share/my_modules/ remote_tmp = $HOME/.ansible/tmp pattern = * forks = 5 diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 8c342497c1..861dd5325c 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -90,22 +90,6 @@ p = load_config_file() active_user = pwd.getpwuid(os.geteuid())[0] -# Needed so the RPM can call setup.py and have modules land in the -# correct location. See #1277 for discussion -if getattr(sys, "real_prefix", None): - # in a virtualenv - DIST_MODULE_PATH = os.path.join(sys.prefix, 'share/ansible/') -else: - DIST_MODULE_PATH = '/usr/share/ansible/' - -# Look for modules relative to this file path -# This is so that we can find the modules when running from a local checkout -# installed as editable with `pip install -e ...` or `python setup.py develop` -local_module_path = os.path.abspath( - os.path.join(os.path.dirname(__file__), '..', '..', 'library') -) -DIST_MODULE_PATH = os.pathsep.join([DIST_MODULE_PATH, local_module_path]) - # check all of these extensions when looking for yaml files for things like # group variables -- really anything we can load YAML_FILENAME_EXTENSIONS = [ "", ".yml", ".yaml", ".json" ] @@ -115,7 +99,7 @@ DEFAULTS='defaults' # configurable things DEFAULT_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS, 'hostfile', 'ANSIBLE_HOSTS', '/etc/ansible/hosts')) -DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', DIST_MODULE_PATH) +DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None) DEFAULT_ROLES_PATH = shell_expand_path(get_config(p, DEFAULTS, 'roles_path', 
'ANSIBLE_ROLES_PATH', '/etc/ansible/roles')) DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '$HOME/.ansible/tmp') DEFAULT_MODULE_NAME = get_config(p, DEFAULTS, 'module_name', None, 'command') diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 417309a626..385a037cd6 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 417309a626e39396196a4abc6208c9f6db158f9d +Subproject commit 385a037cd6bc42fc64e387973c0e7ef539b04df7 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 50f105578a..110250d344 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 50f105578a07361e60f95a63da6acb5660765871 +Subproject commit 110250d344be156387d08ea837f4bcb2c42034b4 diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 7e093f3537..f727bc6e4e 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -1215,7 +1215,7 @@ class Runner(object): module_suffixes = getattr(conn, 'default_suffixes', None) module_path = utils.plugins.module_finder.find_plugin(module_name, module_suffixes) if module_path is None: - raise errors.AnsibleFileNotFound("module %s not found in %s" % (module_name, utils.plugins.module_finder.print_paths())) + raise errors.AnsibleFileNotFound("module %s not found in configured module paths" % (module_name)) # insert shared code and arguments into the module diff --git a/lib/ansible/utils/plugins.py b/lib/ansible/utils/plugins.py index f83155412f..9349f133c4 100644 --- a/lib/ansible/utils/plugins.py +++ b/lib/ansible/utils/plugins.py @@ -75,6 +75,15 @@ class PluginLoader(object): ret.append(i) return os.pathsep.join(ret) + def _all_directories(self, dir): + results = [] + results.append(dir) + for root, subdirs, files in os.walk(dir): + if '__init__.py' in files: + for x in subdirs: + results.append(os.path.join(root,x)) + return results + def 
_get_package_paths(self): ''' Gets the path of a Python package ''' @@ -85,7 +94,7 @@ class PluginLoader(object): m = __import__(self.package) parts = self.package.split('.')[1:] self.package_path = os.path.join(os.path.dirname(m.__file__), *parts) - paths.append(self.package_path) + paths.extend(self._all_directories(self.package_path)) return paths else: return [ self.package_path ] @@ -107,7 +116,8 @@ class PluginLoader(object): # allow directories to be two levels deep files2 = glob.glob("%s/*/*" % fullpath) - files = files.extend(files2) + if files2 is not None: + files.extend(files2) for file in files: if os.path.isdir(file) and file not in ret: @@ -128,6 +138,8 @@ class PluginLoader(object): # look for any plugins installed in the package subtree ret.extend(self._get_package_paths()) + package_dirs = self._get_package_paths() + self._paths = ret @@ -153,7 +165,7 @@ class PluginLoader(object): if self.class_name: suffixes = ['.py'] else: - suffixes = ['', '.ps1'] + suffixes = ['', '.ps1', '.py'] for suffix in suffixes: full_name = '%s%s' % (name, suffix) diff --git a/library/cloud/azure b/library/cloud/azure deleted file mode 100644 index 1679fbc45d..0000000000 --- a/library/cloud/azure +++ /dev/null @@ -1,484 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -DOCUMENTATION = ''' ---- -module: azure -short_description: create or terminate a virtual machine in azure -description: - - Creates or terminates azure instances. When created optionally waits for it to be 'running'. This module has a dependency on python-azure >= 0.7.1 -version_added: "1.7" -options: - name: - description: - - name of the virtual machine and associated cloud service. - required: true - default: null - location: - description: - - the azure location to use (e.g. 'East US') - required: true - default: null - subscription_id: - description: - - azure subscription id. Overrides the AZURE_SUBSCRIPTION_ID environement variable. - required: false - default: null - management_cert_path: - description: - - path to an azure management certificate associated with the subscription id. Overrides the AZURE_CERT_PATH environement variable. - required: false - default: null - storage_account: - description: - - the azure storage account in which to store the data disks. - required: true - image: - description: - - system image for creating the virtual machine (e.g., b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu_DAILY_BUILD-precise-12_04_3-LTS-amd64-server-20131205-en-us-30GB) - required: true - default: null - role_size: - description: - - azure role size for the new virtual machine (e.g., Small, ExtraLarge, A6) - required: false - default: Small - endpoints: - description: - - a comma-separated list of TCP ports to expose on the virtual machine (e.g., "22,80") - required: false - default: 22 - user: - description: - - the unix username for the new virtual machine. - required: false - default: null - password: - description: - - the unix password for the new virtual machine. - required: false - default: null - ssh_cert_path: - description: - - path to an X509 certificate containing the public ssh key to install in the virtual machine. See http://www.windowsazure.com/en-us/manage/linux/tutorials/intro-to-linux/ for more details. 
- - if this option is specified, password-based ssh authentication will be disabled. - required: false - default: null - virtual_network_name: - description: - - Name of virtual network. - required: false - default: null - hostname: - description: - - hostname to write /etc/hostname. Defaults to .cloudapp.net. - required: false - default: null - wait: - description: - - wait for the instance to be in state 'running' before returning - required: false - default: "no" - choices: [ "yes", "no" ] - aliases: [] - wait_timeout: - description: - - how long before wait gives up, in seconds - default: 600 - aliases: [] - wait_timeout_redirects: - description: - - how long before wait gives up for redirects, in seconds - default: 300 - aliases: [] - state: - description: - - create or terminate instances - required: false - default: 'present' - aliases: [] - -requirements: [ "azure" ] -author: John Whitbeck -''' - -EXAMPLES = ''' -# Note: None of these examples set subscription_id or management_cert_path -# It is assumed that their matching environment variables are set. 
- -# Provision virtual machine example -- local_action: - module: azure - name: my-virtual-machine - role_size: Small - image: b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu_DAILY_BUILD-precise-12_04_3-LTS-amd64-server-20131205-en-us-30GB - location: 'East US' - user: ubuntu - ssh_cert_path: /path/to/azure_x509_cert.pem - storage_account: my-storage-account - wait: yes - -# Terminate virtual machine example -- local_action: - module: azure - name: my-virtual-machine - state: absent -''' - -import base64 -import datetime -import os -import sys -import time -from urlparse import urlparse - -AZURE_LOCATIONS = ['South Central US', - 'Central US', - 'East US 2', - 'East US', - 'West US', - 'North Central US', - 'North Europe', - 'West Europe', - 'East Asia', - 'Southeast Asia', - 'Japan West', - 'Japan East', - 'Brazil South'] - -AZURE_ROLE_SIZES = ['ExtraSmall', - 'Small', - 'Medium', - 'Large', - 'ExtraLarge', - 'A5', - 'A6', - 'A7', - 'A8', - 'A9', - 'Basic_A0', - 'Basic_A1', - 'Basic_A2', - 'Basic_A3', - 'Basic_A4'] - -try: - import azure as windows_azure - - from azure import WindowsAzureError, WindowsAzureMissingResourceError - from azure.servicemanagement import (ServiceManagementService, OSVirtualHardDisk, SSH, PublicKeys, - PublicKey, LinuxConfigurationSet, ConfigurationSetInputEndpoints, - ConfigurationSetInputEndpoint) -except ImportError: - print "failed=True msg='azure required for this module'" - sys.exit(1) - -from distutils.version import LooseVersion -from types import MethodType -import json - - -def _wait_for_completion(azure, promise, wait_timeout, msg): - if not promise: return - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time(): - operation_result = azure.get_operation_status(promise.request_id) - time.sleep(5) - if operation_result.status == "Succeeded": - return - - raise WindowsAzureError('Timed out waiting for async operation ' + msg + ' "' + str(promise.request_id) + '" to complete.') - - -def 
get_ssh_certificate_tokens(module, ssh_cert_path): - """ - Returns the sha1 fingerprint and a base64-encoded PKCS12 version of the certificate. - """ - # This returns a string such as SHA1 Fingerprint=88:60:0B:13:A9:14:47:DA:4E:19:10:7D:34:92:2B:DF:A1:7D:CA:FF - rc, stdout, stderr = module.run_command(['openssl', 'x509', '-in', ssh_cert_path, '-fingerprint', '-noout']) - if rc != 0: - module.fail_json(msg="failed to generate the key fingerprint, error was: %s" % stderr) - fingerprint = stdout.strip()[17:].replace(':', '') - - rc, stdout, stderr = module.run_command(['openssl', 'pkcs12', '-export', '-in', ssh_cert_path, '-nokeys', '-password', 'pass:']) - if rc != 0: - module.fail_json(msg="failed to generate the pkcs12 signature from the certificate, error was: %s" % stderr) - pkcs12_base64 = base64.b64encode(stdout.strip()) - - return (fingerprint, pkcs12_base64) - - -def create_virtual_machine(module, azure): - """ - Create new virtual machine - - module : AnsibleModule object - azure: authenticated azure ServiceManagementService object - - Returns: - True if a new virtual machine was created, false otherwise - """ - name = module.params.get('name') - hostname = module.params.get('hostname') or name + ".cloudapp.net" - endpoints = module.params.get('endpoints').split(',') - ssh_cert_path = module.params.get('ssh_cert_path') - user = module.params.get('user') - password = module.params.get('password') - location = module.params.get('location') - role_size = module.params.get('role_size') - storage_account = module.params.get('storage_account') - image = module.params.get('image') - virtual_network_name = module.params.get('virtual_network_name') - wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - - # Check if a deployment with the same name already exists - cloud_service_name_available = azure.check_hosted_service_name_availability(name) - if not cloud_service_name_available.result: - changed = False - else: - changed = 
True - # Create cloud service if necessary - try: - result = azure.create_hosted_service(service_name=name, label=name, location=location) - _wait_for_completion(azure, result, wait_timeout, "create_hosted_service") - except WindowsAzureError as e: - module.fail_json(msg="failed to create the new service name, it already exists: %s" % str(e)) - - # Create linux configuration - disable_ssh_password_authentication = not password - linux_config = LinuxConfigurationSet(hostname, user, password, disable_ssh_password_authentication) - - # Add ssh certificates if specified - if ssh_cert_path: - fingerprint, pkcs12_base64 = get_ssh_certificate_tokens(module, ssh_cert_path) - # Add certificate to cloud service - result = azure.add_service_certificate(name, pkcs12_base64, 'pfx', '') - _wait_for_completion(azure, result, wait_timeout, "add_service_certificate") - - # Create ssh config - ssh_config = SSH() - ssh_config.public_keys = PublicKeys() - authorized_keys_path = u'/home/%s/.ssh/authorized_keys' % user - ssh_config.public_keys.public_keys.append(PublicKey(path=authorized_keys_path, fingerprint=fingerprint)) - # Append ssh config to linux machine config - linux_config.ssh = ssh_config - - # Create network configuration - network_config = ConfigurationSetInputEndpoints() - network_config.configuration_set_type = 'NetworkConfiguration' - network_config.subnet_names = [] - for port in endpoints: - network_config.input_endpoints.append(ConfigurationSetInputEndpoint(name='TCP-%s' % port, - protocol='TCP', - port=port, - local_port=port)) - - # First determine where to store disk - today = datetime.date.today().strftime('%Y-%m-%d') - disk_prefix = u'%s-%s' % (name, name) - media_link = u'http://%s.blob.core.windows.net/vhds/%s-%s.vhd' % (storage_account, disk_prefix, today) - # Create system hard disk - os_hd = OSVirtualHardDisk(image, media_link) - - # Spin up virtual machine - try: - result = azure.create_virtual_machine_deployment(service_name=name, - deployment_name=name, 
- deployment_slot='production', - label=name, - role_name=name, - system_config=linux_config, - network_config=network_config, - os_virtual_hard_disk=os_hd, - role_size=role_size, - role_type='PersistentVMRole', - virtual_network_name=virtual_network_name) - _wait_for_completion(azure, result, wait_timeout, "create_virtual_machine_deployment") - except WindowsAzureError as e: - module.fail_json(msg="failed to create the new virtual machine, error was: %s" % str(e)) - - - try: - deployment = azure.get_deployment_by_name(service_name=name, deployment_name=name) - return (changed, urlparse(deployment.url).hostname, deployment) - except WindowsAzureError as e: - module.fail_json(msg="failed to lookup the deployment information for %s, error was: %s" % (name, str(e))) - - -def terminate_virtual_machine(module, azure): - """ - Terminates a virtual machine - - module : AnsibleModule object - azure: authenticated azure ServiceManagementService object - - Not yet supported: handle deletion of attached data disks. 
- - Returns: - True if a new virtual machine was deleted, false otherwise - """ - - # Whether to wait for termination to complete before returning - wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - name = module.params.get('name') - delete_empty_services = module.params.get('delete_empty_services') - - changed = False - - deployment = None - public_dns_name = None - disk_names = [] - try: - deployment = azure.get_deployment_by_name(service_name=name, deployment_name=name) - except WindowsAzureMissingResourceError as e: - pass # no such deployment or service - except WindowsAzureError as e: - module.fail_json(msg="failed to find the deployment, error was: %s" % str(e)) - - # Delete deployment - if deployment: - changed = True - try: - # gather disk info - results = [] - for role in deployment.role_list: - role_props = azure.get_role(name, deployment.name, role.role_name) - if role_props.os_virtual_hard_disk.disk_name not in disk_names: - disk_names.append(role_props.os_virtual_hard_disk.disk_name) - - result = azure.delete_deployment(name, deployment.name) - _wait_for_completion(azure, result, wait_timeout, "delete_deployment") - - for disk_name in disk_names: - azure.delete_disk(disk_name, True) - - # Now that the vm is deleted, remove the cloud service - result = azure.delete_hosted_service(service_name=name) - _wait_for_completion(azure, result, wait_timeout, "delete_hosted_service") - except WindowsAzureError as e: - module.fail_json(msg="failed to delete the service %s, error was: %s" % (name, str(e))) - public_dns_name = urlparse(deployment.url).hostname - - return changed, public_dns_name, deployment - - -def get_azure_creds(module): - # Check modul args for credentials, then check environment vars - subscription_id = module.params.get('subscription_id') - if not subscription_id: - subscription_id = os.environ.get('AZURE_SUBSCRIPTION_ID', None) - if not subscription_id: - module.fail_json(msg="No subscription_id 
provided. Please set 'AZURE_SUBSCRIPTION_ID' or use the 'subscription_id' parameter") - - management_cert_path = module.params.get('management_cert_path') - if not management_cert_path: - management_cert_path = os.environ.get('AZURE_CERT_PATH', None) - if not management_cert_path: - module.fail_json(msg="No management_cert_path provided. Please set 'AZURE_CERT_PATH' or use the 'management_cert_path' parameter") - - return subscription_id, management_cert_path - - -def main(): - module = AnsibleModule( - argument_spec=dict( - ssh_cert_path=dict(), - name=dict(), - hostname=dict(), - location=dict(choices=AZURE_LOCATIONS), - role_size=dict(choices=AZURE_ROLE_SIZES), - subscription_id=dict(no_log=True), - storage_account=dict(), - management_cert_path=dict(), - endpoints=dict(default='22'), - user=dict(), - password=dict(), - image=dict(), - virtual_network_name=dict(default=None), - state=dict(default='present'), - wait=dict(type='bool', default=False), - wait_timeout=dict(default=600), - wait_timeout_redirects=dict(default=300) - ) - ) - # create azure ServiceManagementService object - subscription_id, management_cert_path = get_azure_creds(module) - - wait_timeout_redirects = int(module.params.get('wait_timeout_redirects')) - if LooseVersion(windows_azure.__version__) <= "0.8.0": - # wrapper for handling redirects which the sdk <= 0.8.0 is not following - azure = Wrapper(ServiceManagementService(subscription_id, management_cert_path), wait_timeout_redirects) - else: - azure = ServiceManagementService(subscription_id, management_cert_path) - - cloud_service_raw = None - if module.params.get('state') == 'absent': - (changed, public_dns_name, deployment) = terminate_virtual_machine(module, azure) - - elif module.params.get('state') == 'present': - # Changed is always set to true when provisioning new instances - if not module.params.get('name'): - module.fail_json(msg='name parameter is required for new instance') - if not module.params.get('image'): - 
module.fail_json(msg='image parameter is required for new instance') - if not module.params.get('user'): - module.fail_json(msg='user parameter is required for new instance') - if not module.params.get('location'): - module.fail_json(msg='location parameter is required for new instance') - if not module.params.get('storage_account'): - module.fail_json(msg='storage_account parameter is required for new instance') - (changed, public_dns_name, deployment) = create_virtual_machine(module, azure) - - module.exit_json(changed=changed, public_dns_name=public_dns_name, deployment=json.loads(json.dumps(deployment, default=lambda o: o.__dict__))) - - -class Wrapper(object): - def __init__(self, obj, wait_timeout): - self.other = obj - self.wait_timeout = wait_timeout - - def __getattr__(self, name): - if hasattr(self.other, name): - func = getattr(self.other, name) - return lambda *args, **kwargs: self._wrap(func, args, kwargs) - raise AttributeError(name) - - def _wrap(self, func, args, kwargs): - if type(func) == MethodType: - result = self._handle_temporary_redirects(lambda: func(*args, **kwargs)) - else: - result = self._handle_temporary_redirects(lambda: func(self.other, *args, **kwargs)) - return result - - def _handle_temporary_redirects(self, f): - wait_timeout = time.time() + self.wait_timeout - while wait_timeout > time.time(): - try: - return f() - except WindowsAzureError as e: - if not str(e).lower().find("temporary redirect") == -1: - time.sleep(5) - pass - else: - raise e - - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/cloud/cloudformation b/library/cloud/cloudformation deleted file mode 100644 index 6a7838a51b..0000000000 --- a/library/cloud/cloudformation +++ /dev/null @@ -1,313 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software 
Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: cloudformation -short_description: create a AWS CloudFormation stack -description: - - Launches an AWS CloudFormation stack and waits for it complete. -version_added: "1.1" -options: - stack_name: - description: - - name of the cloudformation stack - required: true - default: null - aliases: [] - disable_rollback: - description: - - If a stacks fails to form, rollback will remove the stack - required: false - default: "false" - choices: [ "true", "false" ] - aliases: [] - template_parameters: - description: - - a list of hashes of all the template variables for the stack - required: false - default: {} - aliases: [] - region: - description: - - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. - required: true - default: null - aliases: ['aws_region', 'ec2_region'] - state: - description: - - If state is "present", stack will be created. If state is "present" and if stack exists and template has changed, it will be updated. - If state is absent, stack will be removed. - required: true - default: null - aliases: [] - template: - description: - - the path of the cloudformation template - required: true - default: null - aliases: [] - tags: - description: - - Dictionary of tags to associate with stack and it's resources during stack creation. Cannot be updated later. - Requires at least Boto version 2.6.0. - required: false - default: null - aliases: [] - version_added: "1.4" - aws_secret_key: - description: - - AWS secret key. 
If not set then the value of the AWS_SECRET_KEY environment variable is used. - required: false - default: null - aliases: [ 'ec2_secret_key', 'secret_key' ] - version_added: "1.5" - aws_access_key: - description: - - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. - required: false - default: null - aliases: [ 'ec2_access_key', 'access_key' ] - version_added: "1.5" - region: - description: - - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. - required: false - aliases: ['aws_region', 'ec2_region'] - version_added: "1.5" - -requirements: [ "boto" ] -author: James S. Martin -''' - -EXAMPLES = ''' -# Basic task example -tasks: -- name: launch ansible cloudformation example - action: cloudformation > - stack_name="ansible-cloudformation" state=present - region=us-east-1 disable_rollback=true - template=files/cloudformation-example.json - args: - template_parameters: - KeyName: jmartin - DiskType: ephemeral - InstanceType: m1.small - ClusterSize: 3 - tags: - Stack: ansible-cloudformation -''' - -import json -import time - -try: - import boto - import boto.cloudformation.connection -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - - -class Region: - def __init__(self, region): - '''connects boto to the region specified in the cloudformation template''' - self.name = region - self.endpoint = 'cloudformation.%s.amazonaws.com' % region - - -def boto_exception(err): - '''generic error message handler''' - if hasattr(err, 'error_message'): - error = err.error_message - elif hasattr(err, 'message'): - error = err.message - else: - error = '%s: %s' % (Exception, err) - - return error - - -def boto_version_required(version_tuple): - parts = boto.Version.split('.') - boto_version = [] - try: - for part in parts: - boto_version.append(int(part)) - except: - boto_version.append(-1) - return tuple(boto_version) >= 
tuple(version_tuple) - - -def stack_operation(cfn, stack_name, operation): - '''gets the status of a stack while it is created/updated/deleted''' - existed = [] - result = {} - operation_complete = False - while operation_complete == False: - try: - stack = cfn.describe_stacks(stack_name)[0] - existed.append('yes') - except: - if 'yes' in existed: - result = dict(changed=True, - output='Stack Deleted', - events=map(str, list(stack.describe_events()))) - else: - result = dict(changed= True, output='Stack Not Found') - break - if '%s_COMPLETE' % operation == stack.stack_status: - result = dict(changed=True, - events = map(str, list(stack.describe_events())), - output = 'Stack %s complete' % operation) - break - if 'ROLLBACK_COMPLETE' == stack.stack_status or '%s_ROLLBACK_COMPLETE' % operation == stack.stack_status: - result = dict(changed=True, failed=True, - events = map(str, list(stack.describe_events())), - output = 'Problem with %s. Rollback complete' % operation) - break - elif '%s_FAILED' % operation == stack.stack_status: - result = dict(changed=True, failed=True, - events = map(str, list(stack.describe_events())), - output = 'Stack %s failed' % operation) - break - else: - time.sleep(5) - return result - - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( - stack_name=dict(required=True), - template_parameters=dict(required=False, type='dict', default={}), - state=dict(default='present', choices=['present', 'absent']), - template=dict(default=None, required=True), - disable_rollback=dict(default=False, type='bool'), - tags=dict(default=None) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - ) - - state = module.params['state'] - stack_name = module.params['stack_name'] - template_body = open(module.params['template'], 'r').read() - disable_rollback = module.params['disable_rollback'] - template_parameters = module.params['template_parameters'] - tags = module.params['tags'] - - ec2_url, aws_access_key, 
aws_secret_key, region = get_ec2_creds(module) - - kwargs = dict() - if tags is not None: - if not boto_version_required((2,6,0)): - module.fail_json(msg='Module parameter "tags" requires at least Boto version 2.6.0') - kwargs['tags'] = tags - - - # convert the template parameters ansible passes into a tuple for boto - template_parameters_tup = [(k, v) for k, v in template_parameters.items()] - stack_outputs = {} - - try: - cf_region = Region(region) - cfn = boto.cloudformation.connection.CloudFormationConnection( - aws_access_key_id=aws_access_key, - aws_secret_access_key=aws_secret_key, - region=cf_region, - ) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg=str(e)) - update = False - result = {} - operation = None - - # if state is present we are going to ensure that the stack is either - # created or updated - if state == 'present': - try: - cfn.create_stack(stack_name, parameters=template_parameters_tup, - template_body=template_body, - disable_rollback=disable_rollback, - capabilities=['CAPABILITY_IAM'], - **kwargs) - operation = 'CREATE' - except Exception, err: - error_msg = boto_exception(err) - if 'AlreadyExistsException' in error_msg or 'already exists' in error_msg: - update = True - else: - module.fail_json(msg=error_msg) - if not update: - result = stack_operation(cfn, stack_name, operation) - - # if the state is present and the stack already exists, we try to update it - # AWS will tell us if the stack template and parameters are the same and - # don't need to be updated. - if update: - try: - cfn.update_stack(stack_name, parameters=template_parameters_tup, - template_body=template_body, - disable_rollback=disable_rollback, - capabilities=['CAPABILITY_IAM']) - operation = 'UPDATE' - except Exception, err: - error_msg = boto_exception(err) - if 'No updates are to be performed.' 
in error_msg: - result = dict(changed=False, output='Stack is already up-to-date.') - else: - module.fail_json(msg=error_msg) - - if operation == 'UPDATE': - result = stack_operation(cfn, stack_name, operation) - - # check the status of the stack while we are creating/updating it. - # and get the outputs of the stack - - if state == 'present' or update: - stack = cfn.describe_stacks(stack_name)[0] - for output in stack.outputs: - stack_outputs[output.key] = output.value - result['stack_outputs'] = stack_outputs - - # absent state is different because of the way delete_stack works. - # problem is it it doesn't give an error if stack isn't found - # so must describe the stack first - - if state == 'absent': - try: - cfn.describe_stacks(stack_name) - operation = 'DELETE' - except Exception, err: - error_msg = boto_exception(err) - if 'Stack:%s does not exist' % stack_name in error_msg: - result = dict(changed=False, output='Stack not found.') - else: - module.fail_json(msg=error_msg) - if operation == 'DELETE': - cfn.delete_stack(stack_name) - result = stack_operation(cfn, stack_name, operation) - - module.exit_json(**result) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -main() diff --git a/library/cloud/digital_ocean b/library/cloud/digital_ocean deleted file mode 100644 index efebf5f1bc..0000000000 --- a/library/cloud/digital_ocean +++ /dev/null @@ -1,434 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -DOCUMENTATION = ''' ---- -module: digital_ocean -short_description: Create/delete a droplet/SSH_key in DigitalOcean -description: - - Create/delete a droplet in DigitalOcean and optionally wait for it to be 'running', or deploy an SSH key. -version_added: "1.3" -options: - command: - description: - - Which target you want to operate on. - default: droplet - choices: ['droplet', 'ssh'] - state: - description: - - Indicate desired state of the target. - default: present - choices: ['present', 'active', 'absent', 'deleted'] - client_id: - description: - - DigitalOcean manager id. - api_key: - description: - - DigitalOcean api key. - id: - description: - - Numeric, the droplet id you want to operate on. - name: - description: - - String, this is the name of the droplet - must be formatted by hostname rules, or the name of a SSH key. - unique_name: - description: - - Bool, require unique hostnames. By default, DigitalOcean allows multiple hosts with the same name. Setting this to "yes" allows only one host per name. Useful for idempotence. - version_added: "1.4" - default: "no" - choices: [ "yes", "no" ] - size_id: - description: - - Numeric, this is the id of the size you would like the droplet created with. - image_id: - description: - - Numeric, this is the id of the image you would like the droplet created with. - region_id: - description: - - "Numeric, this is the id of the region you would like your server to be created in." - ssh_key_ids: - description: - - Optional, comma separated list of ssh_key_ids that you would like to be added to the server. - virtio: - description: - - "Bool, turn on virtio driver in droplet for improved network and storage I/O." 
- version_added: "1.4" - default: "yes" - choices: [ "yes", "no" ] - private_networking: - description: - - "Bool, add an additional, private network interface to droplet for inter-droplet communication." - version_added: "1.4" - default: "no" - choices: [ "yes", "no" ] - backups_enabled: - description: - - Optional, Boolean, enables backups for your droplet. - version_added: "1.6" - default: "no" - choices: [ "yes", "no" ] - wait: - description: - - Wait for the droplet to be in state 'running' before returning. If wait is "no" an ip_address may not be returned. - default: "yes" - choices: [ "yes", "no" ] - wait_timeout: - description: - - How long before wait gives up, in seconds. - default: 300 - ssh_pub_key: - description: - - The public SSH key you want to add to your account. - -notes: - - Two environment variables can be used, DO_CLIENT_ID and DO_API_KEY. -requirements: [ dopy ] -''' - - -EXAMPLES = ''' -# Ensure a SSH key is present -# If a key matches this name, will return the ssh key id and changed = False -# If no existing key matches this name, a new key is created, the ssh key id is returned and changed = False - -- digital_ocean: > - state=present - command=ssh - name=my_ssh_key - ssh_pub_key='ssh-rsa AAAA...' - client_id=XXX - api_key=XXX - -# Create a new Droplet -# Will return the droplet details including the droplet id (used for idempotence) - -- digital_ocean: > - state=present - command=droplet - name=mydroplet - client_id=XXX - api_key=XXX - size_id=1 - region_id=2 - image_id=3 - wait_timeout=500 - register: my_droplet -- debug: msg="ID is {{ my_droplet.droplet.id }}" -- debug: msg="IP is {{ my_droplet.droplet.ip_address }}" - -# Ensure a droplet is present -# If droplet id already exist, will return the droplet details and changed = False -# If no droplet matches the id, a new droplet will be created and the droplet details (including the new id) are returned, changed = True. 
- -- digital_ocean: > - state=present - command=droplet - id=123 - name=mydroplet - client_id=XXX - api_key=XXX - size_id=1 - region_id=2 - image_id=3 - wait_timeout=500 - -# Create a droplet with ssh key -# The ssh key id can be passed as argument at the creation of a droplet (see ssh_key_ids). -# Several keys can be added to ssh_key_ids as id1,id2,id3 -# The keys are used to connect as root to the droplet. - -- digital_ocean: > - state=present - ssh_key_ids=id1,id2 - name=mydroplet - client_id=XXX - api_key=XXX - size_id=1 - region_id=2 - image_id=3 -''' - -import sys -import os -import time - -try: - import dopy - from dopy.manager import DoError, DoManager -except ImportError, e: - print "failed=True msg='dopy >= 0.2.3 required for this module'" - sys.exit(1) - -if dopy.__version__ < '0.2.3': - print "failed=True msg='dopy >= 0.2.3 required for this module'" - sys.exit(1) - -class TimeoutError(DoError): - def __init__(self, msg, id): - super(TimeoutError, self).__init__(msg) - self.id = id - -class JsonfyMixIn(object): - def to_json(self): - return self.__dict__ - -class Droplet(JsonfyMixIn): - manager = None - - def __init__(self, droplet_json): - self.status = 'new' - self.__dict__.update(droplet_json) - - def is_powered_on(self): - return self.status == 'active' - - def update_attr(self, attrs=None): - if attrs: - for k, v in attrs.iteritems(): - setattr(self, k, v) - else: - json = self.manager.show_droplet(self.id) - if json['ip_address']: - self.update_attr(json) - - def power_on(self): - assert self.status == 'off', 'Can only power on a closed one.' 
- json = self.manager.power_on_droplet(self.id) - self.update_attr(json) - - def ensure_powered_on(self, wait=True, wait_timeout=300): - if self.is_powered_on(): - return - if self.status == 'off': # powered off - self.power_on() - - if wait: - end_time = time.time() + wait_timeout - while time.time() < end_time: - time.sleep(min(20, end_time - time.time())) - self.update_attr() - if self.is_powered_on(): - if not self.ip_address: - raise TimeoutError('No ip is found.', self.id) - return - raise TimeoutError('Wait for droplet running timeout', self.id) - - def destroy(self): - return self.manager.destroy_droplet(self.id, scrub_data=True) - - @classmethod - def setup(cls, client_id, api_key): - cls.manager = DoManager(client_id, api_key) - - @classmethod - def add(cls, name, size_id, image_id, region_id, ssh_key_ids=None, virtio=True, private_networking=False, backups_enabled=False): - json = cls.manager.new_droplet(name, size_id, image_id, region_id, ssh_key_ids, virtio, private_networking, backups_enabled) - droplet = cls(json) - return droplet - - @classmethod - def find(cls, id=None, name=None): - if not id and not name: - return False - - droplets = cls.list_all() - - # Check first by id. digital ocean requires that it be unique - for droplet in droplets: - if droplet.id == id: - return droplet - - # Failing that, check by hostname. 
- for droplet in droplets: - if droplet.name == name: - return droplet - - return False - - @classmethod - def list_all(cls): - json = cls.manager.all_active_droplets() - return map(cls, json) - -class SSH(JsonfyMixIn): - manager = None - - def __init__(self, ssh_key_json): - self.__dict__.update(ssh_key_json) - update_attr = __init__ - - def destroy(self): - self.manager.destroy_ssh_key(self.id) - return True - - @classmethod - def setup(cls, client_id, api_key): - cls.manager = DoManager(client_id, api_key) - - @classmethod - def find(cls, name): - if not name: - return False - keys = cls.list_all() - for key in keys: - if key.name == name: - return key - return False - - @classmethod - def list_all(cls): - json = cls.manager.all_ssh_keys() - return map(cls, json) - - @classmethod - def add(cls, name, key_pub): - json = cls.manager.new_ssh_key(name, key_pub) - return cls(json) - -def core(module): - def getkeyordie(k): - v = module.params[k] - if v is None: - module.fail_json(msg='Unable to load %s' % k) - return v - - try: - # params['client_id'] will be None even if client_id is not passed in - client_id = module.params['client_id'] or os.environ['DO_CLIENT_ID'] - api_key = module.params['api_key'] or os.environ['DO_API_KEY'] - except KeyError, e: - module.fail_json(msg='Unable to load %s' % e.message) - - changed = True - command = module.params['command'] - state = module.params['state'] - - if command == 'droplet': - Droplet.setup(client_id, api_key) - if state in ('active', 'present'): - - # First, try to find a droplet by id. - droplet = Droplet.find(id=module.params['id']) - - # If we couldn't find the droplet and the user is allowing unique - # hostnames, then check to see if a droplet with the specified - # hostname already exists. - if not droplet and module.params['unique_name']: - droplet = Droplet.find(name=getkeyordie('name')) - - # If both of those attempts failed, then create a new droplet. 
- if not droplet: - droplet = Droplet.add( - name=getkeyordie('name'), - size_id=getkeyordie('size_id'), - image_id=getkeyordie('image_id'), - region_id=getkeyordie('region_id'), - ssh_key_ids=module.params['ssh_key_ids'], - virtio=module.params['virtio'], - private_networking=module.params['private_networking'], - backups_enabled=module.params['backups_enabled'], - ) - - if droplet.is_powered_on(): - changed = False - - droplet.ensure_powered_on( - wait=getkeyordie('wait'), - wait_timeout=getkeyordie('wait_timeout') - ) - - module.exit_json(changed=changed, droplet=droplet.to_json()) - - elif state in ('absent', 'deleted'): - # First, try to find a droplet by id. - droplet = Droplet.find(module.params['id']) - - # If we couldn't find the droplet and the user is allowing unique - # hostnames, then check to see if a droplet with the specified - # hostname already exists. - if not droplet and module.params['unique_name']: - droplet = Droplet.find(name=getkeyordie('name')) - - if not droplet: - module.exit_json(changed=False, msg='The droplet is not found.') - - event_json = droplet.destroy() - module.exit_json(changed=True, event_id=event_json['event_id']) - - elif command == 'ssh': - SSH.setup(client_id, api_key) - name = getkeyordie('name') - if state in ('active', 'present'): - key = SSH.find(name) - if key: - module.exit_json(changed=False, ssh_key=key.to_json()) - key = SSH.add(name, getkeyordie('ssh_pub_key')) - module.exit_json(changed=True, ssh_key=key.to_json()) - - elif state in ('absent', 'deleted'): - key = SSH.find(name) - if not key: - module.exit_json(changed=False, msg='SSH key with the name of %s is not found.' 
% name) - key.destroy() - module.exit_json(changed=True) - - -def main(): - module = AnsibleModule( - argument_spec = dict( - command = dict(choices=['droplet', 'ssh'], default='droplet'), - state = dict(choices=['active', 'present', 'absent', 'deleted'], default='present'), - client_id = dict(aliases=['CLIENT_ID'], no_log=True), - api_key = dict(aliases=['API_KEY'], no_log=True), - name = dict(type='str'), - size_id = dict(type='int'), - image_id = dict(type='int'), - region_id = dict(type='int'), - ssh_key_ids = dict(default=''), - virtio = dict(type='bool', default='yes'), - private_networking = dict(type='bool', default='no'), - backups_enabled = dict(type='bool', default='no'), - id = dict(aliases=['droplet_id'], type='int'), - unique_name = dict(type='bool', default='no'), - wait = dict(type='bool', default=True), - wait_timeout = dict(default=300, type='int'), - ssh_pub_key = dict(type='str'), - ), - required_together = ( - ['size_id', 'image_id', 'region_id'], - ), - mutually_exclusive = ( - ['size_id', 'ssh_pub_key'], - ['image_id', 'ssh_pub_key'], - ['region_id', 'ssh_pub_key'], - ), - required_one_of = ( - ['id', 'name'], - ), - ) - - try: - core(module) - except TimeoutError, e: - module.fail_json(msg=str(e), id=e.id) - except (DoError, Exception), e: - module.fail_json(msg=str(e)) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/cloud/digital_ocean_domain b/library/cloud/digital_ocean_domain deleted file mode 100644 index d0615ad0df..0000000000 --- a/library/cloud/digital_ocean_domain +++ /dev/null @@ -1,242 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -DOCUMENTATION = ''' ---- -module: digital_ocean_domain -short_description: Create/delete a DNS record in DigitalOcean -description: - - Create/delete a DNS record in DigitalOcean. -version_added: "1.6" -options: - state: - description: - - Indicate desired state of the target. - default: present - choices: ['present', 'active', 'absent', 'deleted'] - client_id: - description: - - DigitalOcean manager id. - api_key: - description: - - DigitalOcean api key. - id: - description: - - Numeric, the droplet id you want to operate on. - name: - description: - - String, this is the name of the droplet - must be formatted by hostname rules, or the name of a SSH key, or the name of a domain. - ip: - description: - - The IP address to point a domain at. - -notes: - - Two environment variables can be used, DO_CLIENT_ID and DO_API_KEY. 
-''' - - -EXAMPLES = ''' -# Create a domain record - -- digital_ocean_domain: > - state=present - name=my.digitalocean.domain - ip=127.0.0.1 - -# Create a droplet and a corresponding domain record - -- digital_ocean: > - state=present - name=test_droplet - size_id=1 - region_id=2 - image_id=3 - register: test_droplet - -- digital_ocean_domain: > - state=present - name={{ test_droplet.droplet.name }}.my.domain - ip={{ test_droplet.droplet.ip_address }} -''' - -import sys -import os -import time - -try: - from dopy.manager import DoError, DoManager -except ImportError as e: - print "failed=True msg='dopy required for this module'" - sys.exit(1) - -class TimeoutError(DoError): - def __init__(self, msg, id): - super(TimeoutError, self).__init__(msg) - self.id = id - -class JsonfyMixIn(object): - def to_json(self): - return self.__dict__ - -class DomainRecord(JsonfyMixIn): - manager = None - - def __init__(self, json): - self.__dict__.update(json) - update_attr = __init__ - - def update(self, data = None, record_type = None): - json = self.manager.edit_domain_record(self.domain_id, - self.id, - record_type if record_type is not None else self.record_type, - data if data is not None else self.data) - self.__dict__.update(json) - return self - - def destroy(self): - json = self.manager.destroy_domain_record(self.domain_id, self.id) - return json - -class Domain(JsonfyMixIn): - manager = None - - def __init__(self, domain_json): - self.__dict__.update(domain_json) - - def destroy(self): - self.manager.destroy_domain(self.id) - - def records(self): - json = self.manager.all_domain_records(self.id) - return map(DomainRecord, json) - - @classmethod - def add(cls, name, ip): - json = cls.manager.new_domain(name, ip) - return cls(json) - - @classmethod - def setup(cls, client_id, api_key): - cls.manager = DoManager(client_id, api_key) - DomainRecord.manager = cls.manager - - @classmethod - def list_all(cls): - domains = cls.manager.all_domains() - return map(cls, domains) - - 
@classmethod - def find(cls, name=None, id=None): - if name is None and id is None: - return False - - domains = Domain.list_all() - - if id is not None: - for domain in domains: - if domain.id == id: - return domain - - if name is not None: - for domain in domains: - if domain.name == name: - return domain - - return False - -def core(module): - def getkeyordie(k): - v = module.params[k] - if v is None: - module.fail_json(msg='Unable to load %s' % k) - return v - - try: - # params['client_id'] will be None even if client_id is not passed in - client_id = module.params['client_id'] or os.environ['DO_CLIENT_ID'] - api_key = module.params['api_key'] or os.environ['DO_API_KEY'] - except KeyError, e: - module.fail_json(msg='Unable to load %s' % e.message) - - changed = True - state = module.params['state'] - - Domain.setup(client_id, api_key) - if state in ('present'): - domain = Domain.find(id=module.params["id"]) - - if not domain: - domain = Domain.find(name=getkeyordie("name")) - - if not domain: - domain = Domain.add(getkeyordie("name"), - getkeyordie("ip")) - module.exit_json(changed=True, domain=domain.to_json()) - else: - records = domain.records() - at_record = None - for record in records: - if record.name == "@": - at_record = record - - if not at_record.data == getkeyordie("ip"): - record.update(data=getkeyordie("ip"), record_type='A') - module.exit_json(changed=True, domain=Domain.find(id=record.domain_id).to_json()) - - module.exit_json(changed=False, domain=domain.to_json()) - - elif state in ('absent'): - domain = None - if "id" in module.params: - domain = Domain.find(id=module.params["id"]) - - if not domain and "name" in module.params: - domain = Domain.find(name=module.params["name"]) - - if not domain: - module.exit_json(changed=False, msg="Domain not found.") - - event_json = domain.destroy() - module.exit_json(changed=True, event=event_json) - - -def main(): - module = AnsibleModule( - argument_spec = dict( - state = dict(choices=['active', 
'present', 'absent', 'deleted'], default='present'), - client_id = dict(aliases=['CLIENT_ID'], no_log=True), - api_key = dict(aliases=['API_KEY'], no_log=True), - name = dict(type='str'), - id = dict(aliases=['droplet_id'], type='int'), - ip = dict(type='str'), - ), - required_one_of = ( - ['id', 'name'], - ), - ) - - try: - core(module) - except TimeoutError as e: - module.fail_json(msg=str(e), id=e.id) - except (DoError, Exception) as e: - module.fail_json(msg=str(e)) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/cloud/digital_ocean_sshkey b/library/cloud/digital_ocean_sshkey deleted file mode 100644 index 69f32266b5..0000000000 --- a/library/cloud/digital_ocean_sshkey +++ /dev/null @@ -1,178 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -DOCUMENTATION = ''' ---- -module: digital_ocean_sshkey -short_description: Create/delete an SSH key in DigitalOcean -description: - - Create/delete an SSH key. -version_added: "1.6" -options: - state: - description: - - Indicate desired state of the target. - default: present - choices: ['present', 'absent'] - client_id: - description: - - DigitalOcean manager id. - api_key: - description: - - DigitalOcean api key. - id: - description: - - Numeric, the SSH key id you want to operate on. 
- name: - description: - - String, this is the name of an SSH key to create or destroy. - ssh_pub_key: - description: - - The public SSH key you want to add to your account. - -notes: - - Two environment variables can be used, DO_CLIENT_ID and DO_API_KEY. -''' - - -EXAMPLES = ''' -# Ensure a SSH key is present -# If a key matches this name, will return the ssh key id and changed = False -# If no existing key matches this name, a new key is created, the ssh key id is returned and changed = False - -- digital_ocean_sshkey: > - state=present - name=my_ssh_key - ssh_pub_key='ssh-rsa AAAA...' - client_id=XXX - api_key=XXX - -''' - -import sys -import os -import time - -try: - from dopy.manager import DoError, DoManager -except ImportError as e: - print "failed=True msg='dopy required for this module'" - sys.exit(1) - -class TimeoutError(DoError): - def __init__(self, msg, id): - super(TimeoutError, self).__init__(msg) - self.id = id - -class JsonfyMixIn(object): - def to_json(self): - return self.__dict__ - -class SSH(JsonfyMixIn): - manager = None - - def __init__(self, ssh_key_json): - self.__dict__.update(ssh_key_json) - update_attr = __init__ - - def destroy(self): - self.manager.destroy_ssh_key(self.id) - return True - - @classmethod - def setup(cls, client_id, api_key): - cls.manager = DoManager(client_id, api_key) - - @classmethod - def find(cls, name): - if not name: - return False - keys = cls.list_all() - for key in keys: - if key.name == name: - return key - return False - - @classmethod - def list_all(cls): - json = cls.manager.all_ssh_keys() - return map(cls, json) - - @classmethod - def add(cls, name, key_pub): - json = cls.manager.new_ssh_key(name, key_pub) - return cls(json) - -def core(module): - def getkeyordie(k): - v = module.params[k] - if v is None: - module.fail_json(msg='Unable to load %s' % k) - return v - - try: - # params['client_id'] will be None even if client_id is not passed in - client_id = module.params['client_id'] or 
os.environ['DO_CLIENT_ID'] - api_key = module.params['api_key'] or os.environ['DO_API_KEY'] - except KeyError, e: - module.fail_json(msg='Unable to load %s' % e.message) - - changed = True - state = module.params['state'] - - SSH.setup(client_id, api_key) - name = getkeyordie('name') - if state in ('present'): - key = SSH.find(name) - if key: - module.exit_json(changed=False, ssh_key=key.to_json()) - key = SSH.add(name, getkeyordie('ssh_pub_key')) - module.exit_json(changed=True, ssh_key=key.to_json()) - - elif state in ('absent'): - key = SSH.find(name) - if not key: - module.exit_json(changed=False, msg='SSH key with the name of %s is not found.' % name) - key.destroy() - module.exit_json(changed=True) - -def main(): - module = AnsibleModule( - argument_spec = dict( - state = dict(choices=['present', 'absent'], default='present'), - client_id = dict(aliases=['CLIENT_ID'], no_log=True), - api_key = dict(aliases=['API_KEY'], no_log=True), - name = dict(type='str'), - id = dict(aliases=['droplet_id'], type='int'), - ssh_pub_key = dict(type='str'), - ), - required_one_of = ( - ['id', 'name'], - ), - ) - - try: - core(module) - except TimeoutError as e: - module.fail_json(msg=str(e), id=e.id) - except (DoError, Exception) as e: - module.fail_json(msg=str(e)) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/cloud/docker b/library/cloud/docker deleted file mode 100644 index a0a52ffc75..0000000000 --- a/library/cloud/docker +++ /dev/null @@ -1,854 +0,0 @@ -#!/usr/bin/python - -# (c) 2013, Cove Schneider -# (c) 2014, Joshua Conner -# (c) 2014, Pavel Antonov -# -# This file is part of Ansible, -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -###################################################################### - -DOCUMENTATION = ''' ---- -module: docker -version_added: "1.4" -short_description: manage docker containers -description: - - Manage the life cycle of docker containers. -options: - count: - description: - - Set number of containers to run - required: False - default: 1 - aliases: [] - image: - description: - - Set container image to use - required: true - default: null - aliases: [] - command: - description: - - Set command to run in a container on startup - required: false - default: null - aliases: [] - name: - description: - - Set name for container (used to find single container or to provide links) - required: false - default: null - aliases: [] - version_added: "1.5" - ports: - description: - - Set private to public port mapping specification using docker CLI-style syntax [([:[host_port]])|():][/udp] - required: false - default: null - aliases: [] - version_added: "1.5" - expose: - description: - - Set container ports to expose for port mappings or links. (If the port is already exposed using EXPOSE in a Dockerfile, you don't need to expose it again.) 
- required: false - default: null - aliases: [] - version_added: "1.5" - publish_all_ports: - description: - - Publish all exposed ports to the host interfaces - required: false - default: false - aliases: [] - version_added: "1.5" - volumes: - description: - - Set volume(s) to mount on the container - required: false - default: null - aliases: [] - volumes_from: - description: - - Set shared volume(s) from another container - required: false - default: null - aliases: [] - links: - description: - - Link container(s) to other container(s) (e.g. links=redis,postgresql:db) - required: false - default: null - aliases: [] - version_added: "1.5" - memory_limit: - description: - - Set RAM allocated to container - required: false - default: null - aliases: [] - default: 256MB - docker_url: - description: - - URL of docker host to issue commands to - required: false - default: unix://var/run/docker.sock - aliases: [] - docker_api_version: - description: - - Remote API version to use. This defaults to the current default as specified by docker-py. - required: false - default: docker-py default remote API version - aliases: [] - version_added: "1.8" - username: - description: - - Set remote API username - required: false - default: null - aliases: [] - password: - description: - - Set remote API password - required: false - default: null - aliases: [] - hostname: - description: - - Set container hostname - required: false - default: null - aliases: [] - env: - description: - - Set environment variables (e.g. 
env="PASSWORD=sEcRe7,WORKERS=4") - required: false - default: null - aliases: [] - dns: - description: - - Set custom DNS servers for the container - required: false - default: null - aliases: [] - detach: - description: - - Enable detached mode on start up, leaves container running in background - required: false - default: true - aliases: [] - state: - description: - - Set the state of the container - required: false - default: present - choices: [ "present", "running", "stopped", "absent", "killed", "restarted" ] - aliases: [] - privileged: - description: - - Set whether the container should run in privileged mode - required: false - default: false - aliases: [] - lxc_conf: - description: - - LXC config parameters, e.g. lxc.aa_profile:unconfined - required: false - default: - aliases: [] - name: - description: - - Set the name of the container (cannot use with count) - required: false - default: null - aliases: [] - version_added: "1.5" - stdin_open: - description: - - Keep stdin open - required: false - default: false - aliases: [] - version_added: "1.6" - tty: - description: - - Allocate a pseudo-tty - required: false - default: false - aliases: [] - version_added: "1.6" - net: - description: - - Set Network mode for the container (bridge, none, container:, host). Requires docker >= 0.11. - required: false - default: false - aliases: [] - version_added: "1.8" - registry: - description: - - The remote registry URL to use for pulling images. 
- required: false - default: '' - aliases: [] - version_added: "1.8" - -author: Cove Schneider, Joshua Conner, Pavel Antonov -requirements: [ "docker-py >= 0.3.0", "docker >= 0.10.0" ] -''' - -EXAMPLES = ''' -Start one docker container running tomcat in each host of the web group and bind tomcat's listening port to 8080 -on the host: - -- hosts: web - sudo: yes - tasks: - - name: run tomcat servers - docker: image=centos command="service tomcat6 start" ports=8080 - -The tomcat server's port is NAT'ed to a dynamic port on the host, but you can determine which port the server was -mapped to using docker_containers: - -- hosts: web - sudo: yes - tasks: - - name: run tomcat servers - docker: image=centos command="service tomcat6 start" ports=8080 count=5 - - name: Display IP address and port mappings for containers - debug: msg={{inventory_hostname}}:{{item['HostConfig']['PortBindings']['8080/tcp'][0]['HostPort']}} - with_items: docker_containers - -Just as in the previous example, but iterates over the list of docker containers with a sequence: - -- hosts: web - sudo: yes - vars: - start_containers_count: 5 - tasks: - - name: run tomcat servers - docker: image=centos command="service tomcat6 start" ports=8080 count={{start_containers_count}} - - name: Display IP address and port mappings for containers - debug: msg="{{inventory_hostname}}:{{docker_containers[{{item}}]['HostConfig']['PortBindings']['8080/tcp'][0]['HostPort']}}" - with_sequence: start=0 end={{start_containers_count - 1}} - -Stop, remove all of the running tomcat containers and list the exit code from the stopped containers: - -- hosts: web - sudo: yes - tasks: - - name: stop tomcat servers - docker: image=centos command="service tomcat6 start" state=absent - - name: Display return codes from stopped containers - debug: msg="Returned {{inventory_hostname}}:{{item}}" - with_items: docker_containers - -Create a named container: - -- hosts: web - sudo: yes - tasks: - - name: run tomcat server - docker: 
image=centos name=tomcat command="service tomcat6 start" ports=8080 - -Create multiple named containers: - -- hosts: web - sudo: yes - tasks: - - name: run tomcat servers - docker: image=centos name={{item}} command="service tomcat6 start" ports=8080 - with_items: - - crookshank - - snowbell - - heathcliff - - felix - - sylvester - -Create containers named in a sequence: - -- hosts: web - sudo: yes - tasks: - - name: run tomcat servers - docker: image=centos name={{item}} command="service tomcat6 start" ports=8080 - with_sequence: start=1 end=5 format=tomcat_%d.example.com - -Create two linked containers: - -- hosts: web - sudo: yes - tasks: - - name: ensure redis container is running - docker: image=crosbymichael/redis name=redis - - - name: ensure redis_ambassador container is running - docker: image=svendowideit/ambassador ports=6379:6379 links=redis:redis name=redis_ambassador_ansible - -Create containers with options specified as key-value pairs and lists: - -- hosts: web - sudo: yes - tasks: - - docker: - image: namespace/image_name - links: - - postgresql:db - - redis:redis - - -Create containers with options specified as strings and lists as comma-separated strings: - -- hosts: web - sudo: yes - tasks: - docker: image=namespace/image_name links=postgresql:db,redis:redis - -Create a container with no networking: - -- hosts: web - sudo: yes - tasks: - docker: image=namespace/image_name net=none - -''' - -HAS_DOCKER_PY = True - -import sys -from urlparse import urlparse -try: - import docker.client - import docker.utils - from requests.exceptions import * -except ImportError, e: - HAS_DOCKER_PY = False - -try: - from docker.errors import APIError as DockerAPIError -except ImportError: - from docker.client import APIError as DockerAPIError - - -def _human_to_bytes(number): - suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB'] - - if isinstance(number, int): - return number - if number[-1] == suffixes[0] and number[-2].isdigit(): - return number[:-1] - - i = 1 - for 
each in suffixes[1:]: - if number[-len(each):] == suffixes[i]: - return int(number[:-len(each)]) * (1024 ** i) - i = i + 1 - - print "failed=True msg='Could not convert %s to integer'" % (number) - sys.exit(1) - -def _ansible_facts(container_list): - return {"docker_containers": container_list} - -def _docker_id_quirk(inspect): - # XXX: some quirk in docker - if 'ID' in inspect: - inspect['Id'] = inspect['ID'] - del inspect['ID'] - return inspect - -class DockerManager: - - counters = {'created':0, 'started':0, 'stopped':0, 'killed':0, 'removed':0, 'restarted':0, 'pull':0} - - def __init__(self, module): - self.module = module - - self.binds = None - self.volumes = None - if self.module.params.get('volumes'): - self.binds = {} - self.volumes = {} - vols = self.module.params.get('volumes') - for vol in vols: - parts = vol.split(":") - # host mount (e.g. /mnt:/tmp, bind mounts host's /tmp to /mnt in the container) - if len(parts) == 2: - self.volumes[parts[1]] = {} - self.binds[parts[0]] = parts[1] - # with bind mode - elif len(parts) == 3: - if parts[2] not in ['ro', 'rw']: - self.module.fail_json(msg='bind mode needs to either be "ro" or "rw"') - ro = parts[2] == 'ro' - self.volumes[parts[1]] = {} - self.binds[parts[0]] = {'bind': parts[1], 'ro': ro} - # docker mount (e.g. 
/www, mounts a docker volume /www on the container at the same location) - else: - self.volumes[parts[0]] = {} - - self.lxc_conf = None - if self.module.params.get('lxc_conf'): - self.lxc_conf = [] - options = self.module.params.get('lxc_conf') - for option in options: - parts = option.split(':') - self.lxc_conf.append({"Key": parts[0], "Value": parts[1]}) - - self.exposed_ports = None - if self.module.params.get('expose'): - self.exposed_ports = self.get_exposed_ports(self.module.params.get('expose')) - - self.port_bindings = None - if self.module.params.get('ports'): - self.port_bindings = self.get_port_bindings(self.module.params.get('ports')) - - self.links = None - if self.module.params.get('links'): - self.links = self.get_links(self.module.params.get('links')) - - self.env = self.module.params.get('env', None) - - # connect to docker server - docker_url = urlparse(module.params.get('docker_url')) - docker_api_version = module.params.get('docker_api_version') - self.client = docker.Client(base_url=docker_url.geturl(), version=docker_api_version) - - - def get_links(self, links): - """ - Parse the links passed, if a link is specified without an alias then just create the alias of the same name as the link - """ - processed_links = {} - - for link in links: - parsed_link = link.split(':', 1) - if(len(parsed_link) == 2): - processed_links[parsed_link[0]] = parsed_link[1] - else: - processed_links[parsed_link[0]] = parsed_link[0] - - return processed_links - - - def get_exposed_ports(self, expose_list): - """ - Parse the ports and protocols (TCP/UDP) to expose in the docker-py `create_container` call from the docker CLI-style syntax. 
- """ - if expose_list: - exposed = [] - for port in expose_list: - port = str(port).strip() - if port.endswith('/tcp') or port.endswith('/udp'): - port_with_proto = tuple(port.split('/')) - else: - # assume tcp protocol if not specified - port_with_proto = (port, 'tcp') - exposed.append(port_with_proto) - return exposed - else: - return None - - - def get_port_bindings(self, ports): - """ - Parse the `ports` string into a port bindings dict for the `start_container` call. - """ - binds = {} - for port in ports: - # ports could potentially be an array like [80, 443], so we make sure they're strings - # before splitting - parts = str(port).split(':') - container_port = parts[-1] - if '/' not in container_port: - container_port = int(parts[-1]) - - p_len = len(parts) - if p_len == 1: - # Bind `container_port` of the container to a dynamically - # allocated TCP port on all available interfaces of the host - # machine. - bind = ('0.0.0.0',) - elif p_len == 2: - # Bind `container_port` of the container to port `parts[0]` on - # all available interfaces of the host machine. - bind = ('0.0.0.0', int(parts[0])) - elif p_len == 3: - # Bind `container_port` of the container to port `parts[1]` on - # IP `parts[0]` of the host machine. If `parts[1]` empty bind - # to a dynamically allocacted port of IP `parts[0]`. 
- bind = (parts[0], int(parts[1])) if parts[1] else (parts[0],) - - if container_port in binds: - old_bind = binds[container_port] - if isinstance(old_bind, list): - # append to list if it already exists - old_bind.append(bind) - else: - # otherwise create list that contains the old and new binds - binds[container_port] = [binds[container_port], bind] - else: - binds[container_port] = bind - - return binds - - - def get_split_image_tag(self, image): - # If image contains a host or org name, omit that from our check - if '/' in image: - registry, resource = image.rsplit('/', 1) - else: - registry, resource = None, image - - # now we can determine if image has a tag - if ':' in resource: - resource, tag = resource.split(':', 1) - if registry: - resource = '/'.join((registry, resource)) - else: - tag = "latest" - resource = image - - return resource, tag - - def get_summary_counters_msg(self): - msg = "" - for k, v in self.counters.iteritems(): - msg = msg + "%s %d " % (k, v) - - return msg - - def increment_counter(self, name): - self.counters[name] = self.counters[name] + 1 - - def has_changed(self): - for k, v in self.counters.iteritems(): - if v > 0: - return True - - return False - - def get_inspect_containers(self, containers): - inspect = [] - for i in containers: - details = self.client.inspect_container(i['Id']) - details = _docker_id_quirk(details) - inspect.append(details) - - return inspect - - def get_deployed_containers(self): - """determine which images/commands are running already""" - image = self.module.params.get('image') - command = self.module.params.get('command') - if command: - command = command.strip() - name = self.module.params.get('name') - if name and not name.startswith('/'): - name = '/' + name - deployed = [] - - # if we weren't given a tag with the image, we need to only compare on the image name, as that - # docker will give us back the full image name including a tag in the container list if one exists. 
- image, tag = self.get_split_image_tag(image) - - for i in self.client.containers(all=True): - running_image, running_tag = self.get_split_image_tag(i['Image']) - running_command = i['Command'].strip() - - name_matches = False - if i["Names"]: - name_matches = (name and name in i['Names']) - image_matches = (running_image == image) - tag_matches = (not tag or running_tag == tag) - # if a container has an entrypoint, `command` will actually equal - # '{} {}'.format(entrypoint, command) - command_matches = (not command or running_command.endswith(command)) - - if name_matches or (name is None and image_matches and tag_matches and command_matches): - details = self.client.inspect_container(i['Id']) - details = _docker_id_quirk(details) - deployed.append(details) - - return deployed - - def get_running_containers(self): - running = [] - for i in self.get_deployed_containers(): - if i['State']['Running'] == True and i['State'].get('Ghost', False) == False: - running.append(i) - - return running - - def create_containers(self, count=1): - params = {'image': self.module.params.get('image'), - 'command': self.module.params.get('command'), - 'ports': self.exposed_ports, - 'volumes': self.volumes, - 'mem_limit': _human_to_bytes(self.module.params.get('memory_limit')), - 'environment': self.env, - 'hostname': self.module.params.get('hostname'), - 'detach': self.module.params.get('detach'), - 'name': self.module.params.get('name'), - 'stdin_open': self.module.params.get('stdin_open'), - 'tty': self.module.params.get('tty'), - } - - if docker.utils.compare_version('1.10', self.client.version()['ApiVersion']) < 0: - params['dns'] = self.module.params.get('dns') - params['volumes_from'] = self.module.params.get('volumes_from') - - def do_create(count, params): - results = [] - for _ in range(count): - result = self.client.create_container(**params) - self.increment_counter('created') - results.append(result) - - return results - - try: - containers = do_create(count, params) - 
except: - resource = self.module.params.get('image') - image, tag = self.get_split_image_tag(resource) - if self.module.params.get('username'): - try: - self.client.login( - self.module.params.get('username'), - password=self.module.params.get('password'), - email=self.module.params.get('email'), - registry=self.module.params.get('registry') - ) - except: - self.module.fail_json(msg="failed to login to the remote registry, check your username/password.") - try: - self.client.pull(image, tag=tag) - except: - self.module.fail_json(msg="failed to pull the specified image: %s" % resource) - self.increment_counter('pull') - containers = do_create(count, params) - - return containers - - def start_containers(self, containers): - params = { - 'lxc_conf': self.lxc_conf, - 'binds': self.binds, - 'port_bindings': self.port_bindings, - 'publish_all_ports': self.module.params.get('publish_all_ports'), - 'privileged': self.module.params.get('privileged'), - 'links': self.links, - 'network_mode': self.module.params.get('net'), - } - if docker.utils.compare_version('1.10', self.client.version()['ApiVersion']) >= 0 and hasattr(docker, '__version__') and docker.__version__ > '0.3.0': - params['dns'] = self.module.params.get('dns') - params['volumes_from'] = self.module.params.get('volumes_from') - - for i in containers: - self.client.start(i['Id'], **params) - self.increment_counter('started') - - def stop_containers(self, containers): - for i in containers: - self.client.stop(i['Id']) - self.increment_counter('stopped') - - return [self.client.wait(i['Id']) for i in containers] - - def remove_containers(self, containers): - for i in containers: - self.client.remove_container(i['Id']) - self.increment_counter('removed') - - def kill_containers(self, containers): - for i in containers: - self.client.kill(i['Id']) - self.increment_counter('killed') - - def restart_containers(self, containers): - for i in containers: - self.client.restart(i['Id']) - self.increment_counter('restarted') 
- - -def check_dependencies(module): - """ - Ensure `docker-py` >= 0.3.0 is installed, and call module.fail_json with a - helpful error message if it isn't. - """ - if not HAS_DOCKER_PY: - module.fail_json(msg="`docker-py` doesn't seem to be installed, but is required for the Ansible Docker module.") - else: - HAS_NEW_ENOUGH_DOCKER_PY = False - if hasattr(docker, '__version__'): - # a '__version__' attribute was added to the module but not until - # after 0.3.0 was added pushed to pip. If it's there, use it. - if docker.__version__ >= '0.3.0': - HAS_NEW_ENOUGH_DOCKER_PY = True - else: - # HACK: if '__version__' isn't there, we check for the existence of - # `_get_raw_response_socket` in the docker.Client class, which was - # added in 0.3.0 - if hasattr(docker.Client, '_get_raw_response_socket'): - HAS_NEW_ENOUGH_DOCKER_PY = True - - if not HAS_NEW_ENOUGH_DOCKER_PY: - module.fail_json(msg="The Ansible Docker module requires `docker-py` >= 0.3.0.") - - -def main(): - module = AnsibleModule( - argument_spec = dict( - count = dict(default=1), - image = dict(required=True), - command = dict(required=False, default=None), - expose = dict(required=False, default=None, type='list'), - ports = dict(required=False, default=None, type='list'), - publish_all_ports = dict(default=False, type='bool'), - volumes = dict(default=None, type='list'), - volumes_from = dict(default=None), - links = dict(default=None, type='list'), - memory_limit = dict(default=0), - memory_swap = dict(default=0), - docker_url = dict(default='unix://var/run/docker.sock'), - docker_api_version = dict(default=docker.client.DEFAULT_DOCKER_API_VERSION), - username = dict(default=None), - password = dict(), - email = dict(), - registry = dict(), - hostname = dict(default=None), - env = dict(type='dict'), - dns = dict(), - detach = dict(default=True, type='bool'), - state = dict(default='running', choices=['absent', 'present', 'running', 'stopped', 'killed', 'restarted']), - debug = dict(default=False, 
type='bool'), - privileged = dict(default=False, type='bool'), - stdin_open = dict(default=False, type='bool'), - tty = dict(default=False, type='bool'), - lxc_conf = dict(default=None, type='list'), - name = dict(default=None), - net = dict(default=None) - ) - ) - - check_dependencies(module) - - try: - manager = DockerManager(module) - state = module.params.get('state') - count = int(module.params.get('count')) - name = module.params.get('name') - image = module.params.get('image') - - if count < 0: - module.fail_json(msg="Count must be greater than zero") - if count > 1 and name: - module.fail_json(msg="Count and name must not be used together") - - running_containers = manager.get_running_containers() - running_count = len(running_containers) - delta = count - running_count - deployed_containers = manager.get_deployed_containers() - facts = None - failed = False - changed = False - - # start/stop containers - if state in [ "running", "present" ]: - - # make sure a container with `name` exists, if not create and start it - if name: - # first determine if a container with this name exists - existing_container = None - for deployed_container in deployed_containers: - if deployed_container.get('Name') == '/%s' % name: - existing_container = deployed_container - break - - # the named container is running, but with a - # different image or tag, so we stop it first - if existing_container and existing_container.get('Config', dict()).get('Image') != image: - manager.stop_containers([existing_container]) - manager.remove_containers([existing_container]) - running_containers = manager.get_running_containers() - deployed_containers = manager.get_deployed_containers() - existing_container = None - - # if the container isn't running (or if we stopped the - # old version above), create and (maybe) start it up now - if not existing_container: - containers = manager.create_containers(1) - if state == "present": # otherwise it get (re)started later anyways.. 
- manager.start_containers(containers) - running_containers = manager.get_running_containers() - deployed_containers = manager.get_deployed_containers() - - if state == "running": - # make sure a container with `name` is running - if name and "/" + name not in map(lambda x: x.get('Name'), running_containers): - manager.start_containers(deployed_containers) - - # start more containers if we don't have enough - elif delta > 0: - containers = manager.create_containers(delta) - manager.start_containers(containers) - - # stop containers if we have too many - elif delta < 0: - containers_to_stop = running_containers[0:abs(delta)] - containers = manager.stop_containers(containers_to_stop) - manager.remove_containers(containers_to_stop) - - facts = manager.get_running_containers() - else: - facts = manager.get_deployed_containers() - - # stop and remove containers - elif state == "absent": - facts = manager.stop_containers(deployed_containers) - manager.remove_containers(deployed_containers) - - # stop containers - elif state == "stopped": - facts = manager.stop_containers(running_containers) - - # kill containers - elif state == "killed": - manager.kill_containers(running_containers) - - # restart containers - elif state == "restarted": - manager.restart_containers(running_containers) - facts = manager.get_inspect_containers(running_containers) - - msg = "%s container(s) running image %s with command %s" % \ - (manager.get_summary_counters_msg(), module.params.get('image'), module.params.get('command')) - changed = manager.has_changed() - - module.exit_json(failed=failed, changed=changed, msg=msg, ansible_facts=_ansible_facts(facts)) - - except DockerAPIError, e: - changed = manager.has_changed() - module.exit_json(failed=True, changed=changed, msg="Docker API error: " + e.explanation) - - except RequestException, e: - changed = manager.has_changed() - module.exit_json(failed=True, changed=changed, msg=repr(e)) - -# import module snippets -from ansible.module_utils.basic 
import * - -main() diff --git a/library/cloud/docker_image b/library/cloud/docker_image deleted file mode 100644 index e1388f20f1..0000000000 --- a/library/cloud/docker_image +++ /dev/null @@ -1,252 +0,0 @@ -#!/usr/bin/python -# - -# (c) 2014, Pavel Antonov -# -# This file is part of Ansible -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . - -###################################################################### - -DOCUMENTATION = ''' ---- -module: docker_image -author: Pavel Antonov -version_added: "1.5" -short_description: manage docker images -description: - - Create, check and remove docker images -options: - path: - description: - - Path to directory with Dockerfile - required: false - default: null - aliases: [] - name: - description: - - Image name to work with - required: true - default: null - aliases: [] - tag: - description: - - Image tag to work with - required: false - default: "latest" - aliases: [] - nocache: - description: - - Do not use cache with building - required: false - default: false - aliases: [] - docker_url: - description: - - URL of docker host to issue commands to - required: false - default: unix://var/run/docker.sock - aliases: [] - state: - description: - - Set the state of the image - required: false - default: present - choices: [ "present", "absent", "build" ] - aliases: [] - timeout: - description: - - Set image operation timeout - required: false - default: 600 - 
aliases: [] -requirements: [ "docker-py" ] -''' - -EXAMPLES = ''' -Build docker image if required. Path should contains Dockerfile to build image: - -- hosts: web - sudo: yes - tasks: - - name: check or build image - docker_image: path="/path/to/build/dir" name="my/app" state=present - -Build new version of image: - -- hosts: web - sudo: yes - tasks: - - name: check or build image - docker_image: path="/path/to/build/dir" name="my/app" state=build - -Remove image from local docker storage: - -- hosts: web - sudo: yes - tasks: - - name: remove image - docker_image: name="my/app" state=absent - -''' - -try: - import sys - import re - import json - import docker.client - from requests.exceptions import * - from urlparse import urlparse -except ImportError, e: - print "failed=True msg='failed to import python module: %s'" % e - sys.exit(1) - -try: - from docker.errors import APIError as DockerAPIError -except ImportError: - from docker.client import APIError as DockerAPIError - -class DockerImageManager: - - def __init__(self, module): - self.module = module - self.path = self.module.params.get('path') - self.name = self.module.params.get('name') - self.tag = self.module.params.get('tag') - self.nocache = self.module.params.get('nocache') - docker_url = urlparse(module.params.get('docker_url')) - self.client = docker.Client(base_url=docker_url.geturl(), timeout=module.params.get('timeout')) - self.changed = False - self.log = [] - self.error_msg = None - - def get_log(self, as_string=True): - return "".join(self.log) if as_string else self.log - - def build(self): - stream = self.client.build(self.path, tag=':'.join([self.name, self.tag]), nocache=self.nocache, rm=True, stream=True) - success_search = r'Successfully built ([0-9a-f]+)' - image_id = None - self.changed = True - - for chunk in stream: - if not chunk: - continue - - try: - chunk_json = json.loads(chunk) - except ValueError: - continue - - if 'error' in chunk_json: - self.error_msg = chunk_json['error'] - 
return None - - if 'stream' in chunk_json: - output = chunk_json['stream'] - self.log.append(output) - match = re.search(success_search, output) - if match: - image_id = match.group(1) - - # Just in case we skipped evaluating the JSON returned from build - # during every iteration, add an error if the image_id was never - # populated - if not image_id: - self.error_msg = 'Unknown error encountered' - - return image_id - - def has_changed(self): - return self.changed - - def get_images(self): - filtered_images = [] - images = self.client.images() - for i in images: - # Docker-py version >= 0.3 (Docker API >= 1.8) - if 'RepoTags' in i: - repotag = ':'.join([self.name, self.tag]) - if not self.name or repotag in i['RepoTags']: - filtered_images.append(i) - # Docker-py version < 0.3 (Docker API < 1.8) - elif (not self.name or self.name == i['Repository']) and (not self.tag or self.tag == i['Tag']): - filtered_images.append(i) - return filtered_images - - def remove_images(self): - images = self.get_images() - for i in images: - try: - self.client.remove_image(i['Id']) - self.changed = True - except DockerAPIError as e: - # image can be removed by docker if not used - pass - - -def main(): - module = AnsibleModule( - argument_spec = dict( - path = dict(required=False, default=None), - name = dict(required=True), - tag = dict(required=False, default="latest"), - nocache = dict(default=False, type='bool'), - state = dict(default='present', choices=['absent', 'present', 'build']), - docker_url = dict(default='unix://var/run/docker.sock'), - timeout = dict(default=600, type='int'), - ) - ) - - try: - manager = DockerImageManager(module) - state = module.params.get('state') - failed = False - image_id = None - msg = '' - do_build = False - - # build image if not exists - if state == "present": - images = manager.get_images() - if len(images) == 0: - do_build = True - # build image - elif state == "build": - do_build = True - # remove image or images - elif state == "absent": 
- manager.remove_images() - - if do_build: - image_id = manager.build() - if image_id: - msg = "Image built: %s" % image_id - else: - failed = True - msg = "Error: %s\nLog:%s" % (manager.error_msg, manager.get_log()) - - module.exit_json(failed=failed, changed=manager.has_changed(), msg=msg, image_id=image_id) - - except DockerAPIError as e: - module.exit_json(failed=True, changed=manager.has_changed(), msg="Docker API error: " + e.explanation) - - except RequestException as e: - module.exit_json(failed=True, changed=manager.has_changed(), msg=repr(e)) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/cloud/ec2 b/library/cloud/ec2 deleted file mode 100644 index a4776c74b8..0000000000 --- a/library/cloud/ec2 +++ /dev/null @@ -1,1199 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: ec2 -short_description: create, terminate, start or stop an instance in ec2, return instanceid -description: - - Creates or terminates ec2 instances. When created optionally waits for it to be 'running'. 
This module has a dependency on python-boto >= 2.5 -version_added: "0.9" -options: - key_name: - description: - - key pair to use on the instance - required: false - default: null - aliases: ['keypair'] - id: - description: - - identifier for this instance or set of instances, so that the module will be idempotent with respect to EC2 instances. This identifier is valid for at least 24 hours after the termination of the instance, and should not be reused for another call later on. For details, see the description of client token at U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). - required: false - default: null - aliases: [] - group: - description: - - security group (or list of groups) to use with the instance - required: false - default: null - aliases: [ 'groups' ] - group_id: - version_added: "1.1" - description: - - security group id (or list of ids) to use with the instance - required: false - default: null - aliases: [] - region: - version_added: "1.2" - description: - - The AWS region to use. Must be specified if ec2_url is not used. If not specified then the value of the EC2_REGION environment variable, if any, is used. - required: false - default: null - aliases: [ 'aws_region', 'ec2_region' ] - zone: - version_added: "1.2" - description: - - AWS availability zone in which to launch the instance - required: false - default: null - aliases: [ 'aws_zone', 'ec2_zone' ] - instance_type: - description: - - instance type to use for the instance - required: true - default: null - aliases: [] - spot_price: - version_added: "1.5" - description: - - Maximum spot price to bid, If not set a regular on-demand instance is requested. A spot request is made with this maximum bid. When it is filled, the instance is started. 
- required: false - default: null - aliases: [] - image: - description: - - I(emi) (or I(ami)) to use for the instance - required: true - default: null - aliases: [] - kernel: - description: - - kernel I(eki) to use for the instance - required: false - default: null - aliases: [] - ramdisk: - description: - - ramdisk I(eri) to use for the instance - required: false - default: null - aliases: [] - wait: - description: - - wait for the instance to be in state 'running' before returning - required: false - default: "no" - choices: [ "yes", "no" ] - aliases: [] - wait_timeout: - description: - - how long before wait gives up, in seconds - default: 300 - aliases: [] - spot_wait_timeout: - version_added: "1.5" - description: - - how long to wait for the spot instance request to be fulfilled - default: 600 - aliases: [] - count: - description: - - number of instances to launch - required: False - default: 1 - aliases: [] - monitoring: - version_added: "1.1" - description: - - enable detailed monitoring (CloudWatch) for instance - required: false - default: null - aliases: [] - user_data: - version_added: "0.9" - description: - - opaque blob of data which is made available to the ec2 instance - required: false - default: null - aliases: [] - instance_tags: - version_added: "1.0" - description: - - a hash/dictionary of tags to add to the new instance; '{"key":"value"}' and '{"key":"value","key":"value"}' - required: false - default: null - aliases: [] - placement_group: - version_added: "1.3" - description: - - placement group for the instance when using EC2 Clustered Compute - required: false - default: null - aliases: [] - vpc_subnet_id: - version_added: "1.1" - description: - - the subnet ID in which to launch the instance (VPC) - required: false - default: null - aliases: [] - assign_public_ip: - version_added: "1.5" - description: - - when provisioning within vpc, assign a public IP address. 
Boto library must be 2.13.0+ - required: false - default: null - aliases: [] - private_ip: - version_added: "1.2" - description: - - the private ip address to assign the instance (from the vpc subnet) - required: false - default: null - aliases: [] - instance_profile_name: - version_added: "1.3" - description: - - Name of the IAM instance profile to use. Boto library must be 2.5.0+ - required: false - default: null - aliases: [] - instance_ids: - version_added: "1.3" - description: - - "list of instance ids, currently used for states: absent, running, stopped" - required: false - default: null - aliases: [] - source_dest_check: - version_added: "1.6" - description: - - Enable or Disable the Source/Destination checks (for NAT instances and Virtual Routers) - required: false - default: true - state: - version_added: "1.3" - description: - - create or terminate instances - required: false - default: 'present' - aliases: [] - choices: ['present', 'absent', 'running', 'stopped'] - volumes: - version_added: "1.5" - description: - - a list of volume dicts, each containing device name and optionally ephemeral id or snapshot id. Size and type (and number of iops for io device type) must be specified for a new volume or a root volume, and may be passed for a snapshot volume. For any volume, a volume size less than 1 will be interpreted as a request not to create the volume. - required: false - default: null - aliases: [] - ebs_optimized: - version_added: "1.6" - description: - - whether instance is using optimized EBS volumes, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html) - required: false - default: false - exact_count: - version_added: "1.5" - description: - - An integer value which indicates how many instances that match the 'count_tag' parameter should be running. Instances are either created or terminated based on this value. 
- required: false - default: null - aliases: [] - count_tag: - version_added: "1.5" - description: - - Used with 'exact_count' to determine how many nodes based on a specific tag criteria should be running. This can be expressed in multiple ways and is shown in the EXAMPLES section. For instance, one can request 25 servers that are tagged with "class=webserver". - required: false - default: null - aliases: [] - -author: Seth Vidal, Tim Gerla, Lester Wade -extends_documentation_fragment: aws -''' - -EXAMPLES = ''' -# Note: None of these examples set aws_access_key, aws_secret_key, or region. -# It is assumed that their matching environment variables are set. - -# Basic provisioning example -- local_action: - module: ec2 - key_name: mykey - instance_type: c1.medium - image: emi-40603AD1 - wait: yes - group: webserver - count: 3 - -# Advanced example with tagging and CloudWatch -- local_action: - module: ec2 - key_name: mykey - group: databases - instance_type: m1.large - image: ami-6e649707 - wait: yes - wait_timeout: 500 - count: 5 - instance_tags: - db: postgres - monitoring: yes - -# Single instance with additional IOPS volume from snapshot and volume delete on termination -local_action: - module: ec2 - key_name: mykey - group: webserver - instance_type: m1.large - image: ami-6e649707 - wait: yes - wait_timeout: 500 - volumes: - - device_name: /dev/sdb - snapshot: snap-abcdef12 - device_type: io1 - iops: 1000 - volume_size: 100 - delete_on_termination: true - monitoring: yes - -# Multiple groups example -local_action: - module: ec2 - key_name: mykey - group: ['databases', 'internal-services', 'sshable', 'and-so-forth'] - instance_type: m1.large - image: ami-6e649707 - wait: yes - wait_timeout: 500 - count: 5 - instance_tags: - db: postgres - monitoring: yes - -# Multiple instances with additional volume from snapshot -local_action: - module: ec2 - key_name: mykey - group: webserver - instance_type: m1.large - image: ami-6e649707 - wait: yes - wait_timeout: 500 - 
count: 5 - volumes: - - device_name: /dev/sdb - snapshot: snap-abcdef12 - volume_size: 10 - monitoring: yes - -# VPC example -- local_action: - module: ec2 - key_name: mykey - group_id: sg-1dc53f72 - instance_type: m1.small - image: ami-6e649707 - wait: yes - vpc_subnet_id: subnet-29e63245 - assign_public_ip: yes - -# Spot instance example -- local_action: - module: ec2 - spot_price: 0.24 - spot_wait_timeout: 600 - keypair: mykey - group_id: sg-1dc53f72 - instance_type: m1.small - image: ami-6e649707 - wait: yes - vpc_subnet_id: subnet-29e63245 - assign_public_ip: yes - -# Launch instances, runs some tasks -# and then terminate them - - -- name: Create a sandbox instance - hosts: localhost - gather_facts: False - vars: - key_name: my_keypair - instance_type: m1.small - security_group: my_securitygroup - image: my_ami_id - region: us-east-1 - tasks: - - name: Launch instance - local_action: ec2 key_name={{ keypair }} group={{ security_group }} instance_type={{ instance_type }} image={{ image }} wait=true region={{ region }} - register: ec2 - - name: Add new instance to host group - local_action: add_host hostname={{ item.public_ip }} groupname=launched - with_items: ec2.instances - - name: Wait for SSH to come up - local_action: wait_for host={{ item.public_dns_name }} port=22 delay=60 timeout=320 state=started - with_items: ec2.instances - -- name: Configure instance(s) - hosts: launched - sudo: True - gather_facts: True - roles: - - my_awesome_role - - my_awesome_test - -- name: Terminate instances - hosts: localhost - connection: local - tasks: - - name: Terminate instances that were previously launched - local_action: - module: ec2 - state: 'absent' - instance_ids: '{{ ec2.instance_ids }}' - -# Start a few existing instances, run some tasks -# and stop the instances - -- name: Start sandbox instances - hosts: localhost - gather_facts: false - connection: local - vars: - instance_ids: - - 'i-xxxxxx' - - 'i-xxxxxx' - - 'i-xxxxxx' - region: us-east-1 - tasks: - - 
name: Start the sandbox instances - local_action: - module: ec2 - instance_ids: '{{ instance_ids }}' - region: '{{ region }}' - state: running - wait: True - role: - - do_neat_stuff - - do_more_neat_stuff - -- name: Stop sandbox instances - hosts: localhost - gather_facts: false - connection: local - vars: - instance_ids: - - 'i-xxxxxx' - - 'i-xxxxxx' - - 'i-xxxxxx' - region: us-east-1 - tasks: - - name: Stop the sanbox instances - local_action: - module: ec2 - instance_ids: '{{ instance_ids }}' - region: '{{ region }}' - state: stopped - wait: True - -# -# Enforce that 5 instances with a tag "foo" are running -# - -- local_action: - module: ec2 - key_name: mykey - instance_type: c1.medium - image: emi-40603AD1 - wait: yes - group: webserver - instance_tags: - foo: bar - exact_count: 5 - count_tag: foo - -# -# Enforce that 5 running instances named "database" with a "dbtype" of "postgres" -# - -- local_action: - module: ec2 - key_name: mykey - instance_type: c1.medium - image: emi-40603AD1 - wait: yes - group: webserver - instance_tags: - Name: database - dbtype: postgres - exact_count: 5 - count_tag: - Name: database - dbtype: postgres - -# -# count_tag complex argument examples -# - - # instances with tag foo - count_tag: - foo: - - # instances with tag foo=bar - count_tag: - foo: bar - - # instances with tags foo=bar & baz - count_tag: - foo: bar - baz: - - # instances with tags foo & bar & baz=bang - count_tag: - - foo - - bar - - baz: bang - -''' - -import sys -import time -from ast import literal_eval - -try: - import boto.ec2 - from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping - from boto.exception import EC2ResponseError -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - -def find_running_instances_by_count_tag(module, ec2, count_tag, zone=None): - - # get reservations for instances that match tag(s) and are running - reservations = get_reservations(module, ec2, tags=count_tag, 
state="running", zone=zone) - - instances = [] - for res in reservations: - if hasattr(res, 'instances'): - for inst in res.instances: - instances.append(inst) - - return reservations, instances - - -def _set_none_to_blank(dictionary): - result = dictionary - for k in result.iterkeys(): - if type(result[k]) == dict: - result[k] = _set_non_to_blank(result[k]) - elif not result[k]: - result[k] = "" - return result - - -def get_reservations(module, ec2, tags=None, state=None, zone=None): - - # TODO: filters do not work with tags that have underscores - filters = dict() - - if tags is not None: - - if type(tags) is str: - try: - tags = literal_eval(tags) - except: - pass - - # if string, we only care that a tag of that name exists - if type(tags) is str: - filters.update({"tag-key": tags}) - - # if list, append each item to filters - if type(tags) is list: - for x in tags: - if type(x) is dict: - x = _set_none_to_blank(x) - filters.update(dict(("tag:"+tn, tv) for (tn,tv) in x.iteritems())) - else: - filters.update({"tag-key": x}) - - # if dict, add the key and value to the filter - if type(tags) is dict: - tags = _set_none_to_blank(tags) - filters.update(dict(("tag:"+tn, tv) for (tn,tv) in tags.iteritems())) - - if state: - # http://stackoverflow.com/questions/437511/what-are-the-valid-instancestates-for-the-amazon-ec2-api - filters.update({'instance-state-name': state}) - - if zone: - filters.update({'availability-zone': zone}) - - results = ec2.get_all_instances(filters=filters) - - return results - -def get_instance_info(inst): - """ - Retrieves instance information from an instance - ID and returns it as a dictionary - """ - instance_info = {'id': inst.id, - 'ami_launch_index': inst.ami_launch_index, - 'private_ip': inst.private_ip_address, - 'private_dns_name': inst.private_dns_name, - 'public_ip': inst.ip_address, - 'dns_name': inst.dns_name, - 'public_dns_name': inst.public_dns_name, - 'state_code': inst.state_code, - 'architecture': inst.architecture, - 
'image_id': inst.image_id, - 'key_name': inst.key_name, - 'placement': inst.placement, - 'region': inst.placement[:-1], - 'kernel': inst.kernel, - 'ramdisk': inst.ramdisk, - 'launch_time': inst.launch_time, - 'instance_type': inst.instance_type, - 'root_device_type': inst.root_device_type, - 'root_device_name': inst.root_device_name, - 'state': inst.state, - 'hypervisor': inst.hypervisor} - try: - instance_info['virtualization_type'] = getattr(inst,'virtualization_type') - except AttributeError: - instance_info['virtualization_type'] = None - - try: - instance_info['ebs_optimized'] = getattr(inst, 'ebs_optimized') - except AttributeError: - instance_info['ebs_optimized'] = False - - return instance_info - -def boto_supports_associate_public_ip_address(ec2): - """ - Check if Boto library has associate_public_ip_address in the NetworkInterfaceSpecification - class. Added in Boto 2.13.0 - - ec2: authenticated ec2 connection object - - Returns: - True if Boto library accepts associate_public_ip_address argument, else false - """ - - try: - network_interface = boto.ec2.networkinterface.NetworkInterfaceSpecification() - getattr(network_interface, "associate_public_ip_address") - return True - except AttributeError: - return False - -def boto_supports_profile_name_arg(ec2): - """ - Check if Boto library has instance_profile_name argument. 
instance_profile_name has been added in Boto 2.5.0 - - ec2: authenticated ec2 connection object - - Returns: - True if Boto library accept instance_profile_name argument, else false - """ - run_instances_method = getattr(ec2, 'run_instances') - return 'instance_profile_name' in run_instances_method.func_code.co_varnames - -def create_block_device(module, ec2, volume): - # Not aware of a way to determine this programatically - # http://aws.amazon.com/about-aws/whats-new/2013/10/09/ebs-provisioned-iops-maximum-iops-gb-ratio-increased-to-30-1/ - MAX_IOPS_TO_SIZE_RATIO = 30 - if 'snapshot' not in volume and 'ephemeral' not in volume: - if 'volume_size' not in volume: - module.fail_json(msg = 'Size must be specified when creating a new volume or modifying the root volume') - if 'snapshot' in volume: - if 'device_type' in volume and volume.get('device_type') == 'io1' and 'iops' not in volume: - module.fail_json(msg = 'io1 volumes must have an iops value set') - if 'iops' in volume: - snapshot = ec2.get_all_snapshots(snapshot_ids=[volume['snapshot']])[0] - size = volume.get('volume_size', snapshot.volume_size) - if int(volume['iops']) > MAX_IOPS_TO_SIZE_RATIO * size: - module.fail_json(msg = 'IOPS must be at most %d times greater than size' % MAX_IOPS_TO_SIZE_RATIO) - if 'ephemeral' in volume: - if 'snapshot' in volume: - module.fail_json(msg = 'Cannot set both ephemeral and snapshot') - return BlockDeviceType(snapshot_id=volume.get('snapshot'), - ephemeral_name=volume.get('ephemeral'), - size=volume.get('volume_size'), - volume_type=volume.get('device_type'), - delete_on_termination=volume.get('delete_on_termination', False), - iops=volume.get('iops')) - -def boto_supports_param_in_spot_request(ec2, param): - """ - Check if Boto library has a in its request_spot_instances() method. For example, the placement_group parameter wasn't added until 2.3.0. 
- - ec2: authenticated ec2 connection object - - Returns: - True if boto library has the named param as an argument on the request_spot_instances method, else False - """ - method = getattr(ec2, 'request_spot_instances') - return param in method.func_code.co_varnames - -def enforce_count(module, ec2): - - exact_count = module.params.get('exact_count') - count_tag = module.params.get('count_tag') - zone = module.params.get('zone') - - reservations, instances = find_running_instances_by_count_tag(module, ec2, count_tag, zone) - - changed = None - checkmode = False - instance_dict_array = [] - changed_instance_ids = None - - if len(instances) == exact_count: - changed = False - elif len(instances) < exact_count: - changed = True - to_create = exact_count - len(instances) - if not checkmode: - (instance_dict_array, changed_instance_ids, changed) \ - = create_instances(module, ec2, override_count=to_create) - - for inst in instance_dict_array: - instances.append(inst) - elif len(instances) > exact_count: - changed = True - to_remove = len(instances) - exact_count - if not checkmode: - all_instance_ids = sorted([ x.id for x in instances ]) - remove_ids = all_instance_ids[0:to_remove] - - instances = [ x for x in instances if x.id not in remove_ids] - - (changed, instance_dict_array, changed_instance_ids) \ - = terminate_instances(module, ec2, remove_ids) - terminated_list = [] - for inst in instance_dict_array: - inst['state'] = "terminated" - terminated_list.append(inst) - instance_dict_array = terminated_list - - # ensure all instances are dictionaries - all_instances = [] - for inst in instances: - if type(inst) is not dict: - inst = get_instance_info(inst) - all_instances.append(inst) - - return (all_instances, instance_dict_array, changed_instance_ids, changed) - - -def create_instances(module, ec2, override_count=None): - """ - Creates new instances - - module : AnsibleModule object - ec2: authenticated ec2 connection object - - Returns: - A list of dictionaries 
with instance information - about the instances that were launched - """ - - key_name = module.params.get('key_name') - id = module.params.get('id') - group_name = module.params.get('group') - group_id = module.params.get('group_id') - zone = module.params.get('zone') - instance_type = module.params.get('instance_type') - spot_price = module.params.get('spot_price') - image = module.params.get('image') - if override_count: - count = override_count - else: - count = module.params.get('count') - monitoring = module.params.get('monitoring') - kernel = module.params.get('kernel') - ramdisk = module.params.get('ramdisk') - wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - spot_wait_timeout = int(module.params.get('spot_wait_timeout')) - placement_group = module.params.get('placement_group') - user_data = module.params.get('user_data') - instance_tags = module.params.get('instance_tags') - vpc_subnet_id = module.params.get('vpc_subnet_id') - assign_public_ip = module.boolean(module.params.get('assign_public_ip')) - private_ip = module.params.get('private_ip') - instance_profile_name = module.params.get('instance_profile_name') - volumes = module.params.get('volumes') - ebs_optimized = module.params.get('ebs_optimized') - exact_count = module.params.get('exact_count') - count_tag = module.params.get('count_tag') - source_dest_check = module.boolean(module.params.get('source_dest_check')) - - # group_id and group_name are exclusive of each other - if group_id and group_name: - module.fail_json(msg = str("Use only one type of parameter (group_name) or (group_id)")) - sys.exit(1) - - try: - # Here we try to lookup the group id from the security group name - if group is set. 
- if group_name: - grp_details = ec2.get_all_security_groups() - if type(group_name) == list: - group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ] - elif type(group_name) == str: - for grp in grp_details: - if str(group_name) in str(grp): - group_id = [str(grp.id)] - group_name = [group_name] - # Now we try to lookup the group id testing if group exists. - elif group_id: - #wrap the group_id in a list if it's not one already - if type(group_id) == str: - group_id = [group_id] - grp_details = ec2.get_all_security_groups(group_ids=group_id) - grp_item = grp_details[0] - group_name = [grp_item.name] - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg = str(e)) - - # Lookup any instances that much our run id. - - running_instances = [] - count_remaining = int(count) - - if id != None: - filter_dict = {'client-token':id, 'instance-state-name' : 'running'} - previous_reservations = ec2.get_all_instances(None, filter_dict) - for res in previous_reservations: - for prev_instance in res.instances: - running_instances.append(prev_instance) - count_remaining = count_remaining - len(running_instances) - - # Both min_count and max_count equal count parameter. This means the launch request is explicit (we want count, or fail) in how many instances we want. 
- - if count_remaining == 0: - changed = False - else: - changed = True - try: - params = {'image_id': image, - 'key_name': key_name, - 'monitoring_enabled': monitoring, - 'placement': zone, - 'instance_type': instance_type, - 'kernel_id': kernel, - 'ramdisk_id': ramdisk, - 'user_data': user_data} - - if ebs_optimized: - params['ebs_optimized'] = ebs_optimized - - if boto_supports_profile_name_arg(ec2): - params['instance_profile_name'] = instance_profile_name - else: - if instance_profile_name is not None: - module.fail_json( - msg="instance_profile_name parameter requires Boto version 2.5.0 or higher") - - if assign_public_ip: - if not boto_supports_associate_public_ip_address(ec2): - module.fail_json( - msg="assign_public_ip parameter requires Boto version 2.13.0 or higher.") - elif not vpc_subnet_id: - module.fail_json( - msg="assign_public_ip only available with vpc_subnet_id") - - else: - if private_ip: - interface = boto.ec2.networkinterface.NetworkInterfaceSpecification( - subnet_id=vpc_subnet_id, - private_ip_address=private_ip, - groups=group_id, - associate_public_ip_address=assign_public_ip) - else: - interface = boto.ec2.networkinterface.NetworkInterfaceSpecification( - subnet_id=vpc_subnet_id, - groups=group_id, - associate_public_ip_address=assign_public_ip) - interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface) - params['network_interfaces'] = interfaces - else: - params['subnet_id'] = vpc_subnet_id - if vpc_subnet_id: - params['security_group_ids'] = group_id - else: - params['security_groups'] = group_name - - if volumes: - bdm = BlockDeviceMapping() - for volume in volumes: - if 'device_name' not in volume: - module.fail_json(msg = 'Device name must be set for volume') - # Minimum volume size is 1GB. 
We'll use volume size explicitly set to 0 - # to be a signal not to create this volume - if 'volume_size' not in volume or int(volume['volume_size']) > 0: - bdm[volume['device_name']] = create_block_device(module, ec2, volume) - - params['block_device_map'] = bdm - - # check to see if we're using spot pricing first before starting instances - if not spot_price: - if assign_public_ip and private_ip: - params.update(dict( - min_count = count_remaining, - max_count = count_remaining, - client_token = id, - placement_group = placement_group, - )) - else: - params.update(dict( - min_count = count_remaining, - max_count = count_remaining, - client_token = id, - placement_group = placement_group, - private_ip_address = private_ip, - )) - - res = ec2.run_instances(**params) - instids = [ i.id for i in res.instances ] - while True: - try: - ec2.get_all_instances(instids) - break - except boto.exception.EC2ResponseError as e: - if "InvalidInstanceID.NotFound" in str(e): - # there's a race between start and get an instance - continue - else: - module.fail_json(msg = str(e)) - else: - if private_ip: - module.fail_json( - msg='private_ip only available with on-demand (non-spot) instances') - if boto_supports_param_in_spot_request(ec2, placement_group): - params['placement_group'] = placement_group - elif placement_group : - module.fail_json( - msg="placement_group parameter requires Boto version 2.3.0 or higher.") - - params.update(dict( - count = count_remaining, - )) - res = ec2.request_spot_instances(spot_price, **params) - - # Now we have to do the intermediate waiting - if wait: - spot_req_inst_ids = dict() - spot_wait_timeout = time.time() + spot_wait_timeout - while spot_wait_timeout > time.time(): - reqs = ec2.get_all_spot_instance_requests() - for sirb in res: - if sirb.id in spot_req_inst_ids: - continue - for sir in reqs: - if sir.id == sirb.id and sir.instance_id is not None: - spot_req_inst_ids[sirb.id] = sir.instance_id - if len(spot_req_inst_ids) < count: - 
time.sleep(5) - else: - break - if spot_wait_timeout <= time.time(): - module.fail_json(msg = "wait for spot requests timeout on %s" % time.asctime()) - instids = spot_req_inst_ids.values() - except boto.exception.BotoServerError, e: - module.fail_json(msg = "Instance creation failed => %s: %s" % (e.error_code, e.error_message)) - - # The instances returned through run_instances can be in - # terminated state due to idempotency. - terminated_instances = [ str(instance.id) for instance in res.instances - if instance.state == 'terminated' ] - if terminated_instances: - module.fail_json(msg = "Instances with id(s) %s " % terminated_instances + - "were created previously but have since been terminated - " + - "use a (possibly different) 'instanceid' parameter") - - # wait here until the instances are up - num_running = 0 - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time() and num_running < len(instids): - try: - res_list = ec2.get_all_instances(instids) - except boto.exception.BotoServerError, e: - if e.error_code == 'InvalidInstanceID.NotFound': - time.sleep(1) - continue - else: - raise - - num_running = 0 - for res in res_list: - num_running += len([ i for i in res.instances if i.state=='running' ]) - if len(res_list) <= 0: - # got a bad response of some sort, possibly due to - # stale/cached data. 
Wait a second and then try again - time.sleep(1) - continue - if wait and num_running < len(instids): - time.sleep(5) - else: - break - - if wait and wait_timeout <= time.time(): - # waiting took too long - module.fail_json(msg = "wait for instances running timeout on %s" % time.asctime()) - - #We do this after the loop ends so that we end up with one list - for res in res_list: - running_instances.extend(res.instances) - - # Enabled by default by Amazon - if not source_dest_check: - for inst in res.instances: - inst.modify_attribute('sourceDestCheck', False) - - # Leave this as late as possible to try and avoid InvalidInstanceID.NotFound - if instance_tags: - try: - ec2.create_tags(instids, instance_tags) - except boto.exception.EC2ResponseError, e: - module.fail_json(msg = "Instance tagging failed => %s: %s" % (e.error_code, e.error_message)) - - instance_dict_array = [] - created_instance_ids = [] - for inst in running_instances: - d = get_instance_info(inst) - created_instance_ids.append(inst.id) - instance_dict_array.append(d) - - return (instance_dict_array, created_instance_ids, changed) - - -def terminate_instances(module, ec2, instance_ids): - """ - Terminates a list of instances - - module: Ansible module object - ec2: authenticated ec2 connection object - termination_list: a list of instances to terminate in the form of - [ {id: }, ..] - - Returns a dictionary of instance information - about the instances terminated. - - If the instance to be terminated is running - "changed" will be set to False. 
- - """ - - # Whether to wait for termination to complete before returning - wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - - changed = False - instance_dict_array = [] - - if not isinstance(instance_ids, list) or len(instance_ids) < 1: - module.fail_json(msg='instance_ids should be a list of instances, aborting') - - terminated_instance_ids = [] - for res in ec2.get_all_instances(instance_ids): - for inst in res.instances: - if inst.state == 'running' or inst.state == 'stopped': - terminated_instance_ids.append(inst.id) - instance_dict_array.append(get_instance_info(inst)) - try: - ec2.terminate_instances([inst.id]) - except EC2ResponseError, e: - module.fail_json(msg='Unable to terminate instance {0}, error: {1}'.format(inst.id, e)) - changed = True - - # wait here until the instances are 'terminated' - if wait: - num_terminated = 0 - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time() and num_terminated < len(terminated_instance_ids): - response = ec2.get_all_instances( \ - instance_ids=terminated_instance_ids, \ - filters={'instance-state-name':'terminated'}) - try: - num_terminated = len(response.pop().instances) - except Exception, e: - # got a bad response of some sort, possibly due to - # stale/cached data. Wait a second and then try again - time.sleep(1) - continue - - if num_terminated < len(terminated_instance_ids): - time.sleep(5) - - # waiting took too long - if wait_timeout < time.time() and num_terminated < len(terminated_instance_ids): - module.fail_json(msg = "wait for instance termination timeout on %s" % time.asctime()) - - return (changed, instance_dict_array, terminated_instance_ids) - - -def startstop_instances(module, ec2, instance_ids, state): - """ - Starts or stops a list of existing instances - - module: Ansible module object - ec2: authenticated ec2 connection object - instance_ids: The list of instances to start in the form of - [ {id: }, ..] 
- state: Intended state ("running" or "stopped") - - Returns a dictionary of instance information - about the instances started/stopped. - - If the instance was not able to change state, - "changed" will be set to False. - - """ - - wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - changed = False - instance_dict_array = [] - - if not isinstance(instance_ids, list) or len(instance_ids) < 1: - module.fail_json(msg='instance_ids should be a list of instances, aborting') - - # Check that our instances are not in the state we want to take them to - # and change them to our desired state - running_instances_array = [] - for res in ec2.get_all_instances(instance_ids): - for inst in res.instances: - if inst.state != state: - instance_dict_array.append(get_instance_info(inst)) - try: - if state == 'running': - inst.start() - else: - inst.stop() - except EC2ResponseError, e: - module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e)) - changed = True - - ## Wait for all the instances to finish starting or stopping - wait_timeout = time.time() + wait_timeout - while wait and wait_timeout > time.time(): - instance_dict_array = [] - matched_instances = [] - for res in ec2.get_all_instances(instance_ids): - for i in res.instances: - if i.state == state: - instance_dict_array.append(get_instance_info(i)) - matched_instances.append(i) - if len(matched_instances) < len(instance_ids): - time.sleep(5) - else: - break - - if wait and wait_timeout <= time.time(): - # waiting took too long - module.fail_json(msg = "wait for instances running timeout on %s" % time.asctime()) - - return (changed, instance_dict_array, instance_ids) - - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( - key_name = dict(aliases = ['keypair']), - id = dict(), - group = dict(type='list'), - group_id = dict(type='list'), - zone = dict(aliases=['aws_zone', 'ec2_zone']), - instance_type = 
dict(aliases=['type']), - spot_price = dict(), - image = dict(), - kernel = dict(), - count = dict(type='int', default='1'), - monitoring = dict(type='bool', default=False), - ramdisk = dict(), - wait = dict(type='bool', default=False), - wait_timeout = dict(default=300), - spot_wait_timeout = dict(default=600), - placement_group = dict(), - user_data = dict(), - instance_tags = dict(type='dict'), - vpc_subnet_id = dict(), - assign_public_ip = dict(type='bool', default=False), - private_ip = dict(), - instance_profile_name = dict(), - instance_ids = dict(type='list'), - source_dest_check = dict(type='bool', default=True), - state = dict(default='present'), - exact_count = dict(type='int', default=None), - count_tag = dict(), - volumes = dict(type='list'), - ebs_optimized = dict(type='bool', default=False), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - mutually_exclusive = [ - ['exact_count', 'count'], - ['exact_count', 'state'], - ['exact_count', 'instance_ids'] - ], - ) - - ec2 = ec2_connect(module) - - tagged_instances = [] - - state = module.params.get('state') - - if state == 'absent': - instance_ids = module.params.get('instance_ids') - if not isinstance(instance_ids, list): - module.fail_json(msg='termination_list needs to be a list of instances to terminate') - - (changed, instance_dict_array, new_instance_ids) = terminate_instances(module, ec2, instance_ids) - - elif state in ('running', 'stopped'): - instance_ids = module.params.get('instance_ids') - if not isinstance(instance_ids, list): - module.fail_json(msg='running list needs to be a list of instances to run: %s' % instance_ids) - - (changed, instance_dict_array, new_instance_ids) = startstop_instances(module, ec2, instance_ids, state) - - elif state == 'present': - # Changed is always set to true when provisioning new instances - if not module.params.get('image'): - module.fail_json(msg='image parameter is required for new instance') - - if module.params.get('exact_count') is 
None: - (instance_dict_array, new_instance_ids, changed) = create_instances(module, ec2) - else: - (tagged_instances, instance_dict_array, new_instance_ids, changed) = enforce_count(module, ec2) - - module.exit_json(changed=changed, instance_ids=new_instance_ids, instances=instance_dict_array, tagged_instances=tagged_instances) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -main() diff --git a/library/cloud/ec2_ami b/library/cloud/ec2_ami deleted file mode 100644 index 3baf70a438..0000000000 --- a/library/cloud/ec2_ami +++ /dev/null @@ -1,273 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: ec2_ami -version_added: "1.3" -short_description: create or destroy an image in ec2, return imageid -description: - - Creates or deletes ec2 images. This module has a dependency on python-boto >= 2.5 -options: - instance_id: - description: - - instance id of the image to create - required: false - default: null - aliases: [] - name: - description: - - The name of the new image to create - required: false - default: null - aliases: [] - wait: - description: - - wait for the AMI to be in state 'available' before returning. 
- required: false - default: "no" - choices: [ "yes", "no" ] - aliases: [] - wait_timeout: - description: - - how long before wait gives up, in seconds - default: 300 - aliases: [] - state: - description: - - create or deregister/delete image - required: false - default: 'present' - aliases: [] - region: - description: - - The AWS region to use. Must be specified if ec2_url is not used. If not specified then the value of the EC2_REGION environment variable, if any, is used. - required: false - default: null - aliases: [ 'aws_region', 'ec2_region' ] - description: - description: - - An optional human-readable string describing the contents and purpose of the AMI. - required: false - default: null - aliases: [] - no_reboot: - description: - - An optional flag indicating that the bundling process should not attempt to shutdown the instance before bundling. If this flag is True, the responsibility of maintaining file system integrity is left to the owner of the instance. The default choice is "no". - required: false - default: no - choices: [ "yes", "no" ] - aliases: [] - image_id: - description: - - Image ID to be deregistered. - required: false - default: null - aliases: [] - delete_snapshot: - description: - - Whether or not to deleted an AMI while deregistering it. - required: false - default: null - aliases: [] - -author: Evan Duffield -extends_documentation_fragment: aws -''' - -# Thank you to iAcquire for sponsoring development of this module. -# -# See http://alestic.com/2011/06/ec2-ami-security for more information about ensuring the security of your AMI. 
- -EXAMPLES = ''' -# Basic AMI Creation -- local_action: - module: ec2_ami - aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx - aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx - instance_id: i-xxxxxx - wait: yes - name: newtest - register: instance - -# Basic AMI Creation, without waiting -- local_action: - module: ec2_ami - aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx - aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx - region: xxxxxx - instance_id: i-xxxxxx - wait: no - name: newtest - register: instance - -# Deregister/Delete AMI -- local_action: - module: ec2_ami - aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx - aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx - region: xxxxxx - image_id: ${instance.image_id} - delete_snapshot: True - state: absent - -# Deregister AMI -- local_action: - module: ec2_ami - aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx - aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx - region: xxxxxx - image_id: ${instance.image_id} - delete_snapshot: False - state: absent - -''' -import sys -import time - -try: - import boto - import boto.ec2 -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - -def create_image(module, ec2): - """ - Creates new AMI - - module : AnsibleModule object - ec2: authenticated ec2 connection object - """ - - instance_id = module.params.get('instance_id') - name = module.params.get('name') - wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - description = module.params.get('description') - no_reboot = module.params.get('no_reboot') - - try: - params = {'instance_id': instance_id, - 'name': name, - 'description': description, - 'no_reboot': no_reboot} - - image_id = ec2.create_image(**params) - except boto.exception.BotoServerError, e: - module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) - - # Wait until the image is recognized. 
EC2 API has eventual consistency, - # such that a successful CreateImage API call doesn't guarantee the success - # of subsequent DescribeImages API call using the new image id returned. - for i in range(wait_timeout): - try: - img = ec2.get_image(image_id) - break - except boto.exception.EC2ResponseError, e: - if 'InvalidAMIID.NotFound' in e.error_code and wait: - time.sleep(1) - else: - module.fail_json(msg="Error while trying to find the new image. Using wait=yes and/or a longer wait_timeout may help.") - else: - module.fail_json(msg="timed out waiting for image to be recognized") - - # wait here until the image is created - wait_timeout = time.time() + wait_timeout - while wait and wait_timeout > time.time() and (img is None or img.state != 'available'): - img = ec2.get_image(image_id) - time.sleep(3) - if wait and wait_timeout <= time.time(): - # waiting took too long - module.fail_json(msg = "timed out waiting for image to be created") - - module.exit_json(msg="AMI creation operation complete", image_id=image_id, state=img.state, changed=True) - - -def deregister_image(module, ec2): - """ - Deregisters AMI - """ - - image_id = module.params.get('image_id') - delete_snapshot = module.params.get('delete_snapshot') - wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - - img = ec2.get_image(image_id) - if img == None: - module.fail_json(msg = "Image %s does not exist" % image_id, changed=False) - - try: - params = {'image_id': image_id, - 'delete_snapshot': delete_snapshot} - - res = ec2.deregister_image(**params) - except boto.exception.BotoServerError, e: - module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) - - # wait here until the image is gone - img = ec2.get_image(image_id) - wait_timeout = time.time() + wait_timeout - while wait and wait_timeout > time.time() and img is not None: - img = ec2.get_image(image_id) - time.sleep(3) - if wait and wait_timeout <= time.time(): - # waiting took too long - 
module.fail_json(msg = "timed out waiting for image to be reregistered/deleted") - - module.exit_json(msg="AMI deregister/delete operation complete", changed=True) - sys.exit(0) - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( - instance_id = dict(), - image_id = dict(), - delete_snapshot = dict(), - name = dict(), - wait = dict(type="bool", default=False), - wait_timeout = dict(default=900), - description = dict(default=""), - no_reboot = dict(default=False, type="bool"), - state = dict(default='present'), - ) - ) - module = AnsibleModule(argument_spec=argument_spec) - - ec2 = ec2_connect(module) - - if module.params.get('state') == 'absent': - if not module.params.get('image_id'): - module.fail_json(msg='image_id needs to be an ami image to registered/delete') - - deregister_image(module, ec2) - - elif module.params.get('state') == 'present': - # Changed is always set to true when provisioning new AMI - if not module.params.get('instance_id'): - module.fail_json(msg='instance_id parameter is required for new image') - if not module.params.get('name'): - module.fail_json(msg='name parameter is required for new image') - create_image(module, ec2) - - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -main() - diff --git a/library/cloud/ec2_ami_search b/library/cloud/ec2_ami_search deleted file mode 100644 index 25875de39b..0000000000 --- a/library/cloud/ec2_ami_search +++ /dev/null @@ -1,196 +0,0 @@ -#!/usr/bin/python -# -# (c) 2013, Nimbis Services -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -DOCUMENTATION = ''' ---- -module: ec2_ami_search -short_description: Retrieve AWS AMI for a given operating system. -version_added: "1.6" -description: - - Look up the most recent AMI on AWS for a given operating system. - - Returns C(ami), C(aki), C(ari), C(serial), C(tag) - - If there is no AKI or ARI associated with an image, these will be C(null). - - Only supports images from cloud-images.ubuntu.com - - 'Example output: C({"ami": "ami-69f5a900", "changed": false, "aki": "aki-88aa75e1", "tag": "release", "ari": null, "serial": "20131024"})' -version_added: "1.6" -options: - distro: - description: Linux distribution (e.g., C(ubuntu)) - required: true - choices: ["ubuntu"] - release: - description: short name of the release (e.g., C(precise)) - required: true - stream: - description: Type of release. 
- required: false - default: "server" - choices: ["server", "desktop"] - store: - description: Back-end store for instance - required: false - default: "ebs" - choices: ["ebs", "ebs-io1", "ebs-ssd", "instance-store"] - arch: - description: CPU architecture - required: false - default: "amd64" - choices: ["i386", "amd64"] - region: - description: EC2 region - required: false - default: us-east-1 - choices: ["ap-northeast-1", "ap-southeast-1", "ap-southeast-2", - "eu-west-1", "sa-east-1", "us-east-1", "us-west-1", "us-west-2", "us-gov-west-1"] - virt: - description: virutalization type - required: false - default: paravirtual - choices: ["paravirtual", "hvm"] - -author: Lorin Hochstein -''' - -EXAMPLES = ''' -- name: Launch an Ubuntu 12.04 (Precise Pangolin) EC2 instance - hosts: 127.0.0.1 - connection: local - tasks: - - name: Get the Ubuntu precise AMI - ec2_ami_search: distro=ubuntu release=precise region=us-west-1 store=instance-store - register: ubuntu_image - - name: Start the EC2 instance - ec2: image={{ ubuntu_image.ami }} instance_type=m1.small key_name=mykey -''' - -import csv -import json -import urllib2 -import urlparse - -SUPPORTED_DISTROS = ['ubuntu'] - -AWS_REGIONS = ['ap-northeast-1', - 'ap-southeast-1', - 'ap-southeast-2', - 'eu-west-1', - 'sa-east-1', - 'us-east-1', - 'us-west-1', - 'us-west-2'] - - -def get_url(module, url): - """ Get url and return response """ - try: - r = urllib2.urlopen(url) - except (urllib2.HTTPError, urllib2.URLError), e: - code = getattr(e, 'code', -1) - module.fail_json(msg="Request failed: %s" % str(e), status_code=code) - return r - - -def ubuntu(module): - """ Get the ami for ubuntu """ - - release = module.params['release'] - stream = module.params['stream'] - store = module.params['store'] - arch = module.params['arch'] - region = module.params['region'] - virt = module.params['virt'] - - url = get_ubuntu_url(release, stream) - - req = get_url(module, url) - reader = csv.reader(req, delimiter='\t') - try: - ami, aki, 
ari, tag, serial = lookup_ubuntu_ami(reader, release, stream, - store, arch, region, virt) - module.exit_json(changed=False, ami=ami, aki=aki, ari=ari, tag=tag, - serial=serial) - except KeyError: - module.fail_json(msg="No matching AMI found") - - -def lookup_ubuntu_ami(table, release, stream, store, arch, region, virt): - """ Look up the Ubuntu AMI that matches query given a table of AMIs - - table: an iterable that returns a row of - (release, stream, tag, serial, region, ami, aki, ari, virt) - release: ubuntu release name - stream: 'server' or 'desktop' - store: 'ebs', 'ebs-io1', 'ebs-ssd' or 'instance-store' - arch: 'i386' or 'amd64' - region: EC2 region - virt: 'paravirtual' or 'hvm' - - Returns (ami, aki, ari, tag, serial)""" - expected = (release, stream, store, arch, region, virt) - - for row in table: - (actual_release, actual_stream, tag, serial, - actual_store, actual_arch, actual_region, ami, aki, ari, - actual_virt) = row - actual = (actual_release, actual_stream, actual_store, actual_arch, - actual_region, actual_virt) - if actual == expected: - # aki and ari are sometimes blank - if aki == '': - aki = None - if ari == '': - ari = None - return (ami, aki, ari, tag, serial) - - raise KeyError() - - -def get_ubuntu_url(release, stream): - url = "https://cloud-images.ubuntu.com/query/%s/%s/released.current.txt" - return url % (release, stream) - - -def main(): - arg_spec = dict( - distro=dict(required=True, choices=SUPPORTED_DISTROS), - release=dict(required=True), - stream=dict(required=False, default='server', - choices=['desktop', 'server']), - store=dict(required=False, default='ebs', - choices=['ebs', 'ebs-io1', 'ebs-ssd', 'instance-store']), - arch=dict(required=False, default='amd64', - choices=['i386', 'amd64']), - region=dict(required=False, default='us-east-1', choices=AWS_REGIONS), - virt=dict(required=False, default='paravirtual', - choices=['paravirtual', 'hvm']) - ) - module = AnsibleModule(argument_spec=arg_spec) - distro = 
module.params['distro'] - - if distro == 'ubuntu': - ubuntu(module) - else: - module.fail_json(msg="Unsupported distro: %s" % distro) - - - -# this is magic, see lib/ansible/module_common.py -#<> - -if __name__ == '__main__': - main() diff --git a/library/cloud/ec2_asg b/library/cloud/ec2_asg deleted file mode 100755 index 3fc033e6d6..0000000000 --- a/library/cloud/ec2_asg +++ /dev/null @@ -1,608 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = """ ---- -module: ec2_asg -short_description: Create or delete AWS Autoscaling Groups -description: - - Can create or delete AWS Autoscaling Groups - - Works with the ec2_lc module to manage Launch Configurations -version_added: "1.6" -author: Gareth Rushgrove -options: - state: - description: - - register or deregister the instance - required: true - choices: ['present', 'absent'] - name: - description: - - Unique name for group to be created or deleted - required: true - load_balancers: - description: - - List of ELB names to use for the group - required: false - availability_zones: - description: - - List of availability zone names in which to create the group. Defaults to all the availability zones in the region if vpc_zone_identifier is not set. - required: false - launch_config_name: - description: - - Name of the Launch configuration to use for the group. See the ec2_lc module for managing these. 
- required: false - min_size: - description: - - Minimum number of instances in group - required: false - max_size: - description: - - Maximum number of instances in group - required: false - desired_capacity: - description: - - Desired number of instances in group - required: false - replace_all_instances: - description: - - In a rolling fashion, replace all instances with an old launch configuration with one from the current launch configuraiton. - required: false - version_added: "1.8" - default: False - replace_batch_size: - description: - - Number of instances you'd like to replace at a time. Used with replace_all_instances. - required: false - version_added: "1.8" - default: 1 - replace_instances: - description: - - List of instance_ids belonging to the named ASG that you would like to terminate and be replaced with instances matching the current launch configuration. - required: false - version_added: "1.8" - default: None - lc_check: - description: - - Check to make sure instances that are being replaced with replace_instances do not aready have the current launch_config. - required: false - version_added: "1.8" - default: True - region: - description: - - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. - required: false - aliases: ['aws_region', 'ec2_region'] - vpc_zone_identifier: - description: - - List of VPC subnets to use - required: false - default: None - tags: - description: - - A list of tags to add to the Auto Scale Group. Optional key is 'propagate_at_launch', which defaults to true. - required: false - default: None - version_added: "1.7" - health_check_period: - description: - - Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health. - required: false - default: 500 seconds - version_added: "1.7" - health_check_type: - description: - - The service you want the health status from, Amazon EC2 or Elastic Load Balancer. 
- required: false - default: EC2 - version_added: "1.7" - choices: ['EC2', 'ELB'] - wait_timeout: - description: - - how long before wait instances to become viable when replaced. Used in concjunction with instance_ids option. - default: 300 - version_added: "1.8" -extends_documentation_fragment: aws -""" - -EXAMPLES = ''' -A basic example of configuration: - -- ec2_asg: - name: special - load_balancers: 'lb1,lb2' - availability_zones: 'eu-west-1a,eu-west-1b' - launch_config_name: 'lc-1' - min_size: 1 - max_size: 10 - desired_capacity: 5 - vpc_zone_identifier: 'subnet-abcd1234,subnet-1a2b3c4d' - tags: - - environment: production - propagate_at_launch: no - -Below is an example of how to assign a new launch config to an ASG and terminate old instances. - -All instances in "myasg" that do not have the launch configuration named "my_new_lc" will be terminated in -a rolling fashion with instances using the current launch configuration, "my_new_lc". - -This could also be considered a rolling deploy of a pre-baked AMI. - -If this is a newly created group, the instances will not be replaced since all instances -will have the current launch configuration. 
- -- name: create launch config - ec2_lc: - name: my_new_lc - image_id: ami-lkajsf - key_name: mykey - region: us-east-1 - security_groups: sg-23423 - instance_type: m1.small - assign_public_ip: yes - -- ec2_asg: - name: myasg - launch_config_name: my_new_lc - health_check_period: 60 - health_check_type: ELB - replace_all_instances: yes - min_size: 5 - max_size: 5 - desired_capacity: 5 - region: us-east-1 - -To only replace a couple of instances instead of all of them, supply a list -to "replace_instances": - -- ec2_asg: - name: myasg - launch_config_name: my_new_lc - health_check_period: 60 - health_check_type: ELB - replace_instances: - - i-b345231 - - i-24c2931 - min_size: 5 - max_size: 5 - desired_capacity: 5 - region: us-east-1 -''' - -import sys -import time - -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -try: - import boto.ec2.autoscale - from boto.ec2.autoscale import AutoScaleConnection, AutoScalingGroup, Tag - from boto.exception import BotoServerError -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - -ASG_ATTRIBUTES = ('availability_zones', 'default_cooldown', 'desired_capacity', - 'health_check_period', 'health_check_type', 'launch_config_name', - 'load_balancers', 'max_size', 'min_size', 'name', 'placement_group', - 'tags', 'termination_policies', 'vpc_zone_identifier') - -INSTANCE_ATTRIBUTES = ('instance_id', 'health_status', 'lifecycle_state', 'launch_config_name') - -def enforce_required_arguments(module): - ''' As many arguments are not required for autoscale group deletion - they cannot be mandatory arguments for the module, so we enforce - them here ''' - missing_args = [] - for arg in ('min_size', 'max_size', 'launch_config_name'): - if module.params[arg] is None: - missing_args.append(arg) - if missing_args: - module.fail_json(msg="Missing required arguments for autoscaling group create/update: %s" % ",".join(missing_args)) - - -def 
get_properties(autoscaling_group): - properties = dict((attr, getattr(autoscaling_group, attr)) for attr in ASG_ATTRIBUTES) - properties['healthy_instances'] = 0 - properties['in_service_instances'] = 0 - properties['unhealthy_instances'] = 0 - properties['pending_instances'] = 0 - properties['viable_instances'] = 0 - properties['terminating_instances'] = 0 - - if autoscaling_group.instances: - properties['instances'] = [i.instance_id for i in autoscaling_group.instances] - instance_facts = {} - for i in autoscaling_group.instances: - instance_facts[i.instance_id] = {'health_status': i.health_status, - 'lifecycle_state': i.lifecycle_state, - 'launch_config_name': i.launch_config_name } - if i.health_status == 'Healthy' and i.lifecycle_state == 'InService': - properties['viable_instances'] += 1 - if i.health_status == 'Healthy': - properties['healthy_instances'] += 1 - else: - properties['unhealthy_instances'] += 1 - if i.lifecycle_state == 'InService': - properties['in_service_instances'] += 1 - if i.lifecycle_state == 'Terminating': - properties['terminating_instances'] += 1 - if i.lifecycle_state == 'Pending': - properties['pending_instances'] += 1 - properties['instance_facts'] = instance_facts - properties['load_balancers'] = autoscaling_group.load_balancers - return properties - - -def create_autoscaling_group(connection, module): - - group_name = module.params.get('name') - load_balancers = module.params['load_balancers'] - availability_zones = module.params['availability_zones'] - launch_config_name = module.params.get('launch_config_name') - min_size = module.params['min_size'] - max_size = module.params['max_size'] - desired_capacity = module.params.get('desired_capacity') - vpc_zone_identifier = module.params.get('vpc_zone_identifier') - set_tags = module.params.get('tags') - health_check_period = module.params.get('health_check_period') - health_check_type = module.params.get('health_check_type') - - as_groups = 
connection.get_all_groups(names=[group_name]) - - if not vpc_zone_identifier and not availability_zones: - region, ec2_url, aws_connect_params = get_aws_connection_info(module) - try: - ec2_connection = connect_to_aws(boto.ec2, region, **aws_connect_params) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg=str(e)) - - asg_tags = [] - for tag in set_tags: - if tag.has_key('key') and tag.has_key('value'): # this block is to support depricated form - asg_tags.append(Tag(key=tag.get('key'), - value=tag.get('value'), - propagate_at_launch=bool(tag.get('propagate_at_launch', True)), - resource_id=group_name)) - else: - for k,v in tag.iteritems(): - if k !='propagate_at_launch': - asg_tags.append(Tag(key=k, - value=v, - propagate_at_launch=bool(tag.get('propagate_at_launch', True)), - resource_id=group_name)) - - if not as_groups: - if not vpc_zone_identifier and not availability_zones: - availability_zones = module.params['availability_zones'] = [zone.name for zone in ec2_connection.get_all_zones()] - enforce_required_arguments(module) - launch_configs = connection.get_all_launch_configurations(names=[launch_config_name]) - ag = AutoScalingGroup( - group_name=group_name, - load_balancers=load_balancers, - availability_zones=availability_zones, - launch_config=launch_configs[0], - min_size=min_size, - max_size=max_size, - desired_capacity=desired_capacity, - vpc_zone_identifier=vpc_zone_identifier, - connection=connection, - tags=asg_tags, - health_check_period=health_check_period, - health_check_type=health_check_type) - - try: - connection.create_auto_scaling_group(ag) - asg_properties = get_properties(ag) - changed = True - return(changed, asg_properties) - except BotoServerError, e: - module.fail_json(msg=str(e)) - else: - as_group = as_groups[0] - changed = False - for attr in ASG_ATTRIBUTES: - if module.params.get(attr): - module_attr = module.params.get(attr) - group_attr = getattr(as_group, attr) - # we do this because AWS and the module may 
return the same list - # sorted differently - try: - module_attr.sort() - except: - pass - try: - group_attr.sort() - except: - pass - if group_attr != module_attr: - changed = True - setattr(as_group, attr, module_attr) - - if len(set_tags) > 0: - existing_tags = as_group.tags - existing_tag_map = dict((tag.key, tag) for tag in existing_tags) - for tag in set_tags: - if tag.has_key('key') and tag.has_key('value'): # this is to support deprecated method - if 'key' not in tag: - continue - if ( not tag['key'] in existing_tag_map or - existing_tag_map[tag['key']].value != tag['value'] or - ('propagate_at_launch' in tag and - existing_tag_map[tag['key']].propagate_at_launch != tag['propagate_at_launch']) ): - changed = True - continue - else: - for k,v in tag.iteritems(): - if k !='propagate_at_launch': - if ( not k in existing_tag_map or - existing_tag_map[k].value != v or - ('propagate_at_launch' in tag and - existing_tag_map[k].propagate_at_launch != tag['propagate_at_launch']) ): - changed = True - continue - if changed: - connection.create_or_update_tags(asg_tags) - - # handle loadbalancers separately because None != [] - load_balancers = module.params.get('load_balancers') or [] - if load_balancers and as_group.load_balancers != load_balancers: - changed = True - as_group.load_balancers = module.params.get('load_balancers') - - try: - if changed: - as_group.update() - asg_properties = get_properties(as_group) - return(changed, asg_properties) - except BotoServerError, e: - module.fail_json(msg=str(e)) - - - result = as_groups[0] - module.exit_json(changed=changed, name=result.name, - autoscaling_group_arn=result.autoscaling_group_arn, - availability_zones=result.availability_zones, - created_time=str(result.created_time), - default_cooldown=result.default_cooldown, - health_check_period=result.health_check_period, - health_check_type=result.health_check_type, - instance_id=result.instance_id, - instances=[instance.instance_id for instance in result.instances], - 
launch_config_name=result.launch_config_name, - load_balancers=result.load_balancers, - min_size=result.min_size, max_size=result.max_size, - placement_group=result.placement_group, - wait_timeout = dict(default=300), - tags=result.tags, - termination_policies=result.termination_policies, - vpc_zone_identifier=result.vpc_zone_identifier) - - -def delete_autoscaling_group(connection, module): - group_name = module.params.get('name') - groups = connection.get_all_groups(names=[group_name]) - if groups: - group = groups[0] - group.max_size = 0 - group.min_size = 0 - group.desired_capacity = 0 - group.update() - instances = True - while instances: - tmp_groups = connection.get_all_groups(names=[group_name]) - if tmp_groups: - tmp_group = tmp_groups[0] - if not tmp_group.instances: - instances = False - time.sleep(10) - - group.delete() - changed=True - return changed - else: - changed=False - return changed - -def get_chunks(l, n): - for i in xrange(0, len(l), n): - yield l[i:i+n] - -def replace(connection, module): - - batch_size = module.params.get('replace_batch_size') - wait_timeout = module.params.get('wait_timeout') - group_name = module.params.get('group_name') - max_size = module.params.get('max_size') - min_size = module.params.get('min_size') - desired_capacity = module.params.get('desired_capacity') - replace_instances = module.params.get('replace_instances') - - - # wait for instance list to be populated on a newly provisioned ASG - instance_wait = time.time() + 30 - while instance_wait > time.time(): - as_group = connection.get_all_groups(names=[group_name])[0] - props = get_properties(as_group) - if props.has_key('instances'): - instances = props['instances'] - break - time.sleep(10) - if instance_wait <= time.time(): - # waiting took too long - module.fail_json(msg = "Waited too for instances to appear. 
%s" % time.asctime()) - # determine if we need to continue - replaceable = 0 - if replace_instances: - instances = replace_instances - for k in props['instance_facts'].keys(): - if k in instances: - if props['instance_facts'][k]['launch_config_name'] != props['launch_config_name']: - replaceable += 1 - if replaceable == 0: - changed = False - return(changed, props) - - # set temporary settings and wait for them to be reached - as_group.max_size = max_size + batch_size - as_group.min_size = min_size + batch_size - as_group.desired_capacity = desired_capacity + batch_size - as_group.update() - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time() and min_size + batch_size > props['viable_instances']: - time.sleep(10) - as_groups = connection.get_all_groups(names=[group_name]) - as_group = as_groups[0] - props = get_properties(as_group) - if wait_timeout <= time.time(): - # waiting took too long - module.fail_json(msg = "Waited too for instances to appear. %s" % time.asctime()) - instances = props['instances'] - if replace_instances: - instances = replace_instances - for i in get_chunks(instances, batch_size): - replace_batch(connection, module, i) - # return settings to normal - as_group = connection.get_all_groups(names=[group_name])[0] - as_group.max_size = max_size - as_group.min_size = min_size - as_group.desired_capacity = desired_capacity - as_group.update() - as_group = connection.get_all_groups(names=[group_name])[0] - asg_properties = get_properties(as_group) - changed=True - return(changed, asg_properties) - -def replace_batch(connection, module, replace_instances): - - - group_name = module.params.get('group_name') - wait_timeout = int(module.params.get('wait_timeout')) - lc_check = module.params.get('lc_check') - - as_group = connection.get_all_groups(names=[group_name])[0] - props = get_properties(as_group) - - # check to make sure instances given are actually in the given ASG - # and they have a non-current launch config - 
old_instances = [] - instances = ( inst_id for inst_id in replace_instances if inst_id in props['instances']) - - if lc_check: - for i in instances: - if props['instance_facts'][i]['launch_config_name'] != props['launch_config_name']: - old_instances.append(i) - else: - old_instances = instances - - # set all instances given to unhealthy - for instance_id in old_instances: - connection.set_instance_health(instance_id,'Unhealthy') - - # we wait to make sure the machines we marked as Unhealthy are - # no longer in the list - - count = 1 - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time() and count > 0: - count = 0 - as_group = connection.get_all_groups(names=[group_name])[0] - props = get_properties(as_group) - instance_facts = props['instance_facts'] - instances = ( i for i in instance_facts if i in old_instances) - for i in instances: - if ( instance_facts[i]['lifecycle_state'] == 'Terminating' - or instance_facts[i]['health_status'] == 'Unhealthy' ): - count += 1 - time.sleep(10) - - if wait_timeout <= time.time(): - # waiting took too long - module.fail_json(msg = "Waited too long for old instances to terminate. %s" % time.asctime()) - - # make sure we have the latest stats after that last loop. - as_group = connection.get_all_groups(names=[group_name])[0] - props = get_properties(as_group) - - # now we make sure that we have enough instances in a viable state - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time() and props['min_size'] > props['viable_instances']: - time.sleep(10) - as_groups = connection.get_all_groups(names=[group_name]) - as_group = as_groups[0] - props = get_properties(as_group) - - if wait_timeout <= time.time(): - # waiting took too long - module.fail_json(msg = "Waited too long for new instances to become viable. 
%s" % time.asctime()) - - # collect final stats info - as_group = connection.get_all_groups(names=[group_name])[0] - asg_properties = get_properties(as_group) - - - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update( - dict( - name=dict(required=True, type='str'), - load_balancers=dict(type='list'), - availability_zones=dict(type='list'), - launch_config_name=dict(type='str'), - min_size=dict(type='int'), - max_size=dict(type='int'), - desired_capacity=dict(type='int'), - vpc_zone_identifier=dict(type='str'), - replace_batch_size=dict(type='int', default=1), - replace_all_instances=dict(type='bool', default=False), - replace_instances=dict(type='list', default=[]), - lc_check=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=300), - state=dict(default='present', choices=['present', 'absent']), - tags=dict(type='list', default=[]), - health_check_period=dict(type='int', default=300), - health_check_type=dict(default='EC2', choices=['EC2', 'ELB']), - ) - ) - module = AnsibleModule(argument_spec=argument_spec) - - state = module.params.get('state') - replace_instances = module.params.get('replace_instances') - replace_all_instances = module.params.get('replace_all_instances') - region, ec2_url, aws_connect_params = get_aws_connection_info(module) - try: - connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params) - if not connection: - module.fail_json(msg="failed to connect to AWS for the given region: %s" % str(region)) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg=str(e)) - changed = False - if replace_all_instances and replace_instances: - module.fail_json(msg="You can't use replace_instances and replace_all_instances in the same task.") - if state == 'present': - create_changed, asg_properties=create_autoscaling_group(connection, module) - if replace_all_instances or replace_instances: - replace_changed, asg_properties=replace(connection, module) - elif state == 'absent': - 
changed = delete_autoscaling_group(connection, module) - module.exit_json( changed = changed ) - if create_changed or replace_changed: - changed = True - module.exit_json( changed = changed, **asg_properties ) - -main() diff --git a/library/cloud/ec2_eip b/library/cloud/ec2_eip deleted file mode 100644 index cff83e482b..0000000000 --- a/library/cloud/ec2_eip +++ /dev/null @@ -1,305 +0,0 @@ -#!/usr/bin/python -DOCUMENTATION = ''' ---- -module: ec2_eip -short_description: associate an EC2 elastic IP with an instance. -description: - - This module associates AWS EC2 elastic IP addresses with instances -version_added: 1.4 -options: - instance_id: - description: - - The EC2 instance id - required: false - public_ip: - description: - - The elastic IP address to associate with the instance. - - If absent, allocate a new address - required: false - state: - description: - - If present, associate the IP with the instance. - - If absent, disassociate the IP with the instance. - required: false - choices: ['present', 'absent'] - default: present - region: - description: - - the EC2 region to use - required: false - default: null - aliases: [ ec2_region ] - in_vpc: - description: - - allocate an EIP inside a VPC or not - required: false - default: false - version_added: "1.4" - reuse_existing_ip_allowed: - description: - - Reuse an EIP that is not associated to an instance (when available), instead of allocating a new one. - required: false - default: false - version_added: "1.6" - wait_timeout: - description: - - how long to wait in seconds for newly provisioned EIPs to become available - default: 300 - version_added: "1.7" - -extends_documentation_fragment: aws -author: Lorin Hochstein -notes: - - This module will return C(public_ip) on success, which will contain the - public IP address associated with the instance. - - There may be a delay between the time the Elastic IP is assigned and when - the cloud instance is reachable via the new address. 
Use wait_for and pause - to delay further playbook execution until the instance is reachable, if - necessary. -''' - -EXAMPLES = ''' -- name: associate an elastic IP with an instance - ec2_eip: instance_id=i-1212f003 ip=93.184.216.119 - -- name: disassociate an elastic IP from an instance - ec2_eip: instance_id=i-1212f003 ip=93.184.216.119 state=absent - -- name: allocate a new elastic IP and associate it with an instance - ec2_eip: instance_id=i-1212f003 - -- name: allocate a new elastic IP without associating it to anything - ec2_eip: - register: eip -- name: output the IP - debug: msg="Allocated IP is {{ eip.public_ip }}" - -- name: provision new instances with ec2 - ec2: keypair=mykey instance_type=c1.medium image=emi-40603AD1 wait=yes group=webserver count=3 - register: ec2 -- name: associate new elastic IPs with each of the instances - ec2_eip: "instance_id={{ item }}" - with_items: ec2.instance_ids - -- name: allocate a new elastic IP inside a VPC in us-west-2 - ec2_eip: region=us-west-2 in_vpc=yes - register: eip -- name: output the IP - debug: msg="Allocated IP inside a VPC is {{ eip.public_ip }}" -''' - -try: - import boto.ec2 -except ImportError: - boto_found = False -else: - boto_found = True - - -wait_timeout = 0 - -def associate_ip_and_instance(ec2, address, instance_id, module): - if ip_is_associated_with_instance(ec2, address.public_ip, instance_id, module): - module.exit_json(changed=False, public_ip=address.public_ip) - - # If we're in check mode, nothing else to do - if module.check_mode: - module.exit_json(changed=True) - - try: - if address.domain == "vpc": - res = ec2.associate_address(instance_id, allocation_id=address.allocation_id) - else: - res = ec2.associate_address(instance_id, public_ip=address.public_ip) - except boto.exception.EC2ResponseError, e: - module.fail_json(msg=str(e)) - - if res: - module.exit_json(changed=True, public_ip=address.public_ip) - else: - module.fail_json(msg="association failed") - - -def 
disassociate_ip_and_instance(ec2, address, instance_id, module): - if not ip_is_associated_with_instance(ec2, address.public_ip, instance_id, module): - module.exit_json(changed=False, public_ip=address.public_ip) - - # If we're in check mode, nothing else to do - if module.check_mode: - module.exit_json(changed=True) - - try: - if address.domain == "vpc": - res = ec2.disassociate_address(association_id=address.association_id) - else: - res = ec2.disassociate_address(public_ip=address.public_ip) - except boto.exception.EC2ResponseError, e: - module.fail_json(msg=str(e)) - - if res: - module.exit_json(changed=True) - else: - module.fail_json(msg="disassociation failed") - - -def find_address(ec2, public_ip, module): - """ Find an existing Elastic IP address """ - if wait_timeout != 0: - timeout = time.time() + wait_timeout - while timeout > time.time(): - try: - addresses = ec2.get_all_addresses([public_ip]) - break - except boto.exception.EC2ResponseError, e: - if "Address '%s' not found." 
% public_ip in e.message : - pass - else: - module.fail_json(msg=str(e.message)) - time.sleep(5) - - if timeout <= time.time(): - module.fail_json(msg = "wait for EIPs timeout on %s" % time.asctime()) - else: - try: - addresses = ec2.get_all_addresses([public_ip]) - except boto.exception.EC2ResponseError, e: - module.fail_json(msg=str(e.message)) - - return addresses[0] - - -def ip_is_associated_with_instance(ec2, public_ip, instance_id, module): - """ Check if the elastic IP is currently associated with the instance """ - address = find_address(ec2, public_ip, module) - if address: - return address.instance_id == instance_id - else: - return False - - -def allocate_address(ec2, domain, module, reuse_existing_ip_allowed): - """ Allocate a new elastic IP address (when needed) and return it """ - # If we're in check mode, nothing else to do - if module.check_mode: - module.exit_json(change=True) - - if reuse_existing_ip_allowed: - if domain: - domain_filter = { 'domain' : domain } - else: - domain_filter = { 'domain' : 'standard' } - all_addresses = ec2.get_all_addresses(filters=domain_filter) - - unassociated_addresses = filter(lambda a: a.instance_id == "", all_addresses) - if unassociated_addresses: - address = unassociated_addresses[0]; - else: - address = ec2.allocate_address(domain=domain) - else: - address = ec2.allocate_address(domain=domain) - - return address - - -def release_address(ec2, public_ip, module): - """ Release a previously allocated elastic IP address """ - - address = find_address(ec2, public_ip, module) - - # If we're in check mode, nothing else to do - if module.check_mode: - module.exit_json(change=True) - - res = address.release() - if res: - module.exit_json(changed=True) - else: - module.fail_json(msg="release failed") - - -def find_instance(ec2, instance_id, module): - """ Attempt to find the EC2 instance and return it """ - - try: - reservations = ec2.get_all_reservations(instance_ids=[instance_id]) - except 
boto.exception.EC2ResponseError, e: - module.fail_json(msg=str(e)) - - if len(reservations) == 1: - instances = reservations[0].instances - if len(instances) == 1: - return instances[0] - - module.fail_json(msg="could not find instance" + instance_id) - - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( - instance_id = dict(required=False), - public_ip = dict(required=False, aliases= ['ip']), - state = dict(required=False, default='present', - choices=['present', 'absent']), - in_vpc = dict(required=False, type='bool', default=False), - reuse_existing_ip_allowed = dict(required=False, type='bool', default=False), - wait_timeout = dict(default=300), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True - ) - - if not boto_found: - module.fail_json(msg="boto is required") - - ec2 = ec2_connect(module) - - instance_id = module.params.get('instance_id') - public_ip = module.params.get('public_ip') - state = module.params.get('state') - in_vpc = module.params.get('in_vpc') - domain = "vpc" if in_vpc else None - reuse_existing_ip_allowed = module.params.get('reuse_existing_ip_allowed') - new_eip_timeout = int(module.params.get('wait_timeout')) - - if state == 'present': - # Allocate an EIP and exit - if not instance_id and not public_ip: - address = allocate_address(ec2, domain, module, reuse_existing_ip_allowed) - module.exit_json(changed=True, public_ip=address.public_ip) - - # Return the EIP object since we've been given a public IP - if public_ip: - address = find_address(ec2, public_ip, module) - - # Allocate an IP for instance since no public_ip was provided - if instance_id and not public_ip: - instance = find_instance(ec2, instance_id, module) - if instance.vpc_id: - domain = "vpc" - address = allocate_address(ec2, domain, module, reuse_existing_ip_allowed) - # overriding the timeout since this is a a newly provisioned ip - global wait_timeout - wait_timeout = new_eip_timeout - - # Associate 
address object (provided or allocated) with instance - associate_ip_and_instance(ec2, address, instance_id, module) - - else: - #disassociating address from instance - if instance_id: - address = find_address(ec2, public_ip, module) - disassociate_ip_and_instance(ec2, address, instance_id, module) - #releasing address - else: - release_address(ec2, public_ip, module) - - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -if __name__ == '__main__': - main() diff --git a/library/cloud/ec2_elb b/library/cloud/ec2_elb deleted file mode 100644 index 42cb181902..0000000000 --- a/library/cloud/ec2_elb +++ /dev/null @@ -1,339 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = """ ---- -module: ec2_elb -short_description: De-registers or registers instances from EC2 ELBs -description: - - This module de-registers or registers an AWS EC2 instance from the ELBs - that it belongs to. - - Returns fact "ec2_elbs" which is a list of elbs attached to the instance - if state=absent is passed as an argument. - - Will be marked changed when called only if there are ELBs found to operate on. 
-version_added: "1.2" -author: John Jarvis -options: - state: - description: - - register or deregister the instance - required: true - choices: ['present', 'absent'] - instance_id: - description: - - EC2 Instance ID - required: true - ec2_elbs: - description: - - List of ELB names, required for registration. The ec2_elbs fact should be used if there was a previous de-register. - required: false - default: None - region: - description: - - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. - required: false - aliases: ['aws_region', 'ec2_region'] - enable_availability_zone: - description: - - Whether to enable the availability zone of the instance on the target ELB if the availability zone has not already - been enabled. If set to no, the task will fail if the availability zone is not enabled on the ELB. - required: false - default: yes - choices: [ "yes", "no" ] - wait: - description: - - Wait for instance registration or deregistration to complete successfully before returning. - required: false - default: yes - choices: [ "yes", "no" ] - validate_certs: - description: - - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0. - required: false - default: "yes" - choices: ["yes", "no"] - aliases: [] - version_added: "1.5" - wait_timeout: - description: - - Number of seconds to wait for an instance to change state. If 0 then this module may return an error if a transient error occurs. If non-zero then any transient errors are ignored until the timeout is reached. Ignored when wait=no. 
- required: false - default: 0 - version_added: "1.6" -extends_documentation_fragment: aws -""" - -EXAMPLES = """ -# basic pre_task and post_task example -pre_tasks: - - name: Gathering ec2 facts - ec2_facts: - - name: Instance De-register - local_action: ec2_elb - args: - instance_id: "{{ ansible_ec2_instance_id }}" - state: 'absent' -roles: - - myrole -post_tasks: - - name: Instance Register - local_action: ec2_elb - args: - instance_id: "{{ ansible_ec2_instance_id }}" - ec2_elbs: "{{ item }}" - state: 'present' - with_items: ec2_elbs -""" - -import time -import sys -import os - -try: - import boto - import boto.ec2 - import boto.ec2.elb - from boto.regioninfo import RegionInfo -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - -class ElbManager: - """Handles EC2 instance ELB registration and de-registration""" - - def __init__(self, module, instance_id=None, ec2_elbs=None, - region=None, **aws_connect_params): - self.module = module - self.instance_id = instance_id - self.region = region - self.aws_connect_params = aws_connect_params - self.lbs = self._get_instance_lbs(ec2_elbs) - self.changed = False - - def deregister(self, wait, timeout): - """De-register the instance from all ELBs and wait for the ELB - to report it out-of-service""" - - for lb in self.lbs: - initial_state = self._get_instance_health(lb) - if initial_state is None: - # The instance isn't registered with this ELB so just - # return unchanged - return - - lb.deregister_instances([self.instance_id]) - - # The ELB is changing state in some way. Either an instance that's - # InService is moving to OutOfService, or an instance that's - # already OutOfService is being deregistered. 
- self.changed = True - - if wait: - self._await_elb_instance_state(lb, 'OutOfService', initial_state, timeout) - - def register(self, wait, enable_availability_zone, timeout): - """Register the instance for all ELBs and wait for the ELB - to report the instance in-service""" - for lb in self.lbs: - initial_state = self._get_instance_health(lb) - - if enable_availability_zone: - self._enable_availailability_zone(lb) - - lb.register_instances([self.instance_id]) - - if wait: - self._await_elb_instance_state(lb, 'InService', initial_state, timeout) - else: - # We cannot assume no change was made if we don't wait - # to find out - self.changed = True - - def exists(self, lbtest): - """ Verify that the named ELB actually exists """ - - found = False - for lb in self.lbs: - if lb.name == lbtest: - found=True - break - return found - - def _enable_availailability_zone(self, lb): - """Enable the current instance's availability zone in the provided lb. - Returns True if the zone was enabled or False if no change was made. - lb: load balancer""" - instance = self._get_instance() - if instance.placement in lb.availability_zones: - return False - - lb.enable_zones(zones=instance.placement) - - # If successful, the new zone will have been added to - # lb.availability_zones - return instance.placement in lb.availability_zones - - def _await_elb_instance_state(self, lb, awaited_state, initial_state, timeout): - """Wait for an ELB to change state - lb: load balancer - awaited_state : state to poll for (string)""" - - wait_timeout = time.time() + timeout - while True: - instance_state = self._get_instance_health(lb) - - if not instance_state: - msg = ("The instance %s could not be put in service on %s." - " Reason: Invalid Instance") - self.module.fail_json(msg=msg % (self.instance_id, lb)) - - if instance_state.state == awaited_state: - # Check the current state against the initial state, and only set - # changed if they are different. 
- if (initial_state is None) or (instance_state.state != initial_state.state): - self.changed = True - break - elif self._is_instance_state_pending(instance_state): - # If it's pending, we'll skip further checks andd continue waiting - pass - elif (awaited_state == 'InService' - and instance_state.reason_code == "Instance" - and time.time() >= wait_timeout): - # If the reason_code for the instance being out of service is - # "Instance" this indicates a failure state, e.g. the instance - # has failed a health check or the ELB does not have the - # instance's availabilty zone enabled. The exact reason why is - # described in InstantState.description. - msg = ("The instance %s could not be put in service on %s." - " Reason: %s") - self.module.fail_json(msg=msg % (self.instance_id, - lb, - instance_state.description)) - time.sleep(1) - - def _is_instance_state_pending(self, instance_state): - """ - Determines whether the instance_state is "pending", meaning there is - an operation under way to bring it in service. - """ - # This is messy, because AWS provides no way to distinguish between - # an instance that is is OutOfService because it's pending vs. OutOfService - # because it's failing health checks. So we're forced to analyze the - # description, which is likely to be brittle. - return (instance_state and 'pending' in instance_state.description) - - def _get_instance_health(self, lb): - """ - Check instance health, should return status object or None under - certain error conditions. 
- """ - try: - status = lb.get_instance_health([self.instance_id])[0] - except boto.exception.BotoServerError, e: - if e.error_code == 'InvalidInstance': - return None - else: - raise - return status - - def _get_instance_lbs(self, ec2_elbs=None): - """Returns a list of ELBs attached to self.instance_id - ec2_elbs: an optional list of elb names that will be used - for elb lookup instead of returning what elbs - are attached to self.instance_id""" - - try: - elb = connect_to_aws(boto.ec2.elb, self.region, - **self.aws_connect_params) - except boto.exception.NoAuthHandlerFound, e: - self.module.fail_json(msg=str(e)) - - elbs = elb.get_all_load_balancers() - - if ec2_elbs: - lbs = sorted(lb for lb in elbs if lb.name in ec2_elbs) - else: - lbs = [] - for lb in elbs: - for info in lb.instances: - if self.instance_id == info.id: - lbs.append(lb) - return lbs - - def _get_instance(self): - """Returns a boto.ec2.InstanceObject for self.instance_id""" - try: - ec2 = connect_to_aws(boto.ec2, self.region, - **self.aws_connect_params) - except boto.exception.NoAuthHandlerFound, e: - self.module.fail_json(msg=str(e)) - return ec2.get_only_instances(instance_ids=[self.instance_id])[0] - - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( - state={'required': True}, - instance_id={'required': True}, - ec2_elbs={'default': None, 'required': False, 'type':'list'}, - enable_availability_zone={'default': True, 'required': False, 'type': 'bool'}, - wait={'required': False, 'default': True, 'type': 'bool'}, - wait_timeout={'requred': False, 'default': 0, 'type': 'int'} - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - ) - - region, ec2_url, aws_connect_params = get_aws_connection_info(module) - - if not region: - module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file") - - ec2_elbs = module.params['ec2_elbs'] - wait = module.params['wait'] - 
enable_availability_zone = module.params['enable_availability_zone'] - timeout = module.params['wait_timeout'] - - if module.params['state'] == 'present' and 'ec2_elbs' not in module.params: - module.fail_json(msg="ELBs are required for registration") - - instance_id = module.params['instance_id'] - elb_man = ElbManager(module, instance_id, ec2_elbs, - region=region, **aws_connect_params) - - if ec2_elbs is not None: - for elb in ec2_elbs: - if not elb_man.exists(elb): - msg="ELB %s does not exist" % elb - module.fail_json(msg=msg) - - if module.params['state'] == 'present': - elb_man.register(wait, enable_availability_zone, timeout) - elif module.params['state'] == 'absent': - elb_man.deregister(wait, timeout) - - ansible_facts = {'ec2_elbs': [lb.name for lb in elb_man.lbs]} - ec2_facts_result = dict(changed=elb_man.changed, ansible_facts=ansible_facts) - - module.exit_json(**ec2_facts_result) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -main() diff --git a/library/cloud/ec2_elb_lb b/library/cloud/ec2_elb_lb deleted file mode 100644 index 462fbbcc79..0000000000 --- a/library/cloud/ec2_elb_lb +++ /dev/null @@ -1,698 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = """ ---- -module: ec2_elb_lb -description: - - Returns information about the load balancer. 
- - Will be marked changed when called only if state is changed. -short_description: Creates or destroys Amazon ELB. -version_added: "1.5" -author: Jim Dalton -options: - state: - description: - - Create or destroy the ELB - required: true - name: - description: - - The name of the ELB - required: true - listeners: - description: - - List of ports/protocols for this ELB to listen on (see example) - required: false - purge_listeners: - description: - - Purge existing listeners on ELB that are not found in listeners - required: false - default: true - zones: - description: - - List of availability zones to enable on this ELB - required: false - purge_zones: - description: - - Purge existing availability zones on ELB that are not found in zones - required: false - default: false - security_group_ids: - description: - - A list of security groups to apply to the elb - require: false - default: None - version_added: "1.6" - health_check: - description: - - An associative array of health check configuration settigs (see example) - require: false - default: None - region: - description: - - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. - required: false - aliases: ['aws_region', 'ec2_region'] - subnets: - description: - - A list of VPC subnets to use when creating ELB. Zones should be empty if using this. - required: false - default: None - aliases: [] - version_added: "1.7" - purge_subnets: - description: - - Purge existing subnet on ELB that are not found in subnets - required: false - default: false - version_added: "1.7" - scheme: - description: - - The scheme to use when creating the ELB. For a private VPC-visible ELB use 'internal'. - required: false - default: 'internet-facing' - version_added: "1.7" - validate_certs: - description: - - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0. 
- required: false - default: "yes" - choices: ["yes", "no"] - aliases: [] - version_added: "1.5" - connection_draining_timeout: - description: - - Wait a specified timeout allowing connections to drain before terminating an instance - required: false - aliases: [] - version_added: "1.8" - cross_az_load_balancing: - description: - - Distribute load across all configured Availablity Zones - required: false - default: "no" - choices: ["yes", "no"] - aliases: [] - version_added: "1.8" - -extends_documentation_fragment: aws -""" - -EXAMPLES = """ -# Note: None of these examples set aws_access_key, aws_secret_key, or region. -# It is assumed that their matching environment variables are set. - -# Basic provisioning example -- local_action: - module: ec2_elb_lb - name: "test-please-delete" - state: present - zones: - - us-east-1a - - us-east-1d - listeners: - - protocol: http # options are http, https, ssl, tcp - load_balancer_port: 80 - instance_port: 80 - - protocol: https - load_balancer_port: 443 - instance_protocol: http # optional, defaults to value of protocol setting - instance_port: 80 - # ssl certificate required for https or ssl - ssl_certificate_id: "arn:aws:iam::123456789012:server-certificate/company/servercerts/ProdServerCert" - - -# Basic VPC provisioning example -- local_action: - module: ec2_elb_lb - name: "test-vpc" - scheme: internal - state: present - subnets: - - subnet-abcd1234 - - subnet-1a2b3c4d - listeners: - - protocol: http # options are http, https, ssl, tcp - load_balancer_port: 80 - instance_port: 80 - -# Configure a health check -- local_action: - module: ec2_elb_lb - name: "test-please-delete" - state: present - zones: - - us-east-1d - listeners: - - protocol: http - load_balancer_port: 80 - instance_port: 80 - health_check: - ping_protocol: http # options are http, https, ssl, tcp - ping_port: 80 - ping_path: "/index.html" # not required for tcp or ssl - response_timeout: 5 # seconds - interval: 30 # seconds - unhealthy_threshold: 2 - 
healthy_threshold: 10 - -# Ensure ELB is gone -- local_action: - module: ec2_elb_lb - name: "test-please-delete" - state: absent - -# Normally, this module will purge any listeners that exist on the ELB -# but aren't specified in the listeners parameter. If purge_listeners is -# false it leaves them alone -- local_action: - module: ec2_elb_lb - name: "test-please-delete" - state: present - zones: - - us-east-1a - - us-east-1d - listeners: - - protocol: http - load_balancer_port: 80 - instance_port: 80 - purge_listeners: no - -# Normally, this module will leave availability zones that are enabled -# on the ELB alone. If purge_zones is true, then any extreneous zones -# will be removed -- local_action: - module: ec2_elb_lb - name: "test-please-delete" - state: present - zones: - - us-east-1a - - us-east-1d - listeners: - - protocol: http - load_balancer_port: 80 - instance_port: 80 - purge_zones: yes - -# Creates a ELB and assigns a list of subnets to it. -- local_action: - module: ec2_elb_lb - state: present - name: 'New ELB' - security_group_ids: 'sg-123456, sg-67890' - region: us-west-2 - subnets: 'subnet-123456, subnet-67890' - purge_subnets: yes - listeners: - - protocol: http - load_balancer_port: 80 - instance_port: 80 - -# Create an ELB with connection draining and cross availability -# zone load balancing -- local_action: - module: ec2_elb_lb - name: "New ELB" - state: present - connection_draining_timeout: 60 - cross_az_load_balancing: "yes" - region: us-east-1 - zones: - - us-east-1a - - us-east-1d - listeners: - - protocols: http - - load_balancer_port: 80 - - instance_port: 80 -""" - -import sys -import os - -try: - import boto - import boto.ec2.elb - import boto.ec2.elb.attributes - from boto.ec2.elb.healthcheck import HealthCheck - from boto.regioninfo import RegionInfo -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - - -class ElbManager(object): - """Handles ELB creation and destruction""" - - def 
__init__(self, module, name, listeners=None, purge_listeners=None, - zones=None, purge_zones=None, security_group_ids=None, - health_check=None, subnets=None, purge_subnets=None, - scheme="internet-facing", connection_draining_timeout=None, - cross_az_load_balancing=None, region=None, **aws_connect_params): - - self.module = module - self.name = name - self.listeners = listeners - self.purge_listeners = purge_listeners - self.zones = zones - self.purge_zones = purge_zones - self.security_group_ids = security_group_ids - self.health_check = health_check - self.subnets = subnets - self.purge_subnets = purge_subnets - self.scheme = scheme - self.connection_draining_timeout = connection_draining_timeout - self.cross_az_load_balancing = cross_az_load_balancing - - self.aws_connect_params = aws_connect_params - self.region = region - - self.changed = False - self.status = 'gone' - self.elb_conn = self._get_elb_connection() - self.elb = self._get_elb() - - def ensure_ok(self): - """Create the ELB""" - if not self.elb: - # Zones and listeners will be added at creation - self._create_elb() - else: - self._set_zones() - self._set_security_groups() - self._set_elb_listeners() - self._set_subnets() - self._set_health_check() - # boto has introduced support for some ELB attributes in - # different versions, so we check first before trying to - # set them to avoid errors - if self._check_attribute_support('connection_draining'): - self._set_connection_draining_timeout() - if self._check_attribute_support('cross_zone_load_balancing'): - self._set_cross_az_load_balancing() - - def ensure_gone(self): - """Destroy the ELB""" - if self.elb: - self._delete_elb() - - def get_info(self): - try: - check_elb = self.elb_conn.get_all_load_balancers(self.name)[0] - except: - check_elb = None - - if not check_elb: - info = { - 'name': self.name, - 'status': self.status - } - else: - info = { - 'name': check_elb.name, - 'dns_name': check_elb.dns_name, - 'zones': check_elb.availability_zones, - 
'security_group_ids': check_elb.security_groups, - 'status': self.status, - 'subnets': self.subnets, - 'scheme': check_elb.scheme - } - - if check_elb.health_check: - info['health_check'] = { - 'target': check_elb.health_check.target, - 'interval': check_elb.health_check.interval, - 'timeout': check_elb.health_check.timeout, - 'healthy_threshold': check_elb.health_check.healthy_threshold, - 'unhealthy_threshold': check_elb.health_check.unhealthy_threshold, - } - - if check_elb.listeners: - info['listeners'] = [l.get_complex_tuple() - for l in check_elb.listeners] - elif self.status == 'created': - # When creating a new ELB, listeners don't show in the - # immediately returned result, so just include the - # ones that were added - info['listeners'] = [self._listener_as_tuple(l) - for l in self.listeners] - else: - info['listeners'] = [] - - if self._check_attribute_support('connection_draining'): - info['connection_draining_timeout'] = self.elb_conn.get_lb_attribute(self.name, 'ConnectionDraining').timeout - - if self._check_attribute_support('cross_zone_load_balancing'): - is_cross_az_lb_enabled = self.elb_conn.get_lb_attribute(self.name, 'CrossZoneLoadBalancing') - if is_cross_az_lb_enabled: - info['cross_az_load_balancing'] = 'yes' - else: - info['cross_az_load_balancing'] = 'no' - - return info - - def _get_elb(self): - elbs = self.elb_conn.get_all_load_balancers() - for elb in elbs: - if self.name == elb.name: - self.status = 'ok' - return elb - - def _get_elb_connection(self): - try: - return connect_to_aws(boto.ec2.elb, self.region, - **self.aws_connect_params) - except boto.exception.NoAuthHandlerFound, e: - self.module.fail_json(msg=str(e)) - - def _delete_elb(self): - # True if succeeds, exception raised if not - result = self.elb_conn.delete_load_balancer(name=self.name) - if result: - self.changed = True - self.status = 'deleted' - - def _create_elb(self): - listeners = [self._listener_as_tuple(l) for l in self.listeners] - self.elb = 
self.elb_conn.create_load_balancer(name=self.name, - zones=self.zones, - security_groups=self.security_group_ids, - complex_listeners=listeners, - subnets=self.subnets, - scheme=self.scheme) - if self.elb: - self.changed = True - self.status = 'created' - - def _create_elb_listeners(self, listeners): - """Takes a list of listener tuples and creates them""" - # True if succeeds, exception raised if not - self.changed = self.elb_conn.create_load_balancer_listeners(self.name, - complex_listeners=listeners) - - def _delete_elb_listeners(self, listeners): - """Takes a list of listener tuples and deletes them from the elb""" - ports = [l[0] for l in listeners] - - # True if succeeds, exception raised if not - self.changed = self.elb_conn.delete_load_balancer_listeners(self.name, - ports) - - def _set_elb_listeners(self): - """ - Creates listeners specified by self.listeners; overwrites existing - listeners on these ports; removes extraneous listeners - """ - listeners_to_add = [] - listeners_to_remove = [] - listeners_to_keep = [] - - # Check for any listeners we need to create or overwrite - for listener in self.listeners: - listener_as_tuple = self._listener_as_tuple(listener) - - # First we loop through existing listeners to see if one is - # already specified for this port - existing_listener_found = None - for existing_listener in self.elb.listeners: - # Since ELB allows only one listener on each incoming port, a - # single match on the incomping port is all we're looking for - if existing_listener[0] == listener['load_balancer_port']: - existing_listener_found = existing_listener.get_complex_tuple() - break - - if existing_listener_found: - # Does it match exactly? 
- if listener_as_tuple != existing_listener_found: - # The ports are the same but something else is different, - # so we'll remove the exsiting one and add the new one - listeners_to_remove.append(existing_listener_found) - listeners_to_add.append(listener_as_tuple) - else: - # We already have this listener, so we're going to keep it - listeners_to_keep.append(existing_listener_found) - else: - # We didn't find an existing listener, so just add the new one - listeners_to_add.append(listener_as_tuple) - - # Check for any extraneous listeners we need to remove, if desired - if self.purge_listeners: - for existing_listener in self.elb.listeners: - existing_listener_tuple = existing_listener.get_complex_tuple() - if existing_listener_tuple in listeners_to_remove: - # Already queued for removal - continue - if existing_listener_tuple in listeners_to_keep: - # Keep this one around - continue - # Since we're not already removing it and we don't need to keep - # it, let's get rid of it - listeners_to_remove.append(existing_listener_tuple) - - if listeners_to_remove: - self._delete_elb_listeners(listeners_to_remove) - - if listeners_to_add: - self._create_elb_listeners(listeners_to_add) - - def _listener_as_tuple(self, listener): - """Formats listener as a 4- or 5-tuples, in the order specified by the - ELB API""" - # N.B. string manipulations on protocols below (str(), upper()) is to - # ensure format matches output from ELB API - listener_list = [ - listener['load_balancer_port'], - listener['instance_port'], - str(listener['protocol'].upper()), - ] - - # Instance protocol is not required by ELB API; it defaults to match - # load balancer protocol. 
We'll mimic that behavior here - if 'instance_protocol' in listener: - listener_list.append(str(listener['instance_protocol'].upper())) - else: - listener_list.append(str(listener['protocol'].upper())) - - if 'ssl_certificate_id' in listener: - listener_list.append(str(listener['ssl_certificate_id'])) - - return tuple(listener_list) - - def _enable_zones(self, zones): - try: - self.elb.enable_zones(zones) - except boto.exception.BotoServerError, e: - if "Invalid Availability Zone" in e.error_message: - self.module.fail_json(msg=e.error_message) - else: - self.module.fail_json(msg="an unknown server error occurred, please try again later") - self.changed = True - - def _disable_zones(self, zones): - try: - self.elb.disable_zones(zones) - except boto.exception.BotoServerError, e: - if "Invalid Availability Zone" in e.error_message: - self.module.fail_json(msg=e.error_message) - else: - self.module.fail_json(msg="an unknown server error occurred, please try again later") - self.changed = True - - def _attach_subnets(self, subnets): - self.elb_conn.attach_lb_to_subnets(self.name, subnets) - self.changed = True - - def _detach_subnets(self, subnets): - self.elb_conn.detach_lb_from_subnets(self.name, subnets) - self.changed = True - - def _set_subnets(self): - """Determine which subnets need to be attached or detached on the ELB""" - if self.subnets: - if self.purge_subnets: - subnets_to_detach = list(set(self.elb.subnets) - set(self.subnets)) - subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets)) - else: - subnets_to_detach = None - subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets)) - - if subnets_to_attach: - self._attach_subnets(subnets_to_attach) - if subnets_to_detach: - self._detach_subnets(subnets_to_detach) - - def _set_zones(self): - """Determine which zones need to be enabled or disabled on the ELB""" - if self.zones: - if self.purge_zones: - zones_to_disable = list(set(self.elb.availability_zones) - - set(self.zones)) - 
zones_to_enable = list(set(self.zones) - - set(self.elb.availability_zones)) - else: - zones_to_disable = None - zones_to_enable = list(set(self.zones) - - set(self.elb.availability_zones)) - if zones_to_enable: - self._enable_zones(zones_to_enable) - # N.B. This must come second, in case it would have removed all zones - if zones_to_disable: - self._disable_zones(zones_to_disable) - - def _set_security_groups(self): - if self.security_group_ids != None and set(self.elb.security_groups) != set(self.security_group_ids): - self.elb_conn.apply_security_groups_to_lb(self.name, self.security_group_ids) - self.Changed = True - - def _set_health_check(self): - """Set health check values on ELB as needed""" - if self.health_check: - # This just makes it easier to compare each of the attributes - # and look for changes. Keys are attributes of the current - # health_check; values are desired values of new health_check - health_check_config = { - "target": self._get_health_check_target(), - "timeout": self.health_check['response_timeout'], - "interval": self.health_check['interval'], - "unhealthy_threshold": self.health_check['unhealthy_threshold'], - "healthy_threshold": self.health_check['healthy_threshold'], - } - - update_health_check = False - - # The health_check attribute is *not* set on newly created - # ELBs! So we have to create our own. 
- if not self.elb.health_check: - self.elb.health_check = HealthCheck() - - for attr, desired_value in health_check_config.iteritems(): - if getattr(self.elb.health_check, attr) != desired_value: - setattr(self.elb.health_check, attr, desired_value) - update_health_check = True - - if update_health_check: - self.elb.configure_health_check(self.elb.health_check) - self.changed = True - - def _check_attribute_support(self, attr): - return hasattr(boto.ec2.elb.attributes.LbAttributes(), attr) - - def _set_cross_az_load_balancing(self): - attributes = self.elb.get_attributes() - if self.cross_az_load_balancing: - attributes.cross_zone_load_balancing.enabled = True - else: - attributes.cross_zone_load_balancing.enabled = False - self.elb_conn.modify_lb_attribute(self.name, 'CrossZoneLoadBalancing', - attributes.cross_zone_load_balancing.enabled) - - def _set_connection_draining_timeout(self): - attributes = self.elb.get_attributes() - if self.connection_draining_timeout is not None: - attributes.connection_draining.enabled = True - attributes.connection_draining.timeout = self.connection_draining_timeout - self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining) - else: - attributes.connection_draining.enabled = False - self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining) - - def _get_health_check_target(self): - """Compose target string from healthcheck parameters""" - protocol = self.health_check['ping_protocol'].upper() - path = "" - - if protocol in ['HTTP', 'HTTPS'] and 'ping_path' in self.health_check: - path = self.health_check['ping_path'] - - return "%s:%s%s" % (protocol, self.health_check['ping_port'], path) - - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( - state={'required': True, 'choices': ['present', 'absent']}, - name={'required': True}, - listeners={'default': None, 'required': False, 'type': 'list'}, - purge_listeners={'default': 
True, 'required': False, 'type': 'bool'}, - zones={'default': None, 'required': False, 'type': 'list'}, - purge_zones={'default': False, 'required': False, 'type': 'bool'}, - security_group_ids={'default': None, 'required': False, 'type': 'list'}, - health_check={'default': None, 'required': False, 'type': 'dict'}, - subnets={'default': None, 'required': False, 'type': 'list'}, - purge_subnets={'default': False, 'required': False, 'type': 'bool'}, - scheme={'default': 'internet-facing', 'required': False}, - connection_draining_timeout={'default': None, 'required': False}, - cross_az_load_balancing={'default': None, 'required': False} - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - ) - - region, ec2_url, aws_connect_params = get_aws_connection_info(module) - if not region: - module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file") - - name = module.params['name'] - state = module.params['state'] - listeners = module.params['listeners'] - purge_listeners = module.params['purge_listeners'] - zones = module.params['zones'] - purge_zones = module.params['purge_zones'] - security_group_ids = module.params['security_group_ids'] - health_check = module.params['health_check'] - subnets = module.params['subnets'] - purge_subnets = module.params['purge_subnets'] - scheme = module.params['scheme'] - connection_draining_timeout = module.params['connection_draining_timeout'] - cross_az_load_balancing = module.params['cross_az_load_balancing'] - - if state == 'present' and not listeners: - module.fail_json(msg="At least one port is required for ELB creation") - - if state == 'present' and not (zones or subnets): - module.fail_json(msg="At least one availability zone or subnet is required for ELB creation") - - elb_man = ElbManager(module, name, listeners, purge_listeners, zones, - purge_zones, security_group_ids, health_check, - subnets, purge_subnets, scheme, - 
connection_draining_timeout, cross_az_load_balancing, - region=region, **aws_connect_params) - - # check for unsupported attributes for this version of boto - if cross_az_load_balancing and not elb_man._check_attribute_support('cross_zone_load_balancing'): - module.fail_json(msg="You must install boto >= 2.18.0 to use the cross_az_load_balancing attribute") - - if connection_draining_timeout and not elb_man._check_attribute_support('connection_draining'): - module.fail_json(msg="You must install boto >= 2.28.0 to use the connection_draining_timeout attribute") - - if state == 'present': - elb_man.ensure_ok() - elif state == 'absent': - elb_man.ensure_gone() - - ansible_facts = {'ec2_elb': 'info'} - ec2_facts_result = dict(changed=elb_man.changed, - elb=elb_man.get_info(), - ansible_facts=ansible_facts) - - module.exit_json(**ec2_facts_result) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -main() diff --git a/library/cloud/ec2_facts b/library/cloud/ec2_facts deleted file mode 100644 index 7b5c610dc2..0000000000 --- a/library/cloud/ec2_facts +++ /dev/null @@ -1,182 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -DOCUMENTATION = ''' ---- -module: ec2_facts -short_description: Gathers facts about remote hosts within ec2 (aws) -version_added: "1.0" -options: - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - required: false - default: 'yes' - choices: ['yes', 'no'] - version_added: 1.5.1 -description: - - This module fetches data from the metadata servers in ec2 (aws) as per - http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html. - The module must be called from within the EC2 instance itself. - Eucalyptus cloud provides a similar service and this module should - work with this cloud provider as well. -notes: - - Parameters to filter on ec2_facts may be added later. -author: "Silviu Dicu " -''' - -EXAMPLES = ''' -# Conditional example -- name: Gather facts - action: ec2_facts - -- name: Conditional - action: debug msg="This instance is a t1.micro" - when: ansible_ec2_instance_type == "t1.micro" -''' - -import socket -import re - -socket.setdefaulttimeout(5) - -class Ec2Metadata(object): - - ec2_metadata_uri = 'http://169.254.169.254/latest/meta-data/' - ec2_sshdata_uri = 'http://169.254.169.254/latest/meta-data/public-keys/0/openssh-key' - ec2_userdata_uri = 'http://169.254.169.254/latest/user-data/' - - AWS_REGIONS = ('ap-northeast-1', - 'ap-southeast-1', - 'ap-southeast-2', - 'eu-west-1', - 'sa-east-1', - 'us-east-1', - 'us-west-1', - 'us-west-2', - 'us-gov-west-1' - ) - - def __init__(self, module, ec2_metadata_uri=None, ec2_sshdata_uri=None, ec2_userdata_uri=None): - self.module = module - self.uri_meta = ec2_metadata_uri or self.ec2_metadata_uri - self.uri_user = ec2_userdata_uri or self.ec2_userdata_uri - self.uri_ssh = ec2_sshdata_uri or self.ec2_sshdata_uri - self._data = {} - self._prefix = 'ansible_ec2_%s' - - def _fetch(self, url): - (response, info) = fetch_url(self.module, url, force=True) - if response: - 
data = response.read() - else: - data = None - return data - - def _mangle_fields(self, fields, uri, filter_patterns=['public-keys-0']): - new_fields = {} - for key, value in fields.iteritems(): - split_fields = key[len(uri):].split('/') - if len(split_fields) > 1 and split_fields[1]: - new_key = "-".join(split_fields) - new_fields[self._prefix % new_key] = value - else: - new_key = "".join(split_fields) - new_fields[self._prefix % new_key] = value - for pattern in filter_patterns: - for key in new_fields.keys(): - match = re.search(pattern, key) - if match: - new_fields.pop(key) - return new_fields - - def fetch(self, uri, recurse=True): - raw_subfields = self._fetch(uri) - if not raw_subfields: - return - subfields = raw_subfields.split('\n') - for field in subfields: - if field.endswith('/') and recurse: - self.fetch(uri + field) - if uri.endswith('/'): - new_uri = uri + field - else: - new_uri = uri + '/' + field - if new_uri not in self._data and not new_uri.endswith('/'): - content = self._fetch(new_uri) - if field == 'security-groups': - sg_fields = ",".join(content.split('\n')) - self._data['%s' % (new_uri)] = sg_fields - else: - self._data['%s' % (new_uri)] = content - - def fix_invalid_varnames(self, data): - """Change ':'' and '-' to '_' to ensure valid template variable names""" - for (key, value) in data.items(): - if ':' in key or '-' in key: - newkey = key.replace(':','_').replace('-','_') - del data[key] - data[newkey] = value - - def add_ec2_region(self, data): - """Use the 'ansible_ec2_placement_availability_zone' key/value - pair to add 'ansible_ec2_placement_region' key/value pair with - the EC2 region name. - """ - - # Only add a 'ansible_ec2_placement_region' key if the - # 'ansible_ec2_placement_availability_zone' exists. - zone = data.get('ansible_ec2_placement_availability_zone') - if zone is not None: - # Use the zone name as the region name unless the zone - # name starts with a known AWS region name. 
- region = zone - for r in self.AWS_REGIONS: - if zone.startswith(r): - region = r - break - data['ansible_ec2_placement_region'] = region - - def run(self): - self.fetch(self.uri_meta) # populate _data - data = self._mangle_fields(self._data, self.uri_meta) - data[self._prefix % 'user-data'] = self._fetch(self.uri_user) - data[self._prefix % 'public-key'] = self._fetch(self.uri_ssh) - self.fix_invalid_varnames(data) - self.add_ec2_region(data) - return data - -def main(): - argument_spec = url_argument_spec() - - module = AnsibleModule( - argument_spec = argument_spec, - supports_check_mode = True, - ) - - ec2_facts = Ec2Metadata(module).run() - ec2_facts_result = dict(changed=False, ansible_facts=ec2_facts) - - module.exit_json(**ec2_facts_result) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * - -main() diff --git a/library/cloud/ec2_group b/library/cloud/ec2_group deleted file mode 100644 index 1c8aa70101..0000000000 --- a/library/cloud/ec2_group +++ /dev/null @@ -1,386 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - - -DOCUMENTATION = ''' ---- -module: ec2_group -version_added: "1.3" -short_description: maintain an ec2 VPC security group. -description: - - maintains ec2 security groups. This module has a dependency on python-boto >= 2.5 -options: - name: - description: - - Name of the security group. - required: true - description: - description: - - Description of the security group. - required: true - vpc_id: - description: - - ID of the VPC to create the group in. - required: false - rules: - description: - - List of firewall inbound rules to enforce in this group (see example). - required: false - rules_egress: - description: - - List of firewall outbound rules to enforce in this group (see example). 
- required: false - version_added: "1.6" - region: - description: - - the EC2 region to use - required: false - default: null - aliases: [] - state: - version_added: "1.4" - description: - - Create or delete a security group - required: false - default: 'present' - choices: [ "present", "absent" ] - aliases: [] - purge_rules: - version_added: "1.8" - description: - - Purge existing rules on security group that are not found in rules - required: false - default: 'true' - aliases: [] - purge_rules_egress: - version_added: "1.8" - description: - - Purge existing rules_egree on security group that are not found in rules_egress - required: false - default: 'true' - aliases: [] - -extends_documentation_fragment: aws - -notes: - - If a rule declares a group_name and that group doesn't exist, it will be - automatically created. In that case, group_desc should be provided as well. - The module will refuse to create a depended-on group without a description. -''' - -EXAMPLES = ''' -- name: example ec2 group - local_action: - module: ec2_group - name: example - description: an example EC2 group - vpc_id: 12345 - region: eu-west-1a - aws_secret_key: SECRET - aws_access_key: ACCESS - rules: - - proto: tcp - from_port: 80 - to_port: 80 - cidr_ip: 0.0.0.0/0 - - proto: tcp - from_port: 22 - to_port: 22 - cidr_ip: 10.0.0.0/8 - - proto: udp - from_port: 10050 - to_port: 10050 - cidr_ip: 10.0.0.0/8 - - proto: udp - from_port: 10051 - to_port: 10051 - group_id: sg-12345678 - - proto: all - # the containing group name may be specified here - group_name: example - rules_egress: - - proto: tcp - from_port: 80 - to_port: 80 - group_name: example-other - # description to use if example-other needs to be created - group_desc: other example EC2 group -''' - -try: - import boto.ec2 -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - - -def addRulesToLookup(rules, prefix, dict): - for rule in rules: - for grant in rule.grants: - 
dict["%s-%s-%s-%s-%s-%s" % (prefix, rule.ip_protocol, rule.from_port, rule.to_port, - grant.group_id, grant.cidr_ip)] = rule - - -def get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id): - """ - Returns tuple of (group_id, ip) after validating rule params. - - rule: Dict describing a rule. - name: Name of the security group being managed. - groups: Dict of all available security groups. - - AWS accepts an ip range or a security group as target of a rule. This - function validate the rule specification and return either a non-None - group_id or a non-None ip range. - """ - - group_id = None - group_name = None - ip = None - target_group_created = False - if 'group_id' in rule and 'cidr_ip' in rule: - module.fail_json(msg="Specify group_id OR cidr_ip, not both") - elif 'group_name' in rule and 'cidr_ip' in rule: - module.fail_json(msg="Specify group_name OR cidr_ip, not both") - elif 'group_id' in rule and 'group_name' in rule: - module.fail_json(msg="Specify group_id OR group_name, not both") - elif 'group_id' in rule: - group_id = rule['group_id'] - elif 'group_name' in rule: - group_name = rule['group_name'] - if group_name in groups: - group_id = groups[group_name].id - elif group_name == name: - group_id = group.id - groups[group_id] = group - groups[group_name] = group - else: - if not rule.get('group_desc', '').strip(): - module.fail_json(msg="group %s will be automatically created by rule %s and no description was provided" % (group_name, rule)) - if not module.check_mode: - auto_group = ec2.create_security_group(group_name, rule['group_desc'], vpc_id=vpc_id) - group_id = auto_group.id - groups[group_id] = auto_group - groups[group_name] = auto_group - target_group_created = True - elif 'cidr_ip' in rule: - ip = rule['cidr_ip'] - - return group_id, ip, target_group_created - - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( - name=dict(required=True), - description=dict(required=True), - vpc_id=dict(), - 
rules=dict(), - rules_egress=dict(), - state = dict(default='present', choices=['present', 'absent']), - purge_rules=dict(default=True, required=False, type='bool'), - purge_rules_egress=dict(default=True, required=False, type='bool'), - - ) - ) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - name = module.params['name'] - description = module.params['description'] - vpc_id = module.params['vpc_id'] - rules = module.params['rules'] - rules_egress = module.params['rules_egress'] - state = module.params.get('state') - purge_rules = module.params['purge_rules'] - purge_rules_egress = module.params['purge_rules_egress'] - - changed = False - - ec2 = ec2_connect(module) - - # find the group if present - group = None - groups = {} - for curGroup in ec2.get_all_security_groups(): - groups[curGroup.id] = curGroup - groups[curGroup.name] = curGroup - - if curGroup.name == name and (vpc_id is None or curGroup.vpc_id == vpc_id): - group = curGroup - - # Ensure requested group is absent - if state == 'absent': - if group: - '''found a match, delete it''' - try: - group.delete() - except Exception, e: - module.fail_json(msg="Unable to delete security group '%s' - %s" % (group, e)) - else: - group = None - changed = True - else: - '''no match found, no changes required''' - - # Ensure requested group is present - elif state == 'present': - if group: - '''existing group found''' - # check the group parameters are correct - group_in_use = False - rs = ec2.get_all_instances() - for r in rs: - for i in r.instances: - group_in_use |= reduce(lambda x, y: x | (y.name == 'public-ssh'), i.groups, False) - - if group.description != description: - if group_in_use: - module.fail_json(msg="Group description does not match, but it is in use so cannot be changed.") - - # if the group doesn't exist, create it now - else: - '''no match found, create it''' - if not module.check_mode: - group = ec2.create_security_group(name, description, vpc_id=vpc_id) 
- - # When a group is created, an egress_rule ALLOW ALL - # to 0.0.0.0/0 is added automatically but it's not - # reflected in the object returned by the AWS API - # call. We re-read the group for getting an updated object - # amazon sometimes takes a couple seconds to update the security group so wait till it exists - while len(ec2.get_all_security_groups(filters={ 'group_id': group.id, })) == 0: - time.sleep(0.1) - - group = ec2.get_all_security_groups(group_ids=(group.id,))[0] - changed = True - else: - module.fail_json(msg="Unsupported state requested: %s" % state) - - # create a lookup for all existing rules on the group - if group: - - # Manage ingress rules - groupRules = {} - addRulesToLookup(group.rules, 'in', groupRules) - - # Now, go through all provided rules and ensure they are there. - if rules: - for rule in rules: - group_id, ip, target_group_created = get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id) - if target_group_created: - changed = True - - if rule['proto'] in ('all', '-1', -1): - rule['proto'] = -1 - rule['from_port'] = None - rule['to_port'] = None - - # If rule already exists, don't later delete it - ruleId = "%s-%s-%s-%s-%s-%s" % ('in', rule['proto'], rule['from_port'], rule['to_port'], group_id, ip) - if ruleId in groupRules: - del groupRules[ruleId] - # Otherwise, add new rule - else: - grantGroup = None - if group_id: - grantGroup = groups[group_id] - - if not module.check_mode: - group.authorize(rule['proto'], rule['from_port'], rule['to_port'], ip, grantGroup) - changed = True - - # Finally, remove anything left in the groupRules -- these will be defunct rules - if purge_rules: - for rule in groupRules.itervalues() : - for grant in rule.grants: - grantGroup = None - if grant.group_id: - grantGroup = groups[grant.group_id] - if not module.check_mode: - group.revoke(rule.ip_protocol, rule.from_port, rule.to_port, grant.cidr_ip, grantGroup) - changed = True - - # Manage egress rules - groupRules = {} - 
addRulesToLookup(group.rules_egress, 'out', groupRules) - - # Now, go through all provided rules and ensure they are there. - if rules_egress: - for rule in rules_egress: - group_id, ip, target_group_created = get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id) - if target_group_created: - changed = True - - if rule['proto'] in ('all', '-1', -1): - rule['proto'] = -1 - rule['from_port'] = None - rule['to_port'] = None - - # If rule already exists, don't later delete it - ruleId = "%s-%s-%s-%s-%s-%s" % ('out', rule['proto'], rule['from_port'], rule['to_port'], group_id, ip) - if ruleId in groupRules: - del groupRules[ruleId] - # Otherwise, add new rule - else: - grantGroup = None - if group_id: - grantGroup = groups[group_id].id - - if not module.check_mode: - ec2.authorize_security_group_egress( - group_id=group.id, - ip_protocol=rule['proto'], - from_port=rule['from_port'], - to_port=rule['to_port'], - src_group_id=grantGroup, - cidr_ip=ip) - changed = True - elif vpc_id and not module.check_mode: - # when using a vpc, but no egress rules are specified, - # we add in a default allow all out rule, which was the - # default behavior before egress rules were added - default_egress_rule = 'out--1-None-None-None-0.0.0.0/0' - if default_egress_rule not in groupRules: - ec2.authorize_security_group_egress( - group_id=group.id, - ip_protocol=-1, - from_port=None, - to_port=None, - src_group_id=None, - cidr_ip='0.0.0.0/0' - ) - changed = True - else: - # make sure the default egress rule is not removed - del groupRules[default_egress_rule] - - # Finally, remove anything left in the groupRules -- these will be defunct rules - if purge_rules_egress: - for rule in groupRules.itervalues(): - for grant in rule.grants: - grantGroup = None - if grant.group_id: - grantGroup = groups[grant.group_id].id - if not module.check_mode: - ec2.revoke_security_group_egress( - group_id=group.id, - ip_protocol=rule.ip_protocol, - from_port=rule.from_port, - 
to_port=rule.to_port, - src_group_id=grantGroup, - cidr_ip=grant.cidr_ip) - changed = True - - if group: - module.exit_json(changed=changed, group_id=group.id) - else: - module.exit_json(changed=changed, group_id=None) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -main() diff --git a/library/cloud/ec2_key b/library/cloud/ec2_key deleted file mode 100644 index 9c8274f764..0000000000 --- a/library/cloud/ec2_key +++ /dev/null @@ -1,238 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - - -DOCUMENTATION = ''' ---- -module: ec2_key -version_added: "1.5" -short_description: maintain an ec2 key pair. -description: - - maintains ec2 key pairs. This module has a dependency on python-boto >= 2.5 -options: - name: - description: - - Name of the key pair. - required: true - key_material: - description: - - Public key material. - required: false - region: - description: - - the EC2 region to use - required: false - default: null - aliases: [] - state: - description: - - create or delete keypair - required: false - default: 'present' - aliases: [] - wait: - description: - - Wait for the specified action to complete before returning. - required: false - default: false - aliases: [] - version_added: "1.6" - wait_timeout: - description: - - How long before wait gives up, in seconds - required: false - default: 300 - aliases: [] - version_added: "1.6" - -extends_documentation_fragment: aws -author: Vincent Viallet -''' - -EXAMPLES = ''' -# Note: None of these examples set aws_access_key, aws_secret_key, or region. -# It is assumed that their matching environment variables are set. 
- -# Creates a new ec2 key pair named `example` if not present, returns generated -# private key -- name: example ec2 key - local_action: - module: ec2_key - name: example - -# Creates a new ec2 key pair named `example` if not present using provided key -# material -- name: example2 ec2 key - local_action: - module: ec2_key - name: example2 - key_material: 'ssh-rsa AAAAxyz...== me@example.com' - state: present - -# Creates a new ec2 key pair named `example` if not present using provided key -# material -- name: example3 ec2 key - local_action: - module: ec2_key - name: example3 - key_material: "{{ item }}" - with_file: /path/to/public_key.id_rsa.pub - -# Removes ec2 key pair by name -- name: remove example key - local_action: - module: ec2_key - name: example - state: absent -''' - -try: - import boto.ec2 -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - -import random -import string - - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( - name=dict(required=True), - key_material=dict(required=False), - state = dict(default='present', choices=['present', 'absent']), - wait = dict(type='bool', default=False), - wait_timeout = dict(default=300), - ) - ) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - name = module.params['name'] - state = module.params.get('state') - key_material = module.params.get('key_material') - wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - - changed = False - - ec2 = ec2_connect(module) - - # find the key if present - key = ec2.get_key_pair(name) - - # Ensure requested key is absent - if state == 'absent': - if key: - '''found a match, delete it''' - try: - key.delete() - if wait: - start = time.time() - action_complete = False - while (time.time() - start) < wait_timeout: - if not ec2.get_key_pair(name): - action_complete = True - break - time.sleep(1) - if not action_complete: - 
module.fail_json(msg="timed out while waiting for the key to be removed") - except Exception, e: - module.fail_json(msg="Unable to delete key pair '%s' - %s" % (key, e)) - else: - key = None - changed = True - else: - '''no match found, no changes required''' - - # Ensure requested key is present - elif state == 'present': - if key: - # existing key found - if key_material: - # EC2's fingerprints are non-trivial to generate, so push this key - # to a temporary name and make ec2 calculate the fingerprint for us. - # - # http://blog.jbrowne.com/?p=23 - # https://forums.aws.amazon.com/thread.jspa?messageID=352828 - - # find an unused name - test = 'empty' - while test: - randomchars = [random.choice(string.ascii_letters + string.digits) for x in range(0,10)] - tmpkeyname = "ansible-" + ''.join(randomchars) - test = ec2.get_key_pair(tmpkeyname) - - # create tmp key - tmpkey = ec2.import_key_pair(tmpkeyname, key_material) - # get tmp key fingerprint - tmpfingerprint = tmpkey.fingerprint - # delete tmp key - tmpkey.delete() - - if key.fingerprint != tmpfingerprint: - if not module.check_mode: - key.delete() - key = ec2.import_key_pair(name, key_material) - - if wait: - start = time.time() - action_complete = False - while (time.time() - start) < wait_timeout: - if ec2.get_key_pair(name): - action_complete = True - break - time.sleep(1) - if not action_complete: - module.fail_json(msg="timed out while waiting for the key to be re-created") - - changed = True - pass - - # if the key doesn't exist, create it now - else: - '''no match found, create it''' - if not module.check_mode: - if key_material: - '''We are providing the key, need to import''' - key = ec2.import_key_pair(name, key_material) - else: - ''' - No material provided, let AWS handle the key creation and - retrieve the private key - ''' - key = ec2.create_key_pair(name) - - if wait: - start = time.time() - action_complete = False - while (time.time() - start) < wait_timeout: - if ec2.get_key_pair(name): - 
action_complete = True - break - time.sleep(1) - if not action_complete: - module.fail_json(msg="timed out while waiting for the key to be created") - - changed = True - - if key: - data = { - 'name': key.name, - 'fingerprint': key.fingerprint - } - if key.material: - data.update({'private_key': key.material}) - - module.exit_json(changed=changed, key=data) - else: - module.exit_json(changed=changed, key=None) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -main() diff --git a/library/cloud/ec2_lc b/library/cloud/ec2_lc deleted file mode 100755 index f75dfe6d93..0000000000 --- a/library/cloud/ec2_lc +++ /dev/null @@ -1,278 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = """ ---- -module: ec2_lc -short_description: Create or delete AWS Autoscaling Launch Configurations -description: - - Can create or delete AwS Autoscaling Configurations - - Works with the ec2_asg module to manage Autoscaling Groups -notes: - - "Amazon ASG Autoscaling Launch Configurations are immutable once created, so modifying the configuration - after it is changed will not modify the launch configuration on AWS. You must create a new config and assign - it to the ASG instead." 
-version_added: "1.6" -author: Gareth Rushgrove -options: - state: - description: - - register or deregister the instance - required: true - choices: ['present', 'absent'] - name: - description: - - Unique name for configuration - required: true - instance_type: - description: - - instance type to use for the instance - required: true - default: null - aliases: [] - image_id: - description: - - The AMI unique identifier to be used for the group - required: false - key_name: - description: - - The SSH key name to be used for access to managed instances - required: false - security_groups: - description: - - A list of security groups into which instances should be found - required: false - region: - description: - - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. - required: false - aliases: ['aws_region', 'ec2_region'] - volumes: - description: - - a list of volume dicts, each containing device name and optionally ephemeral id or snapshot id. Size and type (and number of iops for io device type) must be specified for a new volume or a root volume, and may be passed for a snapshot volume. For any volume, a volume size less than 1 will be interpreted as a request not to create the volume. - required: false - default: null - aliases: [] - user_data: - description: - - opaque blob of data which is made available to the ec2 instance - required: false - default: null - aliases: [] - kernel_id: - description: - - Kernel id for the EC2 instance - required: false - default: null - aliases: [] - spot_price: - description: - - The spot price you are bidding. Only applies for an autoscaling group with spot instances. - required: false - default: null - instance_monitoring: - description: - - whether instances in group are launched with detailed monitoring. 
- required: false - default: false - aliases: [] - assign_public_ip: - description: - - Used for Auto Scaling groups that launch instances into an Amazon Virtual Private Cloud. Specifies whether to assign a public IP address to each instance launched in a Amazon VPC. - required: false - default: false - aliases: [] - version_added: "1.8" - ramdisk_id: - description: - - A RAM disk id for the instances. - required: false - default: null - aliases: [] - version_added: "1.8" - instance_profile_name: - description: - - The name or the Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instances. - required: false - default: null - aliases: [] - version_added: "1.8" - ebs_optimized: - description: - - Specifies whether the instance is optimized for EBS I/O (true) or not (false). - required: false - default: false - aliases: [] - version_added: "1.8" -extends_documentation_fragment: aws -""" - -EXAMPLES = ''' -- ec2_lc: - name: special - image_id: ami-XXX - key_name: default - security_groups: 'group,group2' - instance_type: t1.micro - -''' - -import sys -import time - -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -try: - from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping - import boto.ec2.autoscale - from boto.ec2.autoscale import LaunchConfiguration - from boto.exception import BotoServerError -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - - -def create_block_device(module, volume): - # Not aware of a way to determine this programatically - # http://aws.amazon.com/about-aws/whats-new/2013/10/09/ebs-provisioned-iops-maximum-iops-gb-ratio-increased-to-30-1/ - MAX_IOPS_TO_SIZE_RATIO = 30 - if 'snapshot' not in volume and 'ephemeral' not in volume: - if 'volume_size' not in volume: - module.fail_json(msg='Size must be specified when creating a new volume or modifying the root volume') - if 'snapshot' in volume: - if 
'device_type' in volume and volume.get('device_type') == 'io1' and 'iops' not in volume: - module.fail_json(msg='io1 volumes must have an iops value set') - if 'ephemeral' in volume: - if 'snapshot' in volume: - module.fail_json(msg='Cannot set both ephemeral and snapshot') - return BlockDeviceType(snapshot_id=volume.get('snapshot'), - ephemeral_name=volume.get('ephemeral'), - size=volume.get('volume_size'), - volume_type=volume.get('device_type'), - delete_on_termination=volume.get('delete_on_termination', False), - iops=volume.get('iops')) - - -def create_launch_config(connection, module): - name = module.params.get('name') - image_id = module.params.get('image_id') - key_name = module.params.get('key_name') - security_groups = module.params['security_groups'] - user_data = module.params.get('user_data') - volumes = module.params['volumes'] - instance_type = module.params.get('instance_type') - spot_price = module.params.get('spot_price') - instance_monitoring = module.params.get('instance_monitoring') - assign_public_ip = module.params.get('assign_public_ip') - kernel_id = module.params.get('kernel_id') - ramdisk_id = module.params.get('ramdisk_id') - instance_profile_name = module.params.get('instance_profile_name') - ebs_optimized = module.params.get('ebs_optimized') - bdm = BlockDeviceMapping() - - if volumes: - for volume in volumes: - if 'device_name' not in volume: - module.fail_json(msg='Device name must be set for volume') - # Minimum volume size is 1GB. 
We'll use volume size explicitly set to 0 - # to be a signal not to create this volume - if 'volume_size' not in volume or int(volume['volume_size']) > 0: - bdm[volume['device_name']] = create_block_device(module, volume) - - lc = LaunchConfiguration( - name=name, - image_id=image_id, - key_name=key_name, - security_groups=security_groups, - user_data=user_data, - block_device_mappings=[bdm], - instance_type=instance_type, - kernel_id=kernel_id, - spot_price=spot_price, - instance_monitoring=instance_monitoring, - associate_public_ip_address = assign_public_ip, - ramdisk_id=ramdisk_id, - instance_profile_name=instance_profile_name, - ebs_optimized=ebs_optimized, - ) - - launch_configs = connection.get_all_launch_configurations(names=[name]) - changed = False - if not launch_configs: - try: - connection.create_launch_configuration(lc) - launch_configs = connection.get_all_launch_configurations(names=[name]) - changed = True - except BotoServerError, e: - module.fail_json(msg=str(e)) - result = launch_configs[0] - - module.exit_json(changed=changed, name=result.name, created_time=str(result.created_time), - image_id=result.image_id, arn=result.launch_configuration_arn, - security_groups=result.security_groups, instance_type=instance_type) - - -def delete_launch_config(connection, module): - name = module.params.get('name') - launch_configs = connection.get_all_launch_configurations(names=[name]) - if launch_configs: - launch_configs[0].delete() - module.exit_json(changed=True) - else: - module.exit_json(changed=False) - - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update( - dict( - name=dict(required=True, type='str'), - image_id=dict(type='str'), - key_name=dict(type='str'), - security_groups=dict(type='list'), - user_data=dict(type='str'), - kernel_id=dict(type='str'), - volumes=dict(type='list'), - instance_type=dict(type='str'), - state=dict(default='present', choices=['present', 'absent']), - spot_price=dict(type='float'), - 
ramdisk_id=dict(type='str'), - instance_profile_name=dict(type='str'), - ebs_optimized=dict(default=False, type='bool'), - associate_public_ip_address=dict(type='bool'), - instance_monitoring=dict(default=False, type='bool'), - assign_public_ip=dict(default=False, type='bool') - ) - ) - - module = AnsibleModule(argument_spec=argument_spec) - - region, ec2_url, aws_connect_params = get_aws_connection_info(module) - - try: - connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg=str(e)) - - state = module.params.get('state') - - if state == 'present': - create_launch_config(connection, module) - elif state == 'absent': - delete_launch_config(connection, module) - -main() diff --git a/library/cloud/ec2_metric_alarm b/library/cloud/ec2_metric_alarm deleted file mode 100644 index 519f88f24f..0000000000 --- a/library/cloud/ec2_metric_alarm +++ /dev/null @@ -1,282 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -DOCUMENTATION = """ -module: ec2_metric_alarm -short_description: "Create/update or delete AWS Cloudwatch 'metric alarms'" -description: - - Can create or delete AWS metric alarms - - Metrics you wish to alarm on must already exist -version_added: "1.6" -author: Zacharie Eakin -options: - state: - description: - - register or deregister the alarm - required: true - choices: ['present', 'absent'] - name: - desciption: - - Unique name for the alarm - required: true - metric: - description: - - Name of the monitored metric (e.g. CPUUtilization) - - Metric must already exist - required: false - namespace: - description: - - Name of the appropriate namespace ('AWS/EC2', 'System/Linux', etc.), which determines the category it will appear under in cloudwatch - required: false - statistic: - description: - - Operation applied to the metric - - Works in conjunction with period and evaluation_periods to determine the comparison value - required: false - options: ['SampleCount','Average','Sum','Minimum','Maximum'] - comparison: - description: - - Determines how the threshold value is compared - required: false - options: ['<=','<','>','>='] - threshold: - description: - - Sets the min/max bound for triggering the alarm - required: false - period: - description: - - The time (in seconds) between metric evaluations - required: false - evaluation_periods: - description: - - The number of times in which the metric is evaluated before final calculation - required: false - unit: - description: - - The threshold's unit of measurement - required: false - options: ['Seconds','Microseconds','Milliseconds','Bytes','Kilobytes','Megabytes','Gigabytes','Terabytes','Bits','Kilobits','Megabits','Gigabits','Terabits','Percent','Count','Bytes/Second','Kilobytes/Second','Megabytes/Second','Gigabytes/Second','Terabytes/Second','Bits/Second','Kilobits/Second','Megabits/Second','Gigabits/Second','Terabits/Second','Count/Second','None'] - description: - description: - - A longer desciption of 
the alarm - required: false - dimensions: - description: - - Describes to what the alarm is applied - required: false - alarm_actions: - description: - - A list of the names action(s) taken when the alarm is in the 'alarm' status - required: false - insufficient_data_actions: - description: - - A list of the names of action(s) to take when the alarm is in the 'insufficient_data' status - required: false - ok_actions: - description: - - A list of the names of action(s) to take when the alarm is in the 'ok' status - required: false -extends_documentation_fragment: aws -""" - -EXAMPLES = ''' - - name: create alarm - ec2_metric_alarm: - state: present - region: ap-southeast-2 - name: "cpu-low" - metric: "CPUUtilization" - namespace: "AWS/EC2" - statistic: Average - comparison: "<=" - threshold: 5.0 - period: 300 - evaluation_periods: 3 - unit: "Percent" - description: "This will alarm when a bamboo slave's cpu usage average is lower than 5% for 15 minutes " - dimensions: {'InstanceId':'i-XXX'} - alarm_actions: ["action1","action2"] - - -''' - -import sys - -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -try: - import boto.ec2.cloudwatch - from boto.ec2.cloudwatch import CloudWatchConnection, MetricAlarm - from boto.exception import BotoServerError -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - - -def create_metric_alarm(connection, module): - - name = module.params.get('name') - metric = module.params.get('metric') - namespace = module.params.get('namespace') - statistic = module.params.get('statistic') - comparison = module.params.get('comparison') - threshold = module.params.get('threshold') - period = module.params.get('period') - evaluation_periods = module.params.get('evaluation_periods') - unit = module.params.get('unit') - description = module.params.get('description') - dimensions = module.params.get('dimensions') - alarm_actions = module.params.get('alarm_actions') - 
insufficient_data_actions = module.params.get('insufficient_data_actions') - ok_actions = module.params.get('ok_actions') - - alarms = connection.describe_alarms(alarm_names=[name]) - - if not alarms: - - alm = MetricAlarm( - name=name, - metric=metric, - namespace=namespace, - statistic=statistic, - comparison=comparison, - threshold=threshold, - period=period, - evaluation_periods=evaluation_periods, - unit=unit, - description=description, - dimensions=dimensions, - alarm_actions=alarm_actions, - insufficient_data_actions=insufficient_data_actions, - ok_actions=ok_actions - ) - try: - connection.create_alarm(alm) - changed = True - alarms = connection.describe_alarms(alarm_names=[name]) - except BotoServerError, e: - module.fail_json(msg=str(e)) - - else: - alarm = alarms[0] - changed = False - - for attr in ('comparison','metric','namespace','statistic','threshold','period','evaluation_periods','unit','description'): - if getattr(alarm, attr) != module.params.get(attr): - changed = True - setattr(alarm, attr, module.params.get(attr)) - #this is to deal with a current bug where you cannot assign '<=>' to the comparator when modifying an existing alarm - comparison = alarm.comparison - comparisons = {'<=' : 'LessThanOrEqualToThreshold', '<' : 'LessThanThreshold', '>=' : 'GreaterThanOrEqualToThreshold', '>' : 'GreaterThanThreshold'} - alarm.comparison = comparisons[comparison] - - dim1 = module.params.get('dimensions') - dim2 = alarm.dimensions - - for keys in dim1: - if not isinstance(dim1[keys], list): - dim1[keys] = [dim1[keys]] - if dim1[keys] != dim2[keys]: - changed=True - setattr(alarm, 'dimensions', dim1) - - for attr in ('alarm_actions','insufficient_data_actions','ok_actions'): - action = module.params.get(attr) or [] - if getattr(alarm, attr) != action: - changed = True - setattr(alarm, attr, module.params.get(attr)) - - try: - if changed: - connection.create_alarm(alarm) - except BotoServerError, e: - module.fail_json(msg=str(e)) - result = alarms[0] - 
module.exit_json(changed=changed, name=result.name, - actions_enabled=result.actions_enabled, - alarm_actions=result.alarm_actions, - alarm_arn=result.alarm_arn, - comparison=result.comparison, - description=result.description, - dimensions=result.dimensions, - evaluation_periods=result.evaluation_periods, - insufficient_data_actions=result.insufficient_data_actions, - last_updated=result.last_updated, - metric=result.metric, - namespace=result.namespace, - ok_actions=result.ok_actions, - period=result.period, - state_reason=result.state_reason, - state_value=result.state_value, - statistic=result.statistic, - threshold=result.threshold, - unit=result.unit) - -def delete_metric_alarm(connection, module): - name = module.params.get('name') - - alarms = connection.describe_alarms(alarm_names=[name]) - - if alarms: - try: - connection.delete_alarms([name]) - module.exit_json(changed=True) - except BotoServerError, e: - module.fail_json(msg=str(e)) - else: - module.exit_json(changed=False) - - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update( - dict( - name=dict(required=True, type='str'), - metric=dict(type='str'), - namespace=dict(type='str'), - statistic=dict(type='str', choices=['SampleCount', 'Average', 'Sum', 'Minimum', 'Maximum']), - comparison=dict(type='str', choices=['<=', '<', '>', '>=']), - threshold=dict(type='float'), - period=dict(type='int'), - unit=dict(type='str', choices=['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes', 'Terabytes', 'Bits', 'Kilobits', 'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count', 'Bytes/Second', 'Kilobytes/Second', 'Megabytes/Second', 'Gigabytes/Second', 'Terabytes/Second', 'Bits/Second', 'Kilobits/Second', 'Megabits/Second', 'Gigabits/Second', 'Terabits/Second', 'Count/Second', 'None']), - evaluation_periods=dict(type='int'), - description=dict(type='str'), - dimensions=dict(type='dict'), - alarm_actions=dict(type='list'), - 
insufficient_data_actions=dict(type='list'), - ok_actions=dict(type='list'), - state=dict(default='present', choices=['present', 'absent']), - region=dict(aliases=['aws_region', 'ec2_region'], choices=AWS_REGIONS), - ) - ) - - module = AnsibleModule(argument_spec=argument_spec) - - state = module.params.get('state') - - region, ec2_url, aws_connect_params = get_aws_connection_info(module) - try: - connection = connect_to_aws(boto.ec2.cloudwatch, region, **aws_connect_params) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg=str(e)) - - if state == 'present': - create_metric_alarm(connection, module) - elif state == 'absent': - delete_metric_alarm(connection, module) - -main() diff --git a/library/cloud/ec2_scaling_policy b/library/cloud/ec2_scaling_policy deleted file mode 100755 index ad1fa7ce7f..0000000000 --- a/library/cloud/ec2_scaling_policy +++ /dev/null @@ -1,177 +0,0 @@ -#!/usr/bin/python - -DOCUMENTATION = """ -module: ec2_scaling_policy -short_description: Create or delete AWS scaling policies for Autoscaling groups -description: - - Can create or delete scaling policies for autoscaling groups - - Referenced autoscaling groups must already exist -version_added: "1.6" -author: Zacharie Eakin -options: - state: - description: - - register or deregister the policy - required: true - choices: ['present', 'absent'] - name: - description: - - Unique name for the scaling policy - required: true - asg_name: - description: - - Name of the associated autoscaling group - required: true - adjustment_type: - desciption: - - The type of change in capacity of the autoscaling group - required: false - choices: ['ChangeInCapacity','ExactCapacity','PercentChangeInCapacity'] - scaling_adjustment: - description: - - The amount by which the autoscaling group is adjusted by the policy - required: false - min_adjustment_step: - description: - - Minimum amount of adjustment when policy is triggered - required: false - cooldown: - description: - - The minimum 
period of time between which autoscaling actions can take place - required: false -extends_documentation_fragment: aws -""" - -EXAMPLES = ''' -- ec2_scaling_policy: - state: present - region: US-XXX - name: "scaledown-policy" - adjustment_type: "ChangeInCapacity" - asg_name: "slave-pool" - scaling_adjustment: -1 - min_adjustment_step: 1 - cooldown: 300 -''' - - -import sys - -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -try: - import boto.ec2.autoscale - from boto.ec2.autoscale import ScalingPolicy - from boto.exception import BotoServerError - -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - - -def create_scaling_policy(connection, module): - sp_name = module.params.get('name') - adjustment_type = module.params.get('adjustment_type') - asg_name = module.params.get('asg_name') - scaling_adjustment = module.params.get('scaling_adjustment') - min_adjustment_step = module.params.get('min_adjustment_step') - cooldown = module.params.get('cooldown') - - scalingPolicies = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name]) - - if not scalingPolicies: - sp = ScalingPolicy( - name=sp_name, - adjustment_type=adjustment_type, - as_name=asg_name, - scaling_adjustment=scaling_adjustment, - min_adjustment_step=min_adjustment_step, - cooldown=cooldown) - - try: - connection.create_scaling_policy(sp) - policy = connection.get_all_policies(policy_names=[sp_name])[0] - module.exit_json(changed=True, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment, cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step) - except BotoServerError, e: - module.fail_json(msg=str(e)) - else: - policy = scalingPolicies[0] - changed = False - - # min_adjustment_step attribute is only relevant if the adjustment_type - # is set to percentage change in capacity, so it is a special case - if 
getattr(policy, 'adjustment_type') == 'PercentChangeInCapacity': - if getattr(policy, 'min_adjustment_step') != module.params.get('min_adjustment_step'): - changed = True - - # set the min adjustment step incase the user decided to change their - # adjustment type to percentage - setattr(policy, 'min_adjustment_step', module.params.get('min_adjustment_step')) - - # check the remaining attributes - for attr in ('adjustment_type','scaling_adjustment','cooldown'): - if getattr(policy, attr) != module.params.get(attr): - changed = True - setattr(policy, attr, module.params.get(attr)) - - try: - if changed: - connection.create_scaling_policy(policy) - policy = connection.get_all_policies(policy_names=[sp_name])[0] - module.exit_json(changed=changed, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment, cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step) - except BotoServerError, e: - module.fail_json(msg=str(e)) - - -def delete_scaling_policy(connection, module): - sp_name = module.params.get('name') - asg_name = module.params.get('asg_name') - - scalingPolicies = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name]) - - if scalingPolicies: - try: - connection.delete_policy(sp_name, asg_name) - module.exit_json(changed=True) - except BotoServerError, e: - module.exit_json(changed=False, msg=str(e)) - else: - module.exit_json(changed=False) - - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update( - dict( - name = dict(required=True, type='str'), - adjustment_type = dict(type='str', choices=['ChangeInCapacity','ExactCapacity','PercentChangeInCapacity']), - asg_name = dict(required=True, type='str'), - scaling_adjustment = dict(type='int'), - min_adjustment_step = dict(type='int'), - cooldown = dict(type='int'), - region = dict(aliases=['aws_region', 'ec2_region'], choices=AWS_REGIONS), - state=dict(default='present', 
choices=['present', 'absent']), - ) - ) - - module = AnsibleModule(argument_spec=argument_spec) - - region, ec2_url, aws_connect_params = get_aws_connection_info(module) - - state = module.params.get('state') - - try: - connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params) - if not connection: - module.fail_json(msg="failed to connect to AWS for the given region: %s" % str(region)) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg = str(e)) - - if state == 'present': - create_scaling_policy(connection, module) - elif state == 'absent': - delete_scaling_policy(connection, module) - - -main() diff --git a/library/cloud/ec2_snapshot b/library/cloud/ec2_snapshot deleted file mode 100644 index a37aadb95e..0000000000 --- a/library/cloud/ec2_snapshot +++ /dev/null @@ -1,151 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: ec2_snapshot -short_description: creates a snapshot from an existing volume -description: - - creates an EC2 snapshot from an existing EBS volume -version_added: "1.5" -options: - region: - description: - - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. 
- required: false - aliases: ['aws_region', 'ec2_region'] - volume_id: - description: - - volume from which to take the snapshot - required: false - description: - description: - - description to be applied to the snapshot - required: false - instance_id: - description: - - instance that has the required volume to snapshot mounted - required: false - device_name: - description: - - device name of a mounted volume to be snapshotted - required: false - snapshot_tags: - description: - - a hash/dictionary of tags to add to the snapshot - required: false - version_added: "1.6" - -author: Will Thames -extends_documentation_fragment: aws -''' - -EXAMPLES = ''' -# Simple snapshot of volume using volume_id -- local_action: - module: ec2_snapshot - volume_id: vol-abcdef12 - description: snapshot of /data from DB123 taken 2013/11/28 12:18:32 - -# Snapshot of volume mounted on device_name attached to instance_id -- local_action: - module: ec2_snapshot - instance_id: i-12345678 - device_name: /dev/sdb1 - description: snapshot of /data from DB123 taken 2013/11/28 12:18:32 - -# Snapshot of volume with tagging -- local_action: - module: ec2_snapshot - instance_id: i-12345678 - device_name: /dev/sdb1 - snapshot_tags: - frequency: hourly - source: /data -''' - -import sys -import time - -try: - import boto.ec2 -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update( - dict( - volume_id = dict(), - description = dict(), - instance_id = dict(), - device_name = dict(), - wait = dict(type='bool', default='true'), - wait_timeout = dict(default=0), - snapshot_tags = dict(type='dict', default=dict()), - ) - ) - module = AnsibleModule(argument_spec=argument_spec) - - volume_id = module.params.get('volume_id') - description = module.params.get('description') - instance_id = module.params.get('instance_id') - device_name = module.params.get('device_name') - wait = 
module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - snapshot_tags = module.params.get('snapshot_tags') - - if not volume_id and not instance_id or volume_id and instance_id: - module.fail_json('One and only one of volume_id or instance_id must be specified') - if instance_id and not device_name or device_name and not instance_id: - module.fail_json('Instance ID and device name must both be specified') - - ec2 = ec2_connect(module) - - if instance_id: - try: - volumes = ec2.get_all_volumes(filters={'attachment.instance-id': instance_id, 'attachment.device': device_name}) - if not volumes: - module.fail_json(msg="Could not find volume with name %s attached to instance %s" % (device_name, instance_id)) - volume_id = volumes[0].id - except boto.exception.BotoServerError, e: - module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) - - try: - snapshot = ec2.create_snapshot(volume_id, description=description) - time_waited = 0 - if wait: - snapshot.update() - while snapshot.status != 'completed': - time.sleep(3) - snapshot.update() - time_waited += 3 - if wait_timeout and time_waited > wait_timeout: - module.fail_json('Timed out while creating snapshot.') - for k, v in snapshot_tags.items(): - snapshot.add_tag(k, v) - except boto.exception.BotoServerError, e: - module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) - - module.exit_json(changed=True, snapshot_id=snapshot.id, volume_id=snapshot.volume_id, - volume_size=snapshot.volume_size, tags=snapshot.tags.copy()) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -main() diff --git a/library/cloud/ec2_tag b/library/cloud/ec2_tag deleted file mode 100644 index 4a33112189..0000000000 --- a/library/cloud/ec2_tag +++ /dev/null @@ -1,152 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public 
License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: ec2_tag -short_description: create and remove tag(s) to ec2 resources. -description: - - Creates, removes and lists tags from any EC2 resource. The resource is referenced by its resource id (e.g. an instance being i-XXXXXXX). It is designed to be used with complex args (tags), see the examples. This module has a dependency on python-boto. -version_added: "1.3" -options: - resource: - description: - - The EC2 resource id. - required: true - default: null - aliases: [] - state: - description: - - Whether the tags should be present or absent on the resource. Use list to interrogate the tags of an instance. - required: false - default: present - choices: ['present', 'absent', 'list'] - aliases: [] - region: - description: - - region in which the resource exists. 
- required: false - default: null - aliases: ['aws_region', 'ec2_region'] - -author: Lester Wade -extends_documentation_fragment: aws -''' - -EXAMPLES = ''' -# Basic example of adding tag(s) -tasks: -- name: tag a resource - local_action: ec2_tag resource=vol-XXXXXX region=eu-west-1 state=present - args: - tags: - Name: ubervol - env: prod - -# Playbook example of adding tag(s) to spawned instances -tasks: -- name: launch some instances - local_action: ec2 keypair={{ keypair }} group={{ security_group }} instance_type={{ instance_type }} image={{ image_id }} wait=true region=eu-west-1 - register: ec2 - -- name: tag my launched instances - local_action: ec2_tag resource={{ item.id }} region=eu-west-1 state=present - with_items: ec2.instances - args: - tags: - Name: webserver - env: prod -''' - -# Note: this module needs to be made idempotent. Possible solution is to use resource tags with the volumes. -# if state=present and it doesn't exist, create, tag and attach. -# Check for state by looking for volume attachment with tag (and against block device mapping?). -# Would personally like to revisit this in May when Eucalyptus also has tagging support (3.3). - -import sys -import time - -try: - import boto.ec2 -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( - resource = dict(required=True), - tags = dict(), - state = dict(default='present', choices=['present', 'absent', 'list']), - ) - ) - module = AnsibleModule(argument_spec=argument_spec) - - resource = module.params.get('resource') - tags = module.params.get('tags') - state = module.params.get('state') - - ec2 = ec2_connect(module) - - # We need a comparison here so that we can accurately report back changed status. - # Need to expand the gettags return format and compare with "tags" and then tag or detag as appropriate. 
- filters = {'resource-id' : resource} - gettags = ec2.get_all_tags(filters=filters) - - dictadd = {} - dictremove = {} - baddict = {} - tagdict = {} - for tag in gettags: - tagdict[tag.name] = tag.value - - if state == 'present': - if not tags: - module.fail_json(msg="tags argument is required when state is present") - if set(tags.items()).issubset(set(tagdict.items())): - module.exit_json(msg="Tags already exists in %s." %resource, changed=False) - else: - for (key, value) in set(tags.items()): - if (key, value) not in set(tagdict.items()): - dictadd[key] = value - tagger = ec2.create_tags(resource, dictadd) - gettags = ec2.get_all_tags(filters=filters) - module.exit_json(msg="Tags %s created for resource %s." % (dictadd,resource), changed=True) - - if state == 'absent': - if not tags: - module.fail_json(msg="tags argument is required when state is absent") - for (key, value) in set(tags.items()): - if (key, value) not in set(tagdict.items()): - baddict[key] = value - if set(baddict) == set(tags): - module.exit_json(msg="Nothing to remove here. Move along.", changed=False) - for (key, value) in set(tags.items()): - if (key, value) in set(tagdict.items()): - dictremove[key] = value - tagger = ec2.delete_tags(resource, dictremove) - gettags = ec2.get_all_tags(filters=filters) - module.exit_json(msg="Tags %s removed for resource %s." 
% (dictremove,resource), changed=True) - - if state == 'list': - module.exit_json(changed=False, tags=tagdict) - sys.exit(0) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -main() diff --git a/library/cloud/ec2_vol b/library/cloud/ec2_vol deleted file mode 100644 index 0e662a77bd..0000000000 --- a/library/cloud/ec2_vol +++ /dev/null @@ -1,434 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: ec2_vol -short_description: create and attach a volume, return volume id and device map -description: - - creates an EBS volume and optionally attaches it to an instance. If both an instance ID and a device name is given and the instance has a device at the device name, then no volume is created and no attachment is made. This module has a dependency on python-boto. -version_added: "1.1" -options: - instance: - description: - - instance ID if you wish to attach the volume. 
- required: false - default: null - aliases: [] - name: - description: - - volume Name tag if you wish to attach an existing volume (requires instance) - required: false - default: null - aliases: [] - version_added: "1.6" - id: - description: - - volume id if you wish to attach an existing volume (requires instance) or remove an existing volume - required: false - default: null - aliases: [] - version_added: "1.6" - volume_size: - description: - - size of volume (in GB) to create. - required: false - default: null - aliases: [] - iops: - description: - - the provisioned IOPs you want to associate with this volume (integer). - required: false - default: 100 - aliases: [] - version_added: "1.3" - encrypted: - description: - - Enable encryption at rest for this volume. - default: false - version_added: "1.8" - device_name: - description: - - device id to override device mapping. Assumes /dev/sdf for Linux/UNIX and /dev/xvdf for Windows. - required: false - default: null - aliases: [] - region: - description: - - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. - required: false - default: null - aliases: ['aws_region', 'ec2_region'] - zone: - description: - - zone in which to create the volume, if unset uses the zone the instance is in (if set) - required: false - default: null - aliases: ['aws_zone', 'ec2_zone'] - snapshot: - description: - - snapshot ID on which to base the volume - required: false - default: null - version_added: "1.5" - validate_certs: - description: - - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0. - required: false - default: "yes" - choices: ["yes", "no"] - aliases: [] - version_added: "1.5" - state: - description: - - whether to ensure the volume is present or absent, or to list existing volumes (The C(list) option was added in version 1.8). 
- required: false - default: present - choices: ['absent', 'present', 'list'] - version_added: "1.6" -author: Lester Wade -extends_documentation_fragment: aws -''' - -EXAMPLES = ''' -# Simple attachment action -- local_action: - module: ec2_vol - instance: XXXXXX - volume_size: 5 - device_name: sdd - -# Example using custom iops params -- local_action: - module: ec2_vol - instance: XXXXXX - volume_size: 5 - iops: 200 - device_name: sdd - -# Example using snapshot id -- local_action: - module: ec2_vol - instance: XXXXXX - snapshot: "{{ snapshot }}" - -# Playbook example combined with instance launch -- local_action: - module: ec2 - keypair: "{{ keypair }}" - image: "{{ image }}" - wait: yes - count: 3 - register: ec2 -- local_action: - module: ec2_vol - instance: "{{ item.id }} " - volume_size: 5 - with_items: ec2.instances - register: ec2_vol - -# Example: Launch an instance and then add a volue if not already present -# * Nothing will happen if the volume is already attached. -# * Volume must exist in the same zone. - -- local_action: - module: ec2 - keypair: "{{ keypair }}" - image: "{{ image }}" - zone: YYYYYY - id: my_instance - wait: yes - count: 1 - register: ec2 - -- local_action: - module: ec2_vol - instance: "{{ item.id }}" - name: my_existing_volume_Name_tag - device_name: /dev/xvdf - with_items: ec2.instances - register: ec2_vol - -# Remove a volume -- local_action: - module: ec2_vol - id: vol-XXXXXXXX - state: absent - -# List volumes for an instance -- local_action: - module: ec2_vol - instance: i-XXXXXX - state: list -''' - -# Note: this module needs to be made idempotent. Possible solution is to use resource tags with the volumes. -# if state=present and it doesn't exist, create, tag and attach. -# Check for state by looking for volume attachment with tag (and against block device mapping?). -# Would personally like to revisit this in May when Eucalyptus also has tagging support (3.3). 
- -import sys -import time - -from distutils.version import LooseVersion - -try: - import boto.ec2 -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - -def get_volume(module, ec2): - name = module.params.get('name') - id = module.params.get('id') - zone = module.params.get('zone') - filters = {} - volume_ids = None - if zone: - filters['availability_zone'] = zone - if name: - filters = {'tag:Name': name} - if id: - volume_ids = [id] - try: - vols = ec2.get_all_volumes(volume_ids=volume_ids, filters=filters) - except boto.exception.BotoServerError, e: - module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) - - if not vols: - module.fail_json(msg="Could not find volume in zone (if specified): %s" % name or id) - if len(vols) > 1: - module.fail_json(msg="Found more than one volume in zone (if specified) with name: %s" % name) - return vols[0] - -def get_volumes(module, ec2): - instance = module.params.get('instance') - - if not instance: - module.fail_json(msg = "Instance must be specified to get volumes") - - try: - vols = ec2.get_all_volumes(filters={'attachment.instance-id': instance}) - except boto.exception.BotoServerError, e: - module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) - return vols - -def delete_volume(module, ec2): - vol = get_volume(module, ec2) - if not vol: - module.exit_json(changed=False) - else: - if vol.attachment_state() is not None: - adata = vol.attach_data - module.fail_json(msg="Volume %s is attached to an instance %s." 
% (vol.id, adata.instance_id)) - ec2.delete_volume(vol.id) - module.exit_json(changed=True) - -def boto_supports_volume_encryption(): - """ - Check if Boto library supports encryption of EBS volumes (added in 2.29.0) - - Returns: - True if boto library has the named param as an argument on the request_spot_instances method, else False - """ - return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.29.0') - -def create_volume(module, ec2, zone): - name = module.params.get('name') - id = module.params.get('id') - instance = module.params.get('instance') - iops = module.params.get('iops') - encrypted = module.params.get('encrypted') - volume_size = module.params.get('volume_size') - snapshot = module.params.get('snapshot') - # If custom iops is defined we use volume_type "io1" rather than the default of "standard" - if iops: - volume_type = 'io1' - else: - volume_type = 'standard' - - # If no instance supplied, try volume creation based on module parameters. - if name or id: - if not instance: - module.fail_json(msg = "If name or id is specified, instance must also be specified") - if iops or volume_size: - module.fail_json(msg = "Parameters are not compatible: [id or name] and [iops or volume_size]") - - volume = get_volume(module, ec2) - if volume.attachment_state() is not None: - adata = volume.attach_data - if adata.instance_id != instance: - module.fail_json(msg = "Volume %s is already attached to another instance: %s" - % (name or id, adata.instance_id)) - else: - module.exit_json(msg="Volume %s is already mapped on instance %s: %s" % - (name or id, adata.instance_id, adata.device), - volume_id=id, - device=adata.device, - changed=False) - else: - try: - if boto_supports_volume_encryption(): - volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops, encrypted) - else: - volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops) - - while volume.status != 'available': - time.sleep(3) - volume.update() - 
except boto.exception.BotoServerError, e: - module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) - return volume - - -def attach_volume(module, ec2, volume, instance): - device_name = module.params.get('device_name') - - if device_name and instance: - try: - attach = volume.attach(instance.id, device_name) - while volume.attachment_state() != 'attached': - time.sleep(3) - volume.update() - except boto.exception.BotoServerError, e: - module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) - - # If device_name isn't set, make a choice based on best practices here: - # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html - - # In future this needs to be more dynamic but combining block device mapping best practices - # (bounds for devices, as above) with instance.block_device_mapping data would be tricky. For me ;) - - # Use password data attribute to tell whether the instance is Windows or Linux - if device_name is None and instance: - try: - if not ec2.get_password_data(instance.id): - device_name = '/dev/sdf' - attach = volume.attach(instance.id, device_name) - while volume.attachment_state() != 'attached': - time.sleep(3) - volume.update() - else: - device_name = '/dev/xvdf' - attach = volume.attach(instance.id, device_name) - while volume.attachment_state() != 'attached': - time.sleep(3) - volume.update() - except boto.exception.BotoServerError, e: - module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) - - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( - instance = dict(), - id = dict(), - name = dict(), - volume_size = dict(), - iops = dict(), - encrypted = dict(), - device_name = dict(), - zone = dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']), - snapshot = dict(), - state = dict(choices=['absent', 'present', 'list'], default='present') - ) - ) - module = AnsibleModule(argument_spec=argument_spec) - - id = module.params.get('id') - name = 
module.params.get('name') - instance = module.params.get('instance') - volume_size = module.params.get('volume_size') - iops = module.params.get('iops') - encrypted = module.params.get('encrypted') - device_name = module.params.get('device_name') - zone = module.params.get('zone') - snapshot = module.params.get('snapshot') - state = module.params.get('state') - - ec2 = ec2_connect(module) - - if state == 'list': - returned_volumes = [] - vols = get_volumes(module, ec2) - - for v in vols: - attachment = v.attach_data - - returned_volumes.append({ - 'create_time': v.create_time, - 'id': v.id, - 'iops': v.iops, - 'size': v.size, - 'snapshot_id': v.snapshot_id, - 'status': v.status, - 'type': v.type, - 'zone': v.zone, - 'attachment_set': { - 'attach_time': attachment.attach_time, - 'device': attachment.device, - 'status': attachment.status - } - }) - - module.exit_json(changed=False, volumes=returned_volumes) - - if id and name: - module.fail_json(msg="Both id and name cannot be specified") - - if encrypted and not boto_supports_volume_encryption(): - module.fail_json(msg="You must use boto >= v2.29.0 to use encrypted volumes") - - # Here we need to get the zone info for the instance. This covers situation where - # instance is specified but zone isn't. - # Useful for playbooks chaining instance launch with volume create + attach and where the - # zone doesn't matter to the user. - if instance: - reservation = ec2.get_all_instances(instance_ids=instance) - inst = reservation[0].instances[0] - zone = inst.placement - - # Check if there is a volume already mounted there. 
- if device_name: - if device_name in inst.block_device_mapping: - module.exit_json(msg="Volume mapping for %s already exists on instance %s" % (device_name, instance), - volume_id=inst.block_device_mapping[device_name].volume_id, - device=device_name, - changed=False) - - # Delaying the checks until after the instance check allows us to get volume ids for existing volumes - # without needing to pass an unused volume_size - if not volume_size and not (id or name): - module.fail_json(msg="You must specify an existing volume with id or name or a volume_size") - - if volume_size and (id or name): - module.fail_json(msg="Cannot specify volume_size and either one of name or id") - - - if state == 'absent': - delete_volume(module, ec2) - - if state == 'present': - volume = create_volume(module, ec2, zone) - if instance: - attach_volume(module, ec2, volume, inst) - module.exit_json(volume_id=volume.id, device=device_name) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -main() diff --git a/library/cloud/ec2_vpc b/library/cloud/ec2_vpc deleted file mode 100644 index e4dc9a65f7..0000000000 --- a/library/cloud/ec2_vpc +++ /dev/null @@ -1,626 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -DOCUMENTATION = ''' ---- -module: ec2_vpc -short_description: configure AWS virtual private clouds -description: - - Create or terminates AWS virtual private clouds. This module has a dependency on python-boto. -version_added: "1.4" -options: - cidr_block: - description: - - "The cidr block representing the VPC, e.g. 10.0.0.0/16" - required: false, unless state=present - instance_tenancy: - description: - - "The supported tenancy options for instances launched into the VPC." - required: false - default: "default" - choices: [ "default", "dedicated" ] - dns_support: - description: - - toggles the "Enable DNS resolution" flag - required: false - default: "yes" - choices: [ "yes", "no" ] - dns_hostnames: - description: - - toggles the "Enable DNS hostname support for instances" flag - required: false - default: "yes" - choices: [ "yes", "no" ] - subnets: - description: - - 'A dictionary array of subnets to add of the form: { cidr: ..., az: ... , resource_tags: ... }. Where az is the desired availability zone of the subnet, but it is not required. Tags (i.e.: resource_tags) is also optional and use dictionary form: { "Environment":"Dev", "Tier":"Web", ...}. All VPC subnets not in this list will be removed. As of 1.8, if the subnets parameter is not specified, no existing subnets will be modified.' - required: false - default: null - aliases: [] - vpc_id: - description: - - A VPC id to terminate when state=absent - required: false - default: null - aliases: [] - resource_tags: - description: - - 'A dictionary array of resource tags of the form: { tag1: value1, tag2: value2 }. Tags in this list are used in conjunction with CIDR block to uniquely identify a VPC in lieu of vpc_id. Therefore, if CIDR/Tag combination does not exits, a new VPC will be created. VPC tags not on this list will be ignored. Prior to 1.7, specifying a resource tag was optional.' 
- required: true - default: null - aliases: [] - version_added: "1.6" - internet_gateway: - description: - - Toggle whether there should be an Internet gateway attached to the VPC - required: false - default: "no" - choices: [ "yes", "no" ] - aliases: [] - route_tables: - description: - - 'A dictionary array of route tables to add of the form: { subnets: [172.22.2.0/24, 172.22.3.0/24,], routes: [{ dest: 0.0.0.0/0, gw: igw},] }. Where the subnets list is those subnets the route table should be associated with, and the routes list is a list of routes to be in the table. The special keyword for the gw of igw specifies that you should the route should go through the internet gateway attached to the VPC. gw also accepts instance-ids in addition igw. This module is currently unable to affect the "main" route table due to some limitations in boto, so you must explicitly define the associated subnets or they will be attached to the main table implicitly. As of 1.8, if the route_tables parameter is not specified, no existing routes will be modified.' - required: false - default: null - aliases: [] - wait: - description: - - wait for the VPC to be in state 'available' before returning - required: false - default: "no" - choices: [ "yes", "no" ] - aliases: [] - wait_timeout: - description: - - how long before wait gives up, in seconds - default: 300 - aliases: [] - state: - description: - - Create or terminate the VPC - required: true - default: present - aliases: [] - region: - description: - - region in which the resource exists. - required: false - default: null - aliases: ['aws_region', 'ec2_region'] - aws_secret_key: - description: - - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. - required: false - default: None - aliases: ['ec2_secret_key', 'secret_key' ] - aws_access_key: - description: - - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. 
- required: false - default: None - aliases: ['ec2_access_key', 'access_key' ] - validate_certs: - description: - - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0. - required: false - default: "yes" - choices: ["yes", "no"] - aliases: [] - version_added: "1.5" - -requirements: [ "boto" ] -author: Carson Gee -''' - -EXAMPLES = ''' -# Note: None of these examples set aws_access_key, aws_secret_key, or region. -# It is assumed that their matching environment variables are set. - -# Basic creation example: - local_action: - module: ec2_vpc - state: present - cidr_block: 172.23.0.0/16 - resource_tags: { "Environment":"Development" } - region: us-west-2 -# Full creation example with subnets and optional availability zones. -# The absence or presence of subnets deletes or creates them respectively. - local_action: - module: ec2_vpc - state: present - cidr_block: 172.22.0.0/16 - resource_tags: { "Environment":"Development" } - subnets: - - cidr: 172.22.1.0/24 - az: us-west-2c - resource_tags: { "Environment":"Dev", "Tier" : "Web" } - - cidr: 172.22.2.0/24 - az: us-west-2b - resource_tags: { "Environment":"Dev", "Tier" : "App" } - - cidr: 172.22.3.0/24 - az: us-west-2a - resource_tags: { "Environment":"Dev", "Tier" : "DB" } - internet_gateway: True - route_tables: - - subnets: - - 172.22.2.0/24 - - 172.22.3.0/24 - routes: - - dest: 0.0.0.0/0 - gw: igw - - subnets: - - 172.22.1.0/24 - routes: - - dest: 0.0.0.0/0 - gw: igw - region: us-west-2 - register: vpc - -# Removal of a VPC by id - local_action: - module: ec2_vpc - state: absent - vpc_id: vpc-aaaaaaa - region: us-west-2 -If you have added elements not managed by this module, e.g. instances, NATs, etc then -the delete will fail until those dependencies are removed. 
-''' - - -import sys -import time - -try: - import boto.ec2 - import boto.vpc - from boto.exception import EC2ResponseError -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - -def get_vpc_info(vpc): - """ - Retrieves vpc information from an instance - ID and returns it as a dictionary - """ - - return({ - 'id': vpc.id, - 'cidr_block': vpc.cidr_block, - 'dhcp_options_id': vpc.dhcp_options_id, - 'region': vpc.region.name, - 'state': vpc.state, - }) - -def find_vpc(module, vpc_conn, vpc_id=None, cidr=None): - """ - Finds a VPC that matches a specific id or cidr + tags - - module : AnsibleModule object - vpc_conn: authenticated VPCConnection connection object - - Returns: - A VPC object that matches either an ID or CIDR and one or more tag values - """ - - if vpc_id == None and cidr == None: - module.fail_json( - msg='You must specify either a vpc_id or a cidr block + list of unique tags, aborting' - ) - - found_vpcs = [] - - resource_tags = module.params.get('resource_tags') - - # Check for existing VPC by cidr_block or id - if vpc_id is not None: - found_vpcs = vpc_conn.get_all_vpcs(None, {'vpc-id': vpc_id, 'state': 'available',}) - - else: - previous_vpcs = vpc_conn.get_all_vpcs(None, {'cidr': cidr, 'state': 'available'}) - - for vpc in previous_vpcs: - # Get all tags for each of the found VPCs - vpc_tags = dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': vpc.id})) - - # If the supplied list of ID Tags match a subset of the VPC Tags, we found our VPC - if resource_tags and set(resource_tags.items()).issubset(set(vpc_tags.items())): - found_vpcs.append(vpc) - - found_vpc = None - - if len(found_vpcs) == 1: - found_vpc = found_vpcs[0] - - if len(found_vpcs) > 1: - module.fail_json(msg='Found more than one vpc based on the supplied criteria, aborting') - - return (found_vpc) - -def create_vpc(module, vpc_conn): - """ - Creates a new or modifies an existing VPC. 
- - module : AnsibleModule object - vpc_conn: authenticated VPCConnection connection object - - Returns: - A dictionary with information - about the VPC and subnets that were launched - """ - - id = module.params.get('vpc_id') - cidr_block = module.params.get('cidr_block') - instance_tenancy = module.params.get('instance_tenancy') - dns_support = module.params.get('dns_support') - dns_hostnames = module.params.get('dns_hostnames') - subnets = module.params.get('subnets') - internet_gateway = module.params.get('internet_gateway') - route_tables = module.params.get('route_tables') - vpc_spec_tags = module.params.get('resource_tags') - wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - changed = False - - # Check for existing VPC by cidr_block + tags or id - previous_vpc = find_vpc(module, vpc_conn, id, cidr_block) - - if previous_vpc is not None: - changed = False - vpc = previous_vpc - else: - changed = True - try: - vpc = vpc_conn.create_vpc(cidr_block, instance_tenancy) - # wait here until the vpc is available - pending = True - wait_timeout = time.time() + wait_timeout - while wait and wait_timeout > time.time() and pending: - try: - pvpc = vpc_conn.get_all_vpcs(vpc.id) - if hasattr(pvpc, 'state'): - if pvpc.state == "available": - pending = False - elif hasattr(pvpc[0], 'state'): - if pvpc[0].state == "available": - pending = False - # sometimes vpc_conn.create_vpc() will return a vpc that can't be found yet by vpc_conn.get_all_vpcs() - # when that happens, just wait a bit longer and try again - except boto.exception.BotoServerError, e: - if e.error_code != 'InvalidVpcID.NotFound': - raise - if pending: - time.sleep(5) - if wait and wait_timeout <= time.time(): - # waiting took too long - module.fail_json(msg = "wait for vpc availability timeout on %s" % time.asctime()) - - except boto.exception.BotoServerError, e: - module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) - - # Done with base VPC, now change to 
attributes and features. - - # Add resource tags - vpc_tags = dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': vpc.id})) - - if not set(vpc_spec_tags.items()).issubset(set(vpc_tags.items())): - new_tags = {} - - for (key, value) in set(vpc_spec_tags.items()): - if (key, value) not in set(vpc_tags.items()): - new_tags[key] = value - - if new_tags: - vpc_conn.create_tags(vpc.id, new_tags) - - - # boto doesn't appear to have a way to determine the existing - # value of the dns attributes, so we just set them. - # It also must be done one at a time. - vpc_conn.modify_vpc_attribute(vpc.id, enable_dns_support=dns_support) - vpc_conn.modify_vpc_attribute(vpc.id, enable_dns_hostnames=dns_hostnames) - - - # Process all subnet properties - if subnets is not None: - if not isinstance(subnets, list): - module.fail_json(msg='subnets needs to be a list of cidr blocks') - - current_subnets = vpc_conn.get_all_subnets(filters={ 'vpc_id': vpc.id }) - - # First add all new subnets - for subnet in subnets: - add_subnet = True - for csn in current_subnets: - if subnet['cidr'] == csn.cidr_block: - add_subnet = False - if add_subnet: - try: - new_subnet = vpc_conn.create_subnet(vpc.id, subnet['cidr'], subnet.get('az', None)) - new_subnet_tags = subnet.get('resource_tags', None) - if new_subnet_tags: - # Sometimes AWS takes its time to create a subnet and so using new subnets's id - # to create tags results in exception. 
- # boto doesn't seem to refresh 'state' of the newly created subnet, i.e.: it's always 'pending' - # so i resorted to polling vpc_conn.get_all_subnets with the id of the newly added subnet - while len(vpc_conn.get_all_subnets(filters={ 'subnet-id': new_subnet.id })) == 0: - time.sleep(0.1) - - vpc_conn.create_tags(new_subnet.id, new_subnet_tags) - - changed = True - except EC2ResponseError, e: - module.fail_json(msg='Unable to create subnet {0}, error: {1}'.format(subnet['cidr'], e)) - - # Now delete all absent subnets - for csubnet in current_subnets: - delete_subnet = True - for subnet in subnets: - if csubnet.cidr_block == subnet['cidr']: - delete_subnet = False - if delete_subnet: - try: - vpc_conn.delete_subnet(csubnet.id) - changed = True - except EC2ResponseError, e: - module.fail_json(msg='Unable to delete subnet {0}, error: {1}'.format(csubnet.cidr_block, e)) - - # Handle Internet gateway (create/delete igw) - igw = None - igws = vpc_conn.get_all_internet_gateways(filters={'attachment.vpc-id': vpc.id}) - if len(igws) > 1: - module.fail_json(msg='EC2 returned more than one Internet Gateway for id %s, aborting' % vpc.id) - if internet_gateway: - if len(igws) != 1: - try: - igw = vpc_conn.create_internet_gateway() - vpc_conn.attach_internet_gateway(igw.id, vpc.id) - changed = True - except EC2ResponseError, e: - module.fail_json(msg='Unable to create Internet Gateway, error: {0}'.format(e)) - else: - # Set igw variable to the current igw instance for use in route tables. - igw = igws[0] - else: - if len(igws) > 0: - try: - vpc_conn.detach_internet_gateway(igws[0].id, vpc.id) - vpc_conn.delete_internet_gateway(igws[0].id) - changed = True - except EC2ResponseError, e: - module.fail_json(msg='Unable to delete Internet Gateway, error: {0}'.format(e)) - - # Handle route tables - this may be worth splitting into a - # different module but should work fine here. 
The strategy to stay - # indempotent is to basically build all the route tables as - # defined, track the route table ids, and then run through the - # remote list of route tables and delete any that we didn't - # create. This shouldn't interrupt traffic in theory, but is the - # only way to really work with route tables over time that I can - # think of without using painful aws ids. Hopefully boto will add - # the replace-route-table API to make this smoother and - # allow control of the 'main' routing table. - if route_tables is not None: - if not isinstance(route_tables, list): - module.fail_json(msg='route tables need to be a list of dictionaries') - - # Work through each route table and update/create to match dictionary array - all_route_tables = [] - for rt in route_tables: - try: - new_rt = vpc_conn.create_route_table(vpc.id) - for route in rt['routes']: - route_kwargs = {} - if route['gw'] == 'igw': - if not internet_gateway: - module.fail_json( - msg='You asked for an Internet Gateway ' \ - '(igw) route, but you have no Internet Gateway' - ) - route_kwargs['gateway_id'] = igw.id - elif route['gw'].startswith('i-'): - route_kwargs['instance_id'] = route['gw'] - else: - route_kwargs['gateway_id'] = route['gw'] - vpc_conn.create_route(new_rt.id, route['dest'], **route_kwargs) - - # Associate with subnets - for sn in rt['subnets']: - rsn = vpc_conn.get_all_subnets(filters={'cidr': sn, 'vpc_id': vpc.id }) - if len(rsn) != 1: - module.fail_json( - msg='The subnet {0} to associate with route_table {1} ' \ - 'does not exist, aborting'.format(sn, rt) - ) - rsn = rsn[0] - - # Disassociate then associate since we don't have replace - old_rt = vpc_conn.get_all_route_tables( - filters={'association.subnet_id': rsn.id, 'vpc_id': vpc.id} - ) - old_rt = [ x for x in old_rt if x.id != None ] - if len(old_rt) == 1: - old_rt = old_rt[0] - association_id = None - for a in old_rt.associations: - if a.subnet_id == rsn.id: - association_id = a.id - 
vpc_conn.disassociate_route_table(association_id) - - vpc_conn.associate_route_table(new_rt.id, rsn.id) - - all_route_tables.append(new_rt) - changed = True - except EC2ResponseError, e: - module.fail_json( - msg='Unable to create and associate route table {0}, error: ' \ - '{1}'.format(rt, e) - ) - - # Now that we are good to go on our new route tables, delete the - # old ones except the 'main' route table as boto can't set the main - # table yet. - all_rts = vpc_conn.get_all_route_tables(filters={'vpc-id': vpc.id}) - for rt in all_rts: - if rt.id is None: - continue - delete_rt = True - for newrt in all_route_tables: - if newrt.id == rt.id: - delete_rt = False - break - if delete_rt: - rta = rt.associations - is_main = False - for a in rta: - if a.main: - is_main = True - break - try: - if not is_main: - vpc_conn.delete_route_table(rt.id) - changed = True - except EC2ResponseError, e: - module.fail_json(msg='Unable to delete old route table {0}, error: {1}'.format(rt.id, e)) - - vpc_dict = get_vpc_info(vpc) - created_vpc_id = vpc.id - returned_subnets = [] - current_subnets = vpc_conn.get_all_subnets(filters={ 'vpc_id': vpc.id }) - - for sn in current_subnets: - returned_subnets.append({ - 'resource_tags': dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': sn.id})), - 'cidr': sn.cidr_block, - 'az': sn.availability_zone, - 'id': sn.id, - }) - - return (vpc_dict, created_vpc_id, returned_subnets, changed) - -def terminate_vpc(module, vpc_conn, vpc_id=None, cidr=None): - """ - Terminates a VPC - - module: Ansible module object - vpc_conn: authenticated VPCConnection connection object - vpc_id: a vpc id to terminate - cidr: The cidr block of the VPC - can be used in lieu of an ID - - Returns a dictionary of VPC information - about the VPC terminated. - - If the VPC to be terminated is available - "changed" will be set to True. 
- - """ - vpc_dict = {} - terminated_vpc_id = '' - changed = False - - vpc = find_vpc(module, vpc_conn, vpc_id, cidr) - - if vpc is not None: - if vpc.state == 'available': - terminated_vpc_id=vpc.id - vpc_dict=get_vpc_info(vpc) - try: - subnets = vpc_conn.get_all_subnets(filters={'vpc_id': vpc.id}) - for sn in subnets: - vpc_conn.delete_subnet(sn.id) - - igws = vpc_conn.get_all_internet_gateways( - filters={'attachment.vpc-id': vpc.id} - ) - for igw in igws: - vpc_conn.detach_internet_gateway(igw.id, vpc.id) - vpc_conn.delete_internet_gateway(igw.id) - - rts = vpc_conn.get_all_route_tables(filters={'vpc_id': vpc.id}) - for rt in rts: - rta = rt.associations - is_main = False - for a in rta: - if a.main: - is_main = True - if not is_main: - vpc_conn.delete_route_table(rt.id) - - vpc_conn.delete_vpc(vpc.id) - except EC2ResponseError, e: - module.fail_json( - msg='Unable to delete VPC {0}, error: {1}'.format(vpc.id, e) - ) - changed = True - - return (changed, vpc_dict, terminated_vpc_id) - - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( - cidr_block = dict(), - instance_tenancy = dict(choices=['default', 'dedicated'], default='default'), - wait = dict(type='bool', default=False), - wait_timeout = dict(default=300), - dns_support = dict(type='bool', default=True), - dns_hostnames = dict(type='bool', default=True), - subnets = dict(type='list'), - vpc_id = dict(), - internet_gateway = dict(type='bool', default=False), - resource_tags = dict(type='dict', required=True), - route_tables = dict(type='list'), - state = dict(choices=['present', 'absent'], default='present'), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - ) - - state = module.params.get('state') - - ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) - - # If we have a region specified, connect to its endpoint. 
- if region: - try: - vpc_conn = boto.vpc.connect_to_region( - region, - aws_access_key_id=aws_access_key, - aws_secret_access_key=aws_secret_key - ) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg = str(e)) - else: - module.fail_json(msg="region must be specified") - - if module.params.get('state') == 'absent': - vpc_id = module.params.get('vpc_id') - cidr = module.params.get('cidr_block') - (changed, vpc_dict, new_vpc_id) = terminate_vpc(module, vpc_conn, vpc_id, cidr) - subnets_changed = None - elif module.params.get('state') == 'present': - # Changed is always set to true when provisioning a new VPC - (vpc_dict, new_vpc_id, subnets_changed, changed) = create_vpc(module, vpc_conn) - - module.exit_json(changed=changed, vpc_id=new_vpc_id, vpc=vpc_dict, subnets=subnets_changed) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -main() diff --git a/library/cloud/elasticache b/library/cloud/elasticache deleted file mode 100644 index 8c82f2fcc2..0000000000 --- a/library/cloud/elasticache +++ /dev/null @@ -1,547 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = """ ---- -module: elasticache -short_description: Manage cache clusters in Amazon Elasticache. -description: - - Manage cache clusters in Amazon Elasticache. 
- - Returns information about the specified cache cluster. -version_added: "1.4" -requirements: [ "boto" ] -author: Jim Dalton -options: - state: - description: - - C(absent) or C(present) are idempotent actions that will create or destroy a cache cluster as needed. C(rebooted) will reboot the cluster, resulting in a momentary outage. - choices: ['present', 'absent', 'rebooted'] - required: true - name: - description: - - The cache cluster identifier - required: true - engine: - description: - - Name of the cache engine to be used (memcached or redis) - required: false - default: memcached - cache_engine_version: - description: - - The version number of the cache engine - required: false - default: 1.4.14 - node_type: - description: - - The compute and memory capacity of the nodes in the cache cluster - required: false - default: cache.m1.small - num_nodes: - description: - - The initial number of cache nodes that the cache cluster will have - required: false - cache_port: - description: - - The port number on which each of the cache nodes will accept connections - required: false - default: 11211 - security_group_ids: - description: - - A list of vpc security group names to associate with this cache cluster. Only use if inside a vpc - required: false - default: ['default'] - version_added: "1.6" - cache_security_groups: - description: - - A list of cache security group names to associate with this cache cluster - required: false - default: ['default'] - zone: - description: - - The EC2 Availability Zone in which the cache cluster will be created - required: false - default: None - wait: - description: - - Wait for cache cluster result before returning - required: false - default: yes - choices: [ "yes", "no" ] - hard_modify: - description: - - Whether to destroy and recreate an existing cache cluster if necessary in order to modify its state - required: false - default: no - choices: [ "yes", "no" ] - aws_secret_key: - description: - - AWS secret key. 
If not set then the value of the AWS_SECRET_KEY environment variable is used. - required: false - default: None - aliases: ['ec2_secret_key', 'secret_key'] - aws_access_key: - description: - - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. - required: false - default: None - aliases: ['ec2_access_key', 'access_key'] - region: - description: - - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. - required: false - aliases: ['aws_region', 'ec2_region'] - -""" - -EXAMPLES = """ -# Note: None of these examples set aws_access_key, aws_secret_key, or region. -# It is assumed that their matching environment variables are set. - -# Basic example -- local_action: - module: elasticache - name: "test-please-delete" - state: present - engine: memcached - cache_engine_version: 1.4.14 - node_type: cache.m1.small - num_nodes: 1 - cache_port: 11211 - cache_security_groups: - - default - zone: us-east-1d - - -# Ensure cache cluster is gone -- local_action: - module: elasticache - name: "test-please-delete" - state: absent - -# Reboot cache cluster -- local_action: - module: elasticache - name: "test-please-delete" - state: rebooted - -""" - -import sys -import os -import time - -try: - import boto - from boto.elasticache.layer1 import ElastiCacheConnection - from boto.regioninfo import RegionInfo -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - - -class ElastiCacheManager(object): - """Handles elasticache creation and destruction""" - - EXIST_STATUSES = ['available', 'creating', 'rebooting', 'modifying'] - - def __init__(self, module, name, engine, cache_engine_version, node_type, - num_nodes, cache_port, cache_security_groups, security_group_ids, zone, wait, - hard_modify, aws_access_key, aws_secret_key, region): - self.module = module - self.name = name - self.engine = engine - self.cache_engine_version = cache_engine_version - 
self.node_type = node_type - self.num_nodes = num_nodes - self.cache_port = cache_port - self.cache_security_groups = cache_security_groups - self.security_group_ids = security_group_ids - self.zone = zone - self.wait = wait - self.hard_modify = hard_modify - - self.aws_access_key = aws_access_key - self.aws_secret_key = aws_secret_key - self.region = region - - self.changed = False - self.data = None - self.status = 'gone' - self.conn = self._get_elasticache_connection() - self._refresh_data() - - def ensure_present(self): - """Ensure cache cluster exists or create it if not""" - if self.exists(): - self.sync() - else: - self.create() - - def ensure_absent(self): - """Ensure cache cluster is gone or delete it if not""" - self.delete() - - def ensure_rebooted(self): - """Ensure cache cluster is gone or delete it if not""" - self.reboot() - - def exists(self): - """Check if cache cluster exists""" - return self.status in self.EXIST_STATUSES - - def create(self): - """Create an ElastiCache cluster""" - if self.status == 'available': - return - if self.status in ['creating', 'rebooting', 'modifying']: - if self.wait: - self._wait_for_status('available') - return - if self.status == 'deleting': - if self.wait: - self._wait_for_status('gone') - else: - msg = "'%s' is currently deleting. Cannot create." 
- self.module.fail_json(msg=msg % self.name) - - try: - response = self.conn.create_cache_cluster(cache_cluster_id=self.name, - num_cache_nodes=self.num_nodes, - cache_node_type=self.node_type, - engine=self.engine, - engine_version=self.cache_engine_version, - cache_security_group_names=self.cache_security_groups, - security_group_ids=self.security_group_ids, - preferred_availability_zone=self.zone, - port=self.cache_port) - except boto.exception.BotoServerError, e: - self.module.fail_json(msg=e.message) - cache_cluster_data = response['CreateCacheClusterResponse']['CreateCacheClusterResult']['CacheCluster'] - self._refresh_data(cache_cluster_data) - - self.changed = True - if self.wait: - self._wait_for_status('available') - return True - - def delete(self): - """Destroy an ElastiCache cluster""" - if self.status == 'gone': - return - if self.status == 'deleting': - if self.wait: - self._wait_for_status('gone') - return - if self.status in ['creating', 'rebooting', 'modifying']: - if self.wait: - self._wait_for_status('available') - else: - msg = "'%s' is currently %s. Cannot delete." - self.module.fail_json(msg=msg % (self.name, self.status)) - - try: - response = self.conn.delete_cache_cluster(cache_cluster_id=self.name) - except boto.exception.BotoServerError, e: - self.module.fail_json(msg=e.message) - cache_cluster_data = response['DeleteCacheClusterResponse']['DeleteCacheClusterResult']['CacheCluster'] - self._refresh_data(cache_cluster_data) - - self.changed = True - if self.wait: - self._wait_for_status('gone') - - def sync(self): - """Sync settings to cluster if required""" - if not self.exists(): - msg = "'%s' is %s. Cannot sync." - self.module.fail_json(msg=msg % (self.name, self.status)) - - if self.status in ['creating', 'rebooting', 'modifying']: - if self.wait: - self._wait_for_status('available') - else: - # Cluster can only be synced if available. If we can't wait - # for this, then just be done. 
- return - - if self._requires_destroy_and_create(): - if not self.hard_modify: - msg = "'%s' requires destructive modification. 'hard_modify' must be set to true to proceed." - self.module.fail_json(msg=msg % self.name) - if not self.wait: - msg = "'%s' requires destructive modification. 'wait' must be set to true." - self.module.fail_json(msg=msg % self.name) - self.delete() - self.create() - return - - if self._requires_modification(): - self.modify() - - def modify(self): - """Modify the cache cluster. Note it's only possible to modify a few select options.""" - nodes_to_remove = self._get_nodes_to_remove() - try: - response = self.conn.modify_cache_cluster(cache_cluster_id=self.name, - num_cache_nodes=self.num_nodes, - cache_node_ids_to_remove=nodes_to_remove, - cache_security_group_names=self.cache_security_groups, - security_group_ids=self.security_group_ids, - apply_immediately=True, - engine_version=self.cache_engine_version) - except boto.exception.BotoServerError, e: - self.module.fail_json(msg=e.message) - - cache_cluster_data = response['ModifyCacheClusterResponse']['ModifyCacheClusterResult']['CacheCluster'] - self._refresh_data(cache_cluster_data) - - self.changed = True - if self.wait: - self._wait_for_status('available') - - def reboot(self): - """Reboot the cache cluster""" - if not self.exists(): - msg = "'%s' is %s. Cannot reboot." - self.module.fail_json(msg=msg % (self.name, self.status)) - if self.status == 'rebooting': - return - if self.status in ['creating', 'modifying']: - if self.wait: - self._wait_for_status('available') - else: - msg = "'%s' is currently %s. Cannot reboot." 
- self.module.fail_json(msg=msg % (self.name, self.status)) - - # Collect ALL nodes for reboot - cache_node_ids = [cn['CacheNodeId'] for cn in self.data['CacheNodes']] - try: - response = self.conn.reboot_cache_cluster(cache_cluster_id=self.name, - cache_node_ids_to_reboot=cache_node_ids) - except boto.exception.BotoServerError, e: - self.module.fail_json(msg=e.message) - - cache_cluster_data = response['RebootCacheClusterResponse']['RebootCacheClusterResult']['CacheCluster'] - self._refresh_data(cache_cluster_data) - - self.changed = True - if self.wait: - self._wait_for_status('available') - - def get_info(self): - """Return basic info about the cache cluster""" - info = { - 'name': self.name, - 'status': self.status - } - if self.data: - info['data'] = self.data - return info - - - def _wait_for_status(self, awaited_status): - """Wait for status to change from present status to awaited_status""" - status_map = { - 'creating': 'available', - 'rebooting': 'available', - 'modifying': 'available', - 'deleting': 'gone' - } - - if status_map[self.status] != awaited_status: - msg = "Invalid awaited status. '%s' cannot transition to '%s'" - self.module.fail_json(msg=msg % (self.status, awaited_status)) - - if awaited_status not in set(status_map.values()): - msg = "'%s' is not a valid awaited status." 
- self.module.fail_json(msg=msg % awaited_status) - - while True: - time.sleep(1) - self._refresh_data() - if self.status == awaited_status: - break - - def _requires_modification(self): - """Check if cluster requires (nondestructive) modification""" - # Check modifiable data attributes - modifiable_data = { - 'NumCacheNodes': self.num_nodes, - 'EngineVersion': self.cache_engine_version - } - for key, value in modifiable_data.iteritems(): - if self.data[key] != value: - return True - - # Check cache security groups - cache_security_groups = [] - for sg in self.data['CacheSecurityGroups']: - cache_security_groups.append(sg['CacheSecurityGroupName']) - if set(cache_security_groups) - set(self.cache_security_groups): - return True - - # check vpc security groups - vpc_security_groups = [] - security_groups = self.data['SecurityGroups'] or [] - for sg in security_groups: - vpc_security_groups.append(sg['SecurityGroupId']) - if set(vpc_security_groups) - set(self.security_group_ids): - return True - - return False - - def _requires_destroy_and_create(self): - """ - Check whether a destroy and create is required to synchronize cluster. 
- """ - unmodifiable_data = { - 'node_type': self.data['CacheNodeType'], - 'engine': self.data['Engine'], - 'cache_port': self._get_port() - } - # Only check for modifications if zone is specified - if self.zone is not None: - unmodifiable_data['zone'] = self.data['PreferredAvailabilityZone'] - for key, value in unmodifiable_data.iteritems(): - if getattr(self, key) != value: - return True - return False - - def _get_elasticache_connection(self): - """Get an elasticache connection""" - try: - endpoint = "elasticache.%s.amazonaws.com" % self.region - connect_region = RegionInfo(name=self.region, endpoint=endpoint) - return ElastiCacheConnection(aws_access_key_id=self.aws_access_key, - aws_secret_access_key=self.aws_secret_key, - region=connect_region) - except boto.exception.NoAuthHandlerFound, e: - self.module.fail_json(msg=e.message) - - def _get_port(self): - """Get the port. Where this information is retrieved from is engine dependent.""" - if self.data['Engine'] == 'memcached': - return self.data['ConfigurationEndpoint']['Port'] - elif self.data['Engine'] == 'redis': - # Redis only supports a single node (presently) so just use - # the first and only - return self.data['CacheNodes'][0]['Endpoint']['Port'] - - def _refresh_data(self, cache_cluster_data=None): - """Refresh data about this cache cluster""" - if cache_cluster_data is None: - try: - response = self.conn.describe_cache_clusters(cache_cluster_id=self.name, - show_cache_node_info=True) - except boto.exception.BotoServerError: - self.data = None - self.status = 'gone' - return - cache_cluster_data = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters'][0] - self.data = cache_cluster_data - self.status = self.data['CacheClusterStatus'] - - # The documentation for elasticache lies -- status on rebooting is set - # to 'rebooting cache cluster nodes' instead of 'rebooting'. Fix it - # here to make status checks etc. more sane. 
- if self.status == 'rebooting cache cluster nodes': - self.status = 'rebooting' - - def _get_nodes_to_remove(self): - """If there are nodes to remove, it figures out which need to be removed""" - num_nodes_to_remove = self.data['NumCacheNodes'] - self.num_nodes - if num_nodes_to_remove <= 0: - return None - - if not self.hard_modify: - msg = "'%s' requires removal of cache nodes. 'hard_modify' must be set to true to proceed." - self.module.fail_json(msg=msg % self.name) - - cache_node_ids = [cn['CacheNodeId'] for cn in self.data['CacheNodes']] - return cache_node_ids[-num_nodes_to_remove:] - - - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( - state={'required': True, 'choices': ['present', 'absent', 'rebooted']}, - name={'required': True}, - engine={'required': False, 'default': 'memcached'}, - cache_engine_version={'required': False, 'default': '1.4.14'}, - node_type={'required': False, 'default': 'cache.m1.small'}, - num_nodes={'required': False, 'default': None, 'type': 'int'}, - cache_port={'required': False, 'default': 11211, 'type': 'int'}, - cache_security_groups={'required': False, 'default': ['default'], - 'type': 'list'}, - security_group_ids={'required': False, 'default': [], - 'type': 'list'}, - zone={'required': False, 'default': None}, - wait={'required': False, 'type' : 'bool', 'default': True}, - hard_modify={'required': False, 'type': 'bool', 'default': False} - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - ) - - ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) - - name = module.params['name'] - state = module.params['state'] - engine = module.params['engine'] - cache_engine_version = module.params['cache_engine_version'] - node_type = module.params['node_type'] - num_nodes = module.params['num_nodes'] - cache_port = module.params['cache_port'] - cache_security_groups = module.params['cache_security_groups'] - security_group_ids = module.params['security_group_ids'] - 
zone = module.params['zone'] - wait = module.params['wait'] - hard_modify = module.params['hard_modify'] - - if state == 'present' and not num_nodes: - module.fail_json(msg="'num_nodes' is a required parameter. Please specify num_nodes > 0") - - if not region: - module.fail_json(msg=str("Either region or EC2_REGION environment variable must be set.")) - - elasticache_manager = ElastiCacheManager(module, name, engine, - cache_engine_version, node_type, - num_nodes, cache_port, - cache_security_groups, - security_group_ids, zone, wait, - hard_modify, aws_access_key, - aws_secret_key, region) - - if state == 'present': - elasticache_manager.ensure_present() - elif state == 'absent': - elasticache_manager.ensure_absent() - elif state == 'rebooted': - elasticache_manager.ensure_rebooted() - - facts_result = dict(changed=elasticache_manager.changed, - elasticache=elasticache_manager.get_info()) - - module.exit_json(**facts_result) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -main() diff --git a/library/cloud/gc_storage b/library/cloud/gc_storage deleted file mode 100644 index 1963a148da..0000000000 --- a/library/cloud/gc_storage +++ /dev/null @@ -1,420 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -DOCUMENTATION = ''' ---- -module: gc_storage -version_added: "1.4" -short_description: This module manages objects/buckets in Google Cloud Storage. -description: - - This module allows users to manage their objects/buckets in Google Cloud Storage. It allows upload and download operations and can set some canned permissions. It also allows retrieval of URLs for objects for use in playbooks, and retrieval of string contents of objects. This module requires setting the default project in GCS prior to playbook usage. See U(https://developers.google.com/storage/docs/reference/v1/apiversion1) for information about setting the default project. - -options: - bucket: - description: - - Bucket name. - required: true - default: null - aliases: [] - object: - description: - - Keyname of the object inside the bucket. Can be also be used to create "virtual directories" (see examples). - required: false - default: null - aliases: [] - src: - description: - - The source file path when performing a PUT operation. - required: false - default: null - aliases: [] - dest: - description: - - The destination file path when downloading an object/key with a GET operation. - required: false - aliases: [] - force: - description: - - Forces an overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations. - required: false - default: true - aliases: [ 'overwrite' ] - permission: - description: - - This option let's the user set the canned permissions on the object/bucket that are created. The permissions that can be set are 'private', 'public-read', 'authenticated-read'. - required: false - default: private - expiration: - description: - - Time limit (in seconds) for the URL generated and returned by GCA when performing a mode=put or mode=get_url operation. This url is only avaialbe when public-read is the acl for the object. 
- required: false - default: null - aliases: [] - mode: - description: - - Switches the module behaviour between upload, download, get_url (return download url) , get_str (download object as string), create (bucket) and delete (bucket). - required: true - default: null - aliases: [] - choices: [ 'get', 'put', 'get_url', 'get_str', 'delete', 'create' ] - gcs_secret_key: - description: - - GCS secret key. If not set then the value of the GCS_SECRET_KEY environment variable is used. - required: true - default: null - gcs_access_key: - description: - - GCS access key. If not set then the value of the GCS_ACCESS_KEY environment variable is used. - required: true - default: null - -requirements: [ "boto 2.9+" ] - -author: benno@ansible.com Note. Most of the code has been taken from the S3 module. - -''' - -EXAMPLES = ''' -# upload some content -- gc_storage: bucket=mybucket object=key.txt src=/usr/local/myfile.txt mode=put permission=public-read - -# download some content -- gc_storage: bucket=mybucket object=key.txt dest=/usr/local/myfile.txt mode=get - -# Download an object as a string to use else where in your playbook -- gc_storage: bucket=mybucket object=key.txt mode=get_str - -# Create an empty bucket -- gc_storage: bucket=mybucket mode=create - -# Create a bucket with key as directory -- gc_storage: bucket=mybucket object=/my/directory/path mode=create - -# Delete a bucket and all contents -- gc_storage: bucket=mybucket mode=delete -''' - -import sys -import os -import urlparse -import hashlib - -try: - import boto -except ImportError: - print "failed=True msg='boto 2.9+ required for this module'" - sys.exit(1) - -def grant_check(module, gs, obj): - try: - acp = obj.get_acl() - if module.params.get('permission') == 'public-read': - grant = [ x for x in acp.entries.entry_list if x.scope.type == 'AllUsers'] - if not grant: - obj.set_acl('public-read') - module.exit_json(changed=True, result="The objects permission as been set to public-read") - if 
module.params.get('permission') == 'authenticated-read': - grant = [ x for x in acp.entries.entry_list if x.scope.type == 'AllAuthenticatedUsers'] - if not grant: - obj.set_acl('authenticated-read') - module.exit_json(changed=True, result="The objects permission as been set to authenticated-read") - except gs.provider.storage_response_error, e: - module.fail_json(msg= str(e)) - return True - - - -def key_check(module, gs, bucket, obj): - try: - bucket = gs.lookup(bucket) - key_check = bucket.get_key(obj) - except gs.provider.storage_response_error, e: - module.fail_json(msg= str(e)) - if key_check: - grant_check(module, gs, key_check) - return True - else: - return False - -def keysum(module, gs, bucket, obj): - bucket = gs.lookup(bucket) - key_check = bucket.get_key(obj) - if not key_check: - return None - md5_remote = key_check.etag[1:-1] - etag_multipart = '-' in md5_remote # Check for multipart, etag is not md5 - if etag_multipart is True: - module.fail_json(msg="Files uploaded with multipart of gs are not supported with checksum, unable to compute checksum.") - return md5_remote - -def bucket_check(module, gs, bucket): - try: - result = gs.lookup(bucket) - except gs.provider.storage_response_error, e: - module.fail_json(msg= str(e)) - if result: - grant_check(module, gs, result) - return True - else: - return False - -def create_bucket(module, gs, bucket): - try: - bucket = gs.create_bucket(bucket) - bucket.set_acl(module.params.get('permission')) - except gs.provider.storage_response_error, e: - module.fail_json(msg= str(e)) - if bucket: - return True - -def delete_bucket(module, gs, bucket): - try: - bucket = gs.lookup(bucket) - bucket_contents = bucket.list() - for key in bucket_contents: - bucket.delete_key(key.name) - bucket.delete() - return True - except gs.provider.storage_response_error, e: - module.fail_json(msg= str(e)) - -def delete_key(module, gs, bucket, obj): - try: - bucket = gs.lookup(bucket) - bucket.delete_key(obj) - 
module.exit_json(msg="Object deleted from bucket ", changed=True) - except gs.provider.storage_response_error, e: - module.fail_json(msg= str(e)) - -def create_dirkey(module, gs, bucket, obj): - try: - bucket = gs.lookup(bucket) - key = bucket.new_key(obj) - key.set_contents_from_string('') - module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket.name), changed=True) - except gs.provider.storage_response_error, e: - module.fail_json(msg= str(e)) - -def upload_file_check(src): - if os.path.exists(src): - file_exists is True - else: - file_exists is False - if os.path.isdir(src): - module.fail_json(msg="Specifying a directory is not a valid source for upload.", failed=True) - return file_exists - -def path_check(path): - if os.path.exists(path): - return True - else: - return False - -def upload_gsfile(module, gs, bucket, obj, src, expiry): - try: - bucket = gs.lookup(bucket) - key = bucket.new_key(obj) - key.set_contents_from_filename(src) - key.set_acl(module.params.get('permission')) - url = key.generate_url(expiry) - module.exit_json(msg="PUT operation complete", url=url, changed=True) - except gs.provider.storage_copy_error, e: - module.fail_json(msg= str(e)) - -def download_gsfile(module, gs, bucket, obj, dest): - try: - bucket = gs.lookup(bucket) - key = bucket.lookup(obj) - key.get_contents_to_filename(dest) - module.exit_json(msg="GET operation complete", changed=True) - except gs.provider.storage_copy_error, e: - module.fail_json(msg= str(e)) - -def download_gsstr(module, gs, bucket, obj): - try: - bucket = gs.lookup(bucket) - key = bucket.lookup(obj) - contents = key.get_contents_as_string() - module.exit_json(msg="GET operation complete", contents=contents, changed=True) - except gs.provider.storage_copy_error, e: - module.fail_json(msg= str(e)) - -def get_download_url(module, gs, bucket, obj, expiry): - try: - bucket = gs.lookup(bucket) - key = bucket.lookup(obj) - url = key.generate_url(expiry) - module.exit_json(msg="Download 
url:", url=url, expiration=expiry, changed=True) - except gs.provider.storage_response_error, e: - module.fail_json(msg= str(e)) - -def handle_get(module, gs, bucket, obj, overwrite, dest): - md5_remote = keysum(module, gs, bucket, obj) - md5_local = hashlib.md5(open(dest, 'rb').read()).hexdigest() - if md5_local == md5_remote: - module.exit_json(changed=False) - if md5_local != md5_remote and not overwrite: - module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.", failed=True) - else: - download_gsfile(module, gs, bucket, obj, dest) - -def handle_put(module, gs, bucket, obj, overwrite, src, expiration): - # Lets check to see if bucket exists to get ground truth. - bucket_rc = bucket_check(module, gs, bucket) - key_rc = key_check(module, gs, bucket, obj) - - # Lets check key state. Does it exist and if it does, compute the etag md5sum. - if bucket_rc and key_rc: - md5_remote = keysum(module, gs, bucket, obj) - md5_local = hashlib.md5(open(src, 'rb').read()).hexdigest() - if md5_local == md5_remote: - module.exit_json(msg="Local and remote object are identical", changed=False) - if md5_local != md5_remote and not overwrite: - module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.", failed=True) - else: - upload_gsfile(module, gs, bucket, obj, src, expiration) - - if not bucket_rc: - create_bucket(module, gs, bucket) - upload_gsfile(module, gs, bucket, obj, src, expiration) - - # If bucket exists but key doesn't, just upload. 
- if bucket_rc and not key_rc: - upload_gsfile(module, gs, bucket, obj, src, expiration) - -def handle_delete(module, gs, bucket, obj): - if bucket and not obj: - if bucket_check(module, gs, bucket): - module.exit_json(msg="Bucket %s and all keys have been deleted."%bucket, changed=delete_bucket(module, gs, bucket)) - else: - module.exit_json(msg="Bucket does not exist.", changed=False) - if bucket and obj: - if bucket_check(module, gs, bucket): - if key_check(module, gs, bucket, obj): - module.exit_json(msg="Object has been deleted.", changed=delete_key(module, gs, bucket, obj)) - else: - module.exit_json(msg="Object does not exists.", changed=False) - else: - module.exit_json(msg="Bucket does not exist.", changed=False) - else: - module.fail_json(msg="Bucket or Bucket & object parameter is required.", failed=True) - -def handle_create(module, gs, bucket, obj): - if bucket and not obj: - if bucket_check(module, gs, bucket): - module.exit_json(msg="Bucket already exists.", changed=False) - else: - module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, gs, bucket)) - if bucket and obj: - if bucket_check(module, gs, bucket): - if obj.endswith('/'): - dirobj = obj - else: - dirobj = obj + "/" - if key_check(module, gs, bucket, dirobj): - module.exit_json(msg="Bucket %s and key %s already exists."% (bucket, obj), changed=False) - else: - create_dirkey(module, gs, bucket, dirobj) - else: - create_bucket(module, gs, bucket) - create_dirkey(module, gs, bucket, dirobj) - -def main(): - module = AnsibleModule( - argument_spec = dict( - bucket = dict(required=True), - object = dict(default=None), - src = dict(default=None), - dest = dict(default=None), - expiration = dict(default=600, aliases=['expiry']), - mode = dict(choices=['get', 'put', 'delete', 'create', 'get_url', 'get_str'], required=True), - permission = dict(choices=['private', 'public-read', 'authenticated-read'], default='private'), - gs_secret_key = dict(no_log=True, required=True), - 
gs_access_key = dict(required=True), - overwrite = dict(default=True, type='bool', aliases=['force']), - ), - ) - - bucket = module.params.get('bucket') - obj = module.params.get('object') - src = module.params.get('src') - dest = module.params.get('dest') - if dest: - dest = os.path.expanduser(dest) - mode = module.params.get('mode') - expiry = module.params.get('expiration') - gs_secret_key = module.params.get('gs_secret_key') - gs_access_key = module.params.get('gs_access_key') - overwrite = module.params.get('overwrite') - - if mode == 'put': - if not src or not object: - module.fail_json(msg="When using PUT, src, bucket, object are mandatory parameters") - if mode == 'get': - if not dest or not object: - module.fail_json(msg="When using GET, dest, bucket, object are mandatory parameters") - if obj: - obj = os.path.expanduser(module.params['object']) - - try: - gs = boto.connect_gs(gs_access_key, gs_secret_key) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg = str(e)) - - if mode == 'get': - if not bucket_check(module, gs, bucket) or not key_check(module, gs, bucket, obj): - module.fail_json(msg="Target bucket/key cannot be found", failed=True) - if not path_check(dest): - download_gsfile(module, gs, bucket, obj, dest) - else: - handle_get(module, gs, bucket, obj, overwrite, dest) - - if mode == 'put': - if not path_check(src): - module.fail_json(msg="Local object for PUT does not exist", failed=True) - handle_put(module, gs, bucket, obj, overwrite, src, expiry) - - # Support for deleting an object if we have both params. 
- if mode == 'delete': - handle_delete(module, gs, bucket, obj) - - if mode == 'create': - handle_create(module, gs, bucket, obj) - - if mode == 'get_url': - if bucket and obj: - if bucket_check(module, gs, bucket) and key_check(module, gs, bucket, obj): - get_download_url(module, gs, bucket, obj, expiry) - else: - module.fail_json(msg="Key/Bucket does not exist", failed=True) - else: - module.fail_json(msg="Bucket and Object parameters must be set", failed=True) - - # --------------------------- Get the String contents of an Object ------------------------- - if mode == 'get_str': - if bucket and obj: - if bucket_check(module, gs, bucket) and key_check(module, gs, bucket, obj): - download_gsstr(module, gs, bucket, obj) - else: - module.fail_json(msg="Key/Bucket does not exist", failed=True) - else: - module.fail_json(msg="Bucket and Object parameters must be set", failed=True) - - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/cloud/gce b/library/cloud/gce deleted file mode 100755 index d429b61de2..0000000000 --- a/library/cloud/gce +++ /dev/null @@ -1,474 +0,0 @@ -#!/usr/bin/python -# Copyright 2013 Google Inc. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -DOCUMENTATION = ''' ---- -module: gce -version_added: "1.4" -short_description: create or terminate GCE instances -description: - - Creates or terminates Google Compute Engine (GCE) instances. See - U(https://cloud.google.com/products/compute-engine) for an overview. - Full install/configuration instructions for the gce* modules can - be found in the comments of ansible/test/gce_tests.py. -options: - image: - description: - - image string to use for the instance - required: false - default: "debian-7" - aliases: [] - instance_names: - description: - - a comma-separated list of instance names to create or destroy - required: false - default: null - aliases: [] - machine_type: - description: - - machine type to use for the instance, use 'n1-standard-1' by default - required: false - default: "n1-standard-1" - aliases: [] - metadata: - description: - - a hash/dictionary of custom data for the instance; '{"key":"value",...}' - required: false - default: null - aliases: [] - service_account_email: - version_added: 1.5.1 - description: - - service account email - required: false - default: null - aliases: [] - pem_file: - version_added: 1.5.1 - description: - - path to the pem file associated with the service account email - required: false - default: null - aliases: [] - project_id: - version_added: 1.5.1 - description: - - your GCE project ID - required: false - default: null - aliases: [] - name: - description: - - identifier when working with a single instance - required: false - aliases: [] - network: - description: - - name of the network, 'default' will be used if not specified - required: false - default: "default" - aliases: [] - persistent_boot_disk: - description: - - if set, create the instance with a persistent boot disk - required: false - default: "false" - aliases: [] - disks: - description: - - a list of persistent disks to attach to the instance; a string value gives the name of the disk; alternatively, a dictionary value can define 'name' and 'mode' 
('READ_ONLY' or 'READ_WRITE'). The first entry will be the boot disk (which must be READ_WRITE). - required: false - default: null - aliases: [] - version_added: "1.7" - state: - description: - - desired state of the resource - required: false - default: "present" - choices: ["active", "present", "absent", "deleted"] - aliases: [] - tags: - description: - - a comma-separated list of tags to associate with the instance - required: false - default: null - aliases: [] - zone: - description: - - the GCE zone to use - required: true - default: "us-central1-a" - aliases: [] - -requirements: [ "libcloud" ] -notes: - - Either I(name) or I(instance_names) is required. -author: Eric Johnson -''' - -EXAMPLES = ''' -# Basic provisioning example. Create a single Debian 7 instance in the -# us-central1-a Zone of n1-standard-1 machine type. -- local_action: - module: gce - name: test-instance - zone: us-central1-a - machine_type: n1-standard-1 - image: debian-7 - -# Example using defaults and with metadata to create a single 'foo' instance -- local_action: - module: gce - name: foo - metadata: '{"db":"postgres", "group":"qa", "id":500}' - - -# Launch instances from a control node, runs some tasks on the new instances, -# and then terminate them -- name: Create a sandbox instance - hosts: localhost - vars: - names: foo,bar - machine_type: n1-standard-1 - image: debian-6 - zone: us-central1-a - service_account_email: unique-email@developer.gserviceaccount.com - pem_file: /path/to/pem_file - project_id: project-id - tasks: - - name: Launch instances - local_action: gce instance_names={{names}} machine_type={{machine_type}} - image={{image}} zone={{zone}} service_account_email={{ service_account_email }} - pem_file={{ pem_file }} project_id={{ project_id }} - register: gce - - name: Wait for SSH to come up - local_action: wait_for host={{item.public_ip}} port=22 delay=10 - timeout=60 state=started - with_items: {{gce.instance_data}} - -- name: Configure instance(s) - hosts: launched 
- sudo: True - roles: - - my_awesome_role - - my_awesome_tasks - -- name: Terminate instances - hosts: localhost - connection: local - tasks: - - name: Terminate instances that were previously launched - local_action: - module: gce - state: 'absent' - instance_names: {{gce.instance_names}} - -''' - -import sys - -try: - from libcloud.compute.types import Provider - from libcloud.compute.providers import get_driver - from libcloud.common.google import GoogleBaseError, QuotaExceededError, \ - ResourceExistsError, ResourceInUseError, ResourceNotFoundError - _ = Provider.GCE -except ImportError: - print("failed=True " + \ - "msg='libcloud with GCE support (0.13.3+) required for this module'") - sys.exit(1) - -try: - from ast import literal_eval -except ImportError: - print("failed=True " + \ - "msg='GCE module requires python's 'ast' module, python v2.6+'") - sys.exit(1) - - -def get_instance_info(inst): - """Retrieves instance information from an instance object and returns it - as a dictionary. 
- - """ - metadata = {} - if 'metadata' in inst.extra and 'items' in inst.extra['metadata']: - for md in inst.extra['metadata']['items']: - metadata[md['key']] = md['value'] - - try: - netname = inst.extra['networkInterfaces'][0]['network'].split('/')[-1] - except: - netname = None - if 'disks' in inst.extra: - disk_names = [disk_info['source'].split('/')[-1] - for disk_info - in sorted(inst.extra['disks'], - key=lambda disk_info: disk_info['index'])] - else: - disk_names = [] - return({ - 'image': not inst.image is None and inst.image.split('/')[-1] or None, - 'disks': disk_names, - 'machine_type': inst.size, - 'metadata': metadata, - 'name': inst.name, - 'network': netname, - 'private_ip': inst.private_ips[0], - 'public_ip': inst.public_ips[0], - 'status': ('status' in inst.extra) and inst.extra['status'] or None, - 'tags': ('tags' in inst.extra) and inst.extra['tags'] or [], - 'zone': ('zone' in inst.extra) and inst.extra['zone'].name or None, - }) - -def create_instances(module, gce, instance_names): - """Creates new instances. Attributes other than instance_names are picked - up from 'module' - - module : AnsibleModule object - gce: authenticated GCE libcloud driver - instance_names: python list of instance names to create - - Returns: - A list of dictionaries with instance information - about the instances that were launched. 
- - """ - image = module.params.get('image') - machine_type = module.params.get('machine_type') - metadata = module.params.get('metadata') - network = module.params.get('network') - persistent_boot_disk = module.params.get('persistent_boot_disk') - disks = module.params.get('disks') - state = module.params.get('state') - tags = module.params.get('tags') - zone = module.params.get('zone') - - new_instances = [] - changed = False - - lc_image = gce.ex_get_image(image) - lc_disks = [] - disk_modes = [] - for i, disk in enumerate(disks or []): - if isinstance(disk, dict): - lc_disks.append(gce.ex_get_volume(disk['name'])) - disk_modes.append(disk['mode']) - else: - lc_disks.append(gce.ex_get_volume(disk)) - # boot disk is implicitly READ_WRITE - disk_modes.append('READ_ONLY' if i > 0 else 'READ_WRITE') - lc_network = gce.ex_get_network(network) - lc_machine_type = gce.ex_get_size(machine_type) - lc_zone = gce.ex_get_zone(zone) - - # Try to convert the user's metadata value into the format expected - # by GCE. First try to ensure user has proper quoting of a - # dictionary-like syntax using 'literal_eval', then convert the python - # dict into a python list of 'key' / 'value' dicts. Should end up - # with: - # [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...] 
- if metadata: - try: - md = literal_eval(metadata) - if not isinstance(md, dict): - raise ValueError('metadata must be a dict') - except ValueError, e: - print("failed=True msg='bad metadata: %s'" % str(e)) - sys.exit(1) - except SyntaxError, e: - print("failed=True msg='bad metadata syntax'") - sys.exit(1) - - items = [] - for k,v in md.items(): - items.append({"key": k,"value": v}) - metadata = {'items': items} - - # These variables all have default values but check just in case - if not lc_image or not lc_network or not lc_machine_type or not lc_zone: - module.fail_json(msg='Missing required create instance variable', - changed=False) - - for name in instance_names: - pd = None - if lc_disks: - pd = lc_disks[0] - elif persistent_boot_disk: - try: - pd = gce.create_volume(None, "%s" % name, image=lc_image) - except ResourceExistsError: - pd = gce.ex_get_volume("%s" % name, lc_zone) - inst = None - try: - inst = gce.create_node(name, lc_machine_type, lc_image, - location=lc_zone, ex_network=network, ex_tags=tags, - ex_metadata=metadata, ex_boot_disk=pd) - changed = True - except ResourceExistsError: - inst = gce.ex_get_node(name, lc_zone) - except GoogleBaseError, e: - module.fail_json(msg='Unexpected error attempting to create ' + \ - 'instance %s, error: %s' % (name, e.value)) - - for i, lc_disk in enumerate(lc_disks): - # Check whether the disk is already attached - if (len(inst.extra['disks']) > i): - attached_disk = inst.extra['disks'][i] - if attached_disk['source'] != lc_disk.extra['selfLink']: - module.fail_json( - msg=("Disk at index %d does not match: requested=%s found=%s" % ( - i, lc_disk.extra['selfLink'], attached_disk['source']))) - elif attached_disk['mode'] != disk_modes[i]: - module.fail_json( - msg=("Disk at index %d is in the wrong mode: requested=%s found=%s" % ( - i, disk_modes[i], attached_disk['mode']))) - else: - continue - gce.attach_volume(inst, lc_disk, ex_mode=disk_modes[i]) - # Work around libcloud bug: attached volumes don't get 
added - # to the instance metadata. get_instance_info() only cares about - # source and index. - if len(inst.extra['disks']) != i+1: - inst.extra['disks'].append( - {'source': lc_disk.extra['selfLink'], 'index': i}) - - if inst: - new_instances.append(inst) - - instance_names = [] - instance_json_data = [] - for inst in new_instances: - d = get_instance_info(inst) - instance_names.append(d['name']) - instance_json_data.append(d) - - return (changed, instance_json_data, instance_names) - - -def terminate_instances(module, gce, instance_names, zone_name): - """Terminates a list of instances. - - module: Ansible module object - gce: authenticated GCE connection object - instance_names: a list of instance names to terminate - zone_name: the zone where the instances reside prior to termination - - Returns a dictionary of instance names that were terminated. - - """ - changed = False - terminated_instance_names = [] - for name in instance_names: - inst = None - try: - inst = gce.ex_get_node(name, zone_name) - except ResourceNotFoundError: - pass - except Exception, e: - module.fail_json(msg=unexpected_error_msg(e), changed=False) - if inst: - gce.destroy_node(inst) - terminated_instance_names.append(inst.name) - changed = True - - return (changed, terminated_instance_names) - - -def main(): - module = AnsibleModule( - argument_spec = dict( - image = dict(default='debian-7'), - instance_names = dict(), - machine_type = dict(default='n1-standard-1'), - metadata = dict(), - name = dict(), - network = dict(default='default'), - persistent_boot_disk = dict(type='bool', default=False), - disks = dict(type='list'), - state = dict(choices=['active', 'present', 'absent', 'deleted'], - default='present'), - tags = dict(type='list'), - zone = dict(default='us-central1-a'), - service_account_email = dict(), - pem_file = dict(), - project_id = dict(), - ) - ) - - gce = gce_connect(module) - - image = module.params.get('image') - instance_names = module.params.get('instance_names') - 
machine_type = module.params.get('machine_type') - metadata = module.params.get('metadata') - name = module.params.get('name') - network = module.params.get('network') - persistent_boot_disk = module.params.get('persistent_boot_disk') - state = module.params.get('state') - tags = module.params.get('tags') - zone = module.params.get('zone') - changed = False - - inames = [] - if isinstance(instance_names, list): - inames = instance_names - elif isinstance(instance_names, str): - inames = instance_names.split(',') - if name: - inames.append(name) - if not inames: - module.fail_json(msg='Must specify a "name" or "instance_names"', - changed=False) - if not zone: - module.fail_json(msg='Must specify a "zone"', changed=False) - - json_output = {'zone': zone} - if state in ['absent', 'deleted']: - json_output['state'] = 'absent' - (changed, terminated_instance_names) = terminate_instances(module, - gce, inames, zone) - - # based on what user specified, return the same variable, although - # value could be different if an instance could not be destroyed - if instance_names: - json_output['instance_names'] = terminated_instance_names - elif name: - json_output['name'] = name - - elif state in ['active', 'present']: - json_output['state'] = 'present' - (changed, instance_data,instance_name_list) = create_instances( - module, gce, inames) - json_output['instance_data'] = instance_data - if instance_names: - json_output['instance_names'] = instance_name_list - elif name: - json_output['name'] = name - - - json_output['changed'] = changed - print json.dumps(json_output) - sys.exit(0) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.gce import * - -main() diff --git a/library/cloud/gce_lb b/library/cloud/gce_lb deleted file mode 100644 index a60e14010c..0000000000 --- a/library/cloud/gce_lb +++ /dev/null @@ -1,335 +0,0 @@ -#!/usr/bin/python -# Copyright 2013 Google Inc. 
-# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: gce_lb -version_added: "1.5" -short_description: create/destroy GCE load-balancer resources -description: - - This module can create and destroy Google Compute Engine C(loadbalancer) - and C(httphealthcheck) resources. The primary LB resource is the - C(load_balancer) resource and the health check parameters are all - prefixed with I(httphealthcheck). - The full documentation for Google Compute Engine load balancing is at - U(https://developers.google.com/compute/docs/load-balancing/). However, - the ansible module simplifies the configuration by following the - libcloud model. - Full install/configuration instructions for the gce* modules can - be found in the comments of ansible/test/gce_tests.py. 
-options: - httphealthcheck_name: - description: - - the name identifier for the HTTP health check - required: false - default: null - httphealthcheck_port: - description: - - the TCP port to use for HTTP health checking - required: false - default: 80 - httphealthcheck_path: - description: - - the url path to use for HTTP health checking - required: false - default: "/" - httphealthcheck_interval: - description: - - the duration in seconds between each health check request - required: false - default: 5 - httphealthcheck_timeout: - description: - - the timeout in seconds before a request is considered a failed check - required: false - default: 5 - httphealthcheck_unhealthy_count: - description: - - number of consecutive failed checks before marking a node unhealthy - required: false - default: 2 - httphealthcheck_healthy_count: - description: - - number of consecutive successful checks before marking a node healthy - required: false - default: 2 - httphealthcheck_host: - description: - - host header to pass through on HTTP check requests - required: false - default: null - name: - description: - - name of the load-balancer resource - required: false - default: null - protocol: - description: - - the protocol used for the load-balancer packet forwarding, tcp or udp - required: false - default: "tcp" - choices: ['tcp', 'udp'] - region: - description: - - the GCE region where the load-balancer is defined - required: false - external_ip: - description: - - the external static IPv4 (or auto-assigned) address for the LB - required: false - default: null - port_range: - description: - - the port (range) to forward, e.g. 80 or 8000-8888 defaults to all ports - required: false - default: null - members: - description: - - a list of zone/nodename pairs, e.g ['us-central1-a/www-a', ...] 
- required: false - aliases: ['nodes'] - state: - description: - - desired state of the LB - default: "present" - choices: ["active", "present", "absent", "deleted"] - aliases: [] - service_account_email: - version_added: "1.6" - description: - - service account email - required: false - default: null - aliases: [] - pem_file: - version_added: "1.6" - description: - - path to the pem file associated with the service account email - required: false - default: null - aliases: [] - project_id: - version_added: "1.6" - description: - - your GCE project ID - required: false - default: null - aliases: [] - -requirements: [ "libcloud" ] -author: Eric Johnson -''' - -EXAMPLES = ''' -# Simple example of creating a new LB, adding members, and a health check -- local_action: - module: gce_lb - name: testlb - region: us-central1 - members: ["us-central1-a/www-a", "us-central1-b/www-b"] - httphealthcheck_name: hc - httphealthcheck_port: 80 - httphealthcheck_path: "/up" -''' - -import sys - - -try: - from libcloud.compute.types import Provider - from libcloud.compute.providers import get_driver - from libcloud.loadbalancer.types import Provider as Provider_lb - from libcloud.loadbalancer.providers import get_driver as get_driver_lb - from libcloud.common.google import GoogleBaseError, QuotaExceededError, \ - ResourceExistsError, ResourceNotFoundError - _ = Provider.GCE -except ImportError: - print("failed=True " + \ - "msg='libcloud with GCE support required for this module.'") - sys.exit(1) - - -def main(): - module = AnsibleModule( - argument_spec = dict( - httphealthcheck_name = dict(), - httphealthcheck_port = dict(default=80), - httphealthcheck_path = dict(default='/'), - httphealthcheck_interval = dict(default=5), - httphealthcheck_timeout = dict(default=5), - httphealthcheck_unhealthy_count = dict(default=2), - httphealthcheck_healthy_count = dict(default=2), - httphealthcheck_host = dict(), - name = dict(), - protocol = dict(default='tcp'), - region = dict(), - 
external_ip = dict(), - port_range = dict(), - members = dict(type='list'), - state = dict(default='present'), - service_account_email = dict(), - pem_file = dict(), - project_id = dict(), - ) - ) - - gce = gce_connect(module) - - httphealthcheck_name = module.params.get('httphealthcheck_name') - httphealthcheck_port = module.params.get('httphealthcheck_port') - httphealthcheck_path = module.params.get('httphealthcheck_path') - httphealthcheck_interval = module.params.get('httphealthcheck_interval') - httphealthcheck_timeout = module.params.get('httphealthcheck_timeout') - httphealthcheck_unhealthy_count = \ - module.params.get('httphealthcheck_unhealthy_count') - httphealthcheck_healthy_count = \ - module.params.get('httphealthcheck_healthy_count') - httphealthcheck_host = module.params.get('httphealthcheck_host') - name = module.params.get('name') - protocol = module.params.get('protocol') - region = module.params.get('region') - external_ip = module.params.get('external_ip') - port_range = module.params.get('port_range') - members = module.params.get('members') - state = module.params.get('state') - - try: - gcelb = get_driver_lb(Provider_lb.GCE)(gce_driver=gce) - gcelb.connection.user_agent_append("%s/%s" % ( - USER_AGENT_PRODUCT, USER_AGENT_VERSION)) - except Exception, e: - module.fail_json(msg=unexpected_error_msg(e), changed=False) - - changed = False - json_output = {'name': name, 'state': state} - - if not name and not httphealthcheck_name: - module.fail_json(msg='Nothing to do, please specify a "name" ' + \ - 'or "httphealthcheck_name" parameter', changed=False) - - if state in ['active', 'present']: - # first, create the httphealthcheck if requested - hc = None - if httphealthcheck_name: - json_output['httphealthcheck_name'] = httphealthcheck_name - try: - hc = gcelb.ex_create_healthcheck(httphealthcheck_name, - host=httphealthcheck_host, path=httphealthcheck_path, - port=httphealthcheck_port, - interval=httphealthcheck_interval, - 
timeout=httphealthcheck_timeout, - unhealthy_threshold=httphealthcheck_unhealthy_count, - healthy_threshold=httphealthcheck_healthy_count) - changed = True - except ResourceExistsError: - hc = gce.ex_get_healthcheck(httphealthcheck_name) - except Exception, e: - module.fail_json(msg=unexpected_error_msg(e), changed=False) - - if hc is not None: - json_output['httphealthcheck_host'] = hc.extra['host'] - json_output['httphealthcheck_path'] = hc.path - json_output['httphealthcheck_port'] = hc.port - json_output['httphealthcheck_interval'] = hc.interval - json_output['httphealthcheck_timeout'] = hc.timeout - json_output['httphealthcheck_unhealthy_count'] = \ - hc.unhealthy_threshold - json_output['httphealthcheck_healthy_count'] = \ - hc.healthy_threshold - - # create the forwarding rule (and target pool under the hood) - lb = None - if name: - if not region: - module.fail_json(msg='Missing required region name', - changed=False) - nodes = [] - output_nodes = [] - json_output['name'] = name - # members is a python list of 'zone/inst' strings - if members: - for node in members: - try: - zone, node_name = node.split('/') - nodes.append(gce.ex_get_node(node_name, zone)) - output_nodes.append(node) - except: - # skip nodes that are badly formatted or don't exist - pass - try: - if hc is not None: - lb = gcelb.create_balancer(name, port_range, protocol, - None, nodes, ex_region=region, ex_healthchecks=[hc], - ex_address=external_ip) - else: - lb = gcelb.create_balancer(name, port_range, protocol, - None, nodes, ex_region=region, ex_address=external_ip) - changed = True - except ResourceExistsError: - lb = gcelb.get_balancer(name) - except Exception, e: - module.fail_json(msg=unexpected_error_msg(e), changed=False) - - if lb is not None: - json_output['members'] = output_nodes - json_output['protocol'] = protocol - json_output['region'] = region - json_output['external_ip'] = lb.ip - json_output['port_range'] = lb.port - hc_names = [] - if 'healthchecks' in lb.extra: - for 
hc in lb.extra['healthchecks']: - hc_names.append(hc.name) - json_output['httphealthchecks'] = hc_names - - if state in ['absent', 'deleted']: - # first, delete the load balancer (forwarding rule and target pool) - # if specified. - if name: - json_output['name'] = name - try: - lb = gcelb.get_balancer(name) - gcelb.destroy_balancer(lb) - changed = True - except ResourceNotFoundError: - pass - except Exception, e: - module.fail_json(msg=unexpected_error_msg(e), changed=False) - - # destroy the health check if specified - if httphealthcheck_name: - json_output['httphealthcheck_name'] = httphealthcheck_name - try: - hc = gce.ex_get_healthcheck(httphealthcheck_name) - gce.ex_destroy_healthcheck(hc) - changed = True - except ResourceNotFoundError: - pass - except Exception, e: - module.fail_json(msg=unexpected_error_msg(e), changed=False) - - - json_output['changed'] = changed - print json.dumps(json_output) - sys.exit(0) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.gce import * - -main() diff --git a/library/cloud/gce_net b/library/cloud/gce_net deleted file mode 100644 index c2c0b30452..0000000000 --- a/library/cloud/gce_net +++ /dev/null @@ -1,271 +0,0 @@ -#!/usr/bin/python -# Copyright 2013 Google Inc. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -DOCUMENTATION = ''' ---- -module: gce_net -version_added: "1.5" -short_description: create/destroy GCE networks and firewall rules -description: - - This module can create and destroy Google Compue Engine networks and - firewall rules U(https://developers.google.com/compute/docs/networking). - The I(name) parameter is reserved for referencing a network while the - I(fwname) parameter is used to reference firewall rules. - IPv4 Address ranges must be specified using the CIDR - U(http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) format. - Full install/configuration instructions for the gce* modules can - be found in the comments of ansible/test/gce_tests.py. -options: - allowed: - description: - - the protocol:ports to allow ('tcp:80' or 'tcp:80,443' or 'tcp:80-800') - required: false - default: null - aliases: [] - ipv4_range: - description: - - the IPv4 address range in CIDR notation for the network - required: false - aliases: ['cidr'] - fwname: - description: - - name of the firewall rule - required: false - default: null - aliases: ['fwrule'] - name: - description: - - name of the network - required: false - default: null - aliases: [] - src_range: - description: - - the source IPv4 address range in CIDR notation - required: false - default: null - aliases: ['src_cidr'] - src_tags: - description: - - the source instance tags for creating a firewall rule - required: false - default: null - aliases: [] - state: - description: - - desired state of the persistent disk - required: false - default: "present" - choices: ["active", "present", "absent", "deleted"] - aliases: [] - service_account_email: - version_added: "1.6" - description: - - service account email - required: false - default: null - aliases: [] - pem_file: - version_added: "1.6" - description: - - path to the pem file associated with the service account email - required: false - default: null - aliases: [] - project_id: - version_added: "1.6" - description: - - your GCE project ID - 
required: false - default: null - aliases: [] - -requirements: [ "libcloud" ] -author: Eric Johnson -''' - -EXAMPLES = ''' -# Simple example of creating a new network -- local_action: - module: gce_net - name: privatenet - ipv4_range: '10.240.16.0/24' - -# Simple example of creating a new firewall rule -- local_action: - module: gce_net - name: privatenet - allowed: tcp:80,8080 - src_tags: ["web", "proxy"] - -''' - -import sys - -try: - from libcloud.compute.types import Provider - from libcloud.compute.providers import get_driver - from libcloud.common.google import GoogleBaseError, QuotaExceededError, \ - ResourceExistsError, ResourceNotFoundError - _ = Provider.GCE -except ImportError: - print("failed=True " + \ - "msg='libcloud with GCE support required for this module.'") - sys.exit(1) - - -def format_allowed(allowed): - """Format the 'allowed' value so that it is GCE compatible.""" - if allowed.count(":") == 0: - protocol = allowed - ports = [] - elif allowed.count(":") == 1: - protocol, ports = allowed.split(":") - else: - return [] - if ports.count(","): - ports = ports.split(",") - else: - ports = [ports] - return_val = {"IPProtocol": protocol} - if ports: - return_val["ports"] = ports - return [return_val] - - -def main(): - module = AnsibleModule( - argument_spec = dict( - allowed = dict(), - ipv4_range = dict(), - fwname = dict(), - name = dict(), - src_range = dict(), - src_tags = dict(type='list'), - state = dict(default='present'), - service_account_email = dict(), - pem_file = dict(), - project_id = dict(), - ) - ) - - gce = gce_connect(module) - - allowed = module.params.get('allowed') - ipv4_range = module.params.get('ipv4_range') - fwname = module.params.get('fwname') - name = module.params.get('name') - src_range = module.params.get('src_range') - src_tags = module.params.get('src_tags') - state = module.params.get('state') - - changed = False - json_output = {'state': state} - - if state in ['active', 'present']: - network = None - try: - 
network = gce.ex_get_network(name) - json_output['name'] = name - json_output['ipv4_range'] = network.cidr - except ResourceNotFoundError: - pass - except Exception, e: - module.fail_json(msg=unexpected_error_msg(e), changed=False) - - # user wants to create a new network that doesn't yet exist - if name and not network: - if not ipv4_range: - module.fail_json(msg="Missing required 'ipv4_range' parameter", - changed=False) - - try: - network = gce.ex_create_network(name, ipv4_range) - json_output['name'] = name - json_output['ipv4_range'] = ipv4_range - changed = True - except Exception, e: - module.fail_json(msg=unexpected_error_msg(e), changed=False) - - if fwname: - # user creating a firewall rule - if not allowed and not src_range and not src_tags: - if changed and network: - module.fail_json( - msg="Network created, but missing required " + \ - "firewall rule parameter(s)", changed=True) - module.fail_json( - msg="Missing required firewall rule parameter(s)", - changed=False) - - allowed_list = format_allowed(allowed) - - try: - gce.ex_create_firewall(fwname, allowed_list, network=name, - source_ranges=src_range, source_tags=src_tags) - changed = True - except ResourceExistsError: - pass - except Exception, e: - module.fail_json(msg=unexpected_error_msg(e), changed=False) - - json_output['fwname'] = fwname - json_output['allowed'] = allowed - json_output['src_range'] = src_range - json_output['src_tags'] = src_tags - - if state in ['absent', 'deleted']: - if fwname: - json_output['fwname'] = fwname - fw = None - try: - fw = gce.ex_get_firewall(fwname) - except ResourceNotFoundError: - pass - except Exception, e: - module.fail_json(msg=unexpected_error_msg(e), changed=False) - if fw: - gce.ex_destroy_firewall(fw) - changed = True - if name: - json_output['name'] = name - network = None - try: - network = gce.ex_get_network(name) -# json_output['d1'] = 'found network name %s' % name - except ResourceNotFoundError: -# json_output['d2'] = 'not found network name 
%s' % name - pass - except Exception, e: -# json_output['d3'] = 'error with %s' % name - module.fail_json(msg=unexpected_error_msg(e), changed=False) - if network: -# json_output['d4'] = 'deleting %s' % name - gce.ex_destroy_network(network) -# json_output['d5'] = 'deleted %s' % name - changed = True - - json_output['changed'] = changed - print json.dumps(json_output) - sys.exit(0) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.gce import * - -main() diff --git a/library/cloud/gce_pd b/library/cloud/gce_pd deleted file mode 100644 index ddfe711304..0000000000 --- a/library/cloud/gce_pd +++ /dev/null @@ -1,285 +0,0 @@ -#!/usr/bin/python -# Copyright 2013 Google Inc. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: gce_pd -version_added: "1.4" -short_description: utilize GCE persistent disk resources -description: - - This module can create and destroy unformatted GCE persistent disks - U(https://developers.google.com/compute/docs/disks#persistentdisks). - It also supports attaching and detaching disks from running instances. - Full install/configuration instructions for the gce* modules can - be found in the comments of ansible/test/gce_tests.py. 
-options: - detach_only: - description: - - do not destroy the disk, merely detach it from an instance - required: false - default: "no" - choices: ["yes", "no"] - aliases: [] - instance_name: - description: - - instance name if you wish to attach or detach the disk - required: false - default: null - aliases: [] - mode: - description: - - GCE mount mode of disk, READ_ONLY (default) or READ_WRITE - required: false - default: "READ_ONLY" - choices: ["READ_WRITE", "READ_ONLY"] - aliases: [] - name: - description: - - name of the disk - required: true - default: null - aliases: [] - size_gb: - description: - - whole integer size of disk (in GB) to create, default is 10 GB - required: false - default: 10 - aliases: [] - image: - description: - - the source image to use for the disk - required: false - default: null - aliases: [] - version_added: "1.7" - snapshot: - description: - - the source snapshot to use for the disk - required: false - default: null - aliases: [] - version_added: "1.7" - state: - description: - - desired state of the persistent disk - required: false - default: "present" - choices: ["active", "present", "absent", "deleted"] - aliases: [] - zone: - description: - - zone in which to create the disk - required: false - default: "us-central1-b" - aliases: [] - service_account_email: - version_added: "1.6" - description: - - service account email - required: false - default: null - aliases: [] - pem_file: - version_added: "1.6" - description: - - path to the pem file associated with the service account email - required: false - default: null - aliases: [] - project_id: - version_added: "1.6" - description: - - your GCE project ID - required: false - default: null - aliases: [] - -requirements: [ "libcloud" ] -author: Eric Johnson -''' - -EXAMPLES = ''' -# Simple attachment action to an existing instance -- local_action: - module: gce_pd - instance_name: notlocalhost - size_gb: 5 - name: pd -''' - -import sys - -try: - from libcloud.compute.types import 
Provider - from libcloud.compute.providers import get_driver - from libcloud.common.google import GoogleBaseError, QuotaExceededError, \ - ResourceExistsError, ResourceNotFoundError, ResourceInUseError - _ = Provider.GCE -except ImportError: - print("failed=True " + \ - "msg='libcloud with GCE support is required for this module.'") - sys.exit(1) - - -def main(): - module = AnsibleModule( - argument_spec = dict( - detach_only = dict(type='bool'), - instance_name = dict(), - mode = dict(default='READ_ONLY', choices=['READ_WRITE', 'READ_ONLY']), - name = dict(required=True), - size_gb = dict(default=10), - image = dict(), - snapshot = dict(), - state = dict(default='present'), - zone = dict(default='us-central1-b'), - service_account_email = dict(), - pem_file = dict(), - project_id = dict(), - ) - ) - - gce = gce_connect(module) - - detach_only = module.params.get('detach_only') - instance_name = module.params.get('instance_name') - mode = module.params.get('mode') - name = module.params.get('name') - size_gb = module.params.get('size_gb') - image = module.params.get('image') - snapshot = module.params.get('snapshot') - state = module.params.get('state') - zone = module.params.get('zone') - - if detach_only and not instance_name: - module.fail_json( - msg='Must specify an instance name when detaching a disk', - changed=False) - - disk = inst = None - changed = is_attached = False - - json_output = { 'name': name, 'zone': zone, 'state': state } - if detach_only: - json_output['detach_only'] = True - json_output['detached_from_instance'] = instance_name - - if instance_name: - # user wants to attach/detach from an existing instance - try: - inst = gce.ex_get_node(instance_name, zone) - # is the disk attached? 
- for d in inst.extra['disks']: - if d['deviceName'] == name: - is_attached = True - json_output['attached_mode'] = d['mode'] - json_output['attached_to_instance'] = inst.name - except: - pass - - # find disk if it already exists - try: - disk = gce.ex_get_volume(name) - json_output['size_gb'] = int(disk.size) - except ResourceNotFoundError: - pass - except Exception, e: - module.fail_json(msg=unexpected_error_msg(e), changed=False) - - # user wants a disk to exist. If "instance_name" is supplied the user - # also wants it attached - if state in ['active', 'present']: - - if not size_gb: - module.fail_json(msg="Must supply a size_gb", changed=False) - try: - size_gb = int(round(float(size_gb))) - if size_gb < 1: - raise Exception - except: - module.fail_json(msg="Must supply a size_gb larger than 1 GB", - changed=False) - - if instance_name and inst is None: - module.fail_json(msg='Instance %s does not exist in zone %s' % ( - instance_name, zone), changed=False) - - if not disk: - if image is not None and snapshot is not None: - module.fail_json( - msg='Cannot give both image (%s) and snapshot (%s)' % ( - image, snapshot), changed=False) - lc_image = None - lc_snapshot = None - if image is not None: - lc_image = gce.ex_get_image(image) - elif snapshot is not None: - lc_snapshot = gce.ex_get_snapshot(snapshot) - try: - disk = gce.create_volume( - size_gb, name, location=zone, image=lc_image, - snapshot=lc_snapshot) - except ResourceExistsError: - pass - except QuotaExceededError: - module.fail_json(msg='Requested disk size exceeds quota', - changed=False) - except Exception, e: - module.fail_json(msg=unexpected_error_msg(e), changed=False) - json_output['size_gb'] = size_gb - if image is not None: - json_output['image'] = image - if snapshot is not None: - json_output['snapshot'] = snapshot - changed = True - if inst and not is_attached: - try: - gce.attach_volume(inst, disk, device=name, ex_mode=mode) - except Exception, e: - 
module.fail_json(msg=unexpected_error_msg(e), changed=False) - json_output['attached_to_instance'] = inst.name - json_output['attached_mode'] = mode - changed = True - - # user wants to delete a disk (or perhaps just detach it). - if state in ['absent', 'deleted'] and disk: - - if inst and is_attached: - try: - gce.detach_volume(disk, ex_node=inst) - except Exception, e: - module.fail_json(msg=unexpected_error_msg(e), changed=False) - changed = True - if not detach_only: - try: - gce.destroy_volume(disk) - except ResourceInUseError, e: - module.fail_json(msg=str(e.value), changed=False) - except Exception, e: - module.fail_json(msg=unexpected_error_msg(e), changed=False) - changed = True - - json_output['changed'] = changed - print json.dumps(json_output) - sys.exit(0) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.gce import * - -main() diff --git a/library/cloud/glance_image b/library/cloud/glance_image deleted file mode 100644 index 3bbc6f0ebc..0000000000 --- a/library/cloud/glance_image +++ /dev/null @@ -1,260 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Benno Joy -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . - -DOCUMENTATION = ''' ---- -module: glance_image -version_added: "1.2" -short_description: Add/Delete images from glance -description: - - Add or Remove images from the glance repository. 
-options: - login_username: - description: - - login username to authenticate to keystone - required: true - default: admin - login_password: - description: - - Password of login user - required: true - default: 'yes' - login_tenant_name: - description: - - The tenant name of the login user - required: true - default: 'yes' - auth_url: - description: - - The keystone url for authentication - required: false - default: 'http://127.0.0.1:35357/v2.0/' - region_name: - description: - - Name of the region - required: false - default: None - state: - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present - name: - description: - - Name that has to be given to the image - required: true - default: None - disk_format: - description: - - The format of the disk that is getting uploaded - required: false - default: qcow2 - container_format: - description: - - The format of the container - required: false - default: bare - owner: - description: - - The owner of the image - required: false - default: None - min_disk: - description: - - The minimum disk space required to deploy this image - required: false - default: None - min_ram: - description: - - The minimum ram required to deploy this image - required: false - default: None - is_public: - description: - - Whether the image can be accessed publicly - required: false - default: 'yes' - copy_from: - description: - - A url from where the image can be downloaded, mutually exclusive with file parameter - required: false - default: None - timeout: - description: - - The time to wait for the image process to complete in seconds - required: false - default: 180 - file: - description: - - The path to the file which has to be uploaded, mutually exclusive with copy_from - required: false - default: None - endpoint_type: - description: - - The name of the glance service's endpoint URL type - choices: [publicURL, internalURL] - required: false - default: publicURL - version_added: 
"1.7" -requirements: ["glanceclient", "keystoneclient"] - -''' - -EXAMPLES = ''' -# Upload an image from an HTTP URL -- glance_image: login_username=admin - login_password=passme - login_tenant_name=admin - name=cirros - container_format=bare - disk_format=qcow2 - state=present - copy_from=http:launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img -''' - -import time -try: - import glanceclient - from keystoneclient.v2_0 import client as ksclient -except ImportError: - print("failed=True msg='glanceclient and keystone client are required'") - - -def _get_ksclient(module, kwargs): - try: - client = ksclient.Client(username=kwargs.get('login_username'), - password=kwargs.get('login_password'), - tenant_name=kwargs.get('login_tenant_name'), - auth_url=kwargs.get('auth_url')) - except Exception, e: - module.fail_json(msg="Error authenticating to the keystone: %s " % e.message) - return client - - -def _get_endpoint(module, client, endpoint_type): - try: - endpoint = client.service_catalog.url_for(service_type='image', endpoint_type=endpoint_type) - except Exception, e: - module.fail_json(msg="Error getting endpoint for glance: %s" % e.message) - return endpoint - - -def _get_glance_client(module, kwargs): - _ksclient = _get_ksclient(module, kwargs) - token = _ksclient.auth_token - endpoint =_get_endpoint(module, _ksclient, kwargs.get('endpoint_type')) - kwargs = { - 'token': token, - } - try: - client = glanceclient.Client('1', endpoint, **kwargs) - except Exception, e: - module.fail_json(msg="Error in connecting to glance: %s" % e.message) - return client - - -def _glance_image_present(module, params, client): - try: - for image in client.images.list(): - if image.name == params['name']: - return image.id - return None - except Exception, e: - module.fail_json(msg="Error in fetching image list: %s" % e.message) - - -def _glance_image_create(module, params, client): - kwargs = { - 'name': params.get('name'), - 'disk_format': params.get('disk_format'), 
- 'container_format': params.get('container_format'), - 'owner': params.get('owner'), - 'is_public': params.get('is_public'), - 'copy_from': params.get('copy_from'), - } - try: - timeout = float(params.get('timeout')) - expire = time.time() + timeout - image = client.images.create(**kwargs) - if not params['copy_from']: - image.update(data=open(params['file'], 'rb')) - while time.time() < expire: - image = client.images.get(image.id) - if image.status == 'active': - break - time.sleep(5) - except Exception, e: - module.fail_json(msg="Error in creating image: %s" % e.message) - if image.status == 'active': - module.exit_json(changed=True, result=image.status, id=image.id) - else: - module.fail_json(msg=" The module timed out, please check manually " + image.status) - - -def _glance_delete_image(module, params, client): - try: - for image in client.images.list(): - if image.name == params['name']: - client.images.delete(image) - except Exception, e: - module.fail_json(msg="Error in deleting image: %s" % e.message) - module.exit_json(changed=True, result="Deleted") - - -def main(): - - argument_spec = openstack_argument_spec() - argument_spec.update(dict( - name = dict(required=True), - disk_format = dict(default='qcow2', choices=['aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso']), - container_format = dict(default='bare', choices=['aki', 'ari', 'bare', 'ovf']), - owner = dict(default=None), - min_disk = dict(default=None), - min_ram = dict(default=None), - is_public = dict(default=True), - copy_from = dict(default= None), - timeout = dict(default=180), - file = dict(default=None), - endpoint_type = dict(default='publicURL', choices=['publicURL', 'internalURL']), - state = dict(default='present', choices=['absent', 'present']) - )) - module = AnsibleModule( - argument_spec=argument_spec, - mutually_exclusive = [['file','copy_from']], - ) - if module.params['state'] == 'present': - if not module.params['file'] and not module.params['copy_from']: - 
module.fail_json(msg="Either file or copy_from variable should be set to create the image") - client = _get_glance_client(module, module.params) - id = _glance_image_present(module, module.params, client) - if not id: - _glance_image_create(module, module.params, client) - module.exit_json(changed=False, id=id, result="success") - - if module.params['state'] == 'absent': - client = _get_glance_client(module, module.params) - id = _glance_image_present(module, module.params, client) - if not id: - module.exit_json(changed=False, result="Success") - else: - _glance_delete_image(module, module.params, client) - -# this is magic, see lib/ansible/module.params['common.py -from ansible.module_utils.basic import * -from ansible.module_utils.openstack import * -main() diff --git a/library/cloud/keystone_user b/library/cloud/keystone_user deleted file mode 100644 index 5b412ca800..0000000000 --- a/library/cloud/keystone_user +++ /dev/null @@ -1,394 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Based on Jimmy Tang's implementation - -DOCUMENTATION = ''' ---- -module: keystone_user -version_added: "1.2" -short_description: Manage OpenStack Identity (keystone) users, tenants and roles -description: - - Manage users,tenants, roles from OpenStack. 
-options: - login_user: - description: - - login username to authenticate to keystone - required: false - default: admin - login_password: - description: - - Password of login user - required: false - default: 'yes' - login_tenant_name: - description: - - The tenant login_user belongs to - required: false - default: None - version_added: "1.3" - token: - description: - - The token to be uses in case the password is not specified - required: false - default: None - endpoint: - description: - - The keystone url for authentication - required: false - default: 'http://127.0.0.1:35357/v2.0/' - user: - description: - - The name of the user that has to added/removed from OpenStack - required: false - default: None - password: - description: - - The password to be assigned to the user - required: false - default: None - tenant: - description: - - The tenant name that has be added/removed - required: false - default: None - tenant_description: - description: - - A description for the tenant - required: false - default: None - email: - description: - - An email address for the user - required: false - default: None - role: - description: - - The name of the role to be assigned or created - required: false - default: None - state: - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present -requirements: [ python-keystoneclient ] -author: Lorin Hochstein -''' - -EXAMPLES = ''' -# Create a tenant -- keystone_user: tenant=demo tenant_description="Default Tenant" - -# Create a user -- keystone_user: user=john tenant=demo password=secrete - -# Apply the admin role to the john user in the demo tenant -- keystone_user: role=admin user=john tenant=demo -''' - -try: - from keystoneclient.v2_0 import client -except ImportError: - keystoneclient_found = False -else: - keystoneclient_found = True - - -def authenticate(endpoint, token, login_user, login_password, login_tenant_name): - """Return a keystone client object""" - - if token: - 
return client.Client(endpoint=endpoint, token=token) - else: - return client.Client(auth_url=endpoint, username=login_user, - password=login_password, tenant_name=login_tenant_name) - - -def tenant_exists(keystone, tenant): - """ Return True if tenant already exists""" - return tenant in [x.name for x in keystone.tenants.list()] - - -def user_exists(keystone, user): - """" Return True if user already exists""" - return user in [x.name for x in keystone.users.list()] - - -def get_tenant(keystone, name): - """ Retrieve a tenant by name""" - tenants = [x for x in keystone.tenants.list() if x.name == name] - count = len(tenants) - if count == 0: - raise KeyError("No keystone tenants with name %s" % name) - elif count > 1: - raise ValueError("%d tenants with name %s" % (count, name)) - else: - return tenants[0] - - -def get_user(keystone, name): - """ Retrieve a user by name""" - users = [x for x in keystone.users.list() if x.name == name] - count = len(users) - if count == 0: - raise KeyError("No keystone users with name %s" % name) - elif count > 1: - raise ValueError("%d users with name %s" % (count, name)) - else: - return users[0] - - -def get_role(keystone, name): - """ Retrieve a role by name""" - roles = [x for x in keystone.roles.list() if x.name == name] - count = len(roles) - if count == 0: - raise KeyError("No keystone roles with name %s" % name) - elif count > 1: - raise ValueError("%d roles with name %s" % (count, name)) - else: - return roles[0] - - -def get_tenant_id(keystone, name): - return get_tenant(keystone, name).id - - -def get_user_id(keystone, name): - return get_user(keystone, name).id - - -def ensure_tenant_exists(keystone, tenant_name, tenant_description, - check_mode): - """ Ensure that a tenant exists. - - Return (True, id) if a new tenant was created, (False, None) if it - already existed. 
- """ - - # Check if tenant already exists - try: - tenant = get_tenant(keystone, tenant_name) - except KeyError: - # Tenant doesn't exist yet - pass - else: - if tenant.description == tenant_description: - return (False, tenant.id) - else: - # We need to update the tenant description - if check_mode: - return (True, tenant.id) - else: - tenant.update(description=tenant_description) - return (True, tenant.id) - - # We now know we will have to create a new tenant - if check_mode: - return (True, None) - - ks_tenant = keystone.tenants.create(tenant_name=tenant_name, - description=tenant_description, - enabled=True) - return (True, ks_tenant.id) - - -def ensure_tenant_absent(keystone, tenant, check_mode): - """ Ensure that a tenant does not exist - - Return True if the tenant was removed, False if it didn't exist - in the first place - """ - if not tenant_exists(keystone, tenant): - return False - - # We now know we will have to delete the tenant - if check_mode: - return True - - -def ensure_user_exists(keystone, user_name, password, email, tenant_name, - check_mode): - """ Check if user exists - - Return (True, id) if a new user was created, (False, id) user alrady - exists - """ - - # Check if tenant already exists - try: - user = get_user(keystone, user_name) - except KeyError: - # Tenant doesn't exist yet - pass - else: - # User does exist, we're done - return (False, user.id) - - # We now know we will have to create a new user - if check_mode: - return (True, None) - - tenant = get_tenant(keystone, tenant_name) - - user = keystone.users.create(name=user_name, password=password, - email=email, tenant_id=tenant.id) - return (True, user.id) - - -def ensure_role_exists(keystone, user_name, tenant_name, role_name, - check_mode): - """ Check if role exists - - Return (True, id) if a new role was created or if the role was newly - assigned to the user for the tenant. (False, id) if the role already - exists and was already assigned to the user ofr the tenant. 
- - """ - # Check if the user has the role in the tenant - user = get_user(keystone, user_name) - tenant = get_tenant(keystone, tenant_name) - roles = [x for x in keystone.roles.roles_for_user(user, tenant) - if x.name == role_name] - count = len(roles) - - if count == 1: - # If the role is in there, we are done - role = roles[0] - return (False, role.id) - elif count > 1: - # Too many roles with the same name, throw an error - raise ValueError("%d roles with name %s" % (count, role_name)) - - # At this point, we know we will need to make changes - if check_mode: - return (True, None) - - # Get the role if it exists - try: - role = get_role(keystone, role_name) - except KeyError: - # Role doesn't exist yet - role = keystone.roles.create(role_name) - - # Associate the role with the user in the admin - keystone.roles.add_user_role(user, role, tenant) - return (True, role.id) - - -def ensure_user_absent(keystone, user, check_mode): - raise NotImplementedError("Not yet implemented") - - -def ensure_role_absent(keystone, uesr, tenant, role, check_mode): - raise NotImplementedError("Not yet implemented") - - -def main(): - - argument_spec = openstack_argument_spec() - argument_spec.update(dict( - tenant_description=dict(required=False), - email=dict(required=False), - role=dict(required=False), - state=dict(default='present', choices=['present', 'absent']), - endpoint=dict(required=False, - default="http://127.0.0.1:35357/v2.0"), - token=dict(required=False), - login_user=dict(required=False), - login_password=dict(required=False), - login_tenant_name=dict(required=False) - )) - # keystone operations themselves take an endpoint, not a keystone auth_url - del(argument_spec['auth_url']) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - mutually_exclusive=[['token', 'login_user'], - ['token', 'login_password'], - ['token', 'login_tenant_name']] - ) - - if not keystoneclient_found: - module.fail_json(msg="the python-keystoneclient module 
is required") - - user = module.params['user'] - password = module.params['password'] - tenant = module.params['tenant'] - tenant_description = module.params['tenant_description'] - email = module.params['email'] - role = module.params['role'] - state = module.params['state'] - endpoint = module.params['endpoint'] - token = module.params['token'] - login_user = module.params['login_user'] - login_password = module.params['login_password'] - login_tenant_name = module.params['login_tenant_name'] - - keystone = authenticate(endpoint, token, login_user, login_password, login_tenant_name) - - check_mode = module.check_mode - - try: - d = dispatch(keystone, user, password, tenant, tenant_description, - email, role, state, endpoint, token, login_user, - login_password, check_mode) - except Exception, e: - if check_mode: - # If we have a failure in check mode - module.exit_json(changed=True, - msg="exception: %s" % e) - else: - module.fail_json(msg="exception: %s" % e) - else: - module.exit_json(**d) - - -def dispatch(keystone, user=None, password=None, tenant=None, - tenant_description=None, email=None, role=None, - state="present", endpoint=None, token=None, login_user=None, - login_password=None, check_mode=False): - """ Dispatch to the appropriate method. 
- - Returns a dict that will be passed to exit_json - - tenant user role state - ------ ---- ---- -------- - X present ensure_tenant_exists - X absent ensure_tenant_absent - X X present ensure_user_exists - X X absent ensure_user_absent - X X X present ensure_role_exists - X X X absent ensure_role_absent - - - """ - changed = False - id = None - if tenant and not user and not role and state == "present": - changed, id = ensure_tenant_exists(keystone, tenant, - tenant_description, check_mode) - elif tenant and not user and not role and state == "absent": - changed = ensure_tenant_absent(keystone, tenant, check_mode) - elif tenant and user and not role and state == "present": - changed, id = ensure_user_exists(keystone, user, password, - email, tenant, check_mode) - elif tenant and user and not role and state == "absent": - changed = ensure_user_absent(keystone, user, check_mode) - elif tenant and user and role and state == "present": - changed, id = ensure_role_exists(keystone, user, tenant, role, - check_mode) - elif tenant and user and role and state == "absent": - changed = ensure_role_absent(keystone, user, tenant, role, check_mode) - else: - # Should never reach here - raise ValueError("Code should never reach here") - - return dict(changed=changed, id=id) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.openstack import * -if __name__ == '__main__': - main() diff --git a/library/cloud/linode b/library/cloud/linode deleted file mode 100644 index 9fd265fde0..0000000000 --- a/library/cloud/linode +++ /dev/null @@ -1,493 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: linode -short_description: create / delete / stop / restart an instance in Linode Public Cloud -description: - - creates / deletes a Linode Public Cloud instance and optionally waits for it to be 'running'. -version_added: "1.3" -options: - state: - description: - - Indicate desired state of the resource - choices: ['present', 'active', 'started', 'absent', 'deleted', 'stopped', 'restarted'] - default: present - api_key: - description: - - Linode API key - default: null - name: - description: - - Name to give the instance (alphanumeric, dashes, underscore) - - To keep sanity on the Linode Web Console, name is prepended with LinodeID_ - default: null - type: string - linode_id: - description: - - Unique ID of a linode server - aliases: lid - default: null - type: integer - plan: - description: - - plan to use for the instance (Linode plan) - default: null - type: integer - payment_term: - description: - - payment term to use for the instance (payment term in months) - default: 1 - type: integer - choices: [1, 12, 24] - password: - description: - - root password to apply to a new server (auto generated if missing) - default: null - type: string - ssh_pub_key: - description: - - SSH public key applied to root user - default: null - type: string - swap: - description: - - swap size in MB - default: 512 - type: integer - distribution: - description: - - distribution to use for the instance (Linode Distribution) - default: null - type: integer - datacenter: - description: - - datacenter to create an instance in (Linode Datacenter) - default: null - type: integer - wait: - description: - - 
wait for the instance to be in state 'running' before returning - default: "no" - choices: [ "yes", "no" ] - wait_timeout: - description: - - how long before wait gives up, in seconds - default: 300 -requirements: [ "linode-python", "pycurl" ] -author: Vincent Viallet -notes: - - LINODE_API_KEY env variable can be used instead -''' - -EXAMPLES = ''' -# Create a server -- local_action: - module: linode - api_key: 'longStringFromLinodeApi' - name: linode-test1 - plan: 1 - datacenter: 2 - distribution: 99 - password: 'superSecureRootPassword' - ssh_pub_key: 'ssh-rsa qwerty' - swap: 768 - wait: yes - wait_timeout: 600 - state: present - -# Ensure a running server (create if missing) -- local_action: - module: linode - api_key: 'longStringFromLinodeApi' - name: linode-test1 - linode_id: 12345678 - plan: 1 - datacenter: 2 - distribution: 99 - password: 'superSecureRootPassword' - ssh_pub_key: 'ssh-rsa qwerty' - swap: 768 - wait: yes - wait_timeout: 600 - state: present - -# Delete a server -- local_action: - module: linode - api_key: 'longStringFromLinodeApi' - name: linode-test1 - linode_id: 12345678 - state: absent - -# Stop a server -- local_action: - module: linode - api_key: 'longStringFromLinodeApi' - name: linode-test1 - linode_id: 12345678 - state: stopped - -# Reboot a server -- local_action: - module: linode - api_key: 'longStringFromLinodeApi' - name: linode-test1 - linode_id: 12345678 - state: restarted -''' - -import sys -import time -import os - -try: - import pycurl -except ImportError: - print("failed=True msg='pycurl required for this module'") - sys.exit(1) - - -try: - from linode import api as linode_api -except ImportError: - print("failed=True msg='linode-python required for this module'") - sys.exit(1) - - -def randompass(): - ''' - Generate a long random password that comply to Linode requirements - ''' - # Linode API currently requires the following: - # It must contain at least two of these four character classes: - # lower case letters - upper 
case letters - numbers - punctuation - # we play it safe :) - import random - import string - # as of python 2.4, this reseeds the PRNG from urandom - random.seed() - lower = ''.join(random.choice(string.ascii_lowercase) for x in range(6)) - upper = ''.join(random.choice(string.ascii_uppercase) for x in range(6)) - number = ''.join(random.choice(string.digits) for x in range(6)) - punct = ''.join(random.choice(string.punctuation) for x in range(6)) - p = lower + upper + number + punct - return ''.join(random.sample(p, len(p))) - -def getInstanceDetails(api, server): - ''' - Return the details of an instance, populating IPs, etc. - ''' - instance = {'id': server['LINODEID'], - 'name': server['LABEL'], - 'public': [], - 'private': []} - - # Populate with ips - for ip in api.linode_ip_list(LinodeId=server['LINODEID']): - if ip['ISPUBLIC'] and 'ipv4' not in instance: - instance['ipv4'] = ip['IPADDRESS'] - instance['fqdn'] = ip['RDNS_NAME'] - if ip['ISPUBLIC']: - instance['public'].append({'ipv4': ip['IPADDRESS'], - 'fqdn': ip['RDNS_NAME'], - 'ip_id': ip['IPADDRESSID']}) - else: - instance['private'].append({'ipv4': ip['IPADDRESS'], - 'fqdn': ip['RDNS_NAME'], - 'ip_id': ip['IPADDRESSID']}) - return instance - -def linodeServers(module, api, state, name, plan, distribution, datacenter, linode_id, - payment_term, password, ssh_pub_key, swap, wait, wait_timeout): - instances = [] - changed = False - new_server = False - servers = [] - disks = [] - configs = [] - jobs = [] - - # See if we can match an existing server details with the provided linode_id - if linode_id: - # For the moment we only consider linode_id as criteria for match - # Later we can use more (size, name, etc.) 
and update existing - servers = api.linode_list(LinodeId=linode_id) - # Attempt to fetch details about disks and configs only if servers are - # found with linode_id - if servers: - disks = api.linode_disk_list(LinodeId=linode_id) - configs = api.linode_config_list(LinodeId=linode_id) - - # Act on the state - if state in ('active', 'present', 'started'): - # TODO: validate all the plan / distribution / datacenter are valid - - # Multi step process/validation: - # - need linode_id (entity) - # - need disk_id for linode_id - create disk from distrib - # - need config_id for linode_id - create config (need kernel) - - # Any create step triggers a job that need to be waited for. - if not servers: - for arg in ('name', 'plan', 'distribution', 'datacenter'): - if not eval(arg): - module.fail_json(msg='%s is required for active state' % arg) - # Create linode entity - new_server = True - try: - res = api.linode_create(DatacenterID=datacenter, PlanID=plan, - PaymentTerm=payment_term) - linode_id = res['LinodeID'] - # Update linode Label to match name - api.linode_update(LinodeId=linode_id, Label='%s_%s' % (linode_id, name)) - # Save server - servers = api.linode_list(LinodeId=linode_id) - except Exception, e: - module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE']) - - if not disks: - for arg in ('name', 'linode_id', 'distribution'): - if not eval(arg): - module.fail_json(msg='%s is required for active state' % arg) - # Create disks (1 from distrib, 1 for SWAP) - new_server = True - try: - if not password: - # Password is required on creation, if not provided generate one - password = randompass() - if not swap: - swap = 512 - # Create data disk - size = servers[0]['TOTALHD'] - swap - if ssh_pub_key: - res = api.linode_disk_createfromdistribution( - LinodeId=linode_id, DistributionID=distribution, - rootPass=password, rootSSHKey=ssh_pub_key, - Label='%s data disk (lid: %s)' % (name, linode_id), Size=size) - else: - res = api.linode_disk_createfromdistribution( - 
LinodeId=linode_id, DistributionID=distribution, rootPass=password, - Label='%s data disk (lid: %s)' % (name, linode_id), Size=size) - jobs.append(res['JobID']) - # Create SWAP disk - res = api.linode_disk_create(LinodeId=linode_id, Type='swap', - Label='%s swap disk (lid: %s)' % (name, linode_id), - Size=swap) - jobs.append(res['JobID']) - except Exception, e: - # TODO: destroy linode ? - module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE']) - - if not configs: - for arg in ('name', 'linode_id', 'distribution'): - if not eval(arg): - module.fail_json(msg='%s is required for active state' % arg) - - # Check architecture - for distrib in api.avail_distributions(): - if distrib['DISTRIBUTIONID'] != distribution: - continue - arch = '32' - if distrib['IS64BIT']: - arch = '64' - break - - # Get latest kernel matching arch - for kernel in api.avail_kernels(): - if not kernel['LABEL'].startswith('Latest %s' % arch): - continue - kernel_id = kernel['KERNELID'] - break - - # Get disk list - disks_id = [] - for disk in api.linode_disk_list(LinodeId=linode_id): - if disk['TYPE'] == 'ext3': - disks_id.insert(0, str(disk['DISKID'])) - continue - disks_id.append(str(disk['DISKID'])) - # Trick to get the 9 items in the list - while len(disks_id) < 9: - disks_id.append('') - disks_list = ','.join(disks_id) - - # Create config - new_server = True - try: - api.linode_config_create(LinodeId=linode_id, KernelId=kernel_id, - Disklist=disks_list, Label='%s config' % name) - configs = api.linode_config_list(LinodeId=linode_id) - except Exception, e: - module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE']) - - # Start / Ensure servers are running - for server in servers: - # Refresh server state - server = api.linode_list(LinodeId=server['LINODEID'])[0] - # Ensure existing servers are up and running, boot if necessary - if server['STATUS'] != 1: - res = api.linode_boot(LinodeId=linode_id) - jobs.append(res['JobID']) - changed = True - - # wait here until the instances are up - 
wait_timeout = time.time() + wait_timeout - while wait and wait_timeout > time.time(): - # refresh the server details - server = api.linode_list(LinodeId=server['LINODEID'])[0] - # status: - # -2: Boot failed - # 1: Running - if server['STATUS'] in (-2, 1): - break - time.sleep(5) - if wait and wait_timeout <= time.time(): - # waiting took too long - module.fail_json(msg = 'Timeout waiting on %s (lid: %s)' % - (server['LABEL'], server['LINODEID'])) - # Get a fresh copy of the server details - server = api.linode_list(LinodeId=server['LINODEID'])[0] - if server['STATUS'] == -2: - module.fail_json(msg = '%s (lid: %s) failed to boot' % - (server['LABEL'], server['LINODEID'])) - # From now on we know the task is a success - # Build instance report - instance = getInstanceDetails(api, server) - # depending on wait flag select the status - if wait: - instance['status'] = 'Running' - else: - instance['status'] = 'Starting' - - # Return the root password if this is a new box and no SSH key - # has been provided - if new_server and not ssh_pub_key: - instance['password'] = password - instances.append(instance) - - elif state in ('stopped'): - for arg in ('name', 'linode_id'): - if not eval(arg): - module.fail_json(msg='%s is required for active state' % arg) - - if not servers: - module.fail_json(msg = 'Server %s (lid: %s) not found' % (name, linode_id)) - - for server in servers: - instance = getInstanceDetails(api, server) - if server['STATUS'] != 2: - try: - res = api.linode_shutdown(LinodeId=linode_id) - except Exception, e: - module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE']) - instance['status'] = 'Stopping' - changed = True - else: - instance['status'] = 'Stopped' - instances.append(instance) - - elif state in ('restarted'): - for arg in ('name', 'linode_id'): - if not eval(arg): - module.fail_json(msg='%s is required for active state' % arg) - - if not servers: - module.fail_json(msg = 'Server %s (lid: %s) not found' % (name, linode_id)) - - for server in 
servers: - instance = getInstanceDetails(api, server) - try: - res = api.linode_reboot(LinodeId=server['LINODEID']) - except Exception, e: - module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE']) - instance['status'] = 'Restarting' - changed = True - instances.append(instance) - - elif state in ('absent', 'deleted'): - for server in servers: - instance = getInstanceDetails(api, server) - try: - api.linode_delete(LinodeId=server['LINODEID'], skipChecks=True) - except Exception, e: - module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE']) - instance['status'] = 'Deleting' - changed = True - instances.append(instance) - - # Ease parsing if only 1 instance - if len(instances) == 1: - module.exit_json(changed=changed, instance=instances[0]) - module.exit_json(changed=changed, instances=instances) - -def main(): - module = AnsibleModule( - argument_spec = dict( - state = dict(default='present', choices=['active', 'present', 'started', - 'deleted', 'absent', 'stopped', - 'restarted']), - api_key = dict(), - name = dict(type='str'), - plan = dict(type='int'), - distribution = dict(type='int'), - datacenter = dict(type='int'), - linode_id = dict(type='int', aliases=['lid']), - payment_term = dict(type='int', default=1, choices=[1, 12, 24]), - password = dict(type='str'), - ssh_pub_key = dict(type='str'), - swap = dict(type='int', default=512), - wait = dict(type='bool', default=True), - wait_timeout = dict(default=300), - ) - ) - - state = module.params.get('state') - api_key = module.params.get('api_key') - name = module.params.get('name') - plan = module.params.get('plan') - distribution = module.params.get('distribution') - datacenter = module.params.get('datacenter') - linode_id = module.params.get('linode_id') - payment_term = module.params.get('payment_term') - password = module.params.get('password') - ssh_pub_key = module.params.get('ssh_pub_key') - swap = module.params.get('swap') - wait = module.params.get('wait') - wait_timeout = 
int(module.params.get('wait_timeout')) - - # Setup the api_key - if not api_key: - try: - api_key = os.environ['LINODE_API_KEY'] - except KeyError, e: - module.fail_json(msg = 'Unable to load %s' % e.message) - - # setup the auth - try: - api = linode_api.Api(api_key) - api.test_echo() - except Exception, e: - module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE']) - - linodeServers(module, api, state, name, plan, distribution, datacenter, linode_id, - payment_term, password, ssh_pub_key, swap, wait, wait_timeout) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/cloud/nova_compute b/library/cloud/nova_compute deleted file mode 100644 index 42c54753fb..0000000000 --- a/library/cloud/nova_compute +++ /dev/null @@ -1,585 +0,0 @@ -#!/usr/bin/python -#coding: utf-8 -*- - -# (c) 2013, Benno Joy -# (c) 2013, John Dewey -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . - -import operator -import os - -try: - from novaclient.v1_1 import client as nova_client - from novaclient.v1_1 import floating_ips - from novaclient import exceptions - from novaclient import utils - import time -except ImportError: - print("failed=True msg='novaclient is required for this module'") - -DOCUMENTATION = ''' ---- -module: nova_compute -version_added: "1.2" -short_description: Create/Delete VMs from OpenStack -description: - - Create or Remove virtual machines from Openstack. 
-options: - login_username: - description: - - login username to authenticate to keystone - required: true - default: admin - login_password: - description: - - Password of login user - required: true - default: 'yes' - login_tenant_name: - description: - - The tenant name of the login user - required: true - default: 'yes' - auth_url: - description: - - The keystone url for authentication - required: false - default: 'http://127.0.0.1:35357/v2.0/' - region_name: - description: - - Name of the region - required: false - default: None - state: - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present - name: - description: - - Name that has to be given to the instance - required: true - default: None - image_id: - description: - - The id of the base image to boot. Mutually exclusive with image_name - required: true - default: None - image_name: - description: - - The name of the base image to boot. Mutually exclusive with image_id - required: true - default: None - version_added: "1.8" - image_exclude: - description: - - Text to use to filter image names, for the case, such as HP, where there are multiple image names matching the common identifying portions. image_exclude is a negative match filter - it is text that may not exist in the image name. Defaults to "(deprecated)" - version_added: "1.8" - flavor_id: - description: - - The id of the flavor in which the new VM has to be created. Mutually exclusive with flavor_ram - required: false - default: 1 - flavor_ram: - description: - - The minimum amount of ram in MB that the flavor in which the new VM has to be created must have. Mutually exclusive with flavor_id - required: false - default: 1 - version_added: "1.8" - flavor_include: - description: - - Text to use to filter flavor names, for the case, such as Rackspace, where there are multiple flavors that have the same ram count. flavor_include is a positive match filter - it must exist in the flavor name. 
- version_added: "1.8" - key_name: - description: - - The key pair name to be used when creating a VM - required: false - default: None - security_groups: - description: - - The name of the security group to which the VM should be added - required: false - default: None - nics: - description: - - A list of network id's to which the VM's interface should be attached - required: false - default: None - auto_floating_ip: - description: - - Should a floating ip be auto created and assigned - required: false - default: 'yes' - version_added: "1.8" - floating_ips: - decription: - - list of valid floating IPs that pre-exist to assign to this node - required: false - default: None - version_added: "1.8" - floating_ip_pools: - description: - - list of floating IP pools from which to choose a floating IP - required: false - default: None - version_added: "1.8" - availability_zone: - description: - - Name of the availability zone - required: false - default: None - version_added: "1.8" - meta: - description: - - A list of key value pairs that should be provided as a metadata to the new VM - required: false - default: None - wait: - description: - - If the module should wait for the VM to be created. 
- required: false - default: 'yes' - wait_for: - description: - - The amount of time the module should wait for the VM to get into active state - required: false - default: 180 - config_drive: - description: - - Whether to boot the server with config drive enabled - required: false - default: 'no' - version_added: "1.8" - user_data: - description: - - Opaque blob of data which is made available to the instance - required: false - default: None - version_added: "1.6" -requirements: ["novaclient"] -''' - -EXAMPLES = ''' -# Creates a new VM and attaches to a network and passes metadata to the instance -- nova_compute: - state: present - login_username: admin - login_password: admin - login_tenant_name: admin - name: vm1 - image_id: 4f905f38-e52a-43d2-b6ec-754a13ffb529 - key_name: ansible_key - wait_for: 200 - flavor_id: 4 - nics: - - net-id: 34605f38-e52a-25d2-b6ec-754a13ffb723 - meta: - hostname: test1 - group: uge_master - -# Creates a new VM in HP Cloud AE1 region availability zone az2 and automatically assigns a floating IP -- name: launch a nova instance - hosts: localhost - tasks: - - name: launch an instance - nova_compute: - state: present - login_username: username - login_password: Equality7-2521 - login_tenant_name: username-project1 - name: vm1 - auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/ - region_name: region-b.geo-1 - availability_zone: az2 - image_id: 9302692b-b787-4b52-a3a6-daebb79cb498 - key_name: test - wait_for: 200 - flavor_id: 101 - security_groups: default - auto_floating_ip: yes - -# Creates a new VM in HP Cloud AE1 region availability zone az2 and assigns a pre-known floating IP -- name: launch a nova instance - hosts: localhost - tasks: - - name: launch an instance - nova_compute: - state: present - login_username: username - login_password: Equality7-2521 - login_tenant_name: username-project1 - name: vm1 - auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/ - region_name: region-b.geo-1 - 
availability_zone: az2 - image_id: 9302692b-b787-4b52-a3a6-daebb79cb498 - key_name: test - wait_for: 200 - flavor_id: 101 - floating-ips: - - 12.34.56.79 - -# Creates a new VM with 4G of RAM on Ubuntu Trusty, ignoring deprecated images -- name: launch a nova instance - hosts: localhost - tasks: - - name: launch an instance - nova_compute: - name: vm1 - state: present - login_username: username - login_password: Equality7-2521 - login_tenant_name: username-project1 - auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/ - region_name: region-b.geo-1 - image_name: Ubuntu Server 14.04 - image_exclude: deprecated - flavor_ram: 4096 - -# Creates a new VM with 4G of RAM on Ubuntu Trusty on a Rackspace Performance node in DFW -- name: launch a nova instance - hosts: localhost - tasks: - - name: launch an instance - nova_compute: - name: vm1 - state: present - login_username: username - login_password: Equality7-2521 - login_tenant_name: username-project1 - auth_url: https://identity.api.rackspacecloud.com/v2.0/ - region_name: DFW - image_name: Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM) - flavor_ram: 4096 - flavor_include: Performance -''' - - - -def _delete_server(module, nova): - name = None - server_list = None - try: - server_list = nova.servers.list(True, {'name': module.params['name']}) - if server_list: - server = [x for x in server_list if x.name == module.params['name']] - nova.servers.delete(server.pop()) - except Exception, e: - module.fail_json( msg = "Error in deleting vm: %s" % e.message) - if module.params['wait'] == 'no': - module.exit_json(changed = True, result = "deleted") - expire = time.time() + int(module.params['wait_for']) - while time.time() < expire: - name = nova.servers.list(True, {'name': module.params['name']}) - if not name: - module.exit_json(changed = True, result = "deleted") - time.sleep(5) - module.fail_json(msg = "Timed out waiting for server to get deleted, please check manually") - - -def _add_floating_ip_from_pool(module, 
nova, server): - - # instantiate FloatingIPManager object - floating_ip_obj = floating_ips.FloatingIPManager(nova) - - # empty dict and list - usable_floating_ips = {} - pools = [] - - # user specified - pools = module.params['floating_ip_pools'] - - # get the list of all floating IPs. Mileage may - # vary according to Nova Compute configuration - # per cloud provider - all_floating_ips = floating_ip_obj.list() - - # iterate through all pools of IP address. Empty - # string means all and is the default value - for pool in pools: - # temporary list per pool - pool_ips = [] - # loop through all floating IPs - for f_ip in all_floating_ips: - # if not reserved and the correct pool, add - if f_ip.instance_id is None and (f_ip.pool == pool): - pool_ips.append(f_ip.ip) - # only need one - break - - # if the list is empty, add for this pool - if not pool_ips: - try: - new_ip = nova.floating_ips.create(pool) - except Exception, e: - module.fail_json(msg = "Unable to create floating ip") - pool_ips.append(new_ip.ip) - # Add to the main list - usable_floating_ips[pool] = pool_ips - - # finally, add ip(s) to instance for each pool - for pool in usable_floating_ips: - for ip in usable_floating_ips[pool]: - try: - server.add_floating_ip(ip) - # We only need to assign one ip - but there is an inherent - # race condition and some other cloud operation may have - # stolen an available floating ip - break - except Exception, e: - module.fail_json(msg = "Error attaching IP %s to instance %s: %s " % (ip, server.id, e.message)) - - -def _add_floating_ip_list(module, server, ips): - # add ip(s) to instance - for ip in ips: - try: - server.add_floating_ip(ip) - except Exception, e: - module.fail_json(msg = "Error attaching IP %s to instance %s: %s " % (ip, server.id, e.message)) - - -def _add_auto_floating_ip(module, nova, server): - - try: - new_ip = nova.floating_ips.create() - except Exception as e: - module.fail_json(msg = "Unable to create floating ip: %s" % (e.message)) - - try: - 
server.add_floating_ip(new_ip) - except Exception as e: - # Clean up - we auto-created this ip, and it's not attached - # to the server, so the cloud will not know what to do with it - server.floating_ips.delete(new_ip) - module.fail_json(msg = "Error attaching IP %s to instance %s: %s " % (ip, server.id, e.message)) - - -def _add_floating_ip(module, nova, server): - - if module.params['floating_ip_pools']: - _add_floating_ip_from_pool(module, nova, server) - elif module.params['floating_ips']: - _add_floating_ip_list(module, server, module.params['floating_ips']) - elif module.params['auto_floating_ip']: - _add_auto_floating_ip(module, nova, server) - else: - return server - - # this may look redundant, but if there is now a - # floating IP, then it needs to be obtained from - # a recent server object if the above code path exec'd - try: - server = nova.servers.get(server.id) - except Exception, e: - module.fail_json(msg = "Error in getting info from instance: %s " % e.message) - return server - - -def _get_image_id(module, nova): - if module.params['image_name']: - for image in nova.images.list(): - if (module.params['image_name'] in image.name and ( - not module.params['image_exclude'] - or module.params['image_exclude'] not in image.name)): - return image.id - module.fail_json(msg = "Error finding image id from name(%s)" % module.params['image_name']) - return module.params['image_id'] - - -def _get_flavor_id(module, nova): - if module.params['flavor_ram']: - for flavor in sorted(nova.flavors.list(), key=operator.attrgetter('ram')): - if (flavor.ram >= module.params['flavor_ram'] and - (not module.params['flavor_include'] or module.params['flavor_include'] in flavor.name)): - return flavor.id - module.fail_json(msg = "Error finding flavor with %sMB of RAM" % module.params['flavor_ram']) - return module.params['flavor_id'] - - -def _create_server(module, nova): - image_id = _get_image_id(module, nova) - flavor_id = _get_flavor_id(module, nova) - bootargs = 
[module.params['name'], image_id, flavor_id] - bootkwargs = { - 'nics' : module.params['nics'], - 'meta' : module.params['meta'], - 'security_groups': module.params['security_groups'].split(','), - #userdata is unhyphenated in novaclient, but hyphenated here for consistency with the ec2 module: - 'userdata': module.params['user_data'], - 'config_drive': module.params['config_drive'], - } - - for optional_param in ('region_name', 'key_name', 'availability_zone'): - if module.params[optional_param]: - bootkwargs[optional_param] = module.params[optional_param] - try: - server = nova.servers.create(*bootargs, **bootkwargs) - server = nova.servers.get(server.id) - except Exception, e: - module.fail_json( msg = "Error in creating instance: %s " % e.message) - if module.params['wait'] == 'yes': - expire = time.time() + int(module.params['wait_for']) - while time.time() < expire: - try: - server = nova.servers.get(server.id) - except Exception, e: - module.fail_json( msg = "Error in getting info from instance: %s" % e.message) - if server.status == 'ACTIVE': - server = _add_floating_ip(module, nova, server) - - private = openstack_find_nova_addresses(getattr(server, 'addresses'), 'fixed', 'private') - public = openstack_find_nova_addresses(getattr(server, 'addresses'), 'floating', 'public') - - # now exit with info - module.exit_json(changed = True, id = server.id, private_ip=''.join(private), public_ip=''.join(public), status = server.status, info = server._info) - - if server.status == 'ERROR': - module.fail_json(msg = "Error in creating the server, please check logs") - time.sleep(2) - - module.fail_json(msg = "Timeout waiting for the server to come up.. Please check manually") - if server.status == 'ERROR': - module.fail_json(msg = "Error in creating the server.. 
Please check manually") - private = openstack_find_nova_addresses(getattr(server, 'addresses'), 'fixed', 'private') - public = openstack_find_nova_addresses(getattr(server, 'addresses'), 'floating', 'public') - - module.exit_json(changed = True, id = info['id'], private_ip=''.join(private), public_ip=''.join(public), status = server.status, info = server._info) - - -def _delete_floating_ip_list(module, nova, server, extra_ips): - for ip in extra_ips: - nova.servers.remove_floating_ip(server=server.id, address=ip) - - -def _check_floating_ips(module, nova, server): - changed = False - if module.params['floating_ip_pools'] or module.params['floating_ips'] or module.params['auto_floating_ip']: - ips = openstack_find_nova_addresses(server.addresses, 'floating') - if not ips: - # If we're configured to have a floating but we don't have one, - # let's add one - server = _add_floating_ip(module, nova, server) - changed = True - elif module.params['floating_ips']: - # we were configured to have specific ips, let's make sure we have - # those - missing_ips = [] - for ip in module.params['floating_ips']: - if ip not in ips: - missing_ips.append(ip) - if missing_ips: - server = _add_floating_ip_list(module, server, missing_ips) - changed = True - extra_ips = [] - for ip in ips: - if ip not in module.params['floating_ips']: - extra_ips.append(ip) - if extra_ips: - _delete_floating_ip_list(module, server, extra_ips) - changed = True - return (changed, server) - - -def _get_server_state(module, nova): - server = None - try: - servers = nova.servers.list(True, {'name': module.params['name']}) - if servers: - # the {'name': module.params['name']} will also return servers - # with names that partially match the server name, so we have to - # strictly filter here - servers = [x for x in servers if x.name == module.params['name']] - if servers: - server = servers[0] - except Exception, e: - module.fail_json(msg = "Error in getting the server list: %s" % e.message) - if server and 
module.params['state'] == 'present': - if server.status != 'ACTIVE': - module.fail_json( msg="The VM is available but not Active. state:" + server.status) - (ip_changed, server) = _check_floating_ips(module, nova, server) - private = openstack_find_nova_addresses(getattr(server, 'addresses'), 'fixed', 'private') - public = openstack_find_nova_addresses(getattr(server, 'addresses'), 'floating', 'public') - module.exit_json(changed = ip_changed, id = server.id, public_ip = ''.join(public), private_ip = ''.join(private), info = server._info) - if server and module.params['state'] == 'absent': - return True - if module.params['state'] == 'absent': - module.exit_json(changed = False, result = "not present") - return True - - - -def main(): - argument_spec = openstack_argument_spec() - argument_spec.update(dict( - name = dict(required=True), - image_id = dict(default=None), - image_name = dict(default=None), - image_exclude = dict(default='(deprecated)'), - flavor_id = dict(default=1), - flavor_ram = dict(default=None, type='int'), - flavor_include = dict(default=None), - key_name = dict(default=None), - security_groups = dict(default='default'), - nics = dict(default=None), - meta = dict(default=None), - wait = dict(default='yes', choices=['yes', 'no']), - wait_for = dict(default=180), - state = dict(default='present', choices=['absent', 'present']), - user_data = dict(default=None), - config_drive = dict(default=False, type='bool'), - auto_floating_ip = dict(default=False, type='bool'), - floating_ips = dict(default=None), - floating_ip_pools = dict(default=None), - )) - module = AnsibleModule( - argument_spec=argument_spec, - mutually_exclusive=[ - ['auto_floating_ip','floating_ips'], - ['auto_floating_ip','floating_ip_pools'], - ['floating_ips','floating_ip_pools'], - ['image_id','image_name'], - ['flavor_id','flavor_ram'], - ], - ) - - nova = nova_client.Client(module.params['login_username'], - module.params['login_password'], - module.params['login_tenant_name'], 
- module.params['auth_url'], - region_name=module.params['region_name'], - service_type='compute') - try: - nova.authenticate() - except exceptions.Unauthorized, e: - module.fail_json(msg = "Invalid OpenStack Nova credentials.: %s" % e.message) - except exceptions.AuthorizationFailure, e: - module.fail_json(msg = "Unable to authorize user: %s" % e.message) - - if module.params['state'] == 'present': - if not module.params['image_id'] and not module.params['image_name']: - module.fail_json( msg = "Parameter 'image_id' or `image_name` is required if state == 'present'") - else: - _get_server_state(module, nova) - _create_server(module, nova) - if module.params['state'] == 'absent': - _get_server_state(module, nova) - _delete_server(module, nova) - -# this is magic, see lib/ansible/module_common.py -from ansible.module_utils.basic import * -from ansible.module_utils.openstack import * -main() - diff --git a/library/cloud/nova_keypair b/library/cloud/nova_keypair deleted file mode 100644 index c7c9affb3e..0000000000 --- a/library/cloud/nova_keypair +++ /dev/null @@ -1,139 +0,0 @@ -#!/usr/bin/python -#coding: utf-8 -*- - -# (c) 2013, Benno Joy -# (c) 2013, John Dewey -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . 
- -try: - from novaclient.v1_1 import client as nova_client - from novaclient import exceptions as exc - import time -except ImportError: - print("failed=True msg='novaclient is required for this module to work'") - -DOCUMENTATION = ''' ---- -module: nova_keypair -version_added: "1.2" -short_description: Add/Delete key pair from nova -description: - - Add or Remove key pair from nova . -options: - login_username: - description: - - login username to authenticate to keystone - required: true - default: admin - login_password: - description: - - Password of login user - required: true - default: 'yes' - login_tenant_name: - description: - - The tenant name of the login user - required: true - default: 'yes' - auth_url: - description: - - The keystone url for authentication - required: false - default: 'http://127.0.0.1:35357/v2.0/' - region_name: - description: - - Name of the region - required: false - default: None - state: - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present - name: - description: - - Name that has to be given to the key pair - required: true - default: None - public_key: - description: - - The public key that would be uploaded to nova and injected to vm's upon creation - required: false - default: None - -requirements: ["novaclient"] -''' -EXAMPLES = ''' -# Creates a key pair with the running users public key -- nova_keypair: state=present login_username=admin - login_password=admin login_tenant_name=admin name=ansible_key - public_key={{ lookup('file','~/.ssh/id_rsa.pub') }} - -# Creates a new key pair and the private key returned after the run. 
-- nova_keypair: state=present login_username=admin login_password=admin - login_tenant_name=admin name=ansible_key -''' - -def main(): - argument_spec = openstack_argument_spec() - argument_spec.update(dict( - name = dict(required=True), - public_key = dict(default=None), - state = dict(default='present', choices=['absent', 'present']) - )) - module = AnsibleModule(argument_spec=argument_spec) - - nova = nova_client.Client(module.params['login_username'], - module.params['login_password'], - module.params['login_tenant_name'], - module.params['auth_url'], - region_name=module.params['region_name'], - service_type='compute') - try: - nova.authenticate() - except exc.Unauthorized, e: - module.fail_json(msg = "Invalid OpenStack Nova credentials.: %s" % e.message) - except exc.AuthorizationFailure, e: - module.fail_json(msg = "Unable to authorize user: %s" % e.message) - - if module.params['state'] == 'present': - for key in nova.keypairs.list(): - if key.name == module.params['name']: - if module.params['public_key'] and (module.params['public_key'] != key.public_key ): - module.fail_json(msg = "name {} present but key hash not the same as offered. 
Delete key first.".format(key['name'])) - else: - module.exit_json(changed = False, result = "Key present") - try: - key = nova.keypairs.create(module.params['name'], module.params['public_key']) - except Exception, e: - module.exit_json(msg = "Error in creating the keypair: %s" % e.message) - if not module.params['public_key']: - module.exit_json(changed = True, key = key.private_key) - module.exit_json(changed = True, key = None) - if module.params['state'] == 'absent': - for key in nova.keypairs.list(): - if key.name == module.params['name']: - try: - nova.keypairs.delete(module.params['name']) - except Exception, e: - module.fail_json(msg = "The keypair deletion has failed: %s" % e.message) - module.exit_json( changed = True, result = "deleted") - module.exit_json(changed = False, result = "not present") - -# this is magic, see lib/ansible/module.params['common.py -from ansible.module_utils.basic import * -from ansible.module_utils.openstack import * -main() - diff --git a/library/cloud/ovirt b/library/cloud/ovirt deleted file mode 100755 index fb84e91800..0000000000 --- a/library/cloud/ovirt +++ /dev/null @@ -1,425 +0,0 @@ -#!/usr/bin/python - -# (c) 2013, Vincent Van der Kussen -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -DOCUMENTATION = ''' ---- -module: ovirt -author: Vincent Van der Kussen -short_description: oVirt/RHEV platform management -description: - - allows you to create new instances, either from scratch or an image, in addition to deleting or stopping instances on the oVirt/RHEV platform -version_added: "1.4" -options: - user: - description: - - the user to authenticate with - default: null - required: true - aliases: [] - url: - description: - - the url of the oVirt instance - default: null - required: true - aliases: [] - instance_name: - description: - - the name of the instance to use - default: null - required: true - aliases: [ vmname ] - password: - description: - - password of the user to authenticate with - default: null - required: true - aliases: [] - image: - description: - - template to use for the instance - default: null - required: false - aliases: [] - resource_type: - description: - - whether you want to deploy an image or create an instance from scratch. - default: null - required: false - aliases: [] - choices: [ 'new', 'template' ] - zone: - description: - - deploy the image to this oVirt cluster - default: null - required: false - aliases: [] - instance_disksize: - description: - - size of the instance's disk in GB - default: null - required: false - aliases: [ vm_disksize] - instance_cpus: - description: - - the instance's number of cpu's - default: 1 - required: false - aliases: [ vmcpus ] - instance_nic: - description: - - name of the network interface in oVirt/RHEV - default: null - required: false - aliases: [ vmnic ] - instance_network: - description: - - the logical network the machine should belong to - default: rhevm - required: false - aliases: [ vmnetwork ] - instance_mem: - description: - - the instance's amount of memory in MB - default: null - required: false - aliases: [ vmmem ] - instance_type: - description: - - define if the instance is a server or desktop - default: server - required: false - aliases: [ vmtype ] - choices: [ 
'server', 'desktop' ] - disk_alloc: - description: - - define if disk is thin or preallocated - default: thin - required: false - aliases: [] - choices: [ 'thin', 'preallocated' ] - disk_int: - description: - - interface type of the disk - default: virtio - required: false - aliases: [] - choices: [ 'virtio', 'ide' ] - instance_os: - description: - - type of Operating System - default: null - required: false - aliases: [ vmos ] - instance_cores: - description: - - define the instance's number of cores - default: 1 - required: false - aliases: [ vmcores ] - sdomain: - description: - - the Storage Domain where you want to create the instance's disk on. - default: null - required: false - aliases: [] - region: - description: - - the oVirt/RHEV datacenter where you want to deploy to - default: null - required: false - aliases: [] - state: - description: - - create, terminate or remove instances - default: 'present' - required: false - aliases: [] - choices: ['present', 'absent', 'shutdown', 'started', 'restarted'] - -requirements: [ "ovirt-engine-sdk" ] -''' -EXAMPLES = ''' -# Basic example provisioning from image. 
- -action: ovirt > - user=admin@internal - url=https://ovirt.example.com - instance_name=ansiblevm04 - password=secret - image=centos_64 - zone=cluster01 - resource_type=template" - -# Full example to create new instance from scratch -action: ovirt > - instance_name=testansible - resource_type=new - instance_type=server - user=admin@internal - password=secret - url=https://ovirt.example.com - instance_disksize=10 - zone=cluster01 - region=datacenter1 - instance_cpus=1 - instance_nic=nic1 - instance_network=rhevm - instance_mem=1000 - disk_alloc=thin - sdomain=FIBER01 - instance_cores=1 - instance_os=rhel_6x64 - disk_int=virtio" - -# stopping an instance -action: ovirt > - instance_name=testansible - state=stopped - user=admin@internal - password=secret - url=https://ovirt.example.com - -# starting an instance -action: ovirt > - instance_name=testansible - state=started - user=admin@internal - password=secret - url=https://ovirt.example.com - - -''' -try: - from ovirtsdk.api import API - from ovirtsdk.xml import params -except ImportError: - print "failed=True msg='ovirtsdk required for this module'" - sys.exit(1) - -# ------------------------------------------------------------------- # -# create connection with API -# -def conn(url, user, password): - api = API(url=url, username=user, password=password, insecure=True) - try: - value = api.test() - except: - print "error connecting to the oVirt API" - sys.exit(1) - return api - -# ------------------------------------------------------------------- # -# Create VM from scratch -def create_vm(conn, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int): - if vmdisk_alloc == 'thin': - # define VM params - vmparams = params.VM(name=vmname,cluster=conn.clusters.get(name=zone),os=params.OperatingSystem(type_=vmos),template=conn.templates.get(name="Blank"),memory=1024 * 1024 * int(vmmem),cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores))), 
type_=vmtype) - # define disk params - vmdisk= params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=True, interface=vmdisk_int, type_="System", format='cow', - storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)])) - # define network parameters - network_net = params.Network(name=vmnetwork) - nic_net1 = params.NIC(name='nic1', network=network_net, interface='virtio') - elif vmdisk_alloc == 'preallocated': - # define VM params - vmparams = params.VM(name=vmname,cluster=conn.clusters.get(name=zone),os=params.OperatingSystem(type_=vmos),template=conn.templates.get(name="Blank"),memory=1024 * 1024 * int(vmmem),cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores))) ,type_=vmtype) - # define disk params - vmdisk= params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=False, interface=vmdisk_int, type_="System", format='raw', - storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)])) - # define network parameters - network_net = params.Network(name=vmnetwork) - nic_net1 = params.NIC(name=vmnic, network=network_net, interface='virtio') - - try: - conn.vms.add(vmparams) - except: - print "Error creating VM with specified parameters" - sys.exit(1) - vm = conn.vms.get(name=vmname) - try: - vm.disks.add(vmdisk) - except: - print "Error attaching disk" - try: - vm.nics.add(nic_net1) - except: - print "Error adding nic" - - -# create an instance from a template -def create_vm_template(conn, vmname, image, zone): - vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), template=conn.templates.get(name=image),disks=params.Disks(clone=True)) - try: - conn.vms.add(vmparams) - except: - print 'error adding template %s' % image - sys.exit(1) - - -# start instance -def vm_start(conn, vmname): - vm = conn.vms.get(name=vmname) - vm.start() - -# Stop instance -def vm_stop(conn, vmname): - vm = conn.vms.get(name=vmname) - 
vm.stop() - -# restart instance -def vm_restart(conn, vmname): - state = vm_status(conn, vmname) - vm = conn.vms.get(name=vmname) - vm.stop() - while conn.vms.get(vmname).get_status().get_state() != 'down': - time.sleep(5) - vm.start() - -# remove an instance -def vm_remove(conn, vmname): - vm = conn.vms.get(name=vmname) - vm.delete() - -# ------------------------------------------------------------------- # -# VM statuses -# -# Get the VMs status -def vm_status(conn, vmname): - status = conn.vms.get(name=vmname).status.state - print "vm status is : %s" % status - return status - - -# Get VM object and return it's name if object exists -def get_vm(conn, vmname): - vm = conn.vms.get(name=vmname) - if vm == None: - name = "empty" - print "vmname: %s" % name - else: - name = vm.get_name() - print "vmname: %s" % name - return name - -# ------------------------------------------------------------------- # -# Hypervisor operations -# -# not available yet -# ------------------------------------------------------------------- # -# Main - -def main(): - - module = AnsibleModule( - argument_spec = dict( - state = dict(default='present', choices=['present', 'absent', 'shutdown', 'started', 'restart']), - #name = dict(required=True), - user = dict(required=True), - url = dict(required=True), - instance_name = dict(required=True, aliases=['vmname']), - password = dict(required=True), - image = dict(), - resource_type = dict(choices=['new', 'template']), - zone = dict(), - instance_disksize = dict(aliases=['vm_disksize']), - instance_cpus = dict(default=1, aliases=['vmcpus']), - instance_nic = dict(aliases=['vmnic']), - instance_network = dict(default='rhevm', aliases=['vmnetwork']), - instance_mem = dict(aliases=['vmmem']), - instance_type = dict(default='server', aliases=['vmtype'], choices=['server', 'desktop']), - disk_alloc = dict(default='thin', choices=['thin', 'preallocated']), - disk_int = dict(default='virtio', choices=['virtio', 'ide']), - instance_os = 
dict(aliases=['vmos']), - instance_cores = dict(default=1, aliases=['vmcores']), - sdomain = dict(), - region = dict(), - ) - ) - - state = module.params['state'] - user = module.params['user'] - url = module.params['url'] - vmname = module.params['instance_name'] - password = module.params['password'] - image = module.params['image'] # name of the image to deploy - resource_type = module.params['resource_type'] # template or from scratch - zone = module.params['zone'] # oVirt cluster - vmdisk_size = module.params['instance_disksize'] # disksize - vmcpus = module.params['instance_cpus'] # number of cpu - vmnic = module.params['instance_nic'] # network interface - vmnetwork = module.params['instance_network'] # logical network - vmmem = module.params['instance_mem'] # mem size - vmdisk_alloc = module.params['disk_alloc'] # thin, preallocated - vmdisk_int = module.params['disk_int'] # disk interface virtio or ide - vmos = module.params['instance_os'] # Operating System - vmtype = module.params['instance_type'] # server or desktop - vmcores = module.params['instance_cores'] # number of cores - sdomain = module.params['sdomain'] # storage domain to store disk on - region = module.params['region'] # oVirt Datacenter - #initialize connection - c = conn(url+"/api", user, password) - - if state == 'present': - if get_vm(c, vmname) == "empty": - if resource_type == 'template': - create_vm_template(c, vmname, image, zone) - module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmname,image)) - elif resource_type == 'new': - # FIXME: refactor, use keyword args. 
- create_vm(c, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int) - module.exit_json(changed=True, msg="deployed VM %s from scratch" % vmname) - else: - module.exit_json(changed=False, msg="You did not specify a resource type") - else: - module.exit_json(changed=False, msg="VM %s already exists" % vmname) - - if state == 'started': - if vm_status(c, vmname) == 'up': - module.exit_json(changed=False, msg="VM %s is already running" % vmname) - else: - vm_start(c, vmname) - module.exit_json(changed=True, msg="VM %s started" % vmname) - - if state == 'shutdown': - if vm_status(c, vmname) == 'down': - module.exit_json(changed=False, msg="VM %s is already shutdown" % vmname) - else: - vm_stop(c, vmname) - module.exit_json(changed=True, msg="VM %s is shutting down" % vmname) - - if state == 'restart': - if vm_status(c, vmname) == 'up': - vm_restart(c, vmname) - module.exit_json(changed=True, msg="VM %s is restarted" % vmname) - else: - module.exit_json(changed=False, msg="VM %s is not running" % vmname) - - if state == 'absent': - if get_vm(c, vmname) == "empty": - module.exit_json(changed=False, msg="VM %s does not exist" % vmname) - else: - vm_remove(c, vmname) - module.exit_json(changed=True, msg="VM %s removed" % vmname) - - - - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/cloud/quantum_floating_ip b/library/cloud/quantum_floating_ip deleted file mode 100644 index 17f78effff..0000000000 --- a/library/cloud/quantum_floating_ip +++ /dev/null @@ -1,266 +0,0 @@ -#!/usr/bin/python -#coding: utf-8 -*- - -# (c) 2013, Benno Joy -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . - -try: - from novaclient.v1_1 import client as nova_client - try: - from neutronclient.neutron import client - except ImportError: - from quantumclient.quantum import client - from keystoneclient.v2_0 import client as ksclient - import time -except ImportError: - print("failed=True msg='novaclient,keystoneclient and quantumclient (or neutronclient) are required'") - -DOCUMENTATION = ''' ---- -module: quantum_floating_ip -version_added: "1.2" -short_description: Add/Remove floating IP from an instance -description: - - Add or Remove a floating IP to an instance -options: - login_username: - description: - - login username to authenticate to keystone - required: true - default: admin - login_password: - description: - - Password of login user - required: true - default: 'yes' - login_tenant_name: - description: - - The tenant name of the login user - required: true - default: 'yes' - auth_url: - description: - - The keystone url for authentication - required: false - default: 'http://127.0.0.1:35357/v2.0/' - region_name: - description: - - Name of the region - required: false - default: None - state: - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present - network_name: - description: - - Name of the network from which IP has to be assigned to VM. 
Please make sure the network is an external network - required: true - default: None - instance_name: - description: - - The name of the instance to which the IP address should be assigned - required: true - default: None - internal_network_name: - description: - - The name of the network of the port to associate with the floating ip. Necessary when VM multiple networks. - required: false - default: None - version_added: "1.5" -requirements: ["novaclient", "quantumclient", "neutronclient", "keystoneclient"] -''' - -EXAMPLES = ''' -# Assign a floating ip to the instance from an external network -- quantum_floating_ip: state=present login_username=admin login_password=admin - login_tenant_name=admin network_name=external_network - instance_name=vm1 internal_network_name=internal_network -''' - -def _get_ksclient(module, kwargs): - try: - kclient = ksclient.Client(username=kwargs.get('login_username'), - password=kwargs.get('login_password'), - tenant_name=kwargs.get('login_tenant_name'), - auth_url=kwargs.get('auth_url')) - except Exception, e: - module.fail_json(msg = "Error authenticating to the keystone: %s " % e.message) - global _os_keystone - _os_keystone = kclient - return kclient - - -def _get_endpoint(module, ksclient): - try: - endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL') - except Exception, e: - module.fail_json(msg = "Error getting network endpoint: %s" % e.message) - return endpoint - -def _get_neutron_client(module, kwargs): - _ksclient = _get_ksclient(module, kwargs) - token = _ksclient.auth_token - endpoint = _get_endpoint(module, _ksclient) - kwargs = { - 'token': token, - 'endpoint_url': endpoint - } - try: - neutron = client.Client('2.0', **kwargs) - except Exception, e: - module.fail_json(msg = "Error in connecting to neutron: %s " % e.message) - return neutron - -def _get_server_state(module, nova): - server_info = None - server = None - try: - for server in nova.servers.list(): - if server: - 
info = server._info - if info['name'] == module.params['instance_name']: - if info['status'] != 'ACTIVE' and module.params['state'] == 'present': - module.fail_json( msg="The VM is available but not Active. state:" + info['status']) - server_info = info - break - except Exception, e: - module.fail_json(msg = "Error in getting the server list: %s" % e.message) - return server_info, server - -def _get_port_info(neutron, module, instance_id, internal_network_name=None): - subnet_id = None - if internal_network_name: - kwargs = {'name': internal_network_name} - networks = neutron.list_networks(**kwargs) - network_id = networks['networks'][0]['id'] - kwargs = { - 'network_id': network_id, - 'ip_version': 4 - } - subnets = neutron.list_subnets(**kwargs) - subnet_id = subnets['subnets'][0]['id'] - kwargs = { - 'device_id': instance_id, - } - try: - ports = neutron.list_ports(**kwargs) - except Exception, e: - module.fail_json( msg = "Error in listing ports: %s" % e.message) - if subnet_id: - port = next(port for port in ports['ports'] if port['fixed_ips'][0]['subnet_id'] == subnet_id) - port_id = port['id'] - fixed_ip_address = port['fixed_ips'][0]['ip_address'] - else: - port_id = ports['ports'][0]['id'] - fixed_ip_address = ports['ports'][0]['fixed_ips'][0]['ip_address'] - if not ports['ports']: - return None, None - return fixed_ip_address, port_id - -def _get_floating_ip(module, neutron, fixed_ip_address): - kwargs = { - 'fixed_ip_address': fixed_ip_address - } - try: - ips = neutron.list_floatingips(**kwargs) - except Exception, e: - module.fail_json(msg = "error in fetching the floatingips's %s" % e.message) - if not ips['floatingips']: - return None, None - return ips['floatingips'][0]['id'], ips['floatingips'][0]['floating_ip_address'] - -def _create_floating_ip(neutron, module, port_id, net_id, fixed_ip): - kwargs = { - 'port_id': port_id, - 'floating_network_id': net_id, - 'fixed_ip_address': fixed_ip - } - try: - result = 
neutron.create_floatingip({'floatingip': kwargs}) - except Exception, e: - module.fail_json(msg="There was an error in updating the floating ip address: %s" % e.message) - module.exit_json(changed=True, result=result, public_ip=result['floatingip']['floating_ip_address']) - -def _get_net_id(neutron, module): - kwargs = { - 'name': module.params['network_name'], - } - try: - networks = neutron.list_networks(**kwargs) - except Exception, e: - module.fail_json("Error in listing neutron networks: %s" % e.message) - if not networks['networks']: - return None - return networks['networks'][0]['id'] - -def _update_floating_ip(neutron, module, port_id, floating_ip_id): - kwargs = { - 'port_id': port_id - } - try: - result = neutron.update_floatingip(floating_ip_id, {'floatingip': kwargs}) - except Exception, e: - module.fail_json(msg="There was an error in updating the floating ip address: %s" % e.message) - module.exit_json(changed=True, result=result) - - -def main(): - - argument_spec = openstack_argument_spec() - argument_spec.update(dict( - network_name = dict(required=True), - instance_name = dict(required=True), - state = dict(default='present', choices=['absent', 'present']), - internal_network_name = dict(default=None), - )) - module = AnsibleModule(argument_spec=argument_spec) - - try: - nova = nova_client.Client(module.params['login_username'], module.params['login_password'], - module.params['login_tenant_name'], module.params['auth_url'], service_type='compute') - neutron = _get_neutron_client(module, module.params) - except Exception, e: - module.fail_json(msg="Error in authenticating to nova: %s" % e.message) - - server_info, server_obj = _get_server_state(module, nova) - if not server_info: - module.fail_json(msg="The instance name provided cannot be found") - - fixed_ip, port_id = _get_port_info(neutron, module, server_info['id'], module.params['internal_network_name']) - if not port_id: - module.fail_json(msg="Cannot find a port for this instance, maybe 
fixed ip is not assigned") - - floating_id, floating_ip = _get_floating_ip(module, neutron, fixed_ip) - - if module.params['state'] == 'present': - if floating_ip: - module.exit_json(changed = False, public_ip=floating_ip) - net_id = _get_net_id(neutron, module) - if not net_id: - module.fail_json(msg = "cannot find the network specified, please check") - _create_floating_ip(neutron, module, port_id, net_id, fixed_ip) - - if module.params['state'] == 'absent': - if floating_ip: - _update_floating_ip(neutron, module, None, floating_id) - module.exit_json(changed=False) - -# this is magic, see lib/ansible/module.params['common.py -from ansible.module_utils.basic import * -from ansible.module_utils.openstack import * -main() - diff --git a/library/cloud/quantum_floating_ip_associate b/library/cloud/quantum_floating_ip_associate deleted file mode 100644 index 91df2690b6..0000000000 --- a/library/cloud/quantum_floating_ip_associate +++ /dev/null @@ -1,218 +0,0 @@ -#!/usr/bin/python -#coding: utf-8 -*- - -# (c) 2013, Benno Joy -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . 
- -try: - from novaclient.v1_1 import client as nova_client - try: - from neutronclient.neutron import client - except ImportError: - from quantumclient.quantum import client - from keystoneclient.v2_0 import client as ksclient - import time -except ImportError: - print "failed=True msg='novaclient, keystone, and quantumclient (or neutronclient) client are required'" - -DOCUMENTATION = ''' ---- -module: quantum_floating_ip_associate -version_added: "1.2" -short_description: Associate or disassociate a particular floating IP with an instance -description: - - Associates or disassociates a specific floating IP with a particular instance -options: - login_username: - description: - - login username to authenticate to keystone - required: true - default: admin - login_password: - description: - - password of login user - required: true - default: 'yes' - login_tenant_name: - description: - - the tenant name of the login user - required: true - default: true - auth_url: - description: - - the keystone url for authentication - required: false - default: 'http://127.0.0.1:35357/v2.0/' - region_name: - description: - - name of the region - required: false - default: None - state: - description: - - indicates the desired state of the resource - choices: ['present', 'absent'] - default: present - instance_name: - description: - - name of the instance to which the public IP should be assigned - required: true - default: None - ip_address: - description: - - floating ip that should be assigned to the instance - required: true - default: None -requirements: ["quantumclient", "neutronclient", "keystoneclient"] -''' - -EXAMPLES = ''' -# Associate a specific floating IP with an Instance -- quantum_floating_ip_associate: - state=present - login_username=admin - login_password=admin - login_tenant_name=admin - ip_address=1.1.1.1 - instance_name=vm1 -''' - -def _get_ksclient(module, kwargs): - try: - kclient = ksclient.Client(username=kwargs.get('login_username'), - 
password=kwargs.get('login_password'), - tenant_name=kwargs.get('login_tenant_name'), - auth_url=kwargs.get('auth_url')) - except Exception, e: - module.fail_json(msg = "Error authenticating to the keystone: %s " % e.message) - global _os_keystone - _os_keystone = kclient - return kclient - - -def _get_endpoint(module, ksclient): - try: - endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL') - except Exception, e: - module.fail_json(msg = "Error getting network endpoint: %s" % e.message) - return endpoint - -def _get_neutron_client(module, kwargs): - _ksclient = _get_ksclient(module, kwargs) - token = _ksclient.auth_token - endpoint = _get_endpoint(module, _ksclient) - kwargs = { - 'token': token, - 'endpoint_url': endpoint - } - try: - neutron = client.Client('2.0', **kwargs) - except Exception, e: - module.fail_json(msg = "Error in connecting to neutron: %s " % e.message) - return neutron - -def _get_server_state(module, nova): - server_info = None - server = None - try: - for server in nova.servers.list(): - if server: - info = server._info - if info['name'] == module.params['instance_name']: - if info['status'] != 'ACTIVE' and module.params['state'] == 'present': - module.fail_json(msg="The VM is available but not Active. 
state:" + info['status']) - server_info = info - break - except Exception, e: - module.fail_json(msg = "Error in getting the server list: %s" % e.message) - return server_info, server - -def _get_port_id(neutron, module, instance_id): - kwargs = dict(device_id = instance_id) - try: - ports = neutron.list_ports(**kwargs) - except Exception, e: - module.fail_json( msg = "Error in listing ports: %s" % e.message) - if not ports['ports']: - return None - return ports['ports'][0]['id'] - -def _get_floating_ip_id(module, neutron): - kwargs = { - 'floating_ip_address': module.params['ip_address'] - } - try: - ips = neutron.list_floatingips(**kwargs) - except Exception, e: - module.fail_json(msg = "error in fetching the floatingips's %s" % e.message) - if not ips['floatingips']: - module.fail_json(msg = "Could find the ip specified in parameter, Please check") - ip = ips['floatingips'][0]['id'] - if not ips['floatingips'][0]['port_id']: - state = "detached" - else: - state = "attached" - return state, ip - -def _update_floating_ip(neutron, module, port_id, floating_ip_id): - kwargs = { - 'port_id': port_id - } - try: - result = neutron.update_floatingip(floating_ip_id, {'floatingip': kwargs}) - except Exception, e: - module.fail_json(msg = "There was an error in updating the floating ip address: %s" % e.message) - module.exit_json(changed = True, result = result, public_ip=module.params['ip_address']) - -def main(): - - argument_spec = openstack_argument_spec() - argument_spec.update(dict( - ip_address = dict(required=True), - instance_name = dict(required=True), - state = dict(default='present', choices=['absent', 'present']) - )) - module = AnsibleModule(argument_spec=argument_spec) - - try: - nova = nova_client.Client(module.params['login_username'], module.params['login_password'], - module.params['login_tenant_name'], module.params['auth_url'], service_type='compute') - except Exception, e: - module.fail_json( msg = " Error in authenticating to nova: %s" % e.message) - 
neutron = _get_neutron_client(module, module.params) - state, floating_ip_id = _get_floating_ip_id(module, neutron) - if module.params['state'] == 'present': - if state == 'attached': - module.exit_json(changed = False, result = 'attached', public_ip=module.params['ip_address']) - server_info, server_obj = _get_server_state(module, nova) - if not server_info: - module.fail_json(msg = " The instance name provided cannot be found") - port_id = _get_port_id(neutron, module, server_info['id']) - if not port_id: - module.fail_json(msg = "Cannot find a port for this instance, maybe fixed ip is not assigned") - _update_floating_ip(neutron, module, port_id, floating_ip_id) - - if module.params['state'] == 'absent': - if state == 'detached': - module.exit_json(changed = False, result = 'detached') - if state == 'attached': - _update_floating_ip(neutron, module, None, floating_ip_id) - module.exit_json(changed = True, result = "detached") - -# this is magic, see lib/ansible/module.params['common.py -from ansible.module_utils.basic import * -from ansible.module_utils.openstack import * -main() - diff --git a/library/cloud/quantum_network b/library/cloud/quantum_network deleted file mode 100644 index 6b0c66e7a1..0000000000 --- a/library/cloud/quantum_network +++ /dev/null @@ -1,279 +0,0 @@ -#!/usr/bin/python -#coding: utf-8 -*- - -# (c) 2013, Benno Joy -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . 
- -try: - try: - from neutronclient.neutron import client - except ImportError: - from quantumclient.quantum import client - from keystoneclient.v2_0 import client as ksclient -except ImportError: - print("failed=True msg='quantumclient (or neutronclient) and keystone client are required'") - -DOCUMENTATION = ''' ---- -module: quantum_network -version_added: "1.4" -short_description: Creates/Removes networks from OpenStack -description: - - Add or Remove network from OpenStack. -options: - login_username: - description: - - login username to authenticate to keystone - required: true - default: admin - login_password: - description: - - Password of login user - required: true - default: 'yes' - login_tenant_name: - description: - - The tenant name of the login user - required: true - default: 'yes' - tenant_name: - description: - - The name of the tenant for whom the network is created - required: false - default: None - auth_url: - description: - - The keystone url for authentication - required: false - default: 'http://127.0.0.1:35357/v2.0/' - region_name: - description: - - Name of the region - required: false - default: None - state: - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present - name: - description: - - Name to be assigned to the nework - required: true - default: None - provider_network_type: - description: - - The type of the network to be created, gre, vlan, local. Available types depend on the plugin. The Quantum service decides if not specified. - required: false - default: None - provider_physical_network: - description: - - The physical network which would realize the virtual network for flat and vlan networks. 
- required: false - default: None - provider_segmentation_id: - description: - - The id that has to be assigned to the network, in case of vlan networks that would be vlan id and for gre the tunnel id - required: false - default: None - router_external: - description: - - If 'yes', specifies that the virtual network is a external network (public). - required: false - default: false - shared: - description: - - Whether this network is shared or not - required: false - default: false - admin_state_up: - description: - - Whether the state should be marked as up or down - required: false - default: true -requirements: ["quantumclient", "neutronclient", "keystoneclient"] - -''' - -EXAMPLES = ''' -# Create a GRE backed Quantum network with tunnel id 1 for tenant1 -- quantum_network: name=t1network tenant_name=tenant1 state=present - provider_network_type=gre provider_segmentation_id=1 - login_username=admin login_password=admin login_tenant_name=admin - -# Create an external network -- quantum_network: name=external_network state=present - provider_network_type=local router_external=yes - login_username=admin login_password=admin login_tenant_name=admin -''' - -_os_keystone = None -_os_tenant_id = None - -def _get_ksclient(module, kwargs): - try: - kclient = ksclient.Client(username=kwargs.get('login_username'), - password=kwargs.get('login_password'), - tenant_name=kwargs.get('login_tenant_name'), - auth_url=kwargs.get('auth_url')) - except Exception, e: - module.fail_json(msg = "Error authenticating to the keystone: %s" %e.message) - global _os_keystone - _os_keystone = kclient - return kclient - - -def _get_endpoint(module, ksclient): - try: - endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL') - except Exception, e: - module.fail_json(msg = "Error getting network endpoint: %s " %e.message) - return endpoint - -def _get_neutron_client(module, kwargs): - _ksclient = _get_ksclient(module, kwargs) - token = _ksclient.auth_token 
- endpoint = _get_endpoint(module, _ksclient) - kwargs = { - 'token': token, - 'endpoint_url': endpoint - } - try: - neutron = client.Client('2.0', **kwargs) - except Exception, e: - module.fail_json(msg = " Error in connecting to neutron: %s " %e.message) - return neutron - -def _set_tenant_id(module): - global _os_tenant_id - if not module.params['tenant_name']: - tenant_name = module.params['login_tenant_name'] - else: - tenant_name = module.params['tenant_name'] - - for tenant in _os_keystone.tenants.list(): - if tenant.name == tenant_name: - _os_tenant_id = tenant.id - break - if not _os_tenant_id: - module.fail_json(msg = "The tenant id cannot be found, please check the parameters") - - -def _get_net_id(neutron, module): - kwargs = { - 'tenant_id': _os_tenant_id, - 'name': module.params['name'], - } - try: - networks = neutron.list_networks(**kwargs) - except Exception, e: - module.fail_json(msg = "Error in listing neutron networks: %s" % e.message) - if not networks['networks']: - return None - return networks['networks'][0]['id'] - -def _create_network(module, neutron): - - neutron.format = 'json' - - network = { - 'name': module.params.get('name'), - 'tenant_id': _os_tenant_id, - 'provider:network_type': module.params.get('provider_network_type'), - 'provider:physical_network': module.params.get('provider_physical_network'), - 'provider:segmentation_id': module.params.get('provider_segmentation_id'), - 'router:external': module.params.get('router_external'), - 'shared': module.params.get('shared'), - 'admin_state_up': module.params.get('admin_state_up'), - } - - if module.params['provider_network_type'] == 'local': - network.pop('provider:physical_network', None) - network.pop('provider:segmentation_id', None) - - if module.params['provider_network_type'] == 'flat': - network.pop('provider:segmentation_id', None) - - if module.params['provider_network_type'] == 'gre': - network.pop('provider:physical_network', None) - - if 
module.params['provider_network_type'] is None: - network.pop('provider:network_type', None) - network.pop('provider:physical_network', None) - network.pop('provider:segmentation_id', None) - - try: - net = neutron.create_network({'network':network}) - except Exception, e: - module.fail_json(msg = "Error in creating network: %s" % e.message) - return net['network']['id'] - -def _delete_network(module, net_id, neutron): - - try: - id = neutron.delete_network(net_id) - except Exception, e: - module.fail_json(msg = "Error in deleting the network: %s" % e.message) - return True - -def main(): - - argument_spec = openstack_argument_spec() - argument_spec.update(dict( - name = dict(required=True), - tenant_name = dict(default=None), - provider_network_type = dict(default=None, choices=['local', 'vlan', 'flat', 'gre']), - provider_physical_network = dict(default=None), - provider_segmentation_id = dict(default=None), - router_external = dict(default=False, type='bool'), - shared = dict(default=False, type='bool'), - admin_state_up = dict(default=True, type='bool'), - state = dict(default='present', choices=['absent', 'present']) - )) - module = AnsibleModule(argument_spec=argument_spec) - - if module.params['provider_network_type'] in ['vlan' , 'flat']: - if not module.params['provider_physical_network']: - module.fail_json(msg = " for vlan and flat networks, variable provider_physical_network should be set.") - - if module.params['provider_network_type'] in ['vlan', 'gre']: - if not module.params['provider_segmentation_id']: - module.fail_json(msg = " for vlan & gre networks, variable provider_segmentation_id should be set.") - - neutron = _get_neutron_client(module, module.params) - - _set_tenant_id(module) - - if module.params['state'] == 'present': - network_id = _get_net_id(neutron, module) - if not network_id: - network_id = _create_network(module, neutron) - module.exit_json(changed = True, result = "Created", id = network_id) - else: - module.exit_json(changed = 
False, result = "Success", id = network_id) - - if module.params['state'] == 'absent': - network_id = _get_net_id(neutron, module) - if not network_id: - module.exit_json(changed = False, result = "Success") - else: - _delete_network(module, network_id, neutron) - module.exit_json(changed = True, result = "Deleted") - -# this is magic, see lib/ansible/module.params['common.py -from ansible.module_utils.basic import * -from ansible.module_utils.openstack import * -main() - diff --git a/library/cloud/quantum_router b/library/cloud/quantum_router deleted file mode 100644 index 38d479128f..0000000000 --- a/library/cloud/quantum_router +++ /dev/null @@ -1,210 +0,0 @@ -#!/usr/bin/python -#coding: utf-8 -*- - -# (c) 2013, Benno Joy -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . 
- -try: - try: - from neutronclient.neutron import client - except ImportError: - from quantumclient.quantum import client - from keystoneclient.v2_0 import client as ksclient -except ImportError: - print("failed=True msg='quantumclient (or neutronclient) and keystone client are required'") - -DOCUMENTATION = ''' ---- -module: quantum_router -version_added: "1.2" -short_description: Create or Remove router from openstack -description: - - Create or Delete routers from OpenStack -options: - login_username: - description: - - login username to authenticate to keystone - required: true - default: admin - login_password: - description: - - Password of login user - required: true - default: 'yes' - login_tenant_name: - description: - - The tenant name of the login user - required: true - default: 'yes' - auth_url: - description: - - The keystone url for authentication - required: false - default: 'http://127.0.0.1:35357/v2.0/' - region_name: - description: - - Name of the region - required: false - default: None - state: - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present - name: - description: - - Name to be give to the router - required: true - default: None - tenant_name: - description: - - Name of the tenant for which the router has to be created, if none router would be created for the login tenant. - required: false - default: None - admin_state_up: - description: - - desired admin state of the created router . 
- required: false - default: true -requirements: ["quantumclient", "neutronclient", "keystoneclient"] -''' - -EXAMPLES = ''' -# Creates a router for tenant admin -- quantum_router: state=present - login_username=admin - login_password=admin - login_tenant_name=admin - name=router1" -''' - -_os_keystone = None -_os_tenant_id = None - -def _get_ksclient(module, kwargs): - try: - kclient = ksclient.Client(username=kwargs.get('login_username'), - password=kwargs.get('login_password'), - tenant_name=kwargs.get('login_tenant_name'), - auth_url=kwargs.get('auth_url')) - except Exception, e: - module.fail_json(msg = "Error authenticating to the keystone: %s " % e.message) - global _os_keystone - _os_keystone = kclient - return kclient - - -def _get_endpoint(module, ksclient): - try: - endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL') - except Exception, e: - module.fail_json(msg = "Error getting network endpoint: %s" % e.message) - return endpoint - -def _get_neutron_client(module, kwargs): - _ksclient = _get_ksclient(module, kwargs) - token = _ksclient.auth_token - endpoint = _get_endpoint(module, _ksclient) - kwargs = { - 'token': token, - 'endpoint_url': endpoint - } - try: - neutron = client.Client('2.0', **kwargs) - except Exception, e: - module.fail_json(msg = "Error in connecting to neutron: %s " % e.message) - return neutron - -def _set_tenant_id(module): - global _os_tenant_id - if not module.params['tenant_name']: - login_tenant_name = module.params['login_tenant_name'] - else: - login_tenant_name = module.params['tenant_name'] - - for tenant in _os_keystone.tenants.list(): - if tenant.name == login_tenant_name: - _os_tenant_id = tenant.id - break - if not _os_tenant_id: - module.fail_json(msg = "The tenant id cannot be found, please check the parameters") - - -def _get_router_id(module, neutron): - kwargs = { - 'name': module.params['name'], - 'tenant_id': _os_tenant_id, - } - try: - routers = 
neutron.list_routers(**kwargs) - except Exception, e: - module.fail_json(msg = "Error in getting the router list: %s " % e.message) - if not routers['routers']: - return None - return routers['routers'][0]['id'] - -def _create_router(module, neutron): - router = { - 'name': module.params['name'], - 'tenant_id': _os_tenant_id, - 'admin_state_up': module.params['admin_state_up'], - } - try: - new_router = neutron.create_router(dict(router=router)) - except Exception, e: - module.fail_json( msg = "Error in creating router: %s" % e.message) - return new_router['router']['id'] - -def _delete_router(module, neutron, router_id): - try: - neutron.delete_router(router_id) - except: - module.fail_json("Error in deleting the router") - return True - -def main(): - argument_spec = openstack_argument_spec() - argument_spec.update(dict( - name = dict(required=True), - tenant_name = dict(default=None), - state = dict(default='present', choices=['absent', 'present']), - admin_state_up = dict(type='bool', default=True), - )) - module = AnsibleModule(argument_spec=argument_spec) - - neutron = _get_neutron_client(module, module.params) - _set_tenant_id(module) - - if module.params['state'] == 'present': - router_id = _get_router_id(module, neutron) - if not router_id: - router_id = _create_router(module, neutron) - module.exit_json(changed=True, result="Created", id=router_id) - else: - module.exit_json(changed=False, result="success" , id=router_id) - - else: - router_id = _get_router_id(module, neutron) - if not router_id: - module.exit_json(changed=False, result="success") - else: - _delete_router(module, neutron, router_id) - module.exit_json(changed=True, result="deleted") - -# this is magic, see lib/ansible/module.params['common.py -from ansible.module_utils.basic import * -from ansible.module_utils.openstack import * -main() - diff --git a/library/cloud/quantum_router_gateway b/library/cloud/quantum_router_gateway deleted file mode 100644 index 5de19fd478..0000000000 --- 
a/library/cloud/quantum_router_gateway +++ /dev/null @@ -1,213 +0,0 @@ -#!/usr/bin/python -#coding: utf-8 -*- - -# (c) 2013, Benno Joy -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . - -try: - try: - from neutronclient.neutron import client - except ImportError: - from quantumclient.quantum import client - from keystoneclient.v2_0 import client as ksclient -except ImportError: - print("failed=True msg='quantumclient (or neutronclient) and keystone client are required'") -DOCUMENTATION = ''' ---- -module: quantum_router_gateway -version_added: "1.2" -short_description: set/unset a gateway interface for the router with the specified external network -description: - - Creates/Removes a gateway interface from the router, used to associate a external network with a router to route external traffic. 
-options: - login_username: - description: - - login username to authenticate to keystone - required: true - default: admin - login_password: - description: - - Password of login user - required: true - default: 'yes' - login_tenant_name: - description: - - The tenant name of the login user - required: true - default: 'yes' - auth_url: - description: - - The keystone URL for authentication - required: false - default: 'http://127.0.0.1:35357/v2.0/' - region_name: - description: - - Name of the region - required: false - default: None - state: - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present - router_name: - description: - - Name of the router to which the gateway should be attached. - required: true - default: None - network_name: - description: - - Name of the external network which should be attached to the router. - required: true - default: None -requirements: ["quantumclient", "neutronclient", "keystoneclient"] -''' - -EXAMPLES = ''' -# Attach an external network with a router to allow flow of external traffic -- quantum_router_gateway: state=present login_username=admin login_password=admin - login_tenant_name=admin router_name=external_router - network_name=external_network -''' - -_os_keystone = None -def _get_ksclient(module, kwargs): - try: - kclient = ksclient.Client(username=kwargs.get('login_username'), - password=kwargs.get('login_password'), - tenant_name=kwargs.get('login_tenant_name'), - auth_url=kwargs.get('auth_url')) - except Exception, e: - module.fail_json(msg = "Error authenticating to the keystone: %s " % e.message) - global _os_keystone - _os_keystone = kclient - return kclient - - -def _get_endpoint(module, ksclient): - try: - endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL') - except Exception, e: - module.fail_json(msg = "Error getting network endpoint: %s" % e.message) - return endpoint - -def _get_neutron_client(module, kwargs): - 
_ksclient = _get_ksclient(module, kwargs) - token = _ksclient.auth_token - endpoint = _get_endpoint(module, _ksclient) - kwargs = { - 'token': token, - 'endpoint_url': endpoint - } - try: - neutron = client.Client('2.0', **kwargs) - except Exception, e: - module.fail_json(msg = "Error in connecting to neutron: %s " % e.message) - return neutron - -def _get_router_id(module, neutron): - kwargs = { - 'name': module.params['router_name'], - } - try: - routers = neutron.list_routers(**kwargs) - except Exception, e: - module.fail_json(msg = "Error in getting the router list: %s " % e.message) - if not routers['routers']: - return None - return routers['routers'][0]['id'] - -def _get_net_id(neutron, module): - kwargs = { - 'name': module.params['network_name'], - 'router:external': True - } - try: - networks = neutron.list_networks(**kwargs) - except Exception, e: - module.fail_json("Error in listing neutron networks: %s" % e.message) - if not networks['networks']: - return None - return networks['networks'][0]['id'] - -def _get_port_id(neutron, module, router_id, network_id): - kwargs = { - 'device_id': router_id, - 'network_id': network_id, - } - try: - ports = neutron.list_ports(**kwargs) - except Exception, e: - module.fail_json( msg = "Error in listing ports: %s" % e.message) - if not ports['ports']: - return None - return ports['ports'][0]['id'] - -def _add_gateway_router(neutron, module, router_id, network_id): - kwargs = { - 'network_id': network_id - } - try: - neutron.add_gateway_router(router_id, kwargs) - except Exception, e: - module.fail_json(msg = "Error in adding gateway to router: %s" % e.message) - return True - -def _remove_gateway_router(neutron, module, router_id): - try: - neutron.remove_gateway_router(router_id) - except Exception, e: - module.fail_json(msg = "Error in removing gateway to router: %s" % e.message) - return True - -def main(): - - argument_spec = openstack_argument_spec() - argument_spec.update(dict( - router_name = 
dict(required=True), - network_name = dict(required=True), - state = dict(default='present', choices=['absent', 'present']), - )) - module = AnsibleModule(argument_spec=argument_spec) - - neutron = _get_neutron_client(module, module.params) - router_id = _get_router_id(module, neutron) - - if not router_id: - module.fail_json(msg="failed to get the router id, please check the router name") - - network_id = _get_net_id(neutron, module) - if not network_id: - module.fail_json(msg="failed to get the network id, please check the network name and make sure it is external") - - if module.params['state'] == 'present': - port_id = _get_port_id(neutron, module, router_id, network_id) - if not port_id: - _add_gateway_router(neutron, module, router_id, network_id) - module.exit_json(changed=True, result="created") - module.exit_json(changed=False, result="success") - - if module.params['state'] == 'absent': - port_id = _get_port_id(neutron, module, router_id, network_id) - if not port_id: - module.exit_json(changed=False, result="Success") - _remove_gateway_router(neutron, module, router_id) - module.exit_json(changed=True, result="Deleted") - -# this is magic, see lib/ansible/module.params['common.py -from ansible.module_utils.basic import * -from ansible.module_utils.openstack import * -main() - diff --git a/library/cloud/quantum_router_interface b/library/cloud/quantum_router_interface deleted file mode 100644 index c5828ad410..0000000000 --- a/library/cloud/quantum_router_interface +++ /dev/null @@ -1,249 +0,0 @@ -#!/usr/bin/python -#coding: utf-8 -*- - -# (c) 2013, Benno Joy -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . - -try: - try: - from neutronclient.neutron import client - except ImportError: - from quantumclient.quantum import client - from keystoneclient.v2_0 import client as ksclient -except ImportError: - print("failed=True msg='quantumclient (or neutronclient) and keystone client are required'") -DOCUMENTATION = ''' ---- -module: quantum_router_interface -version_added: "1.2" -short_description: Attach/Dettach a subnet's interface to a router -description: - - Attach/Dettach a subnet interface to a router, to provide a gateway for the subnet. -options: - login_username: - description: - - login username to authenticate to keystone - required: true - default: admin - login_password: - description: - - Password of login user - required: true - default: 'yes' - login_tenant_name: - description: - - The tenant name of the login user - required: true - default: 'yes' - auth_url: - description: - - The keystone URL for authentication - required: false - default: 'http://127.0.0.1:35357/v2.0/' - region_name: - description: - - Name of the region - required: false - default: None - state: - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present - router_name: - description: - - Name of the router to which the subnet's interface should be attached. - required: true - default: None - subnet_name: - description: - - Name of the subnet to whose interface should be attached to the router. - required: true - default: None - tenant_name: - description: - - Name of the tenant whose subnet has to be attached. 
- required: false - default: None -requirements: ["quantumclient", "keystoneclient"] -''' - -EXAMPLES = ''' -# Attach tenant1's subnet to the external router -- quantum_router_interface: state=present login_username=admin - login_password=admin - login_tenant_name=admin - tenant_name=tenant1 - router_name=external_route - subnet_name=t1subnet -''' - - -_os_keystone = None -_os_tenant_id = None - -def _get_ksclient(module, kwargs): - try: - kclient = ksclient.Client(username=kwargs.get('login_username'), - password=kwargs.get('login_password'), - tenant_name=kwargs.get('login_tenant_name'), - auth_url=kwargs.get('auth_url')) - except Exception, e: - module.fail_json(msg = "Error authenticating to the keystone: %s " % e.message) - global _os_keystone - _os_keystone = kclient - return kclient - - -def _get_endpoint(module, ksclient): - try: - endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL') - except Exception, e: - module.fail_json(msg = "Error getting network endpoint: %s" % e.message) - return endpoint - -def _get_neutron_client(module, kwargs): - _ksclient = _get_ksclient(module, kwargs) - token = _ksclient.auth_token - endpoint = _get_endpoint(module, _ksclient) - kwargs = { - 'token': token, - 'endpoint_url': endpoint - } - try: - neutron = client.Client('2.0', **kwargs) - except Exception, e: - module.fail_json(msg = "Error in connecting to neutron: %s " % e.message) - return neutron - -def _set_tenant_id(module): - global _os_tenant_id - if not module.params['tenant_name']: - login_tenant_name = module.params['login_tenant_name'] - else: - login_tenant_name = module.params['tenant_name'] - - for tenant in _os_keystone.tenants.list(): - if tenant.name == login_tenant_name: - _os_tenant_id = tenant.id - break - if not _os_tenant_id: - module.fail_json(msg = "The tenant id cannot be found, please check the parameters") - - -def _get_router_id(module, neutron): - kwargs = { - 'name': module.params['router_name'], - } - 
try: - routers = neutron.list_routers(**kwargs) - except Exception, e: - module.fail_json(msg = "Error in getting the router list: %s " % e.message) - if not routers['routers']: - return None - return routers['routers'][0]['id'] - - -def _get_subnet_id(module, neutron): - subnet_id = None - kwargs = { - 'tenant_id': _os_tenant_id, - 'name': module.params['subnet_name'], - } - try: - subnets = neutron.list_subnets(**kwargs) - except Exception, e: - module.fail_json( msg = " Error in getting the subnet list:%s " % e.message) - if not subnets['subnets']: - return None - return subnets['subnets'][0]['id'] - -def _get_port_id(neutron, module, router_id, subnet_id): - kwargs = { - 'tenant_id': _os_tenant_id, - 'device_id': router_id, - } - try: - ports = neutron.list_ports(**kwargs) - except Exception, e: - module.fail_json( msg = "Error in listing ports: %s" % e.message) - if not ports['ports']: - return None - for port in ports['ports']: - for subnet in port['fixed_ips']: - if subnet['subnet_id'] == subnet_id: - return port['id'] - return None - -def _add_interface_router(neutron, module, router_id, subnet_id): - kwargs = { - 'subnet_id': subnet_id - } - try: - neutron.add_interface_router(router_id, kwargs) - except Exception, e: - module.fail_json(msg = "Error in adding interface to router: %s" % e.message) - return True - -def _remove_interface_router(neutron, module, router_id, subnet_id): - kwargs = { - 'subnet_id': subnet_id - } - try: - neutron.remove_interface_router(router_id, kwargs) - except Exception, e: - module.fail_json(msg="Error in removing interface from router: %s" % e.message) - return True - -def main(): - argument_spec = openstack_argument_spec() - argument_spec.update(dict( - router_name = dict(required=True), - subnet_name = dict(required=True), - tenant_name = dict(default=None), - state = dict(default='present', choices=['absent', 'present']), - )) - module = AnsibleModule(argument_spec=argument_spec) - - neutron = _get_neutron_client(module, 
module.params) - _set_tenant_id(module) - - router_id = _get_router_id(module, neutron) - if not router_id: - module.fail_json(msg="failed to get the router id, please check the router name") - - subnet_id = _get_subnet_id(module, neutron) - if not subnet_id: - module.fail_json(msg="failed to get the subnet id, please check the subnet name") - - if module.params['state'] == 'present': - port_id = _get_port_id(neutron, module, router_id, subnet_id) - if not port_id: - _add_interface_router(neutron, module, router_id, subnet_id) - module.exit_json(changed=True, result="created", id=port_id) - module.exit_json(changed=False, result="success", id=port_id) - - if module.params['state'] == 'absent': - port_id = _get_port_id(neutron, module, router_id, subnet_id) - if not port_id: - module.exit_json(changed = False, result = "Success") - _remove_interface_router(neutron, module, router_id, subnet_id) - module.exit_json(changed=True, result="Deleted") - -# this is magic, see lib/ansible/module.params['common.py -from ansible.module_utils.basic import * -from ansible.module_utils.openstack import * -main() - diff --git a/library/cloud/quantum_subnet b/library/cloud/quantum_subnet deleted file mode 100644 index e38b2c94aa..0000000000 --- a/library/cloud/quantum_subnet +++ /dev/null @@ -1,291 +0,0 @@ -#!/usr/bin/python -#coding: utf-8 -*- - -# (c) 2013, Benno Joy -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . 
- -try: - try: - from neutronclient.neutron import client - except ImportError: - from quantumclient.quantum import client - from keystoneclient.v2_0 import client as ksclient -except ImportError: - print("failed=True msg='quantumclient (or neutronclient) and keystoneclient are required'") - -DOCUMENTATION = ''' ---- -module: quantum_subnet -version_added: "1.2" -short_description: Add/remove subnet from a network -description: - - Add/remove subnet from a network -options: - login_username: - description: - - login username to authenticate to keystone - required: true - default: admin - login_password: - description: - - Password of login user - required: true - default: True - login_tenant_name: - description: - - The tenant name of the login user - required: true - default: True - auth_url: - description: - - The keystone URL for authentication - required: false - default: 'http://127.0.0.1:35357/v2.0/' - region_name: - description: - - Name of the region - required: false - default: None - state: - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present - network_name: - description: - - Name of the network to which the subnet should be attached - required: true - default: None - name: - description: - - The name of the subnet that should be created - required: true - default: None - cidr: - description: - - The CIDR representation of the subnet that should be assigned to the subnet - required: true - default: None - tenant_name: - description: - - The name of the tenant for whom the subnet should be created - required: false - default: None - ip_version: - description: - - The IP version of the subnet 4 or 6 - required: false - default: 4 - enable_dhcp: - description: - - Whether DHCP should be enabled for this subnet. 
- required: false - default: true - gateway_ip: - description: - - The ip that would be assigned to the gateway for this subnet - required: false - default: None - dns_nameservers: - description: - - DNS nameservers for this subnet, comma-separated - required: false - default: None - version_added: "1.4" - allocation_pool_start: - description: - - From the subnet pool the starting address from which the IP should be allocated - required: false - default: None - allocation_pool_end: - description: - - From the subnet pool the last IP that should be assigned to the virtual machines - required: false - default: None -requirements: ["quantumclient", "neutronclient", "keystoneclient"] -''' - -EXAMPLES = ''' -# Create a subnet for a tenant with the specified subnet -- quantum_subnet: state=present login_username=admin login_password=admin - login_tenant_name=admin tenant_name=tenant1 - network_name=network1 name=net1subnet cidr=192.168.0.0/24" -''' - -_os_keystone = None -_os_tenant_id = None -_os_network_id = None - -def _get_ksclient(module, kwargs): - try: - kclient = ksclient.Client(username=kwargs.get('login_username'), - password=kwargs.get('login_password'), - tenant_name=kwargs.get('login_tenant_name'), - auth_url=kwargs.get('auth_url')) - except Exception, e: - module.fail_json(msg = "Error authenticating to the keystone: %s" %e.message) - global _os_keystone - _os_keystone = kclient - return kclient - - -def _get_endpoint(module, ksclient): - try: - endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL') - except Exception, e: - module.fail_json(msg = "Error getting network endpoint: %s" % e.message) - return endpoint - -def _get_neutron_client(module, kwargs): - _ksclient = _get_ksclient(module, kwargs) - token = _ksclient.auth_token - endpoint = _get_endpoint(module, _ksclient) - kwargs = { - 'token': token, - 'endpoint_url': endpoint - } - try: - neutron = client.Client('2.0', **kwargs) - except Exception, e: - 
module.fail_json(msg = " Error in connecting to neutron: %s" % e.message) - return neutron - -def _set_tenant_id(module): - global _os_tenant_id - if not module.params['tenant_name']: - tenant_name = module.params['login_tenant_name'] - else: - tenant_name = module.params['tenant_name'] - - for tenant in _os_keystone.tenants.list(): - if tenant.name == tenant_name: - _os_tenant_id = tenant.id - break - if not _os_tenant_id: - module.fail_json(msg = "The tenant id cannot be found, please check the parameters") - -def _get_net_id(neutron, module): - kwargs = { - 'tenant_id': _os_tenant_id, - 'name': module.params['network_name'], - } - try: - networks = neutron.list_networks(**kwargs) - except Exception, e: - module.fail_json("Error in listing neutron networks: %s" % e.message) - if not networks['networks']: - return None - return networks['networks'][0]['id'] - - -def _get_subnet_id(module, neutron): - global _os_network_id - subnet_id = None - _os_network_id = _get_net_id(neutron, module) - if not _os_network_id: - module.fail_json(msg = "network id of network not found.") - else: - kwargs = { - 'tenant_id': _os_tenant_id, - 'name': module.params['name'], - } - try: - subnets = neutron.list_subnets(**kwargs) - except Exception, e: - module.fail_json( msg = " Error in getting the subnet list:%s " % e.message) - if not subnets['subnets']: - return None - return subnets['subnets'][0]['id'] - -def _create_subnet(module, neutron): - neutron.format = 'json' - subnet = { - 'name': module.params['name'], - 'ip_version': module.params['ip_version'], - 'enable_dhcp': module.params['enable_dhcp'], - 'tenant_id': _os_tenant_id, - 'gateway_ip': module.params['gateway_ip'], - 'dns_nameservers': module.params['dns_nameservers'], - 'network_id': _os_network_id, - 'cidr': module.params['cidr'], - } - if module.params['allocation_pool_start'] and module.params['allocation_pool_end']: - allocation_pools = [ - { - 'start' : module.params['allocation_pool_start'], - 'end' : 
module.params['allocation_pool_end'] - } - ] - subnet.update({'allocation_pools': allocation_pools}) - if not module.params['gateway_ip']: - subnet.pop('gateway_ip') - if module.params['dns_nameservers']: - subnet['dns_nameservers'] = module.params['dns_nameservers'].split(',') - else: - subnet.pop('dns_nameservers') - try: - new_subnet = neutron.create_subnet(dict(subnet=subnet)) - except Exception, e: - module.fail_json(msg = "Failure in creating subnet: %s" % e.message) - return new_subnet['subnet']['id'] - - -def _delete_subnet(module, neutron, subnet_id): - try: - neutron.delete_subnet(subnet_id) - except Exception, e: - module.fail_json( msg = "Error in deleting subnet: %s" % e.message) - return True - - -def main(): - - argument_spec = openstack_argument_spec() - argument_spec.update(dict( - name = dict(required=True), - network_name = dict(required=True), - cidr = dict(required=True), - tenant_name = dict(default=None), - state = dict(default='present', choices=['absent', 'present']), - ip_version = dict(default='4', choices=['4', '6']), - enable_dhcp = dict(default='true', type='bool'), - gateway_ip = dict(default=None), - dns_nameservers = dict(default=None), - allocation_pool_start = dict(default=None), - allocation_pool_end = dict(default=None), - )) - module = AnsibleModule(argument_spec=argument_spec) - neutron = _get_neutron_client(module, module.params) - _set_tenant_id(module) - if module.params['state'] == 'present': - subnet_id = _get_subnet_id(module, neutron) - if not subnet_id: - subnet_id = _create_subnet(module, neutron) - module.exit_json(changed = True, result = "Created" , id = subnet_id) - else: - module.exit_json(changed = False, result = "success" , id = subnet_id) - else: - subnet_id = _get_subnet_id(module, neutron) - if not subnet_id: - module.exit_json(changed = False, result = "success") - else: - _delete_subnet(module, neutron, subnet_id) - module.exit_json(changed = True, result = "deleted") - -# this is magic, see 
lib/ansible/module.params['common.py -from ansible.module_utils.basic import * -from ansible.module_utils.openstack import * -main() - diff --git a/library/cloud/rax b/library/cloud/rax deleted file mode 100644 index e01367ed5b..0000000000 --- a/library/cloud/rax +++ /dev/null @@ -1,711 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# This is a DOCUMENTATION stub specific to this module, it extends -# a documentation fragment located in ansible.utils.module_docs_fragments -DOCUMENTATION = ''' ---- -module: rax -short_description: create / delete an instance in Rackspace Public Cloud -description: - - creates / deletes a Rackspace Public Cloud instance and optionally - waits for it to be 'running'. -version_added: "1.2" -options: - auto_increment: - description: - - Whether or not to increment a single number with the name of the - created servers. Only applicable when used with the I(group) attribute - or meta key. 
- default: yes - choices: - - "yes" - - "no" - version_added: 1.5 - config_drive: - description: - - Attach read-only configuration drive to server as label config-2 - default: no - choices: - - "yes" - - "no" - version_added: 1.7 - count: - description: - - number of instances to launch - default: 1 - version_added: 1.4 - count_offset: - description: - - number count to start at - default: 1 - version_added: 1.4 - disk_config: - description: - - Disk partitioning strategy - choices: - - auto - - manual - version_added: '1.4' - default: auto - exact_count: - description: - - Explicitly ensure an exact count of instances, used with - state=active/present - default: no - choices: - - "yes" - - "no" - version_added: 1.4 - extra_client_args: - description: - - A hash of key/value pairs to be used when creating the cloudservers - client. This is considered an advanced option, use it wisely and - with caution. - version_added: 1.6 - extra_create_args: - description: - - A hash of key/value pairs to be used when creating a new server. - This is considered an advanced option, use it wisely and with caution. - version_added: 1.6 - files: - description: - - Files to insert into the instance. remotefilename:localcontent - default: null - flavor: - description: - - flavor to use for the instance - default: null - group: - description: - - host group to assign to server, is also used for idempotent operations - to ensure a specific number of instances - version_added: 1.4 - image: - description: - - image to use for the instance. 
Can be an C(id), C(human_id) or C(name) - default: null - instance_ids: - description: - - list of instance ids, currently only used when state='absent' to - remove instances - version_added: 1.4 - key_name: - description: - - key pair to use on the instance - default: null - aliases: - - keypair - meta: - description: - - A hash of metadata to associate with the instance - default: null - name: - description: - - Name to give the instance - default: null - networks: - description: - - The network to attach to the instances. If specified, you must include - ALL networks including the public and private interfaces. Can be C(id) - or C(label). - default: - - public - - private - version_added: 1.4 - state: - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - user_data: - description: - - Data to be uploaded to the servers config drive. This option implies - I(config_drive). Can be a file path or a string - version_added: 1.7 - wait: - description: - - wait for the instance to be in state 'running' before returning - default: "no" - choices: - - "yes" - - "no" - wait_timeout: - description: - - how long before wait gives up, in seconds - default: 300 -author: Jesse Keating, Matt Martz -extends_documentation_fragment: rackspace.openstack -''' - -EXAMPLES = ''' -- name: Build a Cloud Server - gather_facts: False - tasks: - - name: Server build request - local_action: - module: rax - credentials: ~/.raxpub - name: rax-test1 - flavor: 5 - image: b11d9567-e412-4255-96b9-bd63ab23bcfe - key_name: my_rackspace_key - files: - /root/test.txt: /home/localuser/test.txt - wait: yes - state: present - networks: - - private - - public - register: rax - -- name: Build an exact count of cloud servers with incremented names - hosts: local - gather_facts: False - tasks: - - name: Server build requests - local_action: - module: rax - credentials: ~/.raxpub - name: test%03d.example.org - flavor: performance1-1 - image: 
ubuntu-1204-lts-precise-pangolin - state: present - count: 10 - count_offset: 10 - exact_count: yes - group: test - wait: yes - register: rax -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - - -def create(module, names=[], flavor=None, image=None, meta={}, key_name=None, - files={}, wait=True, wait_timeout=300, disk_config=None, - group=None, nics=[], extra_create_args={}, user_data=None, - config_drive=False, existing=[]): - cs = pyrax.cloudservers - changed = False - - if user_data: - config_drive = True - - if user_data and os.path.isfile(user_data): - try: - f = open(user_data) - user_data = f.read() - f.close() - except Exception, e: - module.fail_json(msg='Failed to load %s' % user_data) - - # Handle the file contents - for rpath in files.keys(): - lpath = os.path.expanduser(files[rpath]) - try: - fileobj = open(lpath, 'r') - files[rpath] = fileobj.read() - fileobj.close() - except Exception, e: - module.fail_json(msg='Failed to load %s' % lpath) - try: - servers = [] - for name in names: - servers.append(cs.servers.create(name=name, image=image, - flavor=flavor, meta=meta, - key_name=key_name, - files=files, nics=nics, - disk_config=disk_config, - config_drive=config_drive, - userdata=user_data, - **extra_create_args)) - except Exception, e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - - if wait: - end_time = time.time() + wait_timeout - infinite = wait_timeout == 0 - while infinite or time.time() < end_time: - for server in servers: - try: - server.get() - except: - server.status == 'ERROR' - - if not filter(lambda s: s.status not in FINAL_STATUSES, - servers): - break - time.sleep(5) - - success = [] - error = [] - timeout = [] - for server in servers: - try: - server.get() - except: - server.status == 'ERROR' - instance = rax_to_dict(server, 'server') - if server.status == 'ACTIVE' or not wait: - success.append(instance) - elif server.status == 'ERROR': - error.append(instance) - elif wait: - 
timeout.append(instance) - - untouched = [rax_to_dict(s, 'server') for s in existing] - instances = success + untouched - - results = { - 'changed': changed, - 'action': 'create', - 'instances': instances, - 'success': success, - 'error': error, - 'timeout': timeout, - 'instance_ids': { - 'instances': [i['id'] for i in instances], - 'success': [i['id'] for i in success], - 'error': [i['id'] for i in error], - 'timeout': [i['id'] for i in timeout] - } - } - - if timeout: - results['msg'] = 'Timeout waiting for all servers to build' - elif error: - results['msg'] = 'Failed to build all servers' - - if 'msg' in results: - module.fail_json(**results) - else: - module.exit_json(**results) - - -def delete(module, instance_ids=[], wait=True, wait_timeout=300, kept=[]): - cs = pyrax.cloudservers - - changed = False - instances = {} - servers = [] - - for instance_id in instance_ids: - servers.append(cs.servers.get(instance_id)) - - for server in servers: - try: - server.delete() - except Exception, e: - module.fail_json(msg=e.message) - else: - changed = True - - instance = rax_to_dict(server, 'server') - instances[instance['id']] = instance - - # If requested, wait for server deletion - if wait: - end_time = time.time() + wait_timeout - infinite = wait_timeout == 0 - while infinite or time.time() < end_time: - for server in servers: - instance_id = server.id - try: - server.get() - except: - instances[instance_id]['status'] = 'DELETED' - instances[instance_id]['rax_status'] = 'DELETED' - - if not filter(lambda s: s['status'] not in ('', 'DELETED', - 'ERROR'), - instances.values()): - break - - time.sleep(5) - - timeout = filter(lambda s: s['status'] not in ('', 'DELETED', 'ERROR'), - instances.values()) - error = filter(lambda s: s['status'] in ('ERROR'), - instances.values()) - success = filter(lambda s: s['status'] in ('', 'DELETED'), - instances.values()) - - instances = [rax_to_dict(s, 'server') for s in kept] - - results = { - 'changed': changed, - 'action': 
'delete', - 'instances': instances, - 'success': success, - 'error': error, - 'timeout': timeout, - 'instance_ids': { - 'instances': [i['id'] for i in instances], - 'success': [i['id'] for i in success], - 'error': [i['id'] for i in error], - 'timeout': [i['id'] for i in timeout] - } - } - - if timeout: - results['msg'] = 'Timeout waiting for all servers to delete' - elif error: - results['msg'] = 'Failed to delete all servers' - - if 'msg' in results: - module.fail_json(**results) - else: - module.exit_json(**results) - - -def cloudservers(module, state=None, name=None, flavor=None, image=None, - meta={}, key_name=None, files={}, wait=True, wait_timeout=300, - disk_config=None, count=1, group=None, instance_ids=[], - exact_count=False, networks=[], count_offset=0, - auto_increment=False, extra_create_args={}, user_data=None, - config_drive=False): - cs = pyrax.cloudservers - cnw = pyrax.cloud_networks - if not cnw: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - servers = [] - - # Add the group meta key - if group and 'group' not in meta: - meta['group'] = group - elif 'group' in meta and group is None: - group = meta['group'] - - # Normalize and ensure all metadata values are strings - for k, v in meta.items(): - if isinstance(v, list): - meta[k] = ','.join(['%s' % i for i in v]) - elif isinstance(v, dict): - meta[k] = json.dumps(v) - elif not isinstance(v, basestring): - meta[k] = '%s' % v - - # When using state=absent with group, the absent block won't match the - # names properly. 
Use the exact_count functionality to decrease the count - # to the desired level - was_absent = False - if group is not None and state == 'absent': - exact_count = True - state = 'present' - was_absent = True - - if image: - image = rax_find_image(module, pyrax, image) - - nics = [] - if networks: - for network in networks: - nics.extend(rax_find_network(module, pyrax, network)) - - # act on the state - if state == 'present': - for arg, value in dict(name=name, flavor=flavor, - image=image).iteritems(): - if not value: - module.fail_json(msg='%s is required for the "rax" module' % - arg) - - # Idempotent ensurance of a specific count of servers - if exact_count is not False: - # See if we can find servers that match our options - if group is None: - module.fail_json(msg='"group" must be provided when using ' - '"exact_count"') - else: - if auto_increment: - numbers = set() - - try: - name % 0 - except TypeError, e: - if e.message.startswith('not all'): - name = '%s%%d' % name - else: - module.fail_json(msg=e.message) - - pattern = re.sub(r'%\d*[sd]', r'(\d+)', name) - for server in cs.servers.list(): - if server.metadata.get('group') == group: - servers.append(server) - match = re.search(pattern, server.name) - if match: - number = int(match.group(1)) - numbers.add(number) - - number_range = xrange(count_offset, count_offset + count) - available_numbers = list(set(number_range) - .difference(numbers)) - else: - for server in cs.servers.list(): - if server.metadata.get('group') == group: - servers.append(server) - - # If state was absent but the count was changed, - # assume we only wanted to remove that number of instances - if was_absent: - diff = len(servers) - count - if diff < 0: - count = 0 - else: - count = diff - - if len(servers) > count: - state = 'absent' - kept = servers[:count] - del servers[:count] - instance_ids = [] - for server in servers: - instance_ids.append(server.id) - delete(module, instance_ids=instance_ids, wait=wait, - 
wait_timeout=wait_timeout, kept=kept) - elif len(servers) < count: - if auto_increment: - names = [] - name_slice = count - len(servers) - numbers_to_use = available_numbers[:name_slice] - for number in numbers_to_use: - names.append(name % number) - else: - names = [name] * (count - len(servers)) - else: - instances = [] - instance_ids = [] - for server in servers: - instances.append(rax_to_dict(server, 'server')) - instance_ids.append(server.id) - module.exit_json(changed=False, action=None, - instances=instances, - success=[], error=[], timeout=[], - instance_ids={'instances': instance_ids, - 'success': [], 'error': [], - 'timeout': []}) - else: - if group is not None: - if auto_increment: - numbers = set() - - try: - name % 0 - except TypeError, e: - if e.message.startswith('not all'): - name = '%s%%d' % name - else: - module.fail_json(msg=e.message) - - pattern = re.sub(r'%\d*[sd]', r'(\d+)', name) - for server in cs.servers.list(): - if server.metadata.get('group') == group: - servers.append(server) - match = re.search(pattern, server.name) - if match: - number = int(match.group(1)) - numbers.add(number) - - number_range = xrange(count_offset, - count_offset + count + len(numbers)) - available_numbers = list(set(number_range) - .difference(numbers)) - names = [] - numbers_to_use = available_numbers[:count] - for number in numbers_to_use: - names.append(name % number) - else: - names = [name] * count - else: - search_opts = { - 'name': '^%s$' % name, - 'image': image, - 'flavor': flavor - } - servers = [] - for server in cs.servers.list(search_opts=search_opts): - if server.metadata != meta: - continue - servers.append(server) - - if len(servers) >= count: - instances = [] - for server in servers: - instances.append(rax_to_dict(server, 'server')) - - instance_ids = [i['id'] for i in instances] - module.exit_json(changed=False, action=None, - instances=instances, success=[], error=[], - timeout=[], - instance_ids={'instances': instance_ids, - 'success': [], 
'error': [], - 'timeout': []}) - - names = [name] * (count - len(servers)) - - create(module, names=names, flavor=flavor, image=image, - meta=meta, key_name=key_name, files=files, wait=wait, - wait_timeout=wait_timeout, disk_config=disk_config, group=group, - nics=nics, extra_create_args=extra_create_args, - user_data=user_data, config_drive=config_drive, - existing=servers) - - elif state == 'absent': - if instance_ids is None: - for arg, value in dict(name=name, flavor=flavor, - image=image).iteritems(): - if not value: - module.fail_json(msg='%s is required for the "rax" ' - 'module' % arg) - search_opts = { - 'name': '^%s$' % name, - 'image': image, - 'flavor': flavor - } - for server in cs.servers.list(search_opts=search_opts): - if meta != server.metadata: - continue - servers.append(server) - - instance_ids = [] - for server in servers: - if len(instance_ids) < count: - instance_ids.append(server.id) - else: - break - - if not instance_ids: - module.exit_json(changed=False, action=None, instances=[], - success=[], error=[], timeout=[], - instance_ids={'instances': [], - 'success': [], 'error': [], - 'timeout': []}) - - delete(module, instance_ids=instance_ids, wait=wait, - wait_timeout=wait_timeout) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - auto_increment=dict(default=True, type='bool'), - config_drive=dict(default=False, type='bool'), - count=dict(default=1, type='int'), - count_offset=dict(default=1, type='int'), - disk_config=dict(choices=['auto', 'manual']), - exact_count=dict(default=False, type='bool'), - extra_client_args=dict(type='dict', default={}), - extra_create_args=dict(type='dict', default={}), - files=dict(type='dict', default={}), - flavor=dict(), - group=dict(), - image=dict(), - instance_ids=dict(type='list'), - key_name=dict(aliases=['keypair']), - meta=dict(type='dict', default={}), - name=dict(), - networks=dict(type='list', default=['public', 'private']), - service=dict(), - 
state=dict(default='present', choices=['present', 'absent']), - user_data=dict(no_log=True), - wait=dict(default=False, type='bool'), - wait_timeout=dict(default=300), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - service = module.params.get('service') - - if service is not None: - module.fail_json(msg='The "service" attribute has been deprecated, ' - 'please remove "service: cloudservers" from your ' - 'playbook pertaining to the "rax" module') - - auto_increment = module.params.get('auto_increment') - config_drive = module.params.get('config_drive') - count = module.params.get('count') - count_offset = module.params.get('count_offset') - disk_config = module.params.get('disk_config') - if disk_config: - disk_config = disk_config.upper() - exact_count = module.params.get('exact_count', False) - extra_client_args = module.params.get('extra_client_args') - extra_create_args = module.params.get('extra_create_args') - files = module.params.get('files') - flavor = module.params.get('flavor') - group = module.params.get('group') - image = module.params.get('image') - instance_ids = module.params.get('instance_ids') - key_name = module.params.get('key_name') - meta = module.params.get('meta') - name = module.params.get('name') - networks = module.params.get('networks') - state = module.params.get('state') - user_data = module.params.get('user_data') - wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - - setup_rax_module(module, pyrax) - - if extra_client_args: - pyrax.cloudservers = pyrax.connect_to_cloudservers( - region=pyrax.cloudservers.client.region_name, - **extra_client_args) - client = pyrax.cloudservers.client - if 'bypass_url' in extra_client_args: - client.management_url = extra_client_args['bypass_url'] - - if pyrax.cloudservers is None: - 
module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - cloudservers(module, state=state, name=name, flavor=flavor, - image=image, meta=meta, key_name=key_name, files=files, - wait=wait, wait_timeout=wait_timeout, disk_config=disk_config, - count=count, group=group, instance_ids=instance_ids, - exact_count=exact_count, networks=networks, - count_offset=count_offset, auto_increment=auto_increment, - extra_create_args=extra_create_args, user_data=user_data, - config_drive=config_drive) - - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.rax import * - -# invoke the module -main() diff --git a/library/cloud/rax_cbs b/library/cloud/rax_cbs deleted file mode 100644 index a1b6ce46a6..0000000000 --- a/library/cloud/rax_cbs +++ /dev/null @@ -1,220 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -# This is a DOCUMENTATION stub specific to this module, it extends -# a documentation fragment located in ansible.utils.module_docs_fragments -DOCUMENTATION = ''' ---- -module: rax_cbs -short_description: Manipulate Rackspace Cloud Block Storage Volumes -description: - - Manipulate Rackspace Cloud Block Storage Volumes -version_added: 1.6 -options: - description: - description: - - Description to give the volume being created - default: null - meta: - description: - - A hash of metadata to associate with the volume - default: null - name: - description: - - Name to give the volume being created - default: null - required: true - size: - description: - - Size of the volume to create in Gigabytes - default: 100 - required: true - snapshot_id: - description: - - The id of the snapshot to create the volume from - default: null - state: - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - required: true - volume_type: - description: - - Type of the volume being created - choices: - - SATA - - SSD - default: SATA - required: true - wait: - description: - - wait for the volume to be in state 'available' before returning - default: "no" - choices: - - "yes" - - "no" - wait_timeout: - description: - - how long before wait gives up, in seconds - default: 300 -author: Christopher H. 
Laco, Matt Martz -extends_documentation_fragment: rackspace.openstack -''' - -EXAMPLES = ''' -- name: Build a Block Storage Volume - gather_facts: False - hosts: local - connection: local - tasks: - - name: Storage volume create request - local_action: - module: rax_cbs - credentials: ~/.raxpub - name: my-volume - description: My Volume - volume_type: SSD - size: 150 - region: DFW - wait: yes - state: present - meta: - app: my-cool-app - register: my_volume -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - - -def cloud_block_storage(module, state, name, description, meta, size, - snapshot_id, volume_type, wait, wait_timeout): - for arg in (state, name, size, volume_type): - if not arg: - module.fail_json(msg='%s is required for rax_cbs' % arg) - - if size < 100: - module.fail_json(msg='"size" must be greater than or equal to 100') - - changed = False - volume = None - instance = {} - - cbs = pyrax.cloud_blockstorage - - if cbs is None: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - volume = rax_find_volume(module, pyrax, name) - - if state == 'present': - if not volume: - try: - volume = cbs.create(name, size=size, volume_type=volume_type, - description=description, - metadata=meta, - snapshot_id=snapshot_id) - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - else: - if wait: - attempts = wait_timeout / 5 - pyrax.utils.wait_for_build(volume, interval=5, - attempts=attempts) - - volume.get() - for key, value in vars(volume).iteritems(): - if (isinstance(value, NON_CALLABLES) and - not key.startswith('_')): - instance[key] = value - - result = dict(changed=changed, volume=instance) - - if volume.status == 'error': - result['msg'] = '%s failed to build' % volume.id - elif wait and volume.status not in VOLUME_STATUS: - result['msg'] = 'Timeout waiting on %s' % volume.id - - if 'msg' in result: - module.fail_json(**result) - else: - module.exit_json(**result) - - elif state == 'absent': - if volume: - try: - volume.delete() - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, volume=instance) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - description=dict(), - meta=dict(type='dict', default={}), - name=dict(required=True), - size=dict(type='int', default=100), - snapshot_id=dict(), - state=dict(default='present', choices=['present', 'absent']), - volume_type=dict(choices=['SSD', 'SATA'], default='SATA'), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=300) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - description = module.params.get('description') - meta = module.params.get('meta') - name = module.params.get('name') - size = 
module.params.get('size') - snapshot_id = module.params.get('snapshot_id') - state = module.params.get('state') - volume_type = module.params.get('volume_type') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - - setup_rax_module(module, pyrax) - - cloud_block_storage(module, state, name, description, meta, size, - snapshot_id, volume_type, wait, wait_timeout) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.rax import * - -### invoke the module -main() diff --git a/library/cloud/rax_cbs_attachments b/library/cloud/rax_cbs_attachments deleted file mode 100644 index 365f93cd6e..0000000000 --- a/library/cloud/rax_cbs_attachments +++ /dev/null @@ -1,226 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# This is a DOCUMENTATION stub specific to this module, it extends -# a documentation fragment located in ansible.utils.module_docs_fragments -DOCUMENTATION = ''' ---- -module: rax_cbs_attachments -short_description: Manipulate Rackspace Cloud Block Storage Volume Attachments -description: - - Manipulate Rackspace Cloud Block Storage Volume Attachments -version_added: 1.6 -options: - device: - description: - - The device path to attach the volume to, e.g. 
/dev/xvde - default: null - required: true - volume: - description: - - Name or id of the volume to attach/detach - default: null - required: true - server: - description: - - Name or id of the server to attach/detach - default: null - required: true - state: - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - required: true - wait: - description: - - wait for the volume to be in 'in-use'/'available' state before returning - default: "no" - choices: - - "yes" - - "no" - wait_timeout: - description: - - how long before wait gives up, in seconds - default: 300 -author: Christopher H. Laco, Matt Martz -extends_documentation_fragment: rackspace.openstack -''' - -EXAMPLES = ''' -- name: Attach a Block Storage Volume - gather_facts: False - hosts: local - connection: local - tasks: - - name: Storage volume attach request - local_action: - module: rax_cbs_attachments - credentials: ~/.raxpub - volume: my-volume - server: my-server - device: /dev/xvdd - region: DFW - wait: yes - state: present - register: my_volume -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - - -def cloud_block_storage_attachments(module, state, volume, server, device, - wait, wait_timeout): - for arg in (state, volume, server, device): - if not arg: - module.fail_json(msg='%s is required for rax_cbs_attachments' % - arg) - - cbs = pyrax.cloud_blockstorage - cs = pyrax.cloudservers - - if cbs is None or cs is None: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - changed = False - instance = {} - - volume = rax_find_volume(module, pyrax, volume) - - if not volume: - module.fail_json(msg='No matching storage volumes were found') - - if state == 'present': - server = rax_find_server(module, pyrax, server) - - if (volume.attachments and - volume.attachments[0]['server_id'] == server.id): - changed = False - elif volume.attachments: - module.fail_json(msg='Volume is attached to another server') - else: - try: - volume.attach_to_instance(server, mountpoint=device) - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - - volume.get() - - for key, value in vars(volume).iteritems(): - if (isinstance(value, NON_CALLABLES) and - not key.startswith('_')): - instance[key] = value - - result = dict(changed=changed, volume=instance) - - if volume.status == 'error': - result['msg'] = '%s failed to build' % volume.id - elif wait: - attempts = wait_timeout / 5 - pyrax.utils.wait_until(volume, 'status', 'in-use', - interval=5, attempts=attempts) - - if 'msg' in result: - module.fail_json(**result) - else: - module.exit_json(**result) - - elif state == 'absent': - server = rax_find_server(module, pyrax, server) - - if (volume.attachments and - volume.attachments[0]['server_id'] == server.id): - try: - volume.detach() - if wait: - pyrax.utils.wait_until(volume, 'status', 'available', - interval=3, attempts=0, - verbose=False) - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - - volume.get() - changed = True - elif volume.attachments: - module.fail_json(msg='Volume is attached to another server') - - for key, value in vars(volume).iteritems(): - if (isinstance(value, NON_CALLABLES) and - not key.startswith('_')): - instance[key] = value - - result = dict(changed=changed, volume=instance) - - if volume.status == 'error': - result['msg'] = '%s failed to build' % volume.id - - if 'msg' in 
result: - module.fail_json(**result) - else: - module.exit_json(**result) - - module.exit_json(changed=changed, volume=instance) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - device=dict(required=True), - volume=dict(required=True), - server=dict(required=True), - state=dict(default='present', choices=['present', 'absent']), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=300) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - device = module.params.get('device') - volume = module.params.get('volume') - server = module.params.get('server') - state = module.params.get('state') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - - setup_rax_module(module, pyrax) - - cloud_block_storage_attachments(module, state, volume, server, device, - wait, wait_timeout) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.rax import * - -### invoke the module -main() diff --git a/library/cloud/rax_cdb b/library/cloud/rax_cdb deleted file mode 100644 index 55e486f79e..0000000000 --- a/library/cloud/rax_cdb +++ /dev/null @@ -1,238 +0,0 @@ -#!/usr/bin/python -tt -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. 
If not, see . - -# This is a DOCUMENTATION stub specific to this module, it extends -# a documentation fragment located in ansible.utils.module_docs_fragments -DOCUMENTATION = ''' ---- -module: rax_cdb -short_description: create/delete or resize a Rackspace Cloud Databases instance -description: - - creates / deletes or resize a Rackspace Cloud Databases instance - and optionally waits for it to be 'running'. The name option needs to be - unique since it's used to identify the instance. -version_added: "1.8" -options: - name: - description: - - Name of the databases server instance - default: null - flavor: - description: - - flavor to use for the instance 1 to 6 (i.e. 512MB to 16GB) - default: 1 - volume: - description: - - Volume size of the database 1-150GB - default: 2 - state: - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present - wait: - description: - - wait for the instance to be in state 'running' before returning - default: "no" - choices: [ "yes", "no" ] - wait_timeout: - description: - - how long before wait gives up, in seconds - default: 300 -author: Simon JAILLET -extends_documentation_fragment: rackspace -''' - -EXAMPLES = ''' -- name: Build a Cloud Databases - gather_facts: False - tasks: - - name: Server build request - local_action: - module: rax_cdb - credentials: ~/.raxpub - region: IAD - name: db-server1 - flavor: 1 - volume: 2 - wait: yes - state: present - register: rax_db_server -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - - -def find_instance(name): - - cdb = pyrax.cloud_databases - instances = cdb.list() - if instances: - for instance in instances: - if instance.name == name: - return instance - return False - - -def save_instance(module, name, flavor, volume, wait, wait_timeout): - - for arg, value in dict(name=name, flavor=flavor, - volume=volume).iteritems(): - if not value: - module.fail_json(msg='%s is required for the "rax_cdb"' - ' 
module' % arg) - - if not (volume >= 1 and volume <= 150): - module.fail_json(msg='volume is required to be between 1 and 150') - - cdb = pyrax.cloud_databases - - flavors = [] - for item in cdb.list_flavors(): - flavors.append(item.id) - - if not (flavor in flavors): - module.fail_json(msg='unexisting flavor reference "%s"' % str(flavor)) - - changed = False - - instance = find_instance(name) - - if not instance: - action = 'create' - try: - instance = cdb.create(name=name, flavor=flavor, volume=volume) - except Exception, e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - - else: - action = None - - if instance.volume.size != volume: - action = 'resize' - if instance.volume.size > volume: - module.fail_json(changed=False, action=action, - msg='The new volume size must be larger than ' - 'the current volume size', - cdb=rax_to_dict(instance)) - instance.resize_volume(volume) - changed = True - - if int(instance.flavor.id) != flavor: - action = 'resize' - pyrax.utils.wait_until(instance, 'status', 'ACTIVE', - attempts=wait_timeout) - instance.resize(flavor) - changed = True - - if wait: - pyrax.utils.wait_until(instance, 'status', 'ACTIVE', - attempts=wait_timeout) - - if wait and instance.status != 'ACTIVE': - module.fail_json(changed=changed, action=action, - cdb=rax_to_dict(instance), - msg='Timeout waiting for "%s" databases instance to ' - 'be created' % name) - - module.exit_json(changed=changed, action=action, cdb=rax_to_dict(instance)) - - -def delete_instance(module, name, wait, wait_timeout): - - if not name: - module.fail_json(msg='name is required for the "rax_cdb" module') - - changed = False - - instance = find_instance(name) - if not instance: - module.exit_json(changed=False, action='delete') - - try: - instance.delete() - except Exception, e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - - if wait: - pyrax.utils.wait_until(instance, 'status', 'SHUTDOWN', - attempts=wait_timeout) - - if wait and 
instance.status != 'SHUTDOWN': - module.fail_json(changed=changed, action='delete', - cdb=rax_to_dict(instance), - msg='Timeout waiting for "%s" databases instance to ' - 'be deleted' % name) - - module.exit_json(changed=changed, action='delete', - cdb=rax_to_dict(instance)) - - -def rax_cdb(module, state, name, flavor, volume, wait, wait_timeout): - - # act on the state - if state == 'present': - save_instance(module, name, flavor, volume, wait, wait_timeout) - elif state == 'absent': - delete_instance(module, name, wait, wait_timeout) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - name=dict(type='str', required=True), - flavor=dict(type='int', default=1), - volume=dict(type='int', default=2), - state=dict(default='present', choices=['present', 'absent']), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=300), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - name = module.params.get('name') - flavor = module.params.get('flavor') - volume = module.params.get('volume') - state = module.params.get('state') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - - setup_rax_module(module, pyrax) - rax_cdb(module, state, name, flavor, volume, wait, wait_timeout) - - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.rax import * - -# invoke the module -main() diff --git a/library/cloud/rax_cdb_database b/library/cloud/rax_cdb_database deleted file mode 100644 index 421b6dcb09..0000000000 --- a/library/cloud/rax_cdb_database +++ /dev/null @@ -1,186 +0,0 @@ -#!/usr/bin/python -tt -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software 
Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# This is a DOCUMENTATION stub specific to this module, it extends -# a documentation fragment located in ansible.utils.module_docs_fragments -DOCUMENTATION = ''' -module: rax_cdb_database -short_description: 'create / delete a database in the Cloud Databases' -description: - - create / delete a database in the Cloud Databases. -version_added: "1.8" -options: - cdb_id: - description: - - The databases server UUID - default: null - name: - description: - - Name to give to the database - default: null - character_set: - description: - - Set of symbols and encodings - default: 'utf8' - collate: - description: - - Set of rules for comparing characters in a character set - default: 'utf8_general_ci' - state: - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present -author: Simon JAILLET -extends_documentation_fragment: rackspace -''' - -EXAMPLES = ''' -- name: Build a database in Cloud Databases - tasks: - - name: Database build request - local_action: - module: rax_cdb_database - credentials: ~/.raxpub - region: IAD - cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66 - name: db1 - state: present - register: rax_db_database -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - - -def find_database(instance, name): - try: - database = instance.get_database(name) - except Exception: - return False - - return database - - -def save_database(module, cdb_id, name, character_set, collate): - - for arg, value in dict(cdb_id=cdb_id, name=name).iteritems(): - if 
not value: - module.fail_json(msg='%s is required for the "rax_cdb_database" ' - 'module' % arg) - - cdb = pyrax.cloud_databases - - try: - instance = cdb.get(cdb_id) - except Exception, e: - module.fail_json(msg='%s' % e.message) - - changed = False - - database = find_database(instance, name) - - if not database: - try: - database = instance.create_database(name=name, - character_set=character_set, - collate=collate) - except Exception, e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - - module.exit_json(changed=changed, action='create', - database=rax_to_dict(database)) - - -def delete_database(module, cdb_id, name): - - for arg, value in dict(cdb_id=cdb_id, name=name).iteritems(): - if not value: - module.fail_json(msg='%s is required for the "rax_cdb_database" ' - 'module' % arg) - - cdb = pyrax.cloud_databases - - try: - instance = cdb.get(cdb_id) - except Exception, e: - module.fail_json(msg='%s' % e.message) - - changed = False - - database = find_database(instance, name) - - if database: - try: - database.delete() - except Exception, e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - - module.exit_json(changed=changed, action='delete') - - -def rax_cdb_database(module, state, cdb_id, name, character_set, collate): - - # act on the state - if state == 'present': - save_database(module, cdb_id, name, character_set, collate) - elif state == 'absent': - delete_database(module, cdb_id, name) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - cdb_id=dict(type='str', required=True), - name=dict(type='str', required=True), - character_set=dict(type='str', default='utf8'), - collate=dict(type='str', default='utf8_general_ci'), - state=dict(default='present', choices=['present', 'absent']) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - 
cdb_id = module.params.get('cdb_id') - name = module.params.get('name') - character_set = module.params.get('character_set') - collate = module.params.get('collate') - state = module.params.get('state') - - setup_rax_module(module, pyrax) - rax_cdb_database(module, state, cdb_id, name, character_set, collate) - - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.rax import * - -# invoke the module -main() diff --git a/library/cloud/rax_cdb_user b/library/cloud/rax_cdb_user deleted file mode 100644 index a0958084c9..0000000000 --- a/library/cloud/rax_cdb_user +++ /dev/null @@ -1,220 +0,0 @@ -#!/usr/bin/python -tt -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# This is a DOCUMENTATION stub specific to this module, it extends -# a documentation fragment located in ansible.utils.module_docs_fragments -DOCUMENTATION = ''' ---- -module: rax_cdb_user -short_description: create / delete a Rackspace Cloud Database -description: - - create / delete a database in the Cloud Databases. 
-version_added: "1.8" -options: - cdb_id: - description: - - The databases server UUID - default: null - db_username: - description: - - Name of the database user - default: null - db_password: - description: - - Database user password - default: null - databases: - description: - - Name of the databases that the user can access - default: [] - host: - description: - - Specifies the host from which a user is allowed to connect to - the database. Possible values are a string containing an IPv4 address - or "%" to allow connecting from any host - default: '%' - state: - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present -author: Simon JAILLET -extends_documentation_fragment: rackspace -''' - -EXAMPLES = ''' -- name: Build a user in Cloud Databases - tasks: - - name: User build request - local_action: - module: rax_cdb_user - credentials: ~/.raxpub - region: IAD - cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66 - db_username: user1 - db_password: user1 - databases: ['db1'] - state: present - register: rax_db_user -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - - -def find_user(instance, name): - try: - user = instance.get_user(name) - except Exception: - return False - - return user - - -def save_user(module, cdb_id, name, password, databases, host): - - for arg, value in dict(cdb_id=cdb_id, name=name).iteritems(): - if not value: - module.fail_json(msg='%s is required for the "rax_cdb_user" ' - 'module' % arg) - - cdb = pyrax.cloud_databases - - try: - instance = cdb.get(cdb_id) - except Exception, e: - module.fail_json(msg='%s' % e.message) - - changed = False - - user = find_user(instance, name) - - if not user: - action = 'create' - try: - user = instance.create_user(name=name, - password=password, - database_names=databases, - host=host) - except Exception, e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - else: - action = 'update' - - if user.host 
!= host: - changed = True - - user.update(password=password, host=host) - - former_dbs = set([item.name for item in user.list_user_access()]) - databases = set(databases) - - if databases != former_dbs: - try: - revoke_dbs = [db for db in former_dbs if db not in databases] - user.revoke_user_access(db_names=revoke_dbs) - - new_dbs = [db for db in databases if db not in former_dbs] - user.grant_user_access(db_names=new_dbs) - except Exception, e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - - module.exit_json(changed=changed, action=action, user=rax_to_dict(user)) - - -def delete_user(module, cdb_id, name): - - for arg, value in dict(cdb_id=cdb_id, name=name).iteritems(): - if not value: - module.fail_json(msg='%s is required for the "rax_cdb_user"' - ' module' % arg) - - cdb = pyrax.cloud_databases - - try: - instance = cdb.get(cdb_id) - except Exception, e: - module.fail_json(msg='%s' % e.message) - - changed = False - - user = find_user(instance, name) - - if user: - try: - user.delete() - except Exception, e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - - module.exit_json(changed=changed, action='delete') - - -def rax_cdb_user(module, state, cdb_id, name, password, databases, host): - - # act on the state - if state == 'present': - save_user(module, cdb_id, name, password, databases, host) - elif state == 'absent': - delete_user(module, cdb_id, name) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - cdb_id=dict(type='str', required=True), - db_username=dict(type='str', required=True), - db_password=dict(type='str', required=True, no_log=True), - databases=dict(type='list', default=[]), - host=dict(type='str', default='%'), - state=dict(default='present', choices=['present', 'absent']) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') 
- - cdb_id = module.params.get('cdb_id') - name = module.params.get('db_username') - password = module.params.get('db_password') - databases = module.params.get('databases') - host = unicode(module.params.get('host')) - state = module.params.get('state') - - setup_rax_module(module, pyrax) - rax_cdb_user(module, state, cdb_id, name, password, databases, host) - - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.rax import * - -# invoke the module -main() diff --git a/library/cloud/rax_clb b/library/cloud/rax_clb deleted file mode 100644 index 7a2699709d..0000000000 --- a/library/cloud/rax_clb +++ /dev/null @@ -1,303 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# This is a DOCUMENTATION stub specific to this module, it extends -# a documentation fragment located in ansible.utils.module_docs_fragments -DOCUMENTATION = ''' ---- -module: rax_clb -short_description: create / delete a load balancer in Rackspace Public Cloud -description: - - creates / deletes a Rackspace Public Cloud load balancer. 
-version_added: "1.4" -options: - algorithm: - description: - - algorithm for the balancer being created - choices: - - RANDOM - - LEAST_CONNECTIONS - - ROUND_ROBIN - - WEIGHTED_LEAST_CONNECTIONS - - WEIGHTED_ROUND_ROBIN - default: LEAST_CONNECTIONS - meta: - description: - - A hash of metadata to associate with the instance - default: null - name: - description: - - Name to give the load balancer - default: null - port: - description: - - Port for the balancer being created - default: 80 - protocol: - description: - - Protocol for the balancer being created - choices: - - DNS_TCP - - DNS_UDP - - FTP - - HTTP - - HTTPS - - IMAPS - - IMAPv4 - - LDAP - - LDAPS - - MYSQL - - POP3 - - POP3S - - SMTP - - TCP - - TCP_CLIENT_FIRST - - UDP - - UDP_STREAM - - SFTP - default: HTTP - state: - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - timeout: - description: - - timeout for communication between the balancer and the node - default: 30 - type: - description: - - type of interface for the balancer being created - choices: - - PUBLIC - - SERVICENET - default: PUBLIC - vip_id: - description: - - Virtual IP ID to use when creating the load balancer for purposes of - sharing an IP with another load balancer of another protocol - version_added: 1.5 - wait: - description: - - wait for the balancer to be in state 'running' before returning - default: "no" - choices: - - "yes" - - "no" - wait_timeout: - description: - - how long before wait gives up, in seconds - default: 300 -author: Christopher H. 
Laco, Matt Martz -extends_documentation_fragment: rackspace -''' - -EXAMPLES = ''' -- name: Build a Load Balancer - gather_facts: False - hosts: local - connection: local - tasks: - - name: Load Balancer create request - local_action: - module: rax_clb - credentials: ~/.raxpub - name: my-lb - port: 8080 - protocol: HTTP - type: SERVICENET - timeout: 30 - region: DFW - wait: yes - state: present - meta: - app: my-cool-app - register: my_lb -''' - - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - - -def cloud_load_balancer(module, state, name, meta, algorithm, port, protocol, - vip_type, timeout, wait, wait_timeout, vip_id): - for arg in (state, name, port, protocol, vip_type): - if not arg: - module.fail_json(msg='%s is required for rax_clb' % arg) - - if int(timeout) < 30: - module.fail_json(msg='"timeout" must be greater than or equal to 30') - - changed = False - balancers = [] - - clb = pyrax.cloud_loadbalancers - if not clb: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - for balancer in clb.list(): - if name != balancer.name and name != balancer.id: - continue - - balancers.append(balancer) - - if len(balancers) > 1: - module.fail_json(msg='Multiple Load Balancers were matched by name, ' - 'try using the Load Balancer ID instead') - - if state == 'present': - if isinstance(meta, dict): - metadata = [dict(key=k, value=v) for k, v in meta.items()] - - if not balancers: - try: - virtual_ips = [clb.VirtualIP(type=vip_type, id=vip_id)] - balancer = clb.create(name, metadata=metadata, port=port, - algorithm=algorithm, protocol=protocol, - timeout=timeout, virtual_ips=virtual_ips) - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - else: - balancer = balancers[0] - setattr(balancer, 'metadata', - [dict(key=k, value=v) for k, v in - balancer.get_metadata().items()]) - atts = { - 'name': name, - 'algorithm': algorithm, - 'port': port, - 'protocol': protocol, - 'timeout': timeout - } - for att, value in atts.iteritems(): - current = getattr(balancer, att) - if current != value: - changed = True - - if changed: - balancer.update(**atts) - - if balancer.metadata != metadata: - balancer.set_metadata(meta) - changed = True - - virtual_ips = [clb.VirtualIP(type=vip_type)] - current_vip_types = set([v.type for v in balancer.virtual_ips]) - vip_types = set([v.type for v in virtual_ips]) - if current_vip_types != vip_types: - module.fail_json(msg='Load balancer Virtual IP type cannot ' - 'be changed') - - if wait: - attempts = wait_timeout / 5 - pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts) - - balancer.get() - instance = rax_to_dict(balancer, 'clb') - - result = dict(changed=changed, balancer=instance) - - if balancer.status == 'ERROR': - result['msg'] = '%s failed to build' % balancer.id - elif wait and balancer.status not in ('ACTIVE', 'ERROR'): - result['msg'] = 'Timeout waiting on %s' % 
balancer.id - - if 'msg' in result: - module.fail_json(**result) - else: - module.exit_json(**result) - - elif state == 'absent': - if balancers: - balancer = balancers[0] - try: - balancer.delete() - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - - instance = rax_to_dict(balancer, 'clb') - - if wait: - attempts = wait_timeout / 5 - pyrax.utils.wait_until(balancer, 'status', ('DELETED'), - interval=5, attempts=attempts) - else: - instance = {} - - module.exit_json(changed=changed, balancer=instance) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - algorithm=dict(choices=CLB_ALGORITHMS, - default='LEAST_CONNECTIONS'), - meta=dict(type='dict', default={}), - name=dict(), - port=dict(type='int', default=80), - protocol=dict(choices=CLB_PROTOCOLS, default='HTTP'), - state=dict(default='present', choices=['present', 'absent']), - timeout=dict(type='int', default=30), - type=dict(choices=['PUBLIC', 'SERVICENET'], default='PUBLIC'), - vip_id=dict(), - wait=dict(type='bool'), - wait_timeout=dict(default=300), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - algorithm = module.params.get('algorithm') - meta = module.params.get('meta') - name = module.params.get('name') - port = module.params.get('port') - protocol = module.params.get('protocol') - state = module.params.get('state') - timeout = int(module.params.get('timeout')) - vip_id = module.params.get('vip_id') - vip_type = module.params.get('type') - wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - - setup_rax_module(module, pyrax) - - cloud_load_balancer(module, state, name, meta, algorithm, port, protocol, - vip_type, timeout, wait, wait_timeout, vip_id) - - -# import module snippets -from ansible.module_utils.basic import * -from 
ansible.module_utils.rax import * - -### invoke the module -main() diff --git a/library/cloud/rax_clb_nodes b/library/cloud/rax_clb_nodes deleted file mode 100644 index 24325b4459..0000000000 --- a/library/cloud/rax_clb_nodes +++ /dev/null @@ -1,303 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# This is a DOCUMENTATION stub specific to this module, it extends -# a documentation fragment located in ansible.utils.module_docs_fragments -DOCUMENTATION = ''' ---- -module: rax_clb_nodes -short_description: add, modify and remove nodes from a Rackspace Cloud Load Balancer -description: - - Adds, modifies and removes nodes from a Rackspace Cloud Load Balancer -version_added: "1.4" -options: - address: - required: false - description: - - IP address or domain name of the node - condition: - required: false - choices: - - enabled - - disabled - - draining - description: - - Condition for the node, which determines its role within the load - balancer - load_balancer_id: - required: true - type: integer - description: - - Load balancer id - node_id: - required: false - type: integer - description: - - Node id - port: - required: false - type: integer - description: - - Port number of the load balanced service on the node - state: - required: false - default: "present" - choices: - - present - - absent - description: - - Indicate desired state of the node - type: - 
required: false - choices: - - primary - - secondary - description: - - Type of node - wait: - required: false - default: "no" - choices: - - "yes" - - "no" - description: - - Wait for the load balancer to become active before returning - wait_timeout: - required: false - type: integer - default: 30 - description: - - How long to wait before giving up and returning an error - weight: - required: false - description: - - Weight of node -author: Lukasz Kawczynski -extends_documentation_fragment: rackspace -''' - -EXAMPLES = ''' -# Add a new node to the load balancer -- local_action: - module: rax_clb_nodes - load_balancer_id: 71 - address: 10.2.2.3 - port: 80 - condition: enabled - type: primary - wait: yes - credentials: /path/to/credentials - -# Drain connections from a node -- local_action: - module: rax_clb_nodes - load_balancer_id: 71 - node_id: 410 - condition: draining - wait: yes - credentials: /path/to/credentials - -# Remove a node from the load balancer -- local_action: - module: rax_clb_nodes - load_balancer_id: 71 - node_id: 410 - state: absent - wait: yes - credentials: /path/to/credentials -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - - -def _activate_virtualenv(path): - path = os.path.expanduser(path) - activate_this = os.path.join(path, 'bin', 'activate_this.py') - execfile(activate_this, dict(__file__=activate_this)) - - -def _get_node(lb, node_id=None, address=None, port=None): - """Return a matching node""" - for node in getattr(lb, 'nodes', []): - match_list = [] - if node_id is not None: - match_list.append(getattr(node, 'id', None) == node_id) - if address is not None: - match_list.append(getattr(node, 'address', None) == address) - if port is not None: - match_list.append(getattr(node, 'port', None) == port) - - if match_list and all(match_list): - return node - - return None - - -def _is_primary(node): - """Return True if node is primary and enabled""" - return (node.type.lower() == 'primary' and - 
node.condition.lower() == 'enabled') - - -def _get_primary_nodes(lb): - """Return a list of primary and enabled nodes""" - nodes = [] - for node in lb.nodes: - if _is_primary(node): - nodes.append(node) - return nodes - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - address=dict(), - condition=dict(choices=['enabled', 'disabled', 'draining']), - load_balancer_id=dict(required=True, type='int'), - node_id=dict(type='int'), - port=dict(type='int'), - state=dict(default='present', choices=['present', 'absent']), - type=dict(choices=['primary', 'secondary']), - virtualenv=dict(), - wait=dict(default=False, type='bool'), - wait_timeout=dict(default=30, type='int'), - weight=dict(type='int'), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - address = module.params['address'] - condition = (module.params['condition'] and - module.params['condition'].upper()) - load_balancer_id = module.params['load_balancer_id'] - node_id = module.params['node_id'] - port = module.params['port'] - state = module.params['state'] - typ = module.params['type'] and module.params['type'].upper() - virtualenv = module.params['virtualenv'] - wait = module.params['wait'] - wait_timeout = module.params['wait_timeout'] or 1 - weight = module.params['weight'] - - if virtualenv: - try: - _activate_virtualenv(virtualenv) - except IOError, e: - module.fail_json(msg='Failed to activate virtualenv %s (%s)' % ( - virtualenv, e)) - - setup_rax_module(module, pyrax) - - if not pyrax.cloud_loadbalancers: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - try: - lb = pyrax.cloud_loadbalancers.get(load_balancer_id) - except pyrax.exc.PyraxException, e: - module.fail_json(msg='%s' % e.message) - - node = _get_node(lb, node_id, address, port) - - result = rax_clb_node_to_dict(node) - - if state == 'absent': - if not node: # Removing a non-existent node - module.exit_json(changed=False, state=state) - - # The API detects this as well but currently pyrax does not return a - # meaningful error message - if _is_primary(node) and len(_get_primary_nodes(lb)) == 1: - module.fail_json( - msg='At least one primary node has to be enabled') - - try: - lb.delete_node(node) - result = {} - except pyrax.exc.NotFound: - module.exit_json(changed=False, state=state) - except pyrax.exc.PyraxException, e: - module.fail_json(msg='%s' % e.message) - else: # present - if not node: - if node_id: # Updating a non-existent node - msg = 'Node %d not found' % node_id - if lb.nodes: - msg += (' (available nodes: %s)' % - ', '.join([str(x.id) for x in lb.nodes])) - module.fail_json(msg=msg) - else: # Creating a new node - try: - node = pyrax.cloudloadbalancers.Node( - address=address, port=port, condition=condition, - weight=weight, type=typ) - resp, body = lb.add_nodes([node]) - result.update(body['nodes'][0]) - except pyrax.exc.PyraxException, e: - module.fail_json(msg='%s' % e.message) - else: # Updating an existing node - mutable = { - 'condition': condition, - 'type': typ, - 'weight': weight, - } - - for name, value in mutable.items(): - if value is None or value == getattr(node, name): - mutable.pop(name) - - if not mutable: - module.exit_json(changed=False, state=state, node=result) - - try: - # The diff has to be set explicitly to update node's weight and - # type; this should probably be fixed in pyrax - lb.update_node(node, diff=mutable) - result.update(mutable) - except pyrax.exc.PyraxException, e: - module.fail_json(msg='%s' % 
e.message) - - if wait: - pyrax.utils.wait_until(lb, "status", "ACTIVE", interval=1, - attempts=wait_timeout) - if lb.status != 'ACTIVE': - module.fail_json( - msg='Load balancer not active after %ds (current status: %s)' % - (wait_timeout, lb.status.lower())) - - kwargs = {'node': result} if result else {} - module.exit_json(changed=True, state=state, **kwargs) - - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.rax import * - -### invoke the module -main() diff --git a/library/cloud/rax_dns b/library/cloud/rax_dns deleted file mode 100644 index dacc4c672f..0000000000 --- a/library/cloud/rax_dns +++ /dev/null @@ -1,173 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# This is a DOCUMENTATION stub specific to this module, it extends -# a documentation fragment located in ansible.utils.module_docs_fragments -DOCUMENTATION = ''' ---- -module: rax_dns -short_description: Manage domains on Rackspace Cloud DNS -description: - - Manage domains on Rackspace Cloud DNS -version_added: 1.5 -options: - comment: - description: - - Brief description of the domain. 
Maximum length of 160 characters - email: - desctiption: - - Email address of the domain administrator - name: - description: - - Domain name to create - state: - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - ttl: - description: - - Time to live of domain in seconds - default: 3600 -notes: - - "It is recommended that plays utilizing this module be run with - C(serial: 1) to avoid exceeding the API request limit imposed by - the Rackspace CloudDNS API" -author: Matt Martz -extends_documentation_fragment: rackspace -''' - -EXAMPLES = ''' -- name: Create domain - hosts: all - gather_facts: False - tasks: - - name: Domain create request - local_action: - module: rax_dns - credentials: ~/.raxpub - name: example.org - email: admin@example.org - register: rax_dns -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - - -def rax_dns(module, comment, email, name, state, ttl): - changed = False - - dns = pyrax.cloud_dns - if not dns: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if state == 'present': - if not email: - module.fail_json(msg='An "email" attribute is required for ' - 'creating a domain') - - try: - domain = dns.find(name=name) - except pyrax.exceptions.NoUniqueMatch, e: - module.fail_json(msg='%s' % e.message) - except pyrax.exceptions.NotFound: - try: - domain = dns.create(name=name, emailAddress=email, ttl=ttl, - comment=comment) - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - - update = {} - if comment != getattr(domain, 'comment', None): - update['comment'] = comment - if ttl != getattr(domain, 'ttl', None): - update['ttl'] = ttl - if email != getattr(domain, 'emailAddress', None): - update['emailAddress'] = email - - if update: - try: - domain.update(**update) - changed = True - domain.get() - except Exception, e: - module.fail_json(msg='%s' % e.message) - - elif state == 'absent': - try: - domain = dns.find(name=name) - except pyrax.exceptions.NotFound: - domain = {} - pass - except Exception, e: - module.fail_json(msg='%s' % e.message) - - if domain: - try: - domain.delete() - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, domain=rax_to_dict(domain)) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - comment=dict(), - email=dict(), - name=dict(), - state=dict(default='present', choices=['present', 'absent']), - ttl=dict(type='int', default=3600), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - comment = module.params.get('comment') - email = module.params.get('email') - name = module.params.get('name') - state = module.params.get('state') - ttl = module.params.get('ttl') - - setup_rax_module(module, pyrax, False) - - 
rax_dns(module, comment, email, name, state, ttl) - - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.rax import * - -### invoke the module -main() diff --git a/library/cloud/rax_dns_record b/library/cloud/rax_dns_record deleted file mode 100644 index a28f5b9a9b..0000000000 --- a/library/cloud/rax_dns_record +++ /dev/null @@ -1,335 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# This is a DOCUMENTATION stub specific to this module, it extends -# a documentation fragment located in ansible.utils.module_docs_fragments -DOCUMENTATION = ''' ---- -module: rax_dns_record -short_description: Manage DNS records on Rackspace Cloud DNS -description: - - Manage DNS records on Rackspace Cloud DNS -version_added: 1.5 -options: - comment: - description: - - Brief description of the domain. Maximum length of 160 characters - data: - description: - - IP address for A/AAAA record, FQDN for CNAME/MX/NS, or text data for - SRV/TXT - required: True - domain: - description: - - Domain name to create the record in. This is an invalid option when - type=PTR - loadbalancer: - description: - - Load Balancer ID to create a PTR record for. 
Only used with type=PTR - version_added: 1.7 - name: - description: - - FQDN record name to create - required: True - priority: - description: - - Required for MX and SRV records, but forbidden for other record types. - If specified, must be an integer from 0 to 65535. - server: - description: - - Server ID to create a PTR record for. Only used with type=PTR - version_added: 1.7 - state: - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - ttl: - description: - - Time to live of record in seconds - default: 3600 - type: - description: - - DNS record type - choices: - - A - - AAAA - - CNAME - - MX - - NS - - SRV - - TXT - - PTR - required: true -notes: - - "It is recommended that plays utilizing this module be run with - C(serial: 1) to avoid exceeding the API request limit imposed by - the Rackspace CloudDNS API" - - To manipulate a C(PTR) record either C(loadbalancer) or C(server) must be - supplied - - As of version 1.7, the C(type) field is required and no longer defaults to an C(A) record. 
- - C(PTR) record support was added in version 1.7 -author: Matt Martz -extends_documentation_fragment: rackspace -''' - -EXAMPLES = ''' -- name: Create DNS Records - hosts: all - gather_facts: False - tasks: - - name: Create A record - local_action: - module: rax_dns_record - credentials: ~/.raxpub - domain: example.org - name: www.example.org - data: "{{ rax_accessipv4 }}" - type: A - register: a_record - - - name: Create PTR record - local_action: - module: rax_dns_record - credentials: ~/.raxpub - server: "{{ rax_id }}" - name: "{{ inventory_hostname }}" - region: DFW - register: ptr_record -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - - -def rax_dns_record_ptr(module, data=None, comment=None, loadbalancer=None, - name=None, server=None, state='present', ttl=7200): - changed = False - results = [] - - dns = pyrax.cloud_dns - - if not dns: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if loadbalancer: - item = rax_find_loadbalancer(module, pyrax, loadbalancer) - elif server: - item = rax_find_server(module, pyrax, server) - - if state == 'present': - current = dns.list_ptr_records(item) - for record in current: - if record.data == data: - if record.ttl != ttl or record.name != name: - try: - dns.update_ptr_record(item, record, name, data, ttl) - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - record.ttl = ttl - record.name = name - results.append(rax_to_dict(record)) - break - else: - results.append(rax_to_dict(record)) - break - - if not results: - record = dict(name=name, type='PTR', data=data, ttl=ttl, - comment=comment) - try: - results = dns.add_ptr_records(item, [record]) - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, records=results) - - elif state == 'absent': - current = dns.list_ptr_records(item) - 
for record in current: - if record.data == data: - results.append(rax_to_dict(record)) - break - - if results: - try: - dns.delete_ptr_records(item, data) - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, records=results) - - -def rax_dns_record(module, comment=None, data=None, domain=None, name=None, - priority=None, record_type='A', state='present', ttl=7200): - """Function for manipulating record types other than PTR""" - - changed = False - - dns = pyrax.cloud_dns - if not dns: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if state == 'present': - if not priority and record_type in ['MX', 'SRV']: - module.fail_json(msg='A "priority" attribute is required for ' - 'creating a MX or SRV record') - - try: - domain = dns.find(name=domain) - except Exception, e: - module.fail_json(msg='%s' % e.message) - - try: - record = domain.find_record(record_type, name=name) - except pyrax.exceptions.DomainRecordNotUnique, e: - module.fail_json(msg='%s' % e.message) - except pyrax.exceptions.DomainRecordNotFound, e: - try: - record_data = { - 'type': record_type, - 'name': name, - 'data': data, - 'ttl': ttl - } - if comment: - record_data.update(dict(comment=comment)) - if priority and record_type.upper() in ['MX', 'SRV']: - record_data.update(dict(priority=priority)) - - record = domain.add_records([record_data])[0] - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - - update = {} - if comment != getattr(record, 'comment', None): - update['comment'] = comment - if ttl != getattr(record, 'ttl', None): - update['ttl'] = ttl - if priority != getattr(record, 'priority', None): - update['priority'] = priority - if data != getattr(record, 'data', None): - update['data'] = data - - if update: - try: - record.update(**update) - changed = True - record.get() - except Exception, 
e: - module.fail_json(msg='%s' % e.message) - - elif state == 'absent': - try: - domain = dns.find(name=domain) - except Exception, e: - module.fail_json(msg='%s' % e.message) - - try: - record = domain.find_record(record_type, name=name, data=data) - except pyrax.exceptions.DomainRecordNotFound, e: - record = {} - pass - except pyrax.exceptions.DomainRecordNotUnique, e: - module.fail_json(msg='%s' % e.message) - - if record: - try: - record.delete() - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, record=rax_to_dict(record)) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - comment=dict(), - data=dict(required=True), - domain=dict(), - loadbalancer=dict(), - name=dict(required=True), - priority=dict(type='int'), - server=dict(), - state=dict(default='present', choices=['present', 'absent']), - ttl=dict(type='int', default=3600), - type=dict(required=True, choices=['A', 'AAAA', 'CNAME', 'MX', 'NS', - 'SRV', 'TXT', 'PTR']) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - mutually_exclusive=[ - ['server', 'loadbalancer', 'domain'], - ], - required_one_of=[ - ['server', 'loadbalancer', 'domain'], - ], - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - comment = module.params.get('comment') - data = module.params.get('data') - domain = module.params.get('domain') - loadbalancer = module.params.get('loadbalancer') - name = module.params.get('name') - priority = module.params.get('priority') - server = module.params.get('server') - state = module.params.get('state') - ttl = module.params.get('ttl') - record_type = module.params.get('type') - - setup_rax_module(module, pyrax, False) - - if record_type.upper() == 'PTR': - if not server and not loadbalancer: - module.fail_json(msg='one of the following is required: ' - 'server,loadbalancer') - 
rax_dns_record_ptr(module, data=data, comment=comment, - loadbalancer=loadbalancer, name=name, server=server, - state=state, ttl=ttl) - else: - rax_dns_record(module, comment=comment, data=data, domain=domain, - name=name, priority=priority, record_type=record_type, - state=state, ttl=ttl) - - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.rax import * - -### invoke the module -main() diff --git a/library/cloud/rax_facts b/library/cloud/rax_facts deleted file mode 100644 index 68ef446f76..0000000000 --- a/library/cloud/rax_facts +++ /dev/null @@ -1,144 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# This is a DOCUMENTATION stub specific to this module, it extends -# a documentation fragment located in ansible.utils.module_docs_fragments -DOCUMENTATION = ''' ---- -module: rax_facts -short_description: Gather facts for Rackspace Cloud Servers -description: - - Gather facts for Rackspace Cloud Servers. 
-version_added: "1.4" -options: - address: - description: - - Server IP address to retrieve facts for, will match any IP assigned to - the server - id: - description: - - Server ID to retrieve facts for - name: - description: - - Server name to retrieve facts for - default: null -author: Matt Martz -extends_documentation_fragment: rackspace.openstack -''' - -EXAMPLES = ''' -- name: Gather info about servers - hosts: all - gather_facts: False - tasks: - - name: Get facts about servers - local_action: - module: rax_facts - credentials: ~/.raxpub - name: "{{ inventory_hostname }}" - region: DFW - - name: Map some facts - set_fact: - ansible_ssh_host: "{{ rax_accessipv4 }}" -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - - -def rax_facts(module, address, name, server_id): - changed = False - - cs = pyrax.cloudservers - - if cs is None: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - ansible_facts = {} - - search_opts = {} - if name: - search_opts = dict(name='^%s$' % name) - try: - servers = cs.servers.list(search_opts=search_opts) - except Exception, e: - module.fail_json(msg='%s' % e.message) - elif address: - servers = [] - try: - for server in cs.servers.list(): - for addresses in server.networks.values(): - if address in addresses: - servers.append(server) - break - except Exception, e: - module.fail_json(msg='%s' % e.message) - elif server_id: - servers = [] - try: - servers.append(cs.servers.get(server_id)) - except Exception, e: - pass - - if len(servers) > 1: - module.fail_json(msg='Multiple servers found matching provided ' - 'search parameters') - elif len(servers) == 1: - ansible_facts = rax_to_dict(servers[0], 'server') - - module.exit_json(changed=changed, ansible_facts=ansible_facts) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - address=dict(), - id=dict(), - 
name=dict(), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - mutually_exclusive=[['address', 'id', 'name']], - required_one_of=[['address', 'id', 'name']], - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - address = module.params.get('address') - server_id = module.params.get('id') - name = module.params.get('name') - - setup_rax_module(module, pyrax) - - rax_facts(module, address, name, server_id) - - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.rax import * - -### invoke the module -main() diff --git a/library/cloud/rax_files b/library/cloud/rax_files deleted file mode 100644 index 3c54b0a9e2..0000000000 --- a/library/cloud/rax_files +++ /dev/null @@ -1,379 +0,0 @@ -#!/usr/bin/python - -# (c) 2013, Paul Durivage -# -# This file is part of Ansible. -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# This is a DOCUMENTATION stub specific to this module, it extends -# a documentation fragment located in ansible.utils.module_docs_fragments -DOCUMENTATION = ''' ---- -module: rax_files -short_description: Manipulate Rackspace Cloud Files Containers -description: - - Manipulate Rackspace Cloud Files Containers -version_added: "1.5" -options: - clear_meta: - description: - - Optionally clear existing metadata when applying metadata to existing containers. 
- Selecting this option is only appropriate when setting type=meta - choices: - - "yes" - - "no" - default: "no" - container: - description: - - The container to use for container or metadata operations. - required: true - meta: - description: - - A hash of items to set as metadata values on a container - private: - description: - - Used to set a container as private, removing it from the CDN. B(Warning!) - Private containers, if previously made public, can have live objects - available until the TTL on cached objects expires - public: - description: - - Used to set a container as public, available via the Cloud Files CDN - region: - description: - - Region to create an instance in - default: DFW - state: - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present - ttl: - description: - - In seconds, set a container-wide TTL for all objects cached on CDN edge nodes. - Setting a TTL is only appropriate for containers that are public - type: - description: - - Type of object to do work on, i.e. 
metadata object or a container object - choices: - - file - - meta - default: file - web_error: - description: - - Sets an object to be presented as the HTTP error page when accessed by the CDN URL - web_index: - description: - - Sets an object to be presented as the HTTP index page when accessed by the CDN URL -author: Paul Durivage -extends_documentation_fragment: rackspace -''' - -EXAMPLES = ''' -- name: "Test Cloud Files Containers" - hosts: local - gather_facts: no - tasks: - - name: "List all containers" - rax_files: state=list - - - name: "Create container called 'mycontainer'" - rax_files: container=mycontainer - - - name: "Create container 'mycontainer2' with metadata" - rax_files: - container: mycontainer2 - meta: - key: value - file_for: someuser@example.com - - - name: "Set a container's web index page" - rax_files: container=mycontainer web_index=index.html - - - name: "Set a container's web error page" - rax_files: container=mycontainer web_error=error.html - - - name: "Make container public" - rax_files: container=mycontainer public=yes - - - name: "Make container public with a 24 hour TTL" - rax_files: container=mycontainer public=yes ttl=86400 - - - name: "Make container private" - rax_files: container=mycontainer private=yes - -- name: "Test Cloud Files Containers Metadata Storage" - hosts: local - gather_facts: no - tasks: - - name: "Get mycontainer2 metadata" - rax_files: - container: mycontainer2 - type: meta - - - name: "Set mycontainer2 metadata" - rax_files: - container: mycontainer2 - type: meta - meta: - uploaded_by: someuser@example.com - - - name: "Remove mycontainer2 metadata" - rax_files: - container: "mycontainer2" - type: meta - state: absent - meta: - key: "" - file_for: "" -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError, e: - HAS_PYRAX = False - -EXIT_DICT = dict(success=True) -META_PREFIX = 'x-container-meta-' - - -def _get_container(module, cf, container): - try: - return cf.get_container(container) - except 
pyrax.exc.NoSuchContainer, e: - module.fail_json(msg=e.message) - - -def _fetch_meta(module, container): - EXIT_DICT['meta'] = dict() - try: - for k, v in container.get_metadata().items(): - split_key = k.split(META_PREFIX)[-1] - EXIT_DICT['meta'][split_key] = v - except Exception, e: - module.fail_json(msg=e.message) - - -def meta(cf, module, container_, state, meta_, clear_meta): - c = _get_container(module, cf, container_) - - if meta_ and state == 'present': - try: - meta_set = c.set_metadata(meta_, clear=clear_meta) - except Exception, e: - module.fail_json(msg=e.message) - elif meta_ and state == 'absent': - remove_results = [] - for k, v in meta_.items(): - c.remove_metadata_key(k) - remove_results.append(k) - EXIT_DICT['deleted_meta_keys'] = remove_results - elif state == 'absent': - remove_results = [] - for k, v in c.get_metadata().items(): - c.remove_metadata_key(k) - remove_results.append(k) - EXIT_DICT['deleted_meta_keys'] = remove_results - - _fetch_meta(module, c) - _locals = locals().keys() - - EXIT_DICT['container'] = c.name - if 'meta_set' in _locals or 'remove_results' in _locals: - EXIT_DICT['changed'] = True - - module.exit_json(**EXIT_DICT) - - -def container(cf, module, container_, state, meta_, clear_meta, ttl, public, - private, web_index, web_error): - if public and private: - module.fail_json(msg='container cannot be simultaneously ' - 'set to public and private') - - if state == 'absent' and (meta_ or clear_meta or public or private or web_index or web_error): - module.fail_json(msg='state cannot be omitted when setting/removing ' - 'attributes on a container') - - if state == 'list': - # We don't care if attributes are specified, let's list containers - EXIT_DICT['containers'] = cf.list_containers() - module.exit_json(**EXIT_DICT) - - try: - c = cf.get_container(container_) - except pyrax.exc.NoSuchContainer, e: - # Make the container if state=present, otherwise bomb out - if state == 'present': - try: - c = 
cf.create_container(container_) - except Exception, e: - module.fail_json(msg=e.message) - else: - EXIT_DICT['changed'] = True - EXIT_DICT['created'] = True - else: - module.fail_json(msg=e.message) - else: - # Successfully grabbed a container object - # Delete if state is absent - if state == 'absent': - try: - cont_deleted = c.delete() - except Exception, e: - module.fail_json(msg=e.message) - else: - EXIT_DICT['deleted'] = True - - if meta_: - try: - meta_set = c.set_metadata(meta_, clear=clear_meta) - except Exception, e: - module.fail_json(msg=e.message) - finally: - _fetch_meta(module, c) - - if ttl: - try: - c.cdn_ttl = ttl - except Exception, e: - module.fail_json(msg=e.message) - else: - EXIT_DICT['ttl'] = c.cdn_ttl - - if public: - try: - cont_public = c.make_public() - except Exception, e: - module.fail_json(msg=e.message) - else: - EXIT_DICT['container_urls'] = dict(url=c.cdn_uri, - ssl_url=c.cdn_ssl_uri, - streaming_url=c.cdn_streaming_uri, - ios_uri=c.cdn_ios_uri) - - if private: - try: - cont_private = c.make_private() - except Exception, e: - module.fail_json(msg=e.message) - else: - EXIT_DICT['set_private'] = True - - if web_index: - try: - cont_web_index = c.set_web_index_page(web_index) - except Exception, e: - module.fail_json(msg=e.message) - else: - EXIT_DICT['set_index'] = True - finally: - _fetch_meta(module, c) - - if web_error: - try: - cont_err_index = c.set_web_error_page(web_error) - except Exception, e: - module.fail_json(msg=e.message) - else: - EXIT_DICT['set_error'] = True - finally: - _fetch_meta(module, c) - - EXIT_DICT['container'] = c.name - EXIT_DICT['objs_in_container'] = c.object_count - EXIT_DICT['total_bytes'] = c.total_bytes - - _locals = locals().keys() - if ('cont_deleted' in _locals - or 'meta_set' in _locals - or 'cont_public' in _locals - or 'cont_private' in _locals - or 'cont_web_index' in _locals - or 'cont_err_index' in _locals): - EXIT_DICT['changed'] = True - - module.exit_json(**EXIT_DICT) - - -def 
cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public, - private, web_index, web_error): - """ Dispatch from here to work with metadata or file objects """ - cf = pyrax.cloudfiles - - if cf is None: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if typ == "container": - container(cf, module, container_, state, meta_, clear_meta, ttl, - public, private, web_index, web_error) - else: - meta(cf, module, container_, state, meta_, clear_meta) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - container=dict(), - state=dict(choices=['present', 'absent', 'list'], - default='present'), - meta=dict(type='dict', default=dict()), - clear_meta=dict(default=False, type='bool'), - type=dict(choices=['container', 'meta'], default='container'), - ttl=dict(type='int'), - public=dict(default=False, type='bool'), - private=dict(default=False, type='bool'), - web_index=dict(), - web_error=dict() - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - container_ = module.params.get('container') - state = module.params.get('state') - meta_ = module.params.get('meta') - clear_meta = module.params.get('clear_meta') - typ = module.params.get('type') - ttl = module.params.get('ttl') - public = module.params.get('public') - private = module.params.get('private') - web_index = module.params.get('web_index') - web_error = module.params.get('web_error') - - if state in ['present', 'absent'] and not container_: - module.fail_json(msg='please specify a container name') - if clear_meta and not typ == 'meta': - module.fail_json(msg='clear_meta can only be used when setting ' - 'metadata') - - setup_rax_module(module, pyrax) - cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, 
public, - private, web_index, web_error) - - -from ansible.module_utils.basic import * -from ansible.module_utils.rax import * - -main() diff --git a/library/cloud/rax_files_objects b/library/cloud/rax_files_objects deleted file mode 100644 index f251047767..0000000000 --- a/library/cloud/rax_files_objects +++ /dev/null @@ -1,603 +0,0 @@ -#!/usr/bin/python - -# (c) 2013, Paul Durivage -# -# This file is part of Ansible. -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# This is a DOCUMENTATION stub specific to this module, it extends -# a documentation fragment located in ansible.utils.module_docs_fragments -DOCUMENTATION = ''' ---- -module: rax_files_objects -short_description: Upload, download, and delete objects in Rackspace Cloud Files -description: - - Upload, download, and delete objects in Rackspace Cloud Files -version_added: "1.5" -options: - clear_meta: - description: - - Optionally clear existing metadata when applying metadata to existing objects. - Selecting this option is only appropriate when setting type=meta - choices: - - "yes" - - "no" - default: "no" - container: - description: - - The container to use for file object operations. - required: true - default: null - dest: - description: - - The destination of a "get" operation; i.e. a local directory, "/home/user/myfolder". - Used to specify the destination of an operation on a remote object; i.e. 
a file name, - "file1", or a comma-separated list of remote objects, "file1,file2,file17" - expires: - description: - - Used to set an expiration on a file or folder uploaded to Cloud Files. - Requires an integer, specifying expiration in seconds - default: null - meta: - description: - - A hash of items to set as metadata values on an uploaded file or folder - default: null - method: - description: - - The method of operation to be performed. For example, put to upload files - to Cloud Files, get to download files from Cloud Files or delete to delete - remote objects in Cloud Files - choices: - - get - - put - - delete - default: get - src: - description: - - Source from which to upload files. Used to specify a remote object as a source for - an operation, i.e. a file name, "file1", or a comma-separated list of remote objects, - "file1,file2,file17". src and dest are mutually exclusive on remote-only object operations - default: null - structure: - description: - - Used to specify whether to maintain nested directory structure when downloading objects - from Cloud Files. 
Setting to false downloads the contents of a container to a single, - flat directory - choices: - - yes - - "no" - default: "yes" - state: - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present - type: - description: - - Type of object to do work on - - Metadata object or a file object - choices: - - file - - meta - default: file -author: Paul Durivage -extends_documentation_fragment: rackspace -''' - -EXAMPLES = ''' -- name: "Test Cloud Files Objects" - hosts: local - gather_facts: False - tasks: - - name: "Get objects from test container" - rax_files_objects: container=testcont dest=~/Downloads/testcont - - - name: "Get single object from test container" - rax_files_objects: container=testcont src=file1 dest=~/Downloads/testcont - - - name: "Get several objects from test container" - rax_files_objects: container=testcont src=file1,file2,file3 dest=~/Downloads/testcont - - - name: "Delete one object in test container" - rax_files_objects: container=testcont method=delete dest=file1 - - - name: "Delete several objects in test container" - rax_files_objects: container=testcont method=delete dest=file2,file3,file4 - - - name: "Delete all objects in test container" - rax_files_objects: container=testcont method=delete - - - name: "Upload all files to test container" - rax_files_objects: container=testcont method=put src=~/Downloads/onehundred - - - name: "Upload one file to test container" - rax_files_objects: container=testcont method=put src=~/Downloads/testcont/file1 - - - name: "Upload one file to test container with metadata" - rax_files_objects: - container: testcont - src: ~/Downloads/testcont/file2 - method: put - meta: - testkey: testdata - who_uploaded_this: someuser@example.com - - - name: "Upload one file to test container with TTL of 60 seconds" - rax_files_objects: container=testcont method=put src=~/Downloads/testcont/file3 expires=60 - - - name: "Attempt to get remote object that does not exist" - 
rax_files_objects: container=testcont method=get src=FileThatDoesNotExist.jpg dest=~/Downloads/testcont - ignore_errors: yes - - - name: "Attempt to delete remote object that does not exist" - rax_files_objects: container=testcont method=delete dest=FileThatDoesNotExist.jpg - ignore_errors: yes - -- name: "Test Cloud Files Objects Metadata" - hosts: local - gather_facts: false - tasks: - - name: "Get metadata on one object" - rax_files_objects: container=testcont type=meta dest=file2 - - - name: "Get metadata on several objects" - rax_files_objects: container=testcont type=meta src=file2,file1 - - - name: "Set metadata on an object" - rax_files_objects: - container: testcont - type: meta - dest: file17 - method: put - meta: - key1: value1 - key2: value2 - clear_meta: true - - - name: "Verify metadata is set" - rax_files_objects: container=testcont type=meta src=file17 - - - name: "Delete metadata" - rax_files_objects: - container: testcont - type: meta - dest: file17 - method: delete - meta: - key1: '' - key2: '' - - - name: "Get metadata on all objects" - rax_files_objects: container=testcont type=meta -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -EXIT_DICT = dict(success=False) -META_PREFIX = 'x-object-meta-' - - -def _get_container(module, cf, container): - try: - return cf.get_container(container) - except pyrax.exc.NoSuchContainer, e: - module.fail_json(msg=e.message) - - -def upload(module, cf, container, src, dest, meta, expires): - """ Uploads a single object or a folder to Cloud Files Optionally sets an - metadata, TTL value (expires), or Content-Disposition and Content-Encoding - headers. 
- """ - c = _get_container(module, cf, container) - - num_objs_before = len(c.get_object_names()) - - if not src: - module.fail_json(msg='src must be specified when uploading') - - src = os.path.abspath(os.path.expanduser(src)) - is_dir = os.path.isdir(src) - - if not is_dir and not os.path.isfile(src) or not os.path.exists(src): - module.fail_json(msg='src must be a file or a directory') - if dest and is_dir: - module.fail_json(msg='dest cannot be set when whole ' - 'directories are uploaded') - - cont_obj = None - if dest and not is_dir: - try: - cont_obj = c.upload_file(src, obj_name=dest, ttl=expires) - except Exception, e: - module.fail_json(msg=e.message) - elif is_dir: - try: - id, total_bytes = cf.upload_folder(src, container=c.name, ttl=expires) - except Exception, e: - module.fail_json(msg=e.message) - - while True: - bytes = cf.get_uploaded(id) - if bytes == total_bytes: - break - time.sleep(1) - else: - try: - cont_obj = c.upload_file(src, ttl=expires) - except Exception, e: - module.fail_json(msg=e.message) - - num_objs_after = len(c.get_object_names()) - - if not meta: - meta = dict() - - meta_result = dict() - if meta: - if cont_obj: - meta_result = cont_obj.set_metadata(meta) - else: - def _set_meta(objs, meta): - """ Sets metadata on a list of objects specified by name """ - for obj in objs: - try: - result = c.get_object(obj).set_metadata(meta) - except Exception, e: - module.fail_json(msg=e.message) - else: - meta_result[obj] = result - return meta_result - - def _walker(objs, path, filenames): - """ Callback func for os.path.walk """ - prefix = '' - if path != src: - prefix = path.split(src)[-1].lstrip('/') - filenames = [os.path.join(prefix, name) for name in filenames - if not os.path.isdir(name)] - objs += filenames - - _objs = [] - os.path.walk(src, _walker, _objs) - meta_result = _set_meta(_objs, meta) - - EXIT_DICT['success'] = True - EXIT_DICT['container'] = c.name - EXIT_DICT['msg'] = "Uploaded %s to container: %s" % (src, c.name) - if 
cont_obj or locals().get('bytes'): - EXIT_DICT['changed'] = True - if meta_result: - EXIT_DICT['meta'] = dict(updated=True) - - if cont_obj: - EXIT_DICT['bytes'] = cont_obj.total_bytes - EXIT_DICT['etag'] = cont_obj.etag - else: - EXIT_DICT['bytes'] = total_bytes - - module.exit_json(**EXIT_DICT) - - -def download(module, cf, container, src, dest, structure): - """ Download objects from Cloud Files to a local path specified by "dest". - Optionally disable maintaining a directory structure by by passing a - false value to "structure". - """ - # Looking for an explicit destination - if not dest: - module.fail_json(msg='dest is a required argument when ' - 'downloading from Cloud Files') - - # Attempt to fetch the container by name - c = _get_container(module, cf, container) - - # Accept a single object name or a comma-separated list of objs - # If not specified, get the entire container - if src: - objs = src.split(',') - objs = map(str.strip, objs) - else: - objs = c.get_object_names() - - dest = os.path.abspath(os.path.expanduser(dest)) - is_dir = os.path.isdir(dest) - - if not is_dir: - module.fail_json(msg='dest must be a directory') - - results = [] - for obj in objs: - try: - c.download_object(obj, dest, structure=structure) - except Exception, e: - module.fail_json(msg=e.message) - else: - results.append(obj) - - len_results = len(results) - len_objs = len(objs) - - EXIT_DICT['container'] = c.name - EXIT_DICT['requested_downloaded'] = results - if results: - EXIT_DICT['changed'] = True - if len_results == len_objs: - EXIT_DICT['success'] = True - EXIT_DICT['msg'] = "%s objects downloaded to %s" % (len_results, dest) - else: - EXIT_DICT['msg'] = "Error: only %s of %s objects were " \ - "downloaded" % (len_results, len_objs) - module.exit_json(**EXIT_DICT) - - -def delete(module, cf, container, src, dest): - """ Delete specific objects by proving a single file name or a - comma-separated list to src OR dest (but not both). 
Omitting file name(s) - assumes the entire container is to be deleted. - """ - objs = None - if src and dest: - module.fail_json(msg="Error: ambiguous instructions; files to be deleted " - "have been specified on both src and dest args") - elif dest: - objs = dest - else: - objs = src - - c = _get_container(module, cf, container) - - if objs: - objs = objs.split(',') - objs = map(str.strip, objs) - else: - objs = c.get_object_names() - - num_objs = len(objs) - - results = [] - for obj in objs: - try: - result = c.delete_object(obj) - except Exception, e: - module.fail_json(msg=e.message) - else: - results.append(result) - - num_deleted = results.count(True) - - EXIT_DICT['container'] = c.name - EXIT_DICT['deleted'] = num_deleted - EXIT_DICT['requested_deleted'] = objs - - if num_deleted: - EXIT_DICT['changed'] = True - - if num_objs == num_deleted: - EXIT_DICT['success'] = True - EXIT_DICT['msg'] = "%s objects deleted" % num_deleted - else: - EXIT_DICT['msg'] = ("Error: only %s of %s objects " - "deleted" % (num_deleted, num_objs)) - module.exit_json(**EXIT_DICT) - - -def get_meta(module, cf, container, src, dest): - """ Get metadata for a single file, comma-separated list, or entire - container - """ - c = _get_container(module, cf, container) - - objs = None - if src and dest: - module.fail_json(msg="Error: ambiguous instructions; files to be deleted " - "have been specified on both src and dest args") - elif dest: - objs = dest - else: - objs = src - - if objs: - objs = objs.split(',') - objs = map(str.strip, objs) - else: - objs = c.get_object_names() - - results = dict() - for obj in objs: - try: - meta = c.get_object(obj).get_metadata() - except Exception, e: - module.fail_json(msg=e.message) - else: - results[obj] = dict() - for k, v in meta.items(): - meta_key = k.split(META_PREFIX)[-1] - results[obj][meta_key] = v - - EXIT_DICT['container'] = c.name - if results: - EXIT_DICT['meta_results'] = results - EXIT_DICT['success'] = True - 
module.exit_json(**EXIT_DICT) - - -def put_meta(module, cf, container, src, dest, meta, clear_meta): - """ Set metadata on a container, single file, or comma-separated list. - Passing a true value to clear_meta clears the metadata stored in Cloud - Files before setting the new metadata to the value of "meta". - """ - objs = None - if src and dest: - module.fail_json(msg="Error: ambiguous instructions; files to set meta" - " have been specified on both src and dest args") - elif dest: - objs = dest - else: - objs = src - - objs = objs.split(',') - objs = map(str.strip, objs) - - c = _get_container(module, cf, container) - - results = [] - for obj in objs: - try: - result = c.get_object(obj).set_metadata(meta, clear=clear_meta) - except Exception, e: - module.fail_json(msg=e.message) - else: - results.append(result) - - EXIT_DICT['container'] = c.name - EXIT_DICT['success'] = True - if results: - EXIT_DICT['changed'] = True - EXIT_DICT['num_changed'] = True - module.exit_json(**EXIT_DICT) - - -def delete_meta(module, cf, container, src, dest, meta): - """ Removes metadata keys and values specified in meta, if any. 
Deletes on - all objects specified by src or dest (but not both), if any; otherwise it - deletes keys on all objects in the container - """ - objs = None - if src and dest: - module.fail_json(msg="Error: ambiguous instructions; meta keys to be " - "deleted have been specified on both src and dest" - " args") - elif dest: - objs = dest - else: - objs = src - - objs = objs.split(',') - objs = map(str.strip, objs) - - c = _get_container(module, cf, container) - - results = [] # Num of metadata keys removed, not objects affected - for obj in objs: - if meta: - for k, v in meta.items(): - try: - result = c.get_object(obj).remove_metadata_key(k) - except Exception, e: - module.fail_json(msg=e.message) - else: - results.append(result) - else: - try: - o = c.get_object(obj) - except pyrax.exc.NoSuchObject, e: - module.fail_json(msg=e.message) - - for k, v in o.get_metadata().items(): - try: - result = o.remove_metadata_key(k) - except Exception, e: - module.fail_json(msg=e.message) - results.append(result) - - EXIT_DICT['container'] = c.name - EXIT_DICT['success'] = True - if results: - EXIT_DICT['changed'] = True - EXIT_DICT['num_deleted'] = len(results) - module.exit_json(**EXIT_DICT) - - -def cloudfiles(module, container, src, dest, method, typ, meta, clear_meta, - structure, expires): - """ Dispatch from here to work with metadata or file objects """ - cf = pyrax.cloudfiles - - if cf is None: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if typ == "file": - if method == 'put': - upload(module, cf, container, src, dest, meta, expires) - - elif method == 'get': - download(module, cf, container, src, dest, structure) - - elif method == 'delete': - delete(module, cf, container, src, dest) - - else: - if method == 'get': - get_meta(module, cf, container, src, dest) - - if method == 'put': - put_meta(module, cf, container, src, dest, meta, clear_meta) - - if method == 'delete': - delete_meta(module, cf, container, src, dest, meta) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - container=dict(required=True), - src=dict(), - dest=dict(), - method=dict(default='get', choices=['put', 'get', 'delete']), - type=dict(default='file', choices=['file', 'meta']), - meta=dict(type='dict', default=dict()), - clear_meta=dict(default=False, type='bool'), - structure=dict(default=True, type='bool'), - expires=dict(type='int'), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - container = module.params.get('container') - src = module.params.get('src') - dest = module.params.get('dest') - method = module.params.get('method') - typ = module.params.get('type') - meta = module.params.get('meta') - clear_meta = module.params.get('clear_meta') - structure = module.params.get('structure') - expires = module.params.get('expires') - - if clear_meta and not typ == 'meta': - module.fail_json(msg='clear_meta can only be used when setting metadata') - - setup_rax_module(module, pyrax) - cloudfiles(module, container, src, dest, method, typ, meta, clear_meta, structure, expires) - - -from ansible.module_utils.basic import * -from ansible.module_utils.rax import * - -main() diff --git a/library/cloud/rax_identity b/library/cloud/rax_identity 
deleted file mode 100644 index ea40ea2ef4..0000000000 --- a/library/cloud/rax_identity +++ /dev/null @@ -1,110 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# This is a DOCUMENTATION stub specific to this module, it extends -# a documentation fragment located in ansible.utils.module_docs_fragments -DOCUMENTATION = ''' ---- -module: rax_identity -short_description: Load Rackspace Cloud Identity -description: - - Verifies Rackspace Cloud credentials and returns identity information -version_added: "1.5" -options: - state: - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present -author: Christopher H. 
Laco, Matt Martz -extends_documentation_fragment: rackspace.openstack -''' - -EXAMPLES = ''' -- name: Load Rackspace Cloud Identity - gather_facts: False - hosts: local - connection: local - tasks: - - name: Load Identity - local_action: - module: rax_identity - credentials: ~/.raxpub - region: DFW - register: rackspace_identity -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - - -def cloud_identity(module, state, identity): - for arg in (state, identity): - if not arg: - module.fail_json(msg='%s is required for rax_identity' % arg) - - instance = dict( - authenticated=identity.authenticated, - credentials=identity._creds_file - ) - changed = False - - instance.update(rax_to_dict(identity)) - instance['services'] = instance.get('services', {}).keys() - - if state == 'present': - if not identity.authenticated: - module.fail_json(msg='Credentials could not be verified!') - - module.exit_json(changed=changed, identity=instance) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - state=dict(default='present', choices=['present', 'absent']) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - state = module.params.get('state') - - setup_rax_module(module, pyrax) - - if pyrax.identity is None: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - cloud_identity(module, state, pyrax.identity) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.rax import * - -### invoke the module -main() diff --git a/library/cloud/rax_keypair b/library/cloud/rax_keypair deleted file mode 100644 index 591ad8c359..0000000000 --- a/library/cloud/rax_keypair +++ /dev/null @@ -1,174 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# This is a DOCUMENTATION stub specific to this module, it extends -# a documentation fragment located in ansible.utils.module_docs_fragments -DOCUMENTATION = ''' ---- -module: rax_keypair -short_description: Create a keypair for use with Rackspace Cloud Servers -description: - - Create a keypair for use with Rackspace Cloud Servers -version_added: 1.5 -options: - name: - description: - - Name of keypair - required: true - public_key: - description: - - Public Key string to upload. Can be a file path or string - default: null - state: - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present -author: Matt Martz -notes: - - Keypairs cannot be manipulated, only created and deleted. To "update" a - keypair you must first delete and then recreate. 
- - The ability to specify a file path for the public key was added in 1.7 -extends_documentation_fragment: rackspace.openstack -''' - -EXAMPLES = ''' -- name: Create a keypair - hosts: localhost - gather_facts: False - tasks: - - name: keypair request - local_action: - module: rax_keypair - credentials: ~/.raxpub - name: my_keypair - region: DFW - register: keypair - - name: Create local public key - local_action: - module: copy - content: "{{ keypair.keypair.public_key }}" - dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}.pub" - - name: Create local private key - local_action: - module: copy - content: "{{ keypair.keypair.private_key }}" - dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}" - -- name: Create a keypair - hosts: localhost - gather_facts: False - tasks: - - name: keypair request - local_action: - module: rax_keypair - credentials: ~/.raxpub - name: my_keypair - public_key: "{{ lookup('file', 'authorized_keys/id_rsa.pub') }}" - region: DFW - register: keypair -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - - -def rax_keypair(module, name, public_key, state): - changed = False - - cs = pyrax.cloudservers - - if cs is None: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - keypair = {} - - if state == 'present': - if os.path.isfile(public_key): - try: - f = open(public_key) - public_key = f.read() - f.close() - except Exception, e: - module.fail_json(msg='Failed to load %s' % public_key) - - try: - keypair = cs.keypairs.find(name=name) - except cs.exceptions.NotFound: - try: - keypair = cs.keypairs.create(name, public_key) - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - except Exception, e: - module.fail_json(msg='%s' % e.message) - - elif state == 'absent': - try: - keypair = cs.keypairs.find(name=name) - except: - pass - - if keypair: - try: - keypair.delete() - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, keypair=rax_to_dict(keypair)) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - name=dict(), - public_key=dict(), - state=dict(default='present', choices=['absent', 'present']), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - name = module.params.get('name') - public_key = module.params.get('public_key') - state = module.params.get('state') - - setup_rax_module(module, pyrax) - - rax_keypair(module, name, public_key, state) - - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.rax import * - -### invoke the module -main() diff --git a/library/cloud/rax_meta b/library/cloud/rax_meta deleted file mode 100644 index 2e1d90f538..0000000000 --- a/library/cloud/rax_meta +++ /dev/null @@ -1,178 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the 
Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# This is a DOCUMENTATION stub specific to this module, it extends -# a documentation fragment located in ansible.utils.module_docs_fragments -DOCUMENTATION = ''' ---- -module: rax_meta -short_description: Manipulate metadata for Rackspace Cloud Servers -description: - - Manipulate metadata for Rackspace Cloud Servers -version_added: 1.7 -options: - address: - description: - - Server IP address to modify metadata for, will match any IP assigned to - the server - id: - description: - - Server ID to modify metadata for - name: - description: - - Server name to modify metadata for - default: null - meta: - description: - - A hash of metadata to associate with the instance - default: null -author: Matt Martz -extends_documentation_fragment: rackspace.openstack -''' - -EXAMPLES = ''' -- name: Set metadata for a server - hosts: all - gather_facts: False - tasks: - - name: Set metadata - local_action: - module: rax_meta - credentials: ~/.raxpub - name: "{{ inventory_hostname }}" - region: DFW - meta: - group: primary_group - groups: - - group_two - - group_three - app: my_app - - - name: Clear metadata - local_action: - module: rax_meta - credentials: ~/.raxpub - name: "{{ inventory_hostname }}" - region: DFW -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - - -def rax_meta(module, address, name, server_id, meta): - changed = False - - cs = pyrax.cloudservers - - if cs is None: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - search_opts = {} - if name: - search_opts = dict(name='^%s$' % name) - try: - servers = cs.servers.list(search_opts=search_opts) - except Exception, e: - module.fail_json(msg='%s' % e.message) - elif address: - servers = [] - try: - for server in cs.servers.list(): - for addresses in server.networks.values(): - if address in addresses: - servers.append(server) - break - except Exception, e: - module.fail_json(msg='%s' % e.message) - elif server_id: - servers = [] - try: - servers.append(cs.servers.get(server_id)) - except Exception, e: - pass - - if len(servers) > 1: - module.fail_json(msg='Multiple servers found matching provided ' - 'search parameters') - elif not servers: - module.fail_json(msg='Failed to find a server matching provided ' - 'search parameters') - - # Normalize and ensure all metadata values are strings - for k, v in meta.items(): - if isinstance(v, list): - meta[k] = ','.join(['%s' % i for i in v]) - elif isinstance(v, dict): - meta[k] = json.dumps(v) - elif not isinstance(v, basestring): - meta[k] = '%s' % v - - server = servers[0] - if server.metadata == meta: - changed = False - else: - changed = True - removed = set(server.metadata.keys()).difference(meta.keys()) - cs.servers.delete_meta(server, list(removed)) - cs.servers.set_meta(server, meta) - server.get() - - module.exit_json(changed=changed, meta=server.metadata) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - address=dict(), - id=dict(), - name=dict(), - meta=dict(type='dict', default=dict()), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - mutually_exclusive=[['address', 'id', 'name']], - required_one_of=[['address', 'id', 'name']], - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - address = module.params.get('address') - server_id = 
module.params.get('id') - name = module.params.get('name') - meta = module.params.get('meta') - - setup_rax_module(module, pyrax) - - rax_meta(module, address, name, server_id, meta) - - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.rax import * - -### invoke the module -main() diff --git a/library/cloud/rax_network b/library/cloud/rax_network deleted file mode 100644 index bc4745a7a8..0000000000 --- a/library/cloud/rax_network +++ /dev/null @@ -1,145 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# This is a DOCUMENTATION stub specific to this module, it extends -# a documentation fragment located in ansible.utils.module_docs_fragments -DOCUMENTATION = ''' ---- -module: rax_network -short_description: create / delete an isolated network in Rackspace Public Cloud -description: - - creates / deletes a Rackspace Public Cloud isolated network. -version_added: "1.4" -options: - state: - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - label: - description: - - Label (name) to give the network - default: null - cidr: - description: - - cidr of the network being created - default: null -author: Christopher H. 
Laco, Jesse Keating -extends_documentation_fragment: rackspace.openstack -''' - -EXAMPLES = ''' -- name: Build an Isolated Network - gather_facts: False - - tasks: - - name: Network create request - local_action: - module: rax_network - credentials: ~/.raxpub - label: my-net - cidr: 192.168.3.0/24 - state: present -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - - -def cloud_network(module, state, label, cidr): - for arg in (state, label, cidr): - if not arg: - module.fail_json(msg='%s is required for cloud_networks' % arg) - - changed = False - network = None - networks = [] - - if not pyrax.cloud_networks: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if state == 'present': - try: - network = pyrax.cloud_networks.find_network_by_label(label) - except pyrax.exceptions.NetworkNotFound: - try: - network = pyrax.cloud_networks.create(label, cidr=cidr) - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - except Exception, e: - module.fail_json(msg='%s' % e.message) - - elif state == 'absent': - try: - network = pyrax.cloud_networks.find_network_by_label(label) - network.delete() - changed = True - except pyrax.exceptions.NetworkNotFound: - pass - except Exception, e: - module.fail_json(msg='%s' % e.message) - - if network: - instance = dict(id=network.id, - label=network.label, - cidr=network.cidr) - networks.append(instance) - - module.exit_json(changed=changed, networks=networks) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - state=dict(default='present', - choices=['present', 'absent']), - label=dict(), - cidr=dict() - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - state = 
module.params.get('state') - label = module.params.get('label') - cidr = module.params.get('cidr') - - setup_rax_module(module, pyrax) - - cloud_network(module, state, label, cidr) - - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.rax import * - -### invoke the module -main() diff --git a/library/cloud/rax_queue b/library/cloud/rax_queue deleted file mode 100644 index d3e5ac3f81..0000000000 --- a/library/cloud/rax_queue +++ /dev/null @@ -1,145 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# This is a DOCUMENTATION stub specific to this module, it extends -# a documentation fragment located in ansible.utils.module_docs_fragments -DOCUMENTATION = ''' ---- -module: rax_queue -short_description: create / delete a queue in Rackspace Public Cloud -description: - - creates / deletes a Rackspace Public Cloud queue. -version_added: "1.5" -options: - name: - description: - - Name to give the queue - default: null - state: - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present -author: Christopher H. 
Laco, Matt Martz -extends_documentation_fragment: rackspace -''' - -EXAMPLES = ''' -- name: Build a Queue - gather_facts: False - hosts: local - connection: local - tasks: - - name: Queue create request - local_action: - module: rax_queue - credentials: ~/.raxpub - name: my-queue - region: DFW - state: present - register: my_queue -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - - -def cloud_queue(module, state, name): - for arg in (state, name): - if not arg: - module.fail_json(msg='%s is required for rax_queue' % arg) - - changed = False - queues = [] - instance = {} - - cq = pyrax.queues - if not cq: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - for queue in cq.list(): - if name != queue.name: - continue - - queues.append(queue) - - if len(queues) > 1: - module.fail_json(msg='Multiple Queues were matched by name') - - if state == 'present': - if not queues: - try: - queue = cq.create(name) - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - else: - queue = queues[0] - - instance = dict(name=queue.name) - result = dict(changed=changed, queue=instance) - module.exit_json(**result) - - elif state == 'absent': - if queues: - queue = queues[0] - try: - queue.delete() - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, queue=instance) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - name=dict(), - state=dict(default='present', choices=['present', 'absent']), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - name = module.params.get('name') - state = module.params.get('state') - - setup_rax_module(module, pyrax) - - cloud_queue(module, 
state, name) - - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.rax import * - -### invoke the module -main() diff --git a/library/cloud/rax_scaling_group b/library/cloud/rax_scaling_group deleted file mode 100644 index d884d3c130..0000000000 --- a/library/cloud/rax_scaling_group +++ /dev/null @@ -1,351 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# This is a DOCUMENTATION stub specific to this module, it extends -# a documentation fragment located in ansible.utils.module_docs_fragments -DOCUMENTATION = ''' ---- -module: rax_scaling_group -short_description: Manipulate Rackspace Cloud Autoscale Groups -description: - - Manipulate Rackspace Cloud Autoscale Groups -version_added: 1.7 -options: - cooldown: - description: - - The period of time, in seconds, that must pass before any scaling can - occur after the previous scaling. Must be an integer between 0 and - 86400 (24 hrs). - disk_config: - description: - - Disk partitioning strategy - choices: - - auto - - manual - default: auto - files: - description: - - 'Files to insert into the instance. Hash of C(remotepath: localpath)' - default: null - flavor: - description: - - flavor to use for the instance - required: true - image: - description: - - image to use for the instance. 
Can be an C(id), C(human_id) or C(name) - required: true - key_name: - description: - - key pair to use on the instance - default: null - loadbalancers: - description: - - List of load balancer C(id) and C(port) hashes - max_entities: - description: - - The maximum number of entities that are allowed in the scaling group. - Must be an integer between 0 and 1000. - required: true - meta: - description: - - A hash of metadata to associate with the instance - default: null - min_entities: - description: - - The minimum number of entities that are allowed in the scaling group. - Must be an integer between 0 and 1000. - required: true - name: - description: - - Name to give the scaling group - required: true - networks: - description: - - The network to attach to the instances. If specified, you must include - ALL networks including the public and private interfaces. Can be C(id) - or C(label). - default: - - public - - private - server_name: - description: - - The base name for servers created by Autoscale - required: true - state: - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present -author: Matt Martz -extends_documentation_fragment: rackspace -''' - -EXAMPLES = ''' ---- -- hosts: localhost - gather_facts: false - connection: local - tasks: - - rax_scaling_group: - credentials: ~/.raxpub - region: ORD - cooldown: 300 - flavor: performance1-1 - image: bb02b1a3-bc77-4d17-ab5b-421d89850fca - min_entities: 5 - max_entities: 10 - name: ASG Test - server_name: asgtest - loadbalancers: - - id: 228385 - port: 80 - register: asg -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - - -def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None, - image=None, key_name=None, loadbalancers=[], meta={}, - min_entities=0, max_entities=0, name=None, networks=[], - server_name=None, state='present'): - changed = False - - au = pyrax.autoscale - cnw = pyrax.cloud_networks - cs 
= pyrax.cloudservers - if not au or not cnw or not cs: - module.fail_json(msg='Failed to instantiate clients. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if state == 'present': - # Normalize and ensure all metadata values are strings - if meta: - for k, v in meta.items(): - if isinstance(v, list): - meta[k] = ','.join(['%s' % i for i in v]) - elif isinstance(v, dict): - meta[k] = json.dumps(v) - elif not isinstance(v, basestring): - meta[k] = '%s' % v - - if image: - image = rax_find_image(module, pyrax, image) - - nics = [] - if networks: - for network in networks: - nics.extend(rax_find_network(module, pyrax, network)) - - for nic in nics: - # pyrax is currently returning net-id, but we need uuid - # this check makes this forward compatible for a time when - # pyrax uses uuid instead - if nic.get('net-id'): - nic.update(uuid=nic['net-id']) - del nic['net-id'] - - # Handle the file contents - personality = [] - if files: - for rpath in files.keys(): - lpath = os.path.expanduser(files[rpath]) - try: - f = open(lpath, 'r') - personality.append({ - 'path': rpath, - 'contents': f.read() - }) - f.close() - except Exception, e: - module.fail_json(msg='Failed to load %s' % lpath) - - lbs = [] - if loadbalancers: - for lb in loadbalancers: - lb_id = lb.get('id') - port = lb.get('port') - if not lb_id or not port: - continue - lbs.append((lb_id, port)) - - try: - sg = au.find(name=name) - except pyrax.exceptions.NoUniqueMatch, e: - module.fail_json(msg='%s' % e.message) - except pyrax.exceptions.NotFound: - try: - sg = au.create(name, cooldown=cooldown, - min_entities=min_entities, - max_entities=max_entities, - launch_config_type='launch_server', - server_name=server_name, image=image, - flavor=flavor, disk_config=disk_config, - metadata=meta, personality=files, - networks=nics, load_balancers=lbs, - key_name=key_name) - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - - if not 
changed: - # Scaling Group Updates - group_args = {} - if cooldown != sg.cooldown: - group_args['cooldown'] = cooldown - - if min_entities != sg.min_entities: - group_args['min_entities'] = min_entities - - if max_entities != sg.max_entities: - group_args['max_entities'] = max_entities - - if group_args: - changed = True - sg.update(**group_args) - - # Launch Configuration Updates - lc = sg.get_launch_config() - lc_args = {} - if server_name != lc.get('name'): - lc_args['name'] = server_name - - if image != lc.get('image'): - lc_args['image'] = image - - if flavor != lc.get('flavor'): - lc_args['flavor'] = flavor - - if disk_config != lc.get('disk_config'): - lc_args['disk_config'] = disk_config - - if meta != lc.get('metadata'): - lc_args['metadata'] = meta - - if files != lc.get('personality'): - lc_args['personality'] = files - - if nics != lc.get('networks'): - lc_args['networks'] = nics - - if lbs != lc.get('load_balancers'): - # Work around for https://github.com/rackspace/pyrax/pull/393 - lc_args['load_balancers'] = sg.manager._resolve_lbs(lbs) - - if key_name != lc.get('key_name'): - lc_args['key_name'] = key_name - - if lc_args: - # Work around for https://github.com/rackspace/pyrax/pull/389 - if 'flavor' not in lc_args: - lc_args['flavor'] = lc.get('flavor') - changed = True - sg.update_launch_config(**lc_args) - - sg.get() - - module.exit_json(changed=changed, autoscale_group=rax_to_dict(sg)) - - else: - try: - sg = au.find(name=name) - sg.delete() - changed = True - except pyrax.exceptions.NotFound, e: - sg = {} - except Exception, e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, autoscale_group=rax_to_dict(sg)) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - cooldown=dict(type='int', default=300), - disk_config=dict(choices=['auto', 'manual']), - files=dict(type='list', default=[]), - flavor=dict(required=True), - image=dict(required=True), - key_name=dict(), - 
loadbalancers=dict(type='list'), - meta=dict(type='dict', default={}), - min_entities=dict(type='int', required=True), - max_entities=dict(type='int', required=True), - name=dict(required=True), - networks=dict(type='list', default=['public', 'private']), - server_name=dict(required=True), - state=dict(default='present', choices=['present', 'absent']), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - cooldown = module.params.get('cooldown') - disk_config = module.params.get('disk_config') - if disk_config: - disk_config = disk_config.upper() - files = module.params.get('files') - flavor = module.params.get('flavor') - image = module.params.get('image') - key_name = module.params.get('key_name') - loadbalancers = module.params.get('loadbalancers') - meta = module.params.get('meta') - min_entities = module.params.get('min_entities') - max_entities = module.params.get('max_entities') - name = module.params.get('name') - networks = module.params.get('networks') - server_name = module.params.get('server_name') - state = module.params.get('state') - - if not 0 <= min_entities <= 1000 or not 0 <= max_entities <= 1000: - module.fail_json(msg='min_entities and max_entities must be an ' - 'integer between 0 and 1000') - - if not 0 <= cooldown <= 86400: - module.fail_json(msg='cooldown must be an integer between 0 and 86400') - - setup_rax_module(module, pyrax) - - rax_asg(module, cooldown=cooldown, disk_config=disk_config, - files=files, flavor=flavor, image=image, meta=meta, - key_name=key_name, loadbalancers=loadbalancers, - min_entities=min_entities, max_entities=max_entities, - name=name, networks=networks, server_name=server_name, - state=state) - - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.rax import * - -# invoke the module -main() diff --git 
a/library/cloud/rax_scaling_policy b/library/cloud/rax_scaling_policy deleted file mode 100644 index b3da82460d..0000000000 --- a/library/cloud/rax_scaling_policy +++ /dev/null @@ -1,283 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# This is a DOCUMENTATION stub specific to this module, it extends -# a documentation fragment located in ansible.utils.module_docs_fragments -DOCUMENTATION = ''' ---- -module: rax_scaling_policy -short_description: Manipulate Rackspace Cloud Autoscale Scaling Policy -description: - - Manipulate Rackspace Cloud Autoscale Scaling Policy -version_added: 1.7 -options: - at: - description: - - The UTC time when this policy will be executed. The time must be - formatted according to C(yyyy-MM-dd'T'HH:mm:ss.SSS) such as - C(2013-05-19T08:07:08Z) - change: - description: - - The change, either as a number of servers or as a percentage, to make - in the scaling group. If this is a percentage, you must set - I(is_percent) to C(true) also. - cron: - description: - - The time when the policy will be executed, as a cron entry. For - example, if this is parameter is set to C(1 0 * * *) - cooldown: - description: - - The period of time, in seconds, that must pass before any scaling can - occur after the previous scaling. Must be an integer between 0 and - 86400 (24 hrs). 
- desired_capacity: - description: - - The desired server capacity of the scaling the group; that is, how - many servers should be in the scaling group. - is_percent: - description: - - Whether the value in I(change) is a percent value - default: false - name: - description: - - Name to give the policy - required: true - policy_type: - description: - - The type of policy that will be executed for the current release. - choices: - - webhook - - schedule - required: true - scaling_group: - description: - - Name of the scaling group that this policy will be added to - required: true - state: - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present -author: Matt Martz -extends_documentation_fragment: rackspace -''' - -EXAMPLES = ''' ---- -- hosts: localhost - gather_facts: false - connection: local - tasks: - - rax_scaling_policy: - credentials: ~/.raxpub - region: ORD - at: '2013-05-19T08:07:08Z' - change: 25 - cooldown: 300 - is_percent: true - name: ASG Test Policy - at - policy_type: schedule - scaling_group: ASG Test - register: asps_at - - - rax_scaling_policy: - credentials: ~/.raxpub - region: ORD - cron: '1 0 * * *' - change: 25 - cooldown: 300 - is_percent: true - name: ASG Test Policy - cron - policy_type: schedule - scaling_group: ASG Test - register: asp_cron - - - rax_scaling_policy: - credentials: ~/.raxpub - region: ORD - cooldown: 300 - desired_capacity: 5 - name: ASG Test Policy - webhook - policy_type: webhook - scaling_group: ASG Test - register: asp_webhook -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - - -def rax_asp(module, at=None, change=0, cron=None, cooldown=300, - desired_capacity=0, is_percent=False, name=None, - policy_type=None, scaling_group=None, state='present'): - changed = False - - au = pyrax.autoscale - if not au: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - try: - UUID(scaling_group) - except ValueError: - try: - sg = au.find(name=scaling_group) - except Exception, e: - module.fail_json(msg='%s' % e.message) - else: - try: - sg = au.get(scaling_group) - except Exception, e: - module.fail_json(msg='%s' % e.message) - - if state == 'present': - policies = filter(lambda p: name == p.name, sg.list_policies()) - if len(policies) > 1: - module.fail_json(msg='No unique policy match found by name') - if at: - args = dict(at=at) - elif cron: - args = dict(cron=cron) - else: - args = None - - if not policies: - try: - policy = sg.add_policy(name, policy_type=policy_type, - cooldown=cooldown, change=change, - is_percent=is_percent, - desired_capacity=desired_capacity, - args=args) - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - - else: - policy = policies[0] - kwargs = {} - if policy_type != policy.type: - kwargs['policy_type'] = policy_type - - if cooldown != policy.cooldown: - kwargs['cooldown'] = cooldown - - if hasattr(policy, 'change') and change != policy.change: - kwargs['change'] = change - - if hasattr(policy, 'changePercent') and is_percent is False: - kwargs['change'] = change - kwargs['is_percent'] = False - elif hasattr(policy, 'change') and is_percent is True: - kwargs['change'] = change - kwargs['is_percent'] = True - - if hasattr(policy, 'desiredCapacity') and change: - kwargs['change'] = change - elif ((hasattr(policy, 'change') or - hasattr(policy, 'changePercent')) and desired_capacity): - kwargs['desired_capacity'] = desired_capacity - - if hasattr(policy, 'args') and args != policy.args: - kwargs['args'] = args - - if kwargs: - policy.update(**kwargs) - changed = True - - policy.get() - - module.exit_json(changed=changed, autoscale_policy=rax_to_dict(policy)) - - else: - try: - policies = filter(lambda p: name == p.name, sg.list_policies()) - if len(policies) > 1: - 
module.fail_json(msg='No unique policy match found by name') - elif not policies: - policy = {} - else: - policy.delete() - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, autoscale_policy=rax_to_dict(policy)) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - at=dict(), - change=dict(type='int'), - cron=dict(), - cooldown=dict(type='int', default=300), - desired_capacity=dict(type='int'), - is_percent=dict(type='bool', default=False), - name=dict(required=True), - policy_type=dict(required=True, choices=['webhook', 'schedule']), - scaling_group=dict(required=True), - state=dict(default='present', choices=['present', 'absent']), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - mutually_exclusive=[ - ['cron', 'at'], - ['change', 'desired_capacity'], - ] - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - at = module.params.get('at') - change = module.params.get('change') - cron = module.params.get('cron') - cooldown = module.params.get('cooldown') - desired_capacity = module.params.get('desired_capacity') - is_percent = module.params.get('is_percent') - name = module.params.get('name') - policy_type = module.params.get('policy_type') - scaling_group = module.params.get('scaling_group') - state = module.params.get('state') - - if (at or cron) and policy_type == 'webhook': - module.fail_json(msg='policy_type=schedule is required for a time ' - 'based policy') - - setup_rax_module(module, pyrax) - - rax_asp(module, at=at, change=change, cron=cron, cooldown=cooldown, - desired_capacity=desired_capacity, is_percent=is_percent, - name=name, policy_type=policy_type, scaling_group=scaling_group, - state=state) - - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.rax import * - -# invoke the module -main() diff --git 
a/library/cloud/rds b/library/cloud/rds deleted file mode 100644 index ba3f1e38d3..0000000000 --- a/library/cloud/rds +++ /dev/null @@ -1,650 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: rds -version_added: "1.3" -short_description: create, delete, or modify an Amazon rds instance -description: - - Creates, deletes, or modifies rds instances. When creating an instance it can be either a new instance or a read-only replica of an existing instance. This module has a dependency on python-boto >= 2.5. The 'promote' command requires boto >= 2.18.0. -options: - command: - description: - - Specifies the action to take. - required: true - default: null - aliases: [] - choices: [ 'create', 'replicate', 'delete', 'facts', 'modify' , 'promote', 'snapshot', 'restore' ] - instance_name: - description: - - Database instance identifier. - required: true - default: null - aliases: [] - source_instance: - description: - - Name of the database to replicate. Used only when command=replicate. - required: false - default: null - aliases: [] - db_engine: - description: - - The type of database. Used only when command=create. 
- required: false - default: null - aliases: [] - choices: [ 'MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres'] - size: - description: - - Size in gigabytes of the initial storage for the DB instance. Used only when command=create or command=modify. - required: false - default: null - aliases: [] - instance_type: - description: - - The instance type of the database. Must be specified when command=create. Optional when command=replicate, command=modify or command=restore. If not specified then the replica inherits the same instance type as the source instance. - required: false - default: null - aliases: [] - username: - description: - - Master database username. Used only when command=create. - required: false - default: null - aliases: [] - password: - description: - - Password for the master database username. Used only when command=create or command=modify. - required: false - default: null - aliases: [] - region: - description: - - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. - required: true - default: null - aliases: [ 'aws_region', 'ec2_region' ] - db_name: - description: - - Name of a database to create within the instance. If not specified then no database is created. Used only when command=create. - required: false - default: null - aliases: [] - engine_version: - description: - - Version number of the database engine to use. Used only when command=create. If not specified then the current Amazon RDS default engine version is used. - required: false - default: null - aliases: [] - parameter_group: - description: - - Name of the DB parameter group to associate with this instance. If omitted then the RDS default DBParameterGroup will be used. Used only when command=create or command=modify. - required: false - default: null - aliases: [] - license_model: - description: - - The license model for this DB instance. 
Used only when command=create or command=restore. - required: false - default: null - aliases: [] - choices: [ 'license-included', 'bring-your-own-license', 'general-public-license' ] - multi_zone: - description: - - Specifies if this is a Multi-availability-zone deployment. Can not be used in conjunction with zone parameter. Used only when command=create or command=modify. - choices: [ "yes", "no" ] - required: false - default: null - aliases: [] - iops: - description: - - Specifies the number of IOPS for the instance. Used only when command=create or command=modify. Must be an integer greater than 1000. - required: false - default: null - aliases: [] - security_groups: - description: - - Comma separated list of one or more security groups. Used only when command=create or command=modify. - required: false - default: null - aliases: [] - vpc_security_groups: - description: - - Comma separated list of one or more vpc security group ids. Also requires `subnet` to be specified. Used only when command=create or command=modify. - required: false - default: null - aliases: [] - port: - description: - - Port number that the DB instance uses for connections. Defaults to 3306 for mysql. Must be changed to 1521 for Oracle, 1443 for SQL Server, 5432 for PostgreSQL. Used only when command=create or command=replicate. - required: false - default: null - aliases: [] - upgrade: - description: - - Indicates that minor version upgrades should be applied automatically. Used only when command=create or command=replicate. - required: false - default: no - choices: [ "yes", "no" ] - aliases: [] - option_group: - description: - - The name of the option group to use. If not specified then the default option group is used. Used only when command=create. - required: false - default: null - aliases: [] - maint_window: - description: - - "Maintenance window in format of ddd:hh24:mi-ddd:hh24:mi. (Example: Mon:22:00-Mon:23:15) If not specified then a random maintenance window is assigned. 
Used only when command=create or command=modify." - required: false - default: null - aliases: [] - backup_window: - description: - - Backup window in format of hh24:mi-hh24:mi. If not specified then a random backup window is assigned. Used only when command=create or command=modify. - required: false - default: null - aliases: [] - backup_retention: - description: - - "Number of days backups are retained. Set to 0 to disable backups. Default is 1 day. Valid range: 0-35. Used only when command=create or command=modify." - required: false - default: null - aliases: [] - zone: - description: - - availability zone in which to launch the instance. Used only when command=create, command=replicate or command=restore. - required: false - default: null - aliases: ['aws_zone', 'ec2_zone'] - subnet: - description: - - VPC subnet group. If specified then a VPC instance is created. Used only when command=create. - required: false - default: null - aliases: [] - snapshot: - description: - - Name of snapshot to take. When command=delete, if no snapshot name is provided then no snapshot is taken. Used only when command=delete or command=snapshot. - required: false - default: null - aliases: [] - aws_secret_key: - description: - - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. - required: false - default: null - aliases: [ 'ec2_secret_key', 'secret_key' ] - aws_access_key: - description: - - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. - required: false - default: null - aliases: [ 'ec2_access_key', 'access_key' ] - wait: - description: - - When command=create, replicate, modify or restore then wait for the database to enter the 'available' state. When command=delete wait for the database to be terminated. 
- required: false - default: "no" - choices: [ "yes", "no" ] - aliases: [] - wait_timeout: - description: - - how long before wait gives up, in seconds - default: 300 - aliases: [] - apply_immediately: - description: - - Used only when command=modify. If enabled, the modifications will be applied as soon as possible rather than waiting for the next preferred maintenance window. - default: no - choices: [ "yes", "no" ] - aliases: [] - new_instance_name: - description: - - Name to rename an instance to. Used only when command=modify. - required: false - default: null - aliases: [] - version_added: 1.5 -requirements: [ "boto" ] -author: Bruce Pennypacker -''' - -EXAMPLES = ''' -# Basic mysql provisioning example -- rds: > - command=create - instance_name=new_database - db_engine=MySQL - size=10 - instance_type=db.m1.small - username=mysql_admin - password=1nsecure - -# Create a read-only replica and wait for it to become available -- rds: > - command=replicate - instance_name=new_database_replica - source_instance=new_database - wait=yes - wait_timeout=600 - -# Delete an instance, but create a snapshot before doing so -- rds: > - command=delete - instance_name=new_database - snapshot=new_database_snapshot - -# Get facts about an instance -- rds: > - command=facts - instance_name=new_database - register: new_database_facts - -# Rename an instance and wait for the change to take effect -- rds: > - command=modify - instance_name=new_database - new_instance_name=renamed_database - wait=yes - -''' - -import sys -import time - -try: - import boto.rds -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - -def get_current_resource(conn, resource, command): - # There will be exceptions but we want the calling code to handle them - if command == 'snapshot': - return conn.get_all_dbsnapshots(snapshot_id=resource)[0] - else: - return conn.get_all_dbinstances(resource)[0] - - -def main(): - argument_spec = ec2_argument_spec() - 
argument_spec.update(dict( - command = dict(choices=['create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'restore'], required=True), - instance_name = dict(required=True), - source_instance = dict(required=False), - db_engine = dict(choices=['MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres'], required=False), - size = dict(required=False), - instance_type = dict(aliases=['type'], required=False), - username = dict(required=False), - password = dict(no_log=True, required=False), - db_name = dict(required=False), - engine_version = dict(required=False), - parameter_group = dict(required=False), - license_model = dict(choices=['license-included', 'bring-your-own-license', 'general-public-license'], required=False), - multi_zone = dict(type='bool', default=False), - iops = dict(required=False), - security_groups = dict(required=False), - vpc_security_groups = dict(type='list', required=False), - port = dict(required=False), - upgrade = dict(type='bool', default=False), - option_group = dict(required=False), - maint_window = dict(required=False), - backup_window = dict(required=False), - backup_retention = dict(required=False), - zone = dict(aliases=['aws_zone', 'ec2_zone'], required=False), - subnet = dict(required=False), - wait = dict(type='bool', default=False), - wait_timeout = dict(default=300), - snapshot = dict(required=False), - apply_immediately = dict(type='bool', default=False), - new_instance_name = dict(required=False), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - ) - - command = module.params.get('command') - instance_name = module.params.get('instance_name') - source_instance = module.params.get('source_instance') - db_engine = module.params.get('db_engine') - size = module.params.get('size') - instance_type = module.params.get('instance_type') - username = module.params.get('username') - password = module.params.get('password') - 
db_name = module.params.get('db_name') - engine_version = module.params.get('engine_version') - parameter_group = module.params.get('parameter_group') - license_model = module.params.get('license_model') - multi_zone = module.params.get('multi_zone') - iops = module.params.get('iops') - security_groups = module.params.get('security_groups') - vpc_security_groups = module.params.get('vpc_security_groups') - port = module.params.get('port') - upgrade = module.params.get('upgrade') - option_group = module.params.get('option_group') - maint_window = module.params.get('maint_window') - subnet = module.params.get('subnet') - backup_window = module.params.get('backup_window') - backup_retention = module.params.get('backup_retention') - region = module.params.get('region') - zone = module.params.get('zone') - aws_secret_key = module.params.get('aws_secret_key') - aws_access_key = module.params.get('aws_access_key') - wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - snapshot = module.params.get('snapshot') - apply_immediately = module.params.get('apply_immediately') - new_instance_name = module.params.get('new_instance_name') - - region, ec2_url, aws_connect_params = get_aws_connection_info(module) - if not region: - module.fail_json(msg = str("region not specified and unable to determine region from EC2_REGION.")) - - # connect to the rds endpoint - try: - conn = connect_to_aws(boto.rds, region, **aws_connect_params) - except boto.exception.BotoServerError, e: - module.fail_json(msg = e.error_message) - - def invalid_security_group_type(subnet): - if subnet: - return 'security_groups' - else: - return 'vpc_security_groups' - - # Package up the optional parameters - params = {} - - # Validate parameters for each command - if command == 'create': - required_vars = [ 'instance_name', 'db_engine', 'size', 'instance_type', 'username', 'password' ] - invalid_vars = [ 'source_instance', 'snapshot', 'apply_immediately', 'new_instance_name' 
] + [invalid_security_group_type(subnet)] - - elif command == 'replicate': - required_vars = [ 'instance_name', 'source_instance' ] - invalid_vars = [ 'db_engine', 'size', 'username', 'password', 'db_name', 'engine_version', 'parameter_group', 'license_model', 'multi_zone', 'iops', 'vpc_security_groups', 'security_groups', 'option_group', 'maint_window', 'backup_window', 'backup_retention', 'subnet', 'snapshot', 'apply_immediately', 'new_instance_name' ] - - elif command == 'delete': - required_vars = [ 'instance_name' ] - invalid_vars = [ 'db_engine', 'size', 'instance_type', 'username', 'password', 'db_name', 'engine_version', 'parameter_group', 'license_model', 'multi_zone', 'iops', 'vpc_security_groups' ,'security_groups', 'option_group', 'maint_window', 'backup_window', 'backup_retention', 'port', 'upgrade', 'subnet', 'zone' , 'source_instance', 'apply_immediately', 'new_instance_name' ] - - elif command == 'facts': - required_vars = [ 'instance_name' ] - invalid_vars = [ 'db_engine', 'size', 'instance_type', 'username', 'password', 'db_name', 'engine_version', 'parameter_group', 'license_model', 'multi_zone', 'iops', 'vpc_security_groups', 'security_groups', 'option_group', 'maint_window', 'backup_window', 'backup_retention', 'port', 'upgrade', 'subnet', 'zone', 'wait', 'source_instance' 'apply_immediately', 'new_instance_name' ] - - elif command == 'modify': - required_vars = [ 'instance_name' ] - if password: - params["master_password"] = password - invalid_vars = [ 'db_engine', 'username', 'db_name', 'engine_version', 'license_model', 'option_group', 'port', 'upgrade', 'subnet', 'zone', 'source_instance'] - - elif command == 'promote': - required_vars = [ 'instance_name' ] - invalid_vars = [ 'db_engine', 'size', 'username', 'password', 'db_name', 'engine_version', 'parameter_group', 'license_model', 'multi_zone', 'iops', 'vpc_security_groups', 'security_groups', 'option_group', 'maint_window', 'subnet', 'source_instance', 'snapshot', 'apply_immediately', 
'new_instance_name' ] - - elif command == 'snapshot': - required_vars = [ 'instance_name', 'snapshot'] - invalid_vars = [ 'db_engine', 'size', 'username', 'password', 'db_name', 'engine_version', 'parameter_group', 'license_model', 'multi_zone', 'iops', 'vpc_security_groups', 'security_groups', 'option_group', 'maint_window', 'subnet', 'source_instance', 'apply_immediately', 'new_instance_name' ] - - elif command == 'restore': - required_vars = [ 'instance_name', 'snapshot', 'instance_type' ] - invalid_vars = [ 'db_engine', 'db_name', 'username', 'password', 'engine_version', 'option_group', 'source_instance', 'apply_immediately', 'new_instance_name', 'vpc_security_groups', 'security_groups' ] - - for v in required_vars: - if not module.params.get(v): - module.fail_json(msg = str("Parameter %s required for %s command" % (v, command))) - - for v in invalid_vars: - if module.params.get(v): - module.fail_json(msg = str("Parameter %s invalid for %s command" % (v, command))) - - if db_engine: - params["engine"] = db_engine - - if port: - params["port"] = port - - if db_name: - params["db_name"] = db_name - - if parameter_group: - params["param_group"] = parameter_group - - if zone: - params["availability_zone"] = zone - - if maint_window: - params["preferred_maintenance_window"] = maint_window - - if backup_window: - params["preferred_backup_window"] = backup_window - - if backup_retention: - params["backup_retention_period"] = backup_retention - - if multi_zone: - params["multi_az"] = multi_zone - - if engine_version: - params["engine_version"] = engine_version - - if upgrade: - params["auto_minor_version_upgrade"] = upgrade - - if subnet: - params["db_subnet_group_name"] = subnet - - if license_model: - params["license_model"] = license_model - - if option_group: - params["option_group_name"] = option_group - - if iops: - params["iops"] = iops - - if security_groups: - params["security_groups"] = security_groups.split(',') - - if vpc_security_groups: - groups_list = 
[] - for x in vpc_security_groups: - groups_list.append(boto.rds.VPCSecurityGroupMembership(vpc_group=x)) - params["vpc_security_groups"] = groups_list - - if new_instance_name: - params["new_instance_id"] = new_instance_name - - changed = True - - if command in ['create', 'restore', 'facts']: - try: - result = conn.get_all_dbinstances(instance_name)[0] - changed = False - except boto.exception.BotoServerError, e: - try: - if command == 'create': - result = conn.create_dbinstance(instance_name, size, instance_type, username, password, **params) - if command == 'restore': - result = conn.restore_dbinstance_from_dbsnapshot(snapshot, instance_name, instance_type, **params) - if command == 'facts': - module.fail_json(msg = "DB Instance %s does not exist" % instance_name) - except boto.exception.BotoServerError, e: - module.fail_json(msg = e.error_message) - - if command == 'snapshot': - try: - result = conn.get_all_dbsnapshots(snapshot)[0] - changed = False - except boto.exception.BotoServerError, e: - try: - result = conn.create_dbsnapshot(snapshot, instance_name) - except boto.exception.BotoServerError, e: - module.fail_json(msg = e.error_message) - - if command == 'delete': - try: - result = conn.get_all_dbinstances(instance_name)[0] - if result.status == 'deleting': - module.exit_json(changed=False) - except boto.exception.BotoServerError, e: - module.exit_json(changed=False) - try: - if snapshot: - params["skip_final_snapshot"] = False - params["final_snapshot_id"] = snapshot - else: - params["skip_final_snapshot"] = True - result = conn.delete_dbinstance(instance_name, **params) - except boto.exception.BotoServerError, e: - module.fail_json(msg = e.error_message) - - if command == 'replicate': - try: - if instance_type: - params["instance_class"] = instance_type - result = conn.create_dbinstance_read_replica(instance_name, source_instance, **params) - except boto.exception.BotoServerError, e: - module.fail_json(msg = e.error_message) - - if command == 'modify': - 
try: - params["apply_immediately"] = apply_immediately - result = conn.modify_dbinstance(instance_name, **params) - except boto.exception.BotoServerError, e: - module.fail_json(msg = e.error_message) - if apply_immediately: - if new_instance_name: - # Wait until the new instance name is valid - found = 0 - while found == 0: - instances = conn.get_all_dbinstances() - for i in instances: - if i.id == new_instance_name: - instance_name = new_instance_name - found = 1 - if found == 0: - time.sleep(5) - - # The name of the database has now changed, so we have - # to force result to contain the new instance, otherwise - # the call below to get_current_resource will fail since it - # will be looking for the old instance name. - result.id = new_instance_name - else: - # Wait for a few seconds since it takes a while for AWS - # to change the instance from 'available' to 'modifying' - time.sleep(5) - - if command == 'promote': - try: - result = conn.promote_read_replica(instance_name, **params) - except boto.exception.BotoServerError, e: - module.fail_json(msg = e.error_message) - - # If we're not waiting for a delete to complete then we're all done - # so just return - if command == 'delete' and not wait: - module.exit_json(changed=True) - - try: - resource = get_current_resource(conn, result.id, command) - except boto.exception.BotoServerError, e: - module.fail_json(msg = e.error_message) - - # Wait for the resource to be available if requested - if wait: - try: - wait_timeout = time.time() + wait_timeout - time.sleep(5) - - while wait_timeout > time.time() and resource.status != 'available': - time.sleep(5) - if wait_timeout <= time.time(): - module.fail_json(msg = "Timeout waiting for resource %s" % resource.id) - resource = get_current_resource(conn, result.id, command) - except boto.exception.BotoServerError, e: - # If we're waiting for an instance to be deleted then - # get_all_dbinstances will eventually throw a - # DBInstanceNotFound error. 
- if command == 'delete' and e.error_code == 'DBInstanceNotFound': - module.exit_json(changed=True) - else: - module.fail_json(msg = e.error_message) - - # If we got here then pack up all the instance details to send - # back to ansible - if command == 'snapshot': - d = { - 'id' : resource.id, - 'create_time' : resource.snapshot_create_time, - 'status' : resource.status, - 'availability_zone' : resource.availability_zone, - 'instance_id' : resource.instance_id, - 'instance_created' : resource.instance_create_time, - } - try: - d["snapshot_type"] = resource.snapshot_type - d["iops"] = resource.iops - except AttributeError, e: - pass # needs boto >= 2.21.0 - - return module.exit_json(changed=changed, snapshot=d) - - d = { - 'id' : resource.id, - 'create_time' : resource.create_time, - 'status' : resource.status, - 'availability_zone' : resource.availability_zone, - 'backup_retention' : resource.backup_retention_period, - 'backup_window' : resource.preferred_backup_window, - 'maintenance_window' : resource.preferred_maintenance_window, - 'multi_zone' : resource.multi_az, - 'instance_type' : resource.instance_class, - 'username' : resource.master_username, - 'iops' : resource.iops - } - - # Endpoint exists only if the instance is available - if resource.status == 'available' and command != 'snapshot': - d["endpoint"] = resource.endpoint[0] - d["port"] = resource.endpoint[1] - if resource.vpc_security_groups is not None: - d["vpc_security_groups"] = ','.join(x.vpc_group for x in resource.vpc_security_groups) - else: - d["vpc_security_groups"] = None - else: - d["endpoint"] = None - d["port"] = None - d["vpc_security_groups"] = None - - # ReadReplicaSourceDBInstanceIdentifier may or may not exist - try: - d["replication_source"] = resource.ReadReplicaSourceDBInstanceIdentifier - except Exception, e: - d["replication_source"] = None - - module.exit_json(changed=changed, instance=d) - -# import module snippets -from ansible.module_utils.basic import * -from 
ansible.module_utils.ec2 import * - -main() diff --git a/library/cloud/rds_param_group b/library/cloud/rds_param_group deleted file mode 100644 index 39f9432057..0000000000 --- a/library/cloud/rds_param_group +++ /dev/null @@ -1,313 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: rds_param_group -version_added: "1.5" -short_description: manage RDS parameter groups -description: - - Creates, modifies, and deletes RDS parameter groups. This module has a dependency on python-boto >= 2.5. -options: - state: - description: - - Specifies whether the group should be present or absent. - required: true - default: present - aliases: [] - choices: [ 'present' , 'absent' ] - name: - description: - - Database parameter group identifier. - required: true - default: null - aliases: [] - description: - description: - - Database parameter group description. Only set when a new group is added. - required: false - default: null - aliases: [] - engine: - description: - - The type of database for this group. Required for state=present. 
- required: false - default: null - aliases: [] - choices: [ 'mysql5.1', 'mysql5.5', 'mysql5.6', 'oracle-ee-11.2', 'oracle-se-11.2', 'oracle-se1-11.2', 'postgres9.3', 'sqlserver-ee-10.5', 'sqlserver-ee-11.0', 'sqlserver-ex-10.5', 'sqlserver-ex-11.0', 'sqlserver-se-10.5', 'sqlserver-se-11.0', 'sqlserver-web-10.5', 'sqlserver-web-11.0'] - immediate: - description: - - Whether to apply the changes immediately, or after the next reboot of any associated instances. - required: false - default: null - aliases: [] - params: - description: - - Map of parameter names and values. Numeric values may be represented as K for kilo (1024), M for mega (1024^2), G for giga (1024^3), or T for tera (1024^4), and these values will be expanded into the appropriate number before being set in the parameter group. - required: false - default: null - aliases: [] - choices: [ 'mysql5.1', 'mysql5.5', 'mysql5.6', 'oracle-ee-11.2', 'oracle-se-11.2', 'oracle-se1-11.2', 'postgres9.3', 'sqlserver-ee-10.5', 'sqlserver-ee-11.0', 'sqlserver-ex-10.5', 'sqlserver-ex-11.0', 'sqlserver-se-10.5', 'sqlserver-se-11.0', 'sqlserver-web-10.5', 'sqlserver-web-11.0'] - region: - description: - - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. - required: true - default: null - aliases: [ 'aws_region', 'ec2_region' ] - aws_access_key: - description: - - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. - required: false - default: null - aliases: [ 'ec2_access_key', 'access_key' ] - aws_secret_key: - description: - - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. 
- required: false - default: null - aliases: [ 'ec2_secret_key', 'secret_key' ] -requirements: [ "boto" ] -author: Scott Anderson -''' - -EXAMPLES = ''' -# Add or change a parameter group, in this case setting auto_increment_increment to 42 * 1024 -- rds_param_group: > - state=present - name=norwegian_blue - description=My Fancy Ex Parrot Group - engine=mysql5.6 - params='{"auto_increment_increment": "42K"}' - -# Remove a parameter group -- rds_param_group: > - state=absent - name=norwegian_blue -''' - -import sys -import time - -VALID_ENGINES = [ - 'mysql5.1', - 'mysql5.5', - 'mysql5.6', - 'oracle-ee-11.2', - 'oracle-se-11.2', - 'oracle-se1-11.2', - 'postgres9.3', - 'sqlserver-ee-10.5', - 'sqlserver-ee-11.0', - 'sqlserver-ex-10.5', - 'sqlserver-ex-11.0', - 'sqlserver-se-10.5', - 'sqlserver-se-11.0', - 'sqlserver-web-10.5', - 'sqlserver-web-11.0', -] - -try: - import boto.rds - from boto.exception import BotoServerError -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - -# returns a tuple: (whether or not a parameter was changed, the remaining parameters that weren't found in this parameter group) - -class NotModifiableError(StandardError): - def __init__(self, error_message, *args): - super(NotModifiableError, self).__init__(error_message, *args) - self.error_message = error_message - - def __repr__(self): - return 'NotModifiableError: %s' % self.error_message - - def __str__(self): - return 'NotModifiableError: %s' % self.error_message - -INT_MODIFIERS = { - 'K': 1024, - 'M': pow(1024, 2), - 'G': pow(1024, 3), - 'T': pow(1024, 4), -} - -TRUE_VALUES = ('on', 'true', 'yes', '1',) - -def set_parameter(param, value, immediate): - """ - Allows setting parameters with 10M = 10* 1024 * 1024 and so on. 
- """ - converted_value = value - - if param.type == 'string': - converted_value = str(value) - - elif param.type == 'integer': - if isinstance(value, basestring): - try: - for modifier in INT_MODIFIERS.keys(): - if value.endswith(modifier): - converted_value = int(value[:-1]) * INT_MODIFIERS[modifier] - converted_value = int(converted_value) - except ValueError: - # may be based on a variable (ie. {foo*3/4}) so - # just pass it on through to boto - converted_value = str(value) - elif type(value) == bool: - converted_value = 1 if value else 0 - else: - converted_value = int(value) - - elif param.type == 'boolean': - if isinstance(value, basestring): - converted_value = value in TRUE_VALUES - else: - converted_value = bool(value) - - param.value = converted_value - param.apply(immediate) - -def modify_group(group, params, immediate=False): - """ Set all of the params in a group to the provided new params. Raises NotModifiableError if any of the - params to be changed are read only. - """ - changed = {} - - new_params = dict(params) - - for key in new_params.keys(): - if group.has_key(key): - param = group[key] - new_value = new_params[key] - - try: - old_value = param.value - except ValueError: - # some versions of boto have problems with retrieving - # integer values from params that may have their value - # based on a variable (ie. {foo*3/4}), so grab it in a - # way that bypasses the property functions - old_value = param._value - - if old_value != new_value: - if not param.is_modifiable: - raise NotModifiableError('Parameter %s is not modifiable.' 
% key) - - changed[key] = {'old': param.value, 'new': new_value} - - set_parameter(param, new_value, immediate) - - del new_params[key] - - return changed, new_params - - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( - state = dict(required=True, choices=['present', 'absent']), - name = dict(required=True), - engine = dict(required=False, choices=VALID_ENGINES), - description = dict(required=False), - params = dict(required=False, aliases=['parameters'], type='dict'), - immediate = dict(required=False, type='bool'), - ) - ) - module = AnsibleModule(argument_spec=argument_spec) - - state = module.params.get('state') - group_name = module.params.get('name').lower() - group_engine = module.params.get('engine') - group_description = module.params.get('description') - group_params = module.params.get('params') or {} - immediate = module.params.get('immediate') or False - - if state == 'present': - for required in ['name', 'description', 'engine', 'params']: - if not module.params.get(required): - module.fail_json(msg = str("Parameter %s required for state='present'" % required)) - else: - for not_allowed in ['description', 'engine', 'params']: - if module.params.get(not_allowed): - module.fail_json(msg = str("Parameter %s not allowed for state='absent'" % not_allowed)) - - # Retrieve any AWS settings from the environment. 
- ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) - - if not region: - module.fail_json(msg = str("region not specified and unable to determine region from EC2_REGION.")) - - try: - conn = boto.rds.connect_to_region(region, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key) - except boto.exception.BotoServerError, e: - module.fail_json(msg = e.error_message) - - group_was_added = False - - try: - changed = False - - try: - all_groups = conn.get_all_dbparameter_groups(group_name, max_records=100) - exists = len(all_groups) > 0 - except BotoServerError, e: - if e.error_code != 'DBParameterGroupNotFound': - module.fail_json(msg = e.error_message) - exists = False - - if state == 'absent': - if exists: - conn.delete_parameter_group(group_name) - changed = True - else: - changed = {} - if not exists: - new_group = conn.create_parameter_group(group_name, engine=group_engine, description=group_description) - group_was_added = True - - # If a "Marker" is present, this group has more attributes remaining to check. Get the next batch, but only - # if there are parameters left to set. - marker = None - while len(group_params): - next_group = conn.get_all_dbparameters(group_name, marker=marker) - - changed_params, group_params = modify_group(next_group, group_params, immediate) - changed.update(changed_params) - - if hasattr(next_group, 'Marker'): - marker = next_group.Marker - else: - break - - - except BotoServerError, e: - module.fail_json(msg = e.error_message) - - except NotModifiableError, e: - msg = e.error_message - if group_was_added: - msg = '%s The group "%s" was added first.' 
% (msg, group_name) - module.fail_json(msg=msg) - - module.exit_json(changed=changed) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -main() diff --git a/library/cloud/rds_subnet_group b/library/cloud/rds_subnet_group deleted file mode 100644 index 552c94f188..0000000000 --- a/library/cloud/rds_subnet_group +++ /dev/null @@ -1,166 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: rds_subnet_group -version_added: "1.5" -short_description: manage RDS database subnet groups -description: - - Creates, modifies, and deletes RDS database subnet groups. This module has a dependency on python-boto >= 2.5. -options: - state: - description: - - Specifies whether the subnet should be present or absent. - required: true - default: present - aliases: [] - choices: [ 'present' , 'absent' ] - name: - description: - - Database subnet group identifier. - required: true - default: null - aliases: [] - description: - description: - - Database subnet group description. Only set when a new group is added. - required: false - default: null - aliases: [] - subnets: - description: - - List of subnet IDs that make up the database subnet group. - required: false - default: null - aliases: [] - region: - description: - - The AWS region to use. 
If not specified then the value of the EC2_REGION environment variable, if any, is used. - required: true - default: null - aliases: [ 'aws_region', 'ec2_region' ] - aws_access_key: - description: - - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. - required: false - default: null - aliases: [ 'ec2_access_key', 'access_key' ] - aws_secret_key: - description: - - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. - required: false - default: null - aliases: [ 'ec2_secret_key', 'secret_key' ] -requirements: [ "boto" ] -author: Scott Anderson -''' - -EXAMPLES = ''' -# Add or change a subnet group -- local_action: - module: rds_subnet_group - state: present - name: norwegian-blue - description: My Fancy Ex Parrot Subnet Group - subnets: - - subnet-aaaaaaaa - - subnet-bbbbbbbb - -# Remove a parameter group -- rds_param_group: > - state=absent - name=norwegian-blue -''' - -import sys -import time - -try: - import boto.rds - from boto.exception import BotoServerError -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( - state = dict(required=True, choices=['present', 'absent']), - name = dict(required=True), - description = dict(required=False), - subnets = dict(required=False, type='list'), - ) - ) - module = AnsibleModule(argument_spec=argument_spec) - - state = module.params.get('state') - group_name = module.params.get('name').lower() - group_description = module.params.get('description') - group_subnets = module.params.get('subnets') or {} - - if state == 'present': - for required in ['name', 'description', 'subnets']: - if not module.params.get(required): - module.fail_json(msg = str("Parameter %s required for state='present'" % required)) - else: - for not_allowed in ['description', 'subnets']: - if module.params.get(not_allowed): - module.fail_json(msg = 
str("Parameter %s not allowed for state='absent'" % not_allowed)) - - # Retrieve any AWS settings from the environment. - ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) - - if not region: - module.fail_json(msg = str("region not specified and unable to determine region from EC2_REGION.")) - - try: - conn = boto.rds.connect_to_region(region, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key) - except boto.exception.BotoServerError, e: - module.fail_json(msg = e.error_message) - - try: - changed = False - exists = False - - try: - matching_groups = conn.get_all_db_subnet_groups(group_name, max_records=100) - exists = len(matching_groups) > 0 - except BotoServerError, e: - if e.error_code != 'DBSubnetGroupNotFoundFault': - module.fail_json(msg = e.error_message) - - if state == 'absent': - if exists: - conn.delete_db_subnet_group(group_name) - changed = True - else: - if not exists: - new_group = conn.create_db_subnet_group(group_name, desc=group_description, subnet_ids=group_subnets) - - else: - changed_group = conn.modify_db_subnet_group(group_name, description=group_description, subnet_ids=group_subnets) - - except BotoServerError, e: - module.fail_json(msg = e.error_message) - - module.exit_json(changed=changed) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -main() diff --git a/library/cloud/route53 b/library/cloud/route53 deleted file mode 100644 index b3878e0580..0000000000 --- a/library/cloud/route53 +++ /dev/null @@ -1,281 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: route53 -version_added: "1.3" -short_description: add or delete entries in Amazons Route53 DNS service -description: - - Creates and deletes DNS records in Amazons Route53 service -options: - command: - description: - - Specifies the action to take. - required: true - default: null - aliases: [] - choices: [ 'get', 'create', 'delete' ] - zone: - description: - - The DNS zone to modify - required: true - default: null - aliases: [] - record: - description: - - The full DNS record to create or delete - required: true - default: null - aliases: [] - ttl: - description: - - The TTL to give the new record - required: false - default: 3600 (one hour) - aliases: [] - type: - description: - - The type of DNS record to create - required: true - default: null - aliases: [] - choices: [ 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS' ] - value: - description: - - The new value when creating a DNS record. Multiple comma-spaced values are allowed. When deleting a record all values for the record must be specified or Route53 will not delete it. - required: false - default: null - aliases: [] - aws_secret_key: - description: - - AWS secret key. - required: false - default: null - aliases: ['ec2_secret_key', 'secret_key'] - aws_access_key: - description: - - AWS access key. 
- required: false - default: null - aliases: ['ec2_access_key', 'access_key'] - overwrite: - description: - - Whether an existing record should be overwritten on create if values do not match - required: false - default: null - aliases: [] - retry_interval: - description: - - In the case that route53 is still servicing a prior request, this module will wait and try again after this many seconds. If you have many domain names, the default of 500 seconds may be too long. - required: false - default: 500 - aliases: [] -requirements: [ "boto" ] -author: Bruce Pennypacker -''' - -EXAMPLES = ''' -# Add new.foo.com as an A record with 3 IPs -- route53: > - command=create - zone=foo.com - record=new.foo.com - type=A - ttl=7200 - value=1.1.1.1,2.2.2.2,3.3.3.3 - -# Retrieve the details for new.foo.com -- route53: > - command=get - zone=foo.com - record=new.foo.com - type=A - register: rec - -# Delete new.foo.com A record using the results from the get command -- route53: > - command=delete - zone=foo.com - record={{ rec.set.record }} - type={{ rec.set.type }} - value={{ rec.set.value }} - -# Add an AAAA record. Note that because there are colons in the value -# that the entire parameter list must be quoted: -- route53: > - command=create - zone=foo.com - record=localhost.foo.com - type=AAAA - ttl=7200 - value="::1" - -# Add a TXT record. 
Note that TXT and SPF records must be surrounded -# by quotes when sent to Route 53: -- route53: > - command=create - zone=foo.com - record=localhost.foo.com - type=TXT - ttl=7200 - value="\"bar\"" - - -''' - -import sys -import time - -try: - import boto - from boto import route53 - from boto.route53.record import ResourceRecordSets -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - -def commit(changes, retry_interval): - """Commit changes, but retry PriorRequestNotComplete errors.""" - retry = 10 - while True: - try: - retry -= 1 - return changes.commit() - except boto.route53.exception.DNSServerError, e: - code = e.body.split("")[1] - code = code.split("")[0] - if code != 'PriorRequestNotComplete' or retry < 0: - raise e - time.sleep(retry_interval) - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( - command = dict(choices=['get', 'create', 'delete'], required=True), - zone = dict(required=True), - record = dict(required=True), - ttl = dict(required=False, default=3600), - type = dict(choices=['A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS'], required=True), - value = dict(required=False), - overwrite = dict(required=False, type='bool'), - retry_interval = dict(required=False, default=500) - ) - ) - module = AnsibleModule(argument_spec=argument_spec) - - command_in = module.params.get('command') - zone_in = module.params.get('zone') - ttl_in = module.params.get('ttl') - record_in = module.params.get('record') - type_in = module.params.get('type') - value_in = module.params.get('value') - retry_interval_in = module.params.get('retry_interval') - - ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) - - value_list = () - - if type(value_in) is str: - if value_in: - value_list = sorted(value_in.split(',')) - elif type(value_in) is list: - value_list = sorted(value_in) - - if zone_in[-1:] != '.': - zone_in += "." 
- - if record_in[-1:] != '.': - record_in += "." - - if command_in == 'create' or command_in == 'delete': - if not value_in: - module.fail_json(msg = "parameter 'value' required for create/delete") - - # connect to the route53 endpoint - try: - conn = boto.route53.connection.Route53Connection(aws_access_key, aws_secret_key) - except boto.exception.BotoServerError, e: - module.fail_json(msg = e.error_message) - - # Get all the existing hosted zones and save their ID's - zones = {} - results = conn.get_all_hosted_zones() - for r53zone in results['ListHostedZonesResponse']['HostedZones']: - zone_id = r53zone['Id'].replace('/hostedzone/', '') - zones[r53zone['Name']] = zone_id - - # Verify that the requested zone is already defined in Route53 - if not zone_in in zones: - errmsg = "Zone %s does not exist in Route53" % zone_in - module.fail_json(msg = errmsg) - - record = {} - - found_record = False - sets = conn.get_all_rrsets(zones[zone_in]) - for rset in sets: - # Due to a bug in either AWS or Boto, "special" characters are returned as octals, preventing round - # tripping of things like * and @. 
- decoded_name = rset.name.replace(r'\052', '*') - decoded_name = decoded_name.replace(r'\100', '@') - - if rset.type == type_in and decoded_name == record_in: - found_record = True - record['zone'] = zone_in - record['type'] = rset.type - record['record'] = decoded_name - record['ttl'] = rset.ttl - record['value'] = ','.join(sorted(rset.resource_records)) - record['values'] = sorted(rset.resource_records) - if value_list == sorted(rset.resource_records) and int(record['ttl']) == ttl_in and command_in == 'create': - module.exit_json(changed=False) - - if command_in == 'get': - module.exit_json(changed=False, set=record) - - if command_in == 'delete' and not found_record: - module.exit_json(changed=False) - - changes = ResourceRecordSets(conn, zones[zone_in]) - - if command_in == 'create' and found_record: - if not module.params['overwrite']: - module.fail_json(msg = "Record already exists with different value. Set 'overwrite' to replace it") - else: - change = changes.add_change("DELETE", record_in, type_in, record['ttl']) - for v in record['values']: - change.add_value(v) - - if command_in == 'create' or command_in == 'delete': - change = changes.add_change(command_in.upper(), record_in, type_in, ttl_in) - for v in value_list: - change.add_value(v) - - try: - result = commit(changes, retry_interval_in) - except boto.route53.exception.DNSServerError, e: - txt = e.body.split("")[1] - txt = txt.split("")[0] - module.fail_json(msg = txt) - - module.exit_json(changed=True) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -main() diff --git a/library/cloud/s3 b/library/cloud/s3 deleted file mode 100644 index 6438c6405e..0000000000 --- a/library/cloud/s3 +++ /dev/null @@ -1,514 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, 
either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: s3 -short_description: S3 module putting a file into S3. -description: - - This module allows the user to dictate the presence of a given file in an S3 bucket. If or once the key (file) exists in the bucket, it returns a time-expired download URL. This module has a dependency on python-boto. -version_added: "1.1" -options: - bucket: - description: - - Bucket name. - required: true - default: null - aliases: [] - object: - description: - - Keyname of the object inside the bucket. Can be used to create "virtual directories", see examples. - required: false - default: null - aliases: [] - version_added: "1.3" - src: - description: - - The source file path when performing a PUT operation. - required: false - default: null - aliases: [] - version_added: "1.3" - dest: - description: - - The destination file path when downloading an object/key with a GET operation. - required: false - aliases: [] - version_added: "1.3" - overwrite: - description: - - Force overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations. - required: false - default: true - version_added: "1.2" - mode: - description: - - Switches the module behaviour between put (upload), get (download), geturl (return download url (Ansible 1.3+), getstr (download object as string (1.3+)), create (bucket) and delete (bucket). 
- required: true - default: null - aliases: [] - expiration: - description: - - Time limit (in seconds) for the URL generated and returned by S3/Walrus when performing a mode=put or mode=geturl operation. - required: false - default: 600 - aliases: [] - s3_url: - description: - - "S3 URL endpoint. If not specified then the S3_URL environment variable is used, if that variable is defined. Ansible tries to guess if fakes3 (https://github.com/jubos/fake-s3) or Eucalyptus Walrus (https://github.com/eucalyptus/eucalyptus/wiki/Walrus) is used and configure connection accordingly. Current heuristic is: everything with scheme fakes3:// is fakes3, everything else not ending with amazonaws.com is Walrus." - default: null - aliases: [ S3_URL ] - aws_secret_key: - description: - - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. - required: false - default: null - aliases: ['ec2_secret_key', 'secret_key'] - aws_access_key: - description: - - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. - required: false - default: null - aliases: [ 'ec2_access_key', 'access_key' ] - metadata: - description: - - Metadata for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'. - required: false - default: null - version_added: "1.6" - region: - description: - - "AWS region to create the bucket in. If not set then the value of the EC2_REGION and AWS_REGION environment variables are checked, followed by the aws_region and ec2_region settings in the Boto config file. If none of those are set the region defaults to the S3 Location: US Standard. Prior to ansible 1.8 this parameter could be specified but had no effect." 
- required: false - default: null - version_added: "1.8" - -requirements: [ "boto" ] -author: Lester Wade, Ralph Tice -''' - -EXAMPLES = ''' -# Simple PUT operation -- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put -# Simple GET operation -- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get -# GET/download and overwrite local file (trust remote) -- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get -# GET/download and do not overwrite local file (trust remote) -- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get force=false -# PUT/upload and overwrite remote file (trust local) -- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put -# PUT/upload with metadata -- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip' -# PUT/upload with multiple metadata -- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip,Cache-Control=no-cache' -# PUT/upload and do not overwrite remote file (trust local) -- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put force=false -# Download an object as a string to use else where in your playbook -- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=getstr -# Create an empty bucket -- s3: bucket=mybucket mode=create -# Create a bucket with key as directory -- s3: bucket=mybucket object=/my/directory/path mode=create -# Create an empty bucket in the EU region -- s3: bucket=mybucket mode=create region=eu-west-1 -# Delete a bucket and all contents -- s3: bucket=mybucket mode=delete -''' - -import sys -import os -import urlparse -import hashlib - -try: - import boto - from boto.s3.connection import Location -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - -def 
key_check(module, s3, bucket, obj): - try: - bucket = s3.lookup(bucket) - key_check = bucket.get_key(obj) - except s3.provider.storage_response_error, e: - module.fail_json(msg= str(e)) - if key_check: - return True - else: - return False - -def keysum(module, s3, bucket, obj): - bucket = s3.lookup(bucket) - key_check = bucket.get_key(obj) - if not key_check: - return None - md5_remote = key_check.etag[1:-1] - etag_multipart = '-' in md5_remote # Check for multipart, etag is not md5 - if etag_multipart is True: - module.fail_json(msg="Files uploaded with multipart of s3 are not supported with checksum, unable to compute checksum.") - return md5_remote - -def bucket_check(module, s3, bucket): - try: - result = s3.lookup(bucket) - except s3.provider.storage_response_error, e: - module.fail_json(msg= str(e)) - if result: - return True - else: - return False - -def create_bucket(module, s3, bucket, location=Location.DEFAULT): - try: - bucket = s3.create_bucket(bucket, location=location) - except s3.provider.storage_response_error, e: - module.fail_json(msg= str(e)) - if bucket: - return True - -def delete_bucket(module, s3, bucket): - try: - bucket = s3.lookup(bucket) - bucket_contents = bucket.list() - bucket.delete_keys([key.name for key in bucket_contents]) - bucket.delete() - return True - except s3.provider.storage_response_error, e: - module.fail_json(msg= str(e)) - -def delete_key(module, s3, bucket, obj): - try: - bucket = s3.lookup(bucket) - bucket.delete_key(obj) - module.exit_json(msg="Object deleted from bucket %s"%bucket, changed=True) - except s3.provider.storage_response_error, e: - module.fail_json(msg= str(e)) - -def create_dirkey(module, s3, bucket, obj): - try: - bucket = s3.lookup(bucket) - key = bucket.new_key(obj) - key.set_contents_from_string('') - module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket.name), changed=True) - except s3.provider.storage_response_error, e: - module.fail_json(msg= str(e)) - -def 
upload_file_check(src): - if os.path.exists(src): - file_exists is True - else: - file_exists is False - if os.path.isdir(src): - module.fail_json(msg="Specifying a directory is not a valid source for upload.", failed=True) - return file_exists - -def path_check(path): - if os.path.exists(path): - return True - else: - return False - -def upload_s3file(module, s3, bucket, obj, src, expiry, metadata): - try: - bucket = s3.lookup(bucket) - key = bucket.new_key(obj) - if metadata: - for meta_key in metadata.keys(): - key.set_metadata(meta_key, metadata[meta_key]) - - key.set_contents_from_filename(src) - url = key.generate_url(expiry) - module.exit_json(msg="PUT operation complete", url=url, changed=True) - except s3.provider.storage_copy_error, e: - module.fail_json(msg= str(e)) - -def download_s3file(module, s3, bucket, obj, dest): - try: - bucket = s3.lookup(bucket) - key = bucket.lookup(obj) - key.get_contents_to_filename(dest) - module.exit_json(msg="GET operation complete", changed=True) - except s3.provider.storage_copy_error, e: - module.fail_json(msg= str(e)) - -def download_s3str(module, s3, bucket, obj): - try: - bucket = s3.lookup(bucket) - key = bucket.lookup(obj) - contents = key.get_contents_as_string() - module.exit_json(msg="GET operation complete", contents=contents, changed=True) - except s3.provider.storage_copy_error, e: - module.fail_json(msg= str(e)) - -def get_download_url(module, s3, bucket, obj, expiry, changed=True): - try: - bucket = s3.lookup(bucket) - key = bucket.lookup(obj) - url = key.generate_url(expiry) - module.exit_json(msg="Download url:", url=url, expiry=expiry, changed=changed) - except s3.provider.storage_response_error, e: - module.fail_json(msg= str(e)) - -def is_fakes3(s3_url): - """ Return True if s3_url has scheme fakes3:// """ - if s3_url is not None: - return urlparse.urlparse(s3_url).scheme == 'fakes3' - else: - return False - -def is_walrus(s3_url): - """ Return True if it's Walrus endpoint, not S3 - - We assume 
anything other than *.amazonaws.com is Walrus""" - if s3_url is not None: - o = urlparse.urlparse(s3_url) - return not o.hostname.endswith('amazonaws.com') - else: - return False - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( - bucket = dict(required=True), - object = dict(), - src = dict(), - dest = dict(default=None), - mode = dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr'], required=True), - expiry = dict(default=600, aliases=['expiration']), - s3_url = dict(aliases=['S3_URL']), - overwrite = dict(aliases=['force'], default=True, type='bool'), - metadata = dict(type='dict'), - ), - ) - module = AnsibleModule(argument_spec=argument_spec) - - bucket = module.params.get('bucket') - obj = module.params.get('object') - src = module.params.get('src') - if module.params.get('dest'): - dest = os.path.expanduser(module.params.get('dest')) - mode = module.params.get('mode') - expiry = int(module.params['expiry']) - s3_url = module.params.get('s3_url') - overwrite = module.params.get('overwrite') - metadata = module.params.get('metadata') - - ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) - - if region in ('us-east-1', '', None): - # S3ism for the US Standard region - location = Location.DEFAULT - else: - # Boto uses symbolic names for locations but region strings will - # actually work fine for everything except us-east-1 (US Standard) - location = region - - if module.params.get('object'): - obj = os.path.expanduser(module.params['object']) - - # allow eucarc environment variables to be used if ansible vars aren't set - if not s3_url and 'S3_URL' in os.environ: - s3_url = os.environ['S3_URL'] - - # Look at s3_url and tweak connection settings - # if connecting to Walrus or fakes3 - if is_fakes3(s3_url): - try: - fakes3 = urlparse.urlparse(s3_url) - from boto.s3.connection import OrdinaryCallingFormat - s3 = boto.connect_s3( - aws_access_key, - aws_secret_key, - is_secure=False, - 
host=fakes3.hostname, - port=fakes3.port, - calling_format=OrdinaryCallingFormat()) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg = str(e)) - elif is_walrus(s3_url): - try: - walrus = urlparse.urlparse(s3_url).hostname - s3 = boto.connect_walrus(walrus, aws_access_key, aws_secret_key) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg = str(e)) - else: - try: - s3 = boto.connect_s3(aws_access_key, aws_secret_key) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg = str(e)) - - # If our mode is a GET operation (download), go through the procedure as appropriate ... - if mode == 'get': - - # First, we check to see if the bucket exists, we get "bucket" returned. - bucketrtn = bucket_check(module, s3, bucket) - if bucketrtn is False: - module.fail_json(msg="Target bucket cannot be found", failed=True) - - # Next, we check to see if the key in the bucket exists. If it exists, it also returns key_matches md5sum check. - keyrtn = key_check(module, s3, bucket, obj) - if keyrtn is False: - module.fail_json(msg="Target key cannot be found", failed=True) - - # If the destination path doesn't exist, no need to md5um etag check, so just download. - pathrtn = path_check(dest) - if pathrtn is False: - download_s3file(module, s3, bucket, obj, dest) - - # Compare the remote MD5 sum of the object with the local dest md5sum, if it already exists. - if pathrtn is True: - md5_remote = keysum(module, s3, bucket, obj) - md5_local = hashlib.md5(open(dest, 'rb').read()).hexdigest() - if md5_local == md5_remote: - sum_matches = True - if overwrite is True: - download_s3file(module, s3, bucket, obj, dest) - else: - module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite parameter to force.", changed=False) - else: - sum_matches = False - if overwrite is True: - download_s3file(module, s3, bucket, obj, dest) - else: - module.fail_json(msg="WARNING: Checksums do not match. 
Use overwrite parameter to force download.", failed=True) - - # Firstly, if key_matches is TRUE and overwrite is not enabled, we EXIT with a helpful message. - if sum_matches is True and overwrite is False: - module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite parameter to force.", changed=False) - - # At this point explicitly define the overwrite condition. - if sum_matches is True and pathrtn is True and overwrite is True: - download_s3file(module, s3, bucket, obj, dest) - - # If sum does not match but the destination exists, we - - # if our mode is a PUT operation (upload), go through the procedure as appropriate ... - if mode == 'put': - - # Use this snippet to debug through conditionals: -# module.exit_json(msg="Bucket return %s"%bucketrtn) -# sys.exit(0) - - # Lets check the src path. - pathrtn = path_check(src) - if pathrtn is False: - module.fail_json(msg="Local object for PUT does not exist", failed=True) - - # Lets check to see if bucket exists to get ground truth. - bucketrtn = bucket_check(module, s3, bucket) - if bucketrtn is True: - keyrtn = key_check(module, s3, bucket, obj) - - # Lets check key state. Does it exist and if it does, compute the etag md5sum. - if bucketrtn is True and keyrtn is True: - md5_remote = keysum(module, s3, bucket, obj) - md5_local = hashlib.md5(open(src, 'rb').read()).hexdigest() - if md5_local == md5_remote: - sum_matches = True - if overwrite is True: - upload_s3file(module, s3, bucket, obj, src, expiry, metadata) - else: - get_download_url(module, s3, bucket, obj, expiry, changed=False) - else: - sum_matches = False - if overwrite is True: - upload_s3file(module, s3, bucket, obj, src, expiry, metadata) - else: - module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.", failed=True) - - # If neither exist (based on bucket existence), we can create both. 
- if bucketrtn is False and pathrtn is True: - create_bucket(module, s3, bucket, location) - upload_s3file(module, s3, bucket, obj, src, expiry, metadata) - - # If bucket exists but key doesn't, just upload. - if bucketrtn is True and pathrtn is True and keyrtn is False: - upload_s3file(module, s3, bucket, obj, src, expiry, metadata) - - # Support for deleting an object if we have both params. - if mode == 'delete': - if bucket: - bucketrtn = bucket_check(module, s3, bucket) - if bucketrtn is True: - deletertn = delete_bucket(module, s3, bucket) - if deletertn is True: - module.exit_json(msg="Bucket %s and all keys have been deleted."%bucket, changed=True) - else: - module.fail_json(msg="Bucket does not exist.", changed=False) - else: - module.fail_json(msg="Bucket parameter is required.", failed=True) - - # Need to research how to create directories without "populating" a key, so this should just do bucket creation for now. - # WE SHOULD ENABLE SOME WAY OF CREATING AN EMPTY KEY TO CREATE "DIRECTORY" STRUCTURE, AWS CONSOLE DOES THIS. - if mode == 'create': - if bucket and not obj: - bucketrtn = bucket_check(module, s3, bucket) - if bucketrtn is True: - module.exit_json(msg="Bucket already exists.", changed=False) - else: - module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, s3, bucket, location)) - if bucket and obj: - bucketrtn = bucket_check(module, s3, bucket) - if obj.endswith('/'): - dirobj = obj - else: - dirobj = obj + "/" - if bucketrtn is True: - keyrtn = key_check(module, s3, bucket, dirobj) - if keyrtn is True: - module.exit_json(msg="Bucket %s and key %s already exists."% (bucket, obj), changed=False) - else: - create_dirkey(module, s3, bucket, dirobj) - if bucketrtn is False: - created = create_bucket(module, s3, bucket, location) - create_dirkey(module, s3, bucket, dirobj) - - # Support for grabbing the time-expired URL for an object in S3/Walrus. 
- if mode == 'geturl': - if bucket and obj: - bucketrtn = bucket_check(module, s3, bucket) - if bucketrtn is False: - module.fail_json(msg="Bucket %s does not exist."%bucket, failed=True) - else: - keyrtn = key_check(module, s3, bucket, obj) - if keyrtn is True: - get_download_url(module, s3, bucket, obj, expiry) - else: - module.fail_json(msg="Key %s does not exist."%obj, failed=True) - else: - module.fail_json(msg="Bucket and Object parameters must be set", failed=True) - - if mode == 'getstr': - if bucket and obj: - bucketrtn = bucket_check(module, s3, bucket) - if bucketrtn is False: - module.fail_json(msg="Bucket %s does not exist."%bucket, failed=True) - else: - keyrtn = key_check(module, s3, bucket, obj) - if keyrtn is True: - download_s3str(module, s3, bucket, obj) - else: - module.fail_json(msg="Key %s does not exist."%obj, failed=True) - - module.exit_json(failed=False) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -main() diff --git a/library/cloud/virt b/library/cloud/virt deleted file mode 100644 index f1d36fc196..0000000000 --- a/library/cloud/virt +++ /dev/null @@ -1,493 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -""" -Virt management features - -Copyright 2007, 2012 Red Hat, Inc -Michael DeHaan -Seth Vidal - -This software may be freely redistributed under the terms of the GNU -general public license. - -You should have received a copy of the GNU General Public License -along with this program. If not, see . -""" - -DOCUMENTATION = ''' ---- -module: virt -short_description: Manages virtual machines supported by libvirt -description: - - Manages virtual machines supported by I(libvirt). -version_added: "0.2" -options: - name: - description: - - name of the guest VM being managed. Note that VM must be previously - defined with xml. 
- required: true - default: null - aliases: [] - state: - description: - - Note that there may be some lag for state requests like C(shutdown) - since these refer only to VM states. After starting a guest, it may not - be immediately accessible. - required: false - choices: [ "running", "shutdown", "destroyed", "paused" ] - default: "no" - command: - description: - - in addition to state management, various non-idempotent commands are available. See examples - required: false - choices: ["create","status", "start", "stop", "pause", "unpause", - "shutdown", "undefine", "destroy", "get_xml", "autostart", - "freemem", "list_vms", "info", "nodeinfo", "virttype", "define"] - uri: - description: - - libvirt connection uri - required: false - defaults: qemu:///system - xml: - description: - - XML document used with the define command - required: false - default: null -requirements: [ "libvirt" ] -author: Michael DeHaan, Seth Vidal -''' - -EXAMPLES = ''' -# a playbook task line: -- virt: name=alpha state=running - -# /usr/bin/ansible invocations -ansible host -m virt -a "name=alpha command=status" -ansible host -m virt -a "name=alpha command=get_xml" -ansible host -m virt -a "name=alpha command=create uri=lxc:///" - -# a playbook example of defining and launching an LXC guest -tasks: - - name: define vm - virt: name=foo - command=define - xml="{{ lookup('template', 'container-template.xml.j2') }}" - uri=lxc:/// - - name: start vm - virt: name=foo state=running uri=lxc:/// -''' - -VIRT_FAILED = 1 -VIRT_SUCCESS = 0 -VIRT_UNAVAILABLE=2 - -import sys - -try: - import libvirt -except ImportError: - print "failed=True msg='libvirt python module unavailable'" - sys.exit(1) - -ALL_COMMANDS = [] -VM_COMMANDS = ['create','status', 'start', 'stop', 'pause', 'unpause', - 'shutdown', 'undefine', 'destroy', 'get_xml', 'autostart', 'define'] -HOST_COMMANDS = ['freemem', 'list_vms', 'info', 'nodeinfo', 'virttype'] -ALL_COMMANDS.extend(VM_COMMANDS) -ALL_COMMANDS.extend(HOST_COMMANDS) - 
-VIRT_STATE_NAME_MAP = { - 0 : "running", - 1 : "running", - 2 : "running", - 3 : "paused", - 4 : "shutdown", - 5 : "shutdown", - 6 : "crashed" -} - -class VMNotFound(Exception): - pass - -class LibvirtConnection(object): - - def __init__(self, uri, module): - - self.module = module - - cmd = "uname -r" - rc, stdout, stderr = self.module.run_command(cmd) - - if "xen" in stdout: - conn = libvirt.open(None) - else: - conn = libvirt.open(uri) - - if not conn: - raise Exception("hypervisor connection failure") - - self.conn = conn - - def find_vm(self, vmid): - """ - Extra bonus feature: vmid = -1 returns a list of everything - """ - conn = self.conn - - vms = [] - - # this block of code borrowed from virt-manager: - # get working domain's name - ids = conn.listDomainsID() - for id in ids: - vm = conn.lookupByID(id) - vms.append(vm) - # get defined domain - names = conn.listDefinedDomains() - for name in names: - vm = conn.lookupByName(name) - vms.append(vm) - - if vmid == -1: - return vms - - for vm in vms: - if vm.name() == vmid: - return vm - - raise VMNotFound("virtual machine %s not found" % vmid) - - def shutdown(self, vmid): - return self.find_vm(vmid).shutdown() - - def pause(self, vmid): - return self.suspend(self.conn,vmid) - - def unpause(self, vmid): - return self.resume(self.conn,vmid) - - def suspend(self, vmid): - return self.find_vm(vmid).suspend() - - def resume(self, vmid): - return self.find_vm(vmid).resume() - - def create(self, vmid): - return self.find_vm(vmid).create() - - def destroy(self, vmid): - return self.find_vm(vmid).destroy() - - def undefine(self, vmid): - return self.find_vm(vmid).undefine() - - def get_status2(self, vm): - state = vm.info()[0] - return VIRT_STATE_NAME_MAP.get(state,"unknown") - - def get_status(self, vmid): - state = self.find_vm(vmid).info()[0] - return VIRT_STATE_NAME_MAP.get(state,"unknown") - - def nodeinfo(self): - return self.conn.getInfo() - - def get_type(self): - return self.conn.getType() - - def 
get_xml(self, vmid): - vm = self.conn.lookupByName(vmid) - return vm.XMLDesc(0) - - def get_maxVcpus(self, vmid): - vm = self.conn.lookupByName(vmid) - return vm.maxVcpus() - - def get_maxMemory(self, vmid): - vm = self.conn.lookupByName(vmid) - return vm.maxMemory() - - def getFreeMemory(self): - return self.conn.getFreeMemory() - - def get_autostart(self, vmid): - vm = self.conn.lookupByName(vmid) - return vm.autostart() - - def set_autostart(self, vmid, val): - vm = self.conn.lookupByName(vmid) - return vm.setAutostart(val) - - def define_from_xml(self, xml): - return self.conn.defineXML(xml) - - -class Virt(object): - - def __init__(self, uri, module): - self.module = module - self.uri = uri - - def __get_conn(self): - self.conn = LibvirtConnection(self.uri, self.module) - return self.conn - - def get_vm(self, vmid): - self.__get_conn() - return self.conn.find_vm(vmid) - - def state(self): - vms = self.list_vms() - state = [] - for vm in vms: - state_blurb = self.conn.get_status(vm) - state.append("%s %s" % (vm,state_blurb)) - return state - - def info(self): - vms = self.list_vms() - info = dict() - for vm in vms: - data = self.conn.find_vm(vm).info() - # libvirt returns maxMem, memory, and cpuTime as long()'s, which - # xmlrpclib tries to convert to regular int's during serialization. - # This throws exceptions, so convert them to strings here and - # assume the other end of the xmlrpc connection can figure things - # out or doesn't care. 
- info[vm] = { - "state" : VIRT_STATE_NAME_MAP.get(data[0],"unknown"), - "maxMem" : str(data[1]), - "memory" : str(data[2]), - "nrVirtCpu" : data[3], - "cpuTime" : str(data[4]), - } - info[vm]["autostart"] = self.conn.get_autostart(vm) - - return info - - def nodeinfo(self): - self.__get_conn() - info = dict() - data = self.conn.nodeinfo() - info = { - "cpumodel" : str(data[0]), - "phymemory" : str(data[1]), - "cpus" : str(data[2]), - "cpumhz" : str(data[3]), - "numanodes" : str(data[4]), - "sockets" : str(data[5]), - "cpucores" : str(data[6]), - "cputhreads" : str(data[7]) - } - return info - - def list_vms(self, state=None): - self.conn = self.__get_conn() - vms = self.conn.find_vm(-1) - results = [] - for x in vms: - try: - if state: - vmstate = self.conn.get_status2(x) - if vmstate == state: - results.append(x.name()) - else: - results.append(x.name()) - except: - pass - return results - - def virttype(self): - return self.__get_conn().get_type() - - def autostart(self, vmid): - self.conn = self.__get_conn() - return self.conn.set_autostart(vmid, True) - - def freemem(self): - self.conn = self.__get_conn() - return self.conn.getFreeMemory() - - def shutdown(self, vmid): - """ Make the machine with the given vmid stop running. Whatever that takes. """ - self.__get_conn() - self.conn.shutdown(vmid) - return 0 - - - def pause(self, vmid): - """ Pause the machine with the given vmid. """ - - self.__get_conn() - return self.conn.suspend(vmid) - - def unpause(self, vmid): - """ Unpause the machine with the given vmid. """ - - self.__get_conn() - return self.conn.resume(vmid) - - def create(self, vmid): - """ Start the machine via the given vmid """ - - self.__get_conn() - return self.conn.create(vmid) - - def start(self, vmid): - """ Start the machine via the given id/name """ - - self.__get_conn() - return self.conn.create(vmid) - - def destroy(self, vmid): - """ Pull the virtual power from the virtual domain, giving it virtually no time to virtually shut down. 
""" - self.__get_conn() - return self.conn.destroy(vmid) - - def undefine(self, vmid): - """ Stop a domain, and then wipe it from the face of the earth. (delete disk/config file) """ - - self.__get_conn() - return self.conn.undefine(vmid) - - def status(self, vmid): - """ - Return a state suitable for server consumption. Aka, codes.py values, not XM output. - """ - self.__get_conn() - return self.conn.get_status(vmid) - - def get_xml(self, vmid): - """ - Receive a Vm id as input - Return an xml describing vm config returned by a libvirt call - """ - - self.__get_conn() - return self.conn.get_xml(vmid) - - def get_maxVcpus(self, vmid): - """ - Gets the max number of VCPUs on a guest - """ - - self.__get_conn() - return self.conn.get_maxVcpus(vmid) - - def get_max_memory(self, vmid): - """ - Gets the max memory on a guest - """ - - self.__get_conn() - return self.conn.get_MaxMemory(vmid) - - def define(self, xml): - """ - Define a guest with the given xml - """ - self.__get_conn() - return self.conn.define_from_xml(xml) - -def core(module): - - state = module.params.get('state', None) - guest = module.params.get('name', None) - command = module.params.get('command', None) - uri = module.params.get('uri', None) - xml = module.params.get('xml', None) - - v = Virt(uri, module) - res = {} - - if state and command=='list_vms': - res = v.list_vms(state=state) - if type(res) != dict: - res = { command: res } - return VIRT_SUCCESS, res - - if state: - if not guest: - module.fail_json(msg = "state change requires a guest specified") - - res['changed'] = False - if state == 'running': - if v.status(guest) is 'paused': - res['changed'] = True - res['msg'] = v.unpause(guest) - elif v.status(guest) is not 'running': - res['changed'] = True - res['msg'] = v.start(guest) - elif state == 'shutdown': - if v.status(guest) is not 'shutdown': - res['changed'] = True - res['msg'] = v.shutdown(guest) - elif state == 'destroyed': - if v.status(guest) is not 'shutdown': - res['changed'] = 
True - res['msg'] = v.destroy(guest) - elif state == 'paused': - if v.status(guest) is 'running': - res['changed'] = True - res['msg'] = v.pause(guest) - else: - module.fail_json(msg="unexpected state") - - return VIRT_SUCCESS, res - - if command: - if command in VM_COMMANDS: - if not guest: - module.fail_json(msg = "%s requires 1 argument: guest" % command) - if command == 'define': - if not xml: - module.fail_json(msg = "define requires xml argument") - try: - v.get_vm(guest) - except VMNotFound: - v.define(xml) - res = {'changed': True, 'created': guest} - return VIRT_SUCCESS, res - res = getattr(v, command)(guest) - if type(res) != dict: - res = { command: res } - return VIRT_SUCCESS, res - - elif hasattr(v, command): - res = getattr(v, command)() - if type(res) != dict: - res = { command: res } - return VIRT_SUCCESS, res - - else: - module.fail_json(msg="Command %s not recognized" % basecmd) - - module.fail_json(msg="expected state or command parameter to be specified") - -def main(): - - module = AnsibleModule(argument_spec=dict( - name = dict(aliases=['guest']), - state = dict(choices=['running', 'shutdown', 'destroyed', 'paused']), - command = dict(choices=ALL_COMMANDS), - uri = dict(default='qemu:///system'), - xml = dict(), - )) - - rc = VIRT_SUCCESS - try: - rc, result = core(module) - except Exception, e: - module.fail_json(msg=str(e)) - - if rc != 0: # something went wrong emit the msg - module.fail_json(rc=rc, msg=result) - else: - module.exit_json(**result) - - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/cloud/vsphere_guest b/library/cloud/vsphere_guest deleted file mode 100644 index a91a8199dd..0000000000 --- a/library/cloud/vsphere_guest +++ /dev/null @@ -1,1225 +0,0 @@ -#!/usr/bin/python - -# -*- coding: utf-8 -*- - -# TODO: -# Ability to set CPU/Memory reservations - -try: - import json -except ImportError: - import simplejson as json - -HAS_PYSPHERE = False -try: - from pysphere import 
VIServer, VIProperty, MORTypes - from pysphere.resources import VimService_services as VI - from pysphere.vi_task import VITask - from pysphere import VIException, VIApiException, FaultTypes - HAS_PYSPHERE = True -except ImportError: - pass - -DOCUMENTATION = ''' ---- -module: vsphere_guest -short_description: Create/delete/manage a guest VM through VMware vSphere. -description: - - Create/delete/reconfigure a guest VM through VMware vSphere. This module has a dependency on pysphere >= 1.7 -version_added: "1.6" -options: - vcenter_hostname: - description: - - The hostname of the vcenter server the module will connect to, to create the guest. - required: true - default: null - aliases: [] - guest: - description: - - The virtual server name you wish to manage. - required: true - user: - description: - - Username to connect to vcenter as. - required: true - default: null - password: - description: - - Password of the user to connect to vcenter as. - required: true - default: null - resource_pool: - description: - - The name of the resource_pool to create the VM in. - required: false - default: None - cluster: - description: - - The name of the cluster to create the VM in. By default this is derived from the host you tell the module to build the guest on. - required: false - default: None - esxi: - description: - - Dictionary which includes datacenter and hostname on which the VM should be created. For standalone ESXi hosts, ha-datacenter should be used as the datacenter name - required: false - default: null - state: - description: - - Indicate desired state of the vm. - default: present - choices: ['present', 'powered_on', 'absent', 'powered_off', 'restarted', 'reconfigured'] - vm_disk: - description: - - A key, value list of disks and their sizes and which datastore to keep it in. - required: false - default: null - vm_hardware: - description: - - A key, value list of VM config settings. Must include ['memory_mb', 'num_cpus', 'osid', 'scsi']. 
- required: false - default: null - vm_nic: - description: - - A key, value list of nics, their types and what network to put them on. - required: false - default: null - vm_extra_config: - description: - - A key, value pair of any extra values you want set or changed in the vmx file of the VM. Useful to set advanced options on the VM. - required: false - default: null - vm_hw_version: - description: - - Desired hardware version identifier (for example, "vmx-08" for vms that needs to be managed with vSphere Client). Note that changing hardware version of existing vm is not supported. - required: false - default: null - version_added: "1.7" - vmware_guest_facts: - description: - - Gather facts from vCenter on a particular VM - required: false - default: null - force: - description: - - Boolean. Allows you to run commands which may alter the running state of a guest. Also used to reconfigure and destroy. - default: "no" - choices: [ "yes", "no" ] - -notes: - - This module should run from a system that can access vSphere directly. - Either by using local_action, or using delegate_to. -author: Richard Hoop -requirements: [ pysphere ] -''' - - -EXAMPLES = ''' -# Create a new VM on an ESX server -# Returns changed = False when the VM already exists -# Returns changed = True and a adds ansible_facts from the new VM -# State will set the power status of a guest upon creation. Use powered_on to create and boot. 
-# Options ['state', 'vm_extra_config', 'vm_disk', 'vm_nic', 'vm_hardware', 'esxi'] are required together - -- vsphere_guest: - vcenter_hostname: vcenter.mydomain.local - username: myuser - password: mypass - guest: newvm001 - state: powered_on - vm_extra_config: - vcpu.hotadd: yes - mem.hotadd: yes - notes: This is a test VM - vm_disk: - disk1: - size_gb: 10 - type: thin - datastore: storage001 - vm_nic: - nic1: - type: vmxnet3 - network: VM Network - network_type: standard - vm_hardware: - memory_mb: 2048 - num_cpus: 2 - osid: centos64Guest - scsi: paravirtual - esxi: - datacenter: MyDatacenter - hostname: esx001.mydomain.local - -# Reconfigure the CPU and Memory on the newly created VM -# Will return the changes made - -- vsphere_guest: - vcenter_hostname: vcenter.mydomain.local - username: myuser - password: mypass - guest: newvm001 - state: reconfigured - vm_extra_config: - vcpu.hotadd: yes - mem.hotadd: yes - notes: This is a test VM - vm_disk: - disk1: - size_gb: 10 - type: thin - datastore: storage001 - vm_nic: - nic1: - type: vmxnet3 - network: VM Network - network_type: standard - vm_hardware: - memory_mb: 4096 - num_cpus: 4 - osid: centos64Guest - scsi: paravirtual - esxi: - datacenter: MyDatacenter - hostname: esx001.mydomain.local - -# Task to gather facts from a vSphere cluster only if the system is a VMWare guest - -- vsphere_guest: - vcenter_hostname: vcenter.mydomain.local - username: myuser - password: mypass - guest: newvm001 - vmware_guest_facts: yes - - -# Typical output of a vsphere_facts run on a guest - -- hw_eth0: - - addresstype: "assigned" - label: "Network adapter 1" - macaddress: "00:22:33:33:44:55" - macaddress_dash: "00-22-33-33-44-55" - summary: "VM Network" - hw_guest_full_name: "newvm001" - hw_guest_id: "rhel6_64Guest" - hw_memtotal_mb: 2048 - hw_name: "centos64Guest" - hw_processor_count: 2 - hw_product_uuid: "ef50bac8-2845-40ff-81d9-675315501dac" - -# Remove a vm from vSphere -# The VM must be powered_off of you need to use force 
to force a shutdown - -- vsphere_guest: - vcenter_hostname: vcenter.mydomain.local - username: myuser - password: mypass - guest: newvm001 - state: absent - force: yes -''' - -def add_scsi_controller(module, s, config, devices, type="paravirtual", bus_num=0, disk_ctrl_key=1): - # add a scsi controller - scsi_ctrl_spec = config.new_deviceChange() - scsi_ctrl_spec.set_element_operation('add') - - if type == "lsi": - # For RHEL5 - scsi_ctrl = VI.ns0.VirtualLsiLogicController_Def("scsi_ctrl").pyclass() - elif type == "paravirtual": - # For RHEL6 - scsi_ctrl = VI.ns0.ParaVirtualSCSIController_Def("scsi_ctrl").pyclass() - elif type == "lsi_sas": - scsi_ctrl = VI.ns0.VirtualLsiLogicSASController_Def( - "scsi_ctrl").pyclass() - elif type == "bus_logic": - scsi_ctrl = VI.ns0.VirtualBusLogicController_Def("scsi_ctrl").pyclass() - else: - s.disconnect() - module.fail_json( - msg="Error adding scsi controller to vm spec. No scsi controller" - " type of: %s" % (type)) - - scsi_ctrl.set_element_busNumber(int(bus_num)) - scsi_ctrl.set_element_key(int(disk_ctrl_key)) - scsi_ctrl.set_element_sharedBus("noSharing") - scsi_ctrl_spec.set_element_device(scsi_ctrl) - # Add the scsi controller to the VM spec. 
- devices.append(scsi_ctrl_spec) - return disk_ctrl_key - - -def add_disk(module, s, config_target, config, devices, datastore, type="thin", size=200000, disk_ctrl_key=1, disk_number=0, key=0): - # add a vmdk disk - # Verify the datastore exists - datastore_name, ds = find_datastore(module, s, datastore, config_target) - # create a new disk - file based - for the vm - disk_spec = config.new_deviceChange() - disk_spec.set_element_fileOperation("create") - disk_spec.set_element_operation("add") - disk_ctlr = VI.ns0.VirtualDisk_Def("disk_ctlr").pyclass() - disk_backing = VI.ns0.VirtualDiskFlatVer2BackingInfo_Def( - "disk_backing").pyclass() - disk_backing.set_element_fileName(datastore_name) - disk_backing.set_element_diskMode("persistent") - if type != "thick": - disk_backing.set_element_thinProvisioned(1) - disk_ctlr.set_element_key(key) - disk_ctlr.set_element_controllerKey(int(disk_ctrl_key)) - disk_ctlr.set_element_unitNumber(int(disk_number)) - disk_ctlr.set_element_backing(disk_backing) - disk_ctlr.set_element_capacityInKB(int(size)) - disk_spec.set_element_device(disk_ctlr) - devices.append(disk_spec) - - -def add_cdrom(module, s, config_target, config, devices, default_devs, type="client", vm_cd_iso_path=None): - # Add a cd-rom - # Make sure the datastore exists. 
- if vm_cd_iso_path: - iso_location = vm_cd_iso_path.split('/', 1) - datastore, ds = find_datastore( - module, s, iso_location[0], config_target) - iso_path = iso_location[1] - - # find ide controller - ide_ctlr = None - for dev in default_devs: - if dev.typecode.type[1] == "VirtualIDEController": - ide_ctlr = dev - - # add a cdrom based on a physical device - if ide_ctlr: - cd_spec = config.new_deviceChange() - cd_spec.set_element_operation('add') - cd_ctrl = VI.ns0.VirtualCdrom_Def("cd_ctrl").pyclass() - - if type == "iso": - iso = VI.ns0.VirtualCdromIsoBackingInfo_Def("iso").pyclass() - ds_ref = iso.new_datastore(ds) - ds_ref.set_attribute_type(ds.get_attribute_type()) - iso.set_element_datastore(ds_ref) - iso.set_element_fileName("%s %s" % (datastore, iso_path)) - cd_ctrl.set_element_backing(iso) - cd_ctrl.set_element_key(20) - cd_ctrl.set_element_controllerKey(ide_ctlr.get_element_key()) - cd_ctrl.set_element_unitNumber(0) - cd_spec.set_element_device(cd_ctrl) - elif type == "client": - client = VI.ns0.VirtualCdromRemoteAtapiBackingInfo_Def( - "client").pyclass() - client.set_element_deviceName("") - cd_ctrl.set_element_backing(client) - cd_ctrl.set_element_key(20) - cd_ctrl.set_element_controllerKey(ide_ctlr.get_element_key()) - cd_ctrl.set_element_unitNumber(0) - cd_spec.set_element_device(cd_ctrl) - else: - s.disconnect() - module.fail_json( - msg="Error adding cdrom of type %s to vm spec. 
" - " cdrom type can either be iso or client" % (type)) - - devices.append(cd_spec) - - -def add_nic(module, s, nfmor, config, devices, nic_type="vmxnet3", network_name="VM Network", network_type="standard"): - # add a NIC - # Different network card types are: "VirtualE1000", - # "VirtualE1000e","VirtualPCNet32", "VirtualVmxnet", "VirtualNmxnet2", - # "VirtualVmxnet3" - nic_spec = config.new_deviceChange() - nic_spec.set_element_operation("add") - - if nic_type == "e1000": - nic_ctlr = VI.ns0.VirtualE1000_Def("nic_ctlr").pyclass() - elif nic_type == "e1000e": - nic_ctlr = VI.ns0.VirtualE1000e_Def("nic_ctlr").pyclass() - elif nic_type == "pcnet32": - nic_ctlr = VI.ns0.VirtualPCNet32_Def("nic_ctlr").pyclass() - elif nic_type == "vmxnet": - nic_ctlr = VI.ns0.VirtualVmxnet_Def("nic_ctlr").pyclass() - elif nic_type == "vmxnet2": - nic_ctlr = VI.ns0.VirtualVmxnet2_Def("nic_ctlr").pyclass() - elif nic_type == "vmxnet3": - nic_ctlr = VI.ns0.VirtualVmxnet3_Def("nic_ctlr").pyclass() - else: - s.disconnect() - module.fail_json( - msg="Error adding nic to vm spec. No nic type of: %s" % - (nic_type)) - - if network_type == "standard": - nic_backing = VI.ns0.VirtualEthernetCardNetworkBackingInfo_Def( - "nic_backing").pyclass() - nic_backing.set_element_deviceName(network_name) - elif network_type == "dvs": - # Get the portgroup key - portgroupKey = find_portgroup_key(module, s, nfmor, network_name) - # Get the dvswitch uuid - dvswitch_uuid = find_dvswitch_uuid(module, s, nfmor, portgroupKey) - - nic_backing_port = VI.ns0.DistributedVirtualSwitchPortConnection_Def( - "nic_backing_port").pyclass() - nic_backing_port.set_element_switchUuid(dvswitch_uuid) - nic_backing_port.set_element_portgroupKey(portgroupKey) - - nic_backing = VI.ns0.VirtualEthernetCardDistributedVirtualPortBackingInfo_Def( - "nic_backing").pyclass() - nic_backing.set_element_port(nic_backing_port) - else: - s.disconnect() - module.fail_json( - msg="Error adding nic backing to vm spec. 
No network type of:" - " %s" % (network_type)) - - nic_ctlr.set_element_addressType("generated") - nic_ctlr.set_element_backing(nic_backing) - nic_ctlr.set_element_key(4) - nic_spec.set_element_device(nic_ctlr) - devices.append(nic_spec) - - -def find_datastore(module, s, datastore, config_target): - # Verify the datastore exists and put it in brackets if it does. - ds = None - for d in config_target.Datastore: - if (d.Datastore.Accessible and - (datastore and d.Datastore.Name == datastore) - or (not datastore)): - ds = d.Datastore.Datastore - datastore = d.Datastore.Name - break - if not ds: - s.disconnect() - module.fail_json(msg="Datastore: %s does not appear to exist" % - (datastore)) - - datastore_name = "[%s]" % datastore - return datastore_name, ds - - -def find_portgroup_key(module, s, nfmor, network_name): - # Find a portgroups key given the portgroup name. - - # Grab all the distributed virtual portgroup's names and key's. - dvpg_mors = s._retrieve_properties_traversal( - property_names=['name', 'key'], - from_node=nfmor, obj_type='DistributedVirtualPortgroup') - - # Get the correct portgroup managed object. - dvpg_mor = None - for dvpg in dvpg_mors: - if dvpg_mor: - break - for p in dvpg.PropSet: - if p.Name == "name" and p.Val == network_name: - dvpg_mor = dvpg - if dvpg_mor: - break - - # If dvpg_mor is empty we didn't find the named portgroup. - if dvpg_mor is None: - s.disconnect() - module.fail_json( - msg="Could not find the distributed virtual portgroup named" - " %s" % network_name) - - # Get the portgroup key - portgroupKey = None - for p in dvpg_mor.PropSet: - if p.Name == "key": - portgroupKey = p.Val - - return portgroupKey - - -def find_dvswitch_uuid(module, s, nfmor, portgroupKey): - # Find a dvswitch's uuid given a portgroup key. - # Function searches all dvswitches in the datacenter to find the switch - # that has the portgroup key. 
- - # Grab the dvswitch uuid and portgroup properties - dvswitch_mors = s._retrieve_properties_traversal( - property_names=['uuid', 'portgroup'], - from_node=nfmor, obj_type='DistributedVirtualSwitch') - - dvswitch_mor = None - # Get the dvswitches managed object - for dvswitch in dvswitch_mors: - if dvswitch_mor: - break - for p in dvswitch.PropSet: - if p.Name == "portgroup": - pg_mors = p.Val.ManagedObjectReference - for pg_mor in pg_mors: - if dvswitch_mor: - break - key_mor = s._get_object_properties( - pg_mor, property_names=['key']) - for key in key_mor.PropSet: - if key.Val == portgroupKey: - dvswitch_mor = dvswitch - - # Get the switches uuid - dvswitch_uuid = None - for p in dvswitch_mor.PropSet: - if p.Name == "uuid": - dvswitch_uuid = p.Val - - return dvswitch_uuid - - -def spec_singleton(spec, request, vm): - - if not spec: - _this = request.new__this(vm._mor) - _this.set_attribute_type(vm._mor.get_attribute_type()) - request.set_element__this(_this) - spec = request.new_spec() - return spec - - -def vmdisk_id(vm, current_datastore_name): - id_list = [] - for vm_disk in vm._disks: - if current_datastore_name in vm_disk['descriptor']: - id_list.append(vm_disk['device']['key']) - return id_list - - -def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name, guest, vm_extra_config, vm_hardware, vm_disk, vm_nic, state, force): - spec = None - changed = False - changes = {} - request = VI.ReconfigVM_TaskRequestMsg() - shutdown = False - - memoryHotAddEnabled = bool(vm.properties.config.memoryHotAddEnabled) - cpuHotAddEnabled = bool(vm.properties.config.cpuHotAddEnabled) - cpuHotRemoveEnabled = bool(vm.properties.config.cpuHotRemoveEnabled) - - # Change Memory - if vm_hardware['memory_mb']: - - if int(vm_hardware['memory_mb']) != vm.properties.config.hardware.memoryMB: - spec = spec_singleton(spec, request, vm) - - if vm.is_powered_on(): - if force: - # No hot add but force - if not memoryHotAddEnabled: - shutdown = True - elif 
int(vm_hardware['memory_mb']) < vm.properties.config.hardware.memoryMB: - shutdown = True - else: - # Fail on no hot add and no force - if not memoryHotAddEnabled: - module.fail_json( - msg="memoryHotAdd is not enabled. force is " - "required for shutdown") - - # Fail on no force and memory shrink - elif int(vm_hardware['memory_mb']) < vm.properties.config.hardware.memoryMB: - module.fail_json( - msg="Cannot lower memory on a live VM. force is " - "required for shutdown") - - # set the new RAM size - spec.set_element_memoryMB(int(vm_hardware['memory_mb'])) - changes['memory'] = vm_hardware['memory_mb'] - - # ====( Config Memory )====# - if vm_hardware['num_cpus']: - if int(vm_hardware['num_cpus']) != vm.properties.config.hardware.numCPU: - spec = spec_singleton(spec, request, vm) - - if vm.is_powered_on(): - if force: - # No hot add but force - if not cpuHotAddEnabled: - shutdown = True - elif int(vm_hardware['num_cpus']) < vm.properties.config.hardware.numCPU: - if not cpuHotRemoveEnabled: - shutdown = True - else: - # Fail on no hot add and no force - if not cpuHotAddEnabled: - module.fail_json( - msg="cpuHotAdd is not enabled. force is " - "required for shutdown") - - # Fail on no force and cpu shrink without hot remove - elif int(vm_hardware['num_cpus']) < vm.properties.config.hardware.numCPU: - if not cpuHotRemoveEnabled: - module.fail_json( - msg="Cannot lower CPU on a live VM without " - "cpuHotRemove. 
force is required for shutdown") - - spec.set_element_numCPUs(int(vm_hardware['num_cpus'])) - - changes['cpu'] = vm_hardware['num_cpus'] - - if len(changes): - - if shutdown and vm.is_powered_on(): - try: - vm.power_off(sync_run=True) - vm.get_status() - - except Exception, e: - module.fail_json( - msg='Failed to shutdown vm %s: %s' % (guest, e) - ) - - request.set_element_spec(spec) - ret = vsphere_client._proxy.ReconfigVM_Task(request)._returnval - - # Wait for the task to finish - task = VITask(ret, vsphere_client) - status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR]) - if status == task.STATE_SUCCESS: - changed = True - elif status == task.STATE_ERROR: - module.fail_json( - msg="Error reconfiguring vm: %s" % task.get_error_message()) - - if vm.is_powered_off(): - try: - vm.power_on(sync_run=True) - except Exception, e: - module.fail_json( - msg='Failed to power on vm %s : %s' % (guest, e) - ) - - vsphere_client.disconnect() - if changed: - module.exit_json(changed=True, changes=changes) - - module.exit_json(changed=False) - - -def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest, vm_extra_config, vm_hardware, vm_disk, vm_nic, vm_hw_version, state): - - datacenter = esxi['datacenter'] - esxi_hostname = esxi['hostname'] - # Datacenter managed object reference - dclist = [k for k, - v in vsphere_client.get_datacenters().items() if v == datacenter] - if dclist: - dcmor=dclist[0] - else: - vsphere_client.disconnect() - module.fail_json(msg="Cannot find datacenter named: %s" % datacenter) - - dcprops = VIProperty(vsphere_client, dcmor) - - # hostFolder managed reference - hfmor = dcprops.hostFolder._obj - - # virtualmachineFolder managed object reference - vmfmor = dcprops.vmFolder._obj - - # networkFolder managed object reference - nfmor = dcprops.networkFolder._obj - - # Grab the computerResource name and host properties - crmors = vsphere_client._retrieve_properties_traversal( - property_names=['name', 'host'], - 
from_node=hfmor, - obj_type='ComputeResource') - - # Grab the host managed object reference of the esxi_hostname - try: - hostmor = [k for k, - v in vsphere_client.get_hosts().items() if v == esxi_hostname][0] - except IndexError, e: - vsphere_client.disconnect() - module.fail_json(msg="Cannot find esx host named: %s" % esxi_hostname) - - # Grab the computerResource managed object reference of the host we are - # creating the VM on. - crmor = None - for cr in crmors: - if crmor: - break - for p in cr.PropSet: - if p.Name == "host": - for h in p.Val.get_element_ManagedObjectReference(): - if h == hostmor: - crmor = cr.Obj - break - if crmor: - break - crprops = VIProperty(vsphere_client, crmor) - - # Get resource pool managed reference - # Requires that a cluster name be specified. - if resource_pool: - try: - cluster = [k for k, - v in vsphere_client.get_clusters().items() if v == cluster_name][0] - except IndexError, e: - vsphere_client.disconnect() - module.fail_json(msg="Cannot find Cluster named: %s" % - cluster_name) - - try: - rpmor = [k for k, v in vsphere_client.get_resource_pools( - from_mor=cluster).items() - if v == resource_pool][0] - except IndexError, e: - vsphere_client.disconnect() - module.fail_json(msg="Cannot find Resource Pool named: %s" % - resource_pool) - - else: - rpmor = crprops.resourcePool._obj - - # CREATE VM CONFIGURATION - # get config target - request = VI.QueryConfigTargetRequestMsg() - _this = request.new__this(crprops.environmentBrowser._obj) - _this.set_attribute_type( - crprops.environmentBrowser._obj.get_attribute_type()) - request.set_element__this(_this) - h = request.new_host(hostmor) - h.set_attribute_type(hostmor.get_attribute_type()) - request.set_element_host(h) - config_target = vsphere_client._proxy.QueryConfigTarget(request)._returnval - - # get default devices - request = VI.QueryConfigOptionRequestMsg() - _this = request.new__this(crprops.environmentBrowser._obj) - _this.set_attribute_type( - 
crprops.environmentBrowser._obj.get_attribute_type()) - request.set_element__this(_this) - h = request.new_host(hostmor) - h.set_attribute_type(hostmor.get_attribute_type()) - request.set_element_host(h) - config_option = vsphere_client._proxy.QueryConfigOption(request)._returnval - default_devs = config_option.DefaultDevice - - # add parameters to the create vm task - create_vm_request = VI.CreateVM_TaskRequestMsg() - config = create_vm_request.new_config() - if vm_hw_version: - config.set_element_version(vm_hw_version) - vmfiles = config.new_files() - datastore_name, ds = find_datastore( - module, vsphere_client, vm_disk['disk1']['datastore'], config_target) - vmfiles.set_element_vmPathName(datastore_name) - config.set_element_files(vmfiles) - config.set_element_name(guest) - if 'notes' in vm_extra_config: - config.set_element_annotation(vm_extra_config['notes']) - config.set_element_memoryMB(int(vm_hardware['memory_mb'])) - config.set_element_numCPUs(int(vm_hardware['num_cpus'])) - config.set_element_guestId(vm_hardware['osid']) - devices = [] - - # Attach all the hardware we want to the VM spec. - # Add a scsi controller to the VM spec. - disk_ctrl_key = add_scsi_controller( - module, vsphere_client, config, devices, vm_hardware['scsi']) - if vm_disk: - disk_num = 0 - disk_key = 0 - for disk in sorted(vm_disk.iterkeys()): - try: - datastore = vm_disk[disk]['datastore'] - except KeyError: - vsphere_client.disconnect() - module.fail_json( - msg="Error on %s definition. datastore needs to be" - " specified." % disk) - try: - disksize = int(vm_disk[disk]['size_gb']) - # Convert the disk size to kiloboytes - disksize = disksize * 1024 * 1024 - except (KeyError, ValueError): - vsphere_client.disconnect() - module.fail_json(msg="Error on %s definition. size needs to be specified as an integer." % disk) - try: - disktype = vm_disk[disk]['type'] - except KeyError: - vsphere_client.disconnect() - module.fail_json( - msg="Error on %s definition. 
type needs to be" - " specified." % disk) - # Add the disk to the VM spec. - add_disk( - module, vsphere_client, config_target, config, - devices, datastore, disktype, disksize, disk_ctrl_key, - disk_num, disk_key) - disk_num = disk_num + 1 - disk_key = disk_key + 1 - if 'vm_cdrom' in vm_hardware: - cdrom_iso_path = None - cdrom_type = None - try: - cdrom_type = vm_hardware['vm_cdrom']['type'] - except KeyError: - vsphere_client.disconnect() - module.fail_json( - msg="Error on %s definition. cdrom type needs to be" - " specified." % vm_hardware['vm_cdrom']) - if cdrom_type == 'iso': - try: - cdrom_iso_path = vm_hardware['vm_cdrom']['iso_path'] - except KeyError: - vsphere_client.disconnect() - module.fail_json( - msg="Error on %s definition. cdrom iso_path needs" - " to be specified." % vm_hardware['vm_cdrom']) - # Add a CD-ROM device to the VM. - add_cdrom(module, vsphere_client, config_target, config, devices, - default_devs, cdrom_type, cdrom_iso_path) - if vm_nic: - for nic in sorted(vm_nic.iterkeys()): - try: - nictype = vm_nic[nic]['type'] - except KeyError: - vsphere_client.disconnect() - module.fail_json( - msg="Error on %s definition. type needs to be " - " specified." % nic) - try: - network = vm_nic[nic]['network'] - except KeyError: - vsphere_client.disconnect() - module.fail_json( - msg="Error on %s definition. network needs to be " - " specified." % nic) - try: - network_type = vm_nic[nic]['network_type'] - except KeyError: - vsphere_client.disconnect() - module.fail_json( - msg="Error on %s definition. network_type needs to be " - " specified." % nic) - # Add the nic to the VM spec. 
- add_nic(module, vsphere_client, nfmor, config, devices, - nictype, network, network_type) - - config.set_element_deviceChange(devices) - create_vm_request.set_element_config(config) - folder_mor = create_vm_request.new__this(vmfmor) - folder_mor.set_attribute_type(vmfmor.get_attribute_type()) - create_vm_request.set_element__this(folder_mor) - rp_mor = create_vm_request.new_pool(rpmor) - rp_mor.set_attribute_type(rpmor.get_attribute_type()) - create_vm_request.set_element_pool(rp_mor) - host_mor = create_vm_request.new_host(hostmor) - host_mor.set_attribute_type(hostmor.get_attribute_type()) - create_vm_request.set_element_host(host_mor) - - # CREATE THE VM - taskmor = vsphere_client._proxy.CreateVM_Task(create_vm_request)._returnval - task = VITask(taskmor, vsphere_client) - task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR]) - if task.get_state() == task.STATE_ERROR: - vsphere_client.disconnect() - module.fail_json(msg="Error creating vm: %s" % - task.get_error_message()) - else: - # We always need to get the vm because we are going to gather facts - vm = vsphere_client.get_vm_by_name(guest) - - # VM was created. If there is any extra config options specified, set - # them here , disconnect from vcenter, then exit. 
- if vm_extra_config: - vm.set_extra_config(vm_extra_config) - - # Power on the VM if it was requested - power_state(vm, state, True) - - vsphere_client.disconnect() - module.exit_json( - ansible_facts=gather_facts(vm), - changed=True, - changes="Created VM %s" % guest) - - -def delete_vm(vsphere_client, module, guest, vm, force): - try: - - if vm.is_powered_on(): - if force: - try: - vm.power_off(sync_run=True) - vm.get_status() - - except Exception, e: - module.fail_json( - msg='Failed to shutdown vm %s: %s' % (guest, e)) - else: - module.fail_json( - msg='You must use either shut the vm down first or ' - 'use force ') - - # Invoke Destroy_Task - request = VI.Destroy_TaskRequestMsg() - _this = request.new__this(vm._mor) - _this.set_attribute_type(vm._mor.get_attribute_type()) - request.set_element__this(_this) - ret = vsphere_client._proxy.Destroy_Task(request)._returnval - task = VITask(ret, vsphere_client) - - # Wait for the task to finish - status = task.wait_for_state( - [task.STATE_SUCCESS, task.STATE_ERROR]) - if status == task.STATE_ERROR: - vsphere_client.disconnect() - module.fail_json(msg="Error removing vm: %s %s" % - task.get_error_message()) - module.exit_json(changed=True, changes="VM %s deleted" % guest) - except Exception, e: - module.fail_json( - msg='Failed to delete vm %s : %s' % (guest, e)) - - -def power_state(vm, state, force): - """ - Correctly set the power status for a VM determined by the current and - requested states. force is forceful - """ - power_status = vm.get_status() - - check_status = ' '.join(state.split("_")).upper() - - # Need Force - if not force and power_status in [ - 'SUSPENDED', 'POWERING ON', - 'RESETTING', 'BLOCKED ON MSG' - ]: - - return "VM is in %s power state. Force is required!" 
% power_status - - # State is already true - if power_status == check_status: - return False - - else: - try: - if state == 'powered_off': - vm.power_off(sync_run=True) - - elif state == 'powered_on': - vm.power_on(sync_run=True) - - elif state == 'restarted': - if power_status in ('POWERED ON', 'POWERING ON', 'RESETTING'): - vm.reset(sync_run=False) - else: - return "Cannot restart VM in the current state %s" \ - % power_status - return True - - except Exception, e: - return e - - return False - - -def gather_facts(vm): - """ - Gather facts for VM directly from vsphere. - """ - vm.get_properties() - facts = { - 'module_hw': True, - 'hw_name': vm.properties.name, - 'hw_guest_full_name': vm.properties.config.guestFullName, - 'hw_guest_id': vm.properties.config.guestId, - 'hw_product_uuid': vm.properties.config.uuid, - 'hw_processor_count': vm.properties.config.hardware.numCPU, - 'hw_memtotal_mb': vm.properties.config.hardware.memoryMB, - } - - ifidx = 0 - for entry in vm.properties.config.hardware.device: - - if not hasattr(entry, 'macAddress'): - continue - - factname = 'hw_eth' + str(ifidx) - facts[factname] = { - 'addresstype': entry.addressType, - 'label': entry.deviceInfo.label, - 'macaddress': entry.macAddress, - 'macaddress_dash': entry.macAddress.replace(':', '-'), - 'summary': entry.deviceInfo.summary, - } - - ifidx += 1 - - return facts - - -class DefaultVMConfig(object): - - """ - Shallow and deep dict comparison for interfaces - """ - - def __init__(self, check_dict, interface_dict): - self.check_dict, self.interface_dict = check_dict, interface_dict - self.set_current, self.set_past = set( - check_dict.keys()), set(interface_dict.keys()) - self.intersect = self.set_current.intersection(self.set_past) - self.recursive_missing = None - - def shallow_diff(self): - return self.set_past - self.intersect - - def recursive_diff(self): - - if not self.recursive_missing: - self.recursive_missing = [] - for key, value in self.interface_dict.items(): - if 
isinstance(value, dict): - for k, v in value.items(): - if k in self.check_dict[key]: - if not isinstance(self.check_dict[key][k], v): - try: - if v == int: - self.check_dict[key][k] = int(self.check_dict[key][k]) - elif v == basestring: - self.check_dict[key][k] = str(self.check_dict[key][k]) - else: - raise ValueError - except ValueError: - self.recursive_missing.append((k, v)) - else: - self.recursive_missing.append((k, v)) - - return self.recursive_missing - - -def config_check(name, passed, default, module): - """ - Checks that the dict passed for VM configuration matches the required - interface declared at the top of __main__ - """ - - diff = DefaultVMConfig(passed, default) - if len(diff.shallow_diff()): - module.fail_json( - msg="Missing required key/pair [%s]. %s must contain %s" % - (', '.join(diff.shallow_diff()), name, default)) - - if diff.recursive_diff(): - module.fail_json( - msg="Config mismatch for %s on %s" % - (name, diff.recursive_diff())) - - return True - - -def main(): - - vm = None - - proto_vm_hardware = { - 'memory_mb': int, - 'num_cpus': int, - 'scsi': basestring, - 'osid': basestring - } - - proto_vm_disk = { - 'disk1': { - 'datastore': basestring, - 'size_gb': int, - 'type': basestring - } - } - - proto_vm_nic = { - 'nic1': { - 'type': basestring, - 'network': basestring, - 'network_type': basestring - } - } - - proto_esxi = { - 'datacenter': basestring, - 'hostname': basestring - } - - module = AnsibleModule( - argument_spec=dict( - vcenter_hostname=dict(required=True, type='str'), - username=dict(required=True, type='str'), - password=dict(required=True, type='str'), - state=dict( - required=False, - choices=[ - 'powered_on', - 'powered_off', - 'present', - 'absent', - 'restarted', - 'reconfigured' - ], - default='present'), - vmware_guest_facts=dict(required=False, choices=BOOLEANS), - guest=dict(required=True, type='str'), - vm_disk=dict(required=False, type='dict', default={}), - vm_nic=dict(required=False, type='dict', 
default={}), - vm_hardware=dict(required=False, type='dict', default={}), - vm_extra_config=dict(required=False, type='dict', default={}), - vm_hw_version=dict(required=False, default=None, type='str'), - resource_pool=dict(required=False, default=None, type='str'), - cluster=dict(required=False, default=None, type='str'), - force=dict(required=False, choices=BOOLEANS, default=False), - esxi=dict(required=False, type='dict', default={}), - - - ), - supports_check_mode=False, - mutually_exclusive=[['state', 'vmware_guest_facts']], - required_together=[ - ['state', 'force'], - [ - 'state', - 'vm_disk', - 'vm_nic', - 'vm_hardware', - 'esxi' - ], - ['resource_pool', 'cluster'] - ], - ) - - if not HAS_PYSPHERE: - module.fail_json(msg='pysphere module required') - - vcenter_hostname = module.params['vcenter_hostname'] - username = module.params['username'] - password = module.params['password'] - vmware_guest_facts = module.params['vmware_guest_facts'] - state = module.params['state'] - guest = module.params['guest'] - force = module.params['force'] - vm_disk = module.params['vm_disk'] - vm_nic = module.params['vm_nic'] - vm_hardware = module.params['vm_hardware'] - vm_extra_config = module.params['vm_extra_config'] - vm_hw_version = module.params['vm_hw_version'] - esxi = module.params['esxi'] - resource_pool = module.params['resource_pool'] - cluster = module.params['cluster'] - - # CONNECT TO THE SERVER - viserver = VIServer() - try: - viserver.connect(vcenter_hostname, username, password) - except VIApiException, err: - module.fail_json(msg="Cannot connect to %s: %s" % - (vcenter_hostname, err)) - - # Check if the VM exists before continuing - try: - vm = viserver.get_vm_by_name(guest) - except Exception: - pass - - if vm: - # Run for facts only - if vmware_guest_facts: - try: - module.exit_json(ansible_facts=gather_facts(vm)) - except Exception, e: - module.fail_json( - msg="Fact gather failed with exception %s" % e) - - # Power Changes - elif state in 
['powered_on', 'powered_off', 'restarted']: - state_result = power_state(vm, state, force) - - # Failure - if isinstance(state_result, basestring): - module.fail_json(msg=state_result) - else: - module.exit_json(changed=state_result) - - # Just check if there - elif state == 'present': - module.exit_json(changed=False) - - # Fail on reconfig without params - elif state == 'reconfigured': - reconfigure_vm( - vsphere_client=viserver, - vm=vm, - module=module, - esxi=esxi, - resource_pool=resource_pool, - cluster_name=cluster, - guest=guest, - vm_extra_config=vm_extra_config, - vm_hardware=vm_hardware, - vm_disk=vm_disk, - vm_nic=vm_nic, - state=state, - force=force - ) - elif state == 'absent': - delete_vm( - vsphere_client=viserver, - module=module, - guest=guest, - vm=vm, - force=force) - - # VM doesn't exist - else: - - # Fail for fact gather task - if vmware_guest_facts: - module.fail_json( - msg="No such VM %s. Fact gathering requires an existing vm" - % guest) - if state in ['restarted', 'reconfigured']: - module.fail_json( - msg="No such VM %s. 
States [" - "restarted, reconfigured] required an existing VM" % guest) - elif state == 'absent': - module.exit_json(changed=False, msg="vm %s not present" % guest) - - # Create the VM - elif state in ['present', 'powered_off', 'powered_on']: - - # Check the guest_config - config_check("vm_disk", vm_disk, proto_vm_disk, module) - config_check("vm_nic", vm_nic, proto_vm_nic, module) - config_check("vm_hardware", vm_hardware, proto_vm_hardware, module) - config_check("esxi", esxi, proto_esxi, module) - - create_vm( - vsphere_client=viserver, - module=module, - esxi=esxi, - resource_pool=resource_pool, - cluster_name=cluster, - guest=guest, - vm_extra_config=vm_extra_config, - vm_hardware=vm_hardware, - vm_disk=vm_disk, - vm_nic=vm_nic, - vm_hw_version=vm_hw_version, - state=state - ) - - viserver.disconnect() - module.exit_json( - changed=False, - vcenter=vcenter_hostname) - - -# this is magic, see lib/ansible/module_common.py -#<> -main() diff --git a/library/commands/command b/library/commands/command deleted file mode 100644 index c1fabd4f9b..0000000000 --- a/library/commands/command +++ /dev/null @@ -1,275 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan , and others -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -import sys -import datetime -import traceback -import re -import shlex -import os - -DOCUMENTATION = ''' ---- -module: command -version_added: historical -short_description: Executes a command on a remote node -description: - - The M(command) module takes the command name followed by a list of space-delimited arguments. - - The given command will be executed on all selected nodes. It will not be - processed through the shell, so variables like C($HOME) and operations - like C("<"), C(">"), C("|"), and C("&") will not work (use the M(shell) - module if you need these features). -options: - free_form: - description: - - the command module takes a free form command to run. There is no parameter actually named 'free form'. - See the examples! - required: true - default: null - aliases: [] - creates: - description: - - a filename, when it already exists, this step will B(not) be run. - required: no - default: null - removes: - description: - - a filename, when it does not exist, this step will B(not) be run. - version_added: "0.8" - required: no - default: null - chdir: - description: - - cd into this directory before running the command - version_added: "0.6" - required: false - default: null - executable: - description: - - change the shell used to execute the command. Should be an absolute path to the executable. - required: false - default: null - version_added: "0.9" - warn: - version_added: "1.8" - default: yes - description: - - if command warnings are on in ansible.cfg, do not warn about this particular line if set to no/false. - required: false - default: True -notes: - - If you want to run a command through the shell (say you are using C(<), - C(>), C(|), etc), you actually want the M(shell) module instead. The - M(command) module is much more secure as it's not affected by the user's - environment. - - " C(creates), C(removes), and C(chdir) can be specified after the command. 
For instance, if you only want to run a command if a certain file does not exist, use this." -author: Michael DeHaan -''' - -EXAMPLES = ''' -# Example from Ansible Playbooks. -- command: /sbin/shutdown -t now - -# Run the command if the specified file does not exist. -- command: /usr/bin/make_database.sh arg1 arg2 creates=/path/to/database - -# You can also use the 'args' form to provide the options. This command -# will change the working directory to somedir/ and will only run when -# /path/to/database doesn't exist. -- command: /usr/bin/make_database.sh arg1 arg2 - args: - chdir: somedir/ - creates: /path/to/database -''' - -# This is a pretty complex regex, which functions as follows: -# -# 1. (^|\s) -# ^ look for a space or the beginning of the line -# 2. (creates|removes|chdir|executable|NO_LOG)= -# ^ look for a valid param, followed by an '=' -# 3. (?P[\'"])? -# ^ look for an optional quote character, which can either be -# a single or double quote character, and store it for later -# 4. (.*?) -# ^ match everything in a non-greedy manner until... -# 5. (?(quote)(?[\'"])?(.*?)(?(quote)(? 
1 and (v.startswith('"') and v.endswith('"') or v.startswith("'") and v.endswith("'")): - v = v[1:-1] - if k in ('creates', 'removes', 'chdir', 'executable', 'NO_LOG'): - if k == "chdir": - v = os.path.abspath(os.path.expanduser(v)) - if not (os.path.exists(v) and os.path.isdir(v)): - self.fail_json(rc=258, msg="cannot change to directory '%s': path does not exist" % v) - elif k == "executable": - v = os.path.abspath(os.path.expanduser(v)) - if not (os.path.exists(v)): - self.fail_json(rc=258, msg="cannot use executable '%s': file does not exist" % v) - params[k] = v - # Remove any of the above k=v params from the args string - args = PARAM_REGEX.sub('', args) - params['args'] = args.strip() - - return (params, params['args']) - -main() diff --git a/library/commands/raw b/library/commands/raw deleted file mode 100644 index 87f2b5c4bd..0000000000 --- a/library/commands/raw +++ /dev/null @@ -1,43 +0,0 @@ -# this is a virtual module that is entirely implemented server side - -DOCUMENTATION = ''' ---- -module: raw -version_added: historical -short_description: Executes a low-down and dirty SSH command -options: - free_form: - description: - - the raw module takes a free form command to run - required: true - executable: - description: - - change the shell used to execute the command. Should be an absolute path to the executable. - required: false - version_added: "1.0" -description: - - Executes a low-down and dirty SSH command, not going through the module - subsystem. This is useful and should only be done in two cases. The - first case is installing C(python-simplejson) on older (Python 2.4 and - before) hosts that need it as a dependency to run modules, since nearly - all core modules require it. Another is speaking to any devices such as - routers that do not have any Python installed. In any other case, using - the M(shell) or M(command) module is much more appropriate. Arguments - given to M(raw) are run directly through the configured remote shell. 
- Standard output, error output and return code are returned when - available. There is no change handler support for this module. - - This module does not require python on the remote system, much like - the M(script) module. -notes: - - If you want to execute a command securely and predictably, it may be - better to use the M(command) module instead. Best practices when writing - playbooks will follow the trend of using M(command) unless M(shell) is - explicitly required. When running ad-hoc commands, use your best - judgement. -author: Michael DeHaan -''' - -EXAMPLES = ''' -# Bootstrap a legacy python 2.4 host -- raw: yum -y install python-simplejson -''' diff --git a/library/commands/script b/library/commands/script deleted file mode 100644 index 01a1ae34e7..0000000000 --- a/library/commands/script +++ /dev/null @@ -1,47 +0,0 @@ - -DOCUMENTATION = """ ---- -module: script -version_added: "0.9" -short_description: Runs a local script on a remote node after transferring it -description: - - "The M(script) module takes the script name followed by a list of - space-delimited arguments. " - - "The local script at path will be transferred to the remote node and then executed. " - - "The given script will be processed through the shell environment on the remote node. " - - "This module does not require python on the remote system, much like - the M(raw) module. " -options: - free_form: - description: - - path to the local script file followed by optional arguments. - required: true - default: null - aliases: [] - creates: - description: - - a filename, when it already exists, this step will B(not) be run. - required: no - default: null - version_added: "1.5" - removes: - description: - - a filename, when it does not exist, this step will B(not) be run. - required: no - default: null - version_added: "1.5" -notes: - - It is usually preferable to write Ansible modules than pushing scripts. Convert your script to an Ansible module for bonus points! 
-author: Michael DeHaan -""" - -EXAMPLES = ''' -# Example from Ansible Playbooks -- script: /some/local/script.sh --some-arguments 1234 - -# Run a script that creates a file, but only if the file is not yet created -- script: /some/local/create_file.sh --some-arguments 1234 creates=/the/created/file.txt - -# Run a script that removes a file, but only if the file is not yet removed -- script: /some/local/remove_file.sh --some-arguments 1234 removes=/the/removed/file.txt -''' diff --git a/library/commands/shell b/library/commands/shell deleted file mode 100644 index b63a21080e..0000000000 --- a/library/commands/shell +++ /dev/null @@ -1,78 +0,0 @@ -# There is actually no actual shell module source, when you use 'shell' in ansible, -# it runs the 'command' module with special arguments and it behaves differently. -# See the command source and the comment "#USE_SHELL". - -DOCUMENTATION = ''' ---- -module: shell -short_description: Execute commands in nodes. -description: - - The M(shell) module takes the command name followed by a list of space-delimited arguments. - It is almost exactly like the M(command) module but runs - the command through a shell (C(/bin/sh)) on the remote node. -version_added: "0.2" -options: - free_form: - description: - - The shell module takes a free form command to run, as a string. There's not an actual - option named "free form". See the examples! - required: true - default: null - creates: - description: - - a filename, when it already exists, this step will B(not) be run. - required: no - default: null - removes: - description: - - a filename, when it does not exist, this step will B(not) be run. - version_added: "0.8" - required: no - default: null - chdir: - description: - - cd into this directory before running the command - required: false - default: null - version_added: "0.6" - executable: - description: - - change the shell used to execute the command. Should be an absolute path to the executable. 
- required: false - default: null - version_added: "0.9" - warn: - description: - - if command warnings are on in ansible.cfg, do not warn about this particular line if set to no/false. - required: false - default: True - version_added: "1.8" -notes: - - If you want to execute a command securely and predictably, it may be - better to use the M(command) module instead. Best practices when writing - playbooks will follow the trend of using M(command) unless M(shell) is - explicitly required. When running ad-hoc commands, use your best - judgement. - - To sanitize any variables passed to the shell module, you should use - "{{ var | quote }}" instead of just "{{ var }}" to make sure they don't include evil things like semicolons. - -requirements: [ ] -author: Michael DeHaan -''' - -EXAMPLES = ''' -# Execute the command in remote shell; stdout goes to the specified -# file on the remote. -- shell: somescript.sh >> somelog.txt - -# Change the working directory to somedir/ before executing the command. -- shell: somescript.sh >> somelog.txt chdir=somedir/ - -# You can also use the 'args' form to provide the options. This command -# will change the working directory to somedir/ and will only run when -# somedir/somelog.txt doesn't exist. -- shell: somescript.sh >> somelog.txt - args: - chdir: somedir/ - creates: somelog.txt -''' diff --git a/library/database/mongodb_user b/library/database/mongodb_user deleted file mode 100644 index 5d7e0897b6..0000000000 --- a/library/database/mongodb_user +++ /dev/null @@ -1,242 +0,0 @@ -#!/usr/bin/python - -# (c) 2012, Elliott Foster -# Sponsored by Four Kitchens http://fourkitchens.com. -# (c) 2014, Epic Games, Inc. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: mongodb_user -short_description: Adds or removes a user from a MongoDB database. -description: - - Adds or removes a user from a MongoDB database. -version_added: "1.1" -options: - login_user: - description: - - The username used to authenticate with - required: false - default: null - login_password: - description: - - The password used to authenticate with - required: false - default: null - login_host: - description: - - The host running the database - required: false - default: localhost - login_port: - description: - - The port to connect to - required: false - default: 27017 - replica_set: - version_added: "1.6" - description: - - Replica set to connect to (automatically connects to primary for writes) - required: false - default: null - database: - description: - - The name of the database to add/remove the user from - required: true - user: - description: - - The name of the user to add or remove - required: true - default: null - password: - description: - - The password to use for the user - required: false - default: null - roles: - version_added: "1.3" - description: - - "The database user roles valid values are one or more of the following: read, 'readWrite', 'dbAdmin', 'userAdmin', 'clusterAdmin', 'readAnyDatabase', 'readWriteAnyDatabase', 'userAdminAnyDatabase', 'dbAdminAnyDatabase'" - - This param requires mongodb 2.4+ and pymongo 2.5+ - required: false - default: "readWrite" - state: - state: - description: - - The database user state - required: false - default: present - choices: [ "present", "absent" ] -notes: - - Requires the pymongo Python package on the remote 
host, version 2.4.2+. This - can be installed using pip or the OS package manager. @see http://api.mongodb.org/python/current/installation.html -requirements: [ "pymongo" ] -author: Elliott Foster -''' - -EXAMPLES = ''' -# Create 'burgers' database user with name 'bob' and password '12345'. -- mongodb_user: database=burgers name=bob password=12345 state=present - -# Delete 'burgers' database user with name 'bob'. -- mongodb_user: database=burgers name=bob state=absent - -# Define more users with various specific roles (if not defined, no roles is assigned, and the user will be added via pre mongo 2.2 style) -- mongodb_user: database=burgers name=ben password=12345 roles='read' state=present -- mongodb_user: database=burgers name=jim password=12345 roles='readWrite,dbAdmin,userAdmin' state=present -- mongodb_user: database=burgers name=joe password=12345 roles='readWriteAnyDatabase' state=present - -# add a user to database in a replica set, the primary server is automatically discovered and written to -- mongodb_user: database=burgers name=bob replica_set=blecher password=12345 roles='readWriteAnyDatabase' state=present -''' - -import ConfigParser -from distutils.version import LooseVersion -try: - from pymongo.errors import ConnectionFailure - from pymongo.errors import OperationFailure - from pymongo import version as PyMongoVersion - from pymongo import MongoClient -except ImportError: - try: # for older PyMongo 2.2 - from pymongo import Connection as MongoClient - except ImportError: - pymongo_found = False - else: - pymongo_found = True -else: - pymongo_found = True - -# ========================================= -# MongoDB module specific support methods. 
-# - -def user_add(module, client, db_name, user, password, roles): - db = client[db_name] - if roles is None: - db.add_user(user, password, False) - else: - try: - db.add_user(user, password, None, roles=roles) - except OperationFailure, e: - err_msg = str(e) - if LooseVersion(PyMongoVersion) <= LooseVersion('2.5'): - err_msg = err_msg + ' (Note: you must be on mongodb 2.4+ and pymongo 2.5+ to use the roles param)' - module.fail_json(msg=err_msg) - -def user_remove(client, db_name, user): - db = client[db_name] - db.remove_user(user) - -def load_mongocnf(): - config = ConfigParser.RawConfigParser() - mongocnf = os.path.expanduser('~/.mongodb.cnf') - - try: - config.readfp(open(mongocnf)) - creds = dict( - user=config.get('client', 'user'), - password=config.get('client', 'pass') - ) - except (ConfigParser.NoOptionError, IOError): - return False - - return creds - -# ========================================= -# Module execution. -# - -def main(): - module = AnsibleModule( - argument_spec = dict( - login_user=dict(default=None), - login_password=dict(default=None), - login_host=dict(default='localhost'), - login_port=dict(default='27017'), - replica_set=dict(default=None), - database=dict(required=True, aliases=['db']), - user=dict(required=True, aliases=['name']), - password=dict(aliases=['pass']), - roles=dict(default=None, type='list'), - state=dict(default='present', choices=['absent', 'present']), - ) - ) - - if not pymongo_found: - module.fail_json(msg='the python pymongo module is required') - - login_user = module.params['login_user'] - login_password = module.params['login_password'] - login_host = module.params['login_host'] - login_port = module.params['login_port'] - replica_set = module.params['replica_set'] - db_name = module.params['database'] - user = module.params['user'] - password = module.params['password'] - roles = module.params['roles'] - state = module.params['state'] - - try: - if replica_set: - client = MongoClient(login_host, 
int(login_port), replicaset=replica_set) - else: - client = MongoClient(login_host, int(login_port)) - - # try to authenticate as a target user to check if it already exists - try: - client[db_name].authenticate(user, password) - if state == 'present': - module.exit_json(changed=False, user=user) - except OperationFailure: - if state == 'absent': - module.exit_json(changed=False, user=user) - - if login_user is None and login_password is None: - mongocnf_creds = load_mongocnf() - if mongocnf_creds is not False: - login_user = mongocnf_creds['user'] - login_password = mongocnf_creds['password'] - elif login_password is None and login_user is not None: - module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided') - - if login_user is not None and login_password is not None: - client.admin.authenticate(login_user, login_password) - - except ConnectionFailure, e: - module.fail_json(msg='unable to connect to database: %s' % str(e)) - - if state == 'present': - if password is None: - module.fail_json(msg='password parameter required when adding a user') - - try: - user_add(module, client, db_name, user, password, roles) - except OperationFailure, e: - module.fail_json(msg='Unable to add or update user: %s' % str(e)) - - elif state == 'absent': - try: - user_remove(client, db_name, user) - except OperationFailure, e: - module.fail_json(msg='Unable to remove user: %s' % str(e)) - - module.exit_json(changed=True, user=user) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/database/mysql_db b/library/database/mysql_db deleted file mode 100644 index 38dee608ba..0000000000 --- a/library/database/mysql_db +++ /dev/null @@ -1,363 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Mark Theunissen -# Sponsored by Four Kitchens http://fourkitchens.com. 
-# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: mysql_db -short_description: Add or remove MySQL databases from a remote host. -description: - - Add or remove MySQL databases from a remote host. -version_added: "0.6" -options: - name: - description: - - name of the database to add or remove - required: true - default: null - aliases: [ db ] - login_user: - description: - - The username used to authenticate with - required: false - default: null - login_password: - description: - - The password used to authenticate with - required: false - default: null - login_host: - description: - - Host running the database - required: false - default: localhost - login_port: - description: - - Port of the MySQL server. Requires login_host be defined as other then localhost if login_port is used - required: false - default: 3306 - login_unix_socket: - description: - - The path to a Unix domain socket for local connections - required: false - default: null - state: - description: - - The database state - required: false - default: present - choices: [ "present", "absent", "dump", "import" ] - collation: - description: - - Collation mode - required: false - default: null - encoding: - description: - - Encoding mode - required: false - default: null - target: - description: - - Location, on the remote host, of the dump file to read from or write to. 
Uncompressed SQL - files (C(.sql)) as well as bzip2 (C(.bz2)) and gzip (C(.gz)) compressed files are supported. - required: false -notes: - - Requires the MySQLdb Python package on the remote host. For Ubuntu, this - is as easy as apt-get install python-mysqldb. (See M(apt).) - - Both I(login_password) and I(login_user) are required when you are - passing credentials. If none are present, the module will attempt to read - the credentials from C(~/.my.cnf), and finally fall back to using the MySQL - default login of C(root) with no password. -requirements: [ ConfigParser ] -author: Mark Theunissen -''' - -EXAMPLES = ''' -# Create a new database with name 'bobdata' -- mysql_db: name=bobdata state=present - -# Copy database dump file to remote host and restore it to database 'my_db' -- copy: src=dump.sql.bz2 dest=/tmp -- mysql_db: name=my_db state=import target=/tmp/dump.sql.bz2 -''' - -import ConfigParser -import os -import pipes -try: - import MySQLdb -except ImportError: - mysqldb_found = False -else: - mysqldb_found = True - -# =========================================== -# MySQL module specific support methods. 
-# - -def db_exists(cursor, db): - res = cursor.execute("SHOW DATABASES LIKE %s", (db.replace("_","\_"),)) - return bool(res) - -def db_delete(cursor, db): - query = "DROP DATABASE `%s`" % db - cursor.execute(query) - return True - -def db_dump(module, host, user, password, db_name, target, port, socket=None): - cmd = module.get_bin_path('mysqldump', True) - cmd += " --quick --user=%s --password=%s" % (pipes.quote(user), pipes.quote(password)) - if socket is not None: - cmd += " --socket=%s" % pipes.quote(socket) - else: - cmd += " --host=%s --port=%s" % (pipes.quote(host), pipes.quote(port)) - cmd += " %s" % pipes.quote(db_name) - if os.path.splitext(target)[-1] == '.gz': - cmd = cmd + ' | gzip > ' + pipes.quote(target) - elif os.path.splitext(target)[-1] == '.bz2': - cmd = cmd + ' | bzip2 > ' + pipes.quote(target) - else: - cmd += " > %s" % pipes.quote(target) - rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True) - return rc, stdout, stderr - -def db_import(module, host, user, password, db_name, target, port, socket=None): - if not os.path.exists(target): - return module.fail_json(msg="target %s does not exist on the host" % target) - - cmd = module.get_bin_path('mysql', True) - cmd += " --user=%s --password=%s" % (pipes.quote(user), pipes.quote(password)) - if socket is not None: - cmd += " --socket=%s" % pipes.quote(socket) - else: - cmd += " --host=%s --port=%s" % (pipes.quote(host), pipes.quote(port)) - cmd += " -D %s" % pipes.quote(db_name) - if os.path.splitext(target)[-1] == '.gz': - gunzip_path = module.get_bin_path('gunzip') - if gunzip_path: - rc, stdout, stderr = module.run_command('%s %s' % (gunzip_path, target)) - if rc != 0: - return rc, stdout, stderr - cmd += " < %s" % pipes.quote(os.path.splitext(target)[0]) - rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True) - if rc != 0: - return rc, stdout, stderr - gzip_path = module.get_bin_path('gzip') - if gzip_path: - rc, stdout, stderr = module.run_command('%s %s' % 
(gzip_path, os.path.splitext(target)[0])) - else: - module.fail_json(msg="gzip command not found") - else: - module.fail_json(msg="gunzip command not found") - elif os.path.splitext(target)[-1] == '.bz2': - bunzip2_path = module.get_bin_path('bunzip2') - if bunzip2_path: - rc, stdout, stderr = module.run_command('%s %s' % (bunzip2_path, target)) - if rc != 0: - return rc, stdout, stderr - cmd += " < %s" % pipes.quote(os.path.splitext(target)[0]) - rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True) - if rc != 0: - return rc, stdout, stderr - bzip2_path = module.get_bin_path('bzip2') - if bzip2_path: - rc, stdout, stderr = module.run_command('%s %s' % (bzip2_path, os.path.splitext(target)[0])) - else: - module.fail_json(msg="bzip2 command not found") - else: - module.fail_json(msg="bunzip2 command not found") - else: - cmd += " < %s" % pipes.quote(target) - rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True) - return rc, stdout, stderr - -def db_create(cursor, db, encoding, collation): - if encoding: - encoding = " CHARACTER SET %s" % encoding - if collation: - collation = " COLLATE %s" % collation - query = "CREATE DATABASE `%s`%s%s" % (db, encoding, collation) - res = cursor.execute(query) - return True - -def strip_quotes(s): - """ Remove surrounding single or double quotes - - >>> print strip_quotes('hello') - hello - >>> print strip_quotes('"hello"') - hello - >>> print strip_quotes("'hello'") - hello - >>> print strip_quotes("'hello") - 'hello - - """ - single_quote = "'" - double_quote = '"' - - if s.startswith(single_quote) and s.endswith(single_quote): - s = s.strip(single_quote) - elif s.startswith(double_quote) and s.endswith(double_quote): - s = s.strip(double_quote) - return s - - -def config_get(config, section, option): - """ Calls ConfigParser.get and strips quotes - - See: http://dev.mysql.com/doc/refman/5.0/en/option-files.html - """ - return strip_quotes(config.get(section, option)) - - -def load_mycnf(): - config = 
ConfigParser.RawConfigParser() - mycnf = os.path.expanduser('~/.my.cnf') - if not os.path.exists(mycnf): - return False - try: - config.readfp(open(mycnf)) - except (IOError): - return False - # We support two forms of passwords in .my.cnf, both pass= and password=, - # as these are both supported by MySQL. - try: - passwd = config_get(config, 'client', 'password') - except (ConfigParser.NoOptionError): - try: - passwd = config_get(config, 'client', 'pass') - except (ConfigParser.NoOptionError): - return False - try: - creds = dict(user=config_get(config, 'client', 'user'),passwd=passwd) - except (ConfigParser.NoOptionError): - return False - return creds - -# =========================================== -# Module execution. -# - -def main(): - module = AnsibleModule( - argument_spec = dict( - login_user=dict(default=None), - login_password=dict(default=None), - login_host=dict(default="localhost"), - login_port=dict(default="3306"), - login_unix_socket=dict(default=None), - name=dict(required=True, aliases=['db']), - encoding=dict(default=""), - collation=dict(default=""), - target=dict(default=None), - state=dict(default="present", choices=["absent", "present","dump", "import"]), - ) - ) - - if not mysqldb_found: - module.fail_json(msg="the python mysqldb module is required") - - db = module.params["name"] - encoding = module.params["encoding"] - collation = module.params["collation"] - state = module.params["state"] - target = module.params["target"] - - # make sure the target path is expanded for ~ and $HOME - if target is not None: - target = os.path.expandvars(os.path.expanduser(target)) - - # Either the caller passes both a username and password with which to connect to - # mysql, or they pass neither and allow this module to read the credentials from - # ~/.my.cnf. 
- login_password = module.params["login_password"] - login_user = module.params["login_user"] - if login_user is None and login_password is None: - mycnf_creds = load_mycnf() - if mycnf_creds is False: - login_user = "root" - login_password = "" - else: - login_user = mycnf_creds["user"] - login_password = mycnf_creds["passwd"] - elif login_password is None or login_user is None: - module.fail_json(msg="when supplying login arguments, both login_user and login_password must be provided") - login_host = module.params["login_host"] - - if state in ['dump','import']: - if target is None: - module.fail_json(msg="with state=%s target is required" % (state)) - connect_to_db = db - else: - connect_to_db = 'mysql' - try: - if module.params["login_unix_socket"]: - db_connection = MySQLdb.connect(host=module.params["login_host"], unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password, db=connect_to_db) - elif module.params["login_port"] != "3306" and module.params["login_host"] == "localhost": - module.fail_json(msg="login_host is required when login_port is defined, login_host cannot be localhost when login_port is defined") - else: - db_connection = MySQLdb.connect(host=module.params["login_host"], port=int(module.params["login_port"]), user=login_user, passwd=login_password, db=connect_to_db) - cursor = db_connection.cursor() - except Exception, e: - if "Unknown database" in str(e): - errno, errstr = e.args - module.fail_json(msg="ERROR: %s %s" % (errno, errstr)) - else: - module.fail_json(msg="unable to connect, check login_user and login_password are correct, or alternatively check ~/.my.cnf contains credentials") - - changed = False - if db_exists(cursor, db): - if state == "absent": - try: - changed = db_delete(cursor, db) - except Exception, e: - module.fail_json(msg="error deleting database: " + str(e)) - elif state == "dump": - rc, stdout, stderr = db_dump(module, login_host, login_user, - login_password, db, target, - 
port=module.params['login_port'], - socket=module.params['login_unix_socket']) - if rc != 0: - module.fail_json(msg="%s" % stderr) - else: - module.exit_json(changed=True, db=db, msg=stdout) - elif state == "import": - rc, stdout, stderr = db_import(module, login_host, login_user, - login_password, db, target, - port=module.params['login_port'], - socket=module.params['login_unix_socket']) - if rc != 0: - module.fail_json(msg="%s" % stderr) - else: - module.exit_json(changed=True, db=db, msg=stdout) - else: - if state == "present": - try: - changed = db_create(cursor, db, encoding, collation) - except Exception, e: - module.fail_json(msg="error creating database: " + str(e)) - - module.exit_json(changed=changed, db=db) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/database/mysql_replication b/library/database/mysql_replication deleted file mode 100644 index d965f3ce0d..0000000000 --- a/library/database/mysql_replication +++ /dev/null @@ -1,369 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -""" - -Ansible module to manage mysql replication -(c) 2013, Balazs Pocze -Certain parts are taken from Mark Theunissen's mysqldb module - -This file is part of Ansible - -Ansible is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -Ansible is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. -You should have received a copy of the GNU General Public License -along with Ansible. If not, see . 
-""" - -DOCUMENTATION = ''' ---- -module: mysql_replication - -short_description: Manage MySQL replication -description: - - Manages MySQL server replication, slave, master status get and change master host. -version_added: "1.3" -options: - mode: - description: - - module operating mode. Could be getslave (SHOW SLAVE STATUS), getmaster (SHOW MASTER STATUS), changemaster (CHANGE MASTER TO), startslave (START SLAVE), stopslave (STOP SLAVE) - required: False - choices: - - getslave - - getmaster - - changemaster - - stopslave - - startslave - default: getslave - login_user: - description: - - username to connect mysql host, if defined login_password also needed. - required: False - login_password: - description: - - password to connect mysql host, if defined login_user also needed. - required: False - login_host: - description: - - mysql host to connect - required: False - login_unix_socket: - description: - - unix socket to connect mysql server - master_host: - description: - - same as mysql variable - master_user: - description: - - same as mysql variable - master_password: - description: - - same as mysql variable - master_port: - description: - - same as mysql variable - master_connect_retry: - description: - - same as mysql variable - master_log_file: - description: - - same as mysql variable - master_log_pos: - description: - - same as mysql variable - relay_log_file: - description: - - same as mysql variable - relay_log_pos: - description: - - same as mysql variable - master_ssl: - description: - - same as mysql variable - possible values: 0,1 - master_ssl_ca: - description: - - same as mysql variable - master_ssl_capath: - description: - - same as mysql variable - master_ssl_cert: - description: - - same as mysql variable - master_ssl_key: - description: - - same as mysql variable - master_ssl_cipher: - description: - - same as mysql variable - -''' - -EXAMPLES = ''' -# Stop mysql slave thread -- mysql_replication: mode=stopslave - -# Get master binlog file 
name and binlog position -- mysql_replication: mode=getmaster - -# Change master to master server 192.168.1.1 and use binary log 'mysql-bin.000009' with position 4578 -- mysql_replication: mode=changemaster master_host=192.168.1.1 master_log_file=mysql-bin.000009 master_log_pos=4578 -''' - -import ConfigParser -import os -import warnings - -try: - import MySQLdb -except ImportError: - mysqldb_found = False -else: - mysqldb_found = True - - -def get_master_status(cursor): - cursor.execute("SHOW MASTER STATUS") - masterstatus = cursor.fetchone() - return masterstatus - - -def get_slave_status(cursor): - cursor.execute("SHOW SLAVE STATUS") - slavestatus = cursor.fetchone() - return slavestatus - - -def stop_slave(cursor): - try: - cursor.execute("STOP SLAVE") - stopped = True - except: - stopped = False - return stopped - - -def start_slave(cursor): - try: - cursor.execute("START SLAVE") - started = True - except: - started = False - return started - - -def changemaster(cursor, chm): - SQLPARAM = ",".join(chm) - cursor.execute("CHANGE MASTER TO " + SQLPARAM) - - -def strip_quotes(s): - """ Remove surrounding single or double quotes - - >>> print strip_quotes('hello') - hello - >>> print strip_quotes('"hello"') - hello - >>> print strip_quotes("'hello'") - hello - >>> print strip_quotes("'hello") - 'hello - - """ - single_quote = "'" - double_quote = '"' - - if s.startswith(single_quote) and s.endswith(single_quote): - s = s.strip(single_quote) - elif s.startswith(double_quote) and s.endswith(double_quote): - s = s.strip(double_quote) - return s - - -def config_get(config, section, option): - """ Calls ConfigParser.get and strips quotes - - See: http://dev.mysql.com/doc/refman/5.0/en/option-files.html - """ - return strip_quotes(config.get(section, option)) - - -def load_mycnf(): - config = ConfigParser.RawConfigParser() - mycnf = os.path.expanduser('~/.my.cnf') - if not os.path.exists(mycnf): - return False - try: - config.readfp(open(mycnf)) - except (IOError): - 
return False - # We support two forms of passwords in .my.cnf, both pass= and password=, - # as these are both supported by MySQL. - try: - passwd = config_get(config, 'client', 'password') - except (ConfigParser.NoOptionError): - try: - passwd = config_get(config, 'client', 'pass') - except (ConfigParser.NoOptionError): - return False - - # If .my.cnf doesn't specify a user, default to user login name - try: - user = config_get(config, 'client', 'user') - except (ConfigParser.NoOptionError): - user = getpass.getuser() - creds = dict(user=user, passwd=passwd) - return creds - - -def main(): - module = AnsibleModule( - argument_spec = dict( - login_user=dict(default=None), - login_password=dict(default=None), - login_host=dict(default="localhost"), - login_unix_socket=dict(default=None), - mode=dict(default="getslave", choices=["getmaster", "getslave", "changemaster", "stopslave", "startslave"]), - master_host=dict(default=None), - master_user=dict(default=None), - master_password=dict(default=None), - master_port=dict(default=None), - master_connect_retry=dict(default=None), - master_log_file=dict(default=None), - master_log_pos=dict(default=None), - relay_log_file=dict(default=None), - relay_log_pos=dict(default=None), - master_ssl=dict(default=False, type='bool'), - master_ssl_ca=dict(default=None), - master_ssl_capath=dict(default=None), - master_ssl_cert=dict(default=None), - master_ssl_key=dict(default=None), - master_ssl_cipher=dict(default=None), - ) - ) - user = module.params["login_user"] - password = module.params["login_password"] - host = module.params["login_host"] - mode = module.params["mode"] - master_host = module.params["master_host"] - master_user = module.params["master_user"] - master_password = module.params["master_password"] - master_port = module.params["master_port"] - master_connect_retry = module.params["master_connect_retry"] - master_log_file = module.params["master_log_file"] - master_log_pos = module.params["master_log_pos"] - 
relay_log_file = module.params["relay_log_file"] - relay_log_pos = module.params["relay_log_pos"] - master_ssl = module.params["master_ssl"] - master_ssl_ca = module.params["master_ssl_ca"] - master_ssl_capath = module.params["master_ssl_capath"] - master_ssl_cert = module.params["master_ssl_cert"] - master_ssl_key = module.params["master_ssl_key"] - master_ssl_cipher = module.params["master_ssl_cipher"] - - if not mysqldb_found: - module.fail_json(msg="the python mysqldb module is required") - else: - warnings.filterwarnings('error', category=MySQLdb.Warning) - - # Either the caller passes both a username and password with which to connect to - # mysql, or they pass neither and allow this module to read the credentials from - # ~/.my.cnf. - login_password = module.params["login_password"] - login_user = module.params["login_user"] - if login_user is None and login_password is None: - mycnf_creds = load_mycnf() - if mycnf_creds is False: - login_user = "root" - login_password = "" - else: - login_user = mycnf_creds["user"] - login_password = mycnf_creds["passwd"] - elif login_password is None or login_user is None: - module.fail_json(msg="when supplying login arguments, both login_user and login_password must be provided") - - try: - if module.params["login_unix_socket"]: - db_connection = MySQLdb.connect(host=module.params["login_host"], unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password, db="mysql") - else: - db_connection = MySQLdb.connect(host=module.params["login_host"], user=login_user, passwd=login_password, db="mysql") - except Exception, e: - module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials") - try: - cursor = db_connection.cursor(cursorclass=MySQLdb.cursors.DictCursor) - except Exception, e: - module.fail_json(msg="Trouble getting DictCursor from db_connection: %s" % e) - - if mode in "getmaster": - masterstatus = 
get_master_status(cursor) - try: - module.exit_json( **masterstatus ) - except TypeError: - module.fail_json(msg="Server is not configured as mysql master") - - elif mode in "getslave": - slavestatus = get_slave_status(cursor) - try: - module.exit_json( **slavestatus ) - except TypeError: - module.fail_json(msg="Server is not configured as mysql slave") - - elif mode in "changemaster": - print "Change master" - chm=[] - if master_host: - chm.append("MASTER_HOST='" + master_host + "'") - if master_user: - chm.append("MASTER_USER='" + master_user + "'") - if master_password: - chm.append("MASTER_PASSWORD='" + master_password + "'") - if master_port: - chm.append("MASTER_PORT=" + master_port) - if master_connect_retry: - chm.append("MASTER_CONNECT_RETRY='" + master_connect_retry + "'") - if master_log_file: - chm.append("MASTER_LOG_FILE='" + master_log_file + "'") - if master_log_pos: - chm.append("MASTER_LOG_POS=" + master_log_pos) - if relay_log_file: - chm.append("RELAY_LOG_FILE='" + relay_log_file + "'") - if relay_log_pos: - chm.append("RELAY_LOG_POS=" + relay_log_pos) - if master_ssl: - chm.append("MASTER_SSL=1") - if master_ssl_ca: - chm.append("MASTER_SSL_CA='" + master_ssl_ca + "'") - if master_ssl_capath: - chm.append("MASTER_SSL_CAPATH='" + master_ssl_capath + "'") - if master_ssl_cert: - chm.append("MASTER_SSL_CERT='" + master_ssl_cert + "'") - if master_ssl_key: - chm.append("MASTER_SSL_KEY='" + master_ssl_key + "'") - if master_ssl_cipher: - chm.append("MASTER_SSL_CIPHER='" + master_ssl_cipher + "'") - changemaster(cursor,chm) - module.exit_json(changed=True) - elif mode in "startslave": - started = start_slave(cursor) - if started is True: - module.exit_json(msg="Slave started ", changed=True) - else: - module.exit_json(msg="Slave already started (Or cannot be started)", changed=False) - elif mode in "stopslave": - stopped = stop_slave(cursor) - if stopped is True: - module.exit_json(msg="Slave stopped", changed=True) - else: - 
module.exit_json(msg="Slave already stopped", changed=False) - -# import module snippets -from ansible.module_utils.basic import * -main() -warnings.simplefilter("ignore") \ No newline at end of file diff --git a/library/database/mysql_user b/library/database/mysql_user deleted file mode 100644 index aaec05f99f..0000000000 --- a/library/database/mysql_user +++ /dev/null @@ -1,476 +0,0 @@ -#!/usr/bin/python - -# (c) 2012, Mark Theunissen -# Sponsored by Four Kitchens http://fourkitchens.com. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: mysql_user -short_description: Adds or removes a user from a MySQL database. -description: - - Adds or removes a user from a MySQL database. 
-version_added: "0.6" -options: - name: - description: - - name of the user (role) to add or remove - required: true - default: null - password: - description: - - set the user's password - required: false - default: null - host: - description: - - the 'host' part of the MySQL username - required: false - default: localhost - login_user: - description: - - The username used to authenticate with - required: false - default: null - login_password: - description: - - The password used to authenticate with - required: false - default: null - login_host: - description: - - Host running the database - required: false - default: localhost - login_port: - description: - - Port of the MySQL server - required: false - default: 3306 - version_added: '1.4' - login_unix_socket: - description: - - The path to a Unix domain socket for local connections - required: false - default: null - priv: - description: - - "MySQL privileges string in the format: C(db.table:priv1,priv2)" - required: false - default: null - append_privs: - description: - - Append the privileges defined by priv to the existing ones for this - user instead of overwriting existing ones. - required: false - choices: [ "yes", "no" ] - default: "no" - version_added: "1.4" - state: - description: - - Whether the user should exist. When C(absent), removes - the user. - required: false - default: present - choices: [ "present", "absent" ] - check_implicit_admin: - description: - - Check if mysql allows login as root/nopassword before trying supplied credentials. - required: false - default: false - version_added: "1.3" -notes: - - Requires the MySQLdb Python package on the remote host. For Ubuntu, this - is as easy as apt-get install python-mysqldb. - - Both C(login_password) and C(login_username) are required when you are - passing credentials. If none are present, the module will attempt to read - the credentials from C(~/.my.cnf), and finally fall back to using the MySQL - default login of 'root' with no password. 
- - "MySQL server installs with default login_user of 'root' and no password. To secure this user - as part of an idempotent playbook, you must create at least two tasks: the first must change the root user's password, - without providing any login_user/login_password details. The second must drop a ~/.my.cnf file containing - the new root credentials. Subsequent runs of the playbook will then succeed by reading the new credentials from - the file." - -requirements: [ "ConfigParser", "MySQLdb" ] -author: Mark Theunissen -''' - -EXAMPLES = """ -# Create database user with name 'bob' and password '12345' with all database privileges -- mysql_user: name=bob password=12345 priv=*.*:ALL state=present - -# Creates database user 'bob' and password '12345' with all database privileges and 'WITH GRANT OPTION' -- mysql_user: name=bob password=12345 priv=*.*:ALL,GRANT state=present - -# Ensure no user named 'sally' exists, also passing in the auth credentials. -- mysql_user: login_user=root login_password=123456 name=sally state=absent - -# Specify grants composed of more than one word -- mysql_user: name=replication password=12345 priv=*.*:"REPLICATION CLIENT" state=present - -# Revoke all privileges for user 'bob' and password '12345' -- mysql_user: name=bob password=12345 priv=*.*:USAGE state=present - -# Example privileges string format -mydb.*:INSERT,UPDATE/anotherdb.*:SELECT/yetanotherdb.*:ALL - -# Example using login_unix_socket to connect to server -- mysql_user: name=root password=abc123 login_unix_socket=/var/run/mysqld/mysqld.sock - -# Example .my.cnf file for setting the root password -# Note: don't use quotes around the password, because the mysql_user module -# will include them in the password but the mysql client will not - -[client] -user=root -password=n<_665{vS43y -""" - -import ConfigParser -import getpass -import tempfile -try: - import MySQLdb -except ImportError: - mysqldb_found = False -else: - mysqldb_found = True - -# 
=========================================== -# MySQL module specific support methods. -# - -def user_exists(cursor, user, host): - cursor.execute("SELECT count(*) FROM user WHERE user = %s AND host = %s", (user,host)) - count = cursor.fetchone() - return count[0] > 0 - -def user_add(cursor, user, host, password, new_priv): - cursor.execute("CREATE USER %s@%s IDENTIFIED BY %s", (user,host,password)) - if new_priv is not None: - for db_table, priv in new_priv.iteritems(): - privileges_grant(cursor, user,host,db_table,priv) - return True - -def user_mod(cursor, user, host, password, new_priv, append_privs): - changed = False - grant_option = False - - # Handle passwords. - if password is not None: - cursor.execute("SELECT password FROM user WHERE user = %s AND host = %s", (user,host)) - current_pass_hash = cursor.fetchone() - cursor.execute("SELECT PASSWORD(%s)", (password,)) - new_pass_hash = cursor.fetchone() - if current_pass_hash[0] != new_pass_hash[0]: - cursor.execute("SET PASSWORD FOR %s@%s = PASSWORD(%s)", (user,host,password)) - changed = True - - # Handle privileges. - if new_priv is not None: - curr_priv = privileges_get(cursor, user,host) - - # If the user has privileges on a db.table that doesn't appear at all in - # the new specification, then revoke all privileges on it. - for db_table, priv in curr_priv.iteritems(): - # If the user has the GRANT OPTION on a db.table, revoke it first. - if "GRANT" in priv: - grant_option = True - if db_table not in new_priv: - if user != "root" and "PROXY" not in priv and not append_privs: - privileges_revoke(cursor, user,host,db_table,grant_option) - changed = True - - # If the user doesn't currently have any privileges on a db.table, then - # we can perform a straight grant operation. 
- for db_table, priv in new_priv.iteritems(): - if db_table not in curr_priv: - privileges_grant(cursor, user,host,db_table,priv) - changed = True - - # If the db.table specification exists in both the user's current privileges - # and in the new privileges, then we need to see if there's a difference. - db_table_intersect = set(new_priv.keys()) & set(curr_priv.keys()) - for db_table in db_table_intersect: - priv_diff = set(new_priv[db_table]) ^ set(curr_priv[db_table]) - if (len(priv_diff) > 0): - if not append_privs: - privileges_revoke(cursor, user,host,db_table,grant_option) - privileges_grant(cursor, user,host,db_table,new_priv[db_table]) - changed = True - - return changed - -def user_delete(cursor, user, host): - cursor.execute("DROP USER %s@%s", (user,host)) - return True - -def privileges_get(cursor, user,host): - """ MySQL doesn't have a better method of getting privileges aside from the - SHOW GRANTS query syntax, which requires us to then parse the returned string. - Here's an example of the string that is returned from MySQL: - - GRANT USAGE ON *.* TO 'user'@'localhost' IDENTIFIED BY 'pass'; - - This function makes the query and returns a dictionary containing the results. - The dictionary format is the same as that returned by privileges_unpack() below. - """ - output = {} - cursor.execute("SHOW GRANTS FOR %s@%s", (user,host)) - grants = cursor.fetchall() - - def pick(x): - if x == 'ALL PRIVILEGES': - return 'ALL' - else: - return x - - for grant in grants: - res = re.match("GRANT (.+) ON (.+) TO '.+'@'.+'( IDENTIFIED BY PASSWORD '.+')? 
?(.*)", grant[0]) - if res is None: - module.fail_json(msg="unable to parse the MySQL grant string") - privileges = res.group(1).split(", ") - privileges = [ pick(x) for x in privileges] - if "WITH GRANT OPTION" in res.group(4): - privileges.append('GRANT') - db = res.group(2) - output[db] = privileges - return output - -def privileges_unpack(priv): - """ Take a privileges string, typically passed as a parameter, and unserialize - it into a dictionary, the same format as privileges_get() above. We have this - custom format to avoid using YAML/JSON strings inside YAML playbooks. Example - of a privileges string: - - mydb.*:INSERT,UPDATE/anotherdb.*:SELECT/yetanother.*:ALL - - The privilege USAGE stands for no privileges, so we add that in on *.* if it's - not specified in the string, as MySQL will always provide this by default. - """ - output = {} - for item in priv.split('/'): - pieces = item.split(':') - if '.' in pieces[0]: - pieces[0] = pieces[0].split('.') - for idx, piece in enumerate(pieces): - if pieces[0][idx] != "*": - pieces[0][idx] = "`" + pieces[0][idx] + "`" - pieces[0] = '.'.join(pieces[0]) - - output[pieces[0]] = pieces[1].upper().split(',') - - if '*.*' not in output: - output['*.*'] = ['USAGE'] - - return output - -def privileges_revoke(cursor, user,host,db_table,grant_option): - if grant_option: - query = "REVOKE GRANT OPTION ON %s FROM '%s'@'%s'" % (db_table,user,host) - cursor.execute(query) - query = "REVOKE ALL PRIVILEGES ON %s FROM '%s'@'%s'" % (db_table,user,host) - cursor.execute(query) - -def privileges_grant(cursor, user,host,db_table,priv): - - priv_string = ",".join(filter(lambda x: x != 'GRANT', priv)) - query = "GRANT %s ON %s TO '%s'@'%s'" % (priv_string,db_table,user,host) - if 'GRANT' in priv: - query = query + " WITH GRANT OPTION" - cursor.execute(query) - - -def strip_quotes(s): - """ Remove surrounding single or double quotes - - >>> print strip_quotes('hello') - hello - >>> print strip_quotes('"hello"') - hello - >>> print 
strip_quotes("'hello'") - hello - >>> print strip_quotes("'hello") - 'hello - - """ - single_quote = "'" - double_quote = '"' - - if s.startswith(single_quote) and s.endswith(single_quote): - s = s.strip(single_quote) - elif s.startswith(double_quote) and s.endswith(double_quote): - s = s.strip(double_quote) - return s - - -def config_get(config, section, option): - """ Calls ConfigParser.get and strips quotes - - See: http://dev.mysql.com/doc/refman/5.0/en/option-files.html - """ - return strip_quotes(config.get(section, option)) - - -def _safe_cnf_load(config, path): - - data = {'user':'', 'password':''} - - # read in user/pass - f = open(path, 'r') - for line in f.readlines(): - line = line.strip() - if line.startswith('user='): - data['user'] = line.split('=', 1)[1].strip() - if line.startswith('password=') or line.startswith('pass='): - data['password'] = line.split('=', 1)[1].strip() - f.close() - - # write out a new cnf file with only user/pass - fh, newpath = tempfile.mkstemp(prefix=path + '.') - f = open(newpath, 'wb') - f.write('[client]\n') - f.write('user=%s\n' % data['user']) - f.write('password=%s\n' % data['password']) - f.close() - - config.readfp(open(newpath)) - os.remove(newpath) - return config - -def load_mycnf(): - config = ConfigParser.RawConfigParser() - mycnf = os.path.expanduser('~/.my.cnf') - if not os.path.exists(mycnf): - return False - try: - config.readfp(open(mycnf)) - except (IOError): - return False - except: - config = _safe_cnf_load(config, mycnf) - - # We support two forms of passwords in .my.cnf, both pass= and password=, - # as these are both supported by MySQL. 
- try: - passwd = config_get(config, 'client', 'password') - except (ConfigParser.NoOptionError): - try: - passwd = config_get(config, 'client', 'pass') - except (ConfigParser.NoOptionError): - return False - - # If .my.cnf doesn't specify a user, default to user login name - try: - user = config_get(config, 'client', 'user') - except (ConfigParser.NoOptionError): - user = getpass.getuser() - creds = dict(user=user,passwd=passwd) - return creds - -def connect(module, login_user, login_password): - if module.params["login_unix_socket"]: - db_connection = MySQLdb.connect(host=module.params["login_host"], unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password, db="mysql") - else: - db_connection = MySQLdb.connect(host=module.params["login_host"], port=int(module.params["login_port"]), user=login_user, passwd=login_password, db="mysql") - return db_connection.cursor() - -# =========================================== -# Module execution. -# - -def main(): - module = AnsibleModule( - argument_spec = dict( - login_user=dict(default=None), - login_password=dict(default=None), - login_host=dict(default="localhost"), - login_port=dict(default="3306"), - login_unix_socket=dict(default=None), - user=dict(required=True, aliases=['name']), - password=dict(default=None), - host=dict(default="localhost"), - state=dict(default="present", choices=["absent", "present"]), - priv=dict(default=None), - append_privs=dict(type="bool", default="no"), - check_implicit_admin=dict(default=False), - ) - ) - user = module.params["user"] - password = module.params["password"] - host = module.params["host"] - state = module.params["state"] - priv = module.params["priv"] - check_implicit_admin = module.params['check_implicit_admin'] - append_privs = module.boolean(module.params["append_privs"]) - - if not mysqldb_found: - module.fail_json(msg="the python mysqldb module is required") - - if priv is not None: - try: - priv = privileges_unpack(priv) - except: - 
module.fail_json(msg="invalid privileges string") - - # Either the caller passes both a username and password with which to connect to - # mysql, or they pass neither and allow this module to read the credentials from - # ~/.my.cnf. - login_password = module.params["login_password"] - login_user = module.params["login_user"] - if login_user is None and login_password is None: - mycnf_creds = load_mycnf() - if mycnf_creds is False: - login_user = "root" - login_password = "" - else: - login_user = mycnf_creds["user"] - login_password = mycnf_creds["passwd"] - elif login_password is None or login_user is None: - module.fail_json(msg="when supplying login arguments, both login_user and login_password must be provided") - - cursor = None - try: - if check_implicit_admin: - try: - cursor = connect(module, 'root', '') - except: - pass - - if not cursor: - cursor = connect(module, login_user, login_password) - except Exception, e: - module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials") - - if state == "present": - if user_exists(cursor, user, host): - changed = user_mod(cursor, user, host, password, priv, append_privs) - else: - if password is None: - module.fail_json(msg="password parameter required when adding a user") - changed = user_add(cursor, user, host, password, priv) - elif state == "absent": - if user_exists(cursor, user, host): - changed = user_delete(cursor, user, host) - else: - changed = False - module.exit_json(changed=changed, user=user) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/database/mysql_variables b/library/database/mysql_variables deleted file mode 100644 index 7353fdd485..0000000000 --- a/library/database/mysql_variables +++ /dev/null @@ -1,253 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -""" - -Ansible module to manage mysql variables -(c) 2013, Balazs Pocze -Certain parts are taken from Mark 
Theunissen's mysqldb module - -This file is part of Ansible - -Ansible is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -Ansible is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. -You should have received a copy of the GNU General Public License -along with Ansible. If not, see . -""" - -DOCUMENTATION = ''' ---- -module: mysql_variables - -short_description: Manage MySQL global variables -description: - - Query / Set MySQL variables -version_added: 1.3 -options: - variable: - description: - - Variable name to operate - required: True - value: - description: - - If set, then sets variable value to this - required: False - login_user: - description: - - username to connect mysql host, if defined login_password also needed. - required: False - login_password: - description: - - password to connect mysql host, if defined login_user also needed. - required: False - login_host: - description: - - mysql host to connect - required: False - login_unix_socket: - description: - - unix socket to connect mysql server -''' -EXAMPLES = ''' -# Check for sync_binlog setting -- mysql_variables: variable=sync_binlog - -# Set read_only variable to 1 -- mysql_variables: variable=read_only value=1 -''' - - -import ConfigParser -import os -import warnings - -try: - import MySQLdb -except ImportError: - mysqldb_found = False -else: - mysqldb_found = True - - -def typedvalue(value): - """ - Convert value to number whenever possible, return same value - otherwise. 
- - >>> typedvalue('3') - 3 - >>> typedvalue('3.0') - 3.0 - >>> typedvalue('foobar') - 'foobar' - - """ - try: - return int(value) - except ValueError: - pass - - try: - return float(value) - except ValueError: - pass - - return value - - -def getvariable(cursor, mysqlvar): - cursor.execute("SHOW VARIABLES LIKE '" + mysqlvar + "'") - mysqlvar_val = cursor.fetchall() - return mysqlvar_val - - -def setvariable(cursor, mysqlvar, value): - """ Set a global mysql variable to a given value - - The DB driver will handle quoting of the given value based on its - type, thus numeric strings like '3.0' or '8' are illegal, they - should be passed as numeric literals. - - """ - try: - cursor.execute("SET GLOBAL " + mysqlvar + " = %s", (value,)) - cursor.fetchall() - result = True - except Exception, e: - result = str(e) - return result - - -def strip_quotes(s): - """ Remove surrounding single or double quotes - - >>> print strip_quotes('hello') - hello - >>> print strip_quotes('"hello"') - hello - >>> print strip_quotes("'hello'") - hello - >>> print strip_quotes("'hello") - 'hello - - """ - single_quote = "'" - double_quote = '"' - - if s.startswith(single_quote) and s.endswith(single_quote): - s = s.strip(single_quote) - elif s.startswith(double_quote) and s.endswith(double_quote): - s = s.strip(double_quote) - return s - - -def config_get(config, section, option): - """ Calls ConfigParser.get and strips quotes - - See: http://dev.mysql.com/doc/refman/5.0/en/option-files.html - """ - return strip_quotes(config.get(section, option)) - - -def load_mycnf(): - config = ConfigParser.RawConfigParser() - mycnf = os.path.expanduser('~/.my.cnf') - if not os.path.exists(mycnf): - return False - try: - config.readfp(open(mycnf)) - except (IOError): - return False - # We support two forms of passwords in .my.cnf, both pass= and password=, - # as these are both supported by MySQL. 
- try: - passwd = config_get(config, 'client', 'password') - except (ConfigParser.NoOptionError): - try: - passwd = config_get(config, 'client', 'pass') - except (ConfigParser.NoOptionError): - return False - - # If .my.cnf doesn't specify a user, default to user login name - try: - user = config_get(config, 'client', 'user') - except (ConfigParser.NoOptionError): - user = getpass.getuser() - creds = dict(user=user, passwd=passwd) - return creds - - -def main(): - module = AnsibleModule( - argument_spec = dict( - login_user=dict(default=None), - login_password=dict(default=None), - login_host=dict(default="localhost"), - login_unix_socket=dict(default=None), - variable=dict(default=None), - value=dict(default=None) - - ) - ) - user = module.params["login_user"] - password = module.params["login_password"] - host = module.params["login_host"] - mysqlvar = module.params["variable"] - value = module.params["value"] - if not mysqldb_found: - module.fail_json(msg="the python mysqldb module is required") - else: - warnings.filterwarnings('error', category=MySQLdb.Warning) - - # Either the caller passes both a username and password with which to connect to - # mysql, or they pass neither and allow this module to read the credentials from - # ~/.my.cnf. 
- login_password = module.params["login_password"] - login_user = module.params["login_user"] - if login_user is None and login_password is None: - mycnf_creds = load_mycnf() - if mycnf_creds is False: - login_user = "root" - login_password = "" - else: - login_user = mycnf_creds["user"] - login_password = mycnf_creds["passwd"] - elif login_password is None or login_user is None: - module.fail_json(msg="when supplying login arguments, both login_user and login_password must be provided") - try: - if module.params["login_unix_socket"]: - db_connection = MySQLdb.connect(host=module.params["login_host"], unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password, db="mysql") - else: - db_connection = MySQLdb.connect(host=module.params["login_host"], user=login_user, passwd=login_password, db="mysql") - cursor = db_connection.cursor() - except Exception, e: - module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials") - if mysqlvar is None: - module.fail_json(msg="Cannot run without variable to operate with") - mysqlvar_val = getvariable(cursor, mysqlvar) - if value is None: - module.exit_json(msg=mysqlvar_val) - else: - if len(mysqlvar_val) < 1: - module.fail_json(msg="Variable not available", changed=False) - # Type values before using them - value_wanted = typedvalue(value) - value_actual = typedvalue(mysqlvar_val[0][1]) - if value_wanted == value_actual: - module.exit_json(msg="Variable already set to requested value", changed=False) - result = setvariable(cursor, mysqlvar, value_wanted) - if result is True: - module.exit_json(msg="Variable change succeeded prev_value=%s" % value_actual, changed=True) - else: - module.fail_json(msg=result, changed=False) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/database/postgresql_db b/library/database/postgresql_db deleted file mode 100644 index 605be62160..0000000000 --- 
a/library/database/postgresql_db +++ /dev/null @@ -1,301 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: postgresql_db -short_description: Add or remove PostgreSQL databases from a remote host. -description: - - Add or remove PostgreSQL databases from a remote host. -version_added: "0.6" -options: - name: - description: - - name of the database to add or remove - required: true - default: null - login_user: - description: - - The username used to authenticate with - required: false - default: null - login_password: - description: - - The password used to authenticate with - required: false - default: null - login_host: - description: - - Host running the database - required: false - default: localhost - owner: - description: - - Name of the role to set as owner of the database - required: false - default: null - port: - description: - - Database port to connect to. - required: false - default: 5432 - template: - description: - - Template used to create the database - required: false - default: null - encoding: - description: - - Encoding of the database - required: false - default: null - encoding: - description: - - Encoding of the database - required: false - default: null - lc_collate: - description: - - Collation order (LC_COLLATE) to use in the database. 
Must match collation order of template database unless C(template0) is used as template. - required: false - default: null - lc_ctype: - description: - - Character classification (LC_CTYPE) to use in the database (e.g. lower, upper, ...) Must match LC_CTYPE of template database unless C(template0) is used as template. - required: false - default: null - state: - description: - - The database state - required: false - default: present - choices: [ "present", "absent" ] -notes: - - The default authentication assumes that you are either logging in as or sudo'ing to the C(postgres) account on the host. - - This module uses I(psycopg2), a Python PostgreSQL database adapter. You must ensure that psycopg2 is installed on - the host before using this module. If the remote host is the PostgreSQL server (which is the default case), then PostgreSQL must also be installed on the remote host. For Ubuntu-based systems, install the C(postgresql), C(libpq-dev), and C(python-psycopg2) packages on the remote host before using this module. -requirements: [ psycopg2 ] -author: Lorin Hochstein -''' - -EXAMPLES = ''' -# Create a new database with name "acme" -- postgresql_db: name=acme - -# Create a new database with name "acme" and specific encoding and locale -# settings. If a template different from "template0" is specified, encoding -# and locale settings must match those of the template. -- postgresql_db: name=acme - encoding='UTF-8' - lc_collate='de_DE.UTF-8' - lc_ctype='de_DE.UTF-8' - template='template0' -''' - -try: - import psycopg2 - import psycopg2.extras -except ImportError: - postgresqldb_found = False -else: - postgresqldb_found = True - -class NotSupportedError(Exception): - pass - - -# =========================================== -# PostgreSQL module specific support methods. 
-# - -def set_owner(cursor, db, owner): - query = "ALTER DATABASE \"%s\" OWNER TO \"%s\"" % (db, owner) - cursor.execute(query) - return True - -def get_encoding_id(cursor, encoding): - query = "SELECT pg_char_to_encoding(%(encoding)s) AS encoding_id;" - cursor.execute(query, {'encoding': encoding}) - return cursor.fetchone()['encoding_id'] - -def get_db_info(cursor, db): - query = """ - SELECT rolname AS owner, - pg_encoding_to_char(encoding) AS encoding, encoding AS encoding_id, - datcollate AS lc_collate, datctype AS lc_ctype - FROM pg_database JOIN pg_roles ON pg_roles.oid = pg_database.datdba - WHERE datname = %(db)s - """ - cursor.execute(query, {'db':db}) - return cursor.fetchone() - -def db_exists(cursor, db): - query = "SELECT * FROM pg_database WHERE datname=%(db)s" - cursor.execute(query, {'db': db}) - return cursor.rowcount == 1 - -def db_delete(cursor, db): - if db_exists(cursor, db): - query = "DROP DATABASE \"%s\"" % db - cursor.execute(query) - return True - else: - return False - -def db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype): - if not db_exists(cursor, db): - if owner: - owner = " OWNER \"%s\"" % owner - if template: - template = " TEMPLATE \"%s\"" % template - if encoding: - encoding = " ENCODING '%s'" % encoding - if lc_collate: - lc_collate = " LC_COLLATE '%s'" % lc_collate - if lc_ctype: - lc_ctype = " LC_CTYPE '%s'" % lc_ctype - query = 'CREATE DATABASE "%s"%s%s%s%s%s' % (db, owner, - template, encoding, - lc_collate, lc_ctype) - cursor.execute(query) - return True - else: - db_info = get_db_info(cursor, db) - if (encoding and - get_encoding_id(cursor, encoding) != db_info['encoding_id']): - raise NotSupportedError( - 'Changing database encoding is not supported. ' - 'Current encoding: %s' % db_info['encoding'] - ) - elif lc_collate and lc_collate != db_info['lc_collate']: - raise NotSupportedError( - 'Changing LC_COLLATE is not supported. 
' - 'Current LC_COLLATE: %s' % db_info['lc_collate'] - ) - elif lc_ctype and lc_ctype != db_info['lc_ctype']: - raise NotSupportedError( - 'Changing LC_CTYPE is not supported.' - 'Current LC_CTYPE: %s' % db_info['lc_ctype'] - ) - elif owner and owner != db_info['owner']: - return set_owner(cursor, db, owner) - else: - return False - -def db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype): - if not db_exists(cursor, db): - return False - else: - db_info = get_db_info(cursor, db) - if (encoding and - get_encoding_id(cursor, encoding) != db_info['encoding_id']): - return False - elif lc_collate and lc_collate != db_info['lc_collate']: - return False - elif lc_ctype and lc_ctype != db_info['lc_ctype']: - return False - elif owner and owner != db_info['owner']: - return False - else: - return True - -# =========================================== -# Module execution. -# - -def main(): - module = AnsibleModule( - argument_spec=dict( - login_user=dict(default="postgres"), - login_password=dict(default=""), - login_host=dict(default=""), - port=dict(default="5432"), - db=dict(required=True, aliases=['name']), - owner=dict(default=""), - template=dict(default=""), - encoding=dict(default=""), - lc_collate=dict(default=""), - lc_ctype=dict(default=""), - state=dict(default="present", choices=["absent", "present"]), - ), - supports_check_mode = True - ) - - if not postgresqldb_found: - module.fail_json(msg="the python psycopg2 module is required") - - db = module.params["db"] - port = module.params["port"] - owner = module.params["owner"] - template = module.params["template"] - encoding = module.params["encoding"] - lc_collate = module.params["lc_collate"] - lc_ctype = module.params["lc_ctype"] - state = module.params["state"] - changed = False - - # To use defaults values, keyword arguments must be absent, so - # check which values are empty and don't include in the **kw - # dictionary - params_map = { - "login_host":"host", - "login_user":"user", - 
"login_password":"password", - "port":"port" - } - kw = dict( (params_map[k], v) for (k, v) in module.params.iteritems() - if k in params_map and v != '' ) - try: - db_connection = psycopg2.connect(database="template1", **kw) - # Enable autocommit so we can create databases - if psycopg2.__version__ >= '2.4.2': - db_connection.autocommit = True - else: - db_connection.set_isolation_level(psycopg2 - .extensions - .ISOLATION_LEVEL_AUTOCOMMIT) - cursor = db_connection.cursor( - cursor_factory=psycopg2.extras.DictCursor) - except Exception, e: - module.fail_json(msg="unable to connect to database: %s" % e) - - try: - if module.check_mode: - if state == "absent": - changed = not db_exists(cursor, db) - elif state == "present": - changed = not db_matches(cursor, db, owner, template, encoding, - lc_collate, lc_ctype) - module.exit_json(changed=changed,db=db) - - if state == "absent": - changed = db_delete(cursor, db) - - elif state == "present": - changed = db_create(cursor, db, owner, template, encoding, - lc_collate, lc_ctype) - except NotSupportedError, e: - module.fail_json(msg=str(e)) - except Exception, e: - module.fail_json(msg="Database query failed: %s" % e) - - module.exit_json(changed=changed, db=db) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/database/postgresql_privs b/library/database/postgresql_privs deleted file mode 100644 index de5fa94fa4..0000000000 --- a/library/database/postgresql_privs +++ /dev/null @@ -1,613 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = """ ---- -module: postgresql_privs -version_added: "1.2" -short_description: Grant or revoke privileges on PostgreSQL database objects. -description: - - Grant or revoke privileges on PostgreSQL database objects. - - This module is basically a wrapper around most of the functionality of - PostgreSQL's GRANT and REVOKE statements with detection of changes - (GRANT/REVOKE I(privs) ON I(type) I(objs) TO/FROM I(roles)) -options: - database: - description: - - Name of database to connect to. - - 'Alias: I(db)' - required: yes - state: - description: - - If C(present), the specified privileges are granted, if C(absent) they - are revoked. - required: no - default: present - choices: [present, absent] - privs: - description: - - Comma separated list of privileges to grant/revoke. - - 'Alias: I(priv)' - required: no - type: - description: - - Type of database object to set privileges on. - required: no - default: table - choices: [table, sequence, function, database, - schema, language, tablespace, group] - objs: - description: - - Comma separated list of database objects to set privileges on. - - If I(type) is C(table) or C(sequence), the special value - C(ALL_IN_SCHEMA) can be provided instead to specify all database - objects of type I(type) in the schema specified via I(schema). (This - also works with PostgreSQL < 9.0.) - - If I(type) is C(database), this parameter can be omitted, in which case - privileges are set for the database specified via I(database). 
- - 'If I(type) is I(function), colons (":") in object names will be - replaced with commas (needed to specify function signatures, see - examples)' - - 'Alias: I(obj)' - required: no - schema: - description: - - Schema that contains the database objects specified via I(objs). - - May only be provided if I(type) is C(table), C(sequence) or - C(function). Defaults to C(public) in these cases. - required: no - roles: - description: - - Comma separated list of role (user/group) names to set permissions for. - - The special value C(PUBLIC) can be provided instead to set permissions - for the implicitly defined PUBLIC group. - - 'Alias: I(role)' - required: yes - grant_option: - description: - - Whether C(role) may grant/revoke the specified privileges/group - memberships to others. - - Set to C(no) to revoke GRANT OPTION, leave unspecified to - make no changes. - - I(grant_option) only has an effect if I(state) is C(present). - - 'Alias: I(admin_option)' - required: no - choices: ['yes', 'no'] - host: - description: - - Database host address. If unspecified, connect via Unix socket. - - 'Alias: I(login_host)' - default: null - required: no - port: - description: - - Database port to connect to. - required: no - default: 5432 - login: - description: - - The username to authenticate with. - - 'Alias: I(login_user)' - default: postgres - password: - description: - - The password to authenticate with. - - 'Alias: I(login_password))' - default: null - required: no -notes: - - Default authentication assumes that postgresql_privs is run by the - C(postgres) user on the remote host. (Ansible's C(user) or C(sudo-user)). - - This module requires Python package I(psycopg2) to be installed on the - remote host. In the default case of the remote host also being the - PostgreSQL server, PostgreSQL has to be installed there as well, obviously. - For Debian/Ubuntu-based systems, install packages I(postgresql) and - I(python-psycopg2). 
- - Parameters that accept comma separated lists (I(privs), I(objs), I(roles)) - have singular alias names (I(priv), I(obj), I(role)). - - To revoke only C(GRANT OPTION) for a specific object, set I(state) to - C(present) and I(grant_option) to C(no) (see examples). - - Note that when revoking privileges from a role R, this role may still have - access via privileges granted to any role R is a member of including - C(PUBLIC). - - Note that when revoking privileges from a role R, you do so as the user - specified via I(login). If R has been granted the same privileges by - another user also, R can still access database objects via these privileges. - - When revoking privileges, C(RESTRICT) is assumed (see PostgreSQL docs). -requirements: [psycopg2] -author: Bernhard Weitzhofer -""" - -EXAMPLES = """ -# On database "library": -# GRANT SELECT, INSERT, UPDATE ON TABLE public.books, public.authors -# TO librarian, reader WITH GRANT OPTION -- postgresql_privs: > - database=library - state=present - privs=SELECT,INSERT,UPDATE - type=table - objs=books,authors - schema=public - roles=librarian,reader - grant_option=yes - -# Same as above leveraging default values: -- postgresql_privs: > - db=library - privs=SELECT,INSERT,UPDATE - objs=books,authors - roles=librarian,reader - grant_option=yes - -# REVOKE GRANT OPTION FOR INSERT ON TABLE books FROM reader -# Note that role "reader" will be *granted* INSERT privilege itself if this -# isn't already the case (since state=present). -- postgresql_privs: > - db=library - state=present - priv=INSERT - obj=books - role=reader - grant_option=no - -# REVOKE INSERT, UPDATE ON ALL TABLES IN SCHEMA public FROM reader -# "public" is the default schema. This also works for PostgreSQL 8.x. 
-- postgresql_privs: > - db=library - state=absent - privs=INSERT,UPDATE - objs=ALL_IN_SCHEMA - role=reader - -# GRANT ALL PRIVILEGES ON SCHEMA public, math TO librarian -- postgresql_privs: > - db=library - privs=ALL - type=schema - objs=public,math - role=librarian - -# GRANT ALL PRIVILEGES ON FUNCTION math.add(int, int) TO librarian, reader -# Note the separation of arguments with colons. -- postgresql_privs: > - db=library - privs=ALL - type=function - obj=add(int:int) - schema=math - roles=librarian,reader - -# GRANT librarian, reader TO alice, bob WITH ADMIN OPTION -# Note that group role memberships apply cluster-wide and therefore are not -# restricted to database "library" here. -- postgresql_privs: > - db=library - type=group - objs=librarian,reader - roles=alice,bob - admin_option=yes - -# GRANT ALL PRIVILEGES ON DATABASE library TO librarian -# Note that here "db=postgres" specifies the database to connect to, not the -# database to grant privileges on (which is specified via the "objs" param) -- postgresql_privs: > - db=postgres - privs=ALL - type=database - obj=library - role=librarian - -# GRANT ALL PRIVILEGES ON DATABASE library TO librarian -# If objs is omitted for type "database", it defaults to the database -# to which the connection is established -- postgresql_privs: > - db=library - privs=ALL - type=database - role=librarian -""" - -try: - import psycopg2 - import psycopg2.extensions -except ImportError: - psycopg2 = None - - -class Error(Exception): - pass - - -# We don't have functools.partial in Python < 2.5 -def partial(f, *args, **kwargs): - """Partial function application""" - def g(*g_args, **g_kwargs): - new_kwargs = kwargs.copy() - new_kwargs.update(g_kwargs) - return f(*(args + g_args), **g_kwargs) - g.f = f - g.args = args - g.kwargs = kwargs - return g - - -class Connection(object): - """Wrapper around a psycopg2 connection with some convenience methods""" - - def __init__(self, params): - self.database = params.database - # To 
use defaults values, keyword arguments must be absent, so - # check which values are empty and don't include in the **kw - # dictionary - params_map = { - "host":"host", - "login":"user", - "password":"password", - "port":"port", - "database": "database", - } - kw = dict( (params_map[k], getattr(params, k)) for k in params_map - if getattr(params, k) != '' ) - self.connection = psycopg2.connect(**kw) - self.cursor = self.connection.cursor() - - - def commit(self): - self.connection.commit() - - - def rollback(self): - self.connection.rollback() - - @property - def encoding(self): - """Connection encoding in Python-compatible form""" - return psycopg2.extensions.encodings[self.connection.encoding] - - - ### Methods for querying database objects - - # PostgreSQL < 9.0 doesn't support "ALL TABLES IN SCHEMA schema"-like - # phrases in GRANT or REVOKE statements, therefore alternative methods are - # provided here. - - def schema_exists(self, schema): - query = """SELECT count(*) - FROM pg_catalog.pg_namespace WHERE nspname = %s""" - self.cursor.execute(query, (schema,)) - return self.cursor.fetchone()[0] > 0 - - - def get_all_tables_in_schema(self, schema): - if not self.schema_exists(schema): - raise Error('Schema "%s" does not exist.' % schema) - query = """SELECT relname - FROM pg_catalog.pg_class c - JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace - WHERE nspname = %s AND relkind = 'r'""" - self.cursor.execute(query, (schema,)) - return [t[0] for t in self.cursor.fetchall()] - - - def get_all_sequences_in_schema(self, schema): - if not self.schema_exists(schema): - raise Error('Schema "%s" does not exist.' 
% schema) - query = """SELECT relname - FROM pg_catalog.pg_class c - JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace - WHERE nspname = %s AND relkind = 'S'""" - self.cursor.execute(query, (schema,)) - return [t[0] for t in self.cursor.fetchall()] - - - - ### Methods for getting access control lists and group membership info - - # To determine whether anything has changed after granting/revoking - # privileges, we compare the access control lists of the specified database - # objects before and afterwards. Python's list/string comparison should - # suffice for change detection, we should not actually have to parse ACLs. - # The same should apply to group membership information. - - def get_table_acls(self, schema, tables): - query = """SELECT relacl - FROM pg_catalog.pg_class c - JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace - WHERE nspname = %s AND relkind = 'r' AND relname = ANY (%s) - ORDER BY relname""" - self.cursor.execute(query, (schema, tables)) - return [t[0] for t in self.cursor.fetchall()] - - - def get_sequence_acls(self, schema, sequences): - query = """SELECT relacl - FROM pg_catalog.pg_class c - JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace - WHERE nspname = %s AND relkind = 'S' AND relname = ANY (%s) - ORDER BY relname""" - self.cursor.execute(query, (schema, sequences)) - return [t[0] for t in self.cursor.fetchall()] - - - def get_function_acls(self, schema, function_signatures): - funcnames = [f.split('(', 1)[0] for f in function_signatures] - query = """SELECT proacl - FROM pg_catalog.pg_proc p - JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace - WHERE nspname = %s AND proname = ANY (%s) - ORDER BY proname, proargtypes""" - self.cursor.execute(query, (schema, funcnames)) - return [t[0] for t in self.cursor.fetchall()] - - - def get_schema_acls(self, schemas): - query = """SELECT nspacl FROM pg_catalog.pg_namespace - WHERE nspname = ANY (%s) ORDER BY nspname""" - self.cursor.execute(query, (schemas,)) - return 
[t[0] for t in self.cursor.fetchall()] - - - def get_language_acls(self, languages): - query = """SELECT lanacl FROM pg_catalog.pg_language - WHERE lanname = ANY (%s) ORDER BY lanname""" - self.cursor.execute(query, (languages,)) - return [t[0] for t in self.cursor.fetchall()] - - - def get_tablespace_acls(self, tablespaces): - query = """SELECT spcacl FROM pg_catalog.pg_tablespace - WHERE spcname = ANY (%s) ORDER BY spcname""" - self.cursor.execute(query, (tablespaces,)) - return [t[0] for t in self.cursor.fetchall()] - - - def get_database_acls(self, databases): - query = """SELECT datacl FROM pg_catalog.pg_database - WHERE datname = ANY (%s) ORDER BY datname""" - self.cursor.execute(query, (databases,)) - return [t[0] for t in self.cursor.fetchall()] - - - def get_group_memberships(self, groups): - query = """SELECT roleid, grantor, member, admin_option - FROM pg_catalog.pg_auth_members am - JOIN pg_catalog.pg_roles r ON r.oid = am.roleid - WHERE r.rolname = ANY(%s) - ORDER BY roleid, grantor, member""" - self.cursor.execute(query, (groups,)) - return self.cursor.fetchall() - - - ### Manipulating privileges - - def manipulate_privs(self, obj_type, privs, objs, roles, - state, grant_option, schema_qualifier=None): - """Manipulate database object privileges. - - :param obj_type: Type of database object to grant/revoke - privileges for. - :param privs: Either a list of privileges to grant/revoke - or None if type is "group". - :param objs: List of database objects to grant/revoke - privileges for. - :param roles: Either a list of role names or "PUBLIC" - for the implicitly defined "PUBLIC" group - :param state: "present" to grant privileges, "absent" to revoke. - :param grant_option: Only for state "present": If True, set - grant/admin option. If False, revoke it. - If None, don't change grant option. - :param schema_qualifier: Some object types ("TABLE", "SEQUENCE", - "FUNCTION") must be qualified by schema. - Ignored for other Types. 
- """ - # get_status: function to get current status - if obj_type == 'table': - get_status = partial(self.get_table_acls, schema_qualifier) - elif obj_type == 'sequence': - get_status = partial(self.get_sequence_acls, schema_qualifier) - elif obj_type == 'function': - get_status = partial(self.get_function_acls, schema_qualifier) - elif obj_type == 'schema': - get_status = self.get_schema_acls - elif obj_type == 'language': - get_status = self.get_language_acls - elif obj_type == 'tablespace': - get_status = self.get_tablespace_acls - elif obj_type == 'database': - get_status = self.get_database_acls - elif obj_type == 'group': - get_status = self.get_group_memberships - else: - raise Error('Unsupported database object type "%s".' % obj_type) - - # Return False (nothing has changed) if there are no objs to work on. - if not objs: - return False - - # obj_ids: quoted db object identifiers (sometimes schema-qualified) - if obj_type == 'function': - obj_ids = [] - for obj in objs: - try: - f, args = obj.split('(', 1) - except: - raise Error('Illegal function signature: "%s".' % obj) - obj_ids.append('"%s"."%s"(%s' % (schema_qualifier, f, args)) - elif obj_type in ['table', 'sequence']: - obj_ids = ['"%s"."%s"' % (schema_qualifier, o) for o in objs] - else: - obj_ids = ['"%s"' % o for o in objs] - - # set_what: SQL-fragment specifying what to set for the target roless: - # Either group membership or privileges on objects of a certain type. 
- if obj_type == 'group': - set_what = ','.join(obj_ids) - else: - set_what = '%s ON %s %s' % (','.join(privs), obj_type, - ','.join(obj_ids)) - - # for_whom: SQL-fragment specifying for whom to set the above - if roles == 'PUBLIC': - for_whom = 'PUBLIC' - else: - for_whom = ','.join(['"%s"' % r for r in roles]) - - status_before = get_status(objs) - if state == 'present': - if grant_option: - if obj_type == 'group': - query = 'GRANT %s TO %s WITH ADMIN OPTION' - else: - query = 'GRANT %s TO %s WITH GRANT OPTION' - else: - query = 'GRANT %s TO %s' - self.cursor.execute(query % (set_what, for_whom)) - - # Only revoke GRANT/ADMIN OPTION if grant_option actually is False. - if grant_option == False: - if obj_type == 'group': - query = 'REVOKE ADMIN OPTION FOR %s FROM %s' - else: - query = 'REVOKE GRANT OPTION FOR %s FROM %s' - self.cursor.execute(query % (set_what, for_whom)) - else: - query = 'REVOKE %s FROM %s' - self.cursor.execute(query % (set_what, for_whom)) - status_after = get_status(objs) - return status_before != status_after - - -def main(): - module = AnsibleModule( - argument_spec = dict( - database=dict(required=True, aliases=['db']), - state=dict(default='present', choices=['present', 'absent']), - privs=dict(required=False, aliases=['priv']), - type=dict(default='table', - choices=['table', - 'sequence', - 'function', - 'database', - 'schema', - 'language', - 'tablespace', - 'group']), - objs=dict(required=False, aliases=['obj']), - schema=dict(required=False), - roles=dict(required=True, aliases=['role']), - grant_option=dict(required=False, type='bool', - aliases=['admin_option']), - host=dict(default='', aliases=['login_host']), - port=dict(type='int', default=5432), - login=dict(default='postgres', aliases=['login_user']), - password=dict(default='', aliases=['login_password']) - ), - supports_check_mode = True - ) - - # Create type object as namespace for module params - p = type('Params', (), module.params) - - # param "schema": default, allowed 
depends on param "type" - if p.type in ['table', 'sequence', 'function']: - p.schema = p.schema or 'public' - elif p.schema: - module.fail_json(msg='Argument "schema" is not allowed ' - 'for type "%s".' % p.type) - - # param "objs": default, required depends on param "type" - if p.type == 'database': - p.objs = p.objs or p.database - elif not p.objs: - module.fail_json(msg='Argument "objs" is required ' - 'for type "%s".' % p.type) - - # param "privs": allowed, required depends on param "type" - if p.type == 'group': - if p.privs: - module.fail_json(msg='Argument "privs" is not allowed ' - 'for type "group".') - elif not p.privs: - module.fail_json(msg='Argument "privs" is required ' - 'for type "%s".' % p.type) - - # Connect to Database - if not psycopg2: - module.fail_json(msg='Python module "psycopg2" must be installed.') - try: - conn = Connection(p) - except psycopg2.Error, e: - module.fail_json(msg='Could not connect to database: %s' % e) - - try: - # privs - if p.privs: - privs = p.privs.split(',') - else: - privs = None - - # objs: - if p.type == 'table' and p.objs == 'ALL_IN_SCHEMA': - objs = conn.get_all_tables_in_schema(p.schema) - elif p.type == 'sequence' and p.objs == 'ALL_IN_SCHEMA': - objs = conn.get_all_sequences_in_schema(p.schema) - else: - objs = p.objs.split(',') - - # function signatures are encoded using ':' to separate args - if p.type == 'function': - objs = [obj.replace(':', ',') for obj in objs] - - # roles - if p.roles == 'PUBLIC': - roles = 'PUBLIC' - else: - roles = p.roles.split(',') - - changed = conn.manipulate_privs( - obj_type = p.type, - privs = privs, - objs = objs, - roles = roles, - state = p.state, - grant_option = p.grant_option, - schema_qualifier=p.schema - ) - - except Error, e: - conn.rollback() - module.fail_json(msg=e.message) - - except psycopg2.Error, e: - conn.rollback() - # psycopg2 errors come in connection encoding, reencode - msg = e.message.decode(conn.encoding).encode(sys.getdefaultencoding(), - 'replace') - 
module.fail_json(msg=msg) - - if module.check_mode: - conn.rollback() - else: - conn.commit() - module.exit_json(changed=changed) - - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/database/postgresql_user b/library/database/postgresql_user deleted file mode 100644 index 8af8c45d0c..0000000000 --- a/library/database/postgresql_user +++ /dev/null @@ -1,526 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: postgresql_user -short_description: Adds or removes a users (roles) from a PostgreSQL database. -description: - - Add or remove PostgreSQL users (roles) from a remote host and, optionally, - grant the users access to an existing database or tables. - - The fundamental function of the module is to create, or delete, roles from - a PostgreSQL cluster. Privilege assignment, or removal, is an optional - step, which works on one database at a time. This allows for the module to - be called several times in the same module to modify the permissions on - different databases, or to grant permissions to already existing users. - - A user cannot be removed until all the privileges have been stripped from - the user. In such situation, if the module tries to remove the user it - will fail. 
To avoid this from happening the fail_on_user option signals - the module to try to remove the user, but if not possible keep going; the - module will report if changes happened and separately if the user was - removed or not. -version_added: "0.6" -options: - name: - description: - - name of the user (role) to add or remove - required: true - default: null - password: - description: - - set the user's password, before 1.4 this was required. - - "When passing an encrypted password, the encrypted parameter must also be true, and it must be generated with the format C('str[\\"md5\\"] + md5[ password + username ]'), resulting in a total of 35 characters. An easy way to do this is: C(echo \\"md5`echo -n \\"verysecretpasswordJOE\\" | md5`\\")." - required: false - default: null - db: - description: - - name of database where permissions will be granted - required: false - default: null - fail_on_user: - description: - - if C(yes), fail when user can't be removed. Otherwise just log and continue - required: false - default: 'yes' - choices: [ "yes", "no" ] - port: - description: - - Database port to connect to. - required: false - default: 5432 - login_user: - description: - - User (role) used to authenticate with PostgreSQL - required: false - default: postgres - login_password: - description: - - Password used to authenticate with PostgreSQL - required: false - default: null - login_host: - description: - - Host running PostgreSQL. 
- required: false - default: localhost - priv: - description: - - "PostgreSQL privileges string in the format: C(table:priv1,priv2)" - required: false - default: null - role_attr_flags: - description: - - "PostgreSQL role attributes string in the format: CREATEDB,CREATEROLE,SUPERUSER" - required: false - default: null - choices: [ "[NO]SUPERUSER","[NO]CREATEROLE", "[NO]CREATEUSER", "[NO]CREATEDB", - "[NO]INHERIT", "[NO]LOGIN", "[NO]REPLICATION" ] - state: - description: - - The user (role) state - required: false - default: present - choices: [ "present", "absent" ] - encrypted: - description: - - denotes if the password is already encrypted. boolean. - required: false - default: false - version_added: '1.4' - expires: - description: - - sets the user's password expiration. - required: false - default: null - version_added: '1.4' -notes: - - The default authentication assumes that you are either logging in as or - sudo'ing to the postgres account on the host. - - This module uses psycopg2, a Python PostgreSQL database adapter. You must - ensure that psycopg2 is installed on the host before using this module. If - the remote host is the PostgreSQL server (which is the default case), then - PostgreSQL must also be installed on the remote host. For Ubuntu-based - systems, install the postgresql, libpq-dev, and python-psycopg2 packages - on the remote host before using this module. - - If you specify PUBLIC as the user, then the privilege changes will apply - to all users. You may not specify password or role_attr_flags when the - PUBLIC user is specified. 
-requirements: [ psycopg2 ] -author: Lorin Hochstein -''' - -EXAMPLES = ''' -# Create django user and grant access to database and products table -- postgresql_user: db=acme name=django password=ceec4eif7ya priv=CONNECT/products:ALL - -# Create rails user, grant privilege to create other databases and demote rails from super user status -- postgresql_user: name=rails password=secret role_attr_flags=CREATEDB,NOSUPERUSER - -# Remove test user privileges from acme -- postgresql_user: db=acme name=test priv=ALL/products:ALL state=absent fail_on_user=no - -# Remove test user from test database and the cluster -- postgresql_user: db=test name=test priv=ALL state=absent - -# Example privileges string format -INSERT,UPDATE/table:SELECT/anothertable:ALL - -# Remove an existing user's password -- postgresql_user: db=test user=test password=NULL -''' - -import re - -try: - import psycopg2 -except ImportError: - postgresqldb_found = False -else: - postgresqldb_found = True - -# =========================================== -# PostgreSQL module specific support methods. 
-# - - -def user_exists(cursor, user): - # The PUBLIC user is a special case that is always there - if user == 'PUBLIC': - return True - query = "SELECT rolname FROM pg_roles WHERE rolname=%(user)s" - cursor.execute(query, {'user': user}) - return cursor.rowcount > 0 - - -def user_add(cursor, user, password, role_attr_flags, encrypted, expires): - """Create a new database user (role).""" - query_password_data = dict() - query = 'CREATE USER "%(user)s"' % { "user": user} - if password is not None: - query = query + " WITH %(crypt)s" % { "crypt": encrypted } - query = query + " PASSWORD %(password)s" - query_password_data.update(password=password) - if expires is not None: - query = query + " VALID UNTIL '%(expires)s'" % { "expires": expires } - query = query + " " + role_attr_flags - cursor.execute(query, query_password_data) - return True - -def user_alter(cursor, module, user, password, role_attr_flags, encrypted, expires): - """Change user password and/or attributes. Return True if changed, False otherwise.""" - changed = False - - if user == 'PUBLIC': - if password is not None: - module.fail_json(msg="cannot change the password for PUBLIC user") - elif role_attr_flags != '': - module.fail_json(msg="cannot change the role_attr_flags for PUBLIC user") - else: - return False - - # Handle passwords. - if password is not None or role_attr_flags is not None: - # Select password and all flag-like columns in order to verify changes. - query_password_data = dict() - select = "SELECT * FROM pg_authid where rolname=%(user)s" - cursor.execute(select, {"user": user}) - # Grab current role attributes. 
- current_role_attrs = cursor.fetchone() - - alter = 'ALTER USER "%(user)s"' % {"user": user} - if password is not None: - query_password_data.update(password=password) - alter = alter + " WITH %(crypt)s" % {"crypt": encrypted} - alter = alter + " PASSWORD %(password)s" - alter = alter + " %(flags)s" % {'flags': role_attr_flags} - elif role_attr_flags: - alter = alter + ' WITH ' + role_attr_flags - if expires is not None: - alter = alter + " VALID UNTIL '%(expires)s'" % { "exipres": expires } - - try: - cursor.execute(alter, query_password_data) - except psycopg2.InternalError, e: - if e.pgcode == '25006': - # Handle errors due to read-only transactions indicated by pgcode 25006 - # ERROR: cannot execute ALTER ROLE in a read-only transaction - changed = False - module.fail_json(msg=e.pgerror) - return changed - else: - raise psycopg2.InternalError, e - - # Grab new role attributes. - cursor.execute(select, {"user": user}) - new_role_attrs = cursor.fetchone() - - # Detect any differences between current_ and new_role_attrs. - for i in range(len(current_role_attrs)): - if current_role_attrs[i] != new_role_attrs[i]: - changed = True - - return changed - -def user_delete(cursor, user): - """Try to remove a user. Returns True if successful otherwise False""" - cursor.execute("SAVEPOINT ansible_pgsql_user_delete") - try: - cursor.execute("DROP USER \"%s\"" % user) - except: - cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_user_delete") - cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete") - return False - - cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete") - return True - -def has_table_privilege(cursor, user, table, priv): - query = 'SELECT has_table_privilege(%s, %s, %s)' - cursor.execute(query, (user, table, priv)) - return cursor.fetchone()[0] - -def get_table_privileges(cursor, user, table): - if '.' 
in table: - schema, table = table.split('.', 1) - else: - schema = 'public' - query = '''SELECT privilege_type FROM information_schema.role_table_grants - WHERE grantee=%s AND table_name=%s AND table_schema=%s''' - cursor.execute(query, (user, table, schema)) - return set([x[0] for x in cursor.fetchall()]) - - -def quote_pg_identifier(identifier): - """ - quote postgresql identifiers involving zero or more namespaces - """ - - if '"' in identifier: - # the user has supplied their own quoting. we have to hope they're - # doing it right. Maybe they have an unfortunately named table - # containing a period in the name, such as: "public"."users.2013" - return identifier - - tokens = identifier.strip().split(".") - quoted_tokens = [] - for token in tokens: - quoted_tokens.append('"%s"' % (token, )) - return ".".join(quoted_tokens) - -def grant_table_privilege(cursor, user, table, priv): - prev_priv = get_table_privileges(cursor, user, table) - query = 'GRANT %s ON TABLE %s TO %s' % ( - priv, quote_pg_identifier(table), quote_pg_identifier(user), ) - cursor.execute(query) - curr_priv = get_table_privileges(cursor, user, table) - return len(curr_priv) > len(prev_priv) - -def revoke_table_privilege(cursor, user, table, priv): - prev_priv = get_table_privileges(cursor, user, table) - query = 'REVOKE %s ON TABLE %s FROM %s' % ( - priv, quote_pg_identifier(table), quote_pg_identifier(user), ) - cursor.execute(query) - curr_priv = get_table_privileges(cursor, user, table) - return len(curr_priv) < len(prev_priv) - - -def get_database_privileges(cursor, user, db): - priv_map = { - 'C':'CREATE', - 'T':'TEMPORARY', - 'c':'CONNECT', - } - query = 'SELECT datacl FROM pg_database WHERE datname = %s' - cursor.execute(query, (db,)) - datacl = cursor.fetchone()[0] - if datacl is None: - return [] - r = re.search('%s=(C?T?c?)/[a-z]+\,?' 
% user, datacl) - if r is None: - return [] - o = [] - for v in r.group(1): - o.append(priv_map[v]) - return o - -def has_database_privilege(cursor, user, db, priv): - query = 'SELECT has_database_privilege(%s, %s, %s)' - cursor.execute(query, (user, db, priv)) - return cursor.fetchone()[0] - -def grant_database_privilege(cursor, user, db, priv): - prev_priv = get_database_privileges(cursor, user, db) - if user == "PUBLIC": - query = 'GRANT %s ON DATABASE \"%s\" TO PUBLIC' % (priv, db) - else: - query = 'GRANT %s ON DATABASE \"%s\" TO \"%s\"' % (priv, db, user) - cursor.execute(query) - curr_priv = get_database_privileges(cursor, user, db) - return len(curr_priv) > len(prev_priv) - -def revoke_database_privilege(cursor, user, db, priv): - prev_priv = get_database_privileges(cursor, user, db) - if user == "PUBLIC": - query = 'REVOKE %s ON DATABASE \"%s\" FROM PUBLIC' % (priv, db) - else: - query = 'REVOKE %s ON DATABASE \"%s\" FROM \"%s\"' % (priv, db, user) - cursor.execute(query) - curr_priv = get_database_privileges(cursor, user, db) - return len(curr_priv) < len(prev_priv) - -def revoke_privileges(cursor, user, privs): - if privs is None: - return False - - changed = False - for type_ in privs: - revoke_func = { - 'table':revoke_table_privilege, - 'database':revoke_database_privilege - }[type_] - for name, privileges in privs[type_].iteritems(): - for privilege in privileges: - changed = revoke_func(cursor, user, name, privilege)\ - or changed - - return changed - -def grant_privileges(cursor, user, privs): - if privs is None: - return False - - changed = False - for type_ in privs: - grant_func = { - 'table':grant_table_privilege, - 'database':grant_database_privilege - }[type_] - for name, privileges in privs[type_].iteritems(): - for privilege in privileges: - changed = grant_func(cursor, user, name, privilege)\ - or changed - - return changed - -def parse_role_attrs(role_attr_flags): - """ - Parse role attributes string for user creation. 
- Format: - - attributes[,attributes,...] - - Where: - - attributes := CREATEDB,CREATEROLE,NOSUPERUSER,... - """ - if ',' not in role_attr_flags: - return role_attr_flags - flag_set = role_attr_flags.split(",") - o_flags = " ".join(flag_set) - return o_flags - -def parse_privs(privs, db): - """ - Parse privilege string to determine permissions for database db. - Format: - - privileges[/privileges/...] - - Where: - - privileges := DATABASE_PRIVILEGES[,DATABASE_PRIVILEGES,...] | - TABLE_NAME:TABLE_PRIVILEGES[,TABLE_PRIVILEGES,...] - """ - if privs is None: - return privs - - o_privs = { - 'database':{}, - 'table':{} - } - for token in privs.split('/'): - if ':' not in token: - type_ = 'database' - name = db - priv_set = set(x.strip() for x in token.split(',')) - else: - type_ = 'table' - name, privileges = token.split(':', 1) - priv_set = set(x.strip() for x in privileges.split(',')) - - o_privs[type_][name] = priv_set - - return o_privs - -# =========================================== -# Module execution. 
-# - -def main(): - module = AnsibleModule( - argument_spec=dict( - login_user=dict(default="postgres"), - login_password=dict(default=""), - login_host=dict(default=""), - user=dict(required=True, aliases=['name']), - password=dict(default=None), - state=dict(default="present", choices=["absent", "present"]), - priv=dict(default=None), - db=dict(default=''), - port=dict(default='5432'), - fail_on_user=dict(type='bool', default='yes'), - role_attr_flags=dict(default=''), - encrypted=dict(type='bool', default='no'), - expires=dict(default=None) - ), - supports_check_mode = True - ) - - user = module.params["user"] - password = module.params["password"] - state = module.params["state"] - fail_on_user = module.params["fail_on_user"] - db = module.params["db"] - if db == '' and module.params["priv"] is not None: - module.fail_json(msg="privileges require a database to be specified") - privs = parse_privs(module.params["priv"], db) - port = module.params["port"] - role_attr_flags = parse_role_attrs(module.params["role_attr_flags"]) - if module.params["encrypted"]: - encrypted = "ENCRYPTED" - else: - encrypted = "UNENCRYPTED" - expires = module.params["expires"] - - if not postgresqldb_found: - module.fail_json(msg="the python psycopg2 module is required") - - # To use defaults values, keyword arguments must be absent, so - # check which values are empty and don't include in the **kw - # dictionary - params_map = { - "login_host":"host", - "login_user":"user", - "login_password":"password", - "port":"port", - "db":"database" - } - kw = dict( (params_map[k], v) for (k, v) in module.params.iteritems() - if k in params_map and v != "" ) - try: - db_connection = psycopg2.connect(**kw) - cursor = db_connection.cursor() - except Exception, e: - module.fail_json(msg="unable to connect to database: %s" % e) - - kw = dict(user=user) - changed = False - user_removed = False - - if state == "present": - if user_exists(cursor, user): - changed = user_alter(cursor, module, user, 
password, role_attr_flags, encrypted, expires) - else: - changed = user_add(cursor, user, password, role_attr_flags, encrypted, expires) - changed = grant_privileges(cursor, user, privs) or changed - else: - if user_exists(cursor, user): - if module.check_mode: - changed = True - kw['user_removed'] = True - else: - changed = revoke_privileges(cursor, user, privs) - user_removed = user_delete(cursor, user) - changed = changed or user_removed - if fail_on_user and not user_removed: - msg = "unable to remove user" - module.fail_json(msg=msg) - kw['user_removed'] = user_removed - - if changed: - if module.check_mode: - db_connection.rollback() - else: - db_connection.commit() - - kw['changed'] = changed - module.exit_json(**kw) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/database/redis b/library/database/redis deleted file mode 100644 index eb9654631e..0000000000 --- a/library/database/redis +++ /dev/null @@ -1,329 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: redis -short_description: Various redis commands, slave and flush -description: - - Unified utility to interact with redis instances. - 'slave' sets a redis instance in slave or master mode. - 'flush' flushes all the instance or a specified db. 
- 'config' (new in 1.6), ensures a configuration setting on an instance. -version_added: "1.3" -options: - command: - description: - - The selected redis command - required: true - default: null - choices: [ "slave", "flush", "config" ] - login_password: - description: - - The password used to authenticate with (usually not used) - required: false - default: null - login_host: - description: - - The host running the database - required: false - default: localhost - login_port: - description: - - The port to connect to - required: false - default: 6379 - master_host: - description: - - The host of the master instance [slave command] - required: false - default: null - master_port: - description: - - The port of the master instance [slave command] - required: false - default: null - slave_mode: - description: - - the mode of the redis instance [slave command] - required: false - default: slave - choices: [ "master", "slave" ] - db: - description: - - The database to flush (used in db mode) [flush command] - required: false - default: null - flush_mode: - description: - - Type of flush (all the dbs in a redis instance or a specific one) - [flush command] - required: false - default: all - choices: [ "all", "db" ] - name: - version_added: 1.6 - description: - - A redis config key. - required: false - default: null - value: - version_added: 1.6 - description: - - A redis config value. - required: false - default: null - - -notes: - - Requires the redis-py Python package on the remote host. You can - install it with pip (pip install redis) or with a package manager. 
- https://github.com/andymccurdy/redis-py - - If the redis master instance we are making slave of is password protected - this needs to be in the redis.conf in the masterauth variable - -requirements: [ redis ] -author: Xabier Larrakoetxea -''' - -EXAMPLES = ''' -# Set local redis instance to be slave of melee.island on port 6377 -- redis: command=slave master_host=melee.island master_port=6377 - -# Deactivate slave mode -- redis: command=slave slave_mode=master - -# Flush all the redis db -- redis: command=flush flush_mode=all - -# Flush only one db in a redis instance -- redis: command=flush db=1 flush_mode=db - -# Configure local redis to have 10000 max clients -- redis: command=config name=maxclients value=10000 - -# Configure local redis to have lua time limit of 100 ms -- redis: command=config name=lua-time-limit value=100 -''' - -try: - import redis -except ImportError: - redis_found = False -else: - redis_found = True - - -# =========================================== -# Redis module specific support methods. -# - -def set_slave_mode(client, master_host, master_port): - try: - return client.slaveof(master_host, master_port) - except Exception: - return False - - -def set_master_mode(client): - try: - return client.slaveof() - except Exception: - return False - - -def flush(client, db=None): - try: - if type(db) != int: - return client.flushall() - else: - # The passed client has been connected to the database already - return client.flushdb() - except Exception: - return False - - -# =========================================== -# Module execution. 
-# - -def main(): - module = AnsibleModule( - argument_spec = dict( - command=dict(default=None, choices=['slave', 'flush', 'config']), - login_password=dict(default=None), - login_host=dict(default='localhost'), - login_port=dict(default='6379'), - master_host=dict(default=None), - master_port=dict(default=None), - slave_mode=dict(default='slave', choices=['master', 'slave']), - db=dict(default=None), - flush_mode=dict(default='all', choices=['all', 'db']), - name=dict(default=None), - value=dict(default=None) - ), - supports_check_mode = True - ) - - if not redis_found: - module.fail_json(msg="python redis module is required") - - login_password = module.params['login_password'] - login_host = module.params['login_host'] - login_port = int(module.params['login_port']) - command = module.params['command'] - - # Slave Command section ----------- - if command == "slave": - master_host = module.params['master_host'] - master_port = module.params['master_port'] - try: - master_port = int(module.params['master_port']) - except Exception: - pass - mode = module.params['slave_mode'] - - #Check if we have all the data - if mode == "slave": # Only need data if we want to be slave - if not master_host: - module.fail_json( - msg='In slave mode master host must be provided') - - if not master_port: - module.fail_json( - msg='In slave mode master port must be provided') - - #Connect and check - r = redis.StrictRedis(host=login_host, - port=login_port, - password=login_password) - try: - r.ping() - except Exception, e: - module.fail_json(msg="unable to connect to database: %s" % e) - - #Check if we are already in the mode that we want - info = r.info() - if mode == "master" and info["role"] == "master": - module.exit_json(changed=False, mode=mode) - - elif mode == "slave" and\ - info["role"] == "slave" and\ - info["master_host"] == master_host and\ - info["master_port"] == master_port: - status = { - 'status': mode, - 'master_host': master_host, - 'master_port': master_port, - 
} - module.exit_json(changed=False, mode=status) - else: - # Do the stuff - # (Check Check_mode before commands so the commands aren't evaluated - # if not necessary) - if mode == "slave": - if module.check_mode or\ - set_slave_mode(r, master_host, master_port): - info = r.info() - status = { - 'status': mode, - 'master_host': master_host, - 'master_port': master_port, - } - module.exit_json(changed=True, mode=status) - else: - module.fail_json(msg='Unable to set slave mode') - - else: - if module.check_mode or set_master_mode(r): - module.exit_json(changed=True, mode=mode) - else: - module.fail_json(msg='Unable to set master mode') - - # flush Command section ----------- - elif command == "flush": - try: - db = int(module.params['db']) - except Exception: - db = 0 - mode = module.params['flush_mode'] - - #Check if we have all the data - if mode == "db": - if type(db) != int: - module.fail_json( - msg="In db mode the db number must be provided") - - #Connect and check - r = redis.StrictRedis(host=login_host, - port=login_port, - password=login_password, - db=db) - try: - r.ping() - except Exception, e: - module.fail_json(msg="unable to connect to database: %s" % e) - - # Do the stuff - # (Check Check_mode before commands so the commands aren't evaluated - # if not necessary) - if mode == "all": - if module.check_mode or flush(r): - module.exit_json(changed=True, flushed=True) - else: # Flush never fails :) - module.fail_json(msg="Unable to flush all databases") - - else: - if module.check_mode or flush(r, db): - module.exit_json(changed=True, flushed=True, db=db) - else: # Flush never fails :) - module.fail_json(msg="Unable to flush '%d' database" % db) - elif command == 'config': - name = module.params['name'] - value = module.params['value'] - - r = redis.StrictRedis(host=login_host, - port=login_port, - password=login_password) - - try: - r.ping() - except Exception, e: - module.fail_json(msg="unable to connect to database: %s" % e) - - - try: - old_value = 
r.config_get(name)[name] - except Exception, e: - module.fail_json(msg="unable to read config: %s" % e) - changed = old_value != value - - if module.check_mode or not changed: - module.exit_json(changed=changed, name=name, value=value) - else: - try: - r.config_set(name, value) - except Exception, e: - module.fail_json(msg="unable to write config: %s" % e) - module.exit_json(changed=changed, name=name, value=value) - else: - module.fail_json(msg='A valid command must be provided') - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/database/riak b/library/database/riak deleted file mode 100644 index b30e7dc485..0000000000 --- a/library/database/riak +++ /dev/null @@ -1,255 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, James Martin , Drew Kerrigan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# -DOCUMENTATION = ''' ---- -module: riak -short_description: This module handles some common Riak operations -description: - - This module can be used to join nodes to a cluster, check - the status of the cluster. -version_added: "1.2" -options: - command: - description: - - The command you would like to perform against the cluster. 
- required: false - default: null - aliases: [] - choices: ['ping', 'kv_test', 'join', 'plan', 'commit'] - config_dir: - description: - - The path to the riak configuration directory - required: false - default: /etc/riak - aliases: [] - http_conn: - description: - - The ip address and port that is listening for Riak HTTP queries - required: false - default: 127.0.0.1:8098 - aliases: [] - target_node: - description: - - The target node for certain operations (join, ping) - required: false - default: riak@127.0.0.1 - aliases: [] - wait_for_handoffs: - description: - - Number of seconds to wait for handoffs to complete. - required: false - default: null - aliases: [] - type: 'int' - wait_for_ring: - description: - - Number of seconds to wait for all nodes to agree on the ring. - required: false - default: null - aliases: [] - type: 'int' - wait_for_service: - description: - - Waits for a riak service to come online before continuing. - required: false - default: None - aliases: [] - choices: ['kv'] - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - required: false - default: 'yes' - choices: ['yes', 'no'] - version_added: 1.5.1 -''' - -EXAMPLES = ''' -# Join's a Riak node to another node -- riak: command=join target_node=riak@10.1.1.1 - -# Wait for handoffs to finish. Use with async and poll. 
-- riak: wait_for_handoffs=yes - -# Wait for riak_kv service to startup -- riak: wait_for_service=kv -''' - -import urllib2 -import time -import socket -import sys -try: - import json -except ImportError: - import simplejson as json - - -def ring_check(module, riak_admin_bin): - cmd = '%s ringready' % riak_admin_bin - rc, out, err = module.run_command(cmd) - if rc == 0 and 'TRUE All nodes agree on the ring' in out: - return True - else: - return False - -def main(): - - module = AnsibleModule( - argument_spec=dict( - command=dict(required=False, default=None, choices=[ - 'ping', 'kv_test', 'join', 'plan', 'commit']), - config_dir=dict(default='/etc/riak'), - http_conn=dict(required=False, default='127.0.0.1:8098'), - target_node=dict(default='riak@127.0.0.1', required=False), - wait_for_handoffs=dict(default=False, type='int'), - wait_for_ring=dict(default=False, type='int'), - wait_for_service=dict( - required=False, default=None, choices=['kv']), - validate_certs = dict(default='yes', type='bool')) - ) - - - command = module.params.get('command') - config_dir = module.params.get('config_dir') - http_conn = module.params.get('http_conn') - target_node = module.params.get('target_node') - wait_for_handoffs = module.params.get('wait_for_handoffs') - wait_for_ring = module.params.get('wait_for_ring') - wait_for_service = module.params.get('wait_for_service') - validate_certs = module.params.get('validate_certs') - - - #make sure riak commands are on the path - riak_bin = module.get_bin_path('riak') - riak_admin_bin = module.get_bin_path('riak-admin') - - timeout = time.time() + 120 - while True: - if time.time() > timeout: - module.fail_json(msg='Timeout, could not fetch Riak stats.') - (response, info) = fetch_url(module, 'http://%s/stats' % (http_conn), force=True, timeout=5) - if info['status'] == 200: - stats_raw = response.read() - break - time.sleep(5) - - # here we attempt to load those stats, - try: - stats = json.loads(stats_raw) - except: - 
module.fail_json(msg='Could not parse Riak stats.') - - node_name = stats['nodename'] - nodes = stats['ring_members'] - ring_size = stats['ring_creation_size'] - rc, out, err = module.run_command([riak_bin, 'version'] ) - version = out.strip() - - result = dict(node_name=node_name, - nodes=nodes, - ring_size=ring_size, - version=version) - - if command == 'ping': - cmd = '%s ping %s' % ( riak_bin, target_node ) - rc, out, err = module.run_command(cmd) - if rc == 0: - result['ping'] = out - else: - module.fail_json(msg=out) - - elif command == 'kv_test': - cmd = '%s test' % riak_admin_bin - rc, out, err = module.run_command(cmd) - if rc == 0: - result['kv_test'] = out - else: - module.fail_json(msg=out) - - elif command == 'join': - if nodes.count(node_name) == 1 and len(nodes) > 1: - result['join'] = 'Node is already in cluster or staged to be in cluster.' - else: - cmd = '%s cluster join %s' % (riak_admin_bin, target_node) - rc, out, err = module.run_command(cmd) - if rc == 0: - result['join'] = out - result['changed'] = True - else: - module.fail_json(msg=out) - - elif command == 'plan': - cmd = '%s cluster plan' % riak_admin_bin - rc, out, err = module.run_command(cmd) - if rc == 0: - result['plan'] = out - if 'Staged Changes' in out: - result['changed'] = True - else: - module.fail_json(msg=out) - - elif command == 'commit': - cmd = '%s cluster commit' % riak_admin_bin - rc, out, err = module.run_command(cmd) - if rc == 0: - result['commit'] = out - result['changed'] = True - else: - module.fail_json(msg=out) - -# this could take a while, recommend to run in async mode - if wait_for_handoffs: - timeout = time.time() + wait_for_handoffs - while True: - cmd = '%s transfers' % riak_admin_bin - rc, out, err = module.run_command(cmd) - if 'No transfers active' in out: - result['handoffs'] = 'No transfers active.' 
- break - time.sleep(10) - if time.time() > timeout: - module.fail_json(msg='Timeout waiting for handoffs.') - - if wait_for_service: - cmd = [riak_admin_bin, 'wait_for_service', 'riak_%s' % wait_for_service, node_name ] - rc, out, err = module.run_command(cmd) - result['service'] = out - - if wait_for_ring: - timeout = time.time() + wait_for_ring - while True: - if ring_check(module, riak_admin_bin): - break - time.sleep(10) - if time.time() > timeout: - module.fail_json(msg='Timeout waiting for nodes to agree on ring.') - - result['ring_ready'] = ring_check(module, riak_admin_bin) - - module.exit_json(**result) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * - -main() diff --git a/library/files/acl b/library/files/acl deleted file mode 100644 index 30c533e006..0000000000 --- a/library/files/acl +++ /dev/null @@ -1,295 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: acl -version_added: "1.4" -short_description: Sets and retrieves file ACL information. -description: - - Sets and retrieves file ACL information. -options: - name: - required: true - default: null - description: - - The full path of the file or object. 
- aliases: ['path'] - - state: - required: false - default: query - choices: [ 'query', 'present', 'absent' ] - description: - - defines whether the ACL should be present or not. The C(query) state gets the current acl without changing it, for use in 'register' operations. - - follow: - required: false - default: yes - choices: [ 'yes', 'no' ] - description: - - whether to follow symlinks on the path if a symlink is encountered. - - default: - version_added: "1.5" - required: false - default: no - choices: [ 'yes', 'no' ] - description: - - if the target is a directory, setting this to yes will make it the default acl for entities created inside the directory. It causes an error if name is a file. - - entity: - version_added: "1.5" - required: false - description: - - actual user or group that the ACL applies to when matching entity types user or group are selected. - - etype: - version_added: "1.5" - required: false - default: null - choices: [ 'user', 'group', 'mask', 'other' ] - description: - - the entity type of the ACL to apply, see setfacl documentation for more info. - - - permissions: - version_added: "1.5" - required: false - default: null - description: - - Permissions to apply/remove can be any combination of r, w and x (read, write and execute respectively) - - entry: - required: false - default: null - description: - - DEPRECATED. The acl to set or remove. This must always be quoted in the form of '::'. The qualifier may be empty for some types, but the type and perms are always requried. '-' can be used as placeholder when you do not care about permissions. This is now superseded by entity, type and permissions fields. - -author: Brian Coca -notes: - - The "acl" module requires that acls are enabled on the target filesystem and that the setfacl and getfacl binaries are installed. 
-''' - -EXAMPLES = ''' -# Grant user Joe read access to a file -- acl: name=/etc/foo.conf entity=joe etype=user permissions="r" state=present - -# Removes the acl for Joe on a specific file -- acl: name=/etc/foo.conf entity=joe etype=user state=absent - -# Sets default acl for joe on foo.d -- acl: name=/etc/foo.d entity=joe etype=user permissions=rw default=yes state=present - -# Same as previous but using entry shorthand -- acl: name=/etc/foo.d entry="default:user:joe:rw-" state=present - -# Obtain the acl for a specific file -- acl: name=/etc/foo.conf - register: acl_info -''' - -def normalize_permissions(p): - perms = ['-','-','-'] - for char in p: - if char == 'r': - perms[0] = 'r' - if char == 'w': - perms[1] = 'w' - if char == 'x': - perms[2] = 'x' - return ''.join(perms) - -def split_entry(entry): - ''' splits entry and ensures normalized return''' - - a = entry.split(':') - a.reverse() - if len(a) == 3: - a.append(False) - try: - p,e,t,d = a - except ValueError, e: - print "wtf?? 
%s => %s" % (entry,a) - raise e - - if d: - d = True - - if t.startswith("u"): - t = "user" - elif t.startswith("g"): - t = "group" - elif t.startswith("m"): - t = "mask" - elif t.startswith("o"): - t = "other" - else: - t = None - - p = normalize_permissions(p) - - return [d,t,e,p] - -def get_acls(module,path,follow): - - cmd = [ module.get_bin_path('getfacl', True) ] - if not follow: - cmd.append('-h') - # prevents absolute path warnings and removes headers - cmd.append('--omit-header') - cmd.append('--absolute-names') - cmd.append(path) - - return _run_acl(module,cmd) - -def set_acl(module,path,entry,follow,default): - - cmd = [ module.get_bin_path('setfacl', True) ] - if not follow: - cmd.append('-h') - if default: - cmd.append('-d') - cmd.append('-m "%s"' % entry) - cmd.append(path) - - return _run_acl(module,cmd) - -def rm_acl(module,path,entry,follow,default): - - cmd = [ module.get_bin_path('setfacl', True) ] - if not follow: - cmd.append('-h') - if default: - cmd.append('-k') - entry = entry[0:entry.rfind(':')] - cmd.append('-x "%s"' % entry) - cmd.append(path) - - return _run_acl(module,cmd,False) - -def _run_acl(module,cmd,check_rc=True): - - try: - (rc, out, err) = module.run_command(' '.join(cmd), check_rc=check_rc) - except Exception, e: - module.fail_json(msg=e.strerror) - - # trim last line as it is always empty - ret = out.splitlines() - return ret[0:len(ret)-1] - -def main(): - module = AnsibleModule( - argument_spec = dict( - name = dict(required=True,aliases=['path'], type='str'), - entry = dict(required=False, etype='str'), - entity = dict(required=False, type='str', default=''), - etype = dict(required=False, choices=['other', 'user', 'group', 'mask'], type='str'), - permissions = dict(required=False, type='str'), - state = dict(required=False, default='query', choices=[ 'query', 'present', 'absent' ], type='str'), - follow = dict(required=False, type='bool', default=True), - default= dict(required=False, type='bool', default=False), - ), - 
supports_check_mode=True, - ) - - path = os.path.expanduser(module.params.get('name')) - entry = module.params.get('entry') - entity = module.params.get('entity') - etype = module.params.get('etype') - permissions = module.params.get('permissions') - state = module.params.get('state') - follow = module.params.get('follow') - default = module.params.get('default') - - if permissions: - permissions = normalize_permissions(permissions) - - if not os.path.exists(path): - module.fail_json(msg="path not found or not accessible!") - - if state in ['present','absent']: - if not entry and not etype: - module.fail_json(msg="%s requires either etype and permissions or just entry be set" % state) - - if entry: - if etype or entity or permissions: - module.fail_json(msg="entry and another incompatible field (entity, etype or permissions) are also set") - if entry.count(":") not in [2,3]: - module.fail_json(msg="Invalid entry: '%s', it requires 3 or 4 sections divided by ':'" % entry) - - default, etype, entity, permissions = split_entry(entry) - - changed=False - msg = "" - currentacls = get_acls(module,path,follow) - - if (state == 'present'): - matched = False - for oldentry in currentacls: - if oldentry.count(":") == 0: - continue - old_default, old_type, old_entity, old_permissions = split_entry(oldentry) - if old_default == default: - if old_type == etype: - if etype in ['user', 'group']: - if old_entity == entity: - matched = True - if not old_permissions == permissions: - changed = True - break - else: - matched = True - if not old_permissions == permissions: - changed = True - break - if not matched: - changed=True - - if changed and not module.check_mode: - set_acl(module,path,':'.join([etype, str(entity), permissions]),follow,default) - msg="%s is present" % ':'.join([etype, str(entity), permissions]) - - elif state == 'absent': - for oldentry in currentacls: - if oldentry.count(":") == 0: - continue - old_default, old_type, old_entity, old_permissions = 
split_entry(oldentry) - if old_default == default: - if old_type == etype: - if etype in ['user', 'group']: - if old_entity == entity: - changed=True - break - else: - changed=True - break - if changed and not module.check_mode: - rm_acl(module,path,':'.join([etype, entity, '---']),follow,default) - msg="%s is absent" % ':'.join([etype, entity, '---']) - else: - msg="current acl" - - if changed: - currentacls = get_acls(module,path,follow) - - module.exit_json(changed=changed, msg=msg, acl=currentacls) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/files/assemble b/library/files/assemble deleted file mode 100644 index a16431b9f5..0000000000 --- a/library/files/assemble +++ /dev/null @@ -1,200 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Stephen Fromm -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -import os -import os.path -import shutil -import tempfile -import re - -DOCUMENTATION = ''' ---- -module: assemble -short_description: Assembles a configuration file from fragments -description: - - Assembles a configuration file from fragments. Often a particular - program will take a single configuration file and does not support a - C(conf.d) style structure where it is easy to build up the configuration - from multiple sources. 
M(assemble) will take a directory of files that can be - local or have already been transferred to the system, and concatenate them - together to produce a destination file. Files are assembled in string sorting order. - Puppet calls this idea I(fragments). -version_added: "0.5" -options: - src: - description: - - An already existing directory full of source files. - required: true - default: null - aliases: [] - dest: - description: - - A file to create using the concatenation of all of the source files. - required: true - default: null - backup: - description: - - Create a backup file (if C(yes)), including the timestamp information so - you can get the original file back if you somehow clobbered it - incorrectly. - required: false - choices: [ "yes", "no" ] - default: "no" - delimiter: - description: - - A delimiter to separate the file contents. - version_added: "1.4" - required: false - default: null - remote_src: - description: - - If False, it will search for src at originating/master machine, if True it will - go to the remote/target machine for the src. Default is True. - choices: [ "True", "False" ] - required: false - default: "True" - version_added: "1.4" - regexp: - description: - - Assemble files only if C(regex) matches the filename. If not set, - all files are assembled. All "\\" (backslash) must be escaped as - "\\\\" to comply yaml syntax. Uses Python regular expressions; see - U(http://docs.python.org/2/library/re.html). 
- required: false - default: null -author: Stephen Fromm -extends_documentation_fragment: files -''' - -EXAMPLES = ''' -# Example from Ansible Playbooks -- assemble: src=/etc/someapp/fragments dest=/etc/someapp/someapp.conf - -# When a delimiter is specified, it will be inserted in between each fragment -- assemble: src=/etc/someapp/fragments dest=/etc/someapp/someapp.conf delimiter='### START FRAGMENT ###' -''' - -# =========================================== -# Support method - -def assemble_from_fragments(src_path, delimiter=None, compiled_regexp=None): - ''' assemble a file from a directory of fragments ''' - tmpfd, temp_path = tempfile.mkstemp() - tmp = os.fdopen(tmpfd,'w') - delimit_me = False - add_newline = False - - for f in sorted(os.listdir(src_path)): - if compiled_regexp and not compiled_regexp.search(f): - continue - fragment = "%s/%s" % (src_path, f) - if not os.path.isfile(fragment): - continue - fragment_content = file(fragment).read() - - # always put a newline between fragments if the previous fragment didn't end with a newline. 
- if add_newline: - tmp.write('\n') - - # delimiters should only appear between fragments - if delimit_me: - if delimiter: - # un-escape anything like newlines - delimiter = delimiter.decode('unicode-escape') - tmp.write(delimiter) - # always make sure there's a newline after the - # delimiter, so lines don't run together - if delimiter[-1] != '\n': - tmp.write('\n') - - tmp.write(fragment_content) - delimit_me = True - if fragment_content.endswith('\n'): - add_newline = False - else: - add_newline = True - - tmp.close() - return temp_path - -# ============================================================== -# main - -def main(): - - module = AnsibleModule( - # not checking because of daisy chain to file module - argument_spec = dict( - src = dict(required=True), - delimiter = dict(required=False), - dest = dict(required=True), - backup=dict(default=False, type='bool'), - remote_src=dict(default=False, type='bool'), - regexp = dict(required=False), - ), - add_file_common_args=True - ) - - changed = False - pathmd5 = None - destmd5 = None - src = os.path.expanduser(module.params['src']) - dest = os.path.expanduser(module.params['dest']) - backup = module.params['backup'] - delimiter = module.params['delimiter'] - regexp = module.params['regexp'] - compiled_regexp = None - - if not os.path.exists(src): - module.fail_json(msg="Source (%s) does not exist" % src) - - if not os.path.isdir(src): - module.fail_json(msg="Source (%s) is not a directory" % src) - - if regexp != None: - try: - compiled_regexp = re.compile(regexp) - except re.error, e: - module.fail_json(msg="Invalid Regexp (%s) in \"%s\"" % (e, regexp)) - - path = assemble_from_fragments(src, delimiter, compiled_regexp) - pathmd5 = module.md5(path) - - if os.path.exists(dest): - destmd5 = module.md5(dest) - - if pathmd5 != destmd5: - if backup and destmd5 is not None: - module.backup_local(dest) - shutil.copy(path, dest) - changed = True - - os.remove(path) - - file_args = 
module.load_file_common_arguments(module.params) - changed = module.set_fs_attributes_if_different(file_args, changed) - # Mission complete - module.exit_json(src=src, dest=dest, md5sum=pathmd5, changed=changed, msg="OK") - -# import module snippets -from ansible.module_utils.basic import * - -main() - diff --git a/library/files/copy b/library/files/copy deleted file mode 100644 index eff46dae98..0000000000 --- a/library/files/copy +++ /dev/null @@ -1,254 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -import os -import time - -DOCUMENTATION = ''' ---- -module: copy -version_added: "historical" -short_description: Copies files to remote locations. -description: - - The M(copy) module copies a file on the local box to remote locations. -options: - src: - description: - - Local path to a file to copy to the remote server; can be absolute or relative. - If path is a directory, it is copied recursively. In this case, if path ends - with "/", only inside contents of that directory are copied to destination. - Otherwise, if it does not end with "/", the directory itself with all contents - is copied. This behavior is similar to Rsync. 
- required: false - default: null - aliases: [] - content: - version_added: "1.1" - description: - - When used instead of 'src', sets the contents of a file directly to the specified value. - required: false - default: null - dest: - description: - - Remote absolute path where the file should be copied to. If src is a directory, - this must be a directory too. - required: true - default: null - backup: - description: - - Create a backup file including the timestamp information so you can get - the original file back if you somehow clobbered it incorrectly. - version_added: "0.7" - required: false - choices: [ "yes", "no" ] - default: "no" - force: - description: - - the default is C(yes), which will replace the remote file when contents - are different than the source. If C(no), the file will only be transferred - if the destination does not exist. - version_added: "1.1" - required: false - choices: [ "yes", "no" ] - default: "yes" - aliases: [ "thirsty" ] - validate: - description: - - The validation command to run before copying into place. The path to the file to - validate is passed in via '%s' which must be present as in the visudo example below. - The command is passed securely so shell features like expansion and pipes won't work. - required: false - default: "" - version_added: "1.2" - directory_mode: - description: - - When doing a recursive copy set the mode for the directories. If this is not set we will use the system - defaults. The mode is only set on directories which are newly created, and will not affect those that - already existed. - required: false - version_added: "1.5" -extends_documentation_fragment: files -author: Michael DeHaan -notes: - - The "copy" module recursively copy facility does not scale to lots (>hundreds) of files. - For alternative, see synchronize module, which is a wrapper around rsync. 
-''' - -EXAMPLES = ''' -# Example from Ansible Playbooks -- copy: src=/srv/myfiles/foo.conf dest=/etc/foo.conf owner=foo group=foo mode=0644 - -# The same example as above, but using a symbolic mode equivalent to 0644 -- copy: src=/srv/myfiles/foo.conf dest=/etc/foo.conf owner=foo group=foo mode="u=rw,g=r,o=r" - -# Another symbolic mode example, adding some permissions and removing others -- copy: src=/srv/myfiles/foo.conf dest=/etc/foo.conf owner=foo group=foo mode="u+rw,g-wx,o-rwx" - -# Copy a new "ntp.conf file into place, backing up the original if it differs from the copied version -- copy: src=/mine/ntp.conf dest=/etc/ntp.conf owner=root group=root mode=644 backup=yes - -# Copy a new "sudoers" file into place, after passing validation with visudo -- copy: src=/mine/sudoers dest=/etc/sudoers validate='visudo -cf %s' -''' - - -def split_pre_existing_dir(dirname): - ''' - Return the first pre-existing directory and a list of the new directories that will be created. - ''' - - head, tail = os.path.split(dirname) - if not os.path.exists(head): - (pre_existing_dir, new_directory_list) = split_pre_existing_dir(head) - else: - return (head, [ tail ]) - new_directory_list.append(tail) - return (pre_existing_dir, new_directory_list) - - -def adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed): - ''' - Walk the new directories list and make sure that permissions are as we would expect - ''' - - if len(new_directory_list) > 0: - working_dir = os.path.join(pre_existing_dir, new_directory_list.pop(0)) - directory_args['path'] = working_dir - changed = module.set_fs_attributes_if_different(directory_args, changed) - changed = adjust_recursive_directory_permissions(working_dir, new_directory_list, module, directory_args, changed) - return changed - - -def main(): - - module = AnsibleModule( - # not checking because of daisy chain to file module - argument_spec = dict( - src = dict(required=False), - original_basename 
= dict(required=False), # used to handle 'dest is a directory' via template, a slight hack - content = dict(required=False, no_log=True), - dest = dict(required=True), - backup = dict(default=False, type='bool'), - force = dict(default=True, aliases=['thirsty'], type='bool'), - validate = dict(required=False, type='str'), - directory_mode = dict(required=False) - ), - add_file_common_args=True, - supports_check_mode=True, - ) - - src = os.path.expanduser(module.params['src']) - dest = os.path.expanduser(module.params['dest']) - backup = module.params['backup'] - force = module.params['force'] - original_basename = module.params.get('original_basename',None) - validate = module.params.get('validate',None) - follow = module.params['follow'] - - if not os.path.exists(src): - module.fail_json(msg="Source %s failed to transfer" % (src)) - if not os.access(src, os.R_OK): - module.fail_json(msg="Source %s not readable" % (src)) - - md5sum_src = module.md5(src) - md5sum_dest = None - - changed = False - - # Special handling for recursive copy - create intermediate dirs - if original_basename and dest.endswith("/"): - dest = os.path.join(dest, original_basename) - dirname = os.path.dirname(dest) - if not os.path.exists(dirname): - (pre_existing_dir, new_directory_list) = split_pre_existing_dir(dirname) - os.makedirs(dirname) - directory_args = module.load_file_common_arguments(module.params) - directory_mode = module.params["directory_mode"] - if directory_mode is not None: - directory_args['mode'] = directory_mode - else: - directory_args['mode'] = None - adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed) - - if os.path.exists(dest): - if os.path.islink(dest) and follow: - dest = os.path.realpath(dest) - if not force: - module.exit_json(msg="file already exists", src=src, dest=dest, changed=False) - if (os.path.isdir(dest)): - basename = os.path.basename(src) - if original_basename: - basename = original_basename - 
dest = os.path.join(dest, basename) - if os.access(dest, os.R_OK): - md5sum_dest = module.md5(dest) - else: - if not os.path.exists(os.path.dirname(dest)): - try: - # os.path.exists() can return false in some - # circumstances where the directory does not have - # the execute bit for the current user set, in - # which case the stat() call will raise an OSError - os.stat(os.path.dirname(dest)) - except OSError, e: - if "permission denied" in str(e).lower(): - module.fail_json(msg="Destination directory %s is not accessible" % (os.path.dirname(dest))) - module.fail_json(msg="Destination directory %s does not exist" % (os.path.dirname(dest))) - if not os.access(os.path.dirname(dest), os.W_OK): - module.fail_json(msg="Destination %s not writable" % (os.path.dirname(dest))) - - backup_file = None - if md5sum_src != md5sum_dest or os.path.islink(dest): - try: - if backup: - if os.path.exists(dest): - backup_file = module.backup_local(dest) - # allow for conversion from symlink. - if os.path.islink(dest): - os.unlink(dest) - open(dest, 'w').close() - if validate: - if "%s" not in validate: - module.fail_json(msg="validate must contain %%s: %s" % (validate)) - (rc,out,err) = module.run_command(validate % src) - if rc != 0: - module.fail_json(msg="failed to validate: rc:%s error:%s" % (rc,err)) - module.atomic_move(src, dest) - except IOError: - module.fail_json(msg="failed to copy: %s to %s" % (src, dest)) - changed = True - else: - changed = False - - res_args = dict( - dest = dest, src = src, md5sum = md5sum_src, changed = changed - ) - if backup_file: - res_args['backup_file'] = backup_file - - module.params['dest'] = dest - file_args = module.load_file_common_arguments(module.params) - res_args['changed'] = module.set_fs_attributes_if_different(file_args, res_args['changed']) - - module.exit_json(**res_args) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/files/fetch b/library/files/fetch deleted file mode 100644 index 
5b47d87a85..0000000000 --- a/library/files/fetch +++ /dev/null @@ -1,67 +0,0 @@ -# this is a virtual module that is entirely implemented server side - -DOCUMENTATION = ''' ---- -module: fetch -short_description: Fetches a file from remote nodes -description: - - This module works like M(copy), but in reverse. It is used for fetching - files from remote machines and storing them locally in a file tree, - organized by hostname. Note that this module is written to transfer - log files that might not be present, so a missing remote file won't - be an error unless fail_on_missing is set to 'yes'. -version_added: "0.2" -options: - src: - description: - - The file on the remote system to fetch. This I(must) be a file, not a - directory. Recursive fetching may be supported in a later release. - required: true - default: null - aliases: [] - dest: - description: - - A directory to save the file into. For example, if the I(dest) - directory is C(/backup) a I(src) file named C(/etc/profile) on host - C(host.example.com), would be saved into - C(/backup/host.example.com/etc/profile) - required: true - default: null - fail_on_missing: - version_added: "1.1" - description: - - Makes it fails when the source file is missing. - required: false - choices: [ "yes", "no" ] - default: "no" - validate_md5: - version_added: "1.4" - description: - - Verify that the source and destination md5sums match after the files are fetched. - required: false - choices: [ "yes", "no" ] - default: "yes" - flat: - version_added: "1.2" - description: - Allows you to override the default behavior of prepending hostname/path/to/file to - the destination. If dest ends with '/', it will use the basename of the source - file, similar to the copy module. Obviously this is only handy if the filenames - are unique. 
-requirements: [] -author: Michael DeHaan -''' - -EXAMPLES = ''' -# Store file into /tmp/fetched/host.example.com/tmp/somefile -- fetch: src=/tmp/somefile dest=/tmp/fetched - -# Specifying a path directly -- fetch: src=/tmp/somefile dest=/tmp/prefix-{{ ansible_hostname }} flat=yes - -# Specifying a destination path -- fetch: src=/tmp/uniquefile dest=/tmp/special/ flat=yes - -# Storing in a path relative to the playbook -- fetch: src=/tmp/uniquefile dest=special/prefix-{{ ansible_hostname }} flat=yes -''' diff --git a/library/files/file b/library/files/file deleted file mode 100644 index ff9feb41ee..0000000000 --- a/library/files/file +++ /dev/null @@ -1,358 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -import shutil -import stat -import grp -import pwd -try: - import selinux - HAVE_SELINUX=True -except ImportError: - HAVE_SELINUX=False - -DOCUMENTATION = ''' ---- -module: file -version_added: "historical" -short_description: Sets attributes of files -extends_documentation_fragment: files -description: - - Sets attributes of files, symlinks, and directories, or removes - files/symlinks/directories. Many other modules support the same options as - the M(file) module - including M(copy), M(template), and M(assemble). 
-notes: - - See also M(copy), M(template), M(assemble) -requirements: [ ] -author: Michael DeHaan -options: - path: - description: - - 'path to the file being managed. Aliases: I(dest), I(name)' - required: true - default: [] - aliases: ['dest', 'name'] - state: - description: - - If C(directory), all immediate subdirectories will be created if they - do not exist, since 1.7 they will be created with the supplied permissions. - If C(file), the file will NOT be created if it does not exist, see the M(copy) - or M(template) module if you want that behavior. If C(link), the symbolic - link will be created or changed. Use C(hard) for hardlinks. If C(absent), - directories will be recursively deleted, and files or symlinks will be unlinked. - If C(touch) (new in 1.4), an empty file will be created if the c(path) does not - exist, while an existing file or directory will receive updated file access and - modification times (similar to the way `touch` works from the command line). - required: false - default: file - choices: [ file, link, directory, hard, touch, absent ] - src: - required: false - default: null - choices: [] - description: - - path of the file to link to (applies only to C(state=link)). Will accept absolute, - relative and nonexisting paths. Relative paths are not expanded. - recurse: - required: false - default: "no" - choices: [ "yes", "no" ] - version_added: "1.1" - description: - - recursively set the specified file attributes (applies only to state=directory) - force: - required: false - default: "no" - choices: [ "yes", "no" ] - description: - - 'force the creation of the symlinks in two cases: the source file does - not exist (but will appear later); the destination exists and is a file (so, we need to unlink the - "path" file and create symlink to the "src" file in place of it).' 
-''' - -EXAMPLES = ''' -- file: path=/etc/foo.conf owner=foo group=foo mode=0644 -- file: src=/file/to/link/to dest=/path/to/symlink owner=foo group=foo state=link -- file: src=/tmp/{{ item.path }} dest={{ item.dest }} state=link - with_items: - - { path: 'x', dest: 'y' } - - { path: 'z', dest: 'k' } - -# touch a file, using symbolic modes to set the permissions (equivalent to 0644) -- file: path=/etc/foo.conf state=touch mode="u=rw,g=r,o=r" - -# touch the same file, but add/remove some permissions -- file: path=/etc/foo.conf state=touch mode="u+rw,g-wx,o-rwx" - -''' - -def main(): - - module = AnsibleModule( - argument_spec = dict( - state = dict(choices=['file','directory','link','hard','touch','absent'], default=None), - path = dict(aliases=['dest', 'name'], required=True), - original_basename = dict(required=False), # Internal use only, for recursive ops - recurse = dict(default='no', type='bool'), - force = dict(required=False,default=False,type='bool'), - diff_peek = dict(default=None), - validate = dict(required=False, default=None), - src = dict(required=False, default=None), - ), - add_file_common_args=True, - supports_check_mode=True - ) - - params = module.params - state = params['state'] - force = params['force'] - diff_peek = params['diff_peek'] - src = params['src'] - follow = params['follow'] - - # modify source as we later reload and pass, specially relevant when used by other modules. 
- params['path'] = path = os.path.expanduser(params['path']) - - # short-circuit for diff_peek - if diff_peek is not None: - appears_binary = False - try: - f = open(path) - b = f.read(8192) - f.close() - if "\x00" in b: - appears_binary = True - except: - pass - module.exit_json(path=path, changed=False, appears_binary=appears_binary) - - # Find out current state - prev_state = 'absent' - if os.path.lexists(path): - if os.path.islink(path): - prev_state = 'link' - elif os.path.isdir(path): - prev_state = 'directory' - elif os.stat(path).st_nlink > 1: - prev_state = 'hard' - else: - # could be many other things, but defaulting to file - prev_state = 'file' - - # state should default to file, but since that creates many conflicts, - # default to 'current' when it exists. - if state is None: - if prev_state != 'absent': - state = prev_state - else: - state = 'file' - - # source is both the source of a symlink or an informational passing of the src for a template module - # or copy module, even if this module never uses it, it is needed to key off some things - if src is not None: - src = os.path.expanduser(src) - - # original_basename is used by other modules that depend on file. 
- if os.path.isdir(path) and state not in ["link", "absent"]: - if params['original_basename']: - basename = params['original_basename'] - else: - basename = os.path.basename(src) - params['path'] = path = os.path.join(path, basename) - else: - if state in ['link','hard']: - if follow: - # use the current target of the link as the source - src = os.readlink(path) - else: - module.fail_json(msg='src and dest are required for creating links') - - # make sure the target path is a directory when we're doing a recursive operation - recurse = params['recurse'] - if recurse and state != 'directory': - module.fail_json(path=path, msg="recurse option requires state to be 'directory'") - - file_args = module.load_file_common_arguments(params) - changed = False - - if state == 'absent': - if state != prev_state: - if not module.check_mode: - if prev_state == 'directory': - try: - shutil.rmtree(path, ignore_errors=False) - except Exception, e: - module.fail_json(msg="rmtree failed: %s" % str(e)) - else: - try: - os.unlink(path) - except Exception, e: - module.fail_json(path=path, msg="unlinking failed: %s " % str(e)) - module.exit_json(path=path, changed=True) - else: - module.exit_json(path=path, changed=False) - - elif state == 'file': - if state != prev_state: - # file is not absent and any other state is a conflict - module.fail_json(path=path, msg='file (%s) is %s, cannot continue' % (path, prev_state)) - - changed = module.set_fs_attributes_if_different(file_args, changed) - module.exit_json(path=path, changed=changed) - - elif state == 'directory': - if prev_state == 'absent': - if module.check_mode: - module.exit_json(changed=True) - changed = True - curpath = '' - # Split the path so we can apply filesystem attributes recursively - # from the root (/) directory for absolute paths or the base path - # of a relative path. We can then walk the appropriate directory - # path to apply attributes. 
- for dirname in path.strip('/').split('/'): - curpath = '/'.join([curpath, dirname]) - # Remove leading slash if we're creating a relative path - if not os.path.isabs(path): - curpath = curpath.lstrip('/') - if not os.path.exists(curpath): - os.mkdir(curpath) - tmp_file_args = file_args.copy() - tmp_file_args['path']=curpath - changed = module.set_fs_attributes_if_different(tmp_file_args, changed) - - changed = module.set_fs_attributes_if_different(file_args, changed) - - if recurse: - for root,dirs,files in os.walk( file_args['path'] ): - for fsobj in dirs + files: - fsname=os.path.join(root, fsobj) - tmp_file_args = file_args.copy() - tmp_file_args['path']=fsname - changed = module.set_fs_attributes_if_different(tmp_file_args, changed) - - module.exit_json(path=path, changed=changed) - - elif state in ['link','hard']: - - if os.path.isdir(path) and not os.path.islink(path): - relpath = path - else: - relpath = os.path.dirname(path) - - absrc = os.path.join(relpath, src) - if not os.path.exists(absrc) and not force: - module.fail_json(path=path, src=src, msg='src file does not exist, use "force=yes" if you really want to create the link: %s' % absrc) - - if state == 'hard': - if not os.path.isabs(src): - module.fail_json(msg="absolute paths are required") - elif prev_state == 'directory': - if not force: - module.fail_json(path=path, msg='refusing to convert between %s and %s for %s' % (prev_state, state, path)) - elif len(os.listdir(path)) > 0: - # refuse to replace a directory that has files in it - module.fail_json(path=path, msg='the directory %s is not empty, refusing to convert it' % path) - elif prev_state in ['file', 'hard'] and not force: - module.fail_json(path=path, msg='refusing to convert between %s and %s for %s' % (prev_state, state, path)) - - if prev_state == 'absent': - changed = True - elif prev_state == 'link': - old_src = os.readlink(path) - if old_src != src: - changed = True - elif prev_state == 'hard': - if not (state == 'hard' and 
os.stat(path).st_ino == os.stat(src).st_ino): - changed = True - if not force: - module.fail_json(dest=path, src=src, msg='Cannot link, different hard link exists at destination') - elif prev_state in ['file', 'directory']: - changed = True - if not force: - module.fail_json(dest=path, src=src, msg='Cannot link, %s exists at destination' % prev_state) - else: - module.fail_json(dest=path, src=src, msg='unexpected position reached') - - if changed and not module.check_mode: - if prev_state != 'absent': - # try to replace atomically - tmppath = '/'.join([os.path.dirname(path), ".%s.%s.tmp" % (os.getpid(),time.time())]) - try: - if prev_state == 'directory' and (state == 'hard' or state == 'link'): - os.rmdir(path) - if state == 'hard': - os.link(src,tmppath) - else: - os.symlink(src, tmppath) - os.rename(tmppath, path) - except OSError, e: - if os.path.exists(tmppath): - os.unlink(tmppath) - module.fail_json(path=path, msg='Error while replacing: %s' % str(e)) - else: - try: - if state == 'hard': - os.link(src,path) - else: - os.symlink(src, path) - except OSError, e: - module.fail_json(path=path, msg='Error while linking: %s' % str(e)) - - if module.check_mode and not os.path.exists(path): - module.exit_json(dest=path, src=src, changed=changed) - - changed = module.set_fs_attributes_if_different(file_args, changed) - module.exit_json(dest=path, src=src, changed=changed) - - elif state == 'touch': - if not module.check_mode: - - if prev_state == 'absent': - try: - open(path, 'w').close() - except OSError, e: - module.fail_json(path=path, msg='Error, could not touch target: %s' % str(e)) - elif prev_state in ['file', 'directory']: - try: - os.utime(path, None) - except OSError, e: - module.fail_json(path=path, msg='Error while touching existing target: %s' % str(e)) - else: - module.fail_json(msg='Cannot touch other than files and directories') - try: - module.set_fs_attributes_if_different(file_args, True) - except SystemExit, e: - if e.code: - # We take this to mean 
that fail_json() was called from - # somewhere in basic.py - if prev_state == 'absent': - # If we just created the file we can safely remove it - os.remove(path) - raise e - - module.exit_json(dest=path, changed=True) - - module.fail_json(path=path, msg='unexpected position reached') - -# import module snippets -from ansible.module_utils.basic import * -main() - diff --git a/library/files/ini_file b/library/files/ini_file deleted file mode 100644 index 83a980f5ba..0000000000 --- a/library/files/ini_file +++ /dev/null @@ -1,207 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Jan-Piet Mens -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -DOCUMENTATION = ''' ---- -module: ini_file -short_description: Tweak settings in INI files -description: - - Manage (add, remove, change) individual settings in an INI-style file without having - to manage the file as a whole with, say, M(template) or M(assemble). Adds missing - sections if they don't exist. - - Comments are discarded when the source file is read, and therefore will not - show up in the destination file. -version_added: "0.9" -options: - dest: - description: - - Path to the INI-style file; this file is created if required - required: true - default: null - section: - description: - - Section name in INI file. This is added if C(state=present) automatically when - a single value is being set. 
- required: true - default: null - option: - description: - - if set (required for changing a I(value)), this is the name of the option. - - May be omitted if adding/removing a whole I(section). - required: false - default: null - value: - description: - - the string value to be associated with an I(option). May be omitted when removing an I(option). - required: false - default: null - backup: - description: - - Create a backup file including the timestamp information so you can get - the original file back if you somehow clobbered it incorrectly. - required: false - default: "no" - choices: [ "yes", "no" ] - others: - description: - - all arguments accepted by the M(file) module also work here - required: false -notes: - - While it is possible to add an I(option) without specifying a I(value), this makes - no sense. - - A section named C(default) cannot be added by the module, but if it exists, individual - options within the section can be updated. (This is a limitation of Python's I(ConfigParser).) - Either use M(template) to create a base INI file with a C([default]) section, or use - M(lineinfile) to add the missing line. 
-requirements: [ ConfigParser ] -author: Jan-Piet Mens -''' - -EXAMPLES = ''' -# Ensure "fav=lemonade is in section "[drinks]" in specified file -- ini_file: dest=/etc/conf section=drinks option=fav value=lemonade mode=0600 backup=yes - -- ini_file: dest=/etc/anotherconf - section=drinks - option=temperature - value=cold - backup=yes -''' - -import ConfigParser -import sys - -# ============================================================== -# do_ini - -def do_ini(module, filename, section=None, option=None, value=None, state='present', backup=False): - - changed = False - if (sys.version_info[0] == 2 and sys.version_info[1] >= 7) or sys.version_info[0] >= 3: - cp = ConfigParser.ConfigParser(allow_no_value=True) - else: - cp = ConfigParser.ConfigParser() - cp.optionxform = identity - - try: - f = open(filename) - cp.readfp(f) - except IOError: - pass - - - if state == 'absent': - if option is None and value is None: - if cp.has_section(section): - cp.remove_section(section) - changed = True - else: - if option is not None: - try: - if cp.get(section, option): - cp.remove_option(section, option) - changed = True - except: - pass - - if state == 'present': - - # DEFAULT section is always there by DEFAULT, so never try to add it. 
- if cp.has_section(section) == False and section.upper() != 'DEFAULT': - - cp.add_section(section) - changed = True - - if option is not None and value is not None: - try: - oldvalue = cp.get(section, option) - if str(value) != str(oldvalue): - cp.set(section, option, value) - changed = True - except ConfigParser.NoSectionError: - cp.set(section, option, value) - changed = True - except ConfigParser.NoOptionError: - cp.set(section, option, value) - changed = True - - if changed: - if backup: - module.backup_local(filename) - - try: - f = open(filename, 'w') - cp.write(f) - except: - module.fail_json(msg="Can't creat %s" % filename) - - return changed - -# ============================================================== -# identity - -def identity(arg): - """ - This function simply returns its argument. It serves as a - replacement for ConfigParser.optionxform, which by default - changes arguments to lower case. The identity function is a - better choice than str() or unicode(), because it is - encoding-agnostic. 
- """ - return arg - -# ============================================================== -# main - -def main(): - - module = AnsibleModule( - argument_spec = dict( - dest = dict(required=True), - section = dict(required=True), - option = dict(required=False), - value = dict(required=False), - backup = dict(default='no', type='bool'), - state = dict(default='present', choices=['present', 'absent']) - ), - add_file_common_args = True - ) - - info = dict() - - dest = os.path.expanduser(module.params['dest']) - section = module.params['section'] - option = module.params['option'] - value = module.params['value'] - state = module.params['state'] - backup = module.params['backup'] - - changed = do_ini(module, dest, section, option, value, state, backup) - - file_args = module.load_file_common_arguments(module.params) - changed = module.set_fs_attributes_if_different(file_args, changed) - - # Mission complete - module.exit_json(dest=dest, changed=changed, msg="OK") - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/files/lineinfile b/library/files/lineinfile deleted file mode 100644 index 12f8dc89a7..0000000000 --- a/library/files/lineinfile +++ /dev/null @@ -1,400 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Daniel Hokka Zakrisson -# (c) 2014, Ahti Kitsik -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -import pipes -import re -import os -import tempfile - -DOCUMENTATION = """ ---- -module: lineinfile -author: Daniel Hokka Zakrisson, Ahti Kitsik -short_description: Ensure a particular line is in a file, or replace an - existing line using a back-referenced regular expression. -description: - - This module will search a file for a line, and ensure that it is present or absent. - - This is primarily useful when you want to change a single line in a - file only. For other cases, see the M(copy) or M(template) modules. -version_added: "0.7" -options: - dest: - required: true - aliases: [ name, destfile ] - description: - - The file to modify. - regexp: - required: false - version_added: 1.7 - description: - - The regular expression to look for in every line of the file. For - C(state=present), the pattern to replace if found; only the last line - found will be replaced. For C(state=absent), the pattern of the line - to remove. Uses Python regular expressions; see - U(http://docs.python.org/2/library/re.html). - state: - required: false - choices: [ present, absent ] - default: "present" - aliases: [] - description: - - Whether the line should be there or not. - line: - required: false - description: - - Required for C(state=present). The line to insert/replace into the - file. If C(backrefs) is set, may contain backreferences that will get - expanded with the C(regexp) capture groups if the regexp matches. The - backreferences should be double escaped (see examples). - backrefs: - required: false - default: "no" - choices: [ "yes", "no" ] - version_added: "1.1" - description: - - Used with C(state=present). If set, line can contain backreferences - (both positional and named) that will get populated if the C(regexp) - matches. This flag changes the operation of the module slightly; - C(insertbefore) and C(insertafter) will be ignored, and if the C(regexp) - doesn't match anywhere in the file, the file will be left unchanged. 
- If the C(regexp) does match, the last matching line will be replaced by - the expanded line parameter. - insertafter: - required: false - default: EOF - description: - - Used with C(state=present). If specified, the line will be inserted - after the specified regular expression. A special value is - available; C(EOF) for inserting the line at the end of the file. - May not be used with C(backrefs). - choices: [ 'EOF', '*regex*' ] - insertbefore: - required: false - version_added: "1.1" - description: - - Used with C(state=present). If specified, the line will be inserted - before the specified regular expression. A value is available; - C(BOF) for inserting the line at the beginning of the file. - May not be used with C(backrefs). - choices: [ 'BOF', '*regex*' ] - create: - required: false - choices: [ "yes", "no" ] - default: "no" - description: - - Used with C(state=present). If specified, the file will be created - if it does not already exist. By default it will fail if the file - is missing. - backup: - required: false - default: "no" - choices: [ "yes", "no" ] - description: - - Create a backup file including the timestamp information so you can - get the original file back if you somehow clobbered it incorrectly. - validate: - required: false - description: - - validation to run before copying into place. - Use %s in the command to indicate the current file to validate. - The command is passed securely so shell features like - expansion and pipes won't work. - required: false - default: None - version_added: "1.4" - others: - description: - - All arguments accepted by the M(file) module also work here. 
- required: false -""" - -EXAMPLES = r""" -- lineinfile: dest=/etc/selinux/config regexp=^SELINUX= line=SELINUX=disabled - -- lineinfile: dest=/etc/sudoers state=absent regexp="^%wheel" - -- lineinfile: dest=/etc/hosts regexp='^127\.0\.0\.1' line='127.0.0.1 localhost' owner=root group=root mode=0644 - -- lineinfile: dest=/etc/httpd/conf/httpd.conf regexp="^Listen " insertafter="^#Listen " line="Listen 8080" - -- lineinfile: dest=/etc/services regexp="^# port for http" insertbefore="^www.*80/tcp" line="# port for http by default" - -# Add a line to a file if it does not exist, without passing regexp -- lineinfile: dest=/tmp/testfile line="192.168.1.99 foo.lab.net foo" - -# Fully quoted because of the ': ' on the line. See the Gotchas in the YAML docs. -- lineinfile: "dest=/etc/sudoers state=present regexp='^%wheel' line='%wheel ALL=(ALL) NOPASSWD: ALL'" - -- lineinfile: dest=/opt/jboss-as/bin/standalone.conf regexp='^(.*)Xms(\d+)m(.*)$' line='\1Xms${xms}m\3' backrefs=yes - -# Validate a the sudoers file before saving -- lineinfile: dest=/etc/sudoers state=present regexp='^%ADMIN ALL\=' line='%ADMIN ALL=(ALL) NOPASSWD:ALL' validate='visudo -cf %s' -""" - -def write_changes(module,lines,dest): - - tmpfd, tmpfile = tempfile.mkstemp() - f = os.fdopen(tmpfd,'wb') - f.writelines(lines) - f.close() - - validate = module.params.get('validate', None) - valid = not validate - if validate: - if "%s" not in validate: - module.fail_json(msg="validate must contain %%s: %s" % (validate)) - (rc, out, err) = module.run_command(validate % tmpfile) - valid = rc == 0 - if rc != 0: - module.fail_json(msg='failed to validate: ' - 'rc:%s error:%s' % (rc,err)) - if valid: - module.atomic_move(tmpfile, os.path.realpath(dest)) - -def check_file_attrs(module, changed, message): - - file_args = module.load_file_common_arguments(module.params) - if module.set_fs_attributes_if_different(file_args, False): - - if changed: - message += " and " - changed = True - message += "ownership, perms or SE 
linux context changed" - - return message, changed - - -def present(module, dest, regexp, line, insertafter, insertbefore, create, - backup, backrefs): - - if not os.path.exists(dest): - if not create: - module.fail_json(rc=257, msg='Destination %s does not exist !' % dest) - destpath = os.path.dirname(dest) - if not os.path.exists(destpath): - os.makedirs(destpath) - lines = [] - else: - f = open(dest, 'rb') - lines = f.readlines() - f.close() - - msg = "" - - if regexp is not None: - mre = re.compile(regexp) - - if insertafter not in (None, 'BOF', 'EOF'): - insre = re.compile(insertafter) - elif insertbefore not in (None, 'BOF'): - insre = re.compile(insertbefore) - else: - insre = None - - # index[0] is the line num where regexp has been found - # index[1] is the line num where insertafter/inserbefore has been found - index = [-1, -1] - m = None - for lineno, cur_line in enumerate(lines): - if regexp is not None: - match_found = mre.search(cur_line) - else: - match_found = line == cur_line.rstrip('\r\n') - if match_found: - index[0] = lineno - m = match_found - elif insre is not None and insre.search(cur_line): - if insertafter: - # + 1 for the next line - index[1] = lineno + 1 - if insertbefore: - # + 1 for the previous line - index[1] = lineno - - msg = '' - changed = False - # Regexp matched a line in the file - if index[0] != -1: - if backrefs: - new_line = m.expand(line) - else: - # Don't do backref expansion if not asked. - new_line = line - - if lines[index[0]] != new_line + os.linesep: - lines[index[0]] = new_line + os.linesep - msg = 'line replaced' - changed = True - elif backrefs: - # Do absolutely nothing, since it's not safe generating the line - # without the regexp matching to populate the backrefs. 
- pass - # Add it to the beginning of the file - elif insertbefore == 'BOF' or insertafter == 'BOF': - lines.insert(0, line + os.linesep) - msg = 'line added' - changed = True - # Add it to the end of the file if requested or - # if insertafter=/insertbefore didn't match anything - # (so default behaviour is to add at the end) - elif insertafter == 'EOF': - - # If the file is not empty then ensure there's a newline before the added line - if len(lines)>0 and not (lines[-1].endswith('\n') or lines[-1].endswith('\r')): - lines.append(os.linesep) - - lines.append(line + os.linesep) - msg = 'line added' - changed = True - # Do nothing if insert* didn't match - elif index[1] == -1: - pass - # insert* matched, but not the regexp - else: - lines.insert(index[1], line + os.linesep) - msg = 'line added' - changed = True - - backupdest = "" - if changed and not module.check_mode: - if backup and os.path.exists(dest): - backupdest = module.backup_local(dest) - write_changes(module, lines, dest) - - msg, changed = check_file_attrs(module, changed, msg) - module.exit_json(changed=changed, msg=msg, backup=backupdest) - - -def absent(module, dest, regexp, line, backup): - - if not os.path.exists(dest): - module.exit_json(changed=False, msg="file not present") - - msg = "" - - f = open(dest, 'rb') - lines = f.readlines() - f.close() - if regexp is not None: - cre = re.compile(regexp) - found = [] - - def matcher(cur_line): - if regexp is not None: - match_found = cre.search(cur_line) - else: - match_found = line == cur_line.rstrip('\r\n') - if match_found: - found.append(cur_line) - return not match_found - - lines = filter(matcher, lines) - changed = len(found) > 0 - backupdest = "" - if changed and not module.check_mode: - if backup: - backupdest = module.backup_local(dest) - write_changes(module, lines, dest) - - if changed: - msg = "%s line(s) removed" % len(found) - - msg, changed = check_file_attrs(module, changed, msg) - module.exit_json(changed=changed, found=len(found), 
msg=msg, backup=backupdest) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - dest=dict(required=True, aliases=['name', 'destfile']), - state=dict(default='present', choices=['absent', 'present']), - regexp=dict(default=None), - line=dict(aliases=['value']), - insertafter=dict(default=None), - insertbefore=dict(default=None), - backrefs=dict(default=False, type='bool'), - create=dict(default=False, type='bool'), - backup=dict(default=False, type='bool'), - validate=dict(default=None, type='str'), - ), - mutually_exclusive=[['insertbefore', 'insertafter']], - add_file_common_args=True, - supports_check_mode=True - ) - - params = module.params - create = module.params['create'] - backup = module.params['backup'] - backrefs = module.params['backrefs'] - dest = os.path.expanduser(params['dest']) - - - if os.path.isdir(dest): - module.fail_json(rc=256, msg='Destination %s is a directory !' % dest) - - if params['state'] == 'present': - if backrefs and params['regexp'] is None: - module.fail_json(msg='regexp= is required with backrefs=true') - - if params.get('line', None) is None: - module.fail_json(msg='line= is required with state=present') - - # Deal with the insertafter default value manually, to avoid errors - # because of the mutually_exclusive mechanism. - ins_bef, ins_aft = params['insertbefore'], params['insertafter'] - if ins_bef is None and ins_aft is None: - ins_aft = 'EOF' - - line = params['line'] - - # The safe_eval call will remove some quoting, but not others, - # so we need to know if we should specifically unquote it. - should_unquote = not is_quoted(line) - - # always add one layer of quotes - line = "'%s'" % line - - # Replace escape sequences like '\n' while being sure - # not to replace octal escape sequences (\ooo) since they - # match the backref syntax. 
- if backrefs: - line = re.sub(r'(\\[0-9]{1,3})', r'\\\1', line) - line = module.safe_eval(line) - - # Now remove quotes around the string, if needed after - # removing the layer we added above - line = unquote(line) - if should_unquote: - line = unquote(line) - - present(module, dest, params['regexp'], line, - ins_aft, ins_bef, create, backup, backrefs) - else: - if params['regexp'] is None and params.get('line', None) is None: - module.fail_json(msg='one of line= or regexp= is required with state=absent') - - absent(module, dest, params['regexp'], params.get('line', None), backup) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.splitter import * - -main() diff --git a/library/files/replace b/library/files/replace deleted file mode 100644 index 57b522dd77..0000000000 --- a/library/files/replace +++ /dev/null @@ -1,162 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Evan Kaufman . - -import re -import os -import tempfile - -DOCUMENTATION = """ ---- -module: replace -author: Evan Kaufman -short_description: Replace all instances of a particular string in a - file using a back-referenced regular expression. -description: - - This module will replace all instances of a pattern within a file. - - It is up to the user to maintain idempotence by ensuring that the - same pattern would never match any replacements made. -version_added: "1.6" -options: - dest: - required: true - aliases: [ name, destfile ] - description: - - The file to modify. - regexp: - required: true - description: - - The regular expression to look for in the contents of the file. - Uses Python regular expressions; see - U(http://docs.python.org/2/library/re.html). - Uses multiline mode, which means C(^) and C($) match the beginning - and end respectively of I(each line) of the file. - replace: - required: false - description: - - The string to replace regexp matches. 
May contain backreferences - that will get expanded with the regexp capture groups if the regexp - matches. If not set, matches are removed entirely. - backup: - required: false - default: "no" - choices: [ "yes", "no" ] - description: - - Create a backup file including the timestamp information so you can - get the original file back if you somehow clobbered it incorrectly. - validate: - required: false - description: - - validation to run before copying into place - required: false - default: None - others: - description: - - All arguments accepted by the M(file) module also work here. - required: false -""" - -EXAMPLES = r""" -- replace: dest=/etc/hosts regexp='(\s+)old\.host\.name(\s+.*)?$' replace='\1new.host.name\2' backup=yes - -- replace: dest=/home/jdoe/.ssh/known_hosts regexp='^old\.host\.name[^\n]*\n' owner=jdoe group=jdoe mode=644 - -- replace: dest=/etc/apache/ports regexp='^(NameVirtualHost|Listen)\s+80\s*$' replace='\1 127.0.0.1:8080' validate='/usr/sbin/apache2ctl -f %s -t' -""" - -def write_changes(module,contents,dest): - - tmpfd, tmpfile = tempfile.mkstemp() - f = os.fdopen(tmpfd,'wb') - f.write(contents) - f.close() - - validate = module.params.get('validate', None) - valid = not validate - if validate: - if "%s" not in validate: - module.fail_json(msg="validate must contain %%s: %s" % (validate)) - (rc, out, err) = module.run_command(validate % tmpfile) - valid = rc == 0 - if rc != 0: - module.fail_json(msg='failed to validate: ' - 'rc:%s error:%s' % (rc,err)) - if valid: - module.atomic_move(tmpfile, dest) - -def check_file_attrs(module, changed, message): - - file_args = module.load_file_common_arguments(module.params) - if module.set_file_attributes_if_different(file_args, False): - - if changed: - message += " and " - changed = True - message += "ownership, perms or SE linux context changed" - - return message, changed - -def main(): - module = AnsibleModule( - argument_spec=dict( - dest=dict(required=True, aliases=['name', 'destfile']), - 
regexp=dict(required=True), - replace=dict(default='', type='str'), - backup=dict(default=False, type='bool'), - validate=dict(default=None, type='str'), - ), - add_file_common_args=True, - supports_check_mode=True - ) - - params = module.params - dest = os.path.expanduser(params['dest']) - - if os.path.isdir(dest): - module.fail_json(rc=256, msg='Destination %s is a directory !' % dest) - - if not os.path.exists(dest): - module.fail_json(rc=257, msg='Destination %s does not exist !' % dest) - else: - f = open(dest, 'rb') - contents = f.read() - f.close() - - mre = re.compile(params['regexp'], re.MULTILINE) - result = re.subn(mre, params['replace'], contents, 0) - - if result[1] > 0 and contents != result[0]: - msg = '%s replacements made' % result[1] - changed = True - else: - msg = '' - changed = False - - if changed and not module.check_mode: - if params['backup'] and os.path.exists(dest): - module.backup_local(dest) - write_changes(module, result[0], dest) - - msg, changed = check_file_attrs(module, changed, msg) - module.exit_json(changed=changed, msg=msg) - -# this is magic, see lib/ansible/module_common.py -#<> - -main() diff --git a/library/files/stat b/library/files/stat deleted file mode 100644 index 8c717a395c..0000000000 --- a/library/files/stat +++ /dev/null @@ -1,152 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -DOCUMENTATION = ''' ---- -module: stat -version_added: "1.3" -short_description: retrieve file or file system status -description: - - Retrieves facts for a file similar to the linux/unix 'stat' command. -options: - path: - description: - - The full path of the file/object to get the facts of - required: true - default: null - aliases: [] - follow: - description: - - Whether to follow symlinks - required: false - default: no - aliases: [] - get_md5: - description: - - Whether to return the md5 sum of the file - required: false - default: yes - aliases: [] -author: Bruce Pennypacker -''' - -EXAMPLES = ''' -# Obtain the stats of /etc/foo.conf, and check that the file still belongs -# to 'root'. Fail otherwise. -- stat: path=/etc/foo.conf - register: st -- fail: msg="Whoops! file ownership has changed" - when: st.stat.pw_name != 'root' - -# Determine if a path exists and is a directory. Note we need to test -# both that p.stat.isdir actually exists, and also that it's set to true. -- stat: path=/path/to/something - register: p -- debug: msg="Path exists and is a directory" - when: p.stat.isdir is defined and p.stat.isdir == true - -# Don't do md5 checksum -- stat: path=/path/to/myhugefile get_md5=no -''' - -import os -import sys -from stat import * -import pwd - -def main(): - module = AnsibleModule( - argument_spec = dict( - path = dict(required=True), - follow = dict(default='no', type='bool'), - get_md5 = dict(default='yes', type='bool') - ), - supports_check_mode = True - ) - - path = module.params.get('path') - path = os.path.expanduser(path) - follow = module.params.get('follow') - get_md5 = module.params.get('get_md5') - - try: - if follow: - st = os.stat(path) - else: - st = os.lstat(path) - except OSError, e: - if e.errno == errno.ENOENT: - d = { 'exists' : False } - module.exit_json(changed=False, stat=d) - - module.fail_json(msg = e.strerror) - - mode = st.st_mode - - # back to ansible - d = { - 'exists' : True, - 'mode' : "%04o" % S_IMODE(mode), - 
'isdir' : S_ISDIR(mode), - 'ischr' : S_ISCHR(mode), - 'isblk' : S_ISBLK(mode), - 'isreg' : S_ISREG(mode), - 'isfifo' : S_ISFIFO(mode), - 'islnk' : S_ISLNK(mode), - 'issock' : S_ISSOCK(mode), - 'uid' : st.st_uid, - 'gid' : st.st_gid, - 'size' : st.st_size, - 'inode' : st.st_ino, - 'dev' : st.st_dev, - 'nlink' : st.st_nlink, - 'atime' : st.st_atime, - 'mtime' : st.st_mtime, - 'ctime' : st.st_ctime, - 'wusr' : bool(mode & stat.S_IWUSR), - 'rusr' : bool(mode & stat.S_IRUSR), - 'xusr' : bool(mode & stat.S_IXUSR), - 'wgrp' : bool(mode & stat.S_IWGRP), - 'rgrp' : bool(mode & stat.S_IRGRP), - 'xgrp' : bool(mode & stat.S_IXGRP), - 'woth' : bool(mode & stat.S_IWOTH), - 'roth' : bool(mode & stat.S_IROTH), - 'xoth' : bool(mode & stat.S_IXOTH), - 'isuid' : bool(mode & stat.S_ISUID), - 'isgid' : bool(mode & stat.S_ISGID), - } - - if S_ISLNK(mode): - d['lnk_source'] = os.path.realpath(path) - - if S_ISREG(mode) and get_md5 and os.access(path,os.R_OK): - d['md5'] = module.md5(path) - - - try: - pw = pwd.getpwuid(st.st_uid) - - d['pw_name'] = pw.pw_name - except: - pass - - - module.exit_json(changed=False, stat=d) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/files/synchronize b/library/files/synchronize deleted file mode 100644 index 842dd86384..0000000000 --- a/library/files/synchronize +++ /dev/null @@ -1,345 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012-2013, Timothy Appnel -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: synchronize -version_added: "1.4" -short_description: Uses rsync to make synchronizing file paths in your playbooks quick and easy. -description: - - This is a wrapper around rsync. Of course you could just use the command action to call rsync yourself, but you also have to add a fair number of boilerplate options and host facts. You still may need to call rsync directly via C(command) or C(shell) depending on your use case. The synchronize action is meant to do common things with C(rsync) easily. It does not provide access to the full power of rsync, but does make most invocations easier to follow. -options: - src: - description: - - Path on the source machine that will be synchronized to the destination; The path can be absolute or relative. - required: true - dest: - description: - - Path on the destination machine that will be synchronized from the source; The path can be absolute or relative. - required: true - dest_port: - description: - - Port number for ssh on the destination host. The ansible_ssh_port inventory var takes precedence over this value. - default: 22 - version_added: "1.5" - mode: - description: - - Specify the direction of the synchroniztion. In push mode the localhost or delegate is the source; In pull mode the remote host in context is the source. - required: false - choices: [ 'push', 'pull' ] - default: 'push' - archive: - description: - - Mirrors the rsync archive flag, enables recursive, links, perms, times, owner, group flags and -D. - choices: [ 'yes', 'no' ] - default: 'yes' - required: false - checksum: - description: - - Skip based on checksum, rather than mod-time & size; Note that that "archive" option is still enabled by default - the "checksum" option will not disable it. 
- choices: [ 'yes', 'no' ] - default: 'no' - required: false - version_added: "1.6" - compress: - description: - - Compress file data during the transfer. In most cases, leave this enabled unless it causes problems. - choices: [ 'yes', 'no' ] - default: 'yes' - required: false - version_added: "1.7" - existing_only: - description: - - Skip creating new files on receiver. - choices: [ 'yes', 'no' ] - default: 'no' - required: false - version_added: "1.5" - delete: - description: - - Delete files that don't exist (after transfer, not before) in the C(src) path. This option requires C(recursive=yes). - choices: [ 'yes', 'no' ] - default: 'no' - required: false - dirs: - description: - - Transfer directories without recursing - choices: [ 'yes', 'no' ] - default: 'no' - required: false - recursive: - description: - - Recurse into directories. - choices: [ 'yes', 'no' ] - default: the value of the archive option - required: false - links: - description: - - Copy symlinks as symlinks. - choices: [ 'yes', 'no' ] - default: the value of the archive option - required: false - copy_links: - description: - - Copy symlinks as the item that they point to (the referent) is copied, rather than the symlink. - choices: [ 'yes', 'no' ] - default: 'no' - required: false - perms: - description: - - Preserve permissions. - choices: [ 'yes', 'no' ] - default: the value of the archive option - required: false - times: - description: - - Preserve modification times - choices: [ 'yes', 'no' ] - default: the value of the archive option - required: false - owner: - description: - - Preserve owner (super user only) - choices: [ 'yes', 'no' ] - default: the value of the archive option - required: false - group: - description: - - Preserve group - choices: [ 'yes', 'no' ] - default: the value of the archive option - required: false - rsync_path: - description: - - Specify the rsync command to run on the remote machine. See C(--rsync-path) on the rsync man page. 
- required: false - rsync_timeout: - description: - - Specify a --timeout for the rsync command in seconds. - default: 0 - required: false - set_remote_user: - description: - - put user@ for the remote paths. If you have a custom ssh config to define the remote user for a host - that does not match the inventory user, you should set this parameter to "no". - default: yes - rsync_opts: - description: - - Specify additional rsync options by passing in an array. - default: - required: false - version_added: "1.6" -notes: - - Inspect the verbose output to validate the destination user/host/path - are what was expected. - - The remote user for the dest path will always be the remote_user, not - the sudo_user. - - Expect that dest=~/x will be ~/x even if using sudo. - - To exclude files and directories from being synchronized, you may add - C(.rsync-filter) files to the source directory. - - -author: Timothy Appnel -''' - -EXAMPLES = ''' -# Synchronization of src on the control machine to dest on the remote hosts -synchronize: src=some/relative/path dest=/some/absolute/path - -# Synchronization without any --archive options enabled -synchronize: src=some/relative/path dest=/some/absolute/path archive=no - -# Synchronization with --archive options enabled except for --recursive -synchronize: src=some/relative/path dest=/some/absolute/path recursive=no - -# Synchronization with --archive options enabled except for --times, with --checksum option enabled -synchronize: src=some/relative/path dest=/some/absolute/path checksum=yes times=no - -# Synchronization without --archive options enabled except use --links -synchronize: src=some/relative/path dest=/some/absolute/path archive=no links=yes - -# Synchronization of two paths both on the control machine -local_action: synchronize src=some/relative/path dest=/some/absolute/path - -# Synchronization of src on the inventory host to the dest on the localhost in -pull mode -synchronize: mode=pull src=some/relative/path 
dest=/some/absolute/path - -# Synchronization of src on delegate host to dest on the current inventory host -synchronize: > - src=some/relative/path dest=/some/absolute/path - delegate_to: delegate.host - -# Synchronize and delete files in dest on the remote host that are not found in src of localhost. -synchronize: src=some/relative/path dest=/some/absolute/path delete=yes - -# Synchronize using an alternate rsync command -synchronize: src=some/relative/path dest=/some/absolute/path rsync_path="sudo rsync" - -# Example .rsync-filter file in the source directory -- var # exclude any path whose last part is 'var' -- /var # exclude any path starting with 'var' starting at the source directory -+ /var/conf # include /var/conf even though it was previously excluded - -# Synchronize passing in extra rsync options -synchronize: src=/tmp/helloworld dest=/var/www/helloword rsync_opts=--no-motd,--exclude=.git -''' - - -def main(): - module = AnsibleModule( - argument_spec = dict( - src = dict(required=True), - dest = dict(required=True), - dest_port = dict(default=22), - delete = dict(default='no', type='bool'), - private_key = dict(default=None), - rsync_path = dict(default=None), - archive = dict(default='yes', type='bool'), - checksum = dict(default='no', type='bool'), - compress = dict(default='yes', type='bool'), - existing_only = dict(default='no', type='bool'), - dirs = dict(default='no', type='bool'), - recursive = dict(type='bool'), - links = dict(type='bool'), - copy_links = dict(type='bool'), - perms = dict(type='bool'), - times = dict(type='bool'), - owner = dict(type='bool'), - group = dict(type='bool'), - set_remote_user = dict(default='yes', type='bool'), - rsync_timeout = dict(type='int', default=0), - rsync_opts = dict(type='list') - ), - supports_check_mode = True - ) - - source = '"' + module.params['src'] + '"' - dest = '"' + module.params['dest'] + '"' - dest_port = module.params['dest_port'] - delete = module.params['delete'] - private_key = 
module.params['private_key'] - rsync_path = module.params['rsync_path'] - rsync = module.params.get('local_rsync_path', 'rsync') - rsync_timeout = module.params.get('rsync_timeout', 'rsync_timeout') - archive = module.params['archive'] - checksum = module.params['checksum'] - compress = module.params['compress'] - existing_only = module.params['existing_only'] - dirs = module.params['dirs'] - # the default of these params depends on the value of archive - recursive = module.params['recursive'] - links = module.params['links'] - copy_links = module.params['copy_links'] - perms = module.params['perms'] - times = module.params['times'] - owner = module.params['owner'] - group = module.params['group'] - rsync_opts = module.params['rsync_opts'] - - cmd = '%s --delay-updates -FF' % rsync - if compress: - cmd = cmd + ' --compress' - if rsync_timeout: - cmd = cmd + ' --timeout=%s' % rsync_timeout - if module.check_mode: - cmd = cmd + ' --dry-run' - if delete: - cmd = cmd + ' --delete-after' - if existing_only: - cmd = cmd + ' --existing' - if checksum: - cmd = cmd + ' --checksum' - if archive: - cmd = cmd + ' --archive' - if recursive is False: - cmd = cmd + ' --no-recursive' - if links is False: - cmd = cmd + ' --no-links' - if copy_links is True: - cmd = cmd + ' --copy-links' - if perms is False: - cmd = cmd + ' --no-perms' - if times is False: - cmd = cmd + ' --no-times' - if owner is False: - cmd = cmd + ' --no-owner' - if group is False: - cmd = cmd + ' --no-group' - else: - if recursive is True: - cmd = cmd + ' --recursive' - if links is True: - cmd = cmd + ' --links' - if copy_links is True: - cmd = cmd + ' --copy-links' - if perms is True: - cmd = cmd + ' --perms' - if times is True: - cmd = cmd + ' --times' - if owner is True: - cmd = cmd + ' --owner' - if group is True: - cmd = cmd + ' --group' - if dirs: - cmd = cmd + ' --dirs' - if private_key is None: - private_key = '' - else: - private_key = '-i '+ private_key - - ssh_opts = '-S none -o 
StrictHostKeyChecking=no' - if dest_port != 22: - cmd += " --rsh 'ssh %s %s -o Port=%s'" % (private_key, ssh_opts, dest_port) - else: - cmd += " --rsh 'ssh %s %s'" % (private_key, ssh_opts) # need ssh param - - if rsync_path: - cmd = cmd + " --rsync-path=%s" % (rsync_path) - - if rsync_opts: - cmd = cmd + " " + " ".join(rsync_opts) - - changed_marker = '<>' - cmd = cmd + " --out-format='" + changed_marker + "%i %n%L'" - - # expand the paths - if '@' not in source: - source = os.path.expanduser(source) - if '@' not in dest: - dest = os.path.expanduser(dest) - - cmd = ' '.join([cmd, source, dest]) - cmdstr = cmd - (rc, out, err) = module.run_command(cmd) - if rc: - return module.fail_json(msg=err, rc=rc, cmd=cmdstr) - else: - changed = changed_marker in out - out_clean=out.replace(changed_marker,'') - out_lines=out_clean.split('\n') - while '' in out_lines: - out_lines.remove('') - return module.exit_json(changed=changed, msg=out_clean, - rc=rc, cmd=cmdstr, stdout_lines=out_lines) - -# import module snippets -from ansible.module_utils.basic import * - -main() - diff --git a/library/files/template b/library/files/template deleted file mode 100644 index 7ba072fcdc..0000000000 --- a/library/files/template +++ /dev/null @@ -1,66 +0,0 @@ -# this is a virtual module that is entirely implemented server side - -DOCUMENTATION = ''' ---- -module: template -version_added: historical -short_description: Templates a file out to a remote server. -description: - - Templates are processed by the Jinja2 templating language - (U(http://jinja.pocoo.org/docs/)) - documentation on the template - formatting can be found in the Template Designer Documentation - (U(http://jinja.pocoo.org/docs/templates/)). 
- - "Six additional variables can be used in templates: C(ansible_managed) - (configurable via the C(defaults) section of C(ansible.cfg)) contains a string - which can be used to describe the template name, host, modification time of the - template file and the owner uid, C(template_host) contains the node name of - the template's machine, C(template_uid) the owner, C(template_path) the - absolute path of the template, C(template_fullpath) is the absolute path of the - template, and C(template_run_date) is the date that the template was rendered. Note that including - a string that uses a date in the template will result in the template being marked 'changed' - each time." -options: - src: - description: - - Path of a Jinja2 formatted template on the local server. This can be a relative or absolute path. - required: true - default: null - aliases: [] - dest: - description: - - Location to render the template to on the remote machine. - required: true - default: null - backup: - description: - - Create a backup file including the timestamp information so you can get - the original file back if you somehow clobbered it incorrectly. - required: false - choices: [ "yes", "no" ] - default: "no" - validate: - description: - - The validation command to run before copying into place. - - The path to the file to validate is passed in via '%s' which must be present as in the visudo example below. - - validation to run before copying into place. The command is passed - securely so shell features like expansion and pipes won't work. - required: false - default: "" - version_added: "1.2" -notes: - - "Since Ansible version 0.9, templates are loaded with C(trim_blocks=True)." 
-requirements: [] -author: Michael DeHaan -extends_documentation_fragment: files -''' - -EXAMPLES = ''' -# Example from Ansible Playbooks -- template: src=/mytemplates/foo.j2 dest=/etc/file.conf owner=bin group=wheel mode=0644 - -# The same example, but using symbolic modes equivalent to 0644 -- template: src=/mytemplates/foo.j2 dest=/etc/file.conf owner=bin group=wheel mode="u=rw,g=r,o=r" - -# Copy a new "sudoers" file into place, after passing validation with visudo -- template: src=/mine/sudoers dest=/etc/sudoers validate='visudo -cf %s' -''' diff --git a/library/files/unarchive b/library/files/unarchive deleted file mode 100644 index 657e464937..0000000000 --- a/library/files/unarchive +++ /dev/null @@ -1,250 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan -# (c) 2013, Dylan Martin -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: unarchive -version_added: 1.4 -short_description: Copies an archive to a remote location and unpack it -description: - - The M(unarchive) module copies an archive file from the local machine to a remote and unpacks it. -options: - src: - description: - - Local path to archive file to copy to the remote server; can be absolute or relative. 
- required: true - default: null - dest: - description: - - Remote absolute path where the archive should be unpacked - required: true - default: null - copy: - description: - - "if true, the file is copied from the 'master' to the target machine, otherwise, the plugin will look for src archive at the target machine." - required: false - choices: [ "yes", "no" ] - default: "yes" - creates: - description: - - a filename, when it already exists, this step will B(not) be run. - required: no - default: null - version_added: "1.6" -author: Dylan Martin -todo: - - detect changed/unchanged for .zip files - - handle common unarchive args, like preserve owner/timestamp etc... -notes: - - requires C(tar)/C(unzip) command on target host - - can handle I(gzip), I(bzip2) and I(xz) compressed as well as uncompressed tar files - - detects type of archive automatically - - uses tar's C(--diff arg) to calculate if changed or not. If this C(arg) is not - supported, it will always unpack the archive - - does not detect if a .zip file is different from destination - always unzips - - existing files/directories in the destination which are not in the archive - are not touched. 
This is the same behavior as a normal archive extraction - - existing files/directories in the destination which are not in the archive - are ignored for purposes of deciding if the archive should be unpacked or not -''' - -EXAMPLES = ''' -# Example from Ansible Playbooks -- unarchive: src=foo.tgz dest=/var/lib/foo - -# Unarchive a file that is already on the remote machine -- unarchive: src=/tmp/foo.zip dest=/usr/local/bin copy=no -''' - -import os - - -# class to handle .zip files -class ZipFile(object): - - def __init__(self, src, dest, module): - self.src = src - self.dest = dest - self.module = module - self.cmd_path = self.module.get_bin_path('unzip') - - def is_unarchived(self): - return dict(unarchived=False) - - def unarchive(self): - cmd = '%s -o "%s" -d "%s"' % (self.cmd_path, self.src, self.dest) - rc, out, err = self.module.run_command(cmd) - return dict(cmd=cmd, rc=rc, out=out, err=err) - - def can_handle_archive(self): - if not self.cmd_path: - return False - cmd = '%s -l "%s"' % (self.cmd_path, self.src) - rc, out, err = self.module.run_command(cmd) - if rc == 0: - return True - return False - - -# class to handle gzipped tar files -class TgzFile(object): - - def __init__(self, src, dest, module): - self.src = src - self.dest = dest - self.module = module - self.cmd_path = self.module.get_bin_path('tar') - self.zipflag = 'z' - - def is_unarchived(self): - cmd = '%s -v -C "%s" --diff -%sf "%s"' % (self.cmd_path, self.dest, self.zipflag, self.src) - rc, out, err = self.module.run_command(cmd) - unarchived = (rc == 0) - return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd) - - def unarchive(self): - cmd = '%s -x%sf "%s"' % (self.cmd_path, self.zipflag, self.src) - rc, out, err = self.module.run_command(cmd, cwd=self.dest) - return dict(cmd=cmd, rc=rc, out=out, err=err) - - def can_handle_archive(self): - if not self.cmd_path: - return False - cmd = '%s -t%sf "%s"' % (self.cmd_path, self.zipflag, self.src) - rc, out, err = 
self.module.run_command(cmd) - if rc == 0: - if len(out.splitlines(True)) > 0: - return True - return False - - -# class to handle tar files that aren't compressed -class TarFile(TgzFile): - def __init__(self, src, dest, module): - self.src = src - self.dest = dest - self.module = module - self.cmd_path = self.module.get_bin_path('tar') - self.zipflag = '' - - -# class to handle bzip2 compressed tar files -class TarBzip(TgzFile): - def __init__(self, src, dest, module): - self.src = src - self.dest = dest - self.module = module - self.cmd_path = self.module.get_bin_path('tar') - self.zipflag = 'j' - - -# class to handle xz compressed tar files -class TarXz(TgzFile): - def __init__(self, src, dest, module): - self.src = src - self.dest = dest - self.module = module - self.cmd_path = self.module.get_bin_path('tar') - self.zipflag = 'J' - - -# try handlers in order and return the one that works or bail if none work -def pick_handler(src, dest, module): - handlers = [TgzFile, ZipFile, TarFile, TarBzip, TarXz] - for handler in handlers: - obj = handler(src, dest, module) - if obj.can_handle_archive(): - return obj - module.fail_json(msg='Failed to find handler to unarchive. Make sure the required command to extract the file is installed.') - - -def main(): - module = AnsibleModule( - # not checking because of daisy chain to file module - argument_spec = dict( - src = dict(required=True), - original_basename = dict(required=False), # used to handle 'dest is a directory' via template, a slight hack - dest = dict(required=True), - copy = dict(default=True, type='bool'), - creates = dict(required=False), - ), - add_file_common_args=True, - ) - - src = os.path.expanduser(module.params['src']) - dest = os.path.expanduser(module.params['dest']) - copy = module.params['copy'] - creates = module.params['creates'] - - # did tar file arrive? 
- if not os.path.exists(src): - if copy: - module.fail_json(msg="Source '%s' failed to transfer" % src) - else: - module.fail_json(msg="Source '%s' does not exist" % src) - if not os.access(src, os.R_OK): - module.fail_json(msg="Source '%s' not readable" % src) - - if creates: - # do not run the command if the line contains creates=filename - # and the filename already exists. This allows idempotence - # of command executions. - v = os.path.expanduser(creates) - if os.path.exists(v): - module.exit_json( - stdout="skipped, since %s exists" % v, - skipped=True, - changed=False, - stderr=False, - rc=0 - ) - - # is dest OK to receive tar file? - if not os.path.isdir(dest): - module.fail_json(msg="Destination '%s' is not a directory" % dest) - if not os.access(dest, os.W_OK): - module.fail_json(msg="Destination '%s' not writable" % dest) - - handler = pick_handler(src, dest, module) - - res_args = dict(handler=handler.__class__.__name__, dest=dest, src=src) - - # do we need to do unpack? - res_args['check_results'] = handler.is_unarchived() - if res_args['check_results']['unarchived']: - res_args['changed'] = False - module.exit_json(**res_args) - - # do the unpack - try: - res_args['extract_results'] = handler.unarchive() - if res_args['extract_results']['rc'] != 0: - module.fail_json(msg="failed to unpack %s to %s" % (src, dest), **res_args) - except IOError: - module.fail_json(msg="failed to unpack %s to %s" % (src, dest)) - - res_args['changed'] = True - - module.exit_json(**res_args) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/files/xattr b/library/files/xattr deleted file mode 100644 index 94115ae3b5..0000000000 --- a/library/files/xattr +++ /dev/null @@ -1,206 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of 
the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: xattr -version_added: "1.3" -short_description: set/retrieve extended attributes -description: - - Manages filesystem user defined extended attributes, requires that they are enabled - on the target filesystem and that the setfattr/getfattr utilities are present. -options: - name: - required: true - default: None - aliases: ['path'] - description: - - The full path of the file/object to get the facts of - key: - required: false - default: None - description: - - The name of a specific Extended attribute key to set/retrieve - value: - required: false - default: None - description: - - The value to set the named name/key to, it automatically sets the C(state) to 'set' - state: - required: false - default: get - choices: [ 'read', 'present', 'all', 'keys', 'absent' ] - description: - - defines which state you want to do. - C(read) retrieves the current value for a C(key) (default) - C(present) sets C(name) to C(value), default if value is set - C(all) dumps all data - C(keys) retrieves all keys - C(absent) deletes the key - follow: - required: false - default: yes - choices: [ 'yes', 'no' ] - description: - - if yes, dereferences symlinks and sets/gets attributes on symlink target, - otherwise acts on symlink itself. 
- -author: Brian Coca -''' - -EXAMPLES = ''' -# Obtain the extended attributes of /etc/foo.conf -- xattr: name=/etc/foo.conf - -# Sets the key 'foo' to value 'bar' -- xattr: path=/etc/foo.conf key=user.foo value=bar - -# Removes the key 'foo' -- xattr: name=/etc/foo.conf key=user.foo state=absent -''' - -import operator - -def get_xattr_keys(module,path,follow): - cmd = [ module.get_bin_path('getfattr', True) ] - # prevents warning and not sure why it's not default - cmd.append('--absolute-names') - if not follow: - cmd.append('-h') - cmd.append(path) - - return _run_xattr(module,cmd) - -def get_xattr(module,path,key,follow): - - cmd = [ module.get_bin_path('getfattr', True) ] - # prevents warning and not sure why it's not default - cmd.append('--absolute-names') - if not follow: - cmd.append('-h') - if key is None: - cmd.append('-d') - else: - cmd.append('-n %s' % key) - cmd.append(path) - - return _run_xattr(module,cmd,False) - -def set_xattr(module,path,key,value,follow): - - cmd = [ module.get_bin_path('setfattr', True) ] - if not follow: - cmd.append('-h') - cmd.append('-n %s' % key) - cmd.append('-v %s' % value) - cmd.append(path) - - return _run_xattr(module,cmd) - -def rm_xattr(module,path,key,follow): - - cmd = [ module.get_bin_path('setfattr', True) ] - if not follow: - cmd.append('-h') - cmd.append('-x %s' % key) - cmd.append(path) - - return _run_xattr(module,cmd,False) - -def _run_xattr(module,cmd,check_rc=True): - - try: - (rc, out, err) = module.run_command(' '.join(cmd), check_rc=check_rc) - except Exception, e: - module.fail_json(msg="%s!" 
% e.strerror) - - #result = {'raw': out} - result = {} - for line in out.splitlines(): - if re.match("^#", line) or line == "": - pass - elif re.search('=', line): - (key, val) = line.split("=") - result[key] = val.strip('"') - else: - result[line] = '' - return result - -def main(): - module = AnsibleModule( - argument_spec = dict( - name = dict(required=True, aliases=['path']), - key = dict(required=False, default=None), - value = dict(required=False, default=None), - state = dict(required=False, default='read', choices=[ 'read', 'present', 'all', 'keys', 'absent' ], type='str'), - follow = dict(required=False, type='bool', default=True), - ), - supports_check_mode=True, - ) - path = module.params.get('name') - key = module.params.get('key') - value = module.params.get('value') - state = module.params.get('state') - follow = module.params.get('follow') - - if not os.path.exists(path): - module.fail_json(msg="path not found or not accessible!") - - - changed=False - msg = "" - res = {} - - if key is None and state in ['present','absent']: - module.fail_json(msg="%s needs a key parameter" % state) - - # All xattr must begin in user namespace - if key is not None and not re.match('^user\.',key): - key = 'user.%s' % key - - - if (state == 'present' or value is not None): - current=get_xattr(module,path,key,follow) - if current is None or not key in current or value != current[key]: - if not module.check_mode: - res = set_xattr(module,path,key,value,follow) - changed=True - res=current - msg="%s set to %s" % (key, value) - elif state == 'absent': - current=get_xattr(module,path,key,follow) - if current is not None and key in current: - if not module.check_mode: - res = rm_xattr(module,path,key,follow) - changed=True - res=current - msg="%s removed" % (key) - elif state == 'keys': - res=get_xattr_keys(module,path,follow) - msg="returning all keys" - elif state == 'all': - res=get_xattr(module,path,None,follow) - msg="dumping all" - else: - 
res=get_xattr(module,path,key,follow) - msg="returning %s" % key - - module.exit_json(changed=changed, msg=msg, xattr=res) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/internal/async_status b/library/internal/async_status deleted file mode 100644 index f991b50064..0000000000 --- a/library/internal/async_status +++ /dev/null @@ -1,99 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan , and others -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -DOCUMENTATION = ''' ---- -module: async_status -short_description: Obtain status of asynchronous task -description: - - "This module gets the status of an asynchronous task." -version_added: "0.5" -options: - jid: - description: - - Job or task identifier - required: true - default: null - aliases: [] - mode: - description: - - if C(status), obtain the status; if C(cleanup), clean up the async job cache - located in C(~/.ansible_async/) for the specified job I(jid). 
- required: false - choices: [ "status", "cleanup" ] - default: "status" -notes: - - See also U(http://docs.ansible.com/playbooks_async.html) -requirements: [] -author: Michael DeHaan -''' - -import datetime -import traceback - -def main(): - - module = AnsibleModule(argument_spec=dict( - jid=dict(required=True), - mode=dict(default='status', choices=['status','cleanup']), - )) - - mode = module.params['mode'] - jid = module.params['jid'] - - # setup logging directory - logdir = os.path.expanduser("~/.ansible_async") - log_path = os.path.join(logdir, jid) - - if not os.path.exists(log_path): - module.fail_json(msg="could not find job", ansible_job_id=jid) - - if mode == 'cleanup': - os.unlink(log_path) - module.exit_json(ansible_job_id=jid, erased=log_path) - - # NOT in cleanup mode, assume regular status mode - # no remote kill mode currently exists, but probably should - # consider log_path + ".pid" file and also unlink that above - - data = file(log_path).read() - try: - data = json.loads(data) - except Exception, e: - if data == '': - # file not written yet? 
That means it is running - module.exit_json(results_file=log_path, ansible_job_id=jid, started=1, finished=0) - else: - module.fail_json(ansible_job_id=jid, results_file=log_path, - msg="Could not parse job output: %s" % data) - - if not 'started' in data: - data['finished'] = 1 - data['ansible_job_id'] = jid - - # Fix error: TypeError: exit_json() keywords must be strings - data = dict([(str(k), v) for k, v in data.iteritems()]) - - module.exit_json(**data) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/internal/async_wrapper b/library/internal/async_wrapper deleted file mode 100644 index 2bc2dc2182..0000000000 --- a/library/internal/async_wrapper +++ /dev/null @@ -1,200 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan , and others -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
-# - -try: - import json -except ImportError: - import simplejson as json -import shlex -import os -import subprocess -import sys -import datetime -import traceback -import signal -import time -import syslog - -def daemonize_self(): - # daemonizing code: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012 - # logger.info("cobblerd started") - try: - pid = os.fork() - if pid > 0: - # exit first parent - sys.exit(0) - except OSError, e: - print >>sys.stderr, "fork #1 failed: %d (%s)" % (e.errno, e.strerror) - sys.exit(1) - - # decouple from parent environment - os.chdir("/") - os.setsid() - os.umask(022) - - # do second fork - try: - pid = os.fork() - if pid > 0: - # print "Daemon PID %d" % pid - sys.exit(0) - except OSError, e: - print >>sys.stderr, "fork #2 failed: %d (%s)" % (e.errno, e.strerror) - sys.exit(1) - - dev_null = file('/dev/null','rw') - os.dup2(dev_null.fileno(), sys.stdin.fileno()) - os.dup2(dev_null.fileno(), sys.stdout.fileno()) - os.dup2(dev_null.fileno(), sys.stderr.fileno()) - -if len(sys.argv) < 3: - print json.dumps({ - "failed" : True, - "msg" : "usage: async_wrapper . Humans, do not call directly!" 
- }) - sys.exit(1) - -jid = "%s.%d" % (sys.argv[1], os.getpid()) -time_limit = sys.argv[2] -wrapped_module = sys.argv[3] -argsfile = sys.argv[4] -cmd = "%s %s" % (wrapped_module, argsfile) - -syslog.openlog('ansible-%s' % os.path.basename(__file__)) -syslog.syslog(syslog.LOG_NOTICE, 'Invoked with %s' % " ".join(sys.argv[1:])) - -# setup logging directory -logdir = os.path.expanduser("~/.ansible_async") -log_path = os.path.join(logdir, jid) - -if not os.path.exists(logdir): - try: - os.makedirs(logdir) - except: - print json.dumps({ - "failed" : 1, - "msg" : "could not create: %s" % logdir - }) - -def _run_command(wrapped_cmd, jid, log_path): - - logfile = open(log_path, "w") - logfile.write(json.dumps({ "started" : 1, "ansible_job_id" : jid })) - logfile.close() - logfile = open(log_path, "w") - result = {} - - outdata = '' - try: - cmd = shlex.split(wrapped_cmd) - script = subprocess.Popen(cmd, shell=False, - stdin=None, stdout=logfile, stderr=logfile) - script.communicate() - outdata = file(log_path).read() - result = json.loads(outdata) - - except (OSError, IOError), e: - result = { - "failed": 1, - "cmd" : wrapped_cmd, - "msg": str(e), - } - result['ansible_job_id'] = jid - logfile.write(json.dumps(result)) - except: - result = { - "failed" : 1, - "cmd" : wrapped_cmd, - "data" : outdata, # temporary debug only - "msg" : traceback.format_exc() - } - result['ansible_job_id'] = jid - logfile.write(json.dumps(result)) - logfile.close() - -# immediately exit this process, leaving an orphaned process -# running which immediately forks a supervisory timing process - -#import logging -#import logging.handlers - -#logger = logging.getLogger("ansible_async") -#logger.setLevel(logging.WARNING) -#logger.addHandler( logging.handlers.SysLogHandler("/dev/log") ) -def debug(msg): - #logger.warning(msg) - pass - -try: - pid = os.fork() - if pid: - # Notify the overlord that the async process started - - # we need to not return immmediately such that the launched command has an 
attempt - # to initialize PRIOR to ansible trying to clean up the launch directory (and argsfile) - # this probably could be done with some IPC later. Modules should always read - # the argsfile at the very first start of their execution anyway - time.sleep(1) - debug("Return async_wrapper task started.") - print json.dumps({ "started" : 1, "ansible_job_id" : jid, "results_file" : log_path }) - sys.stdout.flush() - sys.exit(0) - else: - # The actual wrapper process - - # Daemonize, so we keep on running - daemonize_self() - - # we are now daemonized, create a supervisory process - debug("Starting module and watcher") - - sub_pid = os.fork() - if sub_pid: - # the parent stops the process after the time limit - remaining = int(time_limit) - - # set the child process group id to kill all children - os.setpgid(sub_pid, sub_pid) - - debug("Start watching %s (%s)"%(sub_pid, remaining)) - time.sleep(5) - while os.waitpid(sub_pid, os.WNOHANG) == (0, 0): - debug("%s still running (%s)"%(sub_pid, remaining)) - time.sleep(5) - remaining = remaining - 5 - if remaining <= 0: - debug("Now killing %s"%(sub_pid)) - os.killpg(sub_pid, signal.SIGKILL) - debug("Sent kill to group %s"%sub_pid) - time.sleep(1) - sys.exit(0) - debug("Done in kid B.") - os._exit(0) - else: - # the child process runs the actual module - debug("Start module (%s)"%os.getpid()) - _run_command(cmd, jid, log_path) - debug("Module complete (%s)"%os.getpid()) - sys.exit(0) - -except Exception, err: - debug("error: %s"%(err)) - raise err diff --git a/library/inventory/add_host b/library/inventory/add_host deleted file mode 100644 index 4fd4e1eb15..0000000000 --- a/library/inventory/add_host +++ /dev/null @@ -1,36 +0,0 @@ -# -*- mode: python -*- - -DOCUMENTATION = ''' ---- -module: add_host -short_description: add a host (and alternatively a group) to the ansible-playbook in-memory inventory -description: - - Use variables to create new hosts and groups in inventory for use in later plays of the same playbook. 
- Takes variables so you can define the new hosts more fully. -version_added: "0.9" -options: - name: - aliases: [ 'hostname', 'host' ] - description: - - The hostname/ip of the host to add to the inventory, can include a colon and a port number. - required: true - groups: - aliases: [ 'groupname', 'group' ] - description: - - The groups to add the hostname to, comma separated. - required: false -author: Seth Vidal -''' - -EXAMPLES = ''' -# add host to group 'just_created' with variable foo=42 -- add_host: name={{ ip_from_ec2 }} groups=just_created foo=42 - -# add a host with a non-standard port local to your machines -- add_host: name={{ new_ip }}:{{ new_port }} - -# add a host alias that we reach through a tunnel -- add_host: hostname={{ new_ip }} - ansible_ssh_host={{ inventory_hostname }} - ansible_ssh_port={{ new_port }} -''' diff --git a/library/inventory/group_by b/library/inventory/group_by deleted file mode 100644 index d09552e662..0000000000 --- a/library/inventory/group_by +++ /dev/null @@ -1,25 +0,0 @@ -# -*- mode: python -*- - -DOCUMENTATION = ''' ---- -module: group_by -short_description: Create Ansible groups based on facts -description: - - Use facts to create ad-hoc groups that can be used later in a playbook. -version_added: "0.9" -options: - key: - description: - - The variables whose values will be used as groups - required: true -author: Jeroen Hoekx -notes: - - Spaces in group names are converted to dashes '-'. 
-''' - -EXAMPLES = ''' -# Create groups based on the machine architecture -- group_by: key=machine_{{ ansible_machine }} -# Create groups like 'kvm-host' -- group_by: key=virt_{{ ansible_virtualization_type }}_{{ ansible_virtualization_role }} -''' diff --git a/library/messaging/rabbitmq_parameter b/library/messaging/rabbitmq_parameter deleted file mode 100644 index 2f78bd4ee1..0000000000 --- a/library/messaging/rabbitmq_parameter +++ /dev/null @@ -1,152 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Chatham Financial -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: rabbitmq_parameter -short_description: Adds or removes parameters to RabbitMQ -description: - - Manage dynamic, cluster-wide parameters for RabbitMQ -version_added: "1.1" -author: Chris Hoffman -options: - component: - description: - - Name of the component of which the parameter is being set - required: true - default: null - name: - description: - - Name of the parameter being set - required: true - default: null - value: - description: - - Value of the parameter, as a JSON term - required: false - default: null - vhost: - description: - - vhost to apply access privileges. 
- required: false - default: / - node: - description: - - erlang node name of the rabbit we wish to configure - required: false - default: rabbit - version_added: "1.2" - state: - description: - - Specify if user is to be added or removed - required: false - default: present - choices: [ 'present', 'absent'] -''' - -EXAMPLES = """ -# Set the federation parameter 'local_username' to a value of 'guest' (in quotes) -- rabbitmq_parameter: component=federation - name=local-username - value='"guest"' - state=present -""" - -class RabbitMqParameter(object): - def __init__(self, module, component, name, value, vhost, node): - self.module = module - self.component = component - self.name = name - self.value = value - self.vhost = vhost - self.node = node - - self._value = None - - self._rabbitmqctl = module.get_bin_path('rabbitmqctl', True) - - def _exec(self, args, run_in_check_mode=False): - if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): - cmd = [self._rabbitmqctl, '-q', '-n', self.node] - rc, out, err = self.module.run_command(cmd + args, check_rc=True) - return out.splitlines() - return list() - - def get(self): - parameters = self._exec(['list_parameters', '-p', self.vhost], True) - - for param_item in parameters: - component, name, value = param_item.split('\t') - - if component == self.component and name == self.name: - self._value = value - return True - return False - - def set(self): - self._exec(['set_parameter', '-p', self.vhost, self.component, self.name, self.value]) - - def delete(self): - self._exec(['clear_parameter', '-p', self.vhost, self.component, self.name]) - - def has_modifications(self): - return self.value != self._value - -def main(): - arg_spec = dict( - component=dict(required=True), - name=dict(required=True), - value=dict(default=None), - vhost=dict(default='/'), - state=dict(default='present', choices=['present', 'absent']), - node=dict(default='rabbit') - ) - module = AnsibleModule( - argument_spec=arg_spec, 
- supports_check_mode=True - ) - - component = module.params['component'] - name = module.params['name'] - value = module.params['value'] - vhost = module.params['vhost'] - state = module.params['state'] - node = module.params['node'] - - rabbitmq_parameter = RabbitMqParameter(module, component, name, value, vhost, node) - - changed = False - if rabbitmq_parameter.get(): - if state == 'absent': - rabbitmq_parameter.delete() - changed = True - else: - if rabbitmq_parameter.has_modifications(): - rabbitmq_parameter.set() - changed = True - elif state == 'present': - rabbitmq_parameter.set() - changed = True - - module.exit_json(changed=changed, component=component, name=name, vhost=vhost, state=state) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/messaging/rabbitmq_plugin b/library/messaging/rabbitmq_plugin deleted file mode 100644 index 53c38f978d..0000000000 --- a/library/messaging/rabbitmq_plugin +++ /dev/null @@ -1,130 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Chatham Financial -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -DOCUMENTATION = ''' ---- -module: rabbitmq_plugin -short_description: Adds or removes plugins to RabbitMQ -description: - - Enables or disables RabbitMQ plugins -version_added: "1.1" -author: Chris Hoffman -options: - names: - description: - - Comma-separated list of plugin names - required: true - default: null - aliases: [name] - new_only: - description: - - Only enable missing plugins - - Does not disable plugins that are not in the names list - required: false - default: "no" - choices: [ "yes", "no" ] - state: - description: - - Specify if plugins are to be enabled or disabled - required: false - default: enabled - choices: [enabled, disabled] - prefix: - description: - - Specify a custom install prefix to a Rabbit - required: false - version_added: "1.3" - default: null -''' - -EXAMPLES = ''' -# Enables the rabbitmq_management plugin -- rabbitmq_plugin: names=rabbitmq_management state=enabled -''' - -class RabbitMqPlugins(object): - def __init__(self, module): - self.module = module - - if module.params['prefix']: - self._rabbitmq_plugins = module.params['prefix'] + "/sbin/rabbitmq-plugins" - else: - self._rabbitmq_plugins = module.get_bin_path('rabbitmq-plugins', True) - - def _exec(self, args, run_in_check_mode=False): - if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): - cmd = [self._rabbitmq_plugins] - rc, out, err = self.module.run_command(cmd + args, check_rc=True) - return out.splitlines() - return list() - - def get_all(self): - return self._exec(['list', '-E', '-m'], True) - - def enable(self, name): - self._exec(['enable', name]) - - def disable(self, name): - self._exec(['disable', name]) - -def main(): - arg_spec = dict( - names=dict(required=True, aliases=['name']), - new_only=dict(default='no', type='bool'), - state=dict(default='enabled', choices=['enabled', 'disabled']), - prefix=dict(required=False, default=None) - ) - module = AnsibleModule( - argument_spec=arg_spec, - supports_check_mode=True - ) - - names 
= module.params['names'].split(',') - new_only = module.params['new_only'] - state = module.params['state'] - - rabbitmq_plugins = RabbitMqPlugins(module) - enabled_plugins = rabbitmq_plugins.get_all() - - enabled = [] - disabled = [] - if state == 'enabled': - if not new_only: - for plugin in enabled_plugins: - if plugin not in names: - rabbitmq_plugins.disable(plugin) - disabled.append(plugin) - - for name in names: - if name not in enabled_plugins: - rabbitmq_plugins.enable(name) - enabled.append(name) - else: - for plugin in enabled_plugins: - if plugin in names: - rabbitmq_plugins.disable(plugin) - disabled.append(plugin) - - changed = len(enabled) > 0 or len(disabled) > 0 - module.exit_json(changed=changed, enabled=enabled, disabled=disabled) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/messaging/rabbitmq_policy b/library/messaging/rabbitmq_policy deleted file mode 100644 index 800c3822d5..0000000000 --- a/library/messaging/rabbitmq_policy +++ /dev/null @@ -1,156 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, John Dewey -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -DOCUMENTATION = ''' ---- -module: rabbitmq_policy -short_description: Manage the state of policies in RabbitMQ. -description: - - Manage the state of a virtual host in RabbitMQ. 
-version_added: "1.5" -author: John Dewey -options: - name: - description: - - The name of the policy to manage. - required: true - default: null - vhost: - description: - - The name of the vhost to apply to. - required: false - default: / - pattern: - description: - - A regex of queues to apply the policy to. - required: true - default: null - tags: - description: - - A dict or string describing the policy. - required: true - default: null - priority: - description: - - The priority of the policy. - required: false - default: 0 - node: - description: - - Erlang node name of the rabbit we wish to configure. - required: false - default: rabbit - state: - description: - - The state of the policy. - default: present - choices: [present, absent] -''' - -EXAMPLES = ''' -- name: ensure the default vhost contains the HA policy via a dict - rabbitmq_policy: name=HA pattern='.*' - args: - tags: - "ha-mode": all - -- name: ensure the default vhost contains the HA policy - rabbitmq_policy: name=HA pattern='.*' tags="ha-mode=all" -''' -class RabbitMqPolicy(object): - def __init__(self, module, name): - self._module = module - self._name = name - self._vhost = module.params['vhost'] - self._pattern = module.params['pattern'] - self._tags = module.params['tags'] - self._priority = module.params['priority'] - self._node = module.params['node'] - self._rabbitmqctl = module.get_bin_path('rabbitmqctl', True) - - def _exec(self, args, run_in_check_mode=False): - if not self._module.check_mode or (self._module.check_mode and run_in_check_mode): - cmd = [self._rabbitmqctl, '-q', '-n', self._node] - args.insert(1, '-p') - args.insert(2, self._vhost) - rc, out, err = self._module.run_command(cmd + args, check_rc=True) - return out.splitlines() - return list() - - def list(self): - policies = self._exec(['list_policies'], True) - - for policy in policies: - policy_name = policy.split('\t')[1] - if policy_name == self._name: - return True - return False - - def set(self): - import json - 
args = ['set_policy'] - args.append(self._name) - args.append(self._pattern) - args.append(json.dumps(self._tags)) - args.append('--priority') - args.append(self._priority) - return self._exec(args) - - def clear(self): - return self._exec(['clear_policy', self._name]) - - -def main(): - arg_spec = dict( - name=dict(required=True), - vhost=dict(default='/'), - pattern=dict(required=True), - tags=dict(type='dict', required=True), - priority=dict(default='0'), - node=dict(default='rabbit'), - state=dict(default='present', choices=['present', 'absent']), - ) - - module = AnsibleModule( - argument_spec=arg_spec, - supports_check_mode=True - ) - - name = module.params['name'] - state = module.params['state'] - rabbitmq_policy = RabbitMqPolicy(module, name) - - changed = False - if rabbitmq_policy.list(): - if state == 'absent': - rabbitmq_policy.clear() - changed = True - else: - changed = False - elif state == 'present': - rabbitmq_policy.set() - changed = True - - module.exit_json(changed=changed, name=name, state=state) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/messaging/rabbitmq_user b/library/messaging/rabbitmq_user deleted file mode 100644 index 1cbee360df..0000000000 --- a/library/messaging/rabbitmq_user +++ /dev/null @@ -1,249 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Chatham Financial -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: rabbitmq_user -short_description: Adds or removes users to RabbitMQ -description: - - Add or remove users to RabbitMQ and assign permissions -version_added: "1.1" -author: Chris Hoffman -options: - user: - description: - - Name of user to add - required: true - default: null - aliases: [username, name] - password: - description: - - Password of user to add. - - To change the password of an existing user, you must also specify - C(force=yes). - required: false - default: null - tags: - description: - - User tags specified as comma delimited - required: false - default: null - vhost: - description: - - vhost to apply access privileges. - required: false - default: / - node: - description: - - erlang node name of the rabbit we wish to configure - required: false - default: rabbit - version_added: "1.2" - configure_priv: - description: - - Regular expression to restrict configure actions on a resource - for the specified vhost. - - By default all actions are restricted. - required: false - default: ^$ - write_priv: - description: - - Regular expression to restrict configure actions on a resource - for the specified vhost. - - By default all actions are restricted. - required: false - default: ^$ - read_priv: - description: - - Regular expression to restrict configure actions on a resource - for the specified vhost. - - By default all actions are restricted. - required: false - default: ^$ - force: - description: - - Deletes and recreates the user. 
- required: false - default: "no" - choices: [ "yes", "no" ] - state: - description: - - Specify if user is to be added or removed - required: false - default: present - choices: [present, absent] -''' - -EXAMPLES = ''' -# Add user to server and assign full access control -- rabbitmq_user: user=joe - password=changeme - vhost=/ - configure_priv=.* - read_priv=.* - write_priv=.* - state=present -''' - -class RabbitMqUser(object): - def __init__(self, module, username, password, tags, vhost, configure_priv, write_priv, read_priv, node): - self.module = module - self.username = username - self.password = password - self.node = node - if tags is None: - self.tags = list() - else: - self.tags = tags.split(',') - - permissions = dict( - vhost=vhost, - configure_priv=configure_priv, - write_priv=write_priv, - read_priv=read_priv - ) - self.permissions = permissions - - self._tags = None - self._permissions = None - self._rabbitmqctl = module.get_bin_path('rabbitmqctl', True) - - def _exec(self, args, run_in_check_mode=False): - if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): - cmd = [self._rabbitmqctl, '-q', '-n', self.node] - rc, out, err = self.module.run_command(cmd + args, check_rc=True) - return out.splitlines() - return list() - - def get(self): - users = self._exec(['list_users'], True) - - for user_tag in users: - user, tags = user_tag.split('\t') - - if user == self.username: - for c in ['[',']',' ']: - tags = tags.replace(c, '') - - if tags != '': - self._tags = tags.split(',') - else: - self._tags = list() - - self._permissions = self._get_permissions() - return True - return False - - def _get_permissions(self): - perms_out = self._exec(['list_user_permissions', self.username], True) - - for perm in perms_out: - vhost, configure_priv, write_priv, read_priv = perm.split('\t') - if vhost == self.permissions['vhost']: - return dict(vhost=vhost, configure_priv=configure_priv, write_priv=write_priv, read_priv=read_priv) - - return 
dict() - - def add(self): - self._exec(['add_user', self.username, self.password]) - - def delete(self): - self._exec(['delete_user', self.username]) - - def set_tags(self): - self._exec(['set_user_tags', self.username] + self.tags) - - def set_permissions(self): - cmd = ['set_permissions'] - cmd.append('-p') - cmd.append(self.permissions['vhost']) - cmd.append(self.username) - cmd.append(self.permissions['configure_priv']) - cmd.append(self.permissions['write_priv']) - cmd.append(self.permissions['read_priv']) - self._exec(cmd) - - def has_tags_modifications(self): - return set(self.tags) != set(self._tags) - - def has_permissions_modifications(self): - return self._permissions != self.permissions - -def main(): - arg_spec = dict( - user=dict(required=True, aliases=['username', 'name']), - password=dict(default=None), - tags=dict(default=None), - vhost=dict(default='/'), - configure_priv=dict(default='^$'), - write_priv=dict(default='^$'), - read_priv=dict(default='^$'), - force=dict(default='no', type='bool'), - state=dict(default='present', choices=['present', 'absent']), - node=dict(default='rabbit') - ) - module = AnsibleModule( - argument_spec=arg_spec, - supports_check_mode=True - ) - - username = module.params['user'] - password = module.params['password'] - tags = module.params['tags'] - vhost = module.params['vhost'] - configure_priv = module.params['configure_priv'] - write_priv = module.params['write_priv'] - read_priv = module.params['read_priv'] - force = module.params['force'] - state = module.params['state'] - node = module.params['node'] - - rabbitmq_user = RabbitMqUser(module, username, password, tags, vhost, configure_priv, write_priv, read_priv, node) - - changed = False - if rabbitmq_user.get(): - if state == 'absent': - rabbitmq_user.delete() - changed = True - else: - if force: - rabbitmq_user.delete() - rabbitmq_user.add() - rabbitmq_user.get() - changed = True - - if rabbitmq_user.has_tags_modifications(): - rabbitmq_user.set_tags() - 
changed = True - - if rabbitmq_user.has_permissions_modifications(): - rabbitmq_user.set_permissions() - changed = True - elif state == 'present': - rabbitmq_user.add() - rabbitmq_user.set_tags() - rabbitmq_user.set_permissions() - changed = True - - module.exit_json(changed=changed, user=username, state=state) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/messaging/rabbitmq_vhost b/library/messaging/rabbitmq_vhost deleted file mode 100644 index fd4b04a683..0000000000 --- a/library/messaging/rabbitmq_vhost +++ /dev/null @@ -1,147 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Chatham Financial -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
-# - -DOCUMENTATION = ''' ---- -module: rabbitmq_vhost -short_description: Manage the state of a virtual host in RabbitMQ -description: - - Manage the state of a virtual host in RabbitMQ -version_added: "1.1" -author: Chris Hoffman -options: - name: - description: - - The name of the vhost to manage - required: true - default: null - aliases: [vhost] - node: - description: - - erlang node name of the rabbit we wish to configure - required: false - default: rabbit - version_added: "1.2" - tracing: - description: - - Enable/disable tracing for a vhost - default: "no" - choices: [ "yes", "no" ] - aliases: [trace] - state: - description: - - The state of vhost - default: present - choices: [present, absent] -''' - -EXAMPLES = ''' -# Ensure that the vhost /test exists. -- rabbitmq_vhost: name=/test state=present -''' - -class RabbitMqVhost(object): - def __init__(self, module, name, tracing, node): - self.module = module - self.name = name - self.tracing = tracing - self.node = node - - self._tracing = False - self._rabbitmqctl = module.get_bin_path('rabbitmqctl', True) - - def _exec(self, args, run_in_check_mode=False): - if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): - cmd = [self._rabbitmqctl, '-q', '-n', self.node] - rc, out, err = self.module.run_command(cmd + args, check_rc=True) - return out.splitlines() - return list() - - def get(self): - vhosts = self._exec(['list_vhosts', 'name', 'tracing'], True) - - for vhost in vhosts: - name, tracing = vhost.split('\t') - if name == self.name: - self._tracing = self.module.boolean(tracing) - return True - return False - - def add(self): - return self._exec(['add_vhost', self.name]) - - def delete(self): - return self._exec(['delete_vhost', self.name]) - - def set_tracing(self): - if self.tracing != self._tracing: - if self.tracing: - self._enable_tracing() - else: - self._disable_tracing() - return True - return False - - def _enable_tracing(self): - return self._exec(['trace_on', '-p', 
self.name]) - - def _disable_tracing(self): - return self._exec(['trace_off', '-p', self.name]) - - -def main(): - arg_spec = dict( - name=dict(required=True, aliases=['vhost']), - tracing=dict(default='off', aliases=['trace'], type='bool'), - state=dict(default='present', choices=['present', 'absent']), - node=dict(default='rabbit'), - ) - - module = AnsibleModule( - argument_spec=arg_spec, - supports_check_mode=True - ) - - name = module.params['name'] - tracing = module.params['tracing'] - state = module.params['state'] - node = module.params['node'] - - rabbitmq_vhost = RabbitMqVhost(module, name, tracing, node) - - changed = False - if rabbitmq_vhost.get(): - if state == 'absent': - rabbitmq_vhost.delete() - changed = True - else: - if rabbitmq_vhost.set_tracing(): - changed = True - elif state == 'present': - rabbitmq_vhost.add() - rabbitmq_vhost.set_tracing() - changed = True - - module.exit_json(changed=changed, name=name, state=state) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/monitoring/airbrake_deployment b/library/monitoring/airbrake_deployment deleted file mode 100644 index e1c490b881..0000000000 --- a/library/monitoring/airbrake_deployment +++ /dev/null @@ -1,130 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright 2013 Bruce Pennypacker -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -DOCUMENTATION = ''' ---- -module: airbrake_deployment -version_added: "1.2" -author: Bruce Pennypacker -short_description: Notify airbrake about app deployments -description: - - Notify airbrake about app deployments (see http://help.airbrake.io/kb/api-2/deploy-tracking) -options: - token: - description: - - API token. - required: true - environment: - description: - - The airbrake environment name, typically 'production', 'staging', etc. - required: true - user: - description: - - The username of the person doing the deployment - required: false - repo: - description: - - URL of the project repository - required: false - revision: - description: - - A hash, number, tag, or other identifier showing what revision was deployed - required: false - url: - description: - - Optional URL to submit the notification to. Use to send notifications to Airbrake-compliant tools like Errbit. - required: false - default: "https://airbrake.io/deploys" - version_added: "1.5" - validate_certs: - description: - - If C(no), SSL certificates for the target url will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - required: false - default: 'yes' - choices: ['yes', 'no'] - -# informational: requirements for nodes -requirements: [ urllib, urllib2 ] -''' - -EXAMPLES = ''' -- airbrake_deployment: token=AAAAAA - environment='staging' - user='ansible' - revision=4.2 -''' - -# =========================================== -# Module execution. 
-# - -def main(): - - module = AnsibleModule( - argument_spec=dict( - token=dict(required=True), - environment=dict(required=True), - user=dict(required=False), - repo=dict(required=False), - revision=dict(required=False), - url=dict(required=False, default='https://api.airbrake.io/deploys.txt'), - validate_certs=dict(default='yes', type='bool'), - ), - supports_check_mode=True - ) - - # build list of params - params = {} - - if module.params["environment"]: - params["deploy[rails_env]"] = module.params["environment"] - - if module.params["user"]: - params["deploy[local_username]"] = module.params["user"] - - if module.params["repo"]: - params["deploy[scm_repository]"] = module.params["repo"] - - if module.params["revision"]: - params["deploy[scm_revision]"] = module.params["revision"] - - params["api_key"] = module.params["token"] - - url = module.params.get('url') - - # If we're in check mode, just exit pretending like we succeeded - if module.check_mode: - module.exit_json(changed=True) - - # Send the data to airbrake - data = urllib.urlencode(params) - response, info = fetch_url(module, url, data=data) - if info['status'] == 200: - module.exit_json(changed=True) - else: - module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url)) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * - -main() - diff --git a/library/monitoring/bigpanda b/library/monitoring/bigpanda deleted file mode 100644 index 1195028707..0000000000 --- a/library/monitoring/bigpanda +++ /dev/null @@ -1,172 +0,0 @@ -#!/usr/bin/python - -DOCUMENTATION = ''' ---- -module: bigpanda -author: BigPanda -short_description: Notify BigPanda about deployments -version_added: "1.8" -description: - - Notify BigPanda when deployments start and end (successfully or not). Returns a deployment object containing all the parameters for future module calls. 
-options: - component: - description: - - "The name of the component being deployed. Ex: billing" - required: true - alias: name - version: - description: - - The deployment version. - required: true - token: - description: - - API token. - required: true - state: - description: - - State of the deployment. - required: true - choices: ['started', 'finished', 'failed'] - hosts: - description: - - Name of affected host name. Can be a list. - required: false - default: machine's hostname - alias: host - env: - description: - - The environment name, typically 'production', 'staging', etc. - required: false - owner: - description: - - The person responsible for the deployment. - required: false - description: - description: - - Free text description of the deployment. - required: false - url: - description: - - Base URL of the API server. - required: False - default: https://api.bigpanda.io - validate_certs: - description: - - If C(no), SSL certificates for the target url will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - required: false - default: 'yes' - choices: ['yes', 'no'] - -# informational: requirements for nodes -requirements: [ urllib, urllib2 ] -''' - -EXAMPLES = ''' -- bigpanda: component=myapp version=1.3 token={{ bigpanda_token }} state=started -... -- bigpanda: component=myapp version=1.3 token={{ bigpanda_token }} state=finished - -or using a deployment object: -- bigpanda: component=myapp version=1.3 token={{ bigpanda_token }} state=started - register: deployment - -- bigpanda: state=finished - args: deployment - -If outside servers aren't reachable from your machine, use local_action and pass the hostname: -- local_action: bigpanda component=myapp version=1.3 hosts={{ansible_hostname}} token={{ bigpanda_token }} state=started - register: deployment -... -- local_action: bigpanda state=finished - args: deployment -''' - -# =========================================== -# Module execution. 
-# -import socket - -def main(): - - module = AnsibleModule( - argument_spec=dict( - component=dict(required=True, aliases=['name']), - version=dict(required=True), - token=dict(required=True), - state=dict(required=True, choices=['started', 'finished', 'failed']), - hosts=dict(required=False, default=[socket.gethostname()], aliases=['host']), - env=dict(required=False), - owner=dict(required=False), - description=dict(required=False), - message=dict(required=False), - source_system=dict(required=False, default='ansible'), - validate_certs=dict(default='yes', type='bool'), - url=dict(required=False, default='https://api.bigpanda.io'), - ), - supports_check_mode=True, - check_invalid_arguments=False, - ) - - token = module.params['token'] - state = module.params['state'] - url = module.params['url'] - - # Build the common request body - body = dict() - for k in ('component', 'version', 'hosts'): - v = module.params[k] - if v is not None: - body[k] = v - - if not isinstance(body['hosts'], list): - body['hosts'] = [body['hosts']] - - # Insert state-specific attributes to body - if state == 'started': - for k in ('source_system', 'env', 'owner', 'description'): - v = module.params[k] - if v is not None: - body[k] = v - - request_url = url + '/data/events/deployments/start' - else: - message = module.params['message'] - if message is not None: - body['errorMessage'] = message - - if state == 'finished': - body['status'] = 'success' - else: - body['status'] = 'failure' - - request_url = url + '/data/events/deployments/end' - - # Build the deployment object we return - deployment = dict(token=token, url=url) - deployment.update(body) - if 'errorMessage' in deployment: - message = deployment.pop('errorMessage') - deployment['message'] = message - - # If we're in check mode, just exit pretending like we succeeded - if module.check_mode: - module.exit_json(changed=True, **deployment) - - # Send the data to bigpanda - data = json.dumps(body) - headers = 
{'Authorization':'Bearer %s' % token, 'Content-Type':'application/json'} - try: - response, info = fetch_url(module, request_url, data=data, headers=headers) - if info['status'] == 200: - module.exit_json(changed=True, **deployment) - else: - module.fail_json(msg=json.dumps(info)) - except Exception as e: - module.fail_json(msg=str(e)) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * - -main() diff --git a/library/monitoring/boundary_meter b/library/monitoring/boundary_meter deleted file mode 100644 index da739d4306..0000000000 --- a/library/monitoring/boundary_meter +++ /dev/null @@ -1,256 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -""" -Ansible module to add boundary meters. - -(c) 2013, curtis - -This file is part of Ansible - -Ansible is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -Ansible is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with Ansible. If not, see . 
-""" - -import json -import datetime -import base64 -import os - -DOCUMENTATION = ''' - -module: boundary_meter -short_description: Manage boundary meters -description: - - This module manages boundary meters -version_added: "1.3" -author: curtis@serverascode.com -requirements: - - Boundary API access - - bprobe is required to send data, but not to register a meter - - Python urllib2 -options: - name: - description: - - meter name - required: true - state: - description: - - Whether to create or remove the client from boundary - required: false - default: true - choices: ["present", "absent"] - apiid: - description: - - Organizations boundary API ID - required: true - apikey: - description: - - Organizations boundary API KEY - required: true - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - required: false - default: 'yes' - choices: ['yes', 'no'] - version_added: 1.5.1 - -notes: - - This module does not yet support boundary tags. - -''' - -EXAMPLES=''' -- name: Create meter - boundary_meter: apiid=AAAAAA api_key=BBBBBB state=present name={{ inventory_hostname }}" - -- name: Delete meter - boundary_meter: apiid=AAAAAA api_key=BBBBBB state=absent name={{ inventory_hostname }}" - -''' - -api_host = "api.boundary.com" -config_directory = "/etc/bprobe" - -# "resource" like thing or apikey? 
-def auth_encode(apikey): - auth = base64.standard_b64encode(apikey) - auth.replace("\n", "") - return auth - -def build_url(name, apiid, action, meter_id=None, cert_type=None): - if action == "create": - return 'https://%s/%s/meters' % (api_host, apiid) - elif action == "search": - return "https://%s/%s/meters?name=%s" % (api_host, apiid, name) - elif action == "certificates": - return "https://%s/%s/meters/%s/%s.pem" % (api_host, apiid, meter_id, cert_type) - elif action == "tags": - return "https://%s/%s/meters/%s/tags" % (api_host, apiid, meter_id) - elif action == "delete": - return "https://%s/%s/meters/%s" % (api_host, apiid, meter_id) - -def http_request(module, name, apiid, apikey, action, data=None, meter_id=None, cert_type=None): - - if meter_id is None: - url = build_url(name, apiid, action) - else: - if cert_type is None: - url = build_url(name, apiid, action, meter_id) - else: - url = build_url(name, apiid, action, meter_id, cert_type) - - headers = dict() - headers["Authorization"] = "Basic %s" % auth_encode(apikey) - headers["Content-Type"] = "application/json" - - return fetch_url(module, url, data=data, headers=headers) - -def create_meter(module, name, apiid, apikey): - - meters = search_meter(module, name, apiid, apikey) - - if len(meters) > 0: - # If the meter already exists, do nothing - module.exit_json(status="Meter " + name + " already exists",changed=False) - else: - # If it doesn't exist, create it - body = '{"name":"' + name + '"}' - response, info = http_request(module, name, apiid, apikey, data=body, action="create") - - if info['status'] != 200: - module.fail_json(msg="Failed to connect to api host to create meter") - - # If the config directory doesn't exist, create it - if not os.path.exists(config_directory): - try: - os.makedirs(config_directory) - except: - module.fail_json("Could not create " + config_directory) - - - # Download both cert files from the api host - types = ['key', 'cert'] - for cert_type in types: - try: - # If 
we can't open the file it's not there, so we should download it - cert_file = open('%s/%s.pem' % (config_directory,cert_type)) - except IOError: - # Now download the file... - rc = download_request(module, name, apiid, apikey, cert_type) - if rc == False: - module.fail_json("Download request for " + cert_type + ".pem failed") - - return 0, "Meter " + name + " created" - -def search_meter(module, name, apiid, apikey): - - response, info = http_request(module, name, apiid, apikey, action="search") - - if info['status'] != 200: - module.fail_json("Failed to connect to api host to search for meter") - - # Return meters - return json.loads(response.read()) - -def get_meter_id(module, name, apiid, apikey): - # In order to delete the meter we need its id - meters = search_meter(module, name, apiid, apikey) - - if len(meters) > 0: - return meters[0]['id'] - else: - return None - -def delete_meter(module, name, apiid, apikey): - - meter_id = get_meter_id(module, name, apiid, apikey) - - if meter_id is None: - return 1, "Meter does not exist, so can't delete it" - else: - response, info = http_request(module, name, apiid, apikey, action, meter_id) - if info['status'] != 200: - module.fail_json("Failed to delete meter") - - # Each new meter gets a new key.pem and ca.pem file, so they should be deleted - types = ['cert', 'key'] - for cert_type in types: - try: - cert_file = '%s/%s.pem' % (config_directory,cert_type) - os.remove(cert_file) - except OSError, e: - module.fail_json("Failed to remove " + cert_type + ".pem file") - - return 0, "Meter " + name + " deleted" - -def download_request(module, name, apiid, apikey, cert_type): - - meter_id = get_meter_id(module, name, apiid, apikey) - - if meter_id is not None: - action = "certificates" - response, info = http_request(module, name, apiid, apikey, action, meter_id, cert_type) - if info['status'] != 200: - module.fail_json("Failed to connect to api host to download certificate") - - if result: - try: - cert_file_path = 
'%s/%s.pem' % (config_directory,cert_type) - body = response.read() - cert_file = open(cert_file_path, 'w') - cert_file.write(body) - cert_file.close - os.chmod(cert_file_path, 0o600) - except: - module.fail_json("Could not write to certificate file") - - return True - else: - module.fail_json("Could not get meter id") - -def main(): - - module = AnsibleModule( - argument_spec=dict( - state=dict(required=True, choices=['present', 'absent']), - name=dict(required=False), - apikey=dict(required=True), - apiid=dict(required=True), - validate_certs = dict(default='yes', type='bool'), - ) - ) - - state = module.params['state'] - name= module.params['name'] - apikey = module.params['api_key'] - apiid = module.params['api_id'] - - if state == "present": - (rc, result) = create_meter(module, name, apiid, apikey) - - if state == "absent": - (rc, result) = delete_meter(module, name, apiid, apikey) - - if rc != 0: - module.fail_json(msg=result) - - module.exit_json(status=result,changed=True) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * -main() - diff --git a/library/monitoring/datadog_event b/library/monitoring/datadog_event deleted file mode 100644 index 5d38dd4c31..0000000000 --- a/library/monitoring/datadog_event +++ /dev/null @@ -1,143 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Author: Artūras 'arturaz' Šlajus -# -# This module is proudly sponsored by iGeolise (www.igeolise.com) and -# Tiny Lab Productions (www.tinylabproductions.com). - -DOCUMENTATION = ''' ---- -module: datadog_event -short_description: Posts events to DataDog service -description: -- "Allows to post events to DataDog (www.datadoghq.com) service." -- "Uses http://docs.datadoghq.com/api/#events API." 
-version_added: "1.3" -author: Artūras 'arturaz' Šlajus -notes: [] -requirements: [urllib2] -options: - api_key: - description: ["Your DataDog API key."] - required: true - default: null - title: - description: ["The event title."] - required: true - default: null - text: - description: ["The body of the event."] - required: true - default: null - date_happened: - description: - - POSIX timestamp of the event. - - Default value is now. - required: false - default: now - priority: - description: ["The priority of the event."] - required: false - default: normal - choices: [normal, low] - tags: - description: ["Comma separated list of tags to apply to the event."] - required: false - default: null - alert_type: - description: ["Type of alert."] - required: false - default: info - choices: ['error', 'warning', 'info', 'success'] - aggregation_key: - description: ["An arbitrary string to use for aggregation."] - required: false - default: null - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - required: false - default: 'yes' - choices: ['yes', 'no'] - version_added: 1.5.1 -''' - -EXAMPLES = ''' -# Post an event with low priority -datadog_event: title="Testing from ansible" text="Test!" priority="low" - api_key="6873258723457823548234234234" -# Post an event with several tags -datadog_event: title="Testing from ansible" text="Test!" 
- api_key="6873258723457823548234234234" - tags=aa,bb,cc -''' - -import socket - -def main(): - module = AnsibleModule( - argument_spec=dict( - api_key=dict(required=True), - title=dict(required=True), - text=dict(required=True), - date_happened=dict(required=False, default=None, type='int'), - priority=dict( - required=False, default='normal', choices=['normal', 'low'] - ), - tags=dict(required=False, default=None), - alert_type=dict( - required=False, default='info', - choices=['error', 'warning', 'info', 'success'] - ), - aggregation_key=dict(required=False, default=None), - source_type_name=dict( - required=False, default='my apps', - choices=['nagios', 'hudson', 'jenkins', 'user', 'my apps', - 'feed', 'chef', 'puppet', 'git', 'bitbucket', 'fabric', - 'capistrano'] - ), - validate_certs = dict(default='yes', type='bool'), - ) - ) - - post_event(module) - -def post_event(module): - uri = "https://app.datadoghq.com/api/v1/events?api_key=%s" % module.params['api_key'] - - body = dict( - title=module.params['title'], - text=module.params['text'], - priority=module.params['priority'], - alert_type=module.params['alert_type'] - ) - if module.params['date_happened'] != None: - body['date_happened'] = module.params['date_happened'] - if module.params['tags'] != None: - body['tags'] = module.params['tags'].split(",") - if module.params['aggregation_key'] != None: - body['aggregation_key'] = module.params['aggregation_key'] - if module.params['source_type_name'] != None: - body['source_type_name'] = module.params['source_type_name'] - - json_body = module.jsonify(body) - headers = {"Content-Type": "application/json"} - - (response, info) = fetch_url(module, uri, data=json_body, headers=headers) - if info['status'] == 200: - response_body = response.read() - response_json = module.from_json(response_body) - if response_json['status'] == 'ok': - module.exit_json(changed=True) - else: - module.fail_json(msg=response) - else: - module.fail_json(**info) - -# import module 
snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * - -main() diff --git a/library/monitoring/librato_annotation b/library/monitoring/librato_annotation deleted file mode 100644 index 63979f41bf..0000000000 --- a/library/monitoring/librato_annotation +++ /dev/null @@ -1,169 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (C) Seth Edwards, 2014 -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - - -import base64 - -DOCUMENTATION = ''' ---- -module: librato_annotation -short_description: create an annotation in librato -description: - - Create an annotation event on the given annotation stream :name. If the annotation stream does not exist, it will be created automatically -version_added: "1.6" -author: Seth Edwards -requirements: - - urllib2 - - base64 -options: - user: - description: - - Librato account username - required: true - api_key: - description: - - Librato account api key - required: true - name: - description: - - The annotation stream name - - If the annotation stream does not exist, it will be created automatically - required: false - title: - description: - - The title of an annotation is a string and may contain spaces - - The title should be a short, high-level summary of the annotation e.g. 
v45 Deployment - required: true - source: - description: - - A string which describes the originating source of an annotation when that annotation is tracked across multiple members of a population - required: false - description: - description: - - The description contains extra meta-data about a particular annotation - - The description should contain specifics on the individual annotation e.g. Deployed 9b562b2 shipped new feature foo! - required: false - start_time: - description: - - The unix timestamp indicating the the time at which the event referenced by this annotation started - required: false - end_time: - description: - - The unix timestamp indicating the the time at which the event referenced by this annotation ended - - For events that have a duration, this is a useful way to annotate the duration of the event - required: false - links: - description: - - See examples - required: true -''' - -EXAMPLES = ''' -# Create a simple annotation event with a source -- librato_annotation: - user: user@example.com - api_key: XXXXXXXXXXXXXXXXX - title: 'App Config Change' - source: 'foo.bar' - description: 'This is a detailed description of the config change' - -# Create an annotation that includes a link -- librato_annotation: - user: user@example.com - api_key: XXXXXXXXXXXXXXXXXX - name: 'code.deploy' - title: 'app code deploy' - description: 'this is a detailed description of a deployment' - links: - - { rel: 'example', href: 'http://www.example.com/deploy' } - -# Create an annotation with a start_time and end_time -- librato_annotation: - user: user@example.com - api_key: XXXXXXXXXXXXXXXXXX - name: 'maintenance' - title: 'Maintenance window' - description: 'This is a detailed description of maintenance' - start_time: 1395940006 - end_time: 1395954406 -''' - - -try: - import urllib2 - HAS_URLLIB2 = True -except ImportError: - HAS_URLLIB2 = False - -def post_annotation(module): - user = module.params['user'] - api_key = module.params['api_key'] - name = 
module.params['name'] - title = module.params['title'] - - url = 'https://metrics-api.librato.com/v1/annotations/%s' % name - params = {} - params['title'] = title - - if module.params['source'] != None: - params['source'] = module.params['source'] - if module.params['description'] != None: - params['description'] = module.params['description'] - if module.params['start_time'] != None: - params['start_time'] = module.params['start_time'] - if module.params['end_time'] != None: - params['end_time'] = module.params['end_time'] - if module.params['links'] != None: - params['links'] = module.params['links'] - - json_body = module.jsonify(params) - - headers = {} - headers['Content-Type'] = 'application/json' - headers['Authorization'] = b"Basic " + base64.b64encode(user + b":" + api_key).strip() - req = urllib2.Request(url, json_body, headers) - try: - response = urllib2.urlopen(req) - except urllib2.HTTPError as e: - module.fail_json(msg="Request Failed", reason=e.reason) - response = response.read() - module.exit_json(changed=True, annotation=response) - -def main(): - - module = AnsibleModule( - argument_spec = dict( - user = dict(required=True), - api_key = dict(required=True), - name = dict(required=False), - title = dict(required=True), - source = dict(required=False), - description = dict(required=False), - start_time = dict(required=False, default=None, type='int'), - end_time = dict(require=False, default=None, type='int'), - links = dict(type='list') - ) - ) - - post_annotation(module) - -from ansible.module_utils.basic import * -main() diff --git a/library/monitoring/logentries b/library/monitoring/logentries deleted file mode 100644 index 373f4f777f..0000000000 --- a/library/monitoring/logentries +++ /dev/null @@ -1,130 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Ivan Vanderbyl -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free 
Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . - -DOCUMENTATION = ''' ---- -module: logentries -author: Ivan Vanderbyl -short_description: Module for tracking logs via logentries.com -description: - - Sends logs to LogEntries in realtime -version_added: "1.6" -options: - path: - description: - - path to a log file - required: true - state: - description: - - following state of the log - choices: [ 'present', 'absent' ] - required: false - default: present -notes: - - Requires the LogEntries agent which can be installed following the instructions at logentries.com -''' -EXAMPLES = ''' -- logentries: path=/var/log/nginx/access.log state=present -- logentries: path=/var/log/nginx/error.log state=absent -''' - -def query_log_status(module, le_path, path, state="present"): - """ Returns whether a log is followed or not. """ - - if state == "present": - rc, out, err = module.run_command("%s followed %s" % (le_path, path)) - if rc == 0: - return True - - return False - -def follow_log(module, le_path, logs): - """ Follows one or more logs if not already followed. 
""" - - followed_count = 0 - - for log in logs: - if query_log_status(module, le_path, log): - continue - - if module.check_mode: - module.exit_json(changed=True) - rc, out, err = module.run_command([le_path, 'follow', log]) - - if not query_log_status(module, le_path, log): - module.fail_json(msg="failed to follow '%s': %s" % (log, err.strip())) - - followed_count += 1 - - if followed_count > 0: - module.exit_json(changed=True, msg="followed %d log(s)" % (followed_count,)) - - module.exit_json(changed=False, msg="logs(s) already followed") - -def unfollow_log(module, le_path, logs): - """ Unfollows one or more logs if followed. """ - - removed_count = 0 - - # Using a for loop incase of error, we can report the package that failed - for log in logs: - # Query the log first, to see if we even need to remove. - if not query_log_status(module, le_path, log): - continue - - if module.check_mode: - module.exit_json(changed=True) - rc, out, err = module.run_command([le_path, 'rm', log]) - - if query_log_status(module, le_path, log): - module.fail_json(msg="failed to remove '%s': %s" % (log, err.strip())) - - removed_count += 1 - - if removed_count > 0: - module.exit_json(changed=True, msg="removed %d package(s)" % removed_count) - - module.exit_json(changed=False, msg="logs(s) already unfollowed") - -def main(): - module = AnsibleModule( - argument_spec = dict( - path = dict(aliases=["name"], required=True), - state = dict(default="present", choices=["present", "followed", "absent", "unfollowed"]) - ), - supports_check_mode=True - ) - - le_path = module.get_bin_path('le', True, ['/usr/local/bin']) - - p = module.params - - # Handle multiple log files - logs = p["path"].split(",") - logs = filter(None, logs) - - if p["state"] in ["present", "followed"]: - follow_log(module, le_path, logs) - - elif p["state"] in ["absent", "unfollowed"]: - unfollow_log(module, le_path, logs) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git 
a/library/monitoring/monit b/library/monitoring/monit deleted file mode 100644 index 558f1e696f..0000000000 --- a/library/monitoring/monit +++ /dev/null @@ -1,155 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Darryl Stoflet -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -DOCUMENTATION = ''' ---- -module: monit -short_description: Manage the state of a program monitored via Monit -description: - - Manage the state of a program monitored via I(Monit) -version_added: "1.2" -options: - name: - description: - - The name of the I(monit) program/process to manage - required: true - default: null - state: - description: - - The state of service - required: true - default: null - choices: [ "present", "started", "stopped", "restarted", "monitored", "unmonitored", "reloaded" ] -requirements: [ ] -author: Darryl Stoflet -''' - -EXAMPLES = ''' -# Manage the state of program "httpd" to be in "started" state. 
-- monit: name=httpd state=started -''' - -def main(): - arg_spec = dict( - name=dict(required=True), - state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped', 'monitored', 'unmonitored', 'reloaded']) - ) - - module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True) - - name = module.params['name'] - state = module.params['state'] - - MONIT = module.get_bin_path('monit', True) - - if state == 'reloaded': - if module.check_mode: - module.exit_json(changed=True) - rc, out, err = module.run_command('%s reload' % MONIT) - if rc != 0: - module.fail_json(msg='monit reload failed', stdout=out, stderr=err) - module.exit_json(changed=True, name=name, state=state) - - def status(): - """Return the status of the process in monit, or the empty string if not present.""" - rc, out, err = module.run_command('%s summary' % MONIT, check_rc=True) - for line in out.split('\n'): - # Sample output lines: - # Process 'name' Running - # Process 'name' Running - restart pending - parts = line.lower().split() - if len(parts) > 2 and parts[0] == 'process' and parts[1] == "'%s'" % name: - return ' '.join(parts[2:]) - else: - return '' - - def run_command(command): - """Runs a monit command, and returns the new status.""" - module.run_command('%s %s %s' % (MONIT, command, name), check_rc=True) - return status() - - present = status() != '' - - if not present and not state == 'present': - module.fail_json(msg='%s process not presently configured with monit' % name, name=name, state=state) - - if state == 'present': - if not present: - if module.check_mode: - module.exit_json(changed=True) - status = run_command('reload') - if status == '': - module.fail_json(msg='%s process not configured with monit' % name, name=name, state=state) - else: - module.exit_json(changed=True, name=name, state=state) - module.exit_json(changed=False, name=name, state=state) - - running = 'running' in status() - - if running and state in ['started', 'monitored']: - 
module.exit_json(changed=False, name=name, state=state) - - if running and state == 'stopped': - if module.check_mode: - module.exit_json(changed=True) - status = run_command('stop') - if status in ['not monitored'] or 'stop pending' in status: - module.exit_json(changed=True, name=name, state=state) - module.fail_json(msg='%s process not stopped' % name, status=status) - - if running and state == 'unmonitored': - if module.check_mode: - module.exit_json(changed=True) - status = run_command('unmonitor') - if status in ['not monitored']: - module.exit_json(changed=True, name=name, state=state) - module.fail_json(msg='%s process not unmonitored' % name, status=status) - - elif state == 'restarted': - if module.check_mode: - module.exit_json(changed=True) - status = run_command('restart') - if status in ['initializing', 'running'] or 'restart pending' in status: - module.exit_json(changed=True, name=name, state=state) - module.fail_json(msg='%s process not restarted' % name, status=status) - - elif not running and state == 'started': - if module.check_mode: - module.exit_json(changed=True) - status = run_command('start') - if status in ['initializing', 'running'] or 'start pending' in status: - module.exit_json(changed=True, name=name, state=state) - module.fail_json(msg='%s process not started' % name, status=status) - - elif not running and state == 'monitored': - if module.check_mode: - module.exit_json(changed=True) - status = run_command('monitor') - if status not in ['not monitored']: - module.exit_json(changed=True, name=name, state=state) - module.fail_json(msg='%s process not monitored' % name, status=status) - - module.exit_json(changed=False, name=name, state=state) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/monitoring/nagios b/library/monitoring/nagios deleted file mode 100644 index 9219766b86..0000000000 --- a/library/monitoring/nagios +++ /dev/null @@ -1,880 +0,0 @@ -#!/usr/bin/python -# -*- 
coding: utf-8 -*- -# -# This file is largely copied from the Nagios module included in the -# Func project. Original copyright follows: -# -# func-nagios - Schedule downtime and enables/disable notifications -# Copyright 2011, Red Hat, Inc. -# Tim Bielawa -# -# This software may be freely redistributed under the terms of the GNU -# general public license version 2. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - - -DOCUMENTATION = ''' ---- -module: nagios -short_description: Perform common tasks in Nagios related to downtime and notifications. -description: - - "The M(nagios) module has two basic functions: scheduling downtime and toggling alerts for services or hosts." - - All actions require the I(host) parameter to be given explicitly. In playbooks you can use the C({{inventory_hostname}}) variable to refer to the host the playbook is currently running on. - - You can specify multiple services at once by separating them with commas, .e.g., C(services=httpd,nfs,puppet). - - When specifying what service to handle there is a special service value, I(host), which will handle alerts/downtime for the I(host itself), e.g., C(service=host). This keyword may not be given with other services at the same time. I(Setting alerts/downtime for a host does not affect alerts/downtime for any of the services running on it.) To schedule downtime for all services on particular host use keyword "all", e.g., C(service=all). - - When using the M(nagios) module you will need to specify your Nagios server using the C(delegate_to) parameter. -version_added: "0.7" -options: - action: - description: - - Action to take. - required: true - default: null - choices: [ "downtime", "enable_alerts", "disable_alerts", "silence", "unsilence", - "silence_nagios", "unsilence_nagios", "command" ] - host: - description: - - Host to operate on in Nagios. 
- required: false - default: null - cmdfile: - description: - - Path to the nagios I(command file) (FIFO pipe). - Only required if auto-detection fails. - required: false - default: auto-detected - author: - description: - - Author to leave downtime comments as. - Only usable with the C(downtime) action. - required: false - default: Ansible - minutes: - description: - - Minutes to schedule downtime for. - - Only usable with the C(downtime) action. - required: false - default: 30 - services: - description: - - What to manage downtime/alerts for. Separate multiple services with commas. - C(service) is an alias for C(services). - B(Required) option when using the C(downtime), C(enable_alerts), and C(disable_alerts) actions. - aliases: [ "service" ] - required: true - default: null - command: - description: - - The raw command to send to nagios, which - should not include the submitted time header or the line-feed - B(Required) option when using the C(command) action. - required: true - default: null - -author: Tim Bielawa -requirements: [ "Nagios" ] -''' - -EXAMPLES = ''' -# set 30 minutes of apache downtime -- nagios: action=downtime minutes=30 service=httpd host={{ inventory_hostname }} - -# schedule an hour of HOST downtime -- nagios: action=downtime minutes=60 service=host host={{ inventory_hostname }} - -# schedule downtime for ALL services on HOST -- nagios: action=downtime minutes=45 service=all host={{ inventory_hostname }} - -# schedule downtime for a few services -- nagios: action=downtime services=frob,foobar,qeuz host={{ inventory_hostname }} - -# enable SMART disk alerts -- nagios: action=enable_alerts service=smart host={{ inventory_hostname }} - -# "two services at once: disable httpd and nfs alerts" -- nagios: action=disable_alerts service=httpd,nfs host={{ inventory_hostname }} - -# disable HOST alerts -- nagios: action=disable_alerts service=host host={{ inventory_hostname }} - -# silence ALL alerts -- nagios: action=silence host={{ 
inventory_hostname }} - -# unsilence all alerts -- nagios: action=unsilence host={{ inventory_hostname }} - -# SHUT UP NAGIOS -- nagios: action=silence_nagios - -# ANNOY ME NAGIOS -- nagios: action=unsilence_nagios - -# command something -- nagios: action=command command='DISABLE_FAILURE_PREDICTION' -''' - -import ConfigParser -import types -import time -import os.path - -###################################################################### - - -def which_cmdfile(): - locations = [ - # rhel - '/etc/nagios/nagios.cfg', - # debian - '/etc/nagios3/nagios.cfg', - # older debian - '/etc/nagios2/nagios.cfg', - # bsd, solaris - '/usr/local/etc/nagios/nagios.cfg', - # groundwork it monitoring - '/usr/local/groundwork/nagios/etc/nagios.cfg', - # open monitoring distribution - '/omd/sites/oppy/tmp/nagios/nagios.cfg', - # ??? - '/usr/local/nagios/etc/nagios.cfg', - '/usr/local/nagios/nagios.cfg', - '/opt/nagios/etc/nagios.cfg', - '/opt/nagios/nagios.cfg', - # icinga on debian/ubuntu - '/etc/icinga/icinga.cfg', - # icinga installed from source (default location) - '/usr/local/icinga/etc/icinga.cfg', - ] - - for path in locations: - if os.path.exists(path): - for line in open(path): - if line.startswith('command_file'): - return line.split('=')[1].strip() - - return None - -###################################################################### - - -def main(): - ACTION_CHOICES = [ - 'downtime', - 'silence', - 'unsilence', - 'enable_alerts', - 'disable_alerts', - 'silence_nagios', - 'unsilence_nagios', - 'command', - ] - - module = AnsibleModule( - argument_spec=dict( - action=dict(required=True, default=None, choices=ACTION_CHOICES), - author=dict(default='Ansible'), - host=dict(required=False, default=None), - minutes=dict(default=30), - cmdfile=dict(default=which_cmdfile()), - services=dict(default=None, aliases=['service']), - command=dict(required=False, default=None), - ) - ) - - action = module.params['action'] - host = module.params['host'] - minutes = 
module.params['minutes'] - services = module.params['services'] - cmdfile = module.params['cmdfile'] - command = module.params['command'] - - ################################################################## - # Required args per action: - # downtime = (minutes, service, host) - # (un)silence = (host) - # (enable/disable)_alerts = (service, host) - # command = command - # - # AnsibleModule will verify most stuff, we need to verify - # 'minutes' and 'service' manually. - - ################################################################## - if action not in ['command', 'silence_nagios', 'unsilence_nagios']: - if not host: - module.fail_json(msg='no host specified for action requiring one') - ###################################################################### - if action == 'downtime': - # Make sure there's an actual service selected - if not services: - module.fail_json(msg='no service selected to set downtime for') - # Make sure minutes is a number - try: - m = int(minutes) - if not isinstance(m, types.IntType): - module.fail_json(msg='minutes must be a number') - except Exception: - module.fail_json(msg='invalid entry for minutes') - - ################################################################## - if action in ['enable_alerts', 'disable_alerts']: - if not services: - module.fail_json(msg='a service is required when setting alerts') - - if action in ['command']: - if not command: - module.fail_json(msg='no command passed for command action') - ################################################################## - if not cmdfile: - module.fail_json('unable to locate nagios.cfg') - - ################################################################## - ansible_nagios = Nagios(module, **module.params) - if module.check_mode: - module.exit_json(changed=True) - else: - ansible_nagios.act() - ################################################################## - - -###################################################################### -class Nagios(object): - """ - 
Perform common tasks in Nagios related to downtime and - notifications. - - The complete set of external commands Nagios handles is documented - on their website: - - http://old.nagios.org/developerinfo/externalcommands/commandlist.php - - Note that in the case of `schedule_svc_downtime`, - `enable_svc_notifications`, and `disable_svc_notifications`, the - service argument should be passed as a list. - """ - - def __init__(self, module, **kwargs): - self.module = module - self.action = kwargs['action'] - self.author = kwargs['author'] - self.host = kwargs['host'] - self.minutes = int(kwargs['minutes']) - self.cmdfile = kwargs['cmdfile'] - self.command = kwargs['command'] - - if (kwargs['services'] is None) or (kwargs['services'] == 'host') or (kwargs['services'] == 'all'): - self.services = kwargs['services'] - else: - self.services = kwargs['services'].split(',') - - self.command_results = [] - - def _now(self): - """ - The time in seconds since 12:00:00AM Jan 1, 1970 - """ - - return int(time.time()) - - def _write_command(self, cmd): - """ - Write the given command to the Nagios command file - """ - - try: - fp = open(self.cmdfile, 'w') - fp.write(cmd) - fp.flush() - fp.close() - self.command_results.append(cmd.strip()) - except IOError: - self.module.fail_json(msg='unable to write to nagios command file', - cmdfile=self.cmdfile) - - def _fmt_dt_str(self, cmd, host, duration, author=None, - comment="Scheduling downtime", start=None, - svc=None, fixed=1, trigger=0): - """ - Format an external-command downtime string. 
- - cmd - Nagios command ID - host - Host schedule downtime on - duration - Minutes to schedule downtime for - author - Name to file the downtime as - comment - Reason for running this command (upgrade, reboot, etc) - start - Start of downtime in seconds since 12:00AM Jan 1 1970 - Default is to use the entry time (now) - svc - Service to schedule downtime for, omit when for host downtime - fixed - Start now if 1, start when a problem is detected if 0 - trigger - Optional ID of event to start downtime from. Leave as 0 for - fixed downtime. - - Syntax: [submitted] COMMAND;;[] - ;;;;;; - - """ - - entry_time = self._now() - if start is None: - start = entry_time - - hdr = "[%s] %s;%s;" % (entry_time, cmd, host) - duration_s = (duration * 60) - end = start + duration_s - - if not author: - author = self.author - - if svc is not None: - dt_args = [svc, str(start), str(end), str(fixed), str(trigger), - str(duration_s), author, comment] - else: - # Downtime for a host if no svc specified - dt_args = [str(start), str(end), str(fixed), str(trigger), - str(duration_s), author, comment] - - dt_arg_str = ";".join(dt_args) - dt_str = hdr + dt_arg_str + "\n" - - return dt_str - - def _fmt_notif_str(self, cmd, host=None, svc=None): - """ - Format an external-command notification string. - - cmd - Nagios command ID. - host - Host to en/disable notifications on.. A value is not required - for global downtime - svc - Service to schedule downtime for. A value is not required - for host downtime. - - Syntax: [submitted] COMMAND;[;] - """ - - entry_time = self._now() - notif_str = "[%s] %s" % (entry_time, cmd) - if host is not None: - notif_str += ";%s" % host - - if svc is not None: - notif_str += ";%s" % svc - - notif_str += "\n" - - return notif_str - - def schedule_svc_downtime(self, host, services=[], minutes=30): - """ - This command is used to schedule downtime for a particular - service. 
- - During the specified downtime, Nagios will not send - notifications out about the service. - - Syntax: SCHEDULE_SVC_DOWNTIME;; - ;;;;;; - - """ - - cmd = "SCHEDULE_SVC_DOWNTIME" - for service in services: - dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, svc=service) - self._write_command(dt_cmd_str) - - def schedule_host_downtime(self, host, minutes=30): - """ - This command is used to schedule downtime for a particular - host. - - During the specified downtime, Nagios will not send - notifications out about the host. - - Syntax: SCHEDULE_HOST_DOWNTIME;;;; - ;;;; - """ - - cmd = "SCHEDULE_HOST_DOWNTIME" - dt_cmd_str = self._fmt_dt_str(cmd, host, minutes) - self._write_command(dt_cmd_str) - - def schedule_host_svc_downtime(self, host, minutes=30): - """ - This command is used to schedule downtime for - all services associated with a particular host. - - During the specified downtime, Nagios will not send - notifications out about the host. - - SCHEDULE_HOST_SVC_DOWNTIME;;;; - ;;;; - """ - - cmd = "SCHEDULE_HOST_SVC_DOWNTIME" - dt_cmd_str = self._fmt_dt_str(cmd, host, minutes) - self._write_command(dt_cmd_str) - - def schedule_hostgroup_host_downtime(self, hostgroup, minutes=30): - """ - This command is used to schedule downtime for all hosts in a - particular hostgroup. - - During the specified downtime, Nagios will not send - notifications out about the hosts. - - Syntax: SCHEDULE_HOSTGROUP_HOST_DOWNTIME;;; - ;;;;; - """ - - cmd = "SCHEDULE_HOSTGROUP_HOST_DOWNTIME" - dt_cmd_str = self._fmt_dt_str(cmd, hostgroup, minutes) - self._write_command(dt_cmd_str) - - def schedule_hostgroup_svc_downtime(self, hostgroup, minutes=30): - """ - This command is used to schedule downtime for all services in - a particular hostgroup. - - During the specified downtime, Nagios will not send - notifications out about the services. - - Note that scheduling downtime for services does not - automatically schedule downtime for the hosts those services - are associated with. 
- - Syntax: SCHEDULE_HOSTGROUP_SVC_DOWNTIME;;; - ;;;;; - """ - - cmd = "SCHEDULE_HOSTGROUP_SVC_DOWNTIME" - dt_cmd_str = self._fmt_dt_str(cmd, hostgroup, minutes) - self._write_command(dt_cmd_str) - - def schedule_servicegroup_host_downtime(self, servicegroup, minutes=30): - """ - This command is used to schedule downtime for all hosts in a - particular servicegroup. - - During the specified downtime, Nagios will not send - notifications out about the hosts. - - Syntax: SCHEDULE_SERVICEGROUP_HOST_DOWNTIME;; - ;;;;;; - - """ - - cmd = "SCHEDULE_SERVICEGROUP_HOST_DOWNTIME" - dt_cmd_str = self._fmt_dt_str(cmd, servicegroup, minutes) - self._write_command(dt_cmd_str) - - def schedule_servicegroup_svc_downtime(self, servicegroup, minutes=30): - """ - This command is used to schedule downtime for all services in - a particular servicegroup. - - During the specified downtime, Nagios will not send - notifications out about the services. - - Note that scheduling downtime for services does not - automatically schedule downtime for the hosts those services - are associated with. - - Syntax: SCHEDULE_SERVICEGROUP_SVC_DOWNTIME;; - ;;;;;; - - """ - - cmd = "SCHEDULE_SERVICEGROUP_SVC_DOWNTIME" - dt_cmd_str = self._fmt_dt_str(cmd, servicegroup, minutes) - self._write_command(dt_cmd_str) - - def disable_host_svc_notifications(self, host): - """ - This command is used to prevent notifications from being sent - out for all services on the specified host. - - Note that this command does not disable notifications from - being sent out about the host. - - Syntax: DISABLE_HOST_SVC_NOTIFICATIONS; - """ - - cmd = "DISABLE_HOST_SVC_NOTIFICATIONS" - notif_str = self._fmt_notif_str(cmd, host) - self._write_command(notif_str) - - def disable_host_notifications(self, host): - """ - This command is used to prevent notifications from being sent - out for the specified host. - - Note that this command does not disable notifications for - services associated with this host. 
- - Syntax: DISABLE_HOST_NOTIFICATIONS; - """ - - cmd = "DISABLE_HOST_NOTIFICATIONS" - notif_str = self._fmt_notif_str(cmd, host) - self._write_command(notif_str) - - def disable_svc_notifications(self, host, services=[]): - """ - This command is used to prevent notifications from being sent - out for the specified service. - - Note that this command does not disable notifications from - being sent out about the host. - - Syntax: DISABLE_SVC_NOTIFICATIONS;; - """ - - cmd = "DISABLE_SVC_NOTIFICATIONS" - for service in services: - notif_str = self._fmt_notif_str(cmd, host, svc=service) - self._write_command(notif_str) - - def disable_servicegroup_host_notifications(self, servicegroup): - """ - This command is used to prevent notifications from being sent - out for all hosts in the specified servicegroup. - - Note that this command does not disable notifications for - services associated with hosts in this service group. - - Syntax: DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS; - """ - - cmd = "DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS" - notif_str = self._fmt_notif_str(cmd, servicegroup) - self._write_command(notif_str) - - def disable_servicegroup_svc_notifications(self, servicegroup): - """ - This command is used to prevent notifications from being sent - out for all services in the specified servicegroup. - - Note that this does not prevent notifications from being sent - out about the hosts in this servicegroup. - - Syntax: DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS; - """ - - cmd = "DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS" - notif_str = self._fmt_notif_str(cmd, servicegroup) - self._write_command(notif_str) - - def disable_hostgroup_host_notifications(self, hostgroup): - """ - Disables notifications for all hosts in a particular - hostgroup. - - Note that this does not disable notifications for the services - associated with the hosts in the hostgroup - see the - DISABLE_HOSTGROUP_SVC_NOTIFICATIONS command for that. 
- - Syntax: DISABLE_HOSTGROUP_HOST_NOTIFICATIONS; - """ - - cmd = "DISABLE_HOSTGROUP_HOST_NOTIFICATIONS" - notif_str = self._fmt_notif_str(cmd, hostgroup) - self._write_command(notif_str) - - def disable_hostgroup_svc_notifications(self, hostgroup): - """ - Disables notifications for all services associated with hosts - in a particular hostgroup. - - Note that this does not disable notifications for the hosts in - the hostgroup - see the DISABLE_HOSTGROUP_HOST_NOTIFICATIONS - command for that. - - Syntax: DISABLE_HOSTGROUP_SVC_NOTIFICATIONS; - """ - - cmd = "DISABLE_HOSTGROUP_SVC_NOTIFICATIONS" - notif_str = self._fmt_notif_str(cmd, hostgroup) - self._write_command(notif_str) - - def enable_host_notifications(self, host): - """ - Enables notifications for a particular host. - - Note that this command does not enable notifications for - services associated with this host. - - Syntax: ENABLE_HOST_NOTIFICATIONS; - """ - - cmd = "ENABLE_HOST_NOTIFICATIONS" - notif_str = self._fmt_notif_str(cmd, host) - self._write_command(notif_str) - - def enable_host_svc_notifications(self, host): - """ - Enables notifications for all services on the specified host. - - Note that this does not enable notifications for the host. - - Syntax: ENABLE_HOST_SVC_NOTIFICATIONS; - """ - - cmd = "ENABLE_HOST_SVC_NOTIFICATIONS" - notif_str = self._fmt_notif_str(cmd, host) - nagios_return = self._write_command(notif_str) - - if nagios_return: - return notif_str - else: - return "Fail: could not write to the command file" - - def enable_svc_notifications(self, host, services=[]): - """ - Enables notifications for a particular service. - - Note that this does not enable notifications for the host. 
- - Syntax: ENABLE_SVC_NOTIFICATIONS;; - """ - - cmd = "ENABLE_SVC_NOTIFICATIONS" - nagios_return = True - return_str_list = [] - for service in services: - notif_str = self._fmt_notif_str(cmd, host, svc=service) - nagios_return = self._write_command(notif_str) and nagios_return - return_str_list.append(notif_str) - - if nagios_return: - return return_str_list - else: - return "Fail: could not write to the command file" - - def enable_hostgroup_host_notifications(self, hostgroup): - """ - Enables notifications for all hosts in a particular hostgroup. - - Note that this command does not enable notifications for - services associated with the hosts in this hostgroup. - - Syntax: ENABLE_HOSTGROUP_HOST_NOTIFICATIONS; - """ - - cmd = "ENABLE_HOSTGROUP_HOST_NOTIFICATIONS" - notif_str = self._fmt_notif_str(cmd, hostgroup) - nagios_return = self._write_command(notif_str) - - if nagios_return: - return notif_str - else: - return "Fail: could not write to the command file" - - def enable_hostgroup_svc_notifications(self, hostgroup): - """ - Enables notifications for all services that are associated - with hosts in a particular hostgroup. - - Note that this does not enable notifications for the hosts in - this hostgroup. - - Syntax: ENABLE_HOSTGROUP_SVC_NOTIFICATIONS; - """ - - cmd = "ENABLE_HOSTGROUP_SVC_NOTIFICATIONS" - notif_str = self._fmt_notif_str(cmd, hostgroup) - nagios_return = self._write_command(notif_str) - - if nagios_return: - return notif_str - else: - return "Fail: could not write to the command file" - - def enable_servicegroup_host_notifications(self, servicegroup): - """ - Enables notifications for all hosts that have services that - are members of a particular servicegroup. - - Note that this command does not enable notifications for - services associated with the hosts in this servicegroup. 
- - Syntax: ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS; - """ - - cmd = "ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS" - notif_str = self._fmt_notif_str(cmd, servicegroup) - nagios_return = self._write_command(notif_str) - - if nagios_return: - return notif_str - else: - return "Fail: could not write to the command file" - - def enable_servicegroup_svc_notifications(self, servicegroup): - """ - Enables notifications for all services that are members of a - particular servicegroup. - - Note that this does not enable notifications for the hosts in - this servicegroup. - - Syntax: ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS; - """ - - cmd = "ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS" - notif_str = self._fmt_notif_str(cmd, servicegroup) - nagios_return = self._write_command(notif_str) - - if nagios_return: - return notif_str - else: - return "Fail: could not write to the command file" - - def silence_host(self, host): - """ - This command is used to prevent notifications from being sent - out for the host and all services on the specified host. - - This is equivalent to calling disable_host_svc_notifications - and disable_host_notifications. - - Syntax: DISABLE_HOST_SVC_NOTIFICATIONS; - Syntax: DISABLE_HOST_NOTIFICATIONS; - """ - - cmd = [ - "DISABLE_HOST_SVC_NOTIFICATIONS", - "DISABLE_HOST_NOTIFICATIONS" - ] - nagios_return = True - return_str_list = [] - for c in cmd: - notif_str = self._fmt_notif_str(c, host) - nagios_return = self._write_command(notif_str) and nagios_return - return_str_list.append(notif_str) - - if nagios_return: - return return_str_list - else: - return "Fail: could not write to the command file" - - def unsilence_host(self, host): - """ - This command is used to enable notifications for the host and - all services on the specified host. - - This is equivalent to calling enable_host_svc_notifications - and enable_host_notifications. 
- - Syntax: ENABLE_HOST_SVC_NOTIFICATIONS; - Syntax: ENABLE_HOST_NOTIFICATIONS; - """ - - cmd = [ - "ENABLE_HOST_SVC_NOTIFICATIONS", - "ENABLE_HOST_NOTIFICATIONS" - ] - nagios_return = True - return_str_list = [] - for c in cmd: - notif_str = self._fmt_notif_str(c, host) - nagios_return = self._write_command(notif_str) and nagios_return - return_str_list.append(notif_str) - - if nagios_return: - return return_str_list - else: - return "Fail: could not write to the command file" - - def silence_nagios(self): - """ - This command is used to disable notifications for all hosts and services - in nagios. - - This is a 'SHUT UP, NAGIOS' command - """ - cmd = 'DISABLE_NOTIFICATIONS' - self._write_command(self._fmt_notif_str(cmd)) - - def unsilence_nagios(self): - """ - This command is used to enable notifications for all hosts and services - in nagios. - - This is a 'OK, NAGIOS, GO'' command - """ - cmd = 'ENABLE_NOTIFICATIONS' - self._write_command(self._fmt_notif_str(cmd)) - - def nagios_cmd(self, cmd): - """ - This sends an arbitrary command to nagios - - It prepends the submitted time and appends a \n - - You just have to provide the properly formatted command - """ - - pre = '[%s]' % int(time.time()) - - post = '\n' - cmdstr = '%s %s %s' % (pre, cmd, post) - self._write_command(cmdstr) - - def act(self): - """ - Figure out what you want to do from ansible, and then do the - needful (at the earliest). - """ - # host or service downtime? 
- if self.action == 'downtime': - if self.services == 'host': - self.schedule_host_downtime(self.host, self.minutes) - elif self.services == 'all': - self.schedule_host_svc_downtime(self.host, self.minutes) - else: - self.schedule_svc_downtime(self.host, - services=self.services, - minutes=self.minutes) - - # toggle the host AND service alerts - elif self.action == 'silence': - self.silence_host(self.host) - - elif self.action == 'unsilence': - self.unsilence_host(self.host) - - # toggle host/svc alerts - elif self.action == 'enable_alerts': - if self.services == 'host': - self.enable_host_notifications(self.host) - else: - self.enable_svc_notifications(self.host, - services=self.services) - - elif self.action == 'disable_alerts': - if self.services == 'host': - self.disable_host_notifications(self.host) - else: - self.disable_svc_notifications(self.host, - services=self.services) - elif self.action == 'silence_nagios': - self.silence_nagios() - - elif self.action == 'unsilence_nagios': - self.unsilence_nagios() - - elif self.action == 'command': - self.nagios_cmd(self.command) - - # wtf? 
- else: - self.module.fail_json(msg="unknown action specified: '%s'" % \ - self.action) - - self.module.exit_json(nagios_commands=self.command_results, - changed=True) - -###################################################################### -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/monitoring/newrelic_deployment b/library/monitoring/newrelic_deployment deleted file mode 100644 index 93d55832fd..0000000000 --- a/library/monitoring/newrelic_deployment +++ /dev/null @@ -1,145 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright 2013 Matt Coddington -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: newrelic_deployment -version_added: "1.2" -author: Matt Coddington -short_description: Notify newrelic about app deployments -description: - - Notify newrelic about app deployments (see http://newrelic.github.io/newrelic_api/NewRelicApi/Deployment.html) -options: - token: - description: - - API token. 
- required: true - app_name: - description: - - (one of app_name or application_id are required) The value of app_name in the newrelic.yml file used by the application - required: false - application_id: - description: - - (one of app_name or application_id are required) The application id, found in the URL when viewing the application in RPM - required: false - changelog: - description: - - A list of changes for this deployment - required: false - description: - description: - - Text annotation for the deployment - notes for you - required: false - revision: - description: - - A revision number (e.g., git commit SHA) - required: false - user: - description: - - The name of the user/process that triggered this deployment - required: false - appname: - description: - - Name of the application - required: false - environment: - description: - - The environment for this deployment - required: false - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - required: false - default: 'yes' - choices: ['yes', 'no'] - version_added: 1.5.1 - -# informational: requirements for nodes -requirements: [ urllib, urllib2 ] -''' - -EXAMPLES = ''' -- newrelic_deployment: token=AAAAAA - app_name=myapp - user='ansible deployment' - revision=1.0 -''' - -# =========================================== -# Module execution. 
-# - -def main(): - - module = AnsibleModule( - argument_spec=dict( - token=dict(required=True), - app_name=dict(required=False), - application_id=dict(required=False), - changelog=dict(required=False), - description=dict(required=False), - revision=dict(required=False), - user=dict(required=False), - appname=dict(required=False), - environment=dict(required=False), - validate_certs = dict(default='yes', type='bool'), - ), - supports_check_mode=True - ) - - # build list of params - params = {} - if module.params["app_name"] and module.params["application_id"]: - module.fail_json(msg="only one of 'app_name' or 'application_id' can be set") - - if module.params["app_name"]: - params["app_name"] = module.params["app_name"] - elif module.params["application_id"]: - params["application_id"] = module.params["application_id"] - else: - module.fail_json(msg="you must set one of 'app_name' or 'application_id'") - - for item in [ "changelog", "description", "revision", "user", "appname", "environment" ]: - if module.params[item]: - params[item] = module.params[item] - - # If we're in check mode, just exit pretending like we succeeded - if module.check_mode: - module.exit_json(changed=True) - - # Send the data to NewRelic - url = "https://rpm.newrelic.com/deployments.xml" - data = urllib.urlencode(params) - headers = { - 'x-api-key': module.params["token"], - } - response, info = fetch_url(module, url, data=data, headers=headers) - if info['status'] in (200, 201): - module.exit_json(changed=True) - else: - module.fail_json(msg="unable to update newrelic: %s" % info['msg']) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * - -main() - diff --git a/library/monitoring/pagerduty b/library/monitoring/pagerduty deleted file mode 100644 index 5ca33717dc..0000000000 --- a/library/monitoring/pagerduty +++ /dev/null @@ -1,232 +0,0 @@ -#!/usr/bin/python - -DOCUMENTATION = ''' - -module: pagerduty -short_description: Create 
PagerDuty maintenance windows -description: - - This module will let you create PagerDuty maintenance windows -version_added: "1.2" -author: Justin Johns -requirements: - - PagerDuty API access -options: - state: - description: - - Create a maintenance window or get a list of ongoing windows. - required: true - default: null - choices: [ "running", "started", "ongoing" ] - aliases: [] - name: - description: - - PagerDuty unique subdomain. - required: true - default: null - choices: [] - aliases: [] - user: - description: - - PagerDuty user ID. - required: true - default: null - choices: [] - aliases: [] - passwd: - description: - - PagerDuty user password. - required: true - default: null - choices: [] - aliases: [] - token: - description: - - A pagerduty token, generated on the pagerduty site. Can be used instead of - user/passwd combination. - required: true - default: null - choices: [] - aliases: [] - version_added: '1.8' - requester_id: - description: - - ID of user making the request. Only needed when using a token and creating a maintenance_window. - required: true - default: null - choices: [] - aliases: [] - version_added: '1.8' - service: - description: - - PagerDuty service ID. - required: false - default: null - choices: [] - aliases: [] - hours: - description: - - Length of maintenance window in hours. - required: false - default: 1 - choices: [] - aliases: [] - minutes: - description: - - Maintenance window in minutes (this is added to the hours). - required: false - default: 0 - choices: [] - aliases: [] - version_added: '1.8' - desc: - description: - - Short description of maintenance window. - required: false - default: Created by Ansible - choices: [] - aliases: [] - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. 
- required: false - default: 'yes' - choices: ['yes', 'no'] - version_added: 1.5.1 - -notes: - - This module does not yet have support to end maintenance windows. -''' - -EXAMPLES=''' -# List ongoing maintenance windows using a user/passwd -- pagerduty: name=companyabc user=example@example.com passwd=password123 state=ongoing - -# List ongoing maintenance windows using a token -- pagerduty: name=companyabc token=xxxxxxxxxxxxxx state=ongoing - -# Create a 1 hour maintenance window for service FOO123, using a user/passwd -- pagerduty: name=companyabc - user=example@example.com - passwd=password123 - state=running - service=FOO123 - -# Create a 5 minute maintenance window for service FOO123, using a token -- pagerduty: name=companyabc - token=xxxxxxxxxxxxxx - hours=0 - minutes=5 - state=running - service=FOO123 - - -# Create a 4 hour maintenance window for service FOO123 with the description "deployment". -- pagerduty: name=companyabc - user=example@example.com - passwd=password123 - state=running - service=FOO123 - hours=4 - desc=deployment -''' - -import json -import datetime -import base64 - -def auth_header(user, passwd, token): - if token: - return "Token token=%s" % token - - auth = base64.encodestring('%s:%s' % (user, passwd)).replace('\n', '') - return "Basic %s" % auth - -def ongoing(module, name, user, passwd, token): - url = "https://" + name + ".pagerduty.com/api/v1/maintenance_windows/ongoing" - headers = {"Authorization": auth_header(user, passwd, token)} - - response, info = fetch_url(module, url, headers=headers) - if info['status'] != 200: - module.fail_json(msg="failed to lookup the ongoing window: %s" % info['msg']) - - return False, response.read() - - -def create(module, name, user, passwd, token, requester_id, service, hours, minutes, desc): - now = datetime.datetime.utcnow() - later = now + datetime.timedelta(hours=int(hours), minutes=int(minutes)) - start = now.strftime("%Y-%m-%dT%H:%M:%SZ") - end = later.strftime("%Y-%m-%dT%H:%M:%SZ") - - url 
= "https://" + name + ".pagerduty.com/api/v1/maintenance_windows" - headers = { - 'Authorization': auth_header(user, passwd, token), - 'Content-Type' : 'application/json', - } - request_data = {'maintenance_window': {'start_time': start, 'end_time': end, 'description': desc, 'service_ids': [service]}} - if requester_id: - request_data['requester_id'] = requester_id - else: - if token: - module.fail_json(msg="requester_id is required when using a token") - - data = json.dumps(request_data) - response, info = fetch_url(module, url, data=data, headers=headers, method='POST') - if info['status'] != 200: - module.fail_json(msg="failed to create the window: %s" % info['msg']) - - return False, response.read() - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - state=dict(required=True, choices=['running', 'started', 'ongoing']), - name=dict(required=True), - user=dict(required=False), - passwd=dict(required=False), - token=dict(required=False), - service=dict(required=False), - requester_id=dict(required=False), - hours=dict(default='1', required=False), - minutes=dict(default='0', required=False), - desc=dict(default='Created by Ansible', required=False), - validate_certs = dict(default='yes', type='bool'), - ) - ) - - state = module.params['state'] - name = module.params['name'] - user = module.params['user'] - passwd = module.params['passwd'] - token = module.params['token'] - service = module.params['service'] - hours = module.params['hours'] - minutes = module.params['minutes'] - token = module.params['token'] - desc = module.params['desc'] - requester_id = module.params['requester_id'] - - if not token and not (user or passwd): - module.fail_json(msg="neither user and passwd nor token specified") - - if state == "running" or state == "started": - if not service: - module.fail_json(msg="service not specified") - (rc, out) = create(module, name, user, passwd, token, requester_id, service, hours, minutes, desc) - - if state == "ongoing": - (rc, out) = 
ongoing(module, name, user, passwd, token) - - if rc != 0: - module.fail_json(msg="failed", result=out) - - module.exit_json(msg="success", result=out) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * - -main() diff --git a/library/monitoring/pingdom b/library/monitoring/pingdom deleted file mode 100644 index 6f658cd950..0000000000 --- a/library/monitoring/pingdom +++ /dev/null @@ -1,135 +0,0 @@ -#!/usr/bin/python - -DOCUMENTATION = ''' - -module: pingdom -short_description: Pause/unpause Pingdom alerts -description: - - This module will let you pause/unpause Pingdom alerts -version_added: "1.2" -author: Justin Johns -requirements: - - "This pingdom python library: https://github.com/mbabineau/pingdom-python" -options: - state: - description: - - Define whether or not the check should be running or paused. - required: true - default: null - choices: [ "running", "paused" ] - aliases: [] - checkid: - description: - - Pingdom ID of the check. - required: true - default: null - choices: [] - aliases: [] - uid: - description: - - Pingdom user ID. - required: true - default: null - choices: [] - aliases: [] - passwd: - description: - - Pingdom user password. - required: true - default: null - choices: [] - aliases: [] - key: - description: - - Pingdom API key. - required: true - default: null - choices: [] - aliases: [] -notes: - - This module does not yet have support to add/remove checks. -''' - -EXAMPLES = ''' -# Pause the check with the ID of 12345. -- pingdom: uid=example@example.com - passwd=password123 - key=apipassword123 - checkid=12345 - state=paused - -# Unpause the check with the ID of 12345. 
-- pingdom: uid=example@example.com - passwd=password123 - key=apipassword123 - checkid=12345 - state=running -''' - -try: - import pingdom - HAS_PINGDOM = True -except: - HAS_PINGDOM = False - - - -def pause(checkid, uid, passwd, key): - - c = pingdom.PingdomConnection(uid, passwd, key) - c.modify_check(checkid, paused=True) - check = c.get_check(checkid) - name = check.name - result = check.status - #if result != "paused": # api output buggy - accept raw exception for now - # return (True, name, result) - return (False, name, result) - - -def unpause(checkid, uid, passwd, key): - - c = pingdom.PingdomConnection(uid, passwd, key) - c.modify_check(checkid, paused=False) - check = c.get_check(checkid) - name = check.name - result = check.status - #if result != "up": # api output buggy - accept raw exception for now - # return (True, name, result) - return (False, name, result) - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - state=dict(required=True, choices=['running', 'paused', 'started', 'stopped']), - checkid=dict(required=True), - uid=dict(required=True), - passwd=dict(required=True), - key=dict(required=True) - ) - ) - - if not HAS_PINGDOM: - module.fail_json(msg="Missing requried pingdom module (check docs)") - - checkid = module.params['checkid'] - state = module.params['state'] - uid = module.params['uid'] - passwd = module.params['passwd'] - key = module.params['key'] - - if (state == "paused" or state == "stopped"): - (rc, name, result) = pause(checkid, uid, passwd, key) - - if (state == "running" or state == "started"): - (rc, name, result) = unpause(checkid, uid, passwd, key) - - if rc != 0: - module.fail_json(checkid=checkid, name=name, status=result) - - module.exit_json(checkid=checkid, name=name, status=result) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/monitoring/rollbar_deployment b/library/monitoring/rollbar_deployment deleted file mode 100644 index 772e78fc5c..0000000000 
--- a/library/monitoring/rollbar_deployment +++ /dev/null @@ -1,133 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright 2014, Max Riveiro, -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: rollbar_deployment -version_added: 1.6 -author: Max Riveiro -short_description: Notify Rollbar about app deployments -description: - - Notify Rollbar about app deployments - (see https://rollbar.com/docs/deploys_other/) -options: - token: - description: - - Your project access token. - required: true - environment: - description: - - Name of the environment being deployed, e.g. 'production'. - required: true - revision: - description: - - Revision number/sha being deployed. - required: true - user: - description: - - User who deployed. - required: false - rollbar_user: - description: - - Rollbar username of the user who deployed. - required: false - comment: - description: - - Deploy comment (e.g. what is being deployed). - required: false - url: - description: - - Optional URL to submit the notification to. - required: false - default: 'https://api.rollbar.com/api/1/deploy/' - validate_certs: - description: - - If C(no), SSL certificates for the target url will not be validated. - This should only be used on personally controlled sites using - self-signed certificates. 
- required: false - default: 'yes' - choices: ['yes', 'no'] -''' - -EXAMPLES = ''' -- rollbar_deployment: token=AAAAAA - environment='staging' - user='ansible' - revision=4.2, - rollbar_user='admin', - comment='Test Deploy' -''' - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - token=dict(required=True), - environment=dict(required=True), - revision=dict(required=True), - user=dict(required=False), - rollbar_user=dict(required=False), - comment=dict(required=False), - url=dict( - required=False, - default='https://api.rollbar.com/api/1/deploy/' - ), - validate_certs=dict(default='yes', type='bool'), - ), - supports_check_mode=True - ) - - if module.check_mode: - module.exit_json(changed=True) - - params = dict( - access_token=module.params['token'], - environment=module.params['environment'], - revision=module.params['revision'] - ) - - if module.params['user']: - params['local_username'] = module.params['user'] - - if module.params['rollbar_user']: - params['rollbar_username'] = module.params['rollbar_user'] - - if module.params['comment']: - params['comment'] = module.params['comment'] - - url = module.params.get('url') - - try: - data = urllib.urlencode(params) - response, info = fetch_url(module, url, data=data) - except Exception, e: - module.fail_json(msg='Unable to notify Rollbar: %s' % e) - else: - if info['status'] == 200: - module.exit_json(changed=True) - else: - module.fail_json(msg='HTTP result code: %d connecting to %s' % (info['status'], url)) - -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * - -main() diff --git a/library/monitoring/stackdriver b/library/monitoring/stackdriver deleted file mode 100644 index c36964dd9d..0000000000 --- a/library/monitoring/stackdriver +++ /dev/null @@ -1,196 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -DOCUMENTATION = ''' - -module: stackdriver -short_description: Send code deploy and annotation events to stackdriver -description: - - Send code deploy and 
annotation events to Stackdriver -version_added: "1.6" -author: Ben Whaley -options: - key: - description: - - API key. - required: true - default: null - event: - description: - - The type of event to send, either annotation or deploy - choices: ['annotation', 'deploy'] - required: false - default: null - revision_id: - description: - - The revision of the code that was deployed. Required for deploy events - required: false - default: null - deployed_by: - description: - - The person or robot responsible for deploying the code - required: false - default: "Ansible" - deployed_to: - description: - - "The environment code was deployed to. (ie: development, staging, production)" - required: false - default: null - repository: - description: - - The repository (or project) deployed - required: false - default: null - msg: - description: - - The contents of the annotation message, in plain text.  Limited to 256 characters. Required for annotation. - required: false - default: null - annotated_by: - description: - - The person or robot who the annotation should be attributed to. - required: false - default: "Ansible" - level: - description: - - one of INFO/WARN/ERROR, defaults to INFO if not supplied.  May affect display. - choices: ['INFO', 'WARN', 'ERROR'] - required: false - default: 'INFO' - instance_id: - description: - - id of an EC2 instance that this event should be attached to, which will limit the contexts where this event is shown - required: false - default: null - event_epoch: - description: - - "Unix timestamp of where the event should appear in the timeline, defaults to now. Be careful with this." 
- required: false - default: null -''' - -EXAMPLES = ''' -- stackdriver: key=AAAAAA event=deploy deployed_to=production deployed_by=leeroyjenkins repository=MyWebApp revision_id=abcd123 - -- stackdriver: key=AAAAAA event=annotation msg="Greetings from Ansible" annotated_by=leeroyjenkins level=WARN instance_id=i-abcd1234 -''' - -# =========================================== -# Stackdriver module specific support methods. -# -try: - import json -except ImportError: - import simplejson as json - -def send_deploy_event(module, key, revision_id, deployed_by='Ansible', deployed_to=None, repository=None): - """Send a deploy event to Stackdriver""" - deploy_api = "https://event-gateway.stackdriver.com/v1/deployevent" - - params = {} - params['revision_id'] = revision_id - params['deployed_by'] = deployed_by - if deployed_to: - params['deployed_to'] = deployed_to - if repository: - params['repository'] = repository - - return do_send_request(module, deploy_api, params, key) - -def send_annotation_event(module, key, msg, annotated_by='Ansible', level=None, instance_id=None, event_epoch=None): - """Send an annotation event to Stackdriver""" - annotation_api = "https://event-gateway.stackdriver.com/v1/annotationevent" - - params = {} - params['message'] = msg - if annotated_by: - params['annotated_by'] = annotated_by - if level: - params['level'] = level - if instance_id: - params['instance_id'] = instance_id - if event_epoch: - params['event_epoch'] = event_epoch - - return do_send_request(module, annotation_api, params, key) - -def do_send_request(module, url, params, key): - data = json.dumps(params) - headers = { - 'Content-Type': 'application/json', - 'x-stackdriver-apikey': key - } - response, info = fetch_url(module, url, headers=headers, data=data, method='POST') - if info['status'] != 200: - module.fail_json(msg="Unable to send msg: %s" % info['msg']) - - -# =========================================== -# Module execution. 
-# - -def main(): - - module = AnsibleModule( - argument_spec=dict( - key=dict(required=True), - event=dict(required=True, choices=['deploy', 'annotation']), - msg=dict(), - revision_id=dict(), - annotated_by=dict(default='Ansible'), - level=dict(default='INFO', choices=['INFO', 'WARN', 'ERROR']), - instance_id=dict(), - event_epoch=dict(), - deployed_by=dict(default='Ansible'), - deployed_to=dict(), - repository=dict(), - ), - supports_check_mode=True - ) - - key = module.params["key"] - event = module.params["event"] - - # Annotation params - msg = module.params["msg"] - annotated_by = module.params["annotated_by"] - level = module.params["level"] - instance_id = module.params["instance_id"] - event_epoch = module.params["event_epoch"] - - # Deploy params - revision_id = module.params["revision_id"] - deployed_by = module.params["deployed_by"] - deployed_to = module.params["deployed_to"] - repository = module.params["repository"] - - ################################################################## - # deploy requires revision_id - # annotation requires msg - # We verify these manually - ################################################################## - - if event == 'deploy': - if not revision_id: - module.fail_json(msg="revision_id required for deploy events") - try: - send_deploy_event(module, key, revision_id, deployed_by, deployed_to, repository) - except Exception, e: - module.fail_json(msg="unable to sent deploy event: %s" % e) - - if event == 'annotation': - if not msg: - module.fail_json(msg="msg required for annotation events") - try: - send_annotation_event(module, key, msg, annotated_by, level, instance_id, event_epoch) - except Exception, e: - module.fail_json(msg="unable to sent annotation event: %s" % e) - - changed = True - module.exit_json(changed=changed, deployed_by=deployed_by) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * - -main() diff --git 
a/library/monitoring/zabbix_maintenance b/library/monitoring/zabbix_maintenance deleted file mode 100644 index e27091e073..0000000000 --- a/library/monitoring/zabbix_maintenance +++ /dev/null @@ -1,371 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Alexander Bulimov -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - - -DOCUMENTATION = ''' - -module: zabbix_maintenance -short_description: Create Zabbix maintenance windows -description: - - This module will let you create Zabbix maintenance windows. -version_added: "1.8" -author: Alexander Bulimov -requirements: - - zabbix-api python module -options: - state: - description: - - Create or remove a maintenance window. - required: true - default: null - choices: [ "present", "absent" ] - server_url: - description: - - Url of Zabbix server, with protocol (http or https). - C(url) is an alias for C(server_url). - required: true - default: null - aliases: [ "url" ] - login_user: - description: - - Zabbix user name. - required: true - default: null - login_password: - description: - - Zabbix user password. - required: true - default: null - host_names: - description: - - Hosts to manage maintenance window for. - Separate multiple hosts with commas. - C(host_name) is an alias for C(host_names). - B(Required) option when C(state) is I(present) - and no C(host_groups) specified. 
- required: false - default: null - aliases: [ "host_name" ] - host_groups: - description: - - Host groups to manage maintenance window for. - Separate multiple groups with commas. - C(host_group) is an alias for C(host_groups). - B(Required) option when C(state) is I(present) - and no C(host_names) specified. - required: false - default: null - aliases: [ "host_group" ] - minutes: - description: - - Length of maintenance window in minutes. - required: false - default: 10 - name: - description: - - Unique name of maintenance window. - required: true - default: null - desc: - description: - - Short description of maintenance window. - required: true - default: Created by Ansible - collect_data: - description: - - Type of maintenance. With data collection, or without. - required: false - default: "true" -notes: - - Useful for setting hosts in maintenance mode before big update, - and removing maintenance window after update. - - Module creates maintenance window from now() to now() + minutes, - so if Zabbix server's time and host's time are not synchronized, - you will get strange results. - - Install required module with 'pip install zabbix-api' command. - - Checks existance only by maintenance name. -''' - -EXAMPLES = ''' -# Create maintenance window named "Update of www1" -# for host www1.example.com for 90 minutes -- zabbix_maintenance: name="Update of www1" - host_name=www1.example.com - state=present - minutes=90 - server_url=https://monitoring.example.com - login_user=ansible - login_password=pAsSwOrD - -# Create maintenance window named "Mass update" -# for host www1.example.com and host groups Office and Dev -- zabbix_maintenance: name="Update of www1" - host_name=www1.example.com - host_groups=Office,Dev - state=present - server_url=https://monitoring.example.com - login_user=ansible - login_password=pAsSwOrD - -# Create maintenance window named "update" -# for hosts www1.example.com and db1.example.com and without data collection. 
-- zabbix_maintenance: name=update - host_names=www1.example.com,db1.example.com - state=present - collect_data=false - server_url=https://monitoring.example.com - login_user=ansible - login_password=pAsSwOrD - -# Remove maintenance window named "Test1" -- zabbix_maintenance: name=Test1 - state=absent - server_url=https://monitoring.example.com - login_user=ansible - login_password=pAsSwOrD -''' - -import datetime -import time - -try: - from zabbix_api import ZabbixAPI - HAS_ZABBIX_API = True -except ImportError: - HAS_ZABBIX_API = False - - -def create_maintenance(zbx, group_ids, host_ids, start_time, maintenance_type, period, name, desc): - end_time = start_time + period - try: - zbx.maintenance.create( - { - "groupids": group_ids, - "hostids": host_ids, - "name": name, - "maintenance_type": maintenance_type, - "active_since": str(start_time), - "active_till": str(end_time), - "description": desc, - "timeperiods": [{ - "timeperiod_type": "0", - "start_date": str(start_time), - "period": str(period), - }] - } - ) - except BaseException as e: - return 1, None, str(e) - return 0, None, None - - -def get_maintenance_id(zbx, name): - try: - result = zbx.maintenance.get( - { - "filter": - { - "name": name, - } - } - ) - except BaseException as e: - return 1, None, str(e) - - maintenance_ids = [] - for res in result: - maintenance_ids.append(res["maintenanceid"]) - - return 0, maintenance_ids, None - - -def delete_maintenance(zbx, maintenance_id): - try: - zbx.maintenance.delete(maintenance_id) - except BaseException as e: - return 1, None, str(e) - return 0, None, None - - -def check_maintenance(zbx, name): - try: - result = zbx.maintenance.exists( - { - "name": name - } - ) - except BaseException as e: - return 1, None, str(e) - return 0, result, None - - -def get_group_ids(zbx, host_groups): - group_ids = [] - for group in host_groups: - try: - result = zbx.hostgroup.get( - { - "output": "extend", - "filter": - { - "name": group - } - } - ) - except BaseException as 
e: - return 1, None, str(e) - - if not result: - return 1, None, "Group id for group %s not found" % group - - group_ids.append(result[0]["groupid"]) - - return 0, group_ids, None - - -def get_host_ids(zbx, host_names): - host_ids = [] - for host in host_names: - try: - result = zbx.host.get( - { - "output": "extend", - "filter": - { - "name": host - } - } - ) - except BaseException as e: - return 1, None, str(e) - - if not result: - return 1, None, "Host id for host %s not found" % host - - host_ids.append(result[0]["hostid"]) - - return 0, host_ids, None - - -def main(): - module = AnsibleModule( - argument_spec=dict( - state=dict(required=True, default=None, choices=['present', 'absent']), - server_url=dict(required=True, default=None, aliases=['url']), - host_names=dict(type='list', required=False, default=None, aliases=['host_name']), - minutes=dict(type='int', required=False, default=10), - host_groups=dict(type='list', required=False, default=None, aliases=['host_group']), - login_user=dict(required=True, default=None), - login_password=dict(required=True, default=None), - name=dict(required=True, default=None), - desc=dict(required=False, default="Created by Ansible"), - collect_data=dict(type='bool', required=False, default=True), - ), - supports_check_mode=True, - ) - - if not HAS_ZABBIX_API: - module.fail_json(msg="Missing requried zabbix-api module (check docs or install with: pip install zabbix-api)") - - host_names = module.params['host_names'] - host_groups = module.params['host_groups'] - state = module.params['state'] - login_user = module.params['login_user'] - login_password = module.params['login_password'] - minutes = module.params['minutes'] - name = module.params['name'] - desc = module.params['desc'] - server_url = module.params['server_url'] - collect_data = module.params['collect_data'] - if collect_data: - maintenance_type = 0 - else: - maintenance_type = 1 - - try: - zbx = ZabbixAPI(server_url) - zbx.login(login_user, login_password) - 
except BaseException as e: - module.fail_json(msg="Failed to connect to Zabbix server: %s" % e) - - changed = False - - if state == "present": - - now = datetime.datetime.now() - start_time = time.mktime(now.timetuple()) - period = 60 * int(minutes) # N * 60 seconds - - if host_groups: - (rc, group_ids, error) = get_group_ids(zbx, host_groups) - if rc != 0: - module.fail_json(msg="Failed to get group_ids: %s" % error) - else: - group_ids = [] - - if host_names: - (rc, host_ids, error) = get_host_ids(zbx, host_names) - if rc != 0: - module.fail_json(msg="Failed to get host_ids: %s" % error) - else: - host_ids = [] - - (rc, exists, error) = check_maintenance(zbx, name) - if rc != 0: - module.fail_json(msg="Failed to check maintenance %s existance: %s" % (name, error)) - - if not exists: - if not host_names and not host_groups: - module.fail_json(msg="At least one host_name or host_group must be defined for each created maintenance.") - - if module.check_mode: - changed = True - else: - (rc, _, error) = create_maintenance(zbx, group_ids, host_ids, start_time, maintenance_type, period, name, desc) - if rc == 0: - changed = True - else: - module.fail_json(msg="Failed to create maintenance: %s" % error) - - if state == "absent": - - (rc, exists, error) = check_maintenance(zbx, name) - if rc != 0: - module.fail_json(msg="Failed to check maintenance %s existance: %s" % (name, error)) - - if exists: - (rc, maintenance, error) = get_maintenance_id(zbx, name) - if rc != 0: - module.fail_json(msg="Failed to get maintenance id: %s" % error) - - if maintenance: - if module.check_mode: - changed = True - else: - (rc, _, error) = delete_maintenance(zbx, maintenance) - if rc == 0: - changed = True - else: - module.fail_json(msg="Failed to remove maintenance: %s" % error) - - module.exit_json(changed=changed) - -from ansible.module_utils.basic import * -main() diff --git a/library/net_infrastructure/a10_server b/library/net_infrastructure/a10_server deleted file mode 100644 index 
65410536ee..0000000000 --- a/library/net_infrastructure/a10_server +++ /dev/null @@ -1,269 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -""" -Ansible module to manage A10 Networks slb server objects -(c) 2014, Mischa Peters - -This file is part of Ansible - -Ansible is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -Ansible is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with Ansible. If not, see . -""" - -DOCUMENTATION = ''' ---- -module: a10_server -version_added: 1.8 -short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices -description: - - Manage slb server objects on A10 Networks devices via aXAPI -author: Mischa Peters -notes: - - Requires A10 Networks aXAPI 2.1 -options: - host: - description: - - hostname or ip of your A10 Networks device - required: true - default: null - aliases: [] - choices: [] - username: - description: - - admin account of your A10 Networks device - required: true - default: null - aliases: ['user', 'admin'] - choices: [] - password: - description: - - admin password of your A10 Networks device - required: true - default: null - aliases: ['pass', 'pwd'] - choices: [] - server_name: - description: - - slb server name - required: true - default: null - aliases: ['server'] - choices: [] - server_ip: - description: - - slb server IP address - required: false - default: null - aliases: ['ip', 'address'] - choices: [] - server_status: - description: - - slb virtual server status - required: false - default: enable - aliases: ['status'] - choices: ['enabled', 'disabled'] - server_ports: - 
description: - - A list of ports to create for the server. Each list item should be a - dictionary which specifies the C(port:) and C(protocol:), but can also optionally - specify the C(status:). See the examples below for details. This parameter is - required when C(state) is C(present). - required: false - default: null - aliases: [] - choices: [] - state: - description: - - create, update or remove slb server - required: false - default: present - aliases: [] - choices: ['present', 'absent'] -''' - -EXAMPLES = ''' -# Create a new server -- a10_server: - host: a10.mydomain.com - username: myadmin - password: mypassword - server: test - server_ip: 1.1.1.100 - server_ports: - - port_num: 8080 - protocol: tcp - - port_num: 8443 - protocol: TCP - -''' - -VALID_PORT_FIELDS = ['port_num', 'protocol', 'status'] - -def validate_ports(module, ports): - for item in ports: - for key in item: - if key not in VALID_PORT_FIELDS: - module.fail_json(msg="invalid port field (%s), must be one of: %s" % (key, ','.join(VALID_PORT_FIELDS))) - - # validate the port number is present and an integer - if 'port_num' in item: - try: - item['port_num'] = int(item['port_num']) - except: - module.fail_json(msg="port_num entries in the port definitions must be integers") - else: - module.fail_json(msg="port definitions must define the port_num field") - - # validate the port protocol is present, and convert it to - # the internal API integer value (and validate it) - if 'protocol' in item: - protocol = axapi_get_port_protocol(item['protocol']) - if not protocol: - module.fail_json(msg="invalid port protocol, must be one of: %s" % ','.join(AXAPI_PORT_PROTOCOLS)) - else: - item['protocol'] = protocol - else: - module.fail_json(msg="port definitions must define the port protocol (%s)" % ','.join(AXAPI_PORT_PROTOCOLS)) - - # convert the status to the internal API integer value - if 'status' in item: - item['status'] = axapi_enabled_disabled(item['status']) - else: - item['status'] = 1 - - -def 
main(): - argument_spec = a10_argument_spec() - argument_spec.update(url_argument_spec()) - argument_spec.update( - dict( - state=dict(type='str', default='present', choices=['present', 'absent']), - server_name=dict(type='str', aliases=['server'], required=True), - server_ip=dict(type='str', aliases=['ip', 'address']), - server_status=dict(type='str', default='enabled', aliases=['status'], choices=['enabled', 'disabled']), - server_ports=dict(type='list', aliases=['port'], default=[]), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=False - ) - - host = module.params['host'] - username = module.params['username'] - password = module.params['password'] - state = module.params['state'] - write_config = module.params['write_config'] - slb_server = module.params['server_name'] - slb_server_ip = module.params['server_ip'] - slb_server_status = module.params['server_status'] - slb_server_ports = module.params['server_ports'] - - if slb_server is None: - module.fail_json(msg='server_name is required') - - axapi_base_url = 'https://%s/services/rest/V2.1/?format=json' % host - session_url = axapi_authenticate(module, axapi_base_url, username, password) - - # validate the ports data structure - validate_ports(module, slb_server_ports) - - json_post = { - 'server': { - 'name': slb_server, - 'host': slb_server_ip, - 'status': axapi_enabled_disabled(slb_server_status), - 'port_list': slb_server_ports, - } - } - - slb_server_data = axapi_call(module, session_url + '&method=slb.server.search', json.dumps({'name': slb_server})) - slb_server_exists = not axapi_failure(slb_server_data) - - changed = False - if state == 'present': - if not slb_server_ip: - module.fail_json(msg='you must specify an IP address when creating a server') - - if not slb_server_exists: - result = axapi_call(module, session_url + '&method=slb.server.create', json.dumps(json_post)) - if axapi_failure(result): - module.fail_json(msg="failed to create the server: %s" % 
result['response']['err']['msg']) - changed = True - else: - def needs_update(src_ports, dst_ports): - ''' - Checks to determine if the port definitions of the src_ports - array are in or different from those in dst_ports. If there is - a difference, this function returns true, otherwise false. - ''' - for src_port in src_ports: - found = False - different = False - for dst_port in dst_ports: - if src_port['port_num'] == dst_port['port_num']: - found = True - for valid_field in VALID_PORT_FIELDS: - if src_port[valid_field] != dst_port[valid_field]: - different = True - break - if found or different: - break - if not found or different: - return True - # every port from the src exists in the dst, and none of them were different - return False - - defined_ports = slb_server_data.get('server', {}).get('port_list', []) - - # we check for a needed update both ways, in case ports - # are missing from either the ones specified by the user - # or from those on the device - if needs_update(defined_ports, slb_server_ports) or needs_update(slb_server_ports, defined_ports): - result = axapi_call(module, session_url + '&method=slb.server.update', json.dumps(json_post)) - if axapi_failure(result): - module.fail_json(msg="failed to update the server: %s" % result['response']['err']['msg']) - changed = True - - # if we changed things, get the full info regarding - # the service group for the return data below - if changed: - result = axapi_call(module, session_url + '&method=slb.server.search', json.dumps({'name': slb_server})) - else: - result = slb_server_data - elif state == 'absent': - if slb_server_exists: - result = axapi_call(module, session_url + '&method=slb.server.delete', json.dumps({'name': slb_server})) - changed = True - else: - result = dict(msg="the server was not present") - - # if the config has changed, save the config unless otherwise requested - if changed and write_config: - write_result = axapi_call(module, session_url + '&method=system.action.write_memory') 
- if axapi_failure(write_result): - module.fail_json(msg="failed to save the configuration: %s" % write_result['response']['err']['msg']) - - # log out of the session nicely and exit - axapi_call(module, session_url + '&method=session.close') - module.exit_json(changed=changed, content=result) - -# standard ansible module imports -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * -from ansible.module_utils.a10 import * - -main() diff --git a/library/net_infrastructure/a10_service_group b/library/net_infrastructure/a10_service_group deleted file mode 100644 index 3627e2d12b..0000000000 --- a/library/net_infrastructure/a10_service_group +++ /dev/null @@ -1,341 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -""" -Ansible module to manage A10 Networks slb service-group objects -(c) 2014, Mischa Peters - -This file is part of Ansible - -Ansible is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -Ansible is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with Ansible. If not, see . 
-""" - -DOCUMENTATION = ''' ---- -module: a10_service_group -version_added: 1.8 -short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices -description: - - Manage slb service-group objects on A10 Networks devices via aXAPI -author: Mischa Peters -notes: - - Requires A10 Networks aXAPI 2.1 - - When a server doesn't exist and is added to the service-group the server will be created -options: - host: - description: - - hostname or ip of your A10 Networks device - required: true - default: null - aliases: [] - choices: [] - username: - description: - - admin account of your A10 Networks device - required: true - default: null - aliases: ['user', 'admin'] - choices: [] - password: - description: - - admin password of your A10 Networks device - required: true - default: null - aliases: ['pass', 'pwd'] - choices: [] - service_group: - description: - - slb service-group name - required: true - default: null - aliases: ['service', 'pool', 'group'] - choices: [] - service_group_protocol: - description: - - slb service-group protocol - required: false - default: tcp - aliases: ['proto', 'protocol'] - choices: ['tcp', 'udp'] - service_group_method: - description: - - slb service-group loadbalancing method - required: false - default: round-robin - aliases: ['method'] - choices: ['round-robin', 'weighted-rr', 'least-connection', 'weighted-least-connection', 'service-least-connection', 'service-weighted-least-connection', 'fastest-response', 'least-request', 'round-robin-strict', 'src-ip-only-hash', 'src-ip-hash'] - servers: - description: - - A list of servers to add to the service group. Each list item should be a - dictionary which specifies the C(server:) and C(port:), but can also optionally - specify the C(status:). See the examples below for details. - required: false - default: null - aliases: [] - choices: [] - write_config: - description: - - If C(yes), any changes will cause a write of the running configuration - to non-volatile memory. 
This will save I(all) configuration changes, - including those that may have been made manually or through other modules, - so care should be taken when specifying C(yes). - required: false - default: "no" - choices: ["yes", "no"] - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled devices using self-signed certificates. - required: false - default: 'yes' - choices: ['yes', 'no'] - -''' - -EXAMPLES = ''' -# Create a new service-group -- a10_service_group: - host: a10.mydomain.com - username: myadmin - password: mypassword - service_group: sg-80-tcp - servers: - - server: foo1.mydomain.com - port: 8080 - - server: foo2.mydomain.com - port: 8080 - - server: foo3.mydomain.com - port: 8080 - - server: foo4.mydomain.com - port: 8080 - status: disabled - -''' - -VALID_SERVICE_GROUP_FIELDS = ['name', 'protocol', 'lb_method'] -VALID_SERVER_FIELDS = ['server', 'port', 'status'] - -def validate_servers(module, servers): - for item in servers: - for key in item: - if key not in VALID_SERVER_FIELDS: - module.fail_json(msg="invalid server field (%s), must be one of: %s" % (key, ','.join(VALID_SERVER_FIELDS))) - - # validate the server name is present - if 'server' not in item: - module.fail_json(msg="server definitions must define the server field") - - # validate the port number is present and an integer - if 'port' in item: - try: - item['port'] = int(item['port']) - except: - module.fail_json(msg="server port definitions must be integers") - else: - module.fail_json(msg="server definitions must define the port field") - - # convert the status to the internal API integer value - if 'status' in item: - item['status'] = axapi_enabled_disabled(item['status']) - else: - item['status'] = 1 - - -def main(): - argument_spec = a10_argument_spec() - argument_spec.update(url_argument_spec()) - argument_spec.update( - dict( - state=dict(type='str', default='present', choices=['present', 'absent']), - 
service_group=dict(type='str', aliases=['service', 'pool', 'group'], required=True), - service_group_protocol=dict(type='str', default='tcp', aliases=['proto', 'protocol'], choices=['tcp', 'udp']), - service_group_method=dict(type='str', default='round-robin', - aliases=['method'], - choices=['round-robin', - 'weighted-rr', - 'least-connection', - 'weighted-least-connection', - 'service-least-connection', - 'service-weighted-least-connection', - 'fastest-response', - 'least-request', - 'round-robin-strict', - 'src-ip-only-hash', - 'src-ip-hash']), - servers=dict(type='list', aliases=['server', 'member'], default=[]), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=False - ) - - host = module.params['host'] - username = module.params['username'] - password = module.params['password'] - state = module.params['state'] - write_config = module.params['write_config'] - slb_service_group = module.params['service_group'] - slb_service_group_proto = module.params['service_group_protocol'] - slb_service_group_method = module.params['service_group_method'] - slb_servers = module.params['servers'] - - if slb_service_group is None: - module.fail_json(msg='service_group is required') - - axapi_base_url = 'https://' + host + '/services/rest/V2.1/?format=json' - load_balancing_methods = {'round-robin': 0, - 'weighted-rr': 1, - 'least-connection': 2, - 'weighted-least-connection': 3, - 'service-least-connection': 4, - 'service-weighted-least-connection': 5, - 'fastest-response': 6, - 'least-request': 7, - 'round-robin-strict': 8, - 'src-ip-only-hash': 14, - 'src-ip-hash': 15} - - if not slb_service_group_proto or slb_service_group_proto.lower() == 'tcp': - protocol = 2 - else: - protocol = 3 - - # validate the server data list structure - validate_servers(module, slb_servers) - - json_post = { - 'service_group': { - 'name': slb_service_group, - 'protocol': protocol, - 'lb_method': load_balancing_methods[slb_service_group_method], - } - } - - 
# first we authenticate to get a session id - session_url = axapi_authenticate(module, axapi_base_url, username, password) - - # then we check to see if the specified group exists - slb_result = axapi_call(module, session_url + '&method=slb.service_group.search', json.dumps({'name': slb_service_group})) - slb_service_group_exist = not axapi_failure(slb_result) - - changed = False - if state == 'present': - # before creating/updating we need to validate that servers - # defined in the servers list exist to prevent errors - checked_servers = [] - for server in slb_servers: - result = axapi_call(module, session_url + '&method=slb.server.search', json.dumps({'name': server['server']})) - if axapi_failure(result): - module.fail_json(msg="the server %s specified in the servers list does not exist" % server['server']) - checked_servers.append(server['server']) - - if not slb_service_group_exist: - result = axapi_call(module, session_url + '&method=slb.service_group.create', json.dumps(json_post)) - if axapi_failure(result): - module.fail_json(msg=result['response']['err']['msg']) - changed = True - else: - # check to see if the service group definition without the - # server members is different, and update that individually - # if it needs it - do_update = False - for field in VALID_SERVICE_GROUP_FIELDS: - if json_post['service_group'][field] != slb_result['service_group'][field]: - do_update = True - break - - if do_update: - result = axapi_call(module, session_url + '&method=slb.service_group.update', json.dumps(json_post)) - if axapi_failure(result): - module.fail_json(msg=result['response']['err']['msg']) - changed = True - - # next we pull the defined list of servers out of the returned - # results to make it a bit easier to iterate over - defined_servers = slb_result.get('service_group', {}).get('member_list', []) - - # next we add/update new member servers from the user-specified - # list if they're different or not on the target device - for server in 
slb_servers: - found = False - different = False - for def_server in defined_servers: - if server['server'] == def_server['server']: - found = True - for valid_field in VALID_SERVER_FIELDS: - if server[valid_field] != def_server[valid_field]: - different = True - break - if found or different: - break - # add or update as required - server_data = { - "name": slb_service_group, - "member": server, - } - if not found: - result = axapi_call(module, session_url + '&method=slb.service_group.member.create', json.dumps(server_data)) - changed = True - elif different: - result = axapi_call(module, session_url + '&method=slb.service_group.member.update', json.dumps(server_data)) - changed = True - - # finally, remove any servers that are on the target - # device but were not specified in the list given - for server in defined_servers: - found = False - for slb_server in slb_servers: - if server['server'] == slb_server['server']: - found = True - break - # remove if not found - server_data = { - "name": slb_service_group, - "member": server, - } - if not found: - result = axapi_call(module, session_url + '&method=slb.service_group.member.delete', json.dumps(server_data)) - changed = True - - # if we changed things, get the full info regarding - # the service group for the return data below - if changed: - result = axapi_call(module, session_url + '&method=slb.service_group.search', json.dumps({'name': slb_service_group})) - else: - result = slb_result - elif state == 'absent': - if slb_service_group_exist: - result = axapi_call(module, session_url + '&method=slb.service_group.delete', json.dumps({'name': slb_service_group})) - changed = True - else: - result = dict(msg="the service group was not present") - - # if the config has changed, save the config unless otherwise requested - if changed and write_config: - write_result = axapi_call(module, session_url + '&method=system.action.write_memory') - if axapi_failure(write_result): - module.fail_json(msg="failed to save the 
configuration: %s" % write_result['response']['err']['msg']) - - # log out of the session nicely and exit - axapi_call(module, session_url + '&method=session.close') - module.exit_json(changed=changed, content=result) - -# standard ansible module imports -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * -from ansible.module_utils.a10 import * - -main() diff --git a/library/net_infrastructure/a10_virtual_server b/library/net_infrastructure/a10_virtual_server deleted file mode 100644 index 3d807c098c..0000000000 --- a/library/net_infrastructure/a10_virtual_server +++ /dev/null @@ -1,299 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -""" -Ansible module to manage A10 Networks slb virtual server objects -(c) 2014, Mischa Peters - -This file is part of Ansible - -Ansible is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -Ansible is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with Ansible. If not, see . 
-""" - -DOCUMENTATION = ''' ---- -module: a10_virtual_server -version_added: 1.8 -short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices -description: - - Manage slb virtual server objects on A10 Networks devices via aXAPI -author: Mischa Peters -notes: - - Requires A10 Networks aXAPI 2.1 -requirements: - - urllib2 - - re -options: - host: - description: - - hostname or ip of your A10 Networks device - required: true - default: null - aliases: [] - choices: [] - username: - description: - - admin account of your A10 Networks device - required: true - default: null - aliases: ['user', 'admin'] - choices: [] - password: - description: - - admin password of your A10 Networks device - required: true - default: null - aliases: ['pass', 'pwd'] - choices: [] - virtual_server: - description: - - slb virtual server name - required: true - default: null - aliases: ['vip', 'virtual'] - choices: [] - virtual_server_ip: - description: - - slb virtual server ip address - required: false - default: null - aliases: ['ip', 'address'] - choices: [] - virtual_server_status: - description: - - slb virtual server status - required: false - default: enable - aliases: ['status'] - choices: ['enabled', 'disabled'] - virtual_server_ports: - description: - - A list of ports to create for the virtual server. Each list item should be a - dictionary which specifies the C(port:) and C(type:), but can also optionally - specify the C(service_group:) as well as the C(status:). See the examples - below for details. This parameter is required when C(state) is C(present). - required: false - write_config: - description: - - If C(yes), any changes will cause a write of the running configuration - to non-volatile memory. This will save I(all) configuration changes, - including those that may have been made manually or through other modules, - so care should be taken when specifying C(yes). 
- required: false - default: "no" - choices: ["yes", "no"] - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled devices using self-signed certificates. - required: false - default: 'yes' - choices: ['yes', 'no'] - -''' - -EXAMPLES = ''' -# Create a new virtual server -- a10_virtual_server: - host: a10.mydomain.com - username: myadmin - password: mypassword - virtual_server: vserver1 - virtual_server_ip: 1.1.1.1 - virtual_server_ports: - - port: 80 - protocol: TCP - service_group: sg-80-tcp - - port: 443 - protocol: HTTPS - service_group: sg-443-https - - port: 8080 - protocol: http - status: disabled - -''' - -VALID_PORT_FIELDS = ['port', 'protocol', 'service_group', 'status'] - -def validate_ports(module, ports): - for item in ports: - for key in item: - if key not in VALID_PORT_FIELDS: - module.fail_json(msg="invalid port field (%s), must be one of: %s" % (key, ','.join(VALID_PORT_FIELDS))) - - # validate the port number is present and an integer - if 'port' in item: - try: - item['port'] = int(item['port']) - except: - module.fail_json(msg="port definitions must be integers") - else: - module.fail_json(msg="port definitions must define the port field") - - # validate the port protocol is present, and convert it to - # the internal API integer value (and validate it) - if 'protocol' in item: - protocol = axapi_get_vport_protocol(item['protocol']) - if not protocol: - module.fail_json(msg="invalid port protocol, must be one of: %s" % ','.join(AXAPI_VPORT_PROTOCOLS)) - else: - item['protocol'] = protocol - else: - module.fail_json(msg="port definitions must define the port protocol (%s)" % ','.join(AXAPI_VPORT_PROTOCOLS)) - - # convert the status to the internal API integer value - if 'status' in item: - item['status'] = axapi_enabled_disabled(item['status']) - else: - item['status'] = 1 - - # ensure the service_group field is at least present - if 'service_group' not in item: - 
item['service_group'] = '' - -def main(): - argument_spec = a10_argument_spec() - argument_spec.update(url_argument_spec()) - argument_spec.update( - dict( - state=dict(type='str', default='present', choices=['present', 'absent']), - virtual_server=dict(type='str', aliases=['vip', 'virtual'], required=True), - virtual_server_ip=dict(type='str', aliases=['ip', 'address'], required=True), - virtual_server_status=dict(type='str', default='enabled', aliases=['status'], choices=['enabled', 'disabled']), - virtual_server_ports=dict(type='list', required=True), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=False - ) - - host = module.params['host'] - username = module.params['username'] - password = module.params['password'] - state = module.params['state'] - write_config = module.params['write_config'] - slb_virtual = module.params['virtual_server'] - slb_virtual_ip = module.params['virtual_server_ip'] - slb_virtual_status = module.params['virtual_server_status'] - slb_virtual_ports = module.params['virtual_server_ports'] - - if slb_virtual is None: - module.fail_json(msg='virtual_server is required') - - validate_ports(module, slb_virtual_ports) - - axapi_base_url = 'https://%s/services/rest/V2.1/?format=json' % host - session_url = axapi_authenticate(module, axapi_base_url, username, password) - - slb_virtual_data = axapi_call(module, session_url + '&method=slb.virtual_server.search', json.dumps({'name': slb_virtual})) - slb_virtual_exists = not axapi_failure(slb_virtual_data) - - changed = False - if state == 'present': - json_post = { - 'virtual_server': { - 'name': slb_virtual, - 'address': slb_virtual_ip, - 'status': axapi_enabled_disabled(slb_virtual_status), - 'vport_list': slb_virtual_ports, - } - } - - # before creating/updating we need to validate that any - # service groups defined in the ports list exist since - # since the API will still create port definitions for - # them while indicating a failure occurred - 
checked_service_groups = [] - for port in slb_virtual_ports: - if 'service_group' in port and port['service_group'] not in checked_service_groups: - # skip blank service group entries - if port['service_group'] == '': - continue - result = axapi_call(module, session_url + '&method=slb.service_group.search', json.dumps({'name': port['service_group']})) - if axapi_failure(result): - module.fail_json(msg="the service group %s specified in the ports list does not exist" % port['service_group']) - checked_service_groups.append(port['service_group']) - - if not slb_virtual_exists: - result = axapi_call(module, session_url + '&method=slb.virtual_server.create', json.dumps(json_post)) - if axapi_failure(result): - module.fail_json(msg="failed to create the virtual server: %s" % result['response']['err']['msg']) - changed = True - else: - def needs_update(src_ports, dst_ports): - ''' - Checks to determine if the port definitions of the src_ports - array are in or different from those in dst_ports. If there is - a difference, this function returns true, otherwise false. 
- ''' - for src_port in src_ports: - found = False - different = False - for dst_port in dst_ports: - if src_port['port'] == dst_port['port']: - found = True - for valid_field in VALID_PORT_FIELDS: - if src_port[valid_field] != dst_port[valid_field]: - different = True - break - if found or different: - break - if not found or different: - return True - # every port from the src exists in the dst, and none of them were different - return False - - defined_ports = slb_virtual_data.get('virtual_server', {}).get('vport_list', []) - - # we check for a needed update both ways, in case ports - # are missing from either the ones specified by the user - # or from those on the device - if needs_update(defined_ports, slb_virtual_ports) or needs_update(slb_virtual_ports, defined_ports): - result = axapi_call(module, session_url + '&method=slb.virtual_server.update', json.dumps(json_post)) - if axapi_failure(result): - module.fail_json(msg="failed to create the virtual server: %s" % result['response']['err']['msg']) - changed = True - - # if we changed things, get the full info regarding - # the service group for the return data below - if changed: - result = axapi_call(module, session_url + '&method=slb.virtual_server.search', json.dumps({'name': slb_virtual})) - else: - result = slb_virtual_data - elif state == 'absent': - if slb_virtual_exists: - result = axapi_call(module, session_url + '&method=slb.virtual_server.delete', json.dumps({'name': slb_virtual})) - changed = True - else: - result = dict(msg="the virtual server was not present") - - # if the config has changed, save the config unless otherwise requested - if changed and write_config: - write_result = axapi_call(module, session_url + '&method=system.action.write_memory') - if axapi_failure(write_result): - module.fail_json(msg="failed to save the configuration: %s" % write_result['response']['err']['msg']) - - # log out of the session nicely and exit - axapi_call(module, session_url + '&method=session.close') - 
module.exit_json(changed=changed, content=result) - -# standard ansible module imports -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * -from ansible.module_utils.a10 import * - -main() - diff --git a/library/net_infrastructure/bigip_facts b/library/net_infrastructure/bigip_facts deleted file mode 100755 index 99a1e31de6..0000000000 --- a/library/net_infrastructure/bigip_facts +++ /dev/null @@ -1,1670 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Matt Hite -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -DOCUMENTATION = ''' ---- -module: bigip_facts -short_description: "Collect facts from F5 BIG-IP devices" -description: - - "Collect facts from F5 BIG-IP devices via iControl SOAP API" -version_added: "1.6" -author: Matt Hite -notes: - - "Requires BIG-IP software version >= 11.4" - - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" - - "Best run as a local_action in your playbook" - - "Tested with manager and above account privilege level" - -requirements: - - bigsuds -options: - server: - description: - - BIG-IP host - required: true - default: null - choices: [] - aliases: [] - user: - description: - - BIG-IP username - required: true - default: null - choices: [] - aliases: [] - password: - description: - - BIG-IP password - required: true - default: null - choices: [] - aliases: [] - session: - description: - - BIG-IP session support; may be useful to avoid concurrency - issues in certain circumstances. - required: false - default: true - choices: [] - aliases: [] - include: - description: - - Fact category or list of categories to collect - required: true - default: null - choices: ['address_class', 'certificate', 'client_ssl_profile', - 'device_group', 'interface', 'key', 'node', 'pool', 'rule', - 'self_ip', 'software', 'system_info', 'traffic_group', - 'trunk', 'virtual_address', 'virtual_server', 'vlan'] - aliases: [] - filter: - description: - - Shell-style glob matching string used to filter fact keys. Not - applicable for software and system_info fact categories. - required: false - default: null - choices: [] - aliases: [] -''' - -EXAMPLES = ''' - -## playbook task examples: - ---- -# file bigip-test.yml -# ... 
-- hosts: bigip-test - tasks: - - name: Collect BIG-IP facts - local_action: > - bigip_facts - server=lb.mydomain.com - user=admin - password=mysecret - include=interface,vlan - -''' - -try: - import bigsuds - from suds import MethodNotFound -except ImportError: - bigsuds_found = False -else: - bigsuds_found = True - -import fnmatch -import traceback -import re - -# =========================================== -# bigip_facts module specific support methods. -# - -class F5(object): - """F5 iControl class. - - F5 BIG-IP iControl API class. - - Attributes: - api: iControl API instance. - """ - - def __init__(self, host, user, password, session=False): - self.api = bigsuds.BIGIP(hostname=host, username=user, password=password) - if session: - self.start_session() - - def start_session(self): - self.api = self.api.with_session_id() - - def get_api(self): - return self.api - - def set_recursive_query_state(self, state): - self.api.System.Session.set_recursive_query_state(state) - - def get_recursive_query_state(self): - return self.api.System.Session.get_recursive_query_state() - - def enable_recursive_query_state(self): - self.set_recursive_query_state('STATE_ENABLED') - - def disable_recursive_query_state(self): - self.set_recursive_query_state('STATE_DISABLED') - - def set_active_folder(self, folder): - self.api.System.Session.set_active_folder(folder=folder) - - def get_active_folder(self): - return self.api.System.Session.get_active_folder() - - -class Interfaces(object): - """Interfaces class. - - F5 BIG-IP interfaces class. - - Attributes: - api: iControl API instance. - interfaces: A list of BIG-IP interface names. 
- """ - - def __init__(self, api, regex=None): - self.api = api - self.interfaces = api.Networking.Interfaces.get_list() - if regex: - re_filter = re.compile(regex) - self.interfaces = filter(re_filter.search, self.interfaces) - - def get_list(self): - return self.interfaces - - def get_active_media(self): - return self.api.Networking.Interfaces.get_active_media(self.interfaces) - - def get_actual_flow_control(self): - return self.api.Networking.Interfaces.get_actual_flow_control(self.interfaces) - - def get_bundle_state(self): - return self.api.Networking.Interfaces.get_bundle_state(self.interfaces) - - def get_description(self): - return self.api.Networking.Interfaces.get_description(self.interfaces) - - def get_dual_media_state(self): - return self.api.Networking.Interfaces.get_dual_media_state(self.interfaces) - - def get_enabled_state(self): - return self.api.Networking.Interfaces.get_enabled_state(self.interfaces) - - def get_if_index(self): - return self.api.Networking.Interfaces.get_if_index(self.interfaces) - - def get_learning_mode(self): - return self.api.Networking.Interfaces.get_learning_mode(self.interfaces) - - def get_lldp_admin_status(self): - return self.api.Networking.Interfaces.get_lldp_admin_status(self.interfaces) - - def get_lldp_tlvmap(self): - return self.api.Networking.Interfaces.get_lldp_tlvmap(self.interfaces) - - def get_mac_address(self): - return self.api.Networking.Interfaces.get_mac_address(self.interfaces) - - def get_media(self): - return self.api.Networking.Interfaces.get_media(self.interfaces) - - def get_media_option(self): - return self.api.Networking.Interfaces.get_media_option(self.interfaces) - - def get_media_option_sfp(self): - return self.api.Networking.Interfaces.get_media_option_sfp(self.interfaces) - - def get_media_sfp(self): - return self.api.Networking.Interfaces.get_media_sfp(self.interfaces) - - def get_media_speed(self): - return self.api.Networking.Interfaces.get_media_speed(self.interfaces) - - def 
get_media_status(self): - return self.api.Networking.Interfaces.get_media_status(self.interfaces) - - def get_mtu(self): - return self.api.Networking.Interfaces.get_mtu(self.interfaces) - - def get_phy_master_slave_mode(self): - return self.api.Networking.Interfaces.get_phy_master_slave_mode(self.interfaces) - - def get_prefer_sfp_state(self): - return self.api.Networking.Interfaces.get_prefer_sfp_state(self.interfaces) - - def get_flow_control(self): - return self.api.Networking.Interfaces.get_requested_flow_control(self.interfaces) - - def get_sflow_poll_interval(self): - return self.api.Networking.Interfaces.get_sflow_poll_interval(self.interfaces) - - def get_sflow_poll_interval_global(self): - return self.api.Networking.Interfaces.get_sflow_poll_interval_global(self.interfaces) - - def get_sfp_media_state(self): - return self.api.Networking.Interfaces.get_sfp_media_state(self.interfaces) - - def get_stp_active_edge_port_state(self): - return self.api.Networking.Interfaces.get_stp_active_edge_port_state(self.interfaces) - - def get_stp_enabled_state(self): - return self.api.Networking.Interfaces.get_stp_enabled_state(self.interfaces) - - def get_stp_link_type(self): - return self.api.Networking.Interfaces.get_stp_link_type(self.interfaces) - - def get_stp_protocol_detection_reset_state(self): - return self.api.Networking.Interfaces.get_stp_protocol_detection_reset_state(self.interfaces) - - -class SelfIPs(object): - """Self IPs class. - - F5 BIG-IP Self IPs class. - - Attributes: - api: iControl API instance. - self_ips: List of self IPs. 
- """ - - def __init__(self, api, regex=None): - self.api = api - self.self_ips = api.Networking.SelfIPV2.get_list() - if regex: - re_filter = re.compile(regex) - self.self_ips = filter(re_filter.search, self.self_ips) - - def get_list(self): - return self.self_ips - - def get_address(self): - return self.api.Networking.SelfIPV2.get_address(self.self_ips) - - def get_allow_access_list(self): - return self.api.Networking.SelfIPV2.get_allow_access_list(self.self_ips) - - def get_description(self): - return self.api.Networking.SelfIPV2.get_description(self.self_ips) - - def get_enforced_firewall_policy(self): - return self.api.Networking.SelfIPV2.get_enforced_firewall_policy(self.self_ips) - - def get_floating_state(self): - return self.api.Networking.SelfIPV2.get_floating_state(self.self_ips) - - def get_fw_rule(self): - return self.api.Networking.SelfIPV2.get_fw_rule(self.self_ips) - - def get_netmask(self): - return self.api.Networking.SelfIPV2.get_netmask(self.self_ips) - - def get_staged_firewall_policy(self): - return self.api.Networking.SelfIPV2.get_staged_firewall_policy(self.self_ips) - - def get_traffic_group(self): - return self.api.Networking.SelfIPV2.get_traffic_group(self.self_ips) - - def get_vlan(self): - return self.api.Networking.SelfIPV2.get_vlan(self.self_ips) - - def get_is_traffic_group_inherited(self): - return self.api.Networking.SelfIPV2.is_traffic_group_inherited(self.self_ips) - - -class Trunks(object): - """Trunks class. - - F5 BIG-IP trunks class. - - Attributes: - api: iControl API instance. - trunks: List of trunks. 
- """ - - def __init__(self, api, regex=None): - self.api = api - self.trunks = api.Networking.Trunk.get_list() - if regex: - re_filter = re.compile(regex) - self.trunks = filter(re_filter.search, self.trunks) - - def get_list(self): - return self.trunks - - def get_active_lacp_state(self): - return self.api.Networking.Trunk.get_active_lacp_state(self.trunks) - - def get_configured_member_count(self): - return self.api.Networking.Trunk.get_configured_member_count(self.trunks) - - def get_description(self): - return self.api.Networking.Trunk.get_description(self.trunks) - - def get_distribution_hash_option(self): - return self.api.Networking.Trunk.get_distribution_hash_option(self.trunks) - - def get_interface(self): - return self.api.Networking.Trunk.get_interface(self.trunks) - - def get_lacp_enabled_state(self): - return self.api.Networking.Trunk.get_lacp_enabled_state(self.trunks) - - def get_lacp_timeout_option(self): - return self.api.Networking.Trunk.get_lacp_timeout_option(self.trunks) - - def get_link_selection_policy(self): - return self.api.Networking.Trunk.get_link_selection_policy(self.trunks) - - def get_media_speed(self): - return self.api.Networking.Trunk.get_media_speed(self.trunks) - - def get_media_status(self): - return self.api.Networking.Trunk.get_media_status(self.trunks) - - def get_operational_member_count(self): - return self.api.Networking.Trunk.get_operational_member_count(self.trunks) - - def get_stp_enabled_state(self): - return self.api.Networking.Trunk.get_stp_enabled_state(self.trunks) - - def get_stp_protocol_detection_reset_state(self): - return self.api.Networking.Trunk.get_stp_protocol_detection_reset_state(self.trunks) - - -class Vlans(object): - """Vlans class. - - F5 BIG-IP Vlans class. - - Attributes: - api: iControl API instance. - vlans: List of VLANs. 
- """ - - def __init__(self, api, regex=None): - self.api = api - self.vlans = api.Networking.VLAN.get_list() - if regex: - re_filter = re.compile(regex) - self.vlans = filter(re_filter.search, self.vlans) - - def get_list(self): - return self.vlans - - def get_auto_lasthop(self): - return self.api.Networking.VLAN.get_auto_lasthop(self.vlans) - - def get_cmp_hash_algorithm(self): - return self.api.Networking.VLAN.get_cmp_hash_algorithm(self.vlans) - - def get_description(self): - return self.api.Networking.VLAN.get_description(self.vlans) - - def get_dynamic_forwarding(self): - return self.api.Networking.VLAN.get_dynamic_forwarding(self.vlans) - - def get_failsafe_action(self): - return self.api.Networking.VLAN.get_failsafe_action(self.vlans) - - def get_failsafe_state(self): - return self.api.Networking.VLAN.get_failsafe_state(self.vlans) - - def get_failsafe_timeout(self): - return self.api.Networking.VLAN.get_failsafe_timeout(self.vlans) - - def get_if_index(self): - return self.api.Networking.VLAN.get_if_index(self.vlans) - - def get_learning_mode(self): - return self.api.Networking.VLAN.get_learning_mode(self.vlans) - - def get_mac_masquerade_address(self): - return self.api.Networking.VLAN.get_mac_masquerade_address(self.vlans) - - def get_member(self): - return self.api.Networking.VLAN.get_member(self.vlans) - - def get_mtu(self): - return self.api.Networking.VLAN.get_mtu(self.vlans) - - def get_sflow_poll_interval(self): - return self.api.Networking.VLAN.get_sflow_poll_interval(self.vlans) - - def get_sflow_poll_interval_global(self): - return self.api.Networking.VLAN.get_sflow_poll_interval_global(self.vlans) - - def get_sflow_sampling_rate(self): - return self.api.Networking.VLAN.get_sflow_sampling_rate(self.vlans) - - def get_sflow_sampling_rate_global(self): - return self.api.Networking.VLAN.get_sflow_sampling_rate_global(self.vlans) - - def get_source_check_state(self): - return self.api.Networking.VLAN.get_source_check_state(self.vlans) - - def 
get_true_mac_address(self): - return self.api.Networking.VLAN.get_true_mac_address(self.vlans) - - def get_vlan_id(self): - return self.api.Networking.VLAN.get_vlan_id(self.vlans) - - -class Software(object): - """Software class. - - F5 BIG-IP software class. - - Attributes: - api: iControl API instance. - """ - - def __init__(self, api): - self.api = api - - def get_all_software_status(self): - return self.api.System.SoftwareManagement.get_all_software_status() - - -class VirtualServers(object): - """Virtual servers class. - - F5 BIG-IP virtual servers class. - - Attributes: - api: iControl API instance. - virtual_servers: List of virtual servers. - """ - - def __init__(self, api, regex=None): - self.api = api - self.virtual_servers = api.LocalLB.VirtualServer.get_list() - if regex: - re_filter = re.compile(regex) - self.virtual_servers = filter(re_filter.search, self.virtual_servers) - - def get_list(self): - return self.virtual_servers - - def get_actual_hardware_acceleration(self): - return self.api.LocalLB.VirtualServer.get_actual_hardware_acceleration(self.virtual_servers) - - def get_authentication_profile(self): - return self.api.LocalLB.VirtualServer.get_authentication_profile(self.virtual_servers) - - def get_auto_lasthop(self): - return self.api.LocalLB.VirtualServer.get_auto_lasthop(self.virtual_servers) - - def get_bw_controller_policy(self): - return self.api.LocalLB.VirtualServer.get_bw_controller_policy(self.virtual_servers) - - def get_clone_pool(self): - return self.api.LocalLB.VirtualServer.get_clone_pool(self.virtual_servers) - - def get_cmp_enable_mode(self): - return self.api.LocalLB.VirtualServer.get_cmp_enable_mode(self.virtual_servers) - - def get_connection_limit(self): - return self.api.LocalLB.VirtualServer.get_connection_limit(self.virtual_servers) - - def get_connection_mirror_state(self): - return self.api.LocalLB.VirtualServer.get_connection_mirror_state(self.virtual_servers) - - def get_default_pool_name(self): - return 
self.api.LocalLB.VirtualServer.get_default_pool_name(self.virtual_servers) - - def get_description(self): - return self.api.LocalLB.VirtualServer.get_description(self.virtual_servers) - - def get_destination(self): - return self.api.LocalLB.VirtualServer.get_destination_v2(self.virtual_servers) - - def get_enabled_state(self): - return self.api.LocalLB.VirtualServer.get_enabled_state(self.virtual_servers) - - def get_enforced_firewall_policy(self): - return self.api.LocalLB.VirtualServer.get_enforced_firewall_policy(self.virtual_servers) - - def get_fallback_persistence_profile(self): - return self.api.LocalLB.VirtualServer.get_fallback_persistence_profile(self.virtual_servers) - - def get_fw_rule(self): - return self.api.LocalLB.VirtualServer.get_fw_rule(self.virtual_servers) - - def get_gtm_score(self): - return self.api.LocalLB.VirtualServer.get_gtm_score(self.virtual_servers) - - def get_last_hop_pool(self): - return self.api.LocalLB.VirtualServer.get_last_hop_pool(self.virtual_servers) - - def get_nat64_state(self): - return self.api.LocalLB.VirtualServer.get_nat64_state(self.virtual_servers) - - def get_object_status(self): - return self.api.LocalLB.VirtualServer.get_object_status(self.virtual_servers) - - def get_persistence_profile(self): - return self.api.LocalLB.VirtualServer.get_persistence_profile(self.virtual_servers) - - def get_profile(self): - return self.api.LocalLB.VirtualServer.get_profile(self.virtual_servers) - - def get_protocol(self): - return self.api.LocalLB.VirtualServer.get_protocol(self.virtual_servers) - - def get_rate_class(self): - return self.api.LocalLB.VirtualServer.get_rate_class(self.virtual_servers) - - def get_rate_limit(self): - return self.api.LocalLB.VirtualServer.get_rate_limit(self.virtual_servers) - - def get_rate_limit_destination_mask(self): - return self.api.LocalLB.VirtualServer.get_rate_limit_destination_mask(self.virtual_servers) - - def get_rate_limit_mode(self): - return 
self.api.LocalLB.VirtualServer.get_rate_limit_mode(self.virtual_servers) - - def get_rate_limit_source_mask(self): - return self.api.LocalLB.VirtualServer.get_rate_limit_source_mask(self.virtual_servers) - - def get_related_rule(self): - return self.api.LocalLB.VirtualServer.get_related_rule(self.virtual_servers) - - def get_rule(self): - return self.api.LocalLB.VirtualServer.get_rule(self.virtual_servers) - - def get_security_log_profile(self): - return self.api.LocalLB.VirtualServer.get_security_log_profile(self.virtual_servers) - - def get_snat_pool(self): - return self.api.LocalLB.VirtualServer.get_snat_pool(self.virtual_servers) - - def get_snat_type(self): - return self.api.LocalLB.VirtualServer.get_snat_type(self.virtual_servers) - - def get_source_address(self): - return self.api.LocalLB.VirtualServer.get_source_address(self.virtual_servers) - - def get_source_address_translation_lsn_pool(self): - return self.api.LocalLB.VirtualServer.get_source_address_translation_lsn_pool(self.virtual_servers) - - def get_source_address_translation_snat_pool(self): - return self.api.LocalLB.VirtualServer.get_source_address_translation_snat_pool(self.virtual_servers) - - def get_source_address_translation_type(self): - return self.api.LocalLB.VirtualServer.get_source_address_translation_type(self.virtual_servers) - - def get_source_port_behavior(self): - return self.api.LocalLB.VirtualServer.get_source_port_behavior(self.virtual_servers) - - def get_staged_firewall_policy(self): - return self.api.LocalLB.VirtualServer.get_staged_firewall_policy(self.virtual_servers) - - def get_translate_address_state(self): - return self.api.LocalLB.VirtualServer.get_translate_address_state(self.virtual_servers) - - def get_translate_port_state(self): - return self.api.LocalLB.VirtualServer.get_translate_port_state(self.virtual_servers) - - def get_type(self): - return self.api.LocalLB.VirtualServer.get_type(self.virtual_servers) - - def get_vlan(self): - return 
self.api.LocalLB.VirtualServer.get_vlan(self.virtual_servers) - - def get_wildmask(self): - return self.api.LocalLB.VirtualServer.get_wildmask(self.virtual_servers) - - -class Pools(object): - """Pools class. - - F5 BIG-IP pools class. - - Attributes: - api: iControl API instance. - pool_names: List of pool names. - """ - - def __init__(self, api, regex=None): - self.api = api - self.pool_names = api.LocalLB.Pool.get_list() - if regex: - re_filter = re.compile(regex) - self.pool_names = filter(re_filter.search, self.pool_names) - - def get_list(self): - return self.pool_names - - def get_action_on_service_down(self): - return self.api.LocalLB.Pool.get_action_on_service_down(self.pool_names) - - def get_active_member_count(self): - return self.api.LocalLB.Pool.get_active_member_count(self.pool_names) - - def get_aggregate_dynamic_ratio(self): - return self.api.LocalLB.Pool.get_aggregate_dynamic_ratio(self.pool_names) - - def get_allow_nat_state(self): - return self.api.LocalLB.Pool.get_allow_nat_state(self.pool_names) - - def get_allow_snat_state(self): - return self.api.LocalLB.Pool.get_allow_snat_state(self.pool_names) - - def get_client_ip_tos(self): - return self.api.LocalLB.Pool.get_client_ip_tos(self.pool_names) - - def get_client_link_qos(self): - return self.api.LocalLB.Pool.get_client_link_qos(self.pool_names) - - def get_description(self): - return self.api.LocalLB.Pool.get_description(self.pool_names) - - def get_gateway_failsafe_device(self): - return self.api.LocalLB.Pool.get_gateway_failsafe_device(self.pool_names) - - def get_ignore_persisted_weight_state(self): - return self.api.LocalLB.Pool.get_ignore_persisted_weight_state(self.pool_names) - - def get_lb_method(self): - return self.api.LocalLB.Pool.get_lb_method(self.pool_names) - - def get_member(self): - return self.api.LocalLB.Pool.get_member_v2(self.pool_names) - - def get_minimum_active_member(self): - return self.api.LocalLB.Pool.get_minimum_active_member(self.pool_names) - - def 
get_minimum_up_member(self): - return self.api.LocalLB.Pool.get_minimum_up_member(self.pool_names) - - def get_minimum_up_member_action(self): - return self.api.LocalLB.Pool.get_minimum_up_member_action(self.pool_names) - - def get_minimum_up_member_enabled_state(self): - return self.api.LocalLB.Pool.get_minimum_up_member_enabled_state(self.pool_names) - - def get_monitor_association(self): - return self.api.LocalLB.Pool.get_monitor_association(self.pool_names) - - def get_monitor_instance(self): - return self.api.LocalLB.Pool.get_monitor_instance(self.pool_names) - - def get_object_status(self): - return self.api.LocalLB.Pool.get_object_status(self.pool_names) - - def get_profile(self): - return self.api.LocalLB.Pool.get_profile(self.pool_names) - - def get_queue_depth_limit(self): - return self.api.LocalLB.Pool.get_queue_depth_limit(self.pool_names) - - def get_queue_on_connection_limit_state(self): - return self.api.LocalLB.Pool.get_queue_on_connection_limit_state(self.pool_names) - - def get_queue_time_limit(self): - return self.api.LocalLB.Pool.get_queue_time_limit(self.pool_names) - - def get_reselect_tries(self): - return self.api.LocalLB.Pool.get_reselect_tries(self.pool_names) - - def get_server_ip_tos(self): - return self.api.LocalLB.Pool.get_server_ip_tos(self.pool_names) - - def get_server_link_qos(self): - return self.api.LocalLB.Pool.get_server_link_qos(self.pool_names) - - def get_simple_timeout(self): - return self.api.LocalLB.Pool.get_simple_timeout(self.pool_names) - - def get_slow_ramp_time(self): - return self.api.LocalLB.Pool.get_slow_ramp_time(self.pool_names) - - -class Devices(object): - """Devices class. - - F5 BIG-IP devices class. - - Attributes: - api: iControl API instance. - devices: List of devices. 
- """ - - def __init__(self, api, regex=None): - self.api = api - self.devices = api.Management.Device.get_list() - if regex: - re_filter = re.compile(regex) - self.devices = filter(re_filter.search, self.devices) - - def get_list(self): - return self.devices - - def get_active_modules(self): - return self.api.Management.Device.get_active_modules(self.devices) - - def get_base_mac_address(self): - return self.api.Management.Device.get_base_mac_address(self.devices) - - def get_blade_addresses(self): - return self.api.Management.Device.get_blade_addresses(self.devices) - - def get_build(self): - return self.api.Management.Device.get_build(self.devices) - - def get_chassis_id(self): - return self.api.Management.Device.get_chassis_id(self.devices) - - def get_chassis_type(self): - return self.api.Management.Device.get_chassis_type(self.devices) - - def get_comment(self): - return self.api.Management.Device.get_comment(self.devices) - - def get_configsync_address(self): - return self.api.Management.Device.get_configsync_address(self.devices) - - def get_contact(self): - return self.api.Management.Device.get_contact(self.devices) - - def get_description(self): - return self.api.Management.Device.get_description(self.devices) - - def get_edition(self): - return self.api.Management.Device.get_edition(self.devices) - - def get_failover_state(self): - return self.api.Management.Device.get_failover_state(self.devices) - - def get_local_device(self): - return self.api.Management.Device.get_local_device() - - def get_hostname(self): - return self.api.Management.Device.get_hostname(self.devices) - - def get_inactive_modules(self): - return self.api.Management.Device.get_inactive_modules(self.devices) - - def get_location(self): - return self.api.Management.Device.get_location(self.devices) - - def get_management_address(self): - return self.api.Management.Device.get_management_address(self.devices) - - def get_marketing_name(self): - return 
self.api.Management.Device.get_marketing_name(self.devices) - - def get_multicast_address(self): - return self.api.Management.Device.get_multicast_address(self.devices) - - def get_optional_modules(self): - return self.api.Management.Device.get_optional_modules(self.devices) - - def get_platform_id(self): - return self.api.Management.Device.get_platform_id(self.devices) - - def get_primary_mirror_address(self): - return self.api.Management.Device.get_primary_mirror_address(self.devices) - - def get_product(self): - return self.api.Management.Device.get_product(self.devices) - - def get_secondary_mirror_address(self): - return self.api.Management.Device.get_secondary_mirror_address(self.devices) - - def get_software_version(self): - return self.api.Management.Device.get_software_version(self.devices) - - def get_timelimited_modules(self): - return self.api.Management.Device.get_timelimited_modules(self.devices) - - def get_timezone(self): - return self.api.Management.Device.get_timezone(self.devices) - - def get_unicast_addresses(self): - return self.api.Management.Device.get_unicast_addresses(self.devices) - - -class DeviceGroups(object): - """Device groups class. - - F5 BIG-IP device groups class. - - Attributes: - api: iControl API instance. - device_groups: List of device groups. 
- """ - - def __init__(self, api, regex=None): - self.api = api - self.device_groups = api.Management.DeviceGroup.get_list() - if regex: - re_filter = re.compile(regex) - self.device_groups = filter(re_filter.search, self.device_groups) - - def get_list(self): - return self.device_groups - - def get_all_preferred_active(self): - return self.api.Management.DeviceGroup.get_all_preferred_active(self.device_groups) - - def get_autosync_enabled_state(self): - return self.api.Management.DeviceGroup.get_autosync_enabled_state(self.device_groups) - - def get_description(self): - return self.api.Management.DeviceGroup.get_description(self.device_groups) - - def get_device(self): - return self.api.Management.DeviceGroup.get_device(self.device_groups) - - def get_full_load_on_sync_state(self): - return self.api.Management.DeviceGroup.get_full_load_on_sync_state(self.device_groups) - - def get_incremental_config_sync_size_maximum(self): - return self.api.Management.DeviceGroup.get_incremental_config_sync_size_maximum(self.device_groups) - - def get_network_failover_enabled_state(self): - return self.api.Management.DeviceGroup.get_network_failover_enabled_state(self.device_groups) - - def get_sync_status(self): - return self.api.Management.DeviceGroup.get_sync_status(self.device_groups) - - def get_type(self): - return self.api.Management.DeviceGroup.get_type(self.device_groups) - - -class TrafficGroups(object): - """Traffic groups class. - - F5 BIG-IP traffic groups class. - - Attributes: - api: iControl API instance. - traffic_groups: List of traffic groups. 
- """ - - def __init__(self, api, regex=None): - self.api = api - self.traffic_groups = api.Management.TrafficGroup.get_list() - if regex: - re_filter = re.compile(regex) - self.traffic_groups = filter(re_filter.search, self.traffic_groups) - - def get_list(self): - return self.traffic_groups - - def get_auto_failback_enabled_state(self): - return self.api.Management.TrafficGroup.get_auto_failback_enabled_state(self.traffic_groups) - - def get_auto_failback_time(self): - return self.api.Management.TrafficGroup.get_auto_failback_time(self.traffic_groups) - - def get_default_device(self): - return self.api.Management.TrafficGroup.get_default_device(self.traffic_groups) - - def get_description(self): - return self.api.Management.TrafficGroup.get_description(self.traffic_groups) - - def get_ha_load_factor(self): - return self.api.Management.TrafficGroup.get_ha_load_factor(self.traffic_groups) - - def get_ha_order(self): - return self.api.Management.TrafficGroup.get_ha_order(self.traffic_groups) - - def get_is_floating(self): - return self.api.Management.TrafficGroup.get_is_floating(self.traffic_groups) - - def get_mac_masquerade_address(self): - return self.api.Management.TrafficGroup.get_mac_masquerade_address(self.traffic_groups) - - def get_unit_id(self): - return self.api.Management.TrafficGroup.get_unit_id(self.traffic_groups) - - -class Rules(object): - """Rules class. - - F5 BIG-IP iRules class. - - Attributes: - api: iControl API instance. - rules: List of iRules. 
- """ - - def __init__(self, api, regex=None): - self.api = api - self.rules = api.LocalLB.Rule.get_list() - if regex: - re_filter = re.compile(regex) - self.traffic_groups = filter(re_filter.search, self.rules) - - def get_list(self): - return self.rules - - def get_description(self): - return self.api.LocalLB.Rule.get_description(rule_names=self.rules) - - def get_ignore_vertification(self): - return self.api.LocalLB.Rule.get_ignore_vertification(rule_names=self.rules) - - def get_verification_status(self): - return self.api.LocalLB.Rule.get_verification_status_v2(rule_names=self.rules) - - def get_definition(self): - return [x['rule_definition'] for x in self.api.LocalLB.Rule.query_rule(rule_names=self.rules)] - -class Nodes(object): - """Nodes class. - - F5 BIG-IP nodes class. - - Attributes: - api: iControl API instance. - nodes: List of nodes. - """ - - def __init__(self, api, regex=None): - self.api = api - self.nodes = api.LocalLB.NodeAddressV2.get_list() - if regex: - re_filter = re.compile(regex) - self.nodes = filter(re_filter.search, self.nodes) - - def get_list(self): - return self.nodes - - def get_address(self): - return self.api.LocalLB.NodeAddressV2.get_address(nodes=self.nodes) - - def get_connection_limit(self): - return self.api.LocalLB.NodeAddressV2.get_connection_limit(nodes=self.nodes) - - def get_description(self): - return self.api.LocalLB.NodeAddressV2.get_description(nodes=self.nodes) - - def get_dynamic_ratio(self): - return self.api.LocalLB.NodeAddressV2.get_dynamic_ratio_v2(nodes=self.nodes) - - def get_monitor_instance(self): - return self.api.LocalLB.NodeAddressV2.get_monitor_instance(nodes=self.nodes) - - def get_monitor_rule(self): - return self.api.LocalLB.NodeAddressV2.get_monitor_rule(nodes=self.nodes) - - def get_monitor_status(self): - return self.api.LocalLB.NodeAddressV2.get_monitor_status(nodes=self.nodes) - - def get_object_status(self): - return self.api.LocalLB.NodeAddressV2.get_object_status(nodes=self.nodes) - - def 
get_rate_limit(self): - return self.api.LocalLB.NodeAddressV2.get_rate_limit(nodes=self.nodes) - - def get_ratio(self): - return self.api.LocalLB.NodeAddressV2.get_ratio(nodes=self.nodes) - - def get_session_status(self): - return self.api.LocalLB.NodeAddressV2.get_session_status(nodes=self.nodes) - - -class VirtualAddresses(object): - """Virtual addresses class. - - F5 BIG-IP virtual addresses class. - - Attributes: - api: iControl API instance. - virtual_addresses: List of virtual addresses. - """ - - def __init__(self, api, regex=None): - self.api = api - self.virtual_addresses = api.LocalLB.VirtualAddressV2.get_list() - if regex: - re_filter = re.compile(regex) - self.virtual_addresses = filter(re_filter.search, self.virtual_addresses) - - def get_list(self): - return self.virtual_addresses - - def get_address(self): - return self.api.LocalLB.VirtualAddressV2.get_address(self.virtual_addresses) - - def get_arp_state(self): - return self.api.LocalLB.VirtualAddressV2.get_arp_state(self.virtual_addresses) - - def get_auto_delete_state(self): - return self.api.LocalLB.VirtualAddressV2.get_auto_delete_state(self.virtual_addresses) - - def get_connection_limit(self): - return self.api.LocalLB.VirtualAddressV2.get_connection_limit(self.virtual_addresses) - - def get_description(self): - return self.api.LocalLB.VirtualAddressV2.get_description(self.virtual_addresses) - - def get_enabled_state(self): - return self.api.LocalLB.VirtualAddressV2.get_enabled_state(self.virtual_addresses) - - def get_icmp_echo_state(self): - return self.api.LocalLB.VirtualAddressV2.get_icmp_echo_state(self.virtual_addresses) - - def get_is_floating_state(self): - return self.api.LocalLB.VirtualAddressV2.get_is_floating_state(self.virtual_addresses) - - def get_netmask(self): - return self.api.LocalLB.VirtualAddressV2.get_netmask(self.virtual_addresses) - - def get_object_status(self): - return self.api.LocalLB.VirtualAddressV2.get_object_status(self.virtual_addresses) - - def 
get_route_advertisement_state(self): - return self.api.LocalLB.VirtualAddressV2.get_route_advertisement_state(self.virtual_addresses) - - def get_traffic_group(self): - return self.api.LocalLB.VirtualAddressV2.get_traffic_group(self.virtual_addresses) - - -class AddressClasses(object): - """Address group/class class. - - F5 BIG-IP address group/class class. - - Attributes: - api: iControl API instance. - address_classes: List of address classes. - """ - - def __init__(self, api, regex=None): - self.api = api - self.address_classes = api.LocalLB.Class.get_address_class_list() - if regex: - re_filter = re.compile(regex) - self.address_classes = filter(re_filter.search, self.address_classes) - - def get_list(self): - return self.address_classes - - def get_address_class(self): - key = self.api.LocalLB.Class.get_address_class(self.address_classes) - value = self.api.LocalLB.Class.get_address_class_member_data_value(key) - result = map(zip, [x['members'] for x in key], value) - return result - - def get_description(self): - return self.api.LocalLB.Class.get_description(self.address_classes) - - -class Certificates(object): - """Certificates class. - - F5 BIG-IP certificates class. - - Attributes: - api: iControl API instance. - certificates: List of certificate identifiers. - certificate_list: List of certificate information structures. 
- """ - - def __init__(self, api, regex=None, mode="MANAGEMENT_MODE_DEFAULT"): - self.api = api - self.certificate_list = api.Management.KeyCertificate.get_certificate_list(mode=mode) - self.certificates = [x['certificate']['cert_info']['id'] for x in self.certificate_list] - if regex: - re_filter = re.compile(regex) - self.certificates = filter(re_filter.search, self.certificates) - self.certificate_list = [x for x in self.certificate_list if x['certificate']['cert_info']['id'] in self.certificates] - - def get_list(self): - return self.certificates - - def get_certificate_list(self): - return self.certificate_list - - -class Keys(object): - """Keys class. - - F5 BIG-IP keys class. - - Attributes: - api: iControl API instance. - keys: List of key identifiers. - key_list: List of key information structures. - """ - - def __init__(self, api, regex=None, mode="MANAGEMENT_MODE_DEFAULT"): - self.api = api - self.key_list = api.Management.KeyCertificate.get_key_list(mode=mode) - self.keys = [x['key_info']['id'] for x in self.key_list] - if regex: - re_filter = re.compile(regex) - self.keys = filter(re_filter.search, self.keys) - self.key_list = [x for x in self.key_list if x['key_info']['id'] in self.keys] - - def get_list(self): - return self.keys - - def get_key_list(self): - return self.key_list - - -class ProfileClientSSL(object): - """Client SSL profiles class. - - F5 BIG-IP client SSL profiles class. - - Attributes: - api: iControl API instance. - profiles: List of client SSL profiles. 
- """ - - def __init__(self, api, regex=None): - self.api = api - self.profiles = api.LocalLB.ProfileClientSSL.get_list() - if regex: - re_filter = re.compile(regex) - self.profiles = filter(re_filter.search, self.profiles) - - def get_list(self): - return self.profiles - - def get_alert_timeout(self): - return self.api.LocalLB.ProfileClientSSL.get_alert_timeout(self.profiles) - - def get_allow_nonssl_state(self): - return self.api.LocalLB.ProfileClientSSL.get_allow_nonssl_state(self.profiles) - - def get_authenticate_depth(self): - return self.api.LocalLB.ProfileClientSSL.get_authenticate_depth(self.profiles) - - def get_authenticate_once_state(self): - return self.api.LocalLB.ProfileClientSSL.get_authenticate_once_state(self.profiles) - - def get_ca_file(self): - return self.api.LocalLB.ProfileClientSSL.get_ca_file_v2(self.profiles) - - def get_cache_size(self): - return self.api.LocalLB.ProfileClientSSL.get_cache_size(self.profiles) - - def get_cache_timeout(self): - return self.api.LocalLB.ProfileClientSSL.get_cache_timeout(self.profiles) - - def get_certificate_file(self): - return self.api.LocalLB.ProfileClientSSL.get_certificate_file_v2(self.profiles) - - def get_chain_file(self): - return self.api.LocalLB.ProfileClientSSL.get_chain_file_v2(self.profiles) - - def get_cipher_list(self): - return self.api.LocalLB.ProfileClientSSL.get_cipher_list(self.profiles) - - def get_client_certificate_ca_file(self): - return self.api.LocalLB.ProfileClientSSL.get_client_certificate_ca_file_v2(self.profiles) - - def get_crl_file(self): - return self.api.LocalLB.ProfileClientSSL.get_crl_file_v2(self.profiles) - - def get_default_profile(self): - return self.api.LocalLB.ProfileClientSSL.get_default_profile(self.profiles) - - def get_description(self): - return self.api.LocalLB.ProfileClientSSL.get_description(self.profiles) - - def get_forward_proxy_ca_certificate_file(self): - return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_ca_certificate_file(self.profiles) - - 
def get_forward_proxy_ca_key_file(self): - return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_ca_key_file(self.profiles) - - def get_forward_proxy_ca_passphrase(self): - return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_ca_passphrase(self.profiles) - - def get_forward_proxy_certificate_extension_include(self): - return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_certificate_extension_include(self.profiles) - - def get_forward_proxy_certificate_lifespan(self): - return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_certificate_lifespan(self.profiles) - - def get_forward_proxy_enabled_state(self): - return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_enabled_state(self.profiles) - - def get_forward_proxy_lookup_by_ipaddr_port_state(self): - return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_lookup_by_ipaddr_port_state(self.profiles) - - def get_handshake_timeout(self): - return self.api.LocalLB.ProfileClientSSL.get_handshake_timeout(self.profiles) - - def get_key_file(self): - return self.api.LocalLB.ProfileClientSSL.get_key_file_v2(self.profiles) - - def get_modssl_emulation_state(self): - return self.api.LocalLB.ProfileClientSSL.get_modssl_emulation_state(self.profiles) - - def get_passphrase(self): - return self.api.LocalLB.ProfileClientSSL.get_passphrase(self.profiles) - - def get_peer_certification_mode(self): - return self.api.LocalLB.ProfileClientSSL.get_peer_certification_mode(self.profiles) - - def get_profile_mode(self): - return self.api.LocalLB.ProfileClientSSL.get_profile_mode(self.profiles) - - def get_renegotiation_maximum_record_delay(self): - return self.api.LocalLB.ProfileClientSSL.get_renegotiation_maximum_record_delay(self.profiles) - - def get_renegotiation_period(self): - return self.api.LocalLB.ProfileClientSSL.get_renegotiation_period(self.profiles) - - def get_renegotiation_state(self): - return self.api.LocalLB.ProfileClientSSL.get_renegotiation_state(self.profiles) - - def 
get_renegotiation_throughput(self): - return self.api.LocalLB.ProfileClientSSL.get_renegotiation_throughput(self.profiles) - - def get_retain_certificate_state(self): - return self.api.LocalLB.ProfileClientSSL.get_retain_certificate_state(self.profiles) - - def get_secure_renegotiation_mode(self): - return self.api.LocalLB.ProfileClientSSL.get_secure_renegotiation_mode(self.profiles) - - def get_server_name(self): - return self.api.LocalLB.ProfileClientSSL.get_server_name(self.profiles) - - def get_session_ticket_state(self): - return self.api.LocalLB.ProfileClientSSL.get_session_ticket_state(self.profiles) - - def get_sni_default_state(self): - return self.api.LocalLB.ProfileClientSSL.get_sni_default_state(self.profiles) - - def get_sni_require_state(self): - return self.api.LocalLB.ProfileClientSSL.get_sni_require_state(self.profiles) - - def get_ssl_option(self): - return self.api.LocalLB.ProfileClientSSL.get_ssl_option(self.profiles) - - def get_strict_resume_state(self): - return self.api.LocalLB.ProfileClientSSL.get_strict_resume_state(self.profiles) - - def get_unclean_shutdown_state(self): - return self.api.LocalLB.ProfileClientSSL.get_unclean_shutdown_state(self.profiles) - - def get_is_base_profile(self): - return self.api.LocalLB.ProfileClientSSL.is_base_profile(self.profiles) - - def get_is_system_profile(self): - return self.api.LocalLB.ProfileClientSSL.is_system_profile(self.profiles) - - -class SystemInfo(object): - """System information class. - - F5 BIG-IP system information class. - - Attributes: - api: iControl API instance. 
- """ - - def __init__(self, api): - self.api = api - - def get_base_mac_address(self): - return self.api.System.SystemInfo.get_base_mac_address() - - def get_blade_temperature(self): - return self.api.System.SystemInfo.get_blade_temperature() - - def get_chassis_slot_information(self): - return self.api.System.SystemInfo.get_chassis_slot_information() - - def get_globally_unique_identifier(self): - return self.api.System.SystemInfo.get_globally_unique_identifier() - - def get_group_id(self): - return self.api.System.SystemInfo.get_group_id() - - def get_hardware_information(self): - return self.api.System.SystemInfo.get_hardware_information() - - def get_marketing_name(self): - return self.api.System.SystemInfo.get_marketing_name() - - def get_product_information(self): - return self.api.System.SystemInfo.get_product_information() - - def get_pva_version(self): - return self.api.System.SystemInfo.get_pva_version() - - def get_system_id(self): - return self.api.System.SystemInfo.get_system_id() - - def get_system_information(self): - return self.api.System.SystemInfo.get_system_information() - - def get_time(self): - return self.api.System.SystemInfo.get_time() - - def get_time_zone(self): - return self.api.System.SystemInfo.get_time_zone() - - def get_uptime(self): - return self.api.System.SystemInfo.get_uptime() - - -def generate_dict(api_obj, fields): - result_dict = {} - lists = [] - supported_fields = [] - if api_obj.get_list(): - for field in fields: - try: - api_response = getattr(api_obj, "get_" + field)() - except MethodNotFound: - pass - else: - lists.append(api_response) - supported_fields.append(field) - for i, j in enumerate(api_obj.get_list()): - temp = {} - temp.update([(item[0], item[1][i]) for item in zip(supported_fields, lists)]) - result_dict[j] = temp - return result_dict - -def generate_simple_dict(api_obj, fields): - result_dict = {} - for field in fields: - try: - api_response = getattr(api_obj, "get_" + field)() - except MethodNotFound: - 
pass - else: - result_dict[field] = api_response - return result_dict - -def generate_interface_dict(f5, regex): - interfaces = Interfaces(f5.get_api(), regex) - fields = ['active_media', 'actual_flow_control', 'bundle_state', - 'description', 'dual_media_state', 'enabled_state', 'if_index', - 'learning_mode', 'lldp_admin_status', 'lldp_tlvmap', - 'mac_address', 'media', 'media_option', 'media_option_sfp', - 'media_sfp', 'media_speed', 'media_status', 'mtu', - 'phy_master_slave_mode', 'prefer_sfp_state', 'flow_control', - 'sflow_poll_interval', 'sflow_poll_interval_global', - 'sfp_media_state', 'stp_active_edge_port_state', - 'stp_enabled_state', 'stp_link_type', - 'stp_protocol_detection_reset_state'] - return generate_dict(interfaces, fields) - -def generate_self_ip_dict(f5, regex): - self_ips = SelfIPs(f5.get_api(), regex) - fields = ['address', 'allow_access_list', 'description', - 'enforced_firewall_policy', 'floating_state', 'fw_rule', - 'netmask', 'staged_firewall_policy', 'traffic_group', - 'vlan', 'is_traffic_group_inherited'] - return generate_dict(self_ips, fields) - -def generate_trunk_dict(f5, regex): - trunks = Trunks(f5.get_api(), regex) - fields = ['active_lacp_state', 'configured_member_count', 'description', - 'distribution_hash_option', 'interface', 'lacp_enabled_state', - 'lacp_timeout_option', 'link_selection_policy', 'media_speed', - 'media_status', 'operational_member_count', 'stp_enabled_state', - 'stp_protocol_detection_reset_state'] - return generate_dict(trunks, fields) - -def generate_vlan_dict(f5, regex): - vlans = Vlans(f5.get_api(), regex) - fields = ['auto_lasthop', 'cmp_hash_algorithm', 'description', - 'dynamic_forwarding', 'failsafe_action', 'failsafe_state', - 'failsafe_timeout', 'if_index', 'learning_mode', - 'mac_masquerade_address', 'member', 'mtu', - 'sflow_poll_interval', 'sflow_poll_interval_global', - 'sflow_sampling_rate', 'sflow_sampling_rate_global', - 'source_check_state', 'true_mac_address', 'vlan_id'] - return 
generate_dict(vlans, fields) - -def generate_vs_dict(f5, regex): - virtual_servers = VirtualServers(f5.get_api(), regex) - fields = ['actual_hardware_acceleration', 'authentication_profile', - 'auto_lasthop', 'bw_controller_policy', 'clone_pool', - 'cmp_enable_mode', 'connection_limit', 'connection_mirror_state', - 'default_pool_name', 'description', 'destination', - 'enabled_state', 'enforced_firewall_policy', - 'fallback_persistence_profile', 'fw_rule', 'gtm_score', - 'last_hop_pool', 'nat64_state', 'object_status', - 'persistence_profile', 'profile', 'protocol', - 'rate_class', 'rate_limit', 'rate_limit_destination_mask', - 'rate_limit_mode', 'rate_limit_source_mask', 'related_rule', - 'rule', 'security_log_profile', 'snat_pool', 'snat_type', - 'source_address', 'source_address_translation_lsn_pool', - 'source_address_translation_snat_pool', - 'source_address_translation_type', 'source_port_behavior', - 'staged_firewall_policy', 'translate_address_state', - 'translate_port_state', 'type', 'vlan', 'wildmask'] - return generate_dict(virtual_servers, fields) - -def generate_pool_dict(f5, regex): - pools = Pools(f5.get_api(), regex) - fields = ['action_on_service_down', 'active_member_count', - 'aggregate_dynamic_ratio', 'allow_nat_state', - 'allow_snat_state', 'client_ip_tos', 'client_link_qos', - 'description', 'gateway_failsafe_device', - 'ignore_persisted_weight_state', 'lb_method', 'member', - 'minimum_active_member', 'minimum_up_member', - 'minimum_up_member_action', 'minimum_up_member_enabled_state', - 'monitor_association', 'monitor_instance', 'object_status', - 'profile', 'queue_depth_limit', - 'queue_on_connection_limit_state', 'queue_time_limit', - 'reselect_tries', 'server_ip_tos', 'server_link_qos', - 'simple_timeout', 'slow_ramp_time'] - return generate_dict(pools, fields) - -def generate_device_dict(f5, regex): - devices = Devices(f5.get_api(), regex) - fields = ['active_modules', 'base_mac_address', 'blade_addresses', - 'build', 'chassis_id', 
'chassis_type', 'comment', - 'configsync_address', 'contact', 'description', 'edition', - 'failover_state', 'hostname', 'inactive_modules', 'location', - 'management_address', 'marketing_name', 'multicast_address', - 'optional_modules', 'platform_id', 'primary_mirror_address', - 'product', 'secondary_mirror_address', 'software_version', - 'timelimited_modules', 'timezone', 'unicast_addresses'] - return generate_dict(devices, fields) - -def generate_device_group_dict(f5, regex): - device_groups = DeviceGroups(f5.get_api(), regex) - fields = ['all_preferred_active', 'autosync_enabled_state','description', - 'device', 'full_load_on_sync_state', - 'incremental_config_sync_size_maximum', - 'network_failover_enabled_state', 'sync_status', 'type'] - return generate_dict(device_groups, fields) - -def generate_traffic_group_dict(f5, regex): - traffic_groups = TrafficGroups(f5.get_api(), regex) - fields = ['auto_failback_enabled_state', 'auto_failback_time', - 'default_device', 'description', 'ha_load_factor', - 'ha_order', 'is_floating', 'mac_masquerade_address', - 'unit_id'] - return generate_dict(traffic_groups, fields) - -def generate_rule_dict(f5, regex): - rules = Rules(f5.get_api(), regex) - fields = ['definition', 'description', 'ignore_vertification', - 'verification_status'] - return generate_dict(rules, fields) - -def generate_node_dict(f5, regex): - nodes = Nodes(f5.get_api(), regex) - fields = ['address', 'connection_limit', 'description', 'dynamic_ratio', - 'monitor_instance', 'monitor_rule', 'monitor_status', - 'object_status', 'rate_limit', 'ratio', 'session_status'] - return generate_dict(nodes, fields) - -def generate_virtual_address_dict(f5, regex): - virtual_addresses = VirtualAddresses(f5.get_api(), regex) - fields = ['address', 'arp_state', 'auto_delete_state', 'connection_limit', - 'description', 'enabled_state', 'icmp_echo_state', - 'is_floating_state', 'netmask', 'object_status', - 'route_advertisement_state', 'traffic_group'] - return 
generate_dict(virtual_addresses, fields) - -def generate_address_class_dict(f5, regex): - address_classes = AddressClasses(f5.get_api(), regex) - fields = ['address_class', 'description'] - return generate_dict(address_classes, fields) - -def generate_certificate_dict(f5, regex): - certificates = Certificates(f5.get_api(), regex) - return dict(zip(certificates.get_list(), certificates.get_certificate_list())) - -def generate_key_dict(f5, regex): - keys = Keys(f5.get_api(), regex) - return dict(zip(keys.get_list(), keys.get_key_list())) - -def generate_client_ssl_profile_dict(f5, regex): - profiles = ProfileClientSSL(f5.get_api(), regex) - fields = ['alert_timeout', 'allow_nonssl_state', 'authenticate_depth', - 'authenticate_once_state', 'ca_file', 'cache_size', - 'cache_timeout', 'certificate_file', 'chain_file', - 'cipher_list', 'client_certificate_ca_file', 'crl_file', - 'default_profile', 'description', - 'forward_proxy_ca_certificate_file', 'forward_proxy_ca_key_file', - 'forward_proxy_ca_passphrase', - 'forward_proxy_certificate_extension_include', - 'forward_proxy_certificate_lifespan', - 'forward_proxy_enabled_state', - 'forward_proxy_lookup_by_ipaddr_port_state', 'handshake_timeout', - 'key_file', 'modssl_emulation_state', 'passphrase', - 'peer_certification_mode', 'profile_mode', - 'renegotiation_maximum_record_delay', 'renegotiation_period', - 'renegotiation_state', 'renegotiation_throughput', - 'retain_certificate_state', 'secure_renegotiation_mode', - 'server_name', 'session_ticket_state', 'sni_default_state', - 'sni_require_state', 'ssl_option', 'strict_resume_state', - 'unclean_shutdown_state', 'is_base_profile', 'is_system_profile'] - return generate_dict(profiles, fields) - -def generate_system_info_dict(f5): - system_info = SystemInfo(f5.get_api()) - fields = ['base_mac_address', - 'blade_temperature', 'chassis_slot_information', - 'globally_unique_identifier', 'group_id', - 'hardware_information', - 'marketing_name', - 'product_information', 
'pva_version', 'system_id', - 'system_information', 'time', - 'time_zone', 'uptime'] - return generate_simple_dict(system_info, fields) - -def generate_software_list(f5): - software = Software(f5.get_api()) - software_list = software.get_all_software_status() - return software_list - - -def main(): - module = AnsibleModule( - argument_spec = dict( - server = dict(type='str', required=True), - user = dict(type='str', required=True), - password = dict(type='str', required=True), - session = dict(type='bool', default=False), - include = dict(type='list', required=True), - filter = dict(type='str', required=False), - ) - ) - - if not bigsuds_found: - module.fail_json(msg="the python suds and bigsuds modules is required") - - server = module.params['server'] - user = module.params['user'] - password = module.params['password'] - session = module.params['session'] - fact_filter = module.params['filter'] - if fact_filter: - regex = fnmatch.translate(fact_filter) - else: - regex = None - include = map(lambda x: x.lower(), module.params['include']) - valid_includes = ('address_class', 'certificate', 'client_ssl_profile', - 'device_group', 'interface', 'key', 'node', 'pool', - 'rule', 'self_ip', 'software', 'system_info', - 'traffic_group', 'trunk', 'virtual_address', - 'virtual_server', 'vlan') - include_test = map(lambda x: x in valid_includes, include) - if not all(include_test): - module.fail_json(msg="value of include must be one or more of: %s, got: %s" % (",".join(valid_includes), ",".join(include))) - - try: - facts = {} - - if len(include) > 0: - f5 = F5(server, user, password, session) - saved_active_folder = f5.get_active_folder() - saved_recursive_query_state = f5.get_recursive_query_state() - if saved_active_folder != "/": - f5.set_active_folder("/") - if saved_recursive_query_state != "STATE_ENABLED": - f5.enable_recursive_query_state() - - if 'interface' in include: - facts['interface'] = generate_interface_dict(f5, regex) - if 'self_ip' in include: - 
facts['self_ip'] = generate_self_ip_dict(f5, regex) - if 'trunk' in include: - facts['trunk'] = generate_trunk_dict(f5, regex) - if 'vlan' in include: - facts['vlan'] = generate_vlan_dict(f5, regex) - if 'virtual_server' in include: - facts['virtual_server'] = generate_vs_dict(f5, regex) - if 'pool' in include: - facts['pool'] = generate_pool_dict(f5, regex) - if 'device' in include: - facts['device'] = generate_device_dict(f5, regex) - if 'device_group' in include: - facts['device_group'] = generate_device_group_dict(f5, regex) - if 'traffic_group' in include: - facts['traffic_group'] = generate_traffic_group_dict(f5, regex) - if 'rule' in include: - facts['rule'] = generate_rule_dict(f5, regex) - if 'node' in include: - facts['node'] = generate_node_dict(f5, regex) - if 'virtual_address' in include: - facts['virtual_address'] = generate_virtual_address_dict(f5, regex) - if 'address_class' in include: - facts['address_class'] = generate_address_class_dict(f5, regex) - if 'software' in include: - facts['software'] = generate_software_list(f5) - if 'certificate' in include: - facts['certificate'] = generate_certificate_dict(f5, regex) - if 'key' in include: - facts['key'] = generate_key_dict(f5, regex) - if 'client_ssl_profile' in include: - facts['client_ssl_profile'] = generate_client_ssl_profile_dict(f5, regex) - if 'system_info' in include: - facts['system_info'] = generate_system_info_dict(f5) - - # restore saved state - if saved_active_folder and saved_active_folder != "/": - f5.set_active_folder(saved_active_folder) - if saved_recursive_query_state and \ - saved_recursive_query_state != "STATE_ENABLED": - f5.set_recursive_query_state(saved_recursive_query_state) - - result = {'ansible_facts': facts} - - except Exception, e: - module.fail_json(msg="received exception: %s\ntraceback: %s" % (e, traceback.format_exc())) - - module.exit_json(**result) - -# include magic from lib/ansible/module_common.py -#<> -main() - diff --git 
a/library/net_infrastructure/bigip_monitor_http b/library/net_infrastructure/bigip_monitor_http deleted file mode 100644 index 62823f8657..0000000000 --- a/library/net_infrastructure/bigip_monitor_http +++ /dev/null @@ -1,464 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, serge van Ginderachter -# based on Matt Hite's bigip_pool module -# (c) 2013, Matt Hite -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -DOCUMENTATION = ''' ---- -module: bigip_monitor_http -short_description: "Manages F5 BIG-IP LTM http monitors" -description: - - "Manages F5 BIG-IP LTM monitors via iControl SOAP API" -version_added: "1.4" -author: Serge van Ginderachter -notes: - - "Requires BIG-IP software version >= 11" - - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" - - "Best run as a local_action in your playbook" - - "Monitor API documentation: https://devcentral.f5.com/wiki/iControl.LocalLB__Monitor.ashx" -requirements: - - bigsuds -options: - server: - description: - - BIG-IP host - required: true - default: null - user: - description: - - BIG-IP username - required: true - default: null - password: - description: - - BIG-IP password - required: true - default: null - state: - description: - - Monitor state - required: false - default: 'present' - choices: ['present', 'absent'] - name: - description: - - Monitor name - required: true - default: null - aliases: ['monitor'] - partition: - description: - - Partition for the monitor - required: false - default: 'Common' - parent: - description: - - The parent template of this monitor template - required: false - default: 'http' - parent_partition: - description: - - Partition for the parent monitor - required: false - default: 'Common' - send: - description: - - The send string for the monitor call - required: true - default: none - receive: - description: - - The receive string for the monitor call - required: true - default: none - receive_disable: - description: - - The receive disable string for the monitor call - required: true - default: none - ip: - description: - - IP address part of the ipport definition. The default API setting - is "0.0.0.0". - required: false - default: none - port: - description: - - port address part op the ipport definition. The default API - setting is 0. 
- required: false - default: none - interval: - description: - - The interval specifying how frequently the monitor instance - of this template will run. By default, this interval is used for up and - down states. The default API setting is 5. - required: false - default: none - timeout: - description: - - The number of seconds in which the node or service must respond to - the monitor request. If the target responds within the set time - period, it is considered up. If the target does not respond within - the set time period, it is considered down. You can change this - number to any number you want, however, it should be 3 times the - interval number of seconds plus 1 second. The default API setting - is 16. - required: false - default: none - time_until_up: - description: - - Specifies the amount of time in seconds after the first successful - response before a node will be marked up. A value of 0 will cause a - node to be marked up immediately after a valid response is received - from the node. The default API setting is 0. - required: false - default: none -''' - -EXAMPLES = ''' -- name: BIGIP F5 | Create HTTP Monitor - local_action: - module: bigip_monitor_http - state: present - server: "{{ f5server }}" - user: "{{ f5user }}" - password: "{{ f5password }}" - name: "{{ item.monitorname }}" - send: "{{ item.send }}" - receive: "{{ item.receive }}" - with_items: f5monitors -- name: BIGIP F5 | Remove HTTP Monitor - local_action: - module: bigip_monitor_http - state: absent - server: "{{ f5server }}" - user: "{{ f5user }}" - password: "{{ f5password }}" - name: "{{ monitorname }}" -''' - -try: - import bigsuds -except ImportError: - bigsuds_found = False -else: - bigsuds_found = True - -TEMPLATE_TYPE = 'TTYPE_HTTP' -DEFAULT_PARENT_TYPE = 'http' - - -# =========================================== -# bigip_monitor module generic methods. 
-# these should be re-useable for other monitor types -# - -def bigip_api(bigip, user, password): - - api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) - return api - - -def check_monitor_exists(module, api, monitor, parent): - - # hack to determine if monitor exists - result = False - try: - ttype = api.LocalLB.Monitor.get_template_type(template_names=[monitor])[0] - parent2 = api.LocalLB.Monitor.get_parent_template(template_names=[monitor])[0] - if ttype == TEMPLATE_TYPE and parent == parent2: - result = True - else: - module.fail_json(msg='Monitor already exists, but has a different type (%s) or parent(%s)' % (ttype, parent)) - except bigsuds.OperationFailed, e: - if "was not found" in str(e): - result = False - else: - # genuine exception - raise - return result - - -def create_monitor(api, monitor, template_attributes): - - try: - api.LocalLB.Monitor.create_template(templates=[{'template_name': monitor, 'template_type': TEMPLATE_TYPE}], template_attributes=[template_attributes]) - except bigsuds.OperationFailed, e: - if "already exists" in str(e): - return False - else: - # genuine exception - raise - return True - - -def delete_monitor(api, monitor): - - try: - api.LocalLB.Monitor.delete_template(template_names=[monitor]) - except bigsuds.OperationFailed, e: - # maybe it was deleted since we checked - if "was not found" in str(e): - return False - else: - # genuine exception - raise - return True - - -def check_string_property(api, monitor, str_property): - - try: - return str_property == api.LocalLB.Monitor.get_template_string_property([monitor], [str_property['type']])[0] - except bigsuds.OperationFailed, e: - # happens in check mode if not created yet - if "was not found" in str(e): - return True - else: - # genuine exception - raise - - -def set_string_property(api, monitor, str_property): - - api.LocalLB.Monitor.set_template_string_property(template_names=[monitor], values=[str_property]) - - -def check_integer_property(api, monitor, 
int_property): - - try: - return int_property == api.LocalLB.Monitor.get_template_integer_property([monitor], [int_property['type']])[0] - except bigsuds.OperationFailed, e: - # happens in check mode if not created yet - if "was not found" in str(e): - return True - else: - # genuine exception - raise - - - -def set_integer_property(api, monitor, int_property): - - api.LocalLB.Monitor.set_template_int_property(template_names=[monitor], values=[int_property]) - - -def update_monitor_properties(api, module, monitor, template_string_properties, template_integer_properties): - - changed = False - for str_property in template_string_properties: - if str_property['value'] is not None and not check_string_property(api, monitor, str_property): - if not module.check_mode: - set_string_property(api, monitor, str_property) - changed = True - for int_property in template_integer_properties: - if int_property['value'] is not None and not check_integer_property(api, monitor, int_property): - if not module.check_mode: - set_integer_property(api, monitor, int_property) - changed = True - - return changed - - -def get_ipport(api, monitor): - - return api.LocalLB.Monitor.get_template_destination(template_names=[monitor])[0] - - -def set_ipport(api, monitor, ipport): - - try: - api.LocalLB.Monitor.set_template_destination(template_names=[monitor], destinations=[ipport]) - return True, "" - - except bigsuds.OperationFailed, e: - if "Cannot modify the address type of monitor" in str(e): - return False, "Cannot modify the address type of monitor if already assigned to a pool." 
- else: - # genuine exception - raise - -# =========================================== -# main loop -# -# writing a module for other monitor types should -# only need an updated main() (and monitor specific functions) - -def main(): - - # begin monitor specific stuff - - module = AnsibleModule( - argument_spec = dict( - server = dict(required=True), - user = dict(required=True), - password = dict(required=True), - partition = dict(default='Common'), - state = dict(default='present', choices=['present', 'absent']), - name = dict(required=True), - parent = dict(default=DEFAULT_PARENT_TYPE), - parent_partition = dict(default='Common'), - send = dict(required=False), - receive = dict(required=False), - receive_disable = dict(required=False), - ip = dict(required=False), - port = dict(required=False, type='int'), - interval = dict(required=False, type='int'), - timeout = dict(required=False, type='int'), - time_until_up = dict(required=False, type='int', default=0) - ), - supports_check_mode=True - ) - - server = module.params['server'] - user = module.params['user'] - password = module.params['password'] - partition = module.params['partition'] - parent_partition = module.params['parent_partition'] - state = module.params['state'] - name = module.params['name'] - parent = "/%s/%s" % (parent_partition, module.params['parent']) - monitor = "/%s/%s" % (partition, name) - send = module.params['send'] - receive = module.params['receive'] - receive_disable = module.params['receive_disable'] - ip = module.params['ip'] - port = module.params['port'] - interval = module.params['interval'] - timeout = module.params['timeout'] - time_until_up = module.params['time_until_up'] - - # end monitor specific stuff - - if not bigsuds_found: - module.fail_json(msg="the python bigsuds module is required") - api = bigip_api(server, user, password) - monitor_exists = check_monitor_exists(module, api, monitor, parent) - - - # ipport is a special setting - if monitor_exists: # make sure to not 
update current settings if not asked - cur_ipport = get_ipport(api, monitor) - if ip is None: - ip = cur_ipport['ipport']['address'] - if port is None: - port = cur_ipport['ipport']['port'] - else: # use API defaults if not defined to create it - if interval is None: - interval = 5 - if timeout is None: - timeout = 16 - if ip is None: - ip = '0.0.0.0' - if port is None: - port = 0 - if send is None: - send = '' - if receive is None: - receive = '' - if receive_disable is None: - receive_disable = '' - - # define and set address type - if ip == '0.0.0.0' and port == 0: - address_type = 'ATYPE_STAR_ADDRESS_STAR_PORT' - elif ip == '0.0.0.0' and port != 0: - address_type = 'ATYPE_STAR_ADDRESS_EXPLICIT_PORT' - elif ip != '0.0.0.0' and port != 0: - address_type = 'ATYPE_EXPLICIT_ADDRESS_EXPLICIT_PORT' - else: - address_type = 'ATYPE_UNSET' - - ipport = {'address_type': address_type, - 'ipport': {'address': ip, - 'port': port}} - - template_attributes = {'parent_template': parent, - 'interval': interval, - 'timeout': timeout, - 'dest_ipport': ipport, - 'is_read_only': False, - 'is_directly_usable': True} - - # monitor specific stuff - template_string_properties = [{'type': 'STYPE_SEND', - 'value': send}, - {'type': 'STYPE_RECEIVE', - 'value': receive}, - {'type': 'STYPE_RECEIVE_DRAIN', - 'value': receive_disable}] - - template_integer_properties = [{'type': 'ITYPE_INTERVAL', - 'value': interval}, - {'type': 'ITYPE_TIMEOUT', - 'value': timeout}, - {'type': 'ITYPE_TIME_UNTIL_UP', - 'value': time_until_up}] - - # main logic, monitor generic - - try: - result = {'changed': False} # default - - - if state == 'absent': - if monitor_exists: - if not module.check_mode: - # possible race condition if same task - # on other node deleted it first - result['changed'] |= delete_monitor(api, monitor) - else: - result['changed'] |= True - - else: # state present - ## check for monitor itself - if not monitor_exists: # create it - if not module.check_mode: - # again, check changed status 
here b/c race conditions - # if other task already created it - result['changed'] |= create_monitor(api, monitor, template_attributes) - else: - result['changed'] |= True - - ## check for monitor parameters - # whether it already existed, or was just created, now update - # the update functions need to check for check mode but - # cannot update settings if it doesn't exist which happens in check mode - result['changed'] |= update_monitor_properties(api, module, monitor, - template_string_properties, - template_integer_properties) - - # we just have to update the ipport if monitor already exists and it's different - if monitor_exists and cur_ipport != ipport: - set_ipport(api, monitor, ipport) - result['changed'] |= True - #else: monitor doesn't exist (check mode) or ipport is already ok - - - except Exception, e: - module.fail_json(msg="received exception: %s" % e) - - module.exit_json(**result) - -# import module snippets -from ansible.module_utils.basic import * -main() - diff --git a/library/net_infrastructure/bigip_monitor_tcp b/library/net_infrastructure/bigip_monitor_tcp deleted file mode 100644 index 8b89a0c611..0000000000 --- a/library/net_infrastructure/bigip_monitor_tcp +++ /dev/null @@ -1,489 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, serge van Ginderachter -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -DOCUMENTATION = ''' ---- -module: bigip_monitor_tcp -short_description: "Manages F5 BIG-IP LTM tcp monitors" -description: - - "Manages F5 BIG-IP LTM tcp monitors via iControl SOAP API" -version_added: "1.4" -author: Serge van Ginderachter -notes: - - "Requires BIG-IP software version >= 11" - - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" - - "Best run as a local_action in your playbook" - - "Monitor API documentation: https://devcentral.f5.com/wiki/iControl.LocalLB__Monitor.ashx" -requirements: - - bigsuds -options: - server: - description: - - BIG-IP host - required: true - default: null - user: - description: - - BIG-IP username - required: true - default: null - password: - description: - - BIG-IP password - required: true - default: null - state: - description: - - Monitor state - required: false - default: 'present' - choices: ['present', 'absent'] - name: - description: - - Monitor name - required: true - default: null - aliases: ['monitor'] - partition: - description: - - Partition for the monitor - required: false - default: 'Common' - type: - description: - - The template type of this monitor template - required: false - default: 'tcp' - choices: [ 'TTYPE_TCP', 'TTYPE_TCP_ECHO', 'TTYPE_TCP_HALF_OPEN'] - parent: - description: - - The parent template of this monitor template - required: false - default: 'tcp' - choices: [ 'tcp', 'tcp_echo', 'tcp_half_open'] - parent_partition: - description: - - Partition for the parent monitor - required: false - default: 'Common' - send: - description: - - The send string for the monitor call - required: true - default: none - receive: - description: - - The receive string for the monitor call - required: true - default: none - ip: - description: - - IP address part of the ipport definition. The default API setting - is "0.0.0.0". - required: false - default: none - port: - description: - - port address part op the ipport definition. The default API - setting is 0. 
- required: false - default: none - interval: - description: - - The interval specifying how frequently the monitor instance - of this template will run. By default, this interval is used for up and - down states. The default API setting is 5. - required: false - default: none - timeout: - description: - - The number of seconds in which the node or service must respond to - the monitor request. If the target responds within the set time - period, it is considered up. If the target does not respond within - the set time period, it is considered down. You can change this - number to any number you want, however, it should be 3 times the - interval number of seconds plus 1 second. The default API setting - is 16. - required: false - default: none - time_until_up: - description: - - Specifies the amount of time in seconds after the first successful - response before a node will be marked up. A value of 0 will cause a - node to be marked up immediately after a valid response is received - from the node. The default API setting is 0. 
- required: false - default: none -''' - -EXAMPLES = ''' - -- name: BIGIP F5 | Create TCP Monitor - local_action: - module: bigip_monitor_tcp - state: present - server: "{{ f5server }}" - user: "{{ f5user }}" - password: "{{ f5password }}" - name: "{{ item.monitorname }}" - type: tcp - send: "{{ item.send }}" - receive: "{{ item.receive }}" - with_items: f5monitors-tcp -- name: BIGIP F5 | Create TCP half open Monitor - local_action: - module: bigip_monitor_tcp - state: present - server: "{{ f5server }}" - user: "{{ f5user }}" - password: "{{ f5password }}" - name: "{{ item.monitorname }}" - type: tcp - send: "{{ item.send }}" - receive: "{{ item.receive }}" - with_items: f5monitors-halftcp -- name: BIGIP F5 | Remove TCP Monitor - local_action: - module: bigip_monitor_tcp - state: absent - server: "{{ f5server }}" - user: "{{ f5user }}" - password: "{{ f5password }}" - name: "{{ monitorname }}" - with_flattened: - - f5monitors-tcp - - f5monitors-halftcp - -''' - -try: - import bigsuds -except ImportError: - bigsuds_found = False -else: - bigsuds_found = True - -TEMPLATE_TYPE = DEFAULT_TEMPLATE_TYPE = 'TTYPE_TCP' -TEMPLATE_TYPE_CHOICES = ['tcp', 'tcp_echo', 'tcp_half_open'] -DEFAULT_PARENT = DEFAULT_TEMPLATE_TYPE_CHOICE = DEFAULT_TEMPLATE_TYPE.replace('TTYPE_', '').lower() - - -# =========================================== -# bigip_monitor module generic methods. 
-# these should be re-useable for other monitor types -# - -def bigip_api(bigip, user, password): - - api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) - return api - - -def check_monitor_exists(module, api, monitor, parent): - - # hack to determine if monitor exists - result = False - try: - ttype = api.LocalLB.Monitor.get_template_type(template_names=[monitor])[0] - parent2 = api.LocalLB.Monitor.get_parent_template(template_names=[monitor])[0] - if ttype == TEMPLATE_TYPE and parent == parent2: - result = True - else: - module.fail_json(msg='Monitor already exists, but has a different type (%s) or parent(%s)' % (ttype, parent)) - except bigsuds.OperationFailed, e: - if "was not found" in str(e): - result = False - else: - # genuine exception - raise - return result - - -def create_monitor(api, monitor, template_attributes): - - try: - api.LocalLB.Monitor.create_template(templates=[{'template_name': monitor, 'template_type': TEMPLATE_TYPE}], template_attributes=[template_attributes]) - except bigsuds.OperationFailed, e: - if "already exists" in str(e): - return False - else: - # genuine exception - raise - return True - - -def delete_monitor(api, monitor): - - try: - api.LocalLB.Monitor.delete_template(template_names=[monitor]) - except bigsuds.OperationFailed, e: - # maybe it was deleted since we checked - if "was not found" in str(e): - return False - else: - # genuine exception - raise - return True - - -def check_string_property(api, monitor, str_property): - - try: - return str_property == api.LocalLB.Monitor.get_template_string_property([monitor], [str_property['type']])[0] - except bigsuds.OperationFailed, e: - # happens in check mode if not created yet - if "was not found" in str(e): - return True - else: - # genuine exception - raise - return True - - -def set_string_property(api, monitor, str_property): - - api.LocalLB.Monitor.set_template_string_property(template_names=[monitor], values=[str_property]) - - -def 
check_integer_property(api, monitor, int_property): - - try: - return int_property == api.LocalLB.Monitor.get_template_integer_property([monitor], [int_property['type']])[0] - except bigsuds.OperationFailed, e: - # happens in check mode if not created yet - if "was not found" in str(e): - return True - else: - # genuine exception - raise - return True - - -def set_integer_property(api, monitor, int_property): - - api.LocalLB.Monitor.set_template_int_property(template_names=[monitor], values=[int_property]) - - -def update_monitor_properties(api, module, monitor, template_string_properties, template_integer_properties): - - changed = False - for str_property in template_string_properties: - if str_property['value'] is not None and not check_string_property(api, monitor, str_property): - if not module.check_mode: - set_string_property(api, monitor, str_property) - changed = True - for int_property in template_integer_properties: - if int_property['value'] is not None and not check_integer_property(api, monitor, int_property): - if not module.check_mode: - set_integer_property(api, monitor, int_property) - changed = True - - return changed - - -def get_ipport(api, monitor): - - return api.LocalLB.Monitor.get_template_destination(template_names=[monitor])[0] - - -def set_ipport(api, monitor, ipport): - - try: - api.LocalLB.Monitor.set_template_destination(template_names=[monitor], destinations=[ipport]) - return True, "" - - except bigsuds.OperationFailed, e: - if "Cannot modify the address type of monitor" in str(e): - return False, "Cannot modify the address type of monitor if already assigned to a pool." 
- else: - # genuine exception - raise - -# =========================================== -# main loop -# -# writing a module for other monitor types should -# only need an updated main() (and monitor specific functions) - -def main(): - - # begin monitor specific stuff - - module = AnsibleModule( - argument_spec = dict( - server = dict(required=True), - user = dict(required=True), - password = dict(required=True), - partition = dict(default='Common'), - state = dict(default='present', choices=['present', 'absent']), - name = dict(required=True), - type = dict(default=DEFAULT_TEMPLATE_TYPE_CHOICE, choices=TEMPLATE_TYPE_CHOICES), - parent = dict(default=DEFAULT_PARENT), - parent_partition = dict(default='Common'), - send = dict(required=False), - receive = dict(required=False), - ip = dict(required=False), - port = dict(required=False, type='int'), - interval = dict(required=False, type='int'), - timeout = dict(required=False, type='int'), - time_until_up = dict(required=False, type='int', default=0) - ), - supports_check_mode=True - ) - - server = module.params['server'] - user = module.params['user'] - password = module.params['password'] - partition = module.params['partition'] - parent_partition = module.params['parent_partition'] - state = module.params['state'] - name = module.params['name'] - type = 'TTYPE_' + module.params['type'].upper() - parent = "/%s/%s" % (parent_partition, module.params['parent']) - monitor = "/%s/%s" % (partition, name) - send = module.params['send'] - receive = module.params['receive'] - ip = module.params['ip'] - port = module.params['port'] - interval = module.params['interval'] - timeout = module.params['timeout'] - time_until_up = module.params['time_until_up'] - - # tcp monitor has multiple types, so overrule - global TEMPLATE_TYPE - TEMPLATE_TYPE = type - - # end monitor specific stuff - - if not bigsuds_found: - module.fail_json(msg="the python bigsuds module is required") - api = bigip_api(server, user, password) - 
monitor_exists = check_monitor_exists(module, api, monitor, parent) - - - # ipport is a special setting - if monitor_exists: # make sure to not update current settings if not asked - cur_ipport = get_ipport(api, monitor) - if ip is None: - ip = cur_ipport['ipport']['address'] - if port is None: - port = cur_ipport['ipport']['port'] - else: # use API defaults if not defined to create it - if interval is None: - interval = 5 - if timeout is None: - timeout = 16 - if ip is None: - ip = '0.0.0.0' - if port is None: - port = 0 - if send is None: - send = '' - if receive is None: - receive = '' - - # define and set address type - if ip == '0.0.0.0' and port == 0: - address_type = 'ATYPE_STAR_ADDRESS_STAR_PORT' - elif ip == '0.0.0.0' and port != 0: - address_type = 'ATYPE_STAR_ADDRESS_EXPLICIT_PORT' - elif ip != '0.0.0.0' and port != 0: - address_type = 'ATYPE_EXPLICIT_ADDRESS_EXPLICIT_PORT' - else: - address_type = 'ATYPE_UNSET' - - ipport = {'address_type': address_type, - 'ipport': {'address': ip, - 'port': port}} - - template_attributes = {'parent_template': parent, - 'interval': interval, - 'timeout': timeout, - 'dest_ipport': ipport, - 'is_read_only': False, - 'is_directly_usable': True} - - # monitor specific stuff - if type == 'TTYPE_TCP': - template_string_properties = [{'type': 'STYPE_SEND', - 'value': send}, - {'type': 'STYPE_RECEIVE', - 'value': receive}] - else: - template_string_properties = [] - - template_integer_properties = [{'type': 'ITYPE_INTERVAL', - 'value': interval}, - {'type': 'ITYPE_TIMEOUT', - 'value': timeout}, - {'type': 'ITYPE_TIME_UNTIL_UP', - 'value': interval}] - - # main logic, monitor generic - - try: - result = {'changed': False} # default - - - if state == 'absent': - if monitor_exists: - if not module.check_mode: - # possible race condition if same task - # on other node deleted it first - result['changed'] |= delete_monitor(api, monitor) - else: - result['changed'] |= True - - else: # state present - ## check for monitor itself - if 
not monitor_exists: # create it - if not module.check_mode: - # again, check changed status here b/c race conditions - # if other task already created it - result['changed'] |= create_monitor(api, monitor, template_attributes) - else: - result['changed'] |= True - - ## check for monitor parameters - # whether it already existed, or was just created, now update - # the update functions need to check for check mode but - # cannot update settings if it doesn't exist which happens in check mode - if monitor_exists and not module.check_mode: - result['changed'] |= update_monitor_properties(api, module, monitor, - template_string_properties, - template_integer_properties) - # else assume nothing changed - - # we just have to update the ipport if monitor already exists and it's different - if monitor_exists and cur_ipport != ipport: - set_ipport(api, monitor, ipport) - result['changed'] |= True - #else: monitor doesn't exist (check mode) or ipport is already ok - - - except Exception, e: - module.fail_json(msg="received exception: %s" % e) - - module.exit_json(**result) - -# import module snippets -from ansible.module_utils.basic import * -main() - diff --git a/library/net_infrastructure/bigip_node b/library/net_infrastructure/bigip_node deleted file mode 100644 index 68b6a2b52f..0000000000 --- a/library/net_infrastructure/bigip_node +++ /dev/null @@ -1,294 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Matt Hite -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: bigip_node -short_description: "Manages F5 BIG-IP LTM nodes" -description: - - "Manages F5 BIG-IP LTM nodes via iControl SOAP API" -version_added: "1.4" -author: Matt Hite -notes: - - "Requires BIG-IP software version >= 11" - - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" - - "Best run as a local_action in your playbook" -requirements: - - bigsuds -options: - server: - description: - - BIG-IP host - required: true - default: null - choices: [] - aliases: [] - user: - description: - - BIG-IP username - required: true - default: null - choices: [] - aliases: [] - password: - description: - - BIG-IP password - required: true - default: null - choices: [] - aliases: [] - state: - description: - - Pool member state - required: true - default: present - choices: ['present', 'absent'] - aliases: [] - partition: - description: - - Partition - required: false - default: 'Common' - choices: [] - aliases: [] - name: - description: - - "Node name" - required: false - default: null - choices: [] - host: - description: - - "Node IP. Required when state=present and node does not exist. Error when state=absent." - required: true - default: null - choices: [] - aliases: ['address', 'ip'] - description: - description: - - "Node description." - required: false - default: null - choices: [] -''' - -EXAMPLES = ''' - -## playbook task examples: - ---- -# file bigip-test.yml -# ... -- hosts: bigip-test - tasks: - - name: Add node - local_action: > - bigip_node - server=lb.mydomain.com - user=admin - password=mysecret - state=present - partition=matthite - host="{{ ansible_default_ipv4["address"] }}" - name="{{ ansible_default_ipv4["address"] }}" - -# Note that the BIG-IP automatically names the node using the -# IP address specified in previous play's host parameter. 
-# Future plays referencing this node no longer use the host -# parameter but instead use the name parameter. -# Alternatively, you could have specified a name with the -# name parameter when state=present. - - - name: Modify node description - local_action: > - bigip_node - server=lb.mydomain.com - user=admin - password=mysecret - state=present - partition=matthite - name="{{ ansible_default_ipv4["address"] }}" - description="Our best server yet" - - - name: Delete node - local_action: > - bigip_node - server=lb.mydomain.com - user=admin - password=mysecret - state=absent - partition=matthite - name="{{ ansible_default_ipv4["address"] }}" - -''' - -try: - import bigsuds -except ImportError: - bigsuds_found = False -else: - bigsuds_found = True - -# ========================== -# bigip_node module specific -# - -def bigip_api(bigip, user, password): - api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) - return api - -def node_exists(api, address): - # hack to determine if node exists - result = False - try: - api.LocalLB.NodeAddressV2.get_object_status(nodes=[address]) - result = True - except bigsuds.OperationFailed, e: - if "was not found" in str(e): - result = False - else: - # genuine exception - raise - return result - -def create_node_address(api, address, name): - try: - api.LocalLB.NodeAddressV2.create(nodes=[name], addresses=[address], limits=[0]) - result = True - desc = "" - except bigsuds.OperationFailed, e: - if "already exists" in str(e): - result = False - desc = "referenced name or IP already in use" - else: - # genuine exception - raise - return (result, desc) - -def get_node_address(api, name): - return api.LocalLB.NodeAddressV2.get_address(nodes=[name])[0] - -def delete_node_address(api, address): - try: - api.LocalLB.NodeAddressV2.delete_node_address(nodes=[address]) - result = True - desc = "" - except bigsuds.OperationFailed, e: - if "is referenced by a member of pool" in str(e): - result = False - desc = "node referenced by 
pool" - else: - # genuine exception - raise - return (result, desc) - -def set_node_description(api, name, description): - api.LocalLB.NodeAddressV2.set_description(nodes=[name], - descriptions=[description]) - -def get_node_description(api, name): - return api.LocalLB.NodeAddressV2.get_description(nodes=[name])[0] - -def main(): - module = AnsibleModule( - argument_spec = dict( - server = dict(type='str', required=True), - user = dict(type='str', required=True), - password = dict(type='str', required=True), - state = dict(type='str', default='present', choices=['present', 'absent']), - partition = dict(type='str', default='Common'), - name = dict(type='str', required=True), - host = dict(type='str', aliases=['address', 'ip']), - description = dict(type='str') - ), - supports_check_mode=True - ) - - if not bigsuds_found: - module.fail_json(msg="the python bigsuds module is required") - - server = module.params['server'] - user = module.params['user'] - password = module.params['password'] - state = module.params['state'] - partition = module.params['partition'] - host = module.params['host'] - name = module.params['name'] - address = "/%s/%s" % (partition, name) - description = module.params['description'] - - if state == 'absent' and host is not None: - module.fail_json(msg="host parameter invalid when state=absent") - - try: - api = bigip_api(server, user, password) - result = {'changed': False} # default - - if state == 'absent': - if node_exists(api, address): - if not module.check_mode: - deleted, desc = delete_node_address(api, address) - if not deleted: - module.fail_json(msg="unable to delete: %s" % desc) - else: - result = {'changed': True} - else: - # check-mode return value - result = {'changed': True} - - elif state == 'present': - if not node_exists(api, address): - if host is None: - module.fail_json(msg="host parameter required when " \ - "state=present and node does not exist") - if not module.check_mode: - created, desc = create_node_address(api, 
address=host, name=address) - if not created: - module.fail_json(msg="unable to create: %s" % desc) - else: - result = {'changed': True} - if description is not None: - set_node_description(api, address, description) - result = {'changed': True} - else: - # check-mode return value - result = {'changed': True} - else: - # node exists -- potentially modify attributes - if host is not None: - if get_node_address(api, address) != host: - module.fail_json(msg="Changing the node address is " \ - "not supported by the API; " \ - "delete and recreate the node.") - if description is not None: - if get_node_description(api, address) != description: - if not module.check_mode: - set_node_description(api, address, description) - result = {'changed': True} - - except Exception, e: - module.fail_json(msg="received exception: %s" % e) - - module.exit_json(**result) - -# import module snippets -from ansible.module_utils.basic import * -main() - diff --git a/library/net_infrastructure/bigip_pool b/library/net_infrastructure/bigip_pool deleted file mode 100644 index 48d03b9f1c..0000000000 --- a/library/net_infrastructure/bigip_pool +++ /dev/null @@ -1,536 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Matt Hite -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -DOCUMENTATION = ''' ---- -module: bigip_pool -short_description: "Manages F5 BIG-IP LTM pools" -description: - - "Manages F5 BIG-IP LTM pools via iControl SOAP API" -version_added: "1.2" -author: Matt Hite -notes: - - "Requires BIG-IP software version >= 11" - - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" - - "Best run as a local_action in your playbook" -requirements: - - bigsuds -options: - server: - description: - - BIG-IP host - required: true - default: null - choices: [] - aliases: [] - user: - description: - - BIG-IP username - required: true - default: null - choices: [] - aliases: [] - password: - description: - - BIG-IP password - required: true - default: null - choices: [] - aliases: [] - state: - description: - - Pool/pool member state - required: false - default: present - choices: ['present', 'absent'] - aliases: [] - name: - description: - - Pool name - required: true - default: null - choices: [] - aliases: ['pool'] - partition: - description: - - Partition of pool/pool member - required: false - default: 'Common' - choices: [] - aliases: [] - lb_method: - description: - - Load balancing method - version_added: "1.3" - required: False - default: 'round_robin' - choices: ['round_robin', 'ratio_member', 'least_connection_member', - 'observed_member', 'predictive_member', 'ratio_node_address', - 'least_connection_node_address', 'fastest_node_address', - 'observed_node_address', 'predictive_node_address', - 'dynamic_ratio', 'fastest_app_response', 'least_sessions', - 'dynamic_ratio_member', 'l3_addr', 'unknown', - 'weighted_least_connection_member', - 'weighted_least_connection_node_address', - 'ratio_session', 'ratio_least_connection_member', - 'ratio_least_connection_node_address'] - aliases: [] - monitor_type: - description: - - Monitor rule type when monitors > 1 - version_added: "1.3" - required: False - default: null - choices: ['and_list', 'm_of_n'] - aliases: [] - quorum: - description: - - Monitor quorum value 
when monitor_type is m_of_n - version_added: "1.3" - required: False - default: null - choices: [] - aliases: [] - monitors: - description: - - Monitor template name list. Always use the full path to the monitor. - version_added: "1.3" - required: False - default: null - choices: [] - aliases: [] - slow_ramp_time: - description: - - Sets the ramp-up time (in seconds) to gradually ramp up the load on newly added or freshly detected up pool members - version_added: "1.3" - required: False - default: null - choices: [] - aliases: [] - service_down_action: - description: - - Sets the action to take when node goes down in pool - version_added: "1.3" - required: False - default: null - choices: ['none', 'reset', 'drop', 'reselect'] - aliases: [] - host: - description: - - "Pool member IP" - required: False - default: null - choices: [] - aliases: ['address'] - port: - description: - - "Pool member port" - required: False - default: null - choices: [] - aliases: [] -''' - -EXAMPLES = ''' - -## playbook task examples: - ---- -# file bigip-test.yml -# ... 
-- hosts: localhost - tasks: - - name: Create pool - local_action: > - bigip_pool - server=lb.mydomain.com - user=admin - password=mysecret - state=present - name=matthite-pool - partition=matthite - lb_method=least_connection_member - slow_ramp_time=120 - - - name: Modify load balancer method - local_action: > - bigip_pool - server=lb.mydomain.com - user=admin - password=mysecret - state=present - name=matthite-pool - partition=matthite - lb_method=round_robin - -- hosts: bigip-test - tasks: - - name: Add pool member - local_action: > - bigip_pool - server=lb.mydomain.com - user=admin - password=mysecret - state=present - name=matthite-pool - partition=matthite - host="{{ ansible_default_ipv4["address"] }}" - port=80 - - - name: Remove pool member from pool - local_action: > - bigip_pool - server=lb.mydomain.com - user=admin - password=mysecret - state=absent - name=matthite-pool - partition=matthite - host="{{ ansible_default_ipv4["address"] }}" - port=80 - -- hosts: localhost - tasks: - - name: Delete pool - local_action: > - bigip_pool - server=lb.mydomain.com - user=admin - password=mysecret - state=absent - name=matthite-pool - partition=matthite - -''' - -try: - import bigsuds -except ImportError: - bigsuds_found = False -else: - bigsuds_found = True - -# =========================================== -# bigip_pool module specific support methods. 
-# - -def bigip_api(bigip, user, password): - api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) - return api - -def pool_exists(api, pool): - # hack to determine if pool exists - result = False - try: - api.LocalLB.Pool.get_object_status(pool_names=[pool]) - result = True - except bigsuds.OperationFailed, e: - if "was not found" in str(e): - result = False - else: - # genuine exception - raise - return result - -def create_pool(api, pool, lb_method): - # create requires lb_method but we don't want to default - # to a value on subsequent runs - if not lb_method: - lb_method = 'round_robin' - lb_method = "LB_METHOD_%s" % lb_method.strip().upper() - api.LocalLB.Pool.create_v2(pool_names=[pool], lb_methods=[lb_method], - members=[[]]) - -def remove_pool(api, pool): - api.LocalLB.Pool.delete_pool(pool_names=[pool]) - -def get_lb_method(api, pool): - lb_method = api.LocalLB.Pool.get_lb_method(pool_names=[pool])[0] - lb_method = lb_method.strip().replace('LB_METHOD_', '').lower() - return lb_method - -def set_lb_method(api, pool, lb_method): - lb_method = "LB_METHOD_%s" % lb_method.strip().upper() - api.LocalLB.Pool.set_lb_method(pool_names=[pool], lb_methods=[lb_method]) - -def get_monitors(api, pool): - result = api.LocalLB.Pool.get_monitor_association(pool_names=[pool])[0]['monitor_rule'] - monitor_type = result['type'].split("MONITOR_RULE_TYPE_")[-1].lower() - quorum = result['quorum'] - monitor_templates = result['monitor_templates'] - return (monitor_type, quorum, monitor_templates) - -def set_monitors(api, pool, monitor_type, quorum, monitor_templates): - monitor_type = "MONITOR_RULE_TYPE_%s" % monitor_type.strip().upper() - monitor_rule = {'type': monitor_type, 'quorum': quorum, 'monitor_templates': monitor_templates} - monitor_association = {'pool_name': pool, 'monitor_rule': monitor_rule} - api.LocalLB.Pool.set_monitor_association(monitor_associations=[monitor_association]) - -def get_slow_ramp_time(api, pool): - result = 
api.LocalLB.Pool.get_slow_ramp_time(pool_names=[pool])[0] - return result - -def set_slow_ramp_time(api, pool, seconds): - api.LocalLB.Pool.set_slow_ramp_time(pool_names=[pool], values=[seconds]) - -def get_action_on_service_down(api, pool): - result = api.LocalLB.Pool.get_action_on_service_down(pool_names=[pool])[0] - result = result.split("SERVICE_DOWN_ACTION_")[-1].lower() - return result - -def set_action_on_service_down(api, pool, action): - action = "SERVICE_DOWN_ACTION_%s" % action.strip().upper() - api.LocalLB.Pool.set_action_on_service_down(pool_names=[pool], actions=[action]) - -def member_exists(api, pool, address, port): - # hack to determine if member exists - result = False - try: - members = [{'address': address, 'port': port}] - api.LocalLB.Pool.get_member_object_status(pool_names=[pool], - members=[members]) - result = True - except bigsuds.OperationFailed, e: - if "was not found" in str(e): - result = False - else: - # genuine exception - raise - return result - -def delete_node_address(api, address): - result = False - try: - api.LocalLB.NodeAddressV2.delete_node_address(nodes=[address]) - result = True - except bigsuds.OperationFailed, e: - if "is referenced by a member of pool" in str(e): - result = False - else: - # genuine exception - raise - return result - -def remove_pool_member(api, pool, address, port): - members = [{'address': address, 'port': port}] - api.LocalLB.Pool.remove_member_v2(pool_names=[pool], members=[members]) - -def add_pool_member(api, pool, address, port): - members = [{'address': address, 'port': port}] - api.LocalLB.Pool.add_member_v2(pool_names=[pool], members=[members]) - -def main(): - lb_method_choices = ['round_robin', 'ratio_member', - 'least_connection_member', 'observed_member', - 'predictive_member', 'ratio_node_address', - 'least_connection_node_address', - 'fastest_node_address', 'observed_node_address', - 'predictive_node_address', 'dynamic_ratio', - 'fastest_app_response', 'least_sessions', - 
'dynamic_ratio_member', 'l3_addr', 'unknown', - 'weighted_least_connection_member', - 'weighted_least_connection_node_address', - 'ratio_session', 'ratio_least_connection_member', - 'ratio_least_connection_node_address'] - - monitor_type_choices = ['and_list', 'm_of_n'] - - service_down_choices = ['none', 'reset', 'drop', 'reselect'] - - module = AnsibleModule( - argument_spec = dict( - server = dict(type='str', required=True), - user = dict(type='str', required=True), - password = dict(type='str', required=True), - state = dict(type='str', default='present', choices=['present', 'absent']), - name = dict(type='str', required=True, aliases=['pool']), - partition = dict(type='str', default='Common'), - lb_method = dict(type='str', choices=lb_method_choices), - monitor_type = dict(type='str', choices=monitor_type_choices), - quorum = dict(type='int'), - monitors = dict(type='list'), - slow_ramp_time = dict(type='int'), - service_down_action = dict(type='str', choices=service_down_choices), - host = dict(type='str', aliases=['address']), - port = dict(type='int') - ), - supports_check_mode=True - ) - - if not bigsuds_found: - module.fail_json(msg="the python bigsuds module is required") - - server = module.params['server'] - user = module.params['user'] - password = module.params['password'] - state = module.params['state'] - name = module.params['name'] - partition = module.params['partition'] - pool = "/%s/%s" % (partition, name) - lb_method = module.params['lb_method'] - if lb_method: - lb_method = lb_method.lower() - monitor_type = module.params['monitor_type'] - if monitor_type: - monitor_type = monitor_type.lower() - quorum = module.params['quorum'] - monitors = module.params['monitors'] - if monitors: - monitors = [] - for monitor in module.params['monitors']: - if "/" not in monitor: - monitors.append("/%s/%s" % (partition, monitor)) - else: - monitors.append(monitor) - slow_ramp_time = module.params['slow_ramp_time'] - service_down_action = 
module.params['service_down_action'] - if service_down_action: - service_down_action = service_down_action.lower() - host = module.params['host'] - address = "/%s/%s" % (partition, host) - port = module.params['port'] - - # sanity check user supplied values - - if (host and not port) or (port and not host): - module.fail_json(msg="both host and port must be supplied") - - if 1 > port > 65535: - module.fail_json(msg="valid ports must be in range 1 - 65535") - - if monitors: - if len(monitors) == 1: - # set default required values for single monitor - quorum = 0 - monitor_type = 'single' - elif len(monitors) > 1: - if not monitor_type: - module.fail_json(msg="monitor_type required for monitors > 1") - if monitor_type == 'm_of_n' and not quorum: - module.fail_json(msg="quorum value required for monitor_type m_of_n") - if monitor_type != 'm_of_n': - quorum = 0 - elif monitor_type: - # no monitors specified but monitor_type exists - module.fail_json(msg="monitor_type require monitors parameter") - elif quorum is not None: - # no monitors specified but quorum exists - module.fail_json(msg="quorum requires monitors parameter") - - try: - api = bigip_api(server, user, password) - result = {'changed': False} # default - - if state == 'absent': - if host and port and pool: - # member removal takes precedent - if pool_exists(api, pool) and member_exists(api, pool, address, port): - if not module.check_mode: - remove_pool_member(api, pool, address, port) - deleted = delete_node_address(api, address) - result = {'changed': True, 'deleted': deleted} - else: - result = {'changed': True} - elif pool_exists(api, pool): - # no host/port supplied, must be pool removal - if not module.check_mode: - # hack to handle concurrent runs of module - # pool might be gone before we actually remove it - try: - remove_pool(api, pool) - result = {'changed': True} - except bigsuds.OperationFailed, e: - if "was not found" in str(e): - result = {'changed': False} - else: - # genuine exception - 
raise - else: - # check-mode return value - result = {'changed': True} - - elif state == 'present': - update = False - if not pool_exists(api, pool): - # pool does not exist -- need to create it - if not module.check_mode: - # a bit of a hack to handle concurrent runs of this module. - # even though we've checked the pool doesn't exist, - # it may exist by the time we run create_pool(). - # this catches the exception and does something smart - # about it! - try: - create_pool(api, pool, lb_method) - result = {'changed': True} - except bigsuds.OperationFailed, e: - if "already exists" in str(e): - update = True - else: - # genuine exception - raise - else: - if monitors: - set_monitors(api, pool, monitor_type, quorum, monitors) - if slow_ramp_time: - set_slow_ramp_time(api, pool, slow_ramp_time) - if service_down_action: - set_action_on_service_down(api, pool, service_down_action) - if host and port: - add_pool_member(api, pool, address, port) - else: - # check-mode return value - result = {'changed': True} - else: - # pool exists -- potentially modify attributes - update = True - - if update: - if lb_method and lb_method != get_lb_method(api, pool): - if not module.check_mode: - set_lb_method(api, pool, lb_method) - result = {'changed': True} - if monitors: - t_monitor_type, t_quorum, t_monitor_templates = get_monitors(api, pool) - if (t_monitor_type != monitor_type) or (t_quorum != quorum) or (set(t_monitor_templates) != set(monitors)): - if not module.check_mode: - set_monitors(api, pool, monitor_type, quorum, monitors) - result = {'changed': True} - if slow_ramp_time and slow_ramp_time != get_slow_ramp_time(api, pool): - if not module.check_mode: - set_slow_ramp_time(api, pool, slow_ramp_time) - result = {'changed': True} - if service_down_action and service_down_action != get_action_on_service_down(api, pool): - if not module.check_mode: - set_action_on_service_down(api, pool, service_down_action) - result = {'changed': True} - if (host and port) and not 
member_exists(api, pool, address, port): - if not module.check_mode: - add_pool_member(api, pool, address, port) - result = {'changed': True} - - except Exception, e: - module.fail_json(msg="received exception: %s" % e) - - module.exit_json(**result) - -# import module snippets -from ansible.module_utils.basic import * -main() - diff --git a/library/net_infrastructure/bigip_pool_member b/library/net_infrastructure/bigip_pool_member deleted file mode 100644 index 5aef9f0ae9..0000000000 --- a/library/net_infrastructure/bigip_pool_member +++ /dev/null @@ -1,378 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Matt Hite -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -DOCUMENTATION = ''' ---- -module: bigip_pool_member -short_description: "Manages F5 BIG-IP LTM pool members" -description: - - "Manages F5 BIG-IP LTM pool members via iControl SOAP API" -version_added: "1.4" -author: Matt Hite -notes: - - "Requires BIG-IP software version >= 11" - - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" - - "Best run as a local_action in your playbook" - - "Supersedes bigip_pool for managing pool members" - -requirements: - - bigsuds -options: - server: - description: - - BIG-IP host - required: true - default: null - choices: [] - aliases: [] - user: - description: - - BIG-IP username - required: true - default: null - choices: [] - aliases: [] - password: - description: - - BIG-IP password - required: true - default: null - choices: [] - aliases: [] - state: - description: - - Pool member state - required: true - default: present - choices: ['present', 'absent'] - aliases: [] - pool: - description: - - Pool name. This pool must exist. - required: true - default: null - choices: [] - aliases: [] - partition: - description: - - Partition - required: false - default: 'Common' - choices: [] - aliases: [] - host: - description: - - Pool member IP - required: true - default: null - choices: [] - aliases: ['address', 'name'] - port: - description: - - Pool member port - required: true - default: null - choices: [] - aliases: [] - connection_limit: - description: - - Pool member connection limit. Setting this to 0 disables the limit. - required: false - default: null - choices: [] - aliases: [] - description: - description: - - Pool member description - required: false - default: null - choices: [] - aliases: [] - rate_limit: - description: - - Pool member rate limit (connections-per-second). Setting this to 0 disables the limit. - required: false - default: null - choices: [] - aliases: [] - ratio: - description: - - Pool member ratio weight. Valid values range from 1 through 100. 
New pool members -- unless overriden with this value -- default to 1. - required: false - default: null - choices: [] - aliases: [] -''' - -EXAMPLES = ''' - -## playbook task examples: - ---- -# file bigip-test.yml -# ... -- hosts: bigip-test - tasks: - - name: Add pool member - local_action: > - bigip_pool_member - server=lb.mydomain.com - user=admin - password=mysecret - state=present - pool=matthite-pool - partition=matthite - host="{{ ansible_default_ipv4["address"] }}" - port=80 - description="web server" - connection_limit=100 - rate_limit=50 - ratio=2 - - - name: Modify pool member ratio and description - local_action: > - bigip_pool_member - server=lb.mydomain.com - user=admin - password=mysecret - state=present - pool=matthite-pool - partition=matthite - host="{{ ansible_default_ipv4["address"] }}" - port=80 - ratio=1 - description="nginx server" - - - name: Remove pool member from pool - local_action: > - bigip_pool_member - server=lb.mydomain.com - user=admin - password=mysecret - state=absent - pool=matthite-pool - partition=matthite - host="{{ ansible_default_ipv4["address"] }}" - port=80 - -''' - -try: - import bigsuds -except ImportError: - bigsuds_found = False -else: - bigsuds_found = True - -# =========================================== -# bigip_pool_member module specific support methods. 
-# - -def bigip_api(bigip, user, password): - api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) - return api - -def pool_exists(api, pool): - # hack to determine if pool exists - result = False - try: - api.LocalLB.Pool.get_object_status(pool_names=[pool]) - result = True - except bigsuds.OperationFailed, e: - if "was not found" in str(e): - result = False - else: - # genuine exception - raise - return result - -def member_exists(api, pool, address, port): - # hack to determine if member exists - result = False - try: - members = [{'address': address, 'port': port}] - api.LocalLB.Pool.get_member_object_status(pool_names=[pool], - members=[members]) - result = True - except bigsuds.OperationFailed, e: - if "was not found" in str(e): - result = False - else: - # genuine exception - raise - return result - -def delete_node_address(api, address): - result = False - try: - api.LocalLB.NodeAddressV2.delete_node_address(nodes=[address]) - result = True - except bigsuds.OperationFailed, e: - if "is referenced by a member of pool" in str(e): - result = False - else: - # genuine exception - raise - return result - -def remove_pool_member(api, pool, address, port): - members = [{'address': address, 'port': port}] - api.LocalLB.Pool.remove_member_v2(pool_names=[pool], members=[members]) - -def add_pool_member(api, pool, address, port): - members = [{'address': address, 'port': port}] - api.LocalLB.Pool.add_member_v2(pool_names=[pool], members=[members]) - -def get_connection_limit(api, pool, address, port): - members = [{'address': address, 'port': port}] - result = api.LocalLB.Pool.get_member_connection_limit(pool_names=[pool], members=[members])[0][0] - return result - -def set_connection_limit(api, pool, address, port, limit): - members = [{'address': address, 'port': port}] - api.LocalLB.Pool.set_member_connection_limit(pool_names=[pool], members=[members], limits=[[limit]]) - -def get_description(api, pool, address, port): - members = [{'address': 
address, 'port': port}] - result = api.LocalLB.Pool.get_member_description(pool_names=[pool], members=[members])[0][0] - return result - -def set_description(api, pool, address, port, description): - members = [{'address': address, 'port': port}] - api.LocalLB.Pool.set_member_description(pool_names=[pool], members=[members], descriptions=[[description]]) - -def get_rate_limit(api, pool, address, port): - members = [{'address': address, 'port': port}] - result = api.LocalLB.Pool.get_member_rate_limit(pool_names=[pool], members=[members])[0][0] - return result - -def set_rate_limit(api, pool, address, port, limit): - members = [{'address': address, 'port': port}] - api.LocalLB.Pool.set_member_rate_limit(pool_names=[pool], members=[members], limits=[[limit]]) - -def get_ratio(api, pool, address, port): - members = [{'address': address, 'port': port}] - result = api.LocalLB.Pool.get_member_ratio(pool_names=[pool], members=[members])[0][0] - return result - -def set_ratio(api, pool, address, port, ratio): - members = [{'address': address, 'port': port}] - api.LocalLB.Pool.set_member_ratio(pool_names=[pool], members=[members], ratios=[[ratio]]) - -def main(): - module = AnsibleModule( - argument_spec = dict( - server = dict(type='str', required=True), - user = dict(type='str', required=True), - password = dict(type='str', required=True), - state = dict(type='str', default='present', choices=['present', 'absent']), - pool = dict(type='str', required=True), - partition = dict(type='str', default='Common'), - host = dict(type='str', required=True, aliases=['address', 'name']), - port = dict(type='int', required=True), - connection_limit = dict(type='int'), - description = dict(type='str'), - rate_limit = dict(type='int'), - ratio = dict(type='int') - ), - supports_check_mode=True - ) - - if not bigsuds_found: - module.fail_json(msg="the python bigsuds module is required") - - server = module.params['server'] - user = module.params['user'] - password = 
module.params['password'] - state = module.params['state'] - partition = module.params['partition'] - pool = "/%s/%s" % (partition, module.params['pool']) - connection_limit = module.params['connection_limit'] - description = module.params['description'] - rate_limit = module.params['rate_limit'] - ratio = module.params['ratio'] - host = module.params['host'] - address = "/%s/%s" % (partition, host) - port = module.params['port'] - - # sanity check user supplied values - - if (host and not port) or (port and not host): - module.fail_json(msg="both host and port must be supplied") - - if 1 > port > 65535: - module.fail_json(msg="valid ports must be in range 1 - 65535") - - try: - api = bigip_api(server, user, password) - if not pool_exists(api, pool): - module.fail_json(msg="pool %s does not exist" % pool) - result = {'changed': False} # default - - if state == 'absent': - if member_exists(api, pool, address, port): - if not module.check_mode: - remove_pool_member(api, pool, address, port) - deleted = delete_node_address(api, address) - result = {'changed': True, 'deleted': deleted} - else: - result = {'changed': True} - - elif state == 'present': - if not member_exists(api, pool, address, port): - if not module.check_mode: - add_pool_member(api, pool, address, port) - if connection_limit is not None: - set_connection_limit(api, pool, address, port, connection_limit) - if description is not None: - set_description(api, pool, address, port, description) - if rate_limit is not None: - set_rate_limit(api, pool, address, port, rate_limit) - if ratio is not None: - set_ratio(api, pool, address, port, ratio) - result = {'changed': True} - else: - # pool member exists -- potentially modify attributes - if connection_limit is not None and connection_limit != get_connection_limit(api, pool, address, port): - if not module.check_mode: - set_connection_limit(api, pool, address, port, connection_limit) - result = {'changed': True} - if description is not None and description != 
get_description(api, pool, address, port): - if not module.check_mode: - set_description(api, pool, address, port, description) - result = {'changed': True} - if rate_limit is not None and rate_limit != get_rate_limit(api, pool, address, port): - if not module.check_mode: - set_rate_limit(api, pool, address, port, rate_limit) - result = {'changed': True} - if ratio is not None and ratio != get_ratio(api, pool, address, port): - if not module.check_mode: - set_ratio(api, pool, address, port, ratio) - result = {'changed': True} - - except Exception, e: - module.fail_json(msg="received exception: %s" % e) - - module.exit_json(**result) - -# import module snippets -from ansible.module_utils.basic import * -main() - diff --git a/library/net_infrastructure/dnsimple b/library/net_infrastructure/dnsimple deleted file mode 100755 index 19b167dee1..0000000000 --- a/library/net_infrastructure/dnsimple +++ /dev/null @@ -1,302 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: dnsimple -version_added: "1.6" -short_description: Interface with dnsimple.com (a DNS hosting service). -description: - - "Manages domains and records via the DNSimple API, see the docs: U(http://developer.dnsimple.com/)" -options: - account_email: - description: - - "Account email. If omitted, the env variables DNSIMPLE_EMAIL and DNSIMPLE_API_TOKEN will be looked for. 
If those aren't found, a C(.dnsimple) file will be looked for, see: U(https://github.com/mikemaccana/dnsimple-python#getting-started)" - required: false - default: null - - account_api_token: - description: - - Account API token. See I(account_email) for info. - required: false - default: null - - domain: - description: - - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNSimple. If omitted, a list of domains will be returned. - - If domain is present but the domain doesn't exist, it will be created. - required: false - default: null - - record: - description: - - Record to add, if blank a record for the domain will be created, supports the wildcard (*) - required: false - default: null - - record_ids: - description: - - List of records to ensure they either exist or don't exist - required: false - default: null - - type: - description: - - The type of DNS record to create - required: false - choices: [ 'A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL' ] - default: null - - ttl: - description: - - The TTL to give the new record - required: false - default: 3600 (one hour) - - value: - description: - - Record value - - "Must be specified when trying to ensure a record exists" - required: false - default: null - - priority: - description: - - Record priority - required: false - default: null - - state: - description: - - whether the record should exist or not - required: false - choices: [ 'present', 'absent' ] - default: null - - solo: - description: - - Whether the record should be the only one for that record type and record name. 
Only use with state=present on a record - required: false - default: null - -requirements: [ dnsimple ] -author: Alex Coomans -''' - -EXAMPLES = ''' -# authenicate using email and API token -- local_action: dnsimple account_email=test@example.com account_api_token=dummyapitoken - -# fetch all domains -- local_action dnsimple - register: domains - -# fetch my.com domain records -- local_action: dnsimple domain=my.com state=present - register: records - -# delete a domain -- local_action: dnsimple domain=my.com state=absent - -# create a test.my.com A record to point to 127.0.0.01 -- local_action: dnsimple domain=my.com record=test type=A value=127.0.0.1 - register: record - -# and then delete it -- local_action: dnsimple domain=my.com record_ids={{ record['id'] }} - -# create a my.com CNAME record to example.com -- local_action: dnsimple domain=my.com record= type=CNAME value=example.com state=present - -# change it's ttl -- local_action: dnsimple domain=my.com record= type=CNAME value=example.com ttl=600 state=present - -# and delete the record -- local_action: dnsimpledomain=my.com record= type=CNAME value=example.com state=absent - -''' - -import os -try: - from dnsimple import DNSimple - from dnsimple.dnsimple import DNSimpleException -except ImportError: - print "failed=True msg='dnsimple required for this module'" - sys.exit(1) - -def main(): - module = AnsibleModule( - argument_spec = dict( - account_email = dict(required=False), - account_api_token = dict(required=False, no_log=True), - domain = dict(required=False), - record = dict(required=False), - record_ids = dict(required=False, type='list'), - type = dict(required=False, choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL']), - ttl = dict(required=False, default=3600, type='int'), - value = dict(required=False), - priority = dict(required=False, type='int'), - state = dict(required=False, choices=['present', 'absent']), - solo = 
dict(required=False, type='bool'), - ), - required_together = ( - ['record', 'value'] - ), - supports_check_mode = True, - ) - - account_email = module.params.get('account_email') - account_api_token = module.params.get('account_api_token') - domain = module.params.get('domain') - record = module.params.get('record') - record_ids = module.params.get('record_ids') - record_type = module.params.get('type') - ttl = module.params.get('ttl') - value = module.params.get('value') - priority = module.params.get('priority') - state = module.params.get('state') - is_solo = module.params.get('solo') - - if account_email and account_api_token: - client = DNSimple(email=account_email, api_token=account_api_token) - elif os.environ.get('DNSIMPLE_EMAIL') and os.environ.get('DNSIMPLE_API_TOKEN'): - client = DNSimple(email=os.environ.get('DNSIMPLE_EMAIL'), api_token=os.environ.get('DNSIMPLE_API_TOKEN')) - else: - client = DNSimple() - - try: - # Let's figure out what operation we want to do - - # No domain, return a list - if not domain: - domains = client.domains() - module.exit_json(changed=False, result=[d['domain'] for d in domains]) - - # Domain & No record - if domain and record is None and not record_ids: - domains = [d['domain'] for d in client.domains()] - if domain.isdigit(): - dr = next((d for d in domains if d['id'] == int(domain)), None) - else: - dr = next((d for d in domains if d['name'] == domain), None) - if state == 'present': - if dr: - module.exit_json(changed=False, result=dr) - else: - if module.check_mode: - module.exit_json(changed=True) - else: - module.exit_json(changed=True, result=client.add_domain(domain)['domain']) - elif state == 'absent': - if dr: - if not module.check_mode: - client.delete(domain) - module.exit_json(changed=True) - else: - module.exit_json(changed=False) - else: - module.fail_json(msg="'%s' is an unknown value for the state argument" % state) - - # need the not none check since record could be an empty string - if domain and record 
is not None: - records = [r['record'] for r in client.records(str(domain))] - - if not record_type: - module.fail_json(msg="Missing the record type") - - if not value: - module.fail_json(msg="Missing the record value") - - rr = next((r for r in records if r['name'] == record and r['record_type'] == record_type and r['content'] == value), None) - - if state == 'present': - changed = False - if is_solo: - # delete any records that have the same name and record type - same_type = [r['id'] for r in records if r['name'] == record and r['record_type'] == record_type] - if rr: - same_type = [rid for rid in same_type if rid != rr['id']] - if same_type: - if not module.check_mode: - for rid in same_type: - client.delete_record(str(domain), rid) - changed = True - if rr: - # check if we need to update - if rr['ttl'] != ttl or rr['prio'] != priority: - data = {} - if ttl: data['ttl'] = ttl - if priority: data['prio'] = priority - if module.check_mode: - module.exit_json(changed=True) - else: - module.exit_json(changed=True, result=client.update_record(str(domain), str(rr['id']), data)['record']) - else: - module.exit_json(changed=changed, result=rr) - else: - # create it - data = { - 'name': record, - 'record_type': record_type, - 'content': value, - } - if ttl: data['ttl'] = ttl - if priority: data['prio'] = priority - if module.check_mode: - module.exit_json(changed=True) - else: - module.exit_json(changed=True, result=client.add_record(str(domain), data)['record']) - elif state == 'absent': - if rr: - if not module.check_mode: - client.delete_record(str(domain), rr['id']) - module.exit_json(changed=True) - else: - module.exit_json(changed=False) - else: - module.fail_json(msg="'%s' is an unknown value for the state argument" % state) - - # Make sure these record_ids either all exist or none - if domain and record_ids: - current_records = [str(r['record']['id']) for r in client.records(str(domain))] - wanted_records = [str(r) for r in record_ids] - if state == 'present': - 
difference = list(set(wanted_records) - set(current_records)) - if difference: - module.fail_json(msg="Missing the following records: %s" % difference) - else: - module.exit_json(changed=False) - elif state == 'absent': - difference = list(set(wanted_records) & set(current_records)) - if difference: - if not module.check_mode: - for rid in difference: - client.delete_record(str(domain), rid) - module.exit_json(changed=True) - else: - module.exit_json(changed=False) - else: - module.fail_json(msg="'%s' is an unknown value for the state argument" % state) - - except DNSimpleException, e: - module.fail_json(msg="Unable to contact DNSimple: %s" % e.message) - - module.fail_json(msg="Unknown what you wanted me to do") - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/net_infrastructure/dnsmadeeasy b/library/net_infrastructure/dnsmadeeasy deleted file mode 100644 index 148e25a501..0000000000 --- a/library/net_infrastructure/dnsmadeeasy +++ /dev/null @@ -1,329 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: dnsmadeeasy -version_added: "1.3" -short_description: Interface with dnsmadeeasy.com (a DNS hosting service). -description: - - "Manages DNS records via the v2 REST API of the DNS Made Easy service. 
It handles records only; there is no manipulation of domains or monitor/account support yet. See: U(http://www.dnsmadeeasy.com/services/rest-api/)" -options: - account_key: - description: - - Accout API Key. - required: true - default: null - - account_secret: - description: - - Accout Secret Key. - required: true - default: null - - domain: - description: - - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNS Made Easy (e.g. "839989") for faster resolution. - required: true - default: null - - record_name: - description: - - Record name to get/create/delete/update. If record_name is not specified; all records for the domain will be returned in "result" regardless of the state argument. - required: false - default: null - - record_type: - description: - - Record type. - required: false - choices: [ 'A', 'AAAA', 'CNAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT' ] - default: null - - record_value: - description: - - "Record value. HTTPRED: , MX: , NS: , PTR: , SRV: , TXT: " - - "If record_value is not specified; no changes will be made and the record will be returned in 'result' (in other words, this module can be used to fetch a record's current id, type, and ttl)" - required: false - default: null - - record_ttl: - description: - - record's "Time to live". Number of seconds the record remains cached in DNS servers. - required: false - default: 1800 - - state: - description: - - whether the record should exist or not - required: true - choices: [ 'present', 'absent' ] - default: null - - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - required: false - default: 'yes' - choices: ['yes', 'no'] - version_added: 1.5.1 - -notes: - - The DNS Made Easy service requires that machines interacting with the API have the proper time and timezone set. 
Be sure you are within a few seconds of actual time by using NTP. - - This module returns record(s) in the "result" element when 'state' is set to 'present'. This value can be be registered and used in your playbooks. - -requirements: [ urllib, urllib2, hashlib, hmac ] -author: Brice Burgess -''' - -EXAMPLES = ''' -# fetch my.com domain records -- dnsmadeeasy: account_key=key account_secret=secret domain=my.com state=present - register: response - -# create / ensure the presence of a record -- dnsmadeeasy: account_key=key account_secret=secret domain=my.com state=present record_name="test" record_type="A" record_value="127.0.0.1" - -# update the previously created record -- dnsmadeeasy: account_key=key account_secret=secret domain=my.com state=present record_name="test" record_value="192.168.0.1" - -# fetch a specific record -- dnsmadeeasy: account_key=key account_secret=secret domain=my.com state=present record_name="test" - register: response - -# delete a record / ensure it is absent -- dnsmadeeasy: account_key=key account_secret=secret domain=my.com state=absent record_name="test" -''' - -# ============================================ -# DNSMadeEasy module specific support methods. -# - -IMPORT_ERROR = None -try: - import json - from time import strftime, gmtime - import hashlib - import hmac -except ImportError, e: - IMPORT_ERROR = str(e) - -class DME2: - - def __init__(self, apikey, secret, domain, module): - self.module = module - - self.api = apikey - self.secret = secret - self.baseurl = 'https://api.dnsmadeeasy.com/V2.0/' - self.domain = str(domain) - self.domain_map = None # ["domain_name"] => ID - self.record_map = None # ["record_name"] => ID - self.records = None # ["record_ID"] => - - # Lookup the domain ID if passed as a domain name vs. 
ID - if not self.domain.isdigit(): - self.domain = self.getDomainByName(self.domain)['id'] - - self.record_url = 'dns/managed/' + str(self.domain) + '/records' - - def _headers(self): - currTime = self._get_date() - hashstring = self._create_hash(currTime) - headers = {'x-dnsme-apiKey': self.api, - 'x-dnsme-hmac': hashstring, - 'x-dnsme-requestDate': currTime, - 'content-type': 'application/json'} - return headers - - def _get_date(self): - return strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime()) - - def _create_hash(self, rightnow): - return hmac.new(self.secret.encode(), rightnow.encode(), hashlib.sha1).hexdigest() - - def query(self, resource, method, data=None): - url = self.baseurl + resource - if data and not isinstance(data, basestring): - data = urllib.urlencode(data) - - response, info = fetch_url(self.module, url, data=data, method=method, headers=self._headers()) - if info['status'] not in (200, 201, 204): - self.module.fail_json(msg="%s returned %s, with body: %s" % (url, info['status'], info['msg'])) - - try: - return json.load(response) - except Exception, e: - return {} - - def getDomain(self, domain_id): - if not self.domain_map: - self._instMap('domain') - - return self.domains.get(domain_id, False) - - def getDomainByName(self, domain_name): - if not self.domain_map: - self._instMap('domain') - - return self.getDomain(self.domain_map.get(domain_name, 0)) - - def getDomains(self): - return self.query('dns/managed', 'GET')['data'] - - def getRecord(self, record_id): - if not self.record_map: - self._instMap('record') - - return self.records.get(record_id, False) - - def getRecordByName(self, record_name): - if not self.record_map: - self._instMap('record') - - return self.getRecord(self.record_map.get(record_name, 0)) - - def getRecords(self): - return self.query(self.record_url, 'GET')['data'] - - def _instMap(self, type): - #@TODO cache this call so it's executed only once per ansible execution - map = {} - results = {} - - # iterate over e.g. 
self.getDomains() || self.getRecords() - for result in getattr(self, 'get' + type.title() + 's')(): - - map[result['name']] = result['id'] - results[result['id']] = result - - # e.g. self.domain_map || self.record_map - setattr(self, type + '_map', map) - setattr(self, type + 's', results) # e.g. self.domains || self.records - - def prepareRecord(self, data): - return json.dumps(data, separators=(',', ':')) - - def createRecord(self, data): - #@TODO update the cache w/ resultant record + id when impleneted - return self.query(self.record_url, 'POST', data) - - def updateRecord(self, record_id, data): - #@TODO update the cache w/ resultant record + id when impleneted - return self.query(self.record_url + '/' + str(record_id), 'PUT', data) - - def deleteRecord(self, record_id): - #@TODO remove record from the cache when impleneted - return self.query(self.record_url + '/' + str(record_id), 'DELETE') - - -# =========================================== -# Module execution. -# - -def main(): - - module = AnsibleModule( - argument_spec=dict( - account_key=dict(required=True), - account_secret=dict(required=True, no_log=True), - domain=dict(required=True), - state=dict(required=True, choices=['present', 'absent']), - record_name=dict(required=False), - record_type=dict(required=False, choices=[ - 'A', 'AAAA', 'CNAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT']), - record_value=dict(required=False), - record_ttl=dict(required=False, default=1800, type='int'), - validate_certs = dict(default='yes', type='bool'), - ), - required_together=( - ['record_value', 'record_ttl', 'record_type'] - ) - ) - - if IMPORT_ERROR: - module.fail_json(msg="Import Error: " + IMPORT_ERROR) - - DME = DME2(module.params["account_key"], module.params[ - "account_secret"], module.params["domain"], module) - state = module.params["state"] - record_name = module.params["record_name"] - - # Follow Keyword Controlled Behavior - if not record_name: - domain_records = DME.getRecords() - if not 
domain_records: - module.fail_json( - msg="The requested domain name is not accessible with this api_key; try using its ID if known.") - module.exit_json(changed=False, result=domain_records) - - # Fetch existing record + Build new one - current_record = DME.getRecordByName(record_name) - new_record = {'name': record_name} - for i in ["record_value", "record_type", "record_ttl"]: - if module.params[i]: - new_record[i[len("record_"):]] = module.params[i] - - # Compare new record against existing one - changed = False - if current_record: - for i in new_record: - if str(current_record[i]) != str(new_record[i]): - changed = True - new_record['id'] = str(current_record['id']) - - # Follow Keyword Controlled Behavior - if state == 'present': - # return the record if no value is specified - if not "value" in new_record: - if not current_record: - module.fail_json( - msg="A record with name '%s' does not exist for domain '%s.'" % (record_name, domain)) - module.exit_json(changed=False, result=current_record) - - # create record as it does not exist - if not current_record: - record = DME.createRecord(DME.prepareRecord(new_record)) - module.exit_json(changed=True, result=record) - - # update the record - if changed: - DME.updateRecord( - current_record['id'], DME.prepareRecord(new_record)) - module.exit_json(changed=True, result=new_record) - - # return the record (no changes) - module.exit_json(changed=False, result=current_record) - - elif state == 'absent': - # delete the record if it exists - if current_record: - DME.deleteRecord(current_record['id']) - module.exit_json(changed=True) - - # record does not exist, return w/o change. 
- module.exit_json(changed=False) - - else: - module.fail_json( - msg="'%s' is an unknown value for the state argument" % state) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * - -main() diff --git a/library/net_infrastructure/lldp b/library/net_infrastructure/lldp deleted file mode 100755 index 6b8836852f..0000000000 --- a/library/net_infrastructure/lldp +++ /dev/null @@ -1,83 +0,0 @@ -#!/usr/bin/python -tt -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -import subprocess - -DOCUMENTATION = ''' ---- -module: lldp -version_added: 1.6 -short_description: get details reported by lldp -description: - - Reads data out of lldpctl -options: {} -author: Andy Hill -notes: - - Requires lldpd running and lldp enabled on switches -''' - -EXAMPLES = ''' -# Retrieve switch/port information - - name: Gather information from lldp - lldp: - - - name: Print each switch/port - debug: msg="{{ lldp[item]['chassis']['name'] }} / {{ lldp[item]['port']['ifalias'] }} - with_items: lldp.keys() - -# TASK: [Print each switch/port] *********************************************************** -# ok: [10.13.0.22] => (item=eth2) => {"item": "eth2", "msg": "switch1.example.com / Gi0/24"} -# ok: [10.13.0.22] => (item=eth1) => {"item": "eth1", "msg": "switch2.example.com / Gi0/3"} -# ok: [10.13.0.22] => (item=eth0) => {"item": "eth0", "msg": "switch3.example.com / Gi0/3"} - -''' - -def gather_lldp(): - cmd = ['lldpctl', '-f', 'keyvalue'] - proc = subprocess.Popen(cmd, stdout=subprocess.PIPE) - (output, err) = proc.communicate() - if output: - output_dict = {} - lldp_entries = output.split("\n") - - for entry in lldp_entries: - if entry: - path, value = entry.strip().split("=", 1) - path = path.split(".") - path_components, final = path[:-1], path[-1] - - current_dict = output_dict - for path_component in path_components: - current_dict[path_component] = current_dict.get(path_component, {}) - current_dict = current_dict[path_component] - current_dict[final] = value - return output_dict - - -def main(): - module = AnsibleModule({}) - - lldp_output = gather_lldp() - try: - data = {'lldp': lldp_output['lldp']} - module.exit_json(ansible_facts=data) - except TypeError: - module.fail_json(msg="lldpctl command failed. 
is lldpd running?") - -# import module snippets -from ansible.module_utils.basic import * -main() - diff --git a/library/net_infrastructure/netscaler b/library/net_infrastructure/netscaler deleted file mode 100644 index de3c8fc242..0000000000 --- a/library/net_infrastructure/netscaler +++ /dev/null @@ -1,190 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -""" -Ansible module to manage Citrix NetScaler entities -(c) 2013, Nandor Sivok - -This file is part of Ansible - -Ansible is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -Ansible is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with Ansible. If not, see . -""" - -DOCUMENTATION = ''' ---- -module: netscaler -version_added: "1.1" -short_description: Manages Citrix NetScaler entities -description: - - Manages Citrix NetScaler server and service entities. 
-options: - nsc_host: - description: - - hostname or ip of your netscaler - required: true - default: null - aliases: [] - nsc_protocol: - description: - - protocol used to access netscaler - required: false - default: https - aliases: [] - user: - description: - - username - required: true - default: null - aliases: [] - password: - description: - - password - required: true - default: null - aliases: [] - action: - description: - - the action you want to perform on the entity - required: false - default: disable - choices: ["enable", "disable"] - aliases: [] - name: - description: - - name of the entity - required: true - default: hostname - aliases: [] - type: - description: - - type of the entity - required: false - default: server - choices: ["server", "service"] - aliases: [] - validate_certs: - description: - - If C(no), SSL certificates for the target url will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - required: false - default: 'yes' - choices: ['yes', 'no'] - -requirements: [ "urllib", "urllib2" ] -author: Nandor Sivok -''' - -EXAMPLES = ''' -# Disable the server -ansible host -m netscaler -a "nsc_host=nsc.example.com user=apiuser password=apipass" - -# Enable the server -ansible host -m netscaler -a "nsc_host=nsc.example.com user=apiuser password=apipass action=enable" - -# Disable the service local:8080 -ansible host -m netscaler -a "nsc_host=nsc.example.com user=apiuser password=apipass name=local:8080 type=service action=disable" -''' - - -import json -import base64 -import socket - - -class netscaler(object): - - _nitro_base_url = '/nitro/v1/' - - def __init__(self, module): - self.module = module - - def http_request(self, api_endpoint, data_json={}): - request_url = self._nsc_protocol + '://' + self._nsc_host + self._nitro_base_url + api_endpoint - - data_json = urllib.urlencode(data_json) - if not len(data_json): - data_json = None - - auth = base64.encodestring('%s:%s' % 
(self._nsc_user, self._nsc_pass)).replace('\n', '').strip() - headers = { - 'Authorization': 'Basic %s' % auth, - 'Content-Type' : 'application/x-www-form-urlencoded', - } - - response, info = fetch_url(self.module, request_url, data=data_json, headers=headers) - - return json.load(response) - - def prepare_request(self, action): - resp = self.http_request( - 'config', - { - "object": - { - "params": {"action": action}, - self._type: {"name": self._name} - } - } - ) - - return resp - - -def core(module): - n = netscaler(module) - n._nsc_host = module.params.get('nsc_host') - n._nsc_user = module.params.get('user') - n._nsc_pass = module.params.get('password') - n._nsc_protocol = module.params.get('nsc_protocol') - n._name = module.params.get('name') - n._type = module.params.get('type') - action = module.params.get('action') - - r = n.prepare_request(action) - - return r['errorcode'], r - - -def main(): - - module = AnsibleModule( - argument_spec = dict( - nsc_host = dict(required=True), - nsc_protocol = dict(default='https'), - user = dict(required=True), - password = dict(required=True), - action = dict(default='enable', choices=['enable','disable']), - name = dict(default=socket.gethostname()), - type = dict(default='server', choices=['service', 'server']), - validate_certs=dict(default='yes', type='bool'), - ) - ) - - rc = 0 - try: - rc, result = core(module) - except Exception, e: - module.fail_json(msg=str(e)) - - if rc != 0: - module.fail_json(rc=rc, msg=result) - else: - result['changed'] = True - module.exit_json(**result) - - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * -main() diff --git a/library/net_infrastructure/openvswitch_bridge b/library/net_infrastructure/openvswitch_bridge deleted file mode 100644 index 551ca707a2..0000000000 --- a/library/net_infrastructure/openvswitch_bridge +++ /dev/null @@ -1,135 +0,0 @@ -#!/usr/bin/python -#coding: utf-8 -*- - -# (c) 2013, David Stygstra -# -# 
This file is part of Ansible -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . - -DOCUMENTATION = ''' ---- -module: openvswitch_bridge -version_added: 1.4 -author: David Stygstra -short_description: Manage Open vSwitch bridges -requirements: [ ovs-vsctl ] -description: - - Manage Open vSwitch bridges -options: - bridge: - required: true - description: - - Name of bridge to manage - state: - required: false - default: "present" - choices: [ present, absent ] - description: - - Whether the bridge should exist - timeout: - required: false - default: 5 - description: - - How long to wait for ovs-vswitchd to respond -''' - -EXAMPLES = ''' -# Create a bridge named br-int -- openvswitch_bridge: bridge=br-int state=present -''' - - -class OVSBridge(object): - def __init__(self, module): - self.module = module - self.bridge = module.params['bridge'] - self.state = module.params['state'] - self.timeout = module.params['timeout'] - - def _vsctl(self, command): - '''Run ovs-vsctl command''' - return self.module.run_command(['ovs-vsctl', '-t', str(self.timeout)] + command) - - def exists(self): - '''Check if the bridge already exists''' - rc, _, err = self._vsctl(['br-exists', self.bridge]) - if rc == 0: # See ovs-vsctl(8) for status codes - return True - if rc == 2: - return False - raise Exception(err) - - def add(self): - '''Create the bridge''' - rc, _, err = self._vsctl(['add-br', self.bridge]) - if rc != 0: - raise 
Exception(err) - - def delete(self): - '''Delete the bridge''' - rc, _, err = self._vsctl(['del-br', self.bridge]) - if rc != 0: - raise Exception(err) - - def check(self): - '''Run check mode''' - try: - if self.state == 'absent' and self.exists(): - changed = True - elif self.state == 'present' and not self.exists(): - changed = True - else: - changed = False - except Exception, e: - self.module.fail_json(msg=str(e)) - self.module.exit_json(changed=changed) - - def run(self): - '''Make the necessary changes''' - changed = False - try: - if self.state == 'absent': - if self.exists(): - self.delete() - changed = True - elif self.state == 'present': - if not self.exists(): - self.add() - changed = True - except Exception, e: - self.module.fail_json(msg=str(e)) - self.module.exit_json(changed=changed) - - -def main(): - module = AnsibleModule( - argument_spec={ - 'bridge': {'required': True}, - 'state': {'default': 'present', 'choices': ['present', 'absent']}, - 'timeout': {'default': 5, 'type': 'int'} - }, - supports_check_mode=True, - ) - - br = OVSBridge(module) - if module.check_mode: - br.check() - else: - br.run() - - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/net_infrastructure/openvswitch_port b/library/net_infrastructure/openvswitch_port deleted file mode 100644 index 66391937d1..0000000000 --- a/library/net_infrastructure/openvswitch_port +++ /dev/null @@ -1,139 +0,0 @@ -#!/usr/bin/python -#coding: utf-8 -*- - -# (c) 2013, David Stygstra -# -# This file is part of Ansible -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . - -DOCUMENTATION = ''' ---- -module: openvswitch_port -version_added: 1.4 -author: David Stygstra -short_description: Manage Open vSwitch ports -requirements: [ ovs-vsctl ] -description: - - Manage Open vSwitch ports -options: - bridge: - required: true - description: - - Name of bridge to manage - port: - required: true - description: - - Name of port to manage on the bridge - state: - required: false - default: "present" - choices: [ present, absent ] - description: - - Whether the port should exist - timeout: - required: false - default: 5 - description: - - How long to wait for ovs-vswitchd to respond -''' - -EXAMPLES = ''' -# Creates port eth2 on bridge br-ex -- openvswitch_port: bridge=br-ex port=eth2 state=present -''' - - -class OVSPort(object): - def __init__(self, module): - self.module = module - self.bridge = module.params['bridge'] - self.port = module.params['port'] - self.state = module.params['state'] - self.timeout = module.params['timeout'] - - def _vsctl(self, command): - '''Run ovs-vsctl command''' - return self.module.run_command(['ovs-vsctl', '-t', str(self.timeout)] + command) - - def exists(self): - '''Check if the port already exists''' - rc, out, err = self._vsctl(['list-ports', self.bridge]) - if rc != 0: - raise Exception(err) - return any(port.rstrip() == self.port for port in out.split('\n')) - - def add(self): - '''Add the port''' - rc, _, err = self._vsctl(['add-port', self.bridge, self.port]) - if rc != 0: - raise Exception(err) - - def delete(self): - '''Remove the port''' - rc, _, err = self._vsctl(['del-port', self.bridge, self.port]) - if rc != 0: - raise Exception(err) - 
- def check(self): - '''Run check mode''' - try: - if self.state == 'absent' and self.exists(): - changed = True - elif self.state == 'present' and not self.exists(): - changed = True - else: - changed = False - except Exception, e: - self.module.fail_json(msg=str(e)) - self.module.exit_json(changed=changed) - - def run(self): - '''Make the necessary changes''' - changed = False - try: - if self.state == 'absent': - if self.exists(): - self.delete() - changed = True - elif self.state == 'present': - if not self.exists(): - self.add() - changed = True - except Exception, e: - self.module.fail_json(msg=str(e)) - self.module.exit_json(changed=changed) - - -def main(): - module = AnsibleModule( - argument_spec={ - 'bridge': {'required': True}, - 'port': {'required': True}, - 'state': {'default': 'present', 'choices': ['present', 'absent']}, - 'timeout': {'default': 5, 'type': 'int'} - }, - supports_check_mode=True, - ) - - port = OVSPort(module) - if module.check_mode: - port.check() - else: - port.run() - - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/network/get_url b/library/network/get_url deleted file mode 100644 index c3b81129a2..0000000000 --- a/library/network/get_url +++ /dev/null @@ -1,313 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Jan-Piet Mens -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
-# -# see examples/playbooks/get_url.yml - -import shutil -import datetime -import re -import tempfile - -DOCUMENTATION = ''' ---- -module: get_url -short_description: Downloads files from HTTP, HTTPS, or FTP to node -description: - - Downloads files from HTTP, HTTPS, or FTP to the remote server. The remote - server I(must) have direct access to the remote resource. - - By default, if an environment variable C(_proxy) is set on - the target host, requests will be sent through that proxy. This - behaviour can be overridden by setting a variable for this task - (see `setting the environment - `_), - or by using the use_proxy option. -version_added: "0.6" -options: - url: - description: - - HTTP, HTTPS, or FTP URL in the form (http|https|ftp)://[user[:pass]]@host.domain[:port]/path - required: true - default: null - aliases: [] - dest: - description: - - absolute path of where to download the file to. - - If C(dest) is a directory, either the server provided filename or, if - none provided, the base name of the URL on the remote server will be - used. If a directory, C(force) has no effect. - If C(dest) is a directory, the file will always be - downloaded (regardless of the force option), but replaced only if the contents changed. - required: true - default: null - force: - description: - - If C(yes) and C(dest) is not a directory, will download the file every - time and replace the file if the contents change. If C(no), the file - will only be downloaded if the destination does not exist. Generally - should be C(yes) only for small local files. Prior to 0.6, this module - behaved as if C(yes) was the default. - version_added: "0.7" - required: false - choices: [ "yes", "no" ] - default: "no" - aliases: [ "thirsty" ] - sha256sum: - description: - - If a SHA-256 checksum is passed to this parameter, the digest of the - destination file will be calculated after it is downloaded to ensure - its integrity and verify that the transfer completed successfully. 
- version_added: "1.3" - required: false - default: null - use_proxy: - description: - - if C(no), it will not use a proxy, even if one is defined in - an environment variable on the target hosts. - required: false - default: 'yes' - choices: ['yes', 'no'] - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - required: false - default: 'yes' - choices: ['yes', 'no'] - timeout: - description: - - Timeout for URL request - required: false - default: 10 - version_added: '1.8' - url_username: - description: - - The username for use in HTTP basic authentication. This parameter can be used - without C(url_password) for sites that allow empty passwords. - required: false - version_added: '1.6' - url_password: - description: - - The password for use in HTTP basic authentication. If the C(url_username) - parameter is not specified, the C(url_password) parameter will not be used. - required: false - version_added: '1.6' - others: - description: - - all arguments accepted by the M(file) module also work here - required: false -notes: - - This module doesn't yet support configuration for proxies. 
-# informational: requirements for nodes -requirements: [ urllib2, urlparse ] -author: Jan-Piet Mens -''' - -EXAMPLES=''' -- name: download foo.conf - get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf mode=0440 - -- name: download file with sha256 check - get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf sha256sum=b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c -''' - -try: - import hashlib - HAS_HASHLIB=True -except ImportError: - HAS_HASHLIB=False - -# ============================================================== -# url handling - -def url_filename(url): - fn = os.path.basename(urlparse.urlsplit(url)[2]) - if fn == '': - return 'index.html' - return fn - -def url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10): - """ - Download data from the url and store in a temporary file. - - Return (tempfile, info about the request) - """ - - rsp, info = fetch_url(module, url, use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout) - - if info['status'] == 304: - module.exit_json(url=url, dest=dest, changed=False, msg=info.get('msg', '')) - - # create a temporary file and copy content to do md5-based replacement - if info['status'] != 200: - module.fail_json(msg="Request failed", status_code=info['status'], response=info['msg'], url=url, dest=dest) - - fd, tempname = tempfile.mkstemp() - f = os.fdopen(fd, 'wb') - try: - shutil.copyfileobj(rsp, f) - except Exception, err: - os.remove(tempname) - module.fail_json(msg="failed to create temporary content file: %s" % str(err)) - f.close() - rsp.close() - return tempname, info - -def extract_filename_from_headers(headers): - """ - Extracts a filename from the given dict of HTTP headers. - - Looks for the content-disposition header and applies a regex. 
- Returns the filename if successful, else None.""" - cont_disp_regex = 'attachment; ?filename="?([^"]+)' - res = None - - if 'content-disposition' in headers: - cont_disp = headers['content-disposition'] - match = re.match(cont_disp_regex, cont_disp) - if match: - res = match.group(1) - # Try preventing any funny business. - res = os.path.basename(res) - - return res - -# ============================================================== -# main - -def main(): - - argument_spec = url_argument_spec() - argument_spec.update( - url = dict(required=True), - dest = dict(required=True), - sha256sum = dict(default=''), - timeout = dict(required=False, type='int', default=10), - ) - - module = AnsibleModule( - # not checking because of daisy chain to file module - argument_spec = argument_spec, - add_file_common_args=True - ) - - url = module.params['url'] - dest = os.path.expanduser(module.params['dest']) - force = module.params['force'] - sha256sum = module.params['sha256sum'] - use_proxy = module.params['use_proxy'] - timeout = module.params['timeout'] - - dest_is_dir = os.path.isdir(dest) - last_mod_time = None - - if not dest_is_dir and os.path.exists(dest): - if not force: - module.exit_json(msg="file already exists", dest=dest, url=url, changed=False) - - # If the file already exists, prepare the last modified time for the - # request. - mtime = os.path.getmtime(dest) - last_mod_time = datetime.datetime.utcfromtimestamp(mtime) - - # download to tmpsrc - tmpsrc, info = url_get(module, url, dest, use_proxy, last_mod_time, force, timeout) - - # Now the request has completed, we can finally generate the final - # destination file name from the info dict. - - if dest_is_dir: - filename = extract_filename_from_headers(info) - if not filename: - # Fall back to extracting the filename from the URL. - # Pluck the URL from the info, since a redirect could have changed - # it. 
- filename = url_filename(info['url']) - dest = os.path.join(dest, filename) - - md5sum_src = None - md5sum_dest = None - - # raise an error if there is no tmpsrc file - if not os.path.exists(tmpsrc): - os.remove(tmpsrc) - module.fail_json(msg="Request failed", status_code=info['status'], response=info['msg']) - if not os.access(tmpsrc, os.R_OK): - os.remove(tmpsrc) - module.fail_json( msg="Source %s not readable" % (tmpsrc)) - md5sum_src = module.md5(tmpsrc) - - # check if there is no dest file - if os.path.exists(dest): - # raise an error if copy has no permission on dest - if not os.access(dest, os.W_OK): - os.remove(tmpsrc) - module.fail_json( msg="Destination %s not writable" % (dest)) - if not os.access(dest, os.R_OK): - os.remove(tmpsrc) - module.fail_json( msg="Destination %s not readable" % (dest)) - md5sum_dest = module.md5(dest) - else: - if not os.access(os.path.dirname(dest), os.W_OK): - os.remove(tmpsrc) - module.fail_json( msg="Destination %s not writable" % (os.path.dirname(dest))) - - if md5sum_src != md5sum_dest: - try: - shutil.copyfile(tmpsrc, dest) - except Exception, err: - os.remove(tmpsrc) - module.fail_json(msg="failed to copy %s to %s: %s" % (tmpsrc, dest, str(err))) - changed = True - else: - changed = False - - # Check the digest of the destination file and ensure that it matches the - # sha256sum parameter if it is present - if sha256sum != '': - # Remove any non-alphanumeric characters, including the infamous - # Unicode zero-width space - stripped_sha256sum = re.sub(r'\W+', '', sha256sum) - - if not HAS_HASHLIB: - os.remove(dest) - module.fail_json(msg="The sha256sum parameter requires hashlib, which is available in Python 2.5 and higher") - else: - destination_checksum = module.sha256(dest) - - if stripped_sha256sum.lower() != destination_checksum: - os.remove(dest) - module.fail_json(msg="The SHA-256 checksum for %s did not match %s; it was %s." 
% (dest, sha256sum, destination_checksum)) - - os.remove(tmpsrc) - - # allow file attribute changes - module.params['path'] = dest - file_args = module.load_file_common_arguments(module.params) - file_args['path'] = dest - changed = module.set_fs_attributes_if_different(file_args, changed) - - # Mission complete - module.exit_json(url=url, dest=dest, src=tmpsrc, md5sum=md5sum_src, - sha256sum=sha256sum, changed=changed, msg=info.get('msg', '')) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * -main() diff --git a/library/network/slurp b/library/network/slurp deleted file mode 100644 index a2130c354b..0000000000 --- a/library/network/slurp +++ /dev/null @@ -1,75 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: slurp -version_added: historical -short_description: Slurps a file from remote nodes -description: - - This module works like M(fetch). It is used for fetching a base64- - encoded blob containing the data in a remote file. -options: - src: - description: - - The file on the remote system to fetch. This I(must) be a file, not a - directory. 
- required: true - default: null - aliases: [] -notes: - - "See also: M(fetch)" -requirements: [] -author: Michael DeHaan -''' - -EXAMPLES = ''' -ansible host -m slurp -a 'src=/tmp/xx' - host | success >> { - "content": "aGVsbG8gQW5zaWJsZSB3b3JsZAo=", - "encoding": "base64" - } -''' - -import base64 - -def main(): - module = AnsibleModule( - argument_spec = dict( - src = dict(required=True, aliases=['path']), - ), - supports_check_mode=True - ) - source = os.path.expanduser(module.params['src']) - - if not os.path.exists(source): - module.fail_json(msg="file not found: %s" % source) - if not os.access(source, os.R_OK): - module.fail_json(msg="file is not readable: %s" % source) - - data = base64.b64encode(file(source).read()) - - module.exit_json(content=data, source=source, encoding='base64') - -# import module snippets -from ansible.module_utils.basic import * - -main() - diff --git a/library/network/uri b/library/network/uri deleted file mode 100644 index 8d62463df7..0000000000 --- a/library/network/uri +++ /dev/null @@ -1,445 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Romeo Theriault -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
-# -# see examples/playbooks/uri.yml - -import shutil -import tempfile -import base64 -import datetime -try: - import json -except ImportError: - import simplejson as json - -DOCUMENTATION = ''' ---- -module: uri -short_description: Interacts with webservices -description: - - Interacts with HTTP and HTTPS web services and supports Digest, Basic and WSSE - HTTP authentication mechanisms. -version_added: "1.1" -options: - url: - description: - - HTTP or HTTPS URL in the form (http|https)://host.domain[:port]/path - required: true - default: null - aliases: [] - dest: - description: - - path of where to download the file to (if desired). If I(dest) is a directory, the basename of the file on the remote server will be used. - required: false - default: null - user: - description: - - username for the module to use for Digest, Basic or WSSE authentication. - required: false - default: null - password: - description: - - password for the module to use for Digest, Basic or WSSE authentication. - required: false - default: null - body: - description: - - The body of the http request/response to the web service. - required: false - default: null - method: - description: - - The HTTP method of the request or response. - required: false - choices: [ "GET", "POST", "PUT", "HEAD", "DELETE", "OPTIONS", "PATCH" ] - default: "GET" - return_content: - description: - - Whether or not to return the body of the request as a "content" key in the dictionary result. If the reported Content-type is "application/json", then the JSON is additionally loaded into a key called C(json) in the dictionary results. - required: false - choices: [ "yes", "no" ] - default: "no" - force_basic_auth: - description: - - httplib2, the library used by the uri module only sends authentication information when a webservice - responds to an initial request with a 401 status. Since some basic auth services do not properly - send a 401, logins will fail. 
This option forces the sending of the Basic authentication header - upon initial request. - required: false - choices: [ "yes", "no" ] - default: "no" - follow_redirects: - description: - - Whether or not the URI module should follow redirects. C(all) will follow all redirects. - C(safe) will follow only "safe" redirects, where "safe" means that the client is only - doing a GET or HEAD on the URI to which it is being redirected. C(none) will not follow - any redirects. Note that C(yes) and C(no) choices are accepted for backwards compatibility, - where C(yes) is the equivalent of C(all) and C(no) is the equivalent of C(safe). C(yes) and C(no) - are deprecated and will be removed in some future version of Ansible. - required: false - choices: [ "all", "safe", "none" ] - default: "safe" - creates: - description: - - a filename, when it already exists, this step will not be run. - required: false - removes: - description: - - a filename, when it does not exist, this step will not be run. - required: false - status_code: - description: - - A valid, numeric, HTTP status code that signifies success of the request. Can also be comma separated list of status codes. - required: false - default: 200 - timeout: - description: - - The socket level timeout in seconds - required: false - default: 30 - HEADER_: - description: - - Any parameter starting with "HEADER_" is a sent with your request as a header. - For example, HEADER_Content-Type="application/json" would send the header - "Content-Type" along with your request with a value of "application/json". 
- required: false - default: null - others: - description: - - all arguments accepted by the M(file) module also work here - required: false - -# informational: requirements for nodes -requirements: [ urlparse, httplib2 ] -author: Romeo Theriault -''' - -EXAMPLES = ''' -# Check that you can connect (GET) to a page and it returns a status 200 -- uri: url=http://www.example.com - -# Check that a page returns a status 200 and fail if the word AWESOME is not in the page contents. -- action: uri url=http://www.example.com return_content=yes - register: webpage - -- action: fail - when: 'AWESOME' not in "{{ webpage.content }}" - - -# Create a JIRA issue - -- uri: url=https://your.jira.example.com/rest/api/2/issue/ - method=POST user=your_username password=your_pass - body="{{ lookup('file','issue.json') }}" force_basic_auth=yes - status_code=201 HEADER_Content-Type="application/json" - -# Login to a form based webpage, then use the returned cookie to -# access the app in later tasks - -- uri: url=https://your.form.based.auth.examle.com/index.php - method=POST body="name=your_username&password=your_password&enter=Sign%20in" - status_code=302 HEADER_Content-Type="application/x-www-form-urlencoded" - register: login - -- uri: url=https://your.form.based.auth.example.com/dashboard.php - method=GET return_content=yes HEADER_Cookie="{{login.set_cookie}}" - -# Queue build of a project in Jenkins: - -- uri: url=http://{{jenkins.host}}/job/{{jenkins.job}}/build?token={{jenkins.token}} - method=GET user={{jenkins.user}} password={{jenkins.password}} force_basic_auth=yes status_code=201 - -''' - -HAS_HTTPLIB2 = True -try: - import httplib2 -except ImportError: - HAS_HTTPLIB2 = False - -HAS_URLPARSE = True - -try: - import urlparse - import socket -except ImportError: - HAS_URLPARSE = False - - -def write_file(module, url, dest, content): - # create a tempfile with some test content - fd, tmpsrc = tempfile.mkstemp() - f = open(tmpsrc, 'wb') - try: - f.write(content) - except 
Exception, err: - os.remove(tmpsrc) - module.fail_json(msg="failed to create temporary content file: %s" % str(err)) - f.close() - - md5sum_src = None - md5sum_dest = None - - # raise an error if there is no tmpsrc file - if not os.path.exists(tmpsrc): - os.remove(tmpsrc) - module.fail_json(msg="Source %s does not exist" % (tmpsrc)) - if not os.access(tmpsrc, os.R_OK): - os.remove(tmpsrc) - module.fail_json( msg="Source %s not readable" % (tmpsrc)) - md5sum_src = module.md5(tmpsrc) - - # check if there is no dest file - if os.path.exists(dest): - # raise an error if copy has no permission on dest - if not os.access(dest, os.W_OK): - os.remove(tmpsrc) - module.fail_json( msg="Destination %s not writable" % (dest)) - if not os.access(dest, os.R_OK): - os.remove(tmpsrc) - module.fail_json( msg="Destination %s not readable" % (dest)) - md5sum_dest = module.md5(dest) - else: - if not os.access(os.path.dirname(dest), os.W_OK): - os.remove(tmpsrc) - module.fail_json( msg="Destination dir %s not writable" % (os.path.dirname(dest))) - - if md5sum_src != md5sum_dest: - try: - shutil.copyfile(tmpsrc, dest) - except Exception, err: - os.remove(tmpsrc) - module.fail_json(msg="failed to copy %s to %s: %s" % (tmpsrc, dest, str(err))) - - os.remove(tmpsrc) - - -def url_filename(url): - fn = os.path.basename(urlparse.urlsplit(url)[2]) - if fn == '': - return 'index.html' - return fn - - -def uri(module, url, dest, user, password, body, method, headers, redirects, socket_timeout): - # To debug - #httplib2.debug = 4 - - # Handle Redirects - if redirects == "all" or redirects == "yes": - follow_redirects = True - follow_all_redirects = True - elif redirects == "none": - follow_redirects = False - follow_all_redirects = False - else: - follow_redirects = True - follow_all_redirects = False - - # Create a Http object and set some default options. 
- h = httplib2.Http(disable_ssl_certificate_validation=True, timeout=socket_timeout) - h.follow_all_redirects = follow_all_redirects - h.follow_redirects = follow_redirects - h.forward_authorization_headers = True - - # If they have a username or password verify they have both, then add them to the request - if user is not None and password is None: - module.fail_json(msg="Both a username and password need to be set.") - if password is not None and user is None: - module.fail_json(msg="Both a username and password need to be set.") - if user is not None and password is not None: - h.add_credentials(user, password) - - # is dest is set and is a directory, let's check if we get redirected and - # set the filename from that url - redirected = False - resp_redir = {} - r = {} - if dest is not None: - dest = os.path.expanduser(dest) - if os.path.isdir(dest): - # first check if we are redirected to a file download - h.follow_redirects=False - # Try the request - try: - resp_redir, content_redir = h.request(url, method=method, body=body, headers=headers) - # if we are redirected, update the url with the location header, - # and update dest with the new url filename - except: - pass - if 'status' in resp_redir and resp_redir['status'] in ["301", "302", "303", "307"]: - url = resp_redir['location'] - redirected = True - dest = os.path.join(dest, url_filename(url)) - # if destination file already exist, only download if file newer - if os.path.exists(dest): - t = datetime.datetime.utcfromtimestamp(os.path.getmtime(dest)) - tstamp = t.strftime('%a, %d %b %Y %H:%M:%S +0000') - headers['If-Modified-Since'] = tstamp - - # do safe redirects now, including 307 - h.follow_redirects=follow_redirects - - # Make the request, or try to :) - try: - resp, content = h.request(url, method=method, body=body, headers=headers) - r['redirected'] = redirected - r.update(resp_redir) - r.update(resp) - try: - return r, unicode(content.decode('unicode_escape')), dest - except: - return r, content, 
dest - except httplib2.RedirectMissingLocation: - module.fail_json(msg="A 3xx redirect response code was provided but no Location: header was provided to point to the new location.") - except httplib2.RedirectLimit: - module.fail_json(msg="The maximum number of redirections was reached without coming to a final URI.") - except httplib2.ServerNotFoundError: - module.fail_json(msg="Unable to resolve the host name given.") - except httplib2.RelativeURIError: - module.fail_json(msg="A relative, as opposed to an absolute URI, was passed in.") - except httplib2.FailedToDecompressContent: - module.fail_json(msg="The headers claimed that the content of the response was compressed but the decompression algorithm applied to the content failed.") - except httplib2.UnimplementedDigestAuthOptionError: - module.fail_json(msg="The server requested a type of Digest authentication that we are unfamiliar with.") - except httplib2.UnimplementedHmacDigestAuthOptionError: - module.fail_json(msg="The server requested a type of HMACDigest authentication that we are unfamiliar with.") - except httplib2.UnimplementedHmacDigestAuthOptionError: - module.fail_json(msg="The server requested a type of HMACDigest authentication that we are unfamiliar with.") - except socket.error, e: - module.fail_json(msg="Socket error: %s to %s" % (e, url)) - -def main(): - - module = AnsibleModule( - argument_spec = dict( - url = dict(required=True), - dest = dict(required=False, default=None), - user = dict(required=False, default=None), - password = dict(required=False, default=None), - body = dict(required=False, default=None), - method = dict(required=False, default='GET', choices=['GET', 'POST', 'PUT', 'HEAD', 'DELETE', 'OPTIONS', 'PATCH']), - return_content = dict(required=False, default='no', type='bool'), - force_basic_auth = dict(required=False, default='no', type='bool'), - follow_redirects = dict(required=False, default='safe', choices=['all', 'safe', 'none', 'yes', 'no']), - creates = 
dict(required=False, default=None), - removes = dict(required=False, default=None), - status_code = dict(required=False, default=[200], type='list'), - timeout = dict(required=False, default=30, type='int'), - ), - check_invalid_arguments=False, - add_file_common_args=True - ) - - if not HAS_HTTPLIB2: - module.fail_json(msg="httplib2 is not installed") - if not HAS_URLPARSE: - module.fail_json(msg="urlparse is not installed") - - url = module.params['url'] - user = module.params['user'] - password = module.params['password'] - body = module.params['body'] - method = module.params['method'] - dest = module.params['dest'] - return_content = module.params['return_content'] - force_basic_auth = module.params['force_basic_auth'] - redirects = module.params['follow_redirects'] - creates = module.params['creates'] - removes = module.params['removes'] - status_code = [int(x) for x in list(module.params['status_code'])] - socket_timeout = module.params['timeout'] - - # Grab all the http headers. Need this hack since passing multi-values is currently a bit ugly. (e.g. headers='{"Content-Type":"application/json"}') - dict_headers = {} - for key, value in module.params.iteritems(): - if key.startswith("HEADER_"): - skey = key.replace("HEADER_", "") - dict_headers[skey] = value - - - if creates is not None: - # do not run the command if the line contains creates=filename - # and the filename already exists. This allows idempotence - # of uri executions. - creates = os.path.expanduser(creates) - if os.path.exists(creates): - module.exit_json(stdout="skipped, since %s exists" % creates, skipped=True, changed=False, stderr=False, rc=0) - - if removes is not None: - # do not run the command if the line contains removes=filename - # and the filename do not exists. This allows idempotence - # of uri executions. 
- v = os.path.expanduser(removes) - if not os.path.exists(removes): - module.exit_json(stdout="skipped, since %s does not exist" % removes, skipped=True, changed=False, stderr=False, rc=0) - - - # httplib2 only sends authentication after the server asks for it with a 401. - # Some 'basic auth' servies fail to send a 401 and require the authentication - # up front. This creates the Basic authentication header and sends it immediately. - if force_basic_auth: - dict_headers["Authorization"] = "Basic {0}".format(base64.b64encode("{0}:{1}".format(user, password))) - - - # Make the request - resp, content, dest = uri(module, url, dest, user, password, body, method, dict_headers, redirects, socket_timeout) - resp['status'] = int(resp['status']) - - # Write the file out if requested - if dest is not None: - if resp['status'] == 304: - changed = False - else: - write_file(module, url, dest, content) - # allow file attribute changes - changed = True - module.params['path'] = dest - file_args = module.load_file_common_arguments(module.params) - file_args['path'] = dest - changed = module.set_fs_attributes_if_different(file_args, changed) - resp['path'] = dest - else: - changed = False - - # Transmogrify the headers, replacing '-' with '_', since variables dont work with dashes. 
- uresp = {} - for key, value in resp.iteritems(): - ukey = key.replace("-", "_") - uresp[ukey] = value - - if 'content_type' in uresp: - if uresp['content_type'].startswith('application/json'): - try: - js = json.loads(content) - uresp['json'] = js - except: - pass - if resp['status'] not in status_code: - module.fail_json(msg="Status code was not " + str(status_code), content=content, **uresp) - elif return_content: - module.exit_json(changed=changed, content=content, **uresp) - else: - module.exit_json(changed=changed, **uresp) - - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/notification/campfire b/library/notification/campfire deleted file mode 100644 index 31e69fc545..0000000000 --- a/library/notification/campfire +++ /dev/null @@ -1,143 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -DOCUMENTATION = ''' ---- -module: campfire -version_added: "1.2" -short_description: Send a message to Campfire -description: - - Send a message to Campfire. - - Messages with newlines will result in a "Paste" message being sent. -version_added: "1.2" -options: - subscription: - description: - - The subscription name to use. - required: true - token: - description: - - API token. - required: true - room: - description: - - Room number to which the message should be sent. - required: true - msg: - description: - - The message body. - required: true - notify: - description: - - Send a notification sound before the message. 
- required: false - choices: ["56k", "bell", "bezos", "bueller", "clowntown", - "cottoneyejoe", "crickets", "dadgummit", "dangerzone", - "danielsan", "deeper", "drama", "greatjob", "greyjoy", - "guarantee", "heygirl", "horn", "horror", - "inconceivable", "live", "loggins", "makeitso", "noooo", - "nyan", "ohmy", "ohyeah", "pushit", "rimshot", - "rollout", "rumble", "sax", "secret", "sexyback", - "story", "tada", "tmyk", "trololo", "trombone", "unix", - "vuvuzela", "what", "whoomp", "yeah", "yodel"] - -# informational: requirements for nodes -requirements: [ urllib2, cgi ] -author: Adam Garside -''' - -EXAMPLES = ''' -- campfire: subscription=foo token=12345 room=123 msg="Task completed." - -- campfire: subscription=foo token=12345 room=123 notify=loggins - msg="Task completed ... with feeling." -''' - - -def main(): - - try: - import urllib2 - except ImportError: - module.fail_json(msg="urllib2 is required") - - try: - import cgi - except ImportError: - module.fail_json(msg="cgi is required") - - module = AnsibleModule( - argument_spec=dict( - subscription=dict(required=True), - token=dict(required=True), - room=dict(required=True), - msg=dict(required=True), - notify=dict(required=False, - choices=["56k", "bell", "bezos", "bueller", - "clowntown", "cottoneyejoe", - "crickets", "dadgummit", "dangerzone", - "danielsan", "deeper", "drama", - "greatjob", "greyjoy", "guarantee", - "heygirl", "horn", "horror", - "inconceivable", "live", "loggins", - "makeitso", "noooo", "nyan", "ohmy", - "ohyeah", "pushit", "rimshot", - "rollout", "rumble", "sax", "secret", - "sexyback", "story", "tada", "tmyk", - "trololo", "trombone", "unix", - "vuvuzela", "what", "whoomp", "yeah", - "yodel"]), - ), - supports_check_mode=False - ) - - subscription = module.params["subscription"] - token = module.params["token"] - room = module.params["room"] - msg = module.params["msg"] - notify = module.params["notify"] - - URI = "https://%s.campfirenow.com" % subscription - NSTR = "SoundMessage%s" - 
MSTR = "%s" - AGENT = "Ansible/1.2" - - try: - - # Setup basic auth using token as the username - pm = urllib2.HTTPPasswordMgrWithDefaultRealm() - pm.add_password(None, URI, token, 'X') - - # Setup Handler and define the opener for the request - handler = urllib2.HTTPBasicAuthHandler(pm) - opener = urllib2.build_opener(handler) - - target_url = '%s/room/%s/speak.xml' % (URI, room) - - # Send some audible notification if requested - if notify: - req = urllib2.Request(target_url, NSTR % cgi.escape(notify)) - req.add_header('Content-Type', 'application/xml') - req.add_header('User-agent', AGENT) - response = opener.open(req) - - # Send the message - req = urllib2.Request(target_url, MSTR % cgi.escape(msg)) - req.add_header('Content-Type', 'application/xml') - req.add_header('User-agent', AGENT) - response = opener.open(req) - - except urllib2.HTTPError, e: - if not (200 <= e.code < 300): - module.fail_json(msg="unable to send msg: '%s', campfire api" - " returned error code: '%s'" % - (msg, e.code)) - - except Exception, e: - module.fail_json(msg="unable to send msg: %s" % msg) - - module.exit_json(changed=True, room=room, msg=msg, notify=notify) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/notification/flowdock b/library/notification/flowdock deleted file mode 100644 index 009487fb43..0000000000 --- a/library/notification/flowdock +++ /dev/null @@ -1,192 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright 2013 Matt Coddington -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: flowdock -version_added: "1.2" -author: Matt Coddington -short_description: Send a message to a flowdock -description: - - Send a message to a flowdock team inbox or chat using the push API (see https://www.flowdock.com/api/team-inbox and https://www.flowdock.com/api/chat) -options: - token: - description: - - API token. - required: true - type: - description: - - Whether to post to 'inbox' or 'chat' - required: true - choices: [ "inbox", "chat" ] - msg: - description: - - Content of the message - required: true - tags: - description: - - tags of the message, separated by commas - required: false - external_user_name: - description: - - (chat only - required) Name of the "user" sending the message - required: false - from_address: - description: - - (inbox only - required) Email address of the message sender - required: false - source: - description: - - (inbox only - required) Human readable identifier of the application that uses the Flowdock API - required: false - subject: - description: - - (inbox only - required) Subject line of the message - required: false - from_name: - description: - - (inbox only) Name of the message sender - required: false - reply_to: - description: - - (inbox only) Email address for replies - required: false - project: - description: - - (inbox only) Human readable identifier for more detailed message categorization - required: false - link: - description: - - (inbox only) Link associated with the message. This will be used to link the message subject in Team Inbox. - required: false - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. 
- required: false - default: 'yes' - choices: ['yes', 'no'] - version_added: 1.5.1 - -# informational: requirements for nodes -requirements: [ urllib, urllib2 ] -''' - -EXAMPLES = ''' -- flowdock: type=inbox - token=AAAAAA - from_address=user@example.com - source='my cool app' - msg='test from ansible' - subject='test subject' - -- flowdock: type=chat - token=AAAAAA - external_user_name=testuser - msg='test from ansible' - tags=tag1,tag2,tag3 -''' - -# =========================================== -# Module execution. -# - -def main(): - - module = AnsibleModule( - argument_spec=dict( - token=dict(required=True), - msg=dict(required=True), - type=dict(required=True, choices=["inbox","chat"]), - external_user_name=dict(required=False), - from_address=dict(required=False), - source=dict(required=False), - subject=dict(required=False), - from_name=dict(required=False), - reply_to=dict(required=False), - project=dict(required=False), - tags=dict(required=False), - link=dict(required=False), - validate_certs = dict(default='yes', type='bool'), - ), - supports_check_mode=True - ) - - type = module.params["type"] - token = module.params["token"] - if type == 'inbox': - url = "https://api.flowdock.com/v1/messages/team_inbox/%s" % (token) - else: - url = "https://api.flowdock.com/v1/messages/chat/%s" % (token) - - params = {} - - # required params - params['content'] = module.params["msg"] - - # required params for the 'chat' type - if module.params['external_user_name']: - if type == 'inbox': - module.fail_json(msg="external_user_name is not valid for the 'inbox' type") - else: - params['external_user_name'] = module.params["external_user_name"] - elif type == 'chat': - module.fail_json(msg="%s is required for the 'inbox' type" % item) - - # required params for the 'inbox' type - for item in [ 'from_address', 'source', 'subject' ]: - if module.params[item]: - if type == 'chat': - module.fail_json(msg="%s is not valid for the 'chat' type" % item) - else: - params[item] = 
module.params[item] - elif type == 'inbox': - module.fail_json(msg="%s is required for the 'inbox' type" % item) - - # optional params - if module.params["tags"]: - params['tags'] = module.params["tags"] - - # optional params for the 'inbox' type - for item in [ 'from_name', 'reply_to', 'project', 'link' ]: - if module.params[item]: - if type == 'chat': - module.fail_json(msg="%s is not valid for the 'chat' type" % item) - else: - params[item] = module.params[item] - - # If we're in check mode, just exit pretending like we succeeded - if module.check_mode: - module.exit_json(changed=False) - - # Send the data to Flowdock - data = urllib.urlencode(params) - response, info = fetch_url(module, url, data=data) - if info['status'] != 200: - module.fail_json(msg="unable to send msg: %s" % info['msg']) - - module.exit_json(changed=True, msg=module.params["msg"]) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * - -main() - diff --git a/library/notification/grove b/library/notification/grove deleted file mode 100644 index e6bf241bda..0000000000 --- a/library/notification/grove +++ /dev/null @@ -1,99 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -DOCUMENTATION = ''' ---- -module: grove -version_added: 1.4 -short_description: Sends a notification to a grove.io channel -description: - - The M(grove) module sends a message for a service to a Grove.io - channel. -options: - channel_token: - description: - - Token of the channel to post to. - required: true - service: - description: - - Name of the service (displayed as the "user" in the message) - required: false - default: ansible - message: - description: - - Message content - required: true - url: - description: - - Service URL for the web client - required: false - icon_url: - description: - - Icon for the service - required: false - validate_certs: - description: - - If C(no), SSL certificates will not be validated. 
This should only be used - on personally controlled sites using self-signed certificates. - required: false - default: 'yes' - choices: ['yes', 'no'] - version_added: 1.5.1 -author: Jonas Pfenniger -''' - -EXAMPLES = ''' -- grove: > - channel_token=6Ph62VBBJOccmtTPZbubiPzdrhipZXtg - service=my-app - message=deployed {{ target }} -''' - -BASE_URL = 'https://grove.io/api/notice/%s/' - -# ============================================================== -# do_notify_grove - -def do_notify_grove(module, channel_token, service, message, url=None, icon_url=None): - my_url = BASE_URL % (channel_token,) - - my_data = dict(service=service, message=message) - if url is not None: - my_data['url'] = url - if icon_url is not None: - my_data['icon_url'] = icon_url - - data = urllib.urlencode(my_data) - response, info = fetch_url(module, my_url, data=data) - if info['status'] != 200: - module.fail_json(msg="failed to send notification: %s" % info['msg']) - -# ============================================================== -# main - -def main(): - module = AnsibleModule( - argument_spec = dict( - channel_token = dict(type='str', required=True), - message = dict(type='str', required=True), - service = dict(type='str', default='ansible'), - url = dict(type='str', default=None), - icon_url = dict(type='str', default=None), - validate_certs = dict(default='yes', type='bool'), - ) - ) - - channel_token = module.params['channel_token'] - service = module.params['service'] - message = module.params['message'] - url = module.params['url'] - icon_url = module.params['icon_url'] - - do_notify_grove(module, channel_token, service, message, url, icon_url) - - # Mission complete - module.exit_json(msg="OK") - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/notification/hipchat b/library/notification/hipchat deleted file mode 100644 index 4ff95b32bf..0000000000 --- a/library/notification/hipchat +++ /dev/null @@ -1,149 +0,0 @@ -#!/usr/bin/python -# 
-*- coding: utf-8 -*- - -DOCUMENTATION = ''' ---- -module: hipchat -version_added: "1.2" -short_description: Send a message to hipchat -description: - - Send a message to hipchat -options: - token: - description: - - API token. - required: true - room: - description: - - ID or name of the room. - required: true - from: - description: - - Name the message will appear be sent from. max 15 characters. - Over 15, will be shorten. - required: false - default: Ansible - msg: - description: - - The message body. - required: true - default: null - color: - description: - - Background color for the message. Default is yellow. - required: false - default: yellow - choices: [ "yellow", "red", "green", "purple", "gray", "random" ] - msg_format: - description: - - message format. html or text. Default is text. - required: false - default: text - choices: [ "text", "html" ] - notify: - description: - - notify or not (change the tab color, play a sound, etc) - required: false - default: 'yes' - choices: [ "yes", "no" ] - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - required: false - default: 'yes' - choices: ['yes', 'no'] - version_added: 1.5.1 - api: - description: - - API url if using a self-hosted hipchat server - required: false - default: 'https://api.hipchat.com/v1/rooms/message' - version_added: 1.6.0 - - -# informational: requirements for nodes -requirements: [ urllib, urllib2 ] -author: WAKAYAMA Shirou -''' - -EXAMPLES = ''' -- hipchat: token=AAAAAA room=notify msg="Ansible task finished" -''' - -# =========================================== -# HipChat module specific support methods. 
-# - -MSG_URI = "https://api.hipchat.com/v1/rooms/message" - -def send_msg(module, token, room, msg_from, msg, msg_format='text', - color='yellow', notify=False, api=MSG_URI): - '''sending message to hipchat''' - - params = {} - params['room_id'] = room - params['from'] = msg_from[:15] # max length is 15 - params['message'] = msg - params['message_format'] = msg_format - params['color'] = color - params['api'] = api - - if notify: - params['notify'] = 1 - else: - params['notify'] = 0 - - url = api + "?auth_token=%s" % (token) - data = urllib.urlencode(params) - response, info = fetch_url(module, url, data=data) - if info['status'] == 200: - return response.read() - else: - module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) - - -# =========================================== -# Module execution. -# - -def main(): - - module = AnsibleModule( - argument_spec=dict( - token=dict(required=True), - room=dict(required=True), - msg=dict(required=True), - msg_from=dict(default="Ansible", aliases=['from']), - color=dict(default="yellow", choices=["yellow", "red", "green", - "purple", "gray", "random"]), - msg_format=dict(default="text", choices=["text", "html"]), - notify=dict(default=True, type='bool'), - validate_certs = dict(default='yes', type='bool'), - api = dict(default=MSG_URI), - ), - supports_check_mode=True - ) - - token = module.params["token"] - room = module.params["room"] - msg = module.params["msg"] - msg_from = module.params["msg_from"] - color = module.params["color"] - msg_format = module.params["msg_format"] - notify = module.params["notify"] - api = module.params["api"] - - try: - send_msg(module, token, room, msg_from, msg, msg_format, color, notify, api) - except Exception, e: - module.fail_json(msg="unable to sent msg: %s" % e) - - changed = True - module.exit_json(changed=changed, room=room, msg_from=msg_from, msg=msg) - -# import module snippets -from ansible.module_utils.basic import * -from 
ansible.module_utils.urls import * - -main() diff --git a/library/notification/irc b/library/notification/irc deleted file mode 100644 index a90834f820..0000000000 --- a/library/notification/irc +++ /dev/null @@ -1,215 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Jan-Piet Mens -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -DOCUMENTATION = ''' ---- -module: irc -version_added: "1.2" -short_description: Send a message to an IRC channel -description: - - Send a message to an IRC channel. This is a very simplistic implementation. -options: - server: - description: - - IRC server name/address - required: false - default: localhost - port: - description: - - IRC server port number - required: false - default: 6667 - nick: - description: - - Nickname. May be shortened, depending on server's NICKLEN setting. - required: false - default: ansible - msg: - description: - - The message body. - required: true - default: null - color: - description: - - Text color for the message. ("none" is a valid option in 1.6 or later, in 1.6 and prior, the default color is black, not "none"). 
- required: false - default: "none" - choices: [ "none", "yellow", "red", "green", "blue", "black" ] - channel: - description: - - Channel name - required: true - key: - description: - - Channel key - required: false - version_added: 1.7 - passwd: - description: - - Server password - required: false - timeout: - description: - - Timeout to use while waiting for successful registration and join - messages, this is to prevent an endless loop - default: 30 - version_added: 1.5 - use_ssl: - description: - - Designates whether TLS/SSL should be used when connecting to the IRC server - default: False - version_added: 1.8 - -# informational: requirements for nodes -requirements: [ socket ] -author: Jan-Piet Mens, Matt Martz -''' - -EXAMPLES = ''' -- irc: server=irc.example.net channel="#t1" msg="Hello world" - -- local_action: irc port=6669 - channel="#t1" - msg="All finished at {{ ansible_date_time.iso8601 }}" - color=red - nick=ansibleIRC -''' - -# =========================================== -# IRC module support methods. 
-# - -import re -import socket -import ssl - -from time import sleep - - -def send_msg(channel, msg, server='localhost', port='6667', key=None, - nick="ansible", color='none', passwd=False, timeout=30, use_ssl=False): - '''send message to IRC''' - - colornumbers = { - 'black': "01", - 'red': "04", - 'green': "09", - 'yellow': "08", - 'blue': "12", - } - - try: - colornumber = colornumbers[color] - colortext = "\x03" + colornumber - except: - colortext = "" - - message = colortext + msg - - irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - if use_ssl: - irc = ssl.wrap_socket(irc) - irc.connect((server, int(port))) - if passwd: - irc.send('PASS %s\r\n' % passwd) - irc.send('NICK %s\r\n' % nick) - irc.send('USER %s %s %s :ansible IRC\r\n' % (nick, nick, nick)) - motd = '' - start = time.time() - while 1: - motd += irc.recv(1024) - # The server might send back a shorter nick than we specified (due to NICKLEN), - # so grab that and use it from now on (assuming we find the 00[1-4] response). 
- match = re.search('^:\S+ 00[1-4] (?P\S+) :', motd, flags=re.M) - if match: - nick = match.group('nick') - break - elif time.time() - start > timeout: - raise Exception('Timeout waiting for IRC server welcome response') - sleep(0.5) - - if key: - irc.send('JOIN %s %s\r\n' % (channel, key)) - else: - irc.send('JOIN %s\r\n' % channel) - - join = '' - start = time.time() - while 1: - join += irc.recv(1024) - if re.search('^:\S+ 366 %s %s :' % (nick, channel), join, flags=re.M): - break - elif time.time() - start > timeout: - raise Exception('Timeout waiting for IRC JOIN response') - sleep(0.5) - - irc.send('PRIVMSG %s :%s\r\n' % (channel, message)) - sleep(1) - irc.send('PART %s\r\n' % channel) - irc.send('QUIT\r\n') - sleep(1) - irc.close() - -# =========================================== -# Main -# - - -def main(): - module = AnsibleModule( - argument_spec=dict( - server=dict(default='localhost'), - port=dict(default=6667), - nick=dict(default='ansible'), - msg=dict(required=True), - color=dict(default="none", choices=["yellow", "red", "green", - "blue", "black", "none"]), - channel=dict(required=True), - key=dict(), - passwd=dict(), - timeout=dict(type='int', default=30), - use_ssl=dict(type='bool', default=False) - ), - supports_check_mode=True - ) - - server = module.params["server"] - port = module.params["port"] - nick = module.params["nick"] - msg = module.params["msg"] - color = module.params["color"] - channel = module.params["channel"] - key = module.params["key"] - passwd = module.params["passwd"] - timeout = module.params["timeout"] - use_ssl = module.params["use_ssl"] - - try: - send_msg(channel, msg, server, port, key, nick, color, passwd, timeout, use_ssl) - except Exception, e: - module.fail_json(msg="unable to send to IRC: %s" % e) - - module.exit_json(changed=False, channel=channel, nick=nick, - msg=msg) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/notification/jabber 
b/library/notification/jabber deleted file mode 100644 index 8a7eed37b3..0000000000 --- a/library/notification/jabber +++ /dev/null @@ -1,146 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -DOCUMENTATION = ''' ---- -version_added: "1.2" -module: jabber -short_description: Send a message to jabber user or chat room -description: - - Send a message to jabber -options: - user: - description: - User as which to connect - required: true - password: - description: - password for user to connect - required: true - to: - description: - user ID or name of the room, when using room use a slash to indicate your nick. - required: true - msg: - description: - - The message body. - required: true - default: null - host: - description: - host to connect, overrides user info - required: false - port: - description: - port to connect to, overrides default - required: false - default: 5222 - encoding: - description: - message encoding - required: false - -# informational: requirements for nodes -requirements: [ xmpp ] -author: Brian Coca -''' - -EXAMPLES = ''' -# send a message to a user -- jabber: user=mybot@example.net - password=secret - to=friend@example.net - msg="Ansible task finished" - -# send a message to a room -- jabber: user=mybot@example.net - password=secret - to=mychaps@conference.example.net/ansiblebot - msg="Ansible task finished" - -# send a message, specifying the host and port -- jabber user=mybot@example.net - host=talk.example.net - port=5223 - password=secret - to=mychaps@example.net - msg="Ansible task finished" -''' - -import os -import re -import time - -HAS_XMPP = True -try: - import xmpp -except ImportError: - HAS_XMPP = False - -def main(): - - module = AnsibleModule( - argument_spec=dict( - user=dict(required=True), - password=dict(required=True), - to=dict(required=True), - msg=dict(required=True), - host=dict(required=False), - port=dict(required=False,default=5222), - encoding=dict(required=False), - ), - supports_check_mode=True - ) - - if 
not HAS_XMPP: - module.fail_json(msg="xmpp is not installed") - - jid = xmpp.JID(module.params['user']) - user = jid.getNode() - server = jid.getDomain() - port = module.params['port'] - password = module.params['password'] - try: - to, nick = module.params['to'].split('/', 1) - except ValueError: - to, nick = module.params['to'], None - - if module.params['host']: - host = module.params['host'] - else: - host = server - if module.params['encoding']: - xmpp.simplexml.ENCODING = params['encoding'] - - msg = xmpp.protocol.Message(body=module.params['msg']) - - try: - conn=xmpp.Client(server) - if not conn.connect(server=(host,port)): - module.fail_json(rc=1, msg='Failed to connect to server: %s' % (server)) - if not conn.auth(user,password,'Ansible'): - module.fail_json(rc=1, msg='Failed to authorize %s on: %s' % (user,server)) - # some old servers require this, also the sleep following send - conn.sendInitPresence(requestRoster=0) - - if nick: # sending to room instead of user, need to join - msg.setType('groupchat') - msg.setTag('x', namespace='http://jabber.org/protocol/muc#user') - conn.send(xmpp.Presence(to=module.params['to'])) - time.sleep(1) - else: - msg.setType('chat') - - msg.setTo(to) - if not module.check_mode: - conn.send(msg) - time.sleep(1) - conn.disconnect() - except Exception, e: - module.fail_json(msg="unable to send msg: %s" % e) - - module.exit_json(changed=False, to=to, user=user, msg=msg.getBody()) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/notification/mail b/library/notification/mail deleted file mode 100644 index 34cd3a09bf..0000000000 --- a/library/notification/mail +++ /dev/null @@ -1,252 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright 2012 Dag Wieers -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either 
version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = """ ---- -author: Dag Wieers -module: mail -short_description: Send an email -description: - - This module is useful for sending emails from playbooks. - - One may wonder why automate sending emails? In complex environments - there are from time to time processes that cannot be automated, either - because you lack the authority to make it so, or because not everyone - agrees to a common approach. - - If you cannot automate a specific step, but the step is non-blocking, - sending out an email to the responsible party to make him perform his - part of the bargain is an elegant way to put the responsibility in - someone else's lap. - - Of course sending out a mail can be equally useful as a way to notify - one or more people in a team that a specific action has been - (successfully) taken. -version_added: "0.8" -options: - from: - description: - - The email-address the mail is sent from. May contain address and phrase. - default: root - required: false - to: - description: - - The email-address(es) the mail is being sent to. This is - a comma-separated list, which may contain address and phrase portions. - default: root - required: false - cc: - description: - - The email-address(es) the mail is being copied to. This is - a comma-separated list, which may contain address and phrase portions. - required: false - bcc: - description: - - The email-address(es) the mail is being 'blind' copied to. This is - a comma-separated list, which may contain address and phrase portions. 
- required: false - subject: - description: - - The subject of the email being sent. - aliases: [ msg ] - required: true - body: - description: - - The body of the email being sent. - default: $subject - required: false - host: - description: - - The mail server - default: 'localhost' - required: false - port: - description: - - The mail server port - default: '25' - required: false - version_added: "1.0" - attach: - description: - - A space-separated list of pathnames of files to attach to the message. - Attached files will have their content-type set to C(application/octet-stream). - default: null - required: false - version_added: "1.0" - headers: - description: - - A vertical-bar-separated list of headers which should be added to the message. - Each individual header is specified as C(header=value) (see example below). - default: null - required: false - version_added: "1.0" - charset: - description: - - The character set of email being sent - default: 'us-ascii' - required: false -""" - -EXAMPLES = ''' -# Example playbook sending mail to root -- local_action: mail msg='System {{ ansible_hostname }} has been successfully provisioned.' - -# Send e-mail to a bunch of users, attaching files -- local_action: mail - host='127.0.0.1' - port=2025 - subject="Ansible-report" - body="Hello, this is an e-mail. 
I hope you like it ;-)" - from="jane@example.net (Jane Jolie)" - to="John Doe , Suzie Something " - cc="Charlie Root " - attach="/etc/group /tmp/pavatar2.png" - headers=Reply-To=john@example.com|X-Special="Something or other" - charset=utf8 -''' - -import os -import sys -import smtplib - -try: - from email import encoders - import email.utils - from email.utils import parseaddr, formataddr - from email.mime.base import MIMEBase - from mail.mime.multipart import MIMEMultipart - from email.mime.text import MIMEText -except ImportError: - from email import Encoders as encoders - import email.Utils - from email.Utils import parseaddr, formataddr - from email.MIMEBase import MIMEBase - from email.MIMEMultipart import MIMEMultipart - from email.MIMEText import MIMEText - -def main(): - - module = AnsibleModule( - argument_spec = dict( - host = dict(default='localhost'), - port = dict(default='25'), - sender = dict(default='root', aliases=['from']), - to = dict(default='root', aliases=['recipients']), - cc = dict(default=None), - bcc = dict(default=None), - subject = dict(required=True, aliases=['msg']), - body = dict(default=None), - attach = dict(default=None), - headers = dict(default=None), - charset = dict(default='us-ascii') - ) - ) - - host = module.params.get('host') - port = module.params.get('port') - sender = module.params.get('sender') - recipients = module.params.get('to') - copies = module.params.get('cc') - blindcopies = module.params.get('bcc') - subject = module.params.get('subject') - body = module.params.get('body') - attach_files = module.params.get('attach') - headers = module.params.get('headers') - charset = module.params.get('charset') - - sender_phrase, sender_addr = parseaddr(sender) - - if not body: - body = subject - - try: - smtp = smtplib.SMTP(host, port=int(port)) - except Exception, e: - module.fail_json(rc=1, msg='Failed to send mail to server %s on port %s: %s' % (host, port, e)) - - - msg = MIMEMultipart() - msg['Subject'] = subject - 
msg['From'] = formataddr((sender_phrase, sender_addr)) - msg.preamble = "Multipart message" - - if headers is not None: - for hdr in [x.strip() for x in headers.split('|')]: - try: - h_key, h_val = hdr.split('=') - msg.add_header(h_key, h_val) - except: - pass - - if 'X-Mailer' not in msg: - msg.add_header('X-Mailer', "Ansible") - - to_list = [] - cc_list = [] - addr_list = [] - - if recipients is not None: - for addr in [x.strip() for x in recipients.split(',')]: - to_list.append( formataddr( parseaddr(addr)) ) - addr_list.append( parseaddr(addr)[1] ) # address only, w/o phrase - if copies is not None: - for addr in [x.strip() for x in copies.split(',')]: - cc_list.append( formataddr( parseaddr(addr)) ) - addr_list.append( parseaddr(addr)[1] ) # address only, w/o phrase - if blindcopies is not None: - for addr in [x.strip() for x in blindcopies.split(',')]: - addr_list.append( parseaddr(addr)[1] ) - - if len(to_list) > 0: - msg['To'] = ", ".join(to_list) - if len(cc_list) > 0: - msg['Cc'] = ", ".join(cc_list) - - part = MIMEText(body + "\n\n", _charset=charset) - msg.attach(part) - - if attach_files is not None: - for file in attach_files.split(): - try: - fp = open(file, 'rb') - - part = MIMEBase('application', 'octet-stream') - part.set_payload(fp.read()) - fp.close() - - encoders.encode_base64(part) - - part.add_header('Content-disposition', 'attachment', filename=os.path.basename(file)) - msg.attach(part) - except Exception, e: - module.fail_json(rc=1, msg="Failed to send mail: can't attach file %s: %s" % (file, e)) - sys.exit() - - composed = msg.as_string() - - try: - smtp.sendmail(sender_addr, set(addr_list), composed) - except Exception, e: - module.fail_json(rc=1, msg='Failed to send mail to %s: %s' % (", ".join(addr_list), e)) - - smtp.quit() - - module.exit_json(changed=False) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/notification/mqtt b/library/notification/mqtt deleted file mode 100644 index 
d701bd9348..0000000000 --- a/library/notification/mqtt +++ /dev/null @@ -1,166 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, 2014, Jan-Piet Mens -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -DOCUMENTATION = ''' ---- -module: mqtt -short_description: Publish a message on an MQTT topic for the IoT -version_added: "1.2" -description: - - Publish a message on an MQTT topic. -options: - server: - description: - - MQTT broker address/name - required: false - default: localhost - port: - description: - - MQTT broker port number - required: false - default: 1883 - username: - description: - - Username to authenticate against the broker. - required: false - password: - description: - - Password for C(username) to authenticate against the broker. - required: false - client_id: - description: - - MQTT client identifier - required: false - default: hostname + pid - topic: - description: - - MQTT topic name - required: true - default: null - payload: - description: - - Payload. The special string C("None") may be used to send a NULL - (i.e. empty) payload which is useful to simply notify with the I(topic) - or to clear previously retained messages. - required: true - default: null - qos: - description: - - QoS (Quality of Service) - required: false - default: 0 - choices: [ "0", "1", "2" ] - retain: - description: - - Setting this flag causes the broker to retain (i.e. 
keep) the message so that - applications that subsequently subscribe to the topic can received the last - retained message immediately. - required: false - default: False - -# informational: requirements for nodes -requirements: [ mosquitto ] -notes: - - This module requires a connection to an MQTT broker such as Mosquitto - U(http://mosquitto.org) and the I(Paho) C(mqtt) Python client (U(https://pypi.python.org/pypi/paho-mqtt)). -author: Jan-Piet Mens -''' - -EXAMPLES = ''' -- local_action: mqtt - topic=service/ansible/{{ ansible_hostname }} - payload="Hello at {{ ansible_date_time.iso8601 }}" - qos=0 - retain=false - client_id=ans001 -''' - -# =========================================== -# MQTT module support methods. -# - -HAS_PAHOMQTT = True -try: - import socket - import paho.mqtt.publish as mqtt -except ImportError: - HAS_PAHOMQTT = False - -# =========================================== -# Main -# - -def main(): - - module = AnsibleModule( - argument_spec=dict( - server = dict(default = 'localhost'), - port = dict(default = 1883), - topic = dict(required = True), - payload = dict(required = True), - client_id = dict(default = None), - qos = dict(default="0", choices=["0", "1", "2"]), - retain = dict(default=False, type='bool'), - username = dict(default = None), - password = dict(default = None), - ), - supports_check_mode=True - ) - - if not HAS_PAHOMQTT: - module.fail_json(msg="Paho MQTT is not installed") - - server = module.params.get("server", 'localhost') - port = module.params.get("port", 1883) - topic = module.params.get("topic") - payload = module.params.get("payload") - client_id = module.params.get("client_id", '') - qos = int(module.params.get("qos", 0)) - retain = module.params.get("retain") - username = module.params.get("username", None) - password = module.params.get("password", None) - - if client_id is None: - client_id = "%s_%s" % (socket.getfqdn(), os.getpid()) - - if payload and payload == 'None': - payload = None - - auth=None - if 
username is not None: - auth = { 'username' : username, 'password' : password } - - try: - rc = mqtt.single(topic, payload, - qos=qos, - retain=retain, - client_id=client_id, - hostname=server, - port=port, - auth=auth) - except Exception, e: - module.fail_json(msg="unable to publish to MQTT broker %s" % (e)) - - module.exit_json(changed=False, topic=topic) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/notification/nexmo b/library/notification/nexmo deleted file mode 100644 index d4898c40cd..0000000000 --- a/library/notification/nexmo +++ /dev/null @@ -1,140 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, Matt Martz -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = """ -module: nexmo -short_description: Send a SMS via nexmo -description: - - Send a SMS message via nexmo -version_added: 1.6 -author: Matt Martz -options: - api_key: - description: - - Nexmo API Key - required: true - api_secret: - description: - - Nexmo API Secret - required: true - src: - description: - - Nexmo Number to send from - required: true - dest: - description: - - Phone number(s) to send SMS message to - required: true - msg: - description: - - Message to text to send. 
Messages longer than 160 characters will be - split into multiple messages - required: true - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - required: false - default: 'yes' - choices: - - 'yes' - - 'no' -""" - -EXAMPLES = """ -- name: Send notification message via Nexmo - local_action: - module: nexmo - api_key: 640c8a53 - api_secret: 0ce239a6 - src: 12345678901 - dest: - - 10987654321 - - 16789012345 - msg: "{{ inventory_hostname }} completed" -""" - - -NEXMO_API = 'https://rest.nexmo.com/sms/json' - - -def send_msg(module): - failed = list() - responses = dict() - msg = { - 'api_key': module.params.get('api_key'), - 'api_secret': module.params.get('api_secret'), - 'from': module.params.get('src'), - 'text': module.params.get('msg') - } - for number in module.params.get('dest'): - msg['to'] = number - url = "%s?%s" % (NEXMO_API, urllib.urlencode(msg)) - - headers = dict(Accept='application/json') - response, info = fetch_url(module, url, headers=headers) - if info['status'] != 200: - failed.append(number) - responses[number] = dict(failed=True) - - try: - responses[number] = json.load(response) - except: - failed.append(number) - responses[number] = dict(failed=True) - else: - for message in responses[number]['messages']: - if int(message['status']) != 0: - failed.append(number) - responses[number] = dict(failed=True, **responses[number]) - - if failed: - msg = 'One or messages failed to send' - else: - msg = '' - - module.exit_json(failed=bool(failed), msg=msg, changed=False, - responses=responses) - - -def main(): - argument_spec = url_argument_spec() - argument_spec.update( - dict( - api_key=dict(required=True, no_log=True), - api_secret=dict(required=True, no_log=True), - src=dict(required=True, type='int'), - dest=dict(required=True, type='list'), - msg=dict(required=True), - ), - ) - - module = AnsibleModule( - 
argument_spec=argument_spec - ) - - send_msg(module) - - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * - -main() diff --git a/library/notification/osx_say b/library/notification/osx_say deleted file mode 100644 index 39e3da88c1..0000000000 --- a/library/notification/osx_say +++ /dev/null @@ -1,74 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: osx_say -version_added: "1.2" -short_description: Makes an OSX computer to speak. -description: - - makes an OS computer speak! Amuse your friends, annoy your coworkers! -notes: - - If you like this module, you may also be interested in the osx_say callback in the plugins/ directory of the source checkout. 
-options: - msg: - description: - What to say - required: true - voice: - description: - What voice to use - required: false -requirements: [ say ] -author: Michael DeHaan -''' - -EXAMPLES = ''' -- local_action: osx_say msg="{{inventory_hostname}} is all done" voice=Zarvox -''' - -DEFAULT_VOICE='Trinoids' - -def say(module, msg, voice): - module.run_command(["/usr/bin/say", msg, "--voice=%s" % (voice)], check_rc=True) - -def main(): - - module = AnsibleModule( - argument_spec=dict( - msg=dict(required=True), - voice=dict(required=False, default=DEFAULT_VOICE), - ), - supports_check_mode=False - ) - - if not os.path.exists("/usr/bin/say"): - module.fail_json(msg="/usr/bin/say is not installed") - - msg = module.params['msg'] - voice = module.params['voice'] - - say(module, msg, voice) - - module.exit_json(msg=msg, changed=False) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/notification/slack b/library/notification/slack deleted file mode 100644 index 176d6b338f..0000000000 --- a/library/notification/slack +++ /dev/null @@ -1,173 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, Ramon de la Fuente -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -DOCUMENTATION = """ -module: slack -short_description: Send Slack notifications -description: - - The M(slack) module sends notifications to U(http://slack.com) via the Incoming WebHook integration -version_added: 1.6 -author: Ramon de la Fuente -options: - domain: - description: - - Slack (sub)domain for your environment without protocol. - (i.e. C(future500.slack.com)) - required: true - token: - description: - - Slack integration token - required: true - msg: - description: - - Message to send. - required: true - channel: - description: - - Channel to send the message to. If absent, the message goes to the channel selected for the I(token). - required: false - username: - description: - - This is the sender of the message. - required: false - default: ansible - icon_url: - description: - - Url for the message sender's icon (default C(http://www.ansible.com/favicon.ico)) - required: false - icon_emoji: - description: - - Emoji for the message sender. See Slack documentation for options. - (if I(icon_emoji) is set, I(icon_url) will not be used) - required: false - link_names: - description: - - Automatically create links for channels and usernames in I(msg). - required: false - default: 1 - choices: - - 1 - - 0 - parse: - description: - - Setting for the message parser at Slack - required: false - choices: - - 'full' - - 'none' - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. 
- required: false - default: 'yes' - choices: - - 'yes' - - 'no' -""" - -EXAMPLES = """ -- name: Send notification message via Slack - local_action: - module: slack - domain: future500.slack.com - token: thetokengeneratedbyslack - msg: "{{ inventory_hostname }} completed" - -- name: Send notification message via Slack all options - local_action: - module: slack - domain: future500.slack.com - token: thetokengeneratedbyslack - msg: "{{ inventory_hostname }} completed" - channel: "#ansible" - username: "Ansible on {{ inventory_hostname }}" - icon_url: "http://www.example.com/some-image-file.png" - link_names: 0 - parse: 'none' - -""" - - -SLACK_INCOMING_WEBHOOK = 'https://%s/services/hooks/incoming-webhook?token=%s' - -def build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse): - payload = dict(text=text) - - if channel is not None: - payload['channel'] = channel if (channel[0] == '#') else '#'+channel - if username is not None: - payload['username'] = username - if icon_emoji is not None: - payload['icon_emoji'] = icon_emoji - else: - payload['icon_url'] = icon_url - if link_names is not None: - payload['link_names'] = link_names - if parse is not None: - payload['parse'] = parse - - payload="payload=" + module.jsonify(payload) - return payload - -def do_notify_slack(module, domain, token, payload): - slack_incoming_webhook = SLACK_INCOMING_WEBHOOK % (domain, token) - - response, info = fetch_url(module, slack_incoming_webhook, data=payload) - if info['status'] != 200: - obscured_incoming_webhook = SLACK_INCOMING_WEBHOOK % (domain, '[obscured]') - module.fail_json(msg=" failed to send %s to %s: %s" % (payload, obscured_incoming_webhook, info['msg'])) - -def main(): - module = AnsibleModule( - argument_spec = dict( - domain = dict(type='str', required=True), - token = dict(type='str', required=True), - msg = dict(type='str', required=True), - channel = dict(type='str', default=None), - username = dict(type='str', 
default='Ansible'), - icon_url = dict(type='str', default='http://www.ansible.com/favicon.ico'), - icon_emoji = dict(type='str', default=None), - link_names = dict(type='int', default=1, choices=[0,1]), - parse = dict(type='str', default=None, choices=['none', 'full']), - - validate_certs = dict(default='yes', type='bool'), - ) - ) - - domain = module.params['domain'] - token = module.params['token'] - text = module.params['msg'] - channel = module.params['channel'] - username = module.params['username'] - icon_url = module.params['icon_url'] - icon_emoji = module.params['icon_emoji'] - link_names = module.params['link_names'] - parse = module.params['parse'] - - payload = build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse) - do_notify_slack(module, domain, token, payload) - - module.exit_json(msg="OK") - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * -main() \ No newline at end of file diff --git a/library/notification/sns b/library/notification/sns deleted file mode 100644 index f2ed178554..0000000000 --- a/library/notification/sns +++ /dev/null @@ -1,190 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, Michael J. Schultz -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -DOCUMENTATION = """ -module: sns -short_description: Send Amazon Simple Notification Service (SNS) messages -description: - - The M(sns) module sends notifications to a topic on your Amazon SNS account -version_added: 1.6 -author: Michael J. Schultz -options: - msg: - description: - - Default message to send. - required: true - aliases: [ "default" ] - subject: - description: - - Subject line for email delivery. - required: false - topic: - description: - - The topic you want to publish to. - required: true - email: - description: - - Message to send to email-only subscription - required: false - sqs: - description: - - Message to send to SQS-only subscription - required: false - sms: - description: - - Message to send to SMS-only subscription - required: false - http: - description: - - Message to send to HTTP-only subscription - required: false - https: - description: - - Message to send to HTTPS-only subscription - required: false - aws_secret_key: - description: - - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. - required: false - default: None - aliases: ['ec2_secret_key', 'secret_key'] - aws_access_key: - description: - - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. - required: false - default: None - aliases: ['ec2_access_key', 'access_key'] - region: - description: - - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. - required: false - aliases: ['aws_region', 'ec2_region'] - -requirements: [ "boto" ] -author: Michael J. Schultz -""" - -EXAMPLES = """ -- name: Send default notification message via SNS - local_action: - module: sns - msg: "{{ inventory_hostname }} has completed the play." - subject: "Deploy complete!" - topic: "deploy" - -- name: Send notification messages via SNS with short message for SMS - local_action: - module: sns - msg: "{{ inventory_hostname }} has completed the play." 
- sms: "deployed!" - subject: "Deploy complete!" - topic: "deploy" -""" - -import sys - -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -try: - import boto - import boto.sns -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - - -def arn_topic_lookup(connection, short_topic): - response = connection.get_all_topics() - result = response[u'ListTopicsResponse'][u'ListTopicsResult'] - # topic names cannot have colons, so this captures the full topic name - lookup_topic = ':{}'.format(short_topic) - for topic in result[u'Topics']: - if topic[u'TopicArn'].endswith(lookup_topic): - return topic[u'TopicArn'] - return None - - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update( - dict( - msg=dict(type='str', required=True, aliases=['default']), - subject=dict(type='str', default=None), - topic=dict(type='str', required=True), - email=dict(type='str', default=None), - sqs=dict(type='str', default=None), - sms=dict(type='str', default=None), - http=dict(type='str', default=None), - https=dict(type='str', default=None), - ) - ) - - module = AnsibleModule(argument_spec=argument_spec) - - msg = module.params['msg'] - subject = module.params['subject'] - topic = module.params['topic'] - email = module.params['email'] - sqs = module.params['sqs'] - sms = module.params['sms'] - http = module.params['http'] - https = module.params['https'] - - region, ec2_url, aws_connect_params = get_aws_connection_info(module) - if not region: - module.fail_json(msg="region must be specified") - try: - connection = connect_to_aws(boto.sns, region, **aws_connect_params) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg=str(e)) - - # .publish() takes full ARN topic id, but I'm lazy and type shortnames - # so do a lookup (topics cannot contain ':', so thats the decider) - if ':' in topic: - arn_topic = topic - else: - arn_topic = arn_topic_lookup(connection, topic) - - if not 
arn_topic: - module.fail_json(msg='Could not find topic: {}'.format(topic)) - - dict_msg = {'default': msg} - if email: - dict_msg.update(email=email) - if sqs: - dict_msg.update(sqs=sqs) - if sms: - dict_msg.update(sms=sms) - if http: - dict_msg.update(http=http) - if https: - dict_msg.update(https=https) - - json_msg = json.dumps(dict_msg) - try: - connection.publish(topic=arn_topic, subject=subject, - message_structure='json', message=json_msg) - except boto.exception.BotoServerError, e: - module.fail_json(msg=str(e)) - - module.exit_json(msg="OK") - -main() diff --git a/library/notification/twilio b/library/notification/twilio deleted file mode 100644 index 8969c28aa5..0000000000 --- a/library/notification/twilio +++ /dev/null @@ -1,135 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, Matt Makai -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -version_added: "1.6" -module: twilio -short_description: Sends a text message to a mobile phone through Twilio. -description: - - Sends a text message to a phone number through an the Twilio SMS service. -notes: - - Like the other notification modules, this one requires an external - dependency to work. In this case, you'll need a Twilio account with - a purchased or verified phone number to send the text message. 
-options: - account_sid: - description: - user's account id for Twilio found on the account page - required: true - auth_token: - description: user's authentication token for Twilio found on the account page - required: true - msg: - description: - the body of the text message - required: true - to_number: - description: - what phone number to send the text message to, format +15551112222 - required: true - from_number: - description: - what phone number to send the text message from, format +15551112222 - required: true - -requirements: [ urllib, urllib2 ] -author: Matt Makai -''' - -EXAMPLES = ''' -# send a text message from the local server about the build status to (555) 303 5681 -# note: you have to have purchased the 'from_number' on your Twilio account -- local_action: text msg="All servers with webserver role are now configured." - account_sid={{ twilio_account_sid }} - auth_token={{ twilio_auth_token }} - from_number=+15552014545 to_number=+15553035681 - -# send a text message from a server to (555) 111 3232 -# note: you have to have purchased the 'from_number' on your Twilio account -- text: msg="This server's configuration is now complete." 
- account_sid={{ twilio_account_sid }} - auth_token={{ twilio_auth_token }} - from_number=+15553258899 to_number=+15551113232 - -''' - -# ======================================= -# text module support methods -# -try: - import urllib, urllib2 -except ImportError: - module.fail_json(msg="urllib and urllib2 are required") - -import base64 - - -def post_text(module, account_sid, auth_token, msg, from_number, to_number): - URI = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json" \ - % (account_sid,) - AGENT = "Ansible/1.5" - - data = {'From':from_number, 'To':to_number, 'Body':msg} - encoded_data = urllib.urlencode(data) - request = urllib2.Request(URI) - base64string = base64.encodestring('%s:%s' % \ - (account_sid, auth_token)).replace('\n', '') - request.add_header('User-Agent', AGENT) - request.add_header('Content-type', 'application/x-www-form-urlencoded') - request.add_header('Accept', 'application/ansible') - request.add_header('Authorization', 'Basic %s' % base64string) - return urllib2.urlopen(request, encoded_data) - - -# ======================================= -# Main -# - -def main(): - - module = AnsibleModule( - argument_spec=dict( - account_sid=dict(required=True), - auth_token=dict(required=True), - msg=dict(required=True), - from_number=dict(required=True), - to_number=dict(required=True), - ), - supports_check_mode=True - ) - - account_sid = module.params['account_sid'] - auth_token = module.params['auth_token'] - msg = module.params['msg'] - from_number = module.params['from_number'] - to_number = module.params['to_number'] - - try: - response = post_text(module, account_sid, auth_token, msg, - from_number, to_number) - except Exception, e: - module.fail_json(msg="unable to send text message to %s" % to_number) - - module.exit_json(msg=msg, changed=False) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/notification/typetalk b/library/notification/typetalk deleted file mode 100644 index 
b987acbe83..0000000000 --- a/library/notification/typetalk +++ /dev/null @@ -1,116 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -DOCUMENTATION = ''' ---- -module: typetalk -version_added: "1.6" -short_description: Send a message to typetalk -description: - - Send a message to typetalk using typetalk API ( http://developers.typetalk.in/ ) -options: - client_id: - description: - - OAuth2 client ID - required: true - client_secret: - description: - - OAuth2 client secret - required: true - topic: - description: - - topic id to post message - required: true - msg: - description: - - message body - required: true -requirements: [ urllib, urllib2, json ] -author: Takashi Someda -''' - -EXAMPLES = ''' -- typetalk: client_id=12345 client_secret=12345 topic=1 msg="install completed" -''' - -try: - import urllib -except ImportError: - urllib = None - -try: - import urllib2 -except ImportError: - urllib2 = None - -try: - import json -except ImportError: - json = None - - -def do_request(url, params, headers={}): - data = urllib.urlencode(params) - headers = dict(headers, **{ - 'User-Agent': 'Ansible/typetalk module', - }) - return urllib2.urlopen(urllib2.Request(url, data, headers)) - - -def get_access_token(client_id, client_secret): - params = { - 'client_id': client_id, - 'client_secret': client_secret, - 'grant_type': 'client_credentials', - 'scope': 'topic.post' - } - res = do_request('https://typetalk.in/oauth2/access_token', params) - return json.load(res)['access_token'] - - -def send_message(client_id, client_secret, topic, msg): - """ - send message to typetalk - """ - try: - access_token = get_access_token(client_id, client_secret) - url = 'https://typetalk.in/api/v1/topics/%d' % topic - headers = { - 'Authorization': 'Bearer %s' % access_token, - } - do_request(url, {'message': msg}, headers) - return True, {'access_token': access_token} - except urllib2.HTTPError, e: - return False, e - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - 
client_id=dict(required=True), - client_secret=dict(required=True), - topic=dict(required=True, type='int'), - msg=dict(required=True), - ), - supports_check_mode=False - ) - - if not (urllib and urllib2 and json): - module.fail_json(msg="urllib, urllib2 and json modules are required") - - client_id = module.params["client_id"] - client_secret = module.params["client_secret"] - topic = module.params["topic"] - msg = module.params["msg"] - - res, error = send_message(client_id, client_secret, topic, msg) - if not res: - module.fail_json(msg='fail to send message with response code %s' % error.code) - - module.exit_json(changed=True, topic=topic, msg=msg) - - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/packaging/apt b/library/packaging/apt deleted file mode 100755 index e5a38e538d..0000000000 --- a/library/packaging/apt +++ /dev/null @@ -1,562 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Flowroute LLC -# Written by Matthew Williams -# Based on yum module written by Seth Vidal -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . -# - -DOCUMENTATION = ''' ---- -module: apt -short_description: Manages apt-packages -description: - - Manages I(apt) packages (such as for Debian/Ubuntu). -version_added: "0.0.2" -options: - name: - description: - - A package name, like C(foo), or package specifier with version, like C(foo=1.0). 
Wildcards (fnmatch) like apt* are also supported. - required: false - default: null - state: - description: - - Indicates the desired package state. C(latest) ensures that the latest version is installed. - required: false - default: present - choices: [ "latest", "absent", "present" ] - update_cache: - description: - - Run the equivalent of C(apt-get update) before the operation. Can be run as part of the package installation or as a separate step. - required: false - default: no - choices: [ "yes", "no" ] - cache_valid_time: - description: - - If C(update_cache) is specified and the last run is less or equal than I(cache_valid_time) seconds ago, the C(update_cache) gets skipped. - required: false - default: no - purge: - description: - - Will force purging of configuration files if the module state is set to I(absent). - required: false - default: no - choices: [ "yes", "no" ] - default_release: - description: - - Corresponds to the C(-t) option for I(apt) and sets pin priorities - required: false - default: null - install_recommends: - description: - - Corresponds to the C(--no-install-recommends) option for I(apt). Default behavior (C(yes)) replicates apt's default behavior; C(no) does not install recommended packages. Suggested packages are never installed. - required: false - default: yes - choices: [ "yes", "no" ] - force: - description: - - If C(yes), force installs/removes. - required: false - default: "no" - choices: [ "yes", "no" ] - upgrade: - description: - - 'If yes or safe, performs an aptitude safe-upgrade.' - - 'If full, performs an aptitude full-upgrade.' - - 'If dist, performs an apt-get dist-upgrade.' - - 'Note: This does not upgrade a specific package, use state=latest for that.' - version_added: "1.1" - required: false - default: "yes" - choices: [ "yes", "safe", "full", "dist"] - dpkg_options: - description: - - Add dpkg options to apt command. 
Defaults to '-o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold"' - - Options should be supplied as comma separated list - required: false - default: 'force-confdef,force-confold' - deb: - description: - - Path to a .deb package on the remote machine. - required: false - version_added: "1.6" -requirements: [ python-apt, aptitude ] -author: Matthew Williams -notes: - - Three of the upgrade modes (C(full), C(safe) and its alias C(yes)) require C(aptitude), otherwise - C(apt-get) suffices. -''' - -EXAMPLES = ''' -# Update repositories cache and install "foo" package -- apt: name=foo update_cache=yes - -# Remove "foo" package -- apt: name=foo state=absent - -# Install the package "foo" -- apt: name=foo state=present - -# Install the version '1.00' of package "foo" -- apt: name=foo=1.00 state=present - -# Update the repository cache and update package "nginx" to latest version using default release squeeze-backport -- apt: name=nginx state=latest default_release=squeeze-backports update_cache=yes - -# Install latest version of "openjdk-6-jdk" ignoring "install-recommends" -- apt: name=openjdk-6-jdk state=latest install_recommends=no - -# Update all packages to the latest version -- apt: upgrade=dist - -# Run the equivalent of "apt-get update" as a separate step -- apt: update_cache=yes - -# Only run "update_cache=yes" if the last one is more than 3600 seconds ago -- apt: update_cache=yes cache_valid_time=3600 - -# Pass options to dpkg on run -- apt: upgrade=dist update_cache=yes dpkg_options='force-confold,force-confdef' - -# Install a .deb package -- apt: deb=/tmp/mypackage.deb -''' - - -import traceback -# added to stave off future warnings about apt api -import warnings -warnings.filterwarnings('ignore', "apt API not stable yet", FutureWarning) - -import os -import datetime -import fnmatch - -# APT related constants -APT_ENV_VARS = dict( - DEBIAN_FRONTEND = 'noninteractive', - DEBIAN_PRIORITY = 'critical', - LANG = 'C' -) - -DPKG_OPTIONS = 
'force-confdef,force-confold' -APT_GET_ZERO = "0 upgraded, 0 newly installed" -APTITUDE_ZERO = "0 packages upgraded, 0 newly installed" -APT_LISTS_PATH = "/var/lib/apt/lists" -APT_UPDATE_SUCCESS_STAMP_PATH = "/var/lib/apt/periodic/update-success-stamp" - -HAS_PYTHON_APT = True -try: - import apt - import apt.debfile - import apt_pkg -except ImportError: - HAS_PYTHON_APT = False - -def package_split(pkgspec): - parts = pkgspec.split('=') - if len(parts) > 1: - return parts[0], parts[1] - else: - return parts[0], None - -def package_status(m, pkgname, version, cache, state): - try: - # get the package from the cache, as well as the - # the low-level apt_pkg.Package object which contains - # state fields not directly acccesible from the - # higher-level apt.package.Package object. - pkg = cache[pkgname] - ll_pkg = cache._cache[pkgname] # the low-level package object - except KeyError: - if state == 'install': - if cache.get_providing_packages(pkgname): - return False, True, False - m.fail_json(msg="No package matching '%s' is available" % pkgname) - else: - return False, False, False - try: - has_files = len(pkg.installed_files) > 0 - except UnicodeDecodeError: - has_files = True - except AttributeError: - has_files = False # older python-apt cannot be used to determine non-purged - - try: - package_is_installed = ll_pkg.current_state == apt_pkg.CURSTATE_INSTALLED - except AttributeError: # python-apt 0.7.X has very weak low-level object - try: - # might not be necessary as python-apt post-0.7.X should have current_state property - package_is_installed = pkg.is_installed - except AttributeError: - # assume older version of python-apt is installed - package_is_installed = pkg.isInstalled - - if version and package_is_installed: - try: - installed_version = pkg.installed.version - except AttributeError: - installed_version = pkg.installedVersion - return package_is_installed and fnmatch.fnmatch(installed_version, version), False, has_files - else: - try: - 
package_is_upgradable = pkg.is_upgradable - except AttributeError: - # assume older version of python-apt is installed - package_is_upgradable = pkg.isUpgradable - return package_is_installed, package_is_upgradable, has_files - -def expand_dpkg_options(dpkg_options_compressed): - options_list = dpkg_options_compressed.split(',') - dpkg_options = "" - for dpkg_option in options_list: - dpkg_options = '%s -o "Dpkg::Options::=--%s"' \ - % (dpkg_options, dpkg_option) - return dpkg_options.strip() - -def expand_pkgspec_from_fnmatches(m, pkgspec, cache): - new_pkgspec = [] - for pkgname_or_fnmatch_pattern in pkgspec: - # note that any of these chars is not allowed in a (debian) pkgname - if [c for c in pkgname_or_fnmatch_pattern if c in "*?[]!"]: - if "=" in pkgname_or_fnmatch_pattern: - m.fail_json(msg="pkgname wildcard and version can not be mixed") - # handle multiarch pkgnames, the idea is that "apt*" should - # only select native packages. But "apt*:i386" should still work - if not ":" in pkgname_or_fnmatch_pattern: - matches = fnmatch.filter( - [pkg.name for pkg in cache - if not ":" in pkg.name], pkgname_or_fnmatch_pattern) - else: - matches = fnmatch.filter( - [pkg.name for pkg in cache], pkgname_or_fnmatch_pattern) - - if len(matches) == 0: - m.fail_json(msg="No package(s) matching '%s' available" % str(pkgname_or_fnmatch_pattern)) - else: - new_pkgspec.extend(matches) - else: - new_pkgspec.append(pkgname_or_fnmatch_pattern) - return new_pkgspec - -def install(m, pkgspec, cache, upgrade=False, default_release=None, - install_recommends=True, force=False, - dpkg_options=expand_dpkg_options(DPKG_OPTIONS)): - packages = "" - pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache) - for package in pkgspec: - name, version = package_split(package) - installed, upgradable, has_files = package_status(m, name, version, cache, state='install') - if not installed or (upgrade and upgradable): - packages += "'%s' " % package - - if len(packages) != 0: - if force: - 
force_yes = '--force-yes' - else: - force_yes = '' - - if m.check_mode: - check_arg = '--simulate' - else: - check_arg = '' - - for (k,v) in APT_ENV_VARS.iteritems(): - os.environ[k] = v - - cmd = "%s -y %s %s %s install %s" % (APT_GET_CMD, dpkg_options, force_yes, check_arg, packages) - - if default_release: - cmd += " -t '%s'" % (default_release,) - if not install_recommends: - cmd += " --no-install-recommends" - - rc, out, err = m.run_command(cmd) - if rc: - return (False, dict(msg="'apt-get install %s' failed: %s" % (packages, err), stdout=out, stderr=err)) - else: - return (True, dict(changed=True, stdout=out, stderr=err)) - else: - return (True, dict(changed=False)) - -def install_deb(m, debs, cache, force, install_recommends, dpkg_options): - changed=False - deps_to_install = [] - pkgs_to_install = [] - for deb_file in debs.split(','): - pkg = apt.debfile.DebPackage(deb_file) - - # Check if it's already installed - if pkg.compare_to_version_in_cache() == pkg.VERSION_SAME: - continue - # Check if package is installable - if not pkg.check(): - m.fail_json(msg=pkg._failure_string) - - # add any missing deps to the list of deps we need - # to install so they're all done in one shot - deps_to_install.extend(pkg.missing_deps) - - # and add this deb to the list of packages to install - pkgs_to_install.append(deb_file) - - # install the deps through apt - retvals = {} - if len(deps_to_install) > 0: - (success, retvals) = install(m=m, pkgspec=deps_to_install, cache=cache, - install_recommends=install_recommends, - dpkg_options=expand_dpkg_options(dpkg_options)) - if not success: - m.fail_json(**retvals) - changed = retvals.get('changed', False) - - if len(pkgs_to_install) > 0: - options = ' '.join(["--%s"% x for x in dpkg_options.split(",")]) - if m.check_mode: - options += " --simulate" - if force: - options += " --force-yes" - - cmd = "dpkg %s -i %s" % (options, " ".join(pkgs_to_install)) - rc, out, err = m.run_command(cmd) - if "stdout" in retvals: - stdout = 
retvals["stdout"] + out - else: - stdout = out - if "stderr" in retvals: - stderr = retvals["stderr"] + err - else: - stderr = err - - if rc == 0: - m.exit_json(changed=True, stdout=stdout, stderr=stderr) - else: - m.fail_json(msg="%s failed" % cmd, stdout=stdout, stderr=stderr) - else: - m.exit_json(changed=changed, stdout=retvals.get('stdout',''), stderr=retvals.get('stderr','')) - -def remove(m, pkgspec, cache, purge=False, - dpkg_options=expand_dpkg_options(DPKG_OPTIONS)): - packages = "" - pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache) - for package in pkgspec: - name, version = package_split(package) - installed, upgradable, has_files = package_status(m, name, version, cache, state='remove') - if installed or (has_files and purge): - packages += "'%s' " % package - - if len(packages) == 0: - m.exit_json(changed=False) - else: - if purge: - purge = '--purge' - else: - purge = '' - - for (k,v) in APT_ENV_VARS.iteritems(): - os.environ[k] = v - - cmd = "%s -q -y %s %s remove %s" % (APT_GET_CMD, dpkg_options, purge, packages) - - if m.check_mode: - m.exit_json(changed=True) - - rc, out, err = m.run_command(cmd) - if rc: - m.fail_json(msg="'apt-get remove %s' failed: %s" % (packages, err), stdout=out, stderr=err) - m.exit_json(changed=True, stdout=out, stderr=err) - -def upgrade(m, mode="yes", force=False, default_release=None, - dpkg_options=expand_dpkg_options(DPKG_OPTIONS)): - if m.check_mode: - check_arg = '--simulate' - else: - check_arg = '' - - apt_cmd = None - if mode == "dist": - # apt-get dist-upgrade - apt_cmd = APT_GET_CMD - upgrade_command = "dist-upgrade" - elif mode == "full": - # aptitude full-upgrade - apt_cmd = APTITUDE_CMD - upgrade_command = "full-upgrade" - else: - # aptitude safe-upgrade # mode=yes # default - apt_cmd = APTITUDE_CMD - upgrade_command = "safe-upgrade" - - if force: - if apt_cmd == APT_GET_CMD: - force_yes = '--force-yes' - else: - force_yes = '' - else: - force_yes = '' - - apt_cmd_path = m.get_bin_path(apt_cmd, 
required=True) - - for (k,v) in APT_ENV_VARS.iteritems(): - os.environ[k] = v - - cmd = '%s -y %s %s %s %s' % (apt_cmd_path, dpkg_options, - force_yes, check_arg, upgrade_command) - - if default_release: - cmd += " -t '%s'" % (default_release,) - - rc, out, err = m.run_command(cmd) - if rc: - m.fail_json(msg="'%s %s' failed: %s" % (apt_cmd, upgrade_command, err), stdout=out) - if (apt_cmd == APT_GET_CMD and APT_GET_ZERO in out) or (apt_cmd == APTITUDE_CMD and APTITUDE_ZERO in out): - m.exit_json(changed=False, msg=out, stdout=out, stderr=err) - m.exit_json(changed=True, msg=out, stdout=out, stderr=err) - -def main(): - module = AnsibleModule( - argument_spec = dict( - state = dict(default='installed', choices=['installed', 'latest', 'removed', 'absent', 'present']), - update_cache = dict(default=False, aliases=['update-cache'], type='bool'), - cache_valid_time = dict(type='int'), - purge = dict(default=False, type='bool'), - package = dict(default=None, aliases=['pkg', 'name'], type='list'), - deb = dict(default=None), - default_release = dict(default=None, aliases=['default-release']), - install_recommends = dict(default='yes', aliases=['install-recommends'], type='bool'), - force = dict(default='no', type='bool'), - upgrade = dict(choices=['yes', 'safe', 'full', 'dist']), - dpkg_options = dict(default=DPKG_OPTIONS) - ), - mutually_exclusive = [['package', 'upgrade', 'deb']], - required_one_of = [['package', 'upgrade', 'update_cache', 'deb']], - supports_check_mode = True - ) - - if not HAS_PYTHON_APT: - try: - module.run_command('apt-get update && apt-get install python-apt -y -q', use_unsafe_shell=True, check_rc=True) - global apt, apt_pkg - import apt - import apt_pkg - except ImportError: - module.fail_json(msg="Could not import python modules: apt, apt_pkg. 
Please install python-apt package.") - - global APTITUDE_CMD - APTITUDE_CMD = module.get_bin_path("aptitude", False) - global APT_GET_CMD - APT_GET_CMD = module.get_bin_path("apt-get") - - p = module.params - if not APTITUDE_CMD and p.get('upgrade', None) in [ 'full', 'safe', 'yes' ]: - module.fail_json(msg="Could not find aptitude. Please ensure it is installed.") - - install_recommends = p['install_recommends'] - dpkg_options = expand_dpkg_options(p['dpkg_options']) - - try: - cache = apt.Cache() - if p['default_release']: - try: - apt_pkg.config['APT::Default-Release'] = p['default_release'] - except AttributeError: - apt_pkg.Config['APT::Default-Release'] = p['default_release'] - # reopen cache w/ modified config - cache.open(progress=None) - - if p['update_cache']: - # Default is: always update the cache - cache_valid = False - if p['cache_valid_time']: - tdelta = datetime.timedelta(seconds=p['cache_valid_time']) - try: - mtime = os.stat(APT_UPDATE_SUCCESS_STAMP_PATH).st_mtime - except: - mtime = False - if mtime is False: - # Looks like the update-success-stamp is not available - # Fallback: Checking the mtime of the lists - try: - mtime = os.stat(APT_LISTS_PATH).st_mtime - except: - mtime = False - if mtime is False: - # No mtime could be read - looks like lists are not there - # We update the cache to be safe - cache_valid = False - else: - mtimestamp = datetime.datetime.fromtimestamp(mtime) - if mtimestamp + tdelta >= datetime.datetime.now(): - # dont update the cache - # the old cache is less than cache_valid_time seconds old - so still valid - cache_valid = True - - if cache_valid is not True: - cache.update() - cache.open(progress=None) - if not p['package'] and not p['upgrade'] and not p['deb']: - module.exit_json(changed=False) - - force_yes = p['force'] - - if p['upgrade']: - upgrade(module, p['upgrade'], force_yes, - p['default_release'], dpkg_options) - - if p['deb']: - if p['state'] != "installed": - module.fail_json(msg="deb only supports 
state=installed") - install_deb(module, p['deb'], cache, - install_recommends=install_recommends, - force=force_yes, dpkg_options=p['dpkg_options']) - - packages = p['package'] - latest = p['state'] == 'latest' - for package in packages: - if package.count('=') > 1: - module.fail_json(msg="invalid package spec: %s" % package) - if latest and '=' in package: - module.fail_json(msg='version number inconsistent with state=latest: %s' % package) - - if p['state'] == 'latest': - result = install(module, packages, cache, upgrade=True, - default_release=p['default_release'], - install_recommends=install_recommends, - force=force_yes, dpkg_options=dpkg_options) - (success, retvals) = result - if success: - module.exit_json(**retvals) - else: - module.fail_json(**retvals) - elif p['state'] in [ 'installed', 'present' ]: - result = install(module, packages, cache, default_release=p['default_release'], - install_recommends=install_recommends,force=force_yes, - dpkg_options=dpkg_options) - (success, retvals) = result - if success: - module.exit_json(**retvals) - else: - module.fail_json(**retvals) - elif p['state'] in [ 'removed', 'absent' ]: - remove(module, packages, cache, p['purge'], dpkg_options) - - except apt.cache.LockFailedException: - module.fail_json(msg="Failed to lock apt for exclusive operation") - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/packaging/apt_key b/library/packaging/apt_key deleted file mode 100644 index 0a483a97bb..0000000000 --- a/library/packaging/apt_key +++ /dev/null @@ -1,277 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan -# (c) 2012, Jayson Vantuyl -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: apt_key -author: Jayson Vantuyl & others -version_added: "1.0" -short_description: Add or remove an apt key -description: - - Add or remove an I(apt) key, optionally downloading it -notes: - - doesn't download the key unless it really needs it - - as a sanity check, downloaded key id must match the one specified - - best practice is to specify the key id and the url -options: - id: - required: false - default: none - description: - - identifier of key - data: - required: false - default: none - description: - - keyfile contents - file: - required: false - default: none - description: - - keyfile path - keyring: - required: false - default: none - description: - - path to specific keyring file in /etc/apt/trusted.gpg.d - version_added: "1.3" - url: - required: false - default: none - description: - - url to retrieve key from. - keyserver: - version_added: "1.6" - required: false - default: none - description: - - keyserver to retrieve key from. - state: - required: false - choices: [ absent, present ] - default: present - description: - - used to specify if key is being added or revoked - validate_certs: - description: - - If C(no), SSL certificates for the target url will not be validated. This should only be used - on personally controlled sites using self-signed certificates. 
- required: false - default: 'yes' - choices: ['yes', 'no'] - -''' - -EXAMPLES = ''' -# Add an Apt signing key, uses whichever key is at the URL -- apt_key: url=https://ftp-master.debian.org/keys/archive-key-6.0.asc state=present - -# Add an Apt signing key, will not download if present -- apt_key: id=473041FA url=https://ftp-master.debian.org/keys/archive-key-6.0.asc state=present - -# Remove an Apt signing key, uses whichever key is at the URL -- apt_key: url=https://ftp-master.debian.org/keys/archive-key-6.0.asc state=absent - -# Remove a Apt specific signing key, leading 0x is valid -- apt_key: id=0x473041FA state=absent - -# Add a key from a file on the Ansible server -- apt_key: data="{{ lookup('file', 'apt.gpg') }}" state=present - -# Add an Apt signing key to a specific keyring file -- apt_key: id=473041FA url=https://ftp-master.debian.org/keys/archive-key-6.0.asc keyring=/etc/apt/trusted.gpg.d/debian.gpg state=present -''' - - -# FIXME: standardize into module_common -from traceback import format_exc -from re import compile as re_compile -# FIXME: standardize into module_common -from distutils.spawn import find_executable -from os import environ -from sys import exc_info -import traceback - -match_key = re_compile("^gpg:.*key ([0-9a-fA-F]+):.*$") - -REQUIRED_EXECUTABLES=['gpg', 'grep', 'apt-key'] - - -def check_missing_binaries(module): - missing = [e for e in REQUIRED_EXECUTABLES if not find_executable(e)] - if len(missing): - module.fail_json(msg="binaries are missing", names=missing) - -def all_keys(module, keyring, short_format): - if keyring: - cmd = "apt-key --keyring %s adv --list-public-keys --keyid-format=long" % keyring - else: - cmd = "apt-key adv --list-public-keys --keyid-format=long" - (rc, out, err) = module.run_command(cmd) - results = [] - lines = out.split('\n') - for line in lines: - if line.startswith("pub"): - tokens = line.split() - code = tokens[1] - (len_type, real_code) = code.split("/") - results.append(real_code) - if 
short_format: - results = shorten_key_ids(results) - return results - -def shorten_key_ids(key_id_list): - """ - Takes a list of key ids, and converts them to the 'short' format, - by reducing them to their last 8 characters. - """ - short = [] - for key in key_id_list: - short.append(key[-8:]) - return short - -def download_key(module, url): - # FIXME: move get_url code to common, allow for in-memory D/L, support proxies - # and reuse here - if url is None: - module.fail_json(msg="needed a URL but was not specified") - - try: - rsp, info = fetch_url(module, url) - if info['status'] != 200: - module.fail_json(msg="Failed to download key at %s: %s" % (url, info['msg'])) - - return rsp.read() - except Exception: - module.fail_json(msg="error getting key id from url: %s" % url, traceback=format_exc()) - -def import_key(module, keyserver, key_id): - cmd = "apt-key adv --keyserver %s --recv %s" % (keyserver, key_id) - (rc, out, err) = module.run_command(cmd, check_rc=True) - return True - -def add_key(module, keyfile, keyring, data=None): - if data is not None: - if keyring: - cmd = "apt-key --keyring %s add -" % keyring - else: - cmd = "apt-key add -" - (rc, out, err) = module.run_command(cmd, data=data, check_rc=True, binary_data=True) - else: - if keyring: - cmd = "apt-key --keyring %s add %s" % (keyring, keyfile) - else: - cmd = "apt-key add %s" % (keyfile) - (rc, out, err) = module.run_command(cmd, check_rc=True) - return True - -def remove_key(module, key_id, keyring): - # FIXME: use module.run_command, fail at point of error and don't discard useful stdin/stdout - if keyring: - cmd = 'apt-key --keyring %s del %s' % (keyring, key_id) - else: - cmd = 'apt-key del %s' % key_id - (rc, out, err) = module.run_command(cmd, check_rc=True) - return True - -def main(): - module = AnsibleModule( - argument_spec=dict( - id=dict(required=False, default=None), - url=dict(required=False), - data=dict(required=False), - file=dict(required=False), - key=dict(required=False), - 
keyring=dict(required=False), - validate_certs=dict(default='yes', type='bool'), - keyserver=dict(required=False), - state=dict(required=False, choices=['present', 'absent'], default='present') - ), - supports_check_mode=True - ) - - key_id = module.params['id'] - url = module.params['url'] - data = module.params['data'] - filename = module.params['file'] - keyring = module.params['keyring'] - state = module.params['state'] - keyserver = module.params['keyserver'] - changed = False - - if key_id: - try: - _ = int(key_id, 16) - if key_id.startswith('0x'): - key_id = key_id[2:] - key_id = key_id.upper() - except ValueError: - module.fail_json(msg="Invalid key_id", id=key_id) - - # FIXME: I think we have a common facility for this, if not, want - check_missing_binaries(module) - - short_format = (key_id is not None and len(key_id) == 8) - keys = all_keys(module, keyring, short_format) - return_values = {} - - if state == 'present': - if key_id and key_id in keys: - module.exit_json(changed=False) - else: - if not filename and not data and not keyserver: - data = download_key(module, url) - if key_id and key_id in keys: - module.exit_json(changed=False) - else: - if module.check_mode: - module.exit_json(changed=True) - if filename: - add_key(module, filename, keyring) - elif keyserver: - import_key(module, keyserver, key_id) - else: - add_key(module, "-", keyring, data) - changed=False - keys2 = all_keys(module, keyring, short_format) - if len(keys) != len(keys2): - changed=True - if key_id and not key_id[-16:] in keys2: - module.fail_json(msg="key does not seem to have been added", id=key_id) - module.exit_json(changed=changed) - elif state == 'absent': - if not key_id: - module.fail_json(msg="key is required") - if key_id in keys: - if module.check_mode: - module.exit_json(changed=True) - if remove_key(module, key_id, keyring): - changed=True - else: - # FIXME: module.fail_json or exit-json immediately at point of failure - module.fail_json(msg="error removing 
key_id", **return_values) - - module.exit_json(changed=changed, **return_values) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * -main() diff --git a/library/packaging/apt_repository b/library/packaging/apt_repository deleted file mode 100644 index 2ee5819fc4..0000000000 --- a/library/packaging/apt_repository +++ /dev/null @@ -1,446 +0,0 @@ -#!/usr/bin/python -# encoding: utf-8 - -# (c) 2012, Matt Wright -# (c) 2013, Alexander Saltanov -# (c) 2014, Rutger Spiertz -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - - -DOCUMENTATION = ''' ---- -module: apt_repository -short_description: Add and remove APT repositories -description: - - Add or remove an APT repositories in Ubuntu and Debian. -notes: - - This module works on Debian and Ubuntu and requires C(python-apt). - - This module supports Debian Squeeze (version 6) as well as its successors. - - This module treats Debian and Ubuntu distributions separately. So PPA could be installed only on Ubuntu machines. -options: - repo: - required: true - default: none - description: - - A source string for the repository. - state: - required: false - choices: [ "absent", "present" ] - default: "present" - description: - - A source string state. 
- mode: - required: false - default: 0644 - description: - - The octal mode for newly created files in sources.list.d - version_added: "1.6" - update_cache: - description: - - Run the equivalent of C(apt-get update) when a change occurs. Cache updates are run after making changes. - required: false - default: "yes" - choices: [ "yes", "no" ] - validate_certs: - version_added: '1.8' - description: - - If C(no), SSL certificates for the target repo will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - required: false - default: 'yes' - choices: ['yes', 'no'] -author: Alexander Saltanov -version_added: "0.7" -requirements: [ python-apt ] -''' - -EXAMPLES = ''' -# Add specified repository into sources list. -apt_repository: repo='deb http://archive.canonical.com/ubuntu hardy partner' state=present - -# Add source repository into sources list. -apt_repository: repo='deb-src http://archive.canonical.com/ubuntu hardy partner' state=present - -# Remove specified repository from sources list. -apt_repository: repo='deb http://archive.canonical.com/ubuntu hardy partner' state=absent - -# On Ubuntu target: add nginx stable repository from PPA and install its signing key. -# On Debian target: adding PPA is not available, so it will fail immediately. 
-apt_repository: repo='ppa:nginx/stable' -''' - -import glob -import os -import re -import tempfile - -try: - import apt - import apt_pkg - import aptsources.distro as aptsources_distro - distro = aptsources_distro.get_distro() - HAVE_PYTHON_APT = True -except ImportError: - distro = None - HAVE_PYTHON_APT = False - - -VALID_SOURCE_TYPES = ('deb', 'deb-src') - -def install_python_apt(module): - - if not module.check_mode: - apt_get_path = module.get_bin_path('apt-get') - if apt_get_path: - rc, so, se = module.run_command('%s update && %s install python-apt -y -q' % (apt_get_path, apt_get_path), use_unsafe_shell=True) - if rc == 0: - global apt, apt_pkg, aptsources_distro, distro, HAVE_PYTHON_APT - import apt - import apt_pkg - import aptsources.distro as aptsources_distro - distro = aptsources_distro.get_distro() - HAVE_PYTHON_APT = True - else: - module.fail_json(msg="Failed to auto-install python-apt. Error was: '%s'" % se.strip()) - -class InvalidSource(Exception): - pass - - -# Simple version of aptsources.sourceslist.SourcesList. -# No advanced logic and no backups inside. -class SourcesList(object): - def __init__(self): - self.files = {} # group sources by file - self.default_file = self._apt_cfg_file('Dir::Etc::sourcelist') - - # read sources.list if it exists - if os.path.isfile(self.default_file): - self.load(self.default_file) - - # read sources.list.d - for file in glob.iglob('%s/*.list' % self._apt_cfg_dir('Dir::Etc::sourceparts')): - self.load(file) - - def __iter__(self): - '''Simple iterator to go over all sources. 
Empty, non-source, and other not valid lines will be skipped.''' - for file, sources in self.files.items(): - for n, valid, enabled, source, comment in sources: - if valid: - yield file, n, enabled, source, comment - raise StopIteration - - def _expand_path(self, filename): - if '/' in filename: - return filename - else: - return os.path.abspath(os.path.join(self._apt_cfg_dir('Dir::Etc::sourceparts'), filename)) - - def _suggest_filename(self, line): - def _cleanup_filename(s): - return '_'.join(re.sub('[^a-zA-Z0-9]', ' ', s).split()) - def _strip_username_password(s): - if '@' in s: - s = s.split('@', 1) - s = s[-1] - return s - - # Drop options and protocols. - line = re.sub('\[[^\]]+\]', '', line) - line = re.sub('\w+://', '', line) - - # split line into valid keywords - parts = [part for part in line.split() if part not in VALID_SOURCE_TYPES] - - # Drop usernames and passwords - parts[0] = _strip_username_password(parts[0]) - - return '%s.list' % _cleanup_filename(' '.join(parts[:1])) - - def _parse(self, line, raise_if_invalid_or_disabled=False): - valid = False - enabled = True - source = '' - comment = '' - - line = line.strip() - if line.startswith('#'): - enabled = False - line = line[1:] - - # Check for another "#" in the line and treat a part after it as a comment. - i = line.find('#') - if i > 0: - comment = line[i+1:].strip() - line = line[:i] - - # Split a source into substring to make sure that it is source spec. - # Duplicated whitespaces in a valid source spec will be removed. 
- source = line.strip() - if source: - chunks = source.split() - if chunks[0] in VALID_SOURCE_TYPES: - valid = True - source = ' '.join(chunks) - - if raise_if_invalid_or_disabled and (not valid or not enabled): - raise InvalidSource(line) - - return valid, enabled, source, comment - - @staticmethod - def _apt_cfg_file(filespec): - ''' - Wrapper for `apt_pkg` module for running with Python 2.5 - ''' - try: - result = apt_pkg.config.find_file(filespec) - except AttributeError: - result = apt_pkg.Config.FindFile(filespec) - return result - - @staticmethod - def _apt_cfg_dir(dirspec): - ''' - Wrapper for `apt_pkg` module for running with Python 2.5 - ''' - try: - result = apt_pkg.config.find_dir(dirspec) - except AttributeError: - result = apt_pkg.Config.FindDir(dirspec) - return result - - def load(self, file): - group = [] - f = open(file, 'r') - for n, line in enumerate(f): - valid, enabled, source, comment = self._parse(line) - group.append((n, valid, enabled, source, comment)) - self.files[file] = group - - def save(self, module): - for filename, sources in self.files.items(): - if sources: - d, fn = os.path.split(filename) - fd, tmp_path = tempfile.mkstemp(prefix=".%s-" % fn, dir=d) - - # allow the user to override the default mode - this_mode = module.params['mode'] - module.set_mode_if_different(tmp_path, this_mode, False) - - f = os.fdopen(fd, 'w') - for n, valid, enabled, source, comment in sources: - chunks = [] - if not enabled: - chunks.append('# ') - chunks.append(source) - if comment: - chunks.append(' # ') - chunks.append(comment) - chunks.append('\n') - line = ''.join(chunks) - - try: - f.write(line) - except IOError, err: - module.fail_json(msg="Failed to write to file %s: %s" % (tmp_path, unicode(err))) - module.atomic_move(tmp_path, filename) - else: - del self.files[filename] - if os.path.exists(filename): - os.remove(filename) - - def dump(self): - return '\n'.join([str(i) for i in self]) - - def modify(self, file, n, enabled=None, source=None, 
comment=None): - ''' - This function to be used with iterator, so we don't care of invalid sources. - If source, enabled, or comment is None, original value from line ``n`` will be preserved. - ''' - valid, enabled_old, source_old, comment_old = self.files[file][n][1:] - choice = lambda new, old: old if new is None else new - self.files[file][n] = (n, valid, choice(enabled, enabled_old), choice(source, source_old), choice(comment, comment_old)) - - def _add_valid_source(self, source_new, comment_new, file): - # We'll try to reuse disabled source if we have it. - # If we have more than one entry, we will enable them all - no advanced logic, remember. - found = False - for filename, n, enabled, source, comment in self: - if source == source_new: - self.modify(filename, n, enabled=True) - found = True - - if not found: - if file is None: - file = self.default_file - else: - file = self._expand_path(file) - - if file not in self.files: - self.files[file] = [] - - files = self.files[file] - files.append((len(files), True, True, source_new, comment_new)) - - def add_source(self, line, comment='', file=None): - source = self._parse(line, raise_if_invalid_or_disabled=True)[2] - - # Prefer separate files for new sources. - self._add_valid_source(source, comment, file=file or self._suggest_filename(source)) - - def _remove_valid_source(self, source): - # If we have more than one entry, we will remove them all (not comment, remove!) 
- for filename, n, enabled, src, comment in self: - if source == src and enabled: - self.files[filename].pop(n) - - def remove_source(self, line): - source = self._parse(line, raise_if_invalid_or_disabled=True)[2] - self._remove_valid_source(source) - - -class UbuntuSourcesList(SourcesList): - - LP_API = 'https://launchpad.net/api/1.0/~%s/+archive/%s' - - def __init__(self, module, add_ppa_signing_keys_callback=None): - self.module = module - self.add_ppa_signing_keys_callback = add_ppa_signing_keys_callback - super(UbuntuSourcesList, self).__init__() - - def _get_ppa_info(self, owner_name, ppa_name): - lp_api = self.LP_API % (owner_name, ppa_name) - - headers = dict(Accept='application/json') - response, info = fetch_url(self.module, lp_api, headers=headers) - if info['status'] != 200: - self.module.fail_json(msg="failed to fetch PPA information, error was: %s" % info['msg']) - return json.load(response) - - def _expand_ppa(self, path): - ppa = path.split(':')[1] - ppa_owner = ppa.split('/')[0] - try: - ppa_name = ppa.split('/')[1] - except IndexError: - ppa_name = 'ppa' - - line = 'deb http://ppa.launchpad.net/%s/%s/ubuntu %s main' % (ppa_owner, ppa_name, distro.codename) - return line, ppa_owner, ppa_name - - def _key_already_exists(self, key_fingerprint): - rc, out, err = self.module.run_command('apt-key export %s' % key_fingerprint, check_rc=True) - return len(err) == 0 - - def add_source(self, line, comment='', file=None): - if line.startswith('ppa:'): - source, ppa_owner, ppa_name = self._expand_ppa(line) - - if self.add_ppa_signing_keys_callback is not None: - info = self._get_ppa_info(ppa_owner, ppa_name) - if not self._key_already_exists(info['signing_key_fingerprint']): - command = ['apt-key', 'adv', '--recv-keys', '--keyserver', 'hkp://keyserver.ubuntu.com:80', info['signing_key_fingerprint']] - self.add_ppa_signing_keys_callback(command) - - file = file or self._suggest_filename('%s_%s' % (line, distro.codename)) - else: - source = self._parse(line, 
raise_if_invalid_or_disabled=True)[2] - file = file or self._suggest_filename(source) - self._add_valid_source(source, comment, file) - - def remove_source(self, line): - if line.startswith('ppa:'): - source = self._expand_ppa(line)[0] - else: - source = self._parse(line, raise_if_invalid_or_disabled=True)[2] - self._remove_valid_source(source) - - -def get_add_ppa_signing_key_callback(module): - def _run_command(command): - module.run_command(command, check_rc=True) - - if module.check_mode: - return None - else: - return _run_command - - -def main(): - module = AnsibleModule( - argument_spec=dict( - repo=dict(required=True), - state=dict(choices=['present', 'absent'], default='present'), - mode=dict(required=False, default=0644), - update_cache = dict(aliases=['update-cache'], type='bool', default='yes'), - # this should not be needed, but exists as a failsafe - install_python_apt=dict(required=False, default="yes", type='bool'), - validate_certs = dict(default='yes', type='bool'), - ), - supports_check_mode=True, - ) - - params = module.params - if params['install_python_apt'] and not HAVE_PYTHON_APT and not module.check_mode: - install_python_apt(module) - - repo = module.params['repo'] - state = module.params['state'] - update_cache = module.params['update_cache'] - sourceslist = None - - if HAVE_PYTHON_APT: - if isinstance(distro, aptsources_distro.UbuntuDistribution): - sourceslist = UbuntuSourcesList(module, - add_ppa_signing_keys_callback=get_add_ppa_signing_key_callback(module)) - elif HAVE_PYTHON_APT and \ - isinstance(distro, aptsources_distro.DebianDistribution) or isinstance(distro, aptsources_distro.Distribution): - sourceslist = SourcesList() - else: - module.fail_json(msg='Module apt_repository supports only Debian and Ubuntu. 
' + \ - 'You may be seeing this because python-apt is not installed, but you requested that it not be auto-installed') - - sources_before = sourceslist.dump() - - try: - if state == 'present': - sourceslist.add_source(repo) - elif state == 'absent': - sourceslist.remove_source(repo) - except InvalidSource, err: - module.fail_json(msg='Invalid repository string: %s' % unicode(err)) - - sources_after = sourceslist.dump() - changed = sources_before != sources_after - - if not module.check_mode and changed: - try: - sourceslist.save(module) - if update_cache: - cache = apt.Cache() - cache.update() - except OSError, err: - module.fail_json(msg=unicode(err)) - - module.exit_json(changed=changed, repo=repo, state=state) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * - -main() diff --git a/library/packaging/apt_rpm b/library/packaging/apt_rpm deleted file mode 100755 index a85c528a23..0000000000 --- a/library/packaging/apt_rpm +++ /dev/null @@ -1,172 +0,0 @@ -#!/usr/bin/python -tt -# -*- coding: utf-8 -*- - -# (c) 2013, Evgenii Terechkov -# Written by Evgenii Terechkov -# Based on urpmi module written by Philippe Makowski -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . - - -DOCUMENTATION = ''' ---- -module: apt_rpm -short_description: apt_rpm package manager -description: - - Manages packages with I(apt-rpm). 
Both low-level (I(rpm)) and high-level (I(apt-get)) package manager binaries required. -version_added: "1.5" -options: - pkg: - description: - - name of package to install, upgrade or remove. - required: true - default: null - state: - description: - - Indicates the desired package state - required: false - default: present - choices: [ "absent", "present" ] - update_cache: - description: - - update the package database first C(apt-get update). - required: false - default: no - choices: [ "yes", "no" ] -author: Evgenii Terechkov -notes: [] -''' - -EXAMPLES = ''' -# install package foo -- apt_rpm: pkg=foo state=present -# remove package foo -- apt_rpm: pkg=foo state=absent -# description: remove packages foo and bar -- apt_rpm: pkg=foo,bar state=absent -# description: update the package database and install bar (bar will be the updated if a newer version exists) -- apt_rpm: name=bar state=present update_cache=yes -''' - - -try: - import json -except ImportError: - import simplejson as json - -import shlex -import os -import sys - -APT_PATH="/usr/bin/apt-get" -RPM_PATH="/usr/bin/rpm" - -def query_package(module, name): - # rpm -q returns 0 if the package is installed, - # 1 if it is not installed - rc = os.system("%s -q %s" % (RPM_PATH,name)) - if rc == 0: - return True - else: - return False - -def query_package_provides(module, name): - # rpm -q returns 0 if the package is installed, - # 1 if it is not installed - rc = os.system("%s -q --provides %s >/dev/null" % (RPM_PATH,name)) - return rc == 0 - -def update_package_db(module): - rc = os.system("%s update" % APT_PATH) - - if rc != 0: - module.fail_json(msg="could not update package db") - -def remove_packages(module, packages): - - remove_c = 0 - # Using a for loop incase of error, we can report the package that failed - for package in packages: - # Query the package first, to see if we even need to remove - if not query_package(module, package): - continue - - rc = os.system("%s -y remove %s > /dev/null" % 
(APT_PATH,package)) - - if rc != 0: - module.fail_json(msg="failed to remove %s" % (package)) - - remove_c += 1 - - if remove_c > 0: - module.exit_json(changed=True, msg="removed %s package(s)" % remove_c) - - module.exit_json(changed=False, msg="package(s) already absent") - - -def install_packages(module, pkgspec): - - packages = "" - for package in pkgspec: - if not query_package_provides(module, package): - packages += "'%s' " % package - - if len(packages) != 0: - - cmd = ("%s -y install %s > /dev/null" % (APT_PATH, packages)) - - rc, out, err = module.run_command(cmd) - - installed = True - for packages in pkgspec: - if not query_package_provides(module, package): - installed = False - - # apt-rpm always have 0 for exit code if --force is used - if rc or not installed: - module.fail_json(msg="'apt-get -y install %s' failed: %s" % (packages, err)) - else: - module.exit_json(changed=True, msg="%s present(s)" % packages) - else: - module.exit_json(changed=False) - - -def main(): - module = AnsibleModule( - argument_spec = dict( - state = dict(default='installed', choices=['installed', 'removed', 'absent', 'present']), - update_cache = dict(default=False, aliases=['update-cache'], type='bool'), - package = dict(aliases=['pkg', 'name'], required=True))) - - - if not os.path.exists(APT_PATH) or not os.path.exists(RPM_PATH): - module.fail_json(msg="cannot find /usr/bin/apt-get and/or /usr/bin/rpm") - - p = module.params - - if p['update_cache']: - update_package_db(module) - - packages = p['package'].split(',') - - if p['state'] in [ 'installed', 'present' ]: - install_packages(module, packages) - - elif p['state'] in [ 'removed', 'absent' ]: - remove_packages(module, packages) - -# this is magic, see lib/ansible/module_common.py -from ansible.module_utils.basic import * - -main() diff --git a/library/packaging/composer b/library/packaging/composer deleted file mode 100644 index 2930018bd9..0000000000 --- a/library/packaging/composer +++ /dev/null @@ -1,164 +0,0 @@ 
-#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, Dimitrios Tydeas Mengidis - -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -DOCUMENTATION = ''' ---- -module: composer -author: Dimitrios Tydeas Mengidis -short_description: Dependency Manager for PHP -version_added: "1.6" -description: - - Composer is a tool for dependency management in PHP. It allows you to declare the dependent libraries your project needs and it will install them in your project for you -options: - command: - version_added: "1.8" - description: - - Composer command like "install", "update" and so on - required: false - default: install - working_dir: - description: - - Directory of your project ( see --working-dir ) - required: true - default: null - aliases: [ "working-dir" ] - prefer_source: - description: - - Forces installation from package sources when possible ( see --prefer-source ) - required: false - default: "no" - choices: [ "yes", "no" ] - aliases: [ "prefer-source" ] - prefer_dist: - description: - - Forces installation from package dist even for de versions ( see --prefer-dist ) - required: false - default: "no" - choices: [ "yes", "no" ] - aliases: [ "prefer-dist" ] - no_dev: - description: - - Disables installation of require-dev packages ( see --no-dev ) - required: false - default: "yes" - choices: [ "yes", "no" ] - aliases: [ "no-dev" ] - no_scripts: - description: - - Skips the 
execution of all scripts defined in composer.json ( see --no-scripts ) - required: false - default: "no" - choices: [ "yes", "no" ] - aliases: [ "no-scripts" ] - no_plugins: - description: - - Disables all plugins ( see --no-plugins ) - required: false - default: "no" - choices: [ "yes", "no" ] - aliases: [ "no-plugins" ] - optimize_autoloader: - description: - - Optimize autoloader during autoloader dump ( see --optimize-autoloader ). Convert PSR-0/4 autoloading to classmap to get a faster autoloader. This is recommended especially for production, but can take a bit of time to run so it is currently not done by default. - required: false - default: "yes" - choices: [ "yes", "no" ] - aliases: [ "optimize-autoloader" ] -requirements: - - php - - composer installed in bin path (recommended /usr/local/bin) -notes: - - Default options that are always appended in each execution are --no-ansi, --no-progress, and --no-interaction -''' - -EXAMPLES = ''' -# Downloads and installs all the libs and dependencies outlined in the /path/to/project/composer.lock -- composer: command=install working_dir=/path/to/project -''' - -import os -import re - -def parse_out(string): - return re.sub("\s+", " ", string).strip() - -def has_changed(string): - return (re.match("Nothing to install or update", string) != None) - -def composer_install(module, command, options): - php_path = module.get_bin_path("php", True, ["/usr/local/bin"]) - composer_path = module.get_bin_path("composer", True, ["/usr/local/bin"]) - cmd = "%s %s %s %s" % (php_path, composer_path, command, " ".join(options)) - - return module.run_command(cmd) - -def main(): - module = AnsibleModule( - argument_spec = dict( - command = dict(default="install", type="str", required=False), - working_dir = dict(aliases=["working-dir"], required=True), - prefer_source = dict(default="no", type="bool", aliases=["prefer-source"]), - prefer_dist = dict(default="no", type="bool", aliases=["prefer-dist"]), - no_dev = dict(default="yes", 
type="bool", aliases=["no-dev"]), - no_scripts = dict(default="no", type="bool", aliases=["no-scripts"]), - no_plugins = dict(default="no", type="bool", aliases=["no-plugins"]), - optimize_autoloader = dict(default="yes", type="bool", aliases=["optimize-autoloader"]), - ), - supports_check_mode=True - ) - - module.params["working_dir"] = os.path.abspath(module.params["working_dir"]) - - options = set([]) - # Default options - options.add("--no-ansi") - options.add("--no-progress") - options.add("--no-interaction") - - if module.check_mode: - options.add("--dry-run") - - # Get composer command with fallback to default - command = module.params['command'] - del module.params['command']; - - # Prepare options - for i in module.params: - opt = "--%s" % i.replace("_","-") - p = module.params[i] - if isinstance(p, (bool)) and p: - options.add(opt) - elif isinstance(p, (str)): - options.add("%s=%s" % (opt, p)) - - rc, out, err = composer_install(module, command, options) - - if rc != 0: - output = parse_out(err) - module.fail_json(msg=output) - else: - output = parse_out(out) - module.exit_json(changed=has_changed(output), msg=output) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/packaging/cpanm b/library/packaging/cpanm deleted file mode 100644 index 5b1a9878d2..0000000000 --- a/library/packaging/cpanm +++ /dev/null @@ -1,145 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Franck Cuny -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -DOCUMENTATION = ''' ---- -module: cpanm -short_description: Manages Perl library dependencies. -description: - - Manage Perl library dependencies. -version_added: "1.6" -options: - name: - description: - - The name of the Perl library to install - required: false - default: null - aliases: ["pkg"] - from_path: - description: - - The local directory from where to install - required: false - default: null - notest: - description: - - Do not run unit tests - required: false - default: false - locallib: - description: - - Specify the install base to install modules - required: false - default: false - mirror: - description: - - Specifies the base URL for the CPAN mirror to use - required: false - default: false -examples: - - code: "cpanm: name=Dancer" - description: Install I(Dancer) perl package. - - code: "cpanm: name=Dancer locallib=/srv/webapps/my_app/extlib" - description: "Install I(Dancer) (U(http://perldancer.org/)) into the specified I(locallib)" - - code: "cpanm: from_path=/srv/webapps/my_app/src/" - description: Install perl dependencies from local directory. - - code: "cpanm: name=Dancer notest=True locallib=/srv/webapps/my_app/extlib" - description: Install I(Dancer) perl package without running the unit tests in indicated I(locallib). - - code: "cpanm: name=Dancer mirror=http://cpan.cpantesters.org/" - description: Install I(Dancer) perl package from a specific mirror -notes: - - Please note that U(http://search.cpan.org/dist/App-cpanminus/bin/cpanm, cpanm) must be installed on the remote host. 
-author: Franck Cuny -''' - -def _is_package_installed(module, name, locallib, cpanm): - cmd = "" - if locallib: - os.environ["PERL5LIB"] = "%s/lib/perl5" % locallib - cmd = "%s perl -M%s -e '1'" % (cmd, name) - res, stdout, stderr = module.run_command(cmd, check_rc=False) - if res == 0: - return True - else: - return False - -def _build_cmd_line(name, from_path, notest, locallib, mirror, cpanm): - # this code should use "%s" like everything else and just return early but not fixing all of it now. - # don't copy stuff like this - if from_path: - cmd = "{cpanm} {path}".format(cpanm=cpanm, path=from_path) - else: - cmd = "{cpanm} {name}".format(cpanm=cpanm, name=name) - - if notest is True: - cmd = "{cmd} -n".format(cmd=cmd) - - if locallib is not None: - cmd = "{cmd} -l {locallib}".format(cmd=cmd, locallib=locallib) - - if mirror is not None: - cmd = "{cmd} --mirror {mirror}".format(cmd=cmd, mirror=mirror) - - return cmd - - -def main(): - arg_spec = dict( - name=dict(default=None, required=False, aliases=['pkg']), - from_path=dict(default=None, required=False), - notest=dict(default=False, type='bool'), - locallib=dict(default=None, required=False), - mirror=dict(default=None, required=False) - ) - - module = AnsibleModule( - argument_spec=arg_spec, - required_one_of=[['name', 'from_path']], - ) - - cpanm = module.get_bin_path('cpanm', True) - name = module.params['name'] - from_path = module.params['from_path'] - notest = module.boolean(module.params.get('notest', False)) - locallib = module.params['locallib'] - mirror = module.params['mirror'] - - changed = False - - installed = _is_package_installed(module, name, locallib, cpanm) - - if not installed: - out_cpanm = err_cpanm = '' - cmd = _build_cmd_line(name, from_path, notest, locallib, mirror, cpanm) - - rc_cpanm, out_cpanm, err_cpanm = module.run_command(cmd, check_rc=False) - - if rc_cpanm != 0: - module.fail_json(msg=err_cpanm, cmd=cmd) - - if err_cpanm and 'is up to date' not in err_cpanm: - changed = True 
- - module.exit_json(changed=changed, binary=cpanm, name=name) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/packaging/easy_install b/library/packaging/easy_install deleted file mode 100644 index 889a81f025..0000000000 --- a/library/packaging/easy_install +++ /dev/null @@ -1,188 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Matt Wright -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -import tempfile -import os.path - -DOCUMENTATION = ''' ---- -module: easy_install -short_description: Installs Python libraries -description: - - Installs Python libraries, optionally in a I(virtualenv) -version_added: "0.7" -options: - name: - description: - - A Python library name - required: true - default: null - aliases: [] - virtualenv: - description: - - an optional I(virtualenv) directory path to install into. If the - I(virtualenv) does not exist, it is created automatically - required: false - default: null - virtualenv_site_packages: - version_added: "1.1" - description: - - Whether the virtual environment will inherit packages from the - global site-packages directory. Note that if this setting is - changed on an already existing virtual environment it will not - have any effect, the environment must be deleted and newly - created. 
- required: false - default: "no" - choices: [ "yes", "no" ] - virtualenv_command: - version_added: "1.1" - description: - - The command to create the virtual environment with. For example - C(pyvenv), C(virtualenv), C(virtualenv2). - required: false - default: virtualenv - executable: - description: - - The explicit executable or a pathname to the executable to be used to - run easy_install for a specific version of Python installed in the - system. For example C(easy_install-3.3), if there are both Python 2.7 - and 3.3 installations in the system and you want to run easy_install - for the Python 3.3 installation. - version_added: "1.3" - required: false - default: null -notes: - - Please note that the M(easy_install) module can only install Python - libraries. Thus this module is not able to remove libraries. It is - generally recommended to use the M(pip) module which you can first install - using M(easy_install). - - Also note that I(virtualenv) must be installed on the remote host if the - C(virtualenv) parameter is specified. -requirements: [ "virtualenv" ] -author: Matt Wright -''' - -EXAMPLES = ''' -# Examples from Ansible Playbooks -- easy_install: name=pip - -# Install Bottle into the specified virtualenv. -- easy_install: name=bottle virtualenv=/webapps/myapp/venv -''' - -def _is_package_installed(module, name, easy_install): - cmd = '%s --dry-run %s' % (easy_install, name) - rc, status_stdout, status_stderr = module.run_command(cmd) - return not ('Reading' in status_stdout or 'Downloading' in status_stdout) - - -def _get_easy_install(module, env=None, executable=None): - candidate_easy_inst_basenames = ['easy_install'] - easy_install = None - if executable is not None: - if os.path.isabs(executable): - easy_install = executable - else: - candidate_easy_inst_basenames.insert(0, executable) - if easy_install is None: - if env is None: - opt_dirs = [] - else: - # Try easy_install with the virtualenv directory first. 
- opt_dirs = ['%s/bin' % env] - for basename in candidate_easy_inst_basenames: - easy_install = module.get_bin_path(basename, False, opt_dirs) - if easy_install is not None: - break - # easy_install should have been found by now. The final call to - # get_bin_path will trigger fail_json. - if easy_install is None: - basename = candidate_easy_inst_basenames[0] - easy_install = module.get_bin_path(basename, True, opt_dirs) - return easy_install - - -def main(): - arg_spec = dict( - name=dict(required=True), - virtualenv=dict(default=None, required=False), - virtualenv_site_packages=dict(default='no', type='bool'), - virtualenv_command=dict(default='virtualenv', required=False), - executable=dict(default='easy_install', required=False), - ) - - module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True) - - name = module.params['name'] - env = module.params['virtualenv'] - executable = module.params['executable'] - site_packages = module.params['virtualenv_site_packages'] - virtualenv_command = module.params['virtualenv_command'] - - rc = 0 - err = '' - out = '' - - if env: - virtualenv = module.get_bin_path(virtualenv_command, True) - - if not os.path.exists(os.path.join(env, 'bin', 'activate')): - if module.check_mode: - module.exit_json(changed=True) - command = '%s %s' % (virtualenv, env) - if site_packages: - command += ' --system-site-packages' - cwd = tempfile.gettempdir() - rc_venv, out_venv, err_venv = module.run_command(command, cwd=cwd) - - rc += rc_venv - out += out_venv - err += err_venv - - easy_install = _get_easy_install(module, env, executable) - - cmd = None - changed = False - installed = _is_package_installed(module, name, easy_install) - - if not installed: - if module.check_mode: - module.exit_json(changed=True) - cmd = '%s %s' % (easy_install, name) - rc_easy_inst, out_easy_inst, err_easy_inst = module.run_command(cmd) - - rc += rc_easy_inst - out += out_easy_inst - err += err_easy_inst - - changed = True - - if rc != 0: - 
module.fail_json(msg=err, cmd=cmd) - - module.exit_json(changed=changed, binary=easy_install, - name=name, virtualenv=env) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/packaging/gem b/library/packaging/gem deleted file mode 100644 index 3740a3e7ce..0000000000 --- a/library/packaging/gem +++ /dev/null @@ -1,238 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Johan Wiren -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -DOCUMENTATION = ''' ---- -module: gem -short_description: Manage Ruby gems -description: - - Manage installation and uninstallation of Ruby gems. -version_added: "1.1" -options: - name: - description: - - The name of the gem to be managed. - required: true - state: - description: - - The desired state of the gem. C(latest) ensures that the latest version is installed. - required: false - choices: [present, absent, latest] - default: present - gem_source: - description: - - The path to a local gem used as installation source. - required: false - include_dependencies: - description: - - Whether to include dependencies or not. 
- required: false - choices: [ "yes", "no" ] - default: "yes" - repository: - description: - - The repository from which the gem will be installed - required: false - aliases: [source] - user_install: - description: - - Install gem in user's local gems cache or for all users - required: false - default: "yes" - version_added: "1.3" - executable: - description: - - Override the path to the gem executable - required: false - version_added: "1.4" - version: - description: - - Version of the gem to be installed/removed. - required: false - pre_release: - description: - - Allow installation of pre-release versions of the gem. - required: false - default: "no" - version_added: "1.6" -author: Johan Wiren -''' - -EXAMPLES = ''' -# Installs version 1.0 of vagrant. -- gem: name=vagrant version=1.0 state=present - -# Installs latest available version of rake. -- gem: name=rake state=latest - -# Installs rake version 1.0 from a local gem on disk. -- gem: name=rake gem_source=/path/to/gems/rake-1.0.gem state=present -''' - -import re - -def get_rubygems_path(module): - if module.params['executable']: - return module.params['executable'].split(' ') - else: - return [ module.get_bin_path('gem', True) ] - -def get_rubygems_version(module): - cmd = get_rubygems_path(module) + [ '--version' ] - (rc, out, err) = module.run_command(cmd, check_rc=True) - - match = re.match(r'^(\d+)\.(\d+)\.(\d+)', out) - if not match: - return None - - return tuple(int(x) for x in match.groups()) - -def get_installed_versions(module, remote=False): - - cmd = get_rubygems_path(module) - cmd.append('query') - if remote: - cmd.append('--remote') - if module.params['repository']: - cmd.extend([ '--source', module.params['repository'] ]) - cmd.append('-n') - cmd.append('^%s$' % module.params['name']) - (rc, out, err) = module.run_command(cmd, check_rc=True) - installed_versions = [] - for line in out.splitlines(): - match = re.match(r"\S+\s+\((.+)\)", line) - if match: - versions = match.group(1) - for 
version in versions.split(', '): - installed_versions.append(version.split()[0]) - return installed_versions - -def exists(module): - - if module.params['state'] == 'latest': - remoteversions = get_installed_versions(module, remote=True) - if remoteversions: - module.params['version'] = remoteversions[0] - installed_versions = get_installed_versions(module) - if module.params['version']: - if module.params['version'] in installed_versions: - return True - else: - if installed_versions: - return True - return False - -def uninstall(module): - - if module.check_mode: - return - cmd = get_rubygems_path(module) - cmd.append('uninstall') - if module.params['version']: - cmd.extend([ '--version', module.params['version'] ]) - else: - cmd.append('--all') - cmd.append('--executable') - cmd.append(module.params['name']) - module.run_command(cmd, check_rc=True) - -def install(module): - - if module.check_mode: - return - - ver = get_rubygems_version(module) - if ver: - major = ver[0] - else: - major = None - - cmd = get_rubygems_path(module) - cmd.append('install') - if module.params['version']: - cmd.extend([ '--version', module.params['version'] ]) - if module.params['repository']: - cmd.extend([ '--source', module.params['repository'] ]) - if not module.params['include_dependencies']: - cmd.append('--ignore-dependencies') - else: - if major and major < 2: - cmd.append('--include-dependencies') - if module.params['user_install']: - cmd.append('--user-install') - else: - cmd.append('--no-user-install') - if module.params['pre_release']: - cmd.append('--pre') - cmd.append('--no-rdoc') - cmd.append('--no-ri') - cmd.append(module.params['gem_source']) - module.run_command(cmd, check_rc=True) - -def main(): - - module = AnsibleModule( - argument_spec = dict( - executable = dict(required=False, type='str'), - gem_source = dict(required=False, type='str'), - include_dependencies = dict(required=False, default=True, type='bool'), - name = dict(required=True, type='str'), - 
repository = dict(required=False, aliases=['source'], type='str'), - state = dict(required=False, default='present', choices=['present','absent','latest'], type='str'), - user_install = dict(required=False, default=True, type='bool'), - pre_release = dict(required=False, default=False, type='bool'), - version = dict(required=False, type='str'), - ), - supports_check_mode = True, - mutually_exclusive = [ ['gem_source','repository'], ['gem_source','version'] ], - ) - - if module.params['version'] and module.params['state'] == 'latest': - module.fail_json(msg="Cannot specify version when state=latest") - if module.params['gem_source'] and module.params['state'] == 'latest': - module.fail_json(msg="Cannot maintain state=latest when installing from local source") - - if not module.params['gem_source']: - module.params['gem_source'] = module.params['name'] - - changed = False - - if module.params['state'] in [ 'present', 'latest']: - if not exists(module): - install(module) - changed = True - elif module.params['state'] == 'absent': - if exists(module): - uninstall(module) - changed = True - - result = {} - result['name'] = module.params['name'] - result['state'] = module.params['state'] - if module.params['version']: - result['version'] = module.params['version'] - result['changed'] = changed - - module.exit_json(**result) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/packaging/homebrew b/library/packaging/homebrew deleted file mode 100644 index 2ecac0c4ac..0000000000 --- a/library/packaging/homebrew +++ /dev/null @@ -1,835 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Andrew Dunham -# (c) 2013, Daniel Jaouen -# -# Based on macports (Jimmy Tang ) -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . - -DOCUMENTATION = ''' ---- -module: homebrew -author: Andrew Dunham and Daniel Jaouen -short_description: Package manager for Homebrew -description: - - Manages Homebrew packages -version_added: "1.1" -options: - name: - description: - - name of package to install/remove - required: true - state: - description: - - state of the package - choices: [ 'head', 'latest', 'present', 'absent', 'linked', 'unlinked' ] - required: false - default: present - update_homebrew: - description: - - update homebrew itself first - required: false - default: "no" - choices: [ "yes", "no" ] - upgrade_all: - description: - - upgrade all homebrew packages - required: false - default: no - choices: [ "yes", "no" ] - install_options: - description: - - options flags to install a package - required: false - default: null - version_added: "1.4" -notes: [] -''' -EXAMPLES = ''' -- homebrew: name=foo state=present -- homebrew: name=foo state=present update_homebrew=yes -- homebrew: name=foo state=latest update_homebrew=yes -- homebrew: update_homebrew=yes upgrade_all=yes -- homebrew: name=foo state=head -- homebrew: name=foo state=linked -- homebrew: name=foo state=absent -- homebrew: name=foo,bar state=absent -- homebrew: name=foo state=present install_options=with-baz,enable-debug -''' - -import os.path -import re - - -# exceptions -------------------------------------------------------------- {{{ -class HomebrewException(Exception): - pass -# /exceptions ------------------------------------------------------------- }}} - - -# utils ------------------------------------------------------------------- {{{ -def _create_regex_group(s): - 
lines = (line.strip() for line in s.split('\n') if line.strip()) - chars = filter(None, (line.split('#')[0].strip() for line in lines)) - group = r'[^' + r''.join(chars) + r']' - return re.compile(group) -# /utils ------------------------------------------------------------------ }}} - - -class Homebrew(object): - '''A class to manage Homebrew packages.''' - - # class regexes ------------------------------------------------ {{{ - VALID_PATH_CHARS = r''' - \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) - \s # spaces - : # colons - {sep} # the OS-specific path separator - . # dots - - # dashes - '''.format(sep=os.path.sep) - - VALID_BREW_PATH_CHARS = r''' - \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) - \s # spaces - {sep} # the OS-specific path separator - . # dots - - # dashes - '''.format(sep=os.path.sep) - - VALID_PACKAGE_CHARS = r''' - \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) - . # dots - \+ # plusses - - # dashes - ''' - - INVALID_PATH_REGEX = _create_regex_group(VALID_PATH_CHARS) - INVALID_BREW_PATH_REGEX = _create_regex_group(VALID_BREW_PATH_CHARS) - INVALID_PACKAGE_REGEX = _create_regex_group(VALID_PACKAGE_CHARS) - # /class regexes ----------------------------------------------- }}} - - # class validations -------------------------------------------- {{{ - @classmethod - def valid_path(cls, path): - ''' - `path` must be one of: - - list of paths - - a string containing only: - - alphanumeric characters - - dashes - - dots - - spaces - - colons - - os.path.sep - ''' - - if isinstance(path, basestring): - return not cls.INVALID_PATH_REGEX.search(path) - - try: - iter(path) - except TypeError: - return False - else: - paths = path - return all(cls.valid_brew_path(path_) for path_ in paths) - - @classmethod - def valid_brew_path(cls, brew_path): - ''' - `brew_path` must be one of: - - None - - a string containing only: - - alphanumeric characters - - dashes - - dots - - spaces - - os.path.sep - ''' - - if brew_path is None: - return True - - 
return ( - isinstance(brew_path, basestring) - and not cls.INVALID_BREW_PATH_REGEX.search(brew_path) - ) - - @classmethod - def valid_package(cls, package): - '''A valid package is either None or alphanumeric.''' - - if package is None: - return True - - return ( - isinstance(package, basestring) - and not cls.INVALID_PACKAGE_REGEX.search(package) - ) - - @classmethod - def valid_state(cls, state): - ''' - A valid state is one of: - - None - - installed - - upgraded - - head - - linked - - unlinked - - absent - ''' - - if state is None: - return True - else: - return ( - isinstance(state, basestring) - and state.lower() in ( - 'installed', - 'upgraded', - 'head', - 'linked', - 'unlinked', - 'absent', - ) - ) - - @classmethod - def valid_module(cls, module): - '''A valid module is an instance of AnsibleModule.''' - - return isinstance(module, AnsibleModule) - - # /class validations ------------------------------------------- }}} - - # class properties --------------------------------------------- {{{ - @property - def module(self): - return self._module - - @module.setter - def module(self, module): - if not self.valid_module(module): - self._module = None - self.failed = True - self.message = 'Invalid module: {0}.'.format(module) - raise HomebrewException(self.message) - - else: - self._module = module - return module - - @property - def path(self): - return self._path - - @path.setter - def path(self, path): - if not self.valid_path(path): - self._path = [] - self.failed = True - self.message = 'Invalid path: {0}.'.format(path) - raise HomebrewException(self.message) - - else: - if isinstance(path, basestring): - self._path = path.split(':') - else: - self._path = path - - return path - - @property - def brew_path(self): - return self._brew_path - - @brew_path.setter - def brew_path(self, brew_path): - if not self.valid_brew_path(brew_path): - self._brew_path = None - self.failed = True - self.message = 'Invalid brew_path: {0}.'.format(brew_path) - raise 
HomebrewException(self.message) - - else: - self._brew_path = brew_path - return brew_path - - @property - def params(self): - return self._params - - @params.setter - def params(self, params): - self._params = self.module.params - return self._params - - @property - def current_package(self): - return self._current_package - - @current_package.setter - def current_package(self, package): - if not self.valid_package(package): - self._current_package = None - self.failed = True - self.message = 'Invalid package: {0}.'.format(package) - raise HomebrewException(self.message) - - else: - self._current_package = package - return package - # /class properties -------------------------------------------- }}} - - def __init__(self, module, path=None, packages=None, state=None, - update_homebrew=False, upgrade_all=False, - install_options=None): - if not install_options: - install_options = list() - self._setup_status_vars() - self._setup_instance_vars(module=module, path=path, packages=packages, - state=state, update_homebrew=update_homebrew, - upgrade_all=upgrade_all, - install_options=install_options, ) - - self._prep() - - # prep --------------------------------------------------------- {{{ - def _setup_status_vars(self): - self.failed = False - self.changed = False - self.changed_count = 0 - self.unchanged_count = 0 - self.message = '' - - def _setup_instance_vars(self, **kwargs): - for key, val in kwargs.iteritems(): - setattr(self, key, val) - - def _prep(self): - self._prep_path() - self._prep_brew_path() - - def _prep_path(self): - if not self.path: - self.path = ['/usr/local/bin'] - - def _prep_brew_path(self): - if not self.module: - self.brew_path = None - self.failed = True - self.message = 'AnsibleModule not set.' 
- raise HomebrewException(self.message) - - self.brew_path = self.module.get_bin_path( - 'brew', - required=True, - opt_dirs=self.path, - ) - if not self.brew_path: - self.brew_path = None - self.failed = True - self.message = 'Unable to locate homebrew executable.' - raise HomebrewException('Unable to locate homebrew executable.') - - return self.brew_path - - def _status(self): - return (self.failed, self.changed, self.message) - # /prep -------------------------------------------------------- }}} - - def run(self): - try: - self._run() - except HomebrewException: - pass - - if not self.failed and (self.changed_count + self.unchanged_count > 1): - self.message = "Changed: %d, Unchanged: %d" % ( - self.changed_count, - self.unchanged_count, - ) - (failed, changed, message) = self._status() - - return (failed, changed, message) - - # checks ------------------------------------------------------- {{{ - def _current_package_is_installed(self): - if not self.valid_package(self.current_package): - self.failed = True - self.message = 'Invalid package: {0}.'.format(self.current_package) - raise HomebrewException(self.message) - - cmd = [ - "{brew_path}".format(brew_path=self.brew_path), - "info", - self.current_package, - ] - rc, out, err = self.module.run_command(cmd) - for line in out.split('\n'): - if ( - re.search(r'Built from source', line) - or re.search(r'Poured from bottle', line) - ): - return True - - return False - - def _outdated_packages(self): - rc, out, err = self.module.run_command([ - self.brew_path, - 'outdated', - ]) - return [line.split(' ')[0].strip() for line in out.split('\n') if line] - - def _current_package_is_outdated(self): - if not self.valid_package(self.current_package): - return False - - return self.current_package in self._outdated_packages() - - def _current_package_is_installed_from_head(self): - if not Homebrew.valid_package(self.current_package): - return False - elif not self._current_package_is_installed(): - return False - - rc, 
out, err = self.module.run_command([ - self.brew_path, - 'info', - self.current_package, - ]) - - try: - version_info = [line for line in out.split('\n') if line][0] - except IndexError: - return False - - return version_info.split(' ')[-1] == 'HEAD' - # /checks ------------------------------------------------------ }}} - - # commands ----------------------------------------------------- {{{ - def _run(self): - if self.update_homebrew: - self._update_homebrew() - - if self.upgrade_all: - self._upgrade_all() - - if self.packages: - if self.state == 'installed': - return self._install_packages() - elif self.state == 'upgraded': - return self._upgrade_packages() - elif self.state == 'head': - return self._install_packages() - elif self.state == 'linked': - return self._link_packages() - elif self.state == 'unlinked': - return self._unlink_packages() - elif self.state == 'absent': - return self._uninstall_packages() - - # updated -------------------------------- {{{ - def _update_homebrew(self): - rc, out, err = self.module.run_command([ - self.brew_path, - 'update', - ]) - if rc == 0: - if out and isinstance(out, basestring): - already_updated = any( - re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE) - for s in out.split('\n') - if s - ) - if not already_updated: - self.changed = True - self.message = 'Homebrew updated successfully.' - else: - self.message = 'Homebrew already up-to-date.' - - return True - else: - self.failed = True - self.message = err.strip() - raise HomebrewException(self.message) - # /updated ------------------------------- }}} - - # _upgrade_all --------------------------- {{{ - def _upgrade_all(self): - rc, out, err = self.module.run_command([ - self.brew_path, - 'upgrade', - ]) - if rc == 0: - if not out: - self.message = 'Homebrew packages already upgraded.' - - else: - self.changed = True - self.message = 'Homebrew upgraded.' 
- - return True - else: - self.failed = True - self.message = err.strip() - raise HomebrewException(self.message) - # /_upgrade_all -------------------------- }}} - - # installed ------------------------------ {{{ - def _install_current_package(self): - if not self.valid_package(self.current_package): - self.failed = True - self.message = 'Invalid package: {0}.'.format(self.current_package) - raise HomebrewException(self.message) - - if self._current_package_is_installed(): - self.unchanged_count += 1 - self.message = 'Package already installed: {0}'.format( - self.current_package, - ) - return True - - if self.module.check_mode: - self.changed = True - self.message = 'Package would be installed: {0}'.format( - self.current_package - ) - raise HomebrewException(self.message) - - if self.state == 'head': - head = '--HEAD' - else: - head = None - - opts = ( - [self.brew_path, 'install'] - + self.install_options - + [self.current_package, head] - ) - cmd = [opt for opt in opts if opt] - rc, out, err = self.module.run_command(cmd) - - if self._current_package_is_installed(): - self.changed_count += 1 - self.changed = True - self.message = 'Package installed: {0}'.format(self.current_package) - return True - else: - self.failed = True - self.message = err.strip() - raise HomebrewException(self.message) - - def _install_packages(self): - for package in self.packages: - self.current_package = package - self._install_current_package() - - return True - # /installed ----------------------------- }}} - - # upgraded ------------------------------- {{{ - def _upgrade_current_package(self): - command = 'upgrade' - - if not self.valid_package(self.current_package): - self.failed = True - self.message = 'Invalid package: {0}.'.format(self.current_package) - raise HomebrewException(self.message) - - if not self._current_package_is_installed(): - command = 'install' - - if self._current_package_is_installed() and not self._current_package_is_outdated(): - self.message = 'Package is 
already upgraded: {0}'.format( - self.current_package, - ) - self.unchanged_count += 1 - return True - - if self.module.check_mode: - self.changed = True - self.message = 'Package would be upgraded: {0}'.format( - self.current_package - ) - raise HomebrewException(self.message) - - opts = ( - [self.brew_path, command] - + self.install_options - + [self.current_package] - ) - cmd = [opt for opt in opts if opt] - rc, out, err = self.module.run_command(cmd) - - if self._current_package_is_installed() and not self._current_package_is_outdated(): - self.changed_count += 1 - self.changed = True - self.message = 'Package upgraded: {0}'.format(self.current_package) - return True - else: - self.failed = True - self.message = err.strip() - raise HomebrewException(self.message) - - def _upgrade_all_packages(self): - opts = ( - [self.brew_path, 'upgrade'] - + self.install_options - ) - cmd = [opt for opt in opts if opt] - rc, out, err = self.module.run_command(cmd) - - if rc == 0: - self.changed = True - self.message = 'All packages upgraded.' 
- return True - else: - self.failed = True - self.message = err.strip() - raise HomebrewException(self.message) - - def _upgrade_packages(self): - if not self.packages: - self._upgrade_all_packages() - else: - for package in self.packages: - self.current_package = package - self._upgrade_current_package() - return True - # /upgraded ------------------------------ }}} - - # uninstalled ---------------------------- {{{ - def _uninstall_current_package(self): - if not self.valid_package(self.current_package): - self.failed = True - self.message = 'Invalid package: {0}.'.format(self.current_package) - raise HomebrewException(self.message) - - if not self._current_package_is_installed(): - self.unchanged_count += 1 - self.message = 'Package already uninstalled: {0}'.format( - self.current_package, - ) - return True - - if self.module.check_mode: - self.changed = True - self.message = 'Package would be uninstalled: {0}'.format( - self.current_package - ) - raise HomebrewException(self.message) - - opts = ( - [self.brew_path, 'uninstall'] - + self.install_options - + [self.current_package] - ) - cmd = [opt for opt in opts if opt] - rc, out, err = self.module.run_command(cmd) - - if not self._current_package_is_installed(): - self.changed_count += 1 - self.changed = True - self.message = 'Package uninstalled: {0}'.format(self.current_package) - return True - else: - self.failed = True - self.message = err.strip() - raise HomebrewException(self.message) - - def _uninstall_packages(self): - for package in self.packages: - self.current_package = package - self._uninstall_current_package() - - return True - # /uninstalled ----------------------------- }}} - - # linked --------------------------------- {{{ - def _link_current_package(self): - if not self.valid_package(self.current_package): - self.failed = True - self.message = 'Invalid package: {0}.'.format(self.current_package) - raise HomebrewException(self.message) - - if not self._current_package_is_installed(): - 
self.failed = True - self.message = 'Package not installed: {0}.'.format(self.current_package) - raise HomebrewException(self.message) - - if self.module.check_mode: - self.changed = True - self.message = 'Package would be linked: {0}'.format( - self.current_package - ) - raise HomebrewException(self.message) - - opts = ( - [self.brew_path, 'link'] - + self.install_options - + [self.current_package] - ) - cmd = [opt for opt in opts if opt] - rc, out, err = self.module.run_command(cmd) - - if rc == 0: - self.changed_count += 1 - self.changed = True - self.message = 'Package linked: {0}'.format(self.current_package) - - return True - else: - self.failed = True - self.message = 'Package could not be linked: {0}.'.format(self.current_package) - raise HomebrewException(self.message) - - def _link_packages(self): - for package in self.packages: - self.current_package = package - self._link_current_package() - - return True - # /linked -------------------------------- }}} - - # unlinked ------------------------------- {{{ - def _unlink_current_package(self): - if not self.valid_package(self.current_package): - self.failed = True - self.message = 'Invalid package: {0}.'.format(self.current_package) - raise HomebrewException(self.message) - - if not self._current_package_is_installed(): - self.failed = True - self.message = 'Package not installed: {0}.'.format(self.current_package) - raise HomebrewException(self.message) - - if self.module.check_mode: - self.changed = True - self.message = 'Package would be unlinked: {0}'.format( - self.current_package - ) - raise HomebrewException(self.message) - - opts = ( - [self.brew_path, 'unlink'] - + self.install_options - + [self.current_package] - ) - cmd = [opt for opt in opts if opt] - rc, out, err = self.module.run_command(cmd) - - if rc == 0: - self.changed_count += 1 - self.changed = True - self.message = 'Package unlinked: {0}'.format(self.current_package) - - return True - else: - self.failed = True - self.message = 'Package 
could not be unlinked: {0}.'.format(self.current_package) - raise HomebrewException(self.message) - - def _unlink_packages(self): - for package in self.packages: - self.current_package = package - self._unlink_current_package() - - return True - # /unlinked ------------------------------ }}} - # /commands ---------------------------------------------------- }}} - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(aliases=["pkg"], required=False), - path=dict(required=False), - state=dict( - default="present", - choices=[ - "present", "installed", - "latest", "upgraded", "head", - "linked", "unlinked", - "absent", "removed", "uninstalled", - ], - ), - update_homebrew=dict( - default="no", - aliases=["update-brew"], - type='bool', - ), - upgrade_all=dict( - default="no", - aliases=["upgrade"], - type='bool', - ), - install_options=dict( - default=None, - aliases=['options'], - type='list', - ) - ), - supports_check_mode=True, - ) - p = module.params - - if p['name']: - packages = p['name'].split(',') - else: - packages = None - - path = p['path'] - if path: - path = path.split(':') - else: - path = ['/usr/local/bin'] - - state = p['state'] - if state in ('present', 'installed'): - state = 'installed' - if state in ('head', ): - state = 'head' - if state in ('latest', 'upgraded'): - state = 'upgraded' - if state == 'linked': - state = 'linked' - if state == 'unlinked': - state = 'unlinked' - if state in ('absent', 'removed', 'uninstalled'): - state = 'absent' - - update_homebrew = p['update_homebrew'] - upgrade_all = p['upgrade_all'] - p['install_options'] = p['install_options'] or [] - install_options = ['--{0}'.format(install_option) - for install_option in p['install_options']] - - brew = Homebrew(module=module, path=path, packages=packages, - state=state, update_homebrew=update_homebrew, - upgrade_all=upgrade_all, install_options=install_options) - (failed, changed, message) = brew.run() - if failed: - module.fail_json(msg=message) - else: 
- module.exit_json(changed=changed, msg=message) - -# this is magic, see lib/ansible/module_common.py -#<> -main() diff --git a/library/packaging/homebrew_cask b/library/packaging/homebrew_cask deleted file mode 100644 index dede8d4bb3..0000000000 --- a/library/packaging/homebrew_cask +++ /dev/null @@ -1,513 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Daniel Jaouen -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . - -DOCUMENTATION = ''' ---- -module: homebrew_cask -author: Daniel Jaouen -short_description: Install/uninstall homebrew casks. -description: - - Manages Homebrew casks. 
-version_added: "1.6" -options: - name: - description: - - name of cask to install/remove - required: true - state: - description: - - state of the cask - choices: [ 'installed', 'uninstalled' ] - required: false - default: present -''' -EXAMPLES = ''' -- homebrew_cask: name=alfred state=present -- homebrew_cask: name=alfred state=absent -''' - -import os.path -import re - - -# exceptions -------------------------------------------------------------- {{{ -class HomebrewCaskException(Exception): - pass -# /exceptions ------------------------------------------------------------- }}} - - -# utils ------------------------------------------------------------------- {{{ -def _create_regex_group(s): - lines = (line.strip() for line in s.split('\n') if line.strip()) - chars = filter(None, (line.split('#')[0].strip() for line in lines)) - group = r'[^' + r''.join(chars) + r']' - return re.compile(group) -# /utils ------------------------------------------------------------------ }}} - - -class HomebrewCask(object): - '''A class to manage Homebrew casks.''' - - # class regexes ------------------------------------------------ {{{ - VALID_PATH_CHARS = r''' - \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) - \s # spaces - : # colons - {sep} # the OS-specific path separator - - # dashes - '''.format(sep=os.path.sep) - - VALID_BREW_PATH_CHARS = r''' - \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) - \s # spaces - {sep} # the OS-specific path separator - - # dashes - '''.format(sep=os.path.sep) - - VALID_CASK_CHARS = r''' - \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) - - # dashes - ''' - - INVALID_PATH_REGEX = _create_regex_group(VALID_PATH_CHARS) - INVALID_BREW_PATH_REGEX = _create_regex_group(VALID_BREW_PATH_CHARS) - INVALID_CASK_REGEX = _create_regex_group(VALID_CASK_CHARS) - # /class regexes ----------------------------------------------- }}} - - # class validations -------------------------------------------- {{{ - @classmethod - def valid_path(cls, path): - ''' - 
`path` must be one of: - - list of paths - - a string containing only: - - alphanumeric characters - - dashes - - spaces - - colons - - os.path.sep - ''' - - if isinstance(path, basestring): - return not cls.INVALID_PATH_REGEX.search(path) - - try: - iter(path) - except TypeError: - return False - else: - paths = path - return all(cls.valid_brew_path(path_) for path_ in paths) - - @classmethod - def valid_brew_path(cls, brew_path): - ''' - `brew_path` must be one of: - - None - - a string containing only: - - alphanumeric characters - - dashes - - spaces - - os.path.sep - ''' - - if brew_path is None: - return True - - return ( - isinstance(brew_path, basestring) - and not cls.INVALID_BREW_PATH_REGEX.search(brew_path) - ) - - @classmethod - def valid_cask(cls, cask): - '''A valid cask is either None or alphanumeric + backslashes.''' - - if cask is None: - return True - - return ( - isinstance(cask, basestring) - and not cls.INVALID_CASK_REGEX.search(cask) - ) - - @classmethod - def valid_state(cls, state): - ''' - A valid state is one of: - - installed - - absent - ''' - - if state is None: - return True - else: - return ( - isinstance(state, basestring) - and state.lower() in ( - 'installed', - 'absent', - ) - ) - - @classmethod - def valid_module(cls, module): - '''A valid module is an instance of AnsibleModule.''' - - return isinstance(module, AnsibleModule) - # /class validations ------------------------------------------- }}} - - # class properties --------------------------------------------- {{{ - @property - def module(self): - return self._module - - @module.setter - def module(self, module): - if not self.valid_module(module): - self._module = None - self.failed = True - self.message = 'Invalid module: {0}.'.format(module) - raise HomebrewCaskException(self.message) - - else: - self._module = module - return module - - @property - def path(self): - return self._path - - @path.setter - def path(self, path): - if not self.valid_path(path): - self._path = [] 
- self.failed = True - self.message = 'Invalid path: {0}.'.format(path) - raise HomebrewCaskException(self.message) - - else: - if isinstance(path, basestring): - self._path = path.split(':') - else: - self._path = path - - return path - - @property - def brew_path(self): - return self._brew_path - - @brew_path.setter - def brew_path(self, brew_path): - if not self.valid_brew_path(brew_path): - self._brew_path = None - self.failed = True - self.message = 'Invalid brew_path: {0}.'.format(brew_path) - raise HomebrewCaskException(self.message) - - else: - self._brew_path = brew_path - return brew_path - - @property - def params(self): - return self._params - - @params.setter - def params(self, params): - self._params = self.module.params - return self._params - - @property - def current_cask(self): - return self._current_cask - - @current_cask.setter - def current_cask(self, cask): - if not self.valid_cask(cask): - self._current_cask = None - self.failed = True - self.message = 'Invalid cask: {0}.'.format(cask) - raise HomebrewCaskException(self.message) - - else: - self._current_cask = cask - return cask - # /class properties -------------------------------------------- }}} - - def __init__(self, module, path=None, casks=None, state=None): - self._setup_status_vars() - self._setup_instance_vars(module=module, path=path, casks=casks, - state=state) - - self._prep() - - # prep --------------------------------------------------------- {{{ - def _setup_status_vars(self): - self.failed = False - self.changed = False - self.changed_count = 0 - self.unchanged_count = 0 - self.message = '' - - def _setup_instance_vars(self, **kwargs): - for key, val in kwargs.iteritems(): - setattr(self, key, val) - - def _prep(self): - self._prep_path() - self._prep_brew_path() - - def _prep_path(self): - if not self.path: - self.path = ['/usr/local/bin'] - - def _prep_brew_path(self): - if not self.module: - self.brew_path = None - self.failed = True - self.message = 'AnsibleModule not 
set.' - raise HomebrewCaskException(self.message) - - self.brew_path = self.module.get_bin_path( - 'brew', - required=True, - opt_dirs=self.path, - ) - if not self.brew_path: - self.brew_path = None - self.failed = True - self.message = 'Unable to locate homebrew executable.' - raise HomebrewCaskException('Unable to locate homebrew executable.') - - return self.brew_path - - def _status(self): - return (self.failed, self.changed, self.message) - # /prep -------------------------------------------------------- }}} - - def run(self): - try: - self._run() - except HomebrewCaskException: - pass - - if not self.failed and (self.changed_count + self.unchanged_count > 1): - self.message = "Changed: %d, Unchanged: %d" % ( - self.changed_count, - self.unchanged_count, - ) - (failed, changed, message) = self._status() - - return (failed, changed, message) - - # checks ------------------------------------------------------- {{{ - def _current_cask_is_installed(self): - if not self.valid_cask(self.current_cask): - self.failed = True - self.message = 'Invalid cask: {0}.'.format(self.current_cask) - raise HomebrewCaskException(self.message) - - cmd = [self.brew_path, 'cask', 'list'] - rc, out, err = self.module.run_command(cmd, path_prefix=self.path[0]) - - if 'nothing to list' in err: - return False - elif rc == 0: - casks = [cask_.strip() for cask_ in out.split('\n') if cask_.strip()] - return self.current_cask in casks - else: - self.failed = True - self.message = err.strip() - raise HomebrewCaskException(self.message) - # /checks ------------------------------------------------------ }}} - - # commands ----------------------------------------------------- {{{ - def _run(self): - if self.state == 'installed': - return self._install_casks() - elif self.state == 'absent': - return self._uninstall_casks() - - if self.command: - return self._command() - - # updated -------------------------------- {{{ - def _update_homebrew(self): - rc, out, err = self.module.run_command([ - 
self.brew_path, - 'update', - ], path_prefix=self.path[0]) - if rc == 0: - if out and isinstance(out, basestring): - already_updated = any( - re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE) - for s in out.split('\n') - if s - ) - if not already_updated: - self.changed = True - self.message = 'Homebrew updated successfully.' - else: - self.message = 'Homebrew already up-to-date.' - - return True - else: - self.failed = True - self.message = err.strip() - raise HomebrewCaskException(self.message) - # /updated ------------------------------- }}} - - # installed ------------------------------ {{{ - def _install_current_cask(self): - if not self.valid_cask(self.current_cask): - self.failed = True - self.message = 'Invalid cask: {0}.'.format(self.current_cask) - raise HomebrewCaskException(self.message) - - if self._current_cask_is_installed(): - self.unchanged_count += 1 - self.message = 'Cask already installed: {0}'.format( - self.current_cask, - ) - return True - - if self.module.check_mode: - self.changed = True - self.message = 'Cask would be installed: {0}'.format( - self.current_cask - ) - raise HomebrewCaskException(self.message) - - cmd = [opt - for opt in (self.brew_path, 'cask', 'install', self.current_cask) - if opt] - - rc, out, err = self.module.run_command(cmd, path_prefix=self.path[0]) - - if self._current_cask_is_installed(): - self.changed_count += 1 - self.changed = True - self.message = 'Cask installed: {0}'.format(self.current_cask) - return True - else: - self.failed = True - self.message = err.strip() - raise HomebrewCaskException(self.message) - - def _install_casks(self): - for cask in self.casks: - self.current_cask = cask - self._install_current_cask() - - return True - # /installed ----------------------------- }}} - - # uninstalled ---------------------------- {{{ - def _uninstall_current_cask(self): - if not self.valid_cask(self.current_cask): - self.failed = True - self.message = 'Invalid cask: {0}.'.format(self.current_cask) - 
raise HomebrewCaskException(self.message) - - if not self._current_cask_is_installed(): - self.unchanged_count += 1 - self.message = 'Cask already uninstalled: {0}'.format( - self.current_cask, - ) - return True - - if self.module.check_mode: - self.changed = True - self.message = 'Cask would be uninstalled: {0}'.format( - self.current_cask - ) - raise HomebrewCaskException(self.message) - - cmd = [opt - for opt in (self.brew_path, 'cask', 'uninstall', self.current_cask) - if opt] - - rc, out, err = self.module.run_command(cmd, path_prefix=self.path[0]) - - if not self._current_cask_is_installed(): - self.changed_count += 1 - self.changed = True - self.message = 'Cask uninstalled: {0}'.format(self.current_cask) - return True - else: - self.failed = True - self.message = err.strip() - raise HomebrewCaskException(self.message) - - def _uninstall_casks(self): - for cask in self.casks: - self.current_cask = cask - self._uninstall_current_cask() - - return True - # /uninstalled ----------------------------- }}} - # /commands ---------------------------------------------------- }}} - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(aliases=["cask"], required=False), - path=dict(required=False), - state=dict( - default="present", - choices=[ - "present", "installed", - "absent", "removed", "uninstalled", - ], - ), - ), - supports_check_mode=True, - ) - p = module.params - - if p['name']: - casks = p['name'].split(',') - else: - casks = None - - path = p['path'] - if path: - path = path.split(':') - else: - path = ['/usr/local/bin'] - - state = p['state'] - if state in ('present', 'installed'): - state = 'installed' - if state in ('absent', 'removed', 'uninstalled'): - state = 'absent' - - brew_cask = HomebrewCask(module=module, path=path, casks=casks, - state=state) - (failed, changed, message) = brew_cask.run() - if failed: - module.fail_json(msg=message) - else: - module.exit_json(changed=changed, msg=message) - -# this is magic, see 
lib/ansible/module_common.py -#<> -main() diff --git a/library/packaging/homebrew_tap b/library/packaging/homebrew_tap deleted file mode 100644 index a79ba076a8..0000000000 --- a/library/packaging/homebrew_tap +++ /dev/null @@ -1,215 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Daniel Jaouen -# Based on homebrew (Andrew Dunham ) -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -import re - -DOCUMENTATION = ''' ---- -module: homebrew_tap -author: Daniel Jaouen -short_description: Tap a Homebrew repository. -description: - - Tap external Homebrew repositories. -version_added: "1.6" -options: - tap: - description: - - The repository to tap. - required: true - state: - description: - - state of the repository. 
- choices: [ 'present', 'absent' ] - required: false - default: 'present' -requirements: [ homebrew ] -''' - -EXAMPLES = ''' -homebrew_tap: tap=homebrew/dupes state=present -homebrew_tap: tap=homebrew/dupes state=absent -homebrew_tap: tap=homebrew/dupes,homebrew/science state=present -''' - - -def a_valid_tap(tap): - '''Returns True if the tap is valid.''' - regex = re.compile(r'^(\S+)/(homebrew-)?(\w+)$') - return regex.match(tap) - - -def already_tapped(module, brew_path, tap): - '''Returns True if already tapped.''' - - rc, out, err = module.run_command([ - brew_path, - 'tap', - ]) - taps = [tap_.strip().lower() for tap_ in out.split('\n') if tap_] - return tap.lower() in taps - - -def add_tap(module, brew_path, tap): - '''Adds a single tap.''' - failed, changed, msg = False, False, '' - - if not a_valid_tap(tap): - failed = True - msg = 'not a valid tap: %s' % tap - - elif not already_tapped(module, brew_path, tap): - if module.check_mode: - module.exit_json(changed=True) - - rc, out, err = module.run_command([ - brew_path, - 'tap', - tap, - ]) - if already_tapped(module, brew_path, tap): - changed = True - msg = 'successfully tapped: %s' % tap - else: - failed = True - msg = 'failed to tap: %s' % tap - - else: - msg = 'already tapped: %s' % tap - - return (failed, changed, msg) - - -def add_taps(module, brew_path, taps): - '''Adds one or more taps.''' - failed, unchanged, added, msg = False, 0, 0, '' - - for tap in taps: - (failed, changed, msg) = add_tap(module, brew_path, tap) - if failed: - break - if changed: - added += 1 - else: - unchanged += 1 - - if failed: - msg = 'added: %d, unchanged: %d, error: ' + msg - msg = msg % (added, unchanged) - elif added: - changed = True - msg = 'added: %d, unchanged: %d' % (added, unchanged) - else: - msg = 'added: %d, unchanged: %d' % (added, unchanged) - - return (failed, changed, msg) - - -def remove_tap(module, brew_path, tap): - '''Removes a single tap.''' - failed, changed, msg = False, False, '' - - if not 
a_valid_tap(tap): - failed = True - msg = 'not a valid tap: %s' % tap - - elif already_tapped(module, brew_path, tap): - if module.check_mode: - module.exit_json(changed=True) - - rc, out, err = module.run_command([ - brew_path, - 'untap', - tap, - ]) - if not already_tapped(module, brew_path, tap): - changed = True - msg = 'successfully untapped: %s' % tap - else: - failed = True - msg = 'failed to untap: %s' % tap - - else: - msg = 'already untapped: %s' % tap - - return (failed, changed, msg) - - -def remove_taps(module, brew_path, taps): - '''Removes one or more taps.''' - failed, unchanged, removed, msg = False, 0, 0, '' - - for tap in taps: - (failed, changed, msg) = remove_tap(module, brew_path, tap) - if failed: - break - if changed: - removed += 1 - else: - unchanged += 1 - - if failed: - msg = 'removed: %d, unchanged: %d, error: ' + msg - msg = msg % (removed, unchanged) - elif removed: - changed = True - msg = 'removed: %d, unchanged: %d' % (removed, unchanged) - else: - msg = 'removed: %d, unchanged: %d' % (removed, unchanged) - - return (failed, changed, msg) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(aliases=['tap'], required=True), - state=dict(default='present', choices=['present', 'absent']), - ), - supports_check_mode=True, - ) - - brew_path = module.get_bin_path( - 'brew', - required=True, - opt_dirs=['/usr/local/bin'] - ) - - taps = module.params['name'].split(',') - - if module.params['state'] == 'present': - failed, changed, msg = add_taps(module, brew_path, taps) - - if failed: - module.fail_json(msg=msg) - else: - module.exit_json(changed=changed, msg=msg) - - elif module.params['state'] == 'absent': - failed, changed, msg = remove_taps(module, brew_path, taps) - - if failed: - module.fail_json(msg=msg) - else: - module.exit_json(changed=changed, msg=msg) - -# this is magic, see lib/ansible/module_common.py -#<> -main() diff --git a/library/packaging/layman b/library/packaging/layman deleted file mode 100644 
index 57c03528c9..0000000000 --- a/library/packaging/layman +++ /dev/null @@ -1,236 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, Jakub Jirutka -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -import shutil -from os import path -from urllib2 import Request, urlopen, URLError - -DOCUMENTATION = ''' ---- -module: layman -author: Jakub Jirutka -version_added: "1.6" -short_description: Manage Gentoo overlays -description: - - Uses Layman to manage an additional repositories for the Portage package manager on Gentoo Linux. - Please note that Layman must be installed on a managed node prior using this module. -options: - name: - description: - - The overlay id to install, synchronize, or uninstall. - Use 'ALL' to sync all of the installed overlays (can be used only when C(state=updated)). - required: true - list_url: - description: - - An URL of the alternative overlays list that defines the overlay to install. - This list will be fetched and saved under C(${overlay_defs})/${name}.xml), where - C(overlay_defs) is readed from the Layman's configuration. - required: false - state: - description: - - Whether to install (C(present)), sync (C(updated)), or uninstall (C(absent)) the overlay. - required: false - default: present - choices: [present, absent, updated] -''' - -EXAMPLES = ''' -# Install the overlay 'mozilla' which is on the central overlays list. 
-- layman: name=mozilla - -# Install the overlay 'cvut' from the specified alternative list. -- layman: name=cvut list_url=http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml - -# Update (sync) the overlay 'cvut', or install if not installed yet. -- layman: name=cvut list_url=http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml state=updated - -# Update (sync) all of the installed overlays. -- layman: name=ALL state=updated - -# Uninstall the overlay 'cvut'. -- layman: name=cvut state=absent -''' - -USERAGENT = 'ansible-httpget' - -try: - from layman.api import LaymanAPI - from layman.config import BareConfig - HAS_LAYMAN_API = True -except ImportError: - HAS_LAYMAN_API = False - - -class ModuleError(Exception): pass - - -def init_layman(config=None): - '''Returns the initialized ``LaymanAPI``. - - :param config: the layman's configuration to use (optional) - ''' - if config is None: config = BareConfig(read_configfile=True, quietness=1) - return LaymanAPI(config) - - -def download_url(url, dest): - ''' - :param url: the URL to download - :param dest: the absolute path of where to save the downloaded content to; - it must be writable and not a directory - - :raises ModuleError - ''' - request = Request(url) - request.add_header('User-agent', USERAGENT) - - try: - response = urlopen(request) - except URLError, e: - raise ModuleError("Failed to get %s: %s" % (url, str(e))) - - try: - with open(dest, 'w') as f: - shutil.copyfileobj(response, f) - except IOError, e: - raise ModuleError("Failed to write: %s" % str(e)) - - -def install_overlay(name, list_url=None): - '''Installs the overlay repository. If not on the central overlays list, - then :list_url of an alternative list must be provided. The list will be - fetched and saved under ``%(overlay_defs)/%(name.xml)`` (location of the - ``overlay_defs`` is read from the Layman's configuration). 
- - :param name: the overlay id - :param list_url: the URL of the remote repositories list to look for the overlay - definition (optional, default: None) - - :returns: True if the overlay was installed, or False if already exists - (i.e. nothing has changed) - :raises ModuleError - ''' - # read Layman configuration - layman_conf = BareConfig(read_configfile=True) - layman = init_layman(layman_conf) - - if layman.is_installed(name): - return False - - if not layman.is_repo(name): - if not list_url: raise ModuleError("Overlay '%s' is not on the list of known " \ - "overlays and URL of the remote list was not provided." % name) - - overlay_defs = layman_conf.get_option('overlay_defs') - dest = path.join(overlay_defs, name + '.xml') - - download_url(list_url, dest) - - # reload config - layman = init_layman() - - if not layman.add_repos(name): raise ModuleError(layman.get_errors()) - - return True - - -def uninstall_overlay(name): - '''Uninstalls the given overlay repository from the system. - - :param name: the overlay id to uninstall - - :returns: True if the overlay was uninstalled, or False if doesn't exist - (i.e. nothing has changed) - :raises ModuleError - ''' - layman = init_layman() - - if not layman.is_installed(name): - return False - - layman.delete_repos(name) - if layman.get_errors(): raise ModuleError(layman.get_errors()) - - return True - - -def sync_overlay(name): - '''Synchronizes the specified overlay repository. - - :param name: the overlay repository id to sync - :raises ModuleError - ''' - layman = init_layman() - - if not layman.sync(name): - messages = [ str(item[1]) for item in layman.sync_results[2] ] - raise ModuleError(messages) - - -def sync_overlays(): - '''Synchronize all of the installed overlays. 
- - :raises ModuleError - ''' - layman = init_layman() - - for name in layman.get_installed(): - sync_overlay(name) - - -def main(): - # define module - module = AnsibleModule( - argument_spec = { - 'name': { 'required': True }, - 'list_url': { 'aliases': ['url'] }, - 'state': { 'default': "present", 'choices': ['present', 'absent', 'updated'] }, - } - ) - - if not HAS_LAYMAN_API: - module.fail_json(msg='Layman is not installed') - - state, name, url = (module.params[key] for key in ['state', 'name', 'list_url']) - - changed = False - try: - if state == 'present': - changed = install_overlay(name, url) - - elif state == 'updated': - if name == 'ALL': - sync_overlays() - elif install_overlay(name, url): - changed = True - else: - sync_overlay(name) - else: - changed = uninstall_overlay(name) - - except ModuleError, e: - module.fail_json(msg=e.message) - else: - module.exit_json(changed=changed, name=name) - - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/packaging/macports b/library/packaging/macports deleted file mode 100644 index ae7010b1cb..0000000000 --- a/library/packaging/macports +++ /dev/null @@ -1,217 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Jimmy Tang -# Based on okpg (Patrick Pelletier ), pacman -# (Afterburn) and pkgin (Shaun Zinck) modules -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . 
- -DOCUMENTATION = ''' ---- -module: macports -author: Jimmy Tang -short_description: Package manager for MacPorts -description: - - Manages MacPorts packages -version_added: "1.1" -options: - name: - description: - - name of package to install/remove - required: true - state: - description: - - state of the package - choices: [ 'present', 'absent', 'active', 'inactive' ] - required: false - default: present - update_cache: - description: - - update the package db first - required: false - default: "no" - choices: [ "yes", "no" ] -notes: [] -''' -EXAMPLES = ''' -- macports: name=foo state=present -- macports: name=foo state=present update_cache=yes -- macports: name=foo state=absent -- macports: name=foo state=active -- macports: name=foo state=inactive -''' - -import pipes - -def update_package_db(module, port_path): - """ Updates packages list. """ - - rc, out, err = module.run_command("%s sync" % port_path) - - if rc != 0: - module.fail_json(msg="could not update package db") - - -def query_package(module, port_path, name, state="present"): - """ Returns whether a package is installed or not. """ - - if state == "present": - - rc, out, err = module.run_command("%s installed | grep -q ^.*%s" % (pipes.quote(port_path), pipes.quote(name)), use_unsafe_shell=True) - if rc == 0: - return True - - return False - - elif state == "active": - - rc, out, err = module.run_command("%s installed %s | grep -q active" % (pipes.quote(port_path), pipes.quote(name)), use_unsafe_shell=True) - - if rc == 0: - return True - - return False - - -def remove_packages(module, port_path, packages): - """ Uninstalls one or more packages if installed. 
""" - - remove_c = 0 - # Using a for loop incase of error, we can report the package that failed - for package in packages: - # Query the package first, to see if we even need to remove - if not query_package(module, port_path, package): - continue - - rc, out, err = module.run_command("%s uninstall %s" % (port_path, package)) - - if query_package(module, port_path, package): - module.fail_json(msg="failed to remove %s: %s" % (package, out)) - - remove_c += 1 - - if remove_c > 0: - - module.exit_json(changed=True, msg="removed %s package(s)" % remove_c) - - module.exit_json(changed=False, msg="package(s) already absent") - - -def install_packages(module, port_path, packages): - """ Installs one or more packages if not already installed. """ - - install_c = 0 - - for package in packages: - if query_package(module, port_path, package): - continue - - rc, out, err = module.run_command("%s install %s" % (port_path, package)) - - if not query_package(module, port_path, package): - module.fail_json(msg="failed to install %s: %s" % (package, out)) - - install_c += 1 - - if install_c > 0: - module.exit_json(changed=True, msg="installed %s package(s)" % (install_c)) - - module.exit_json(changed=False, msg="package(s) already present") - - -def activate_packages(module, port_path, packages): - """ Activate a package if it's inactive. 
""" - - activate_c = 0 - - for package in packages: - if not query_package(module, port_path, package): - module.fail_json(msg="failed to activate %s, package(s) not present" % (package)) - - if query_package(module, port_path, package, state="active"): - continue - - rc, out, err = module.run_command("%s activate %s" % (port_path, package)) - - if not query_package(module, port_path, package, state="active"): - module.fail_json(msg="failed to activate %s: %s" % (package, out)) - - activate_c += 1 - - if activate_c > 0: - module.exit_json(changed=True, msg="activated %s package(s)" % (activate_c)) - - module.exit_json(changed=False, msg="package(s) already active") - - -def deactivate_packages(module, port_path, packages): - """ Deactivate a package if it's active. """ - - deactivated_c = 0 - - for package in packages: - if not query_package(module, port_path, package): - module.fail_json(msg="failed to activate %s, package(s) not present" % (package)) - - if not query_package(module, port_path, package, state="active"): - continue - - rc, out, err = module.run_command("%s deactivate %s" % (port_path, package)) - - if query_package(module, port_path, package, state="active"): - module.fail_json(msg="failed to deactivated %s: %s" % (package, out)) - - deactivated_c += 1 - - if deactivated_c > 0: - module.exit_json(changed=True, msg="deactivated %s package(s)" % (deactivated_c)) - - module.exit_json(changed=False, msg="package(s) already inactive") - - -def main(): - module = AnsibleModule( - argument_spec = dict( - name = dict(aliases=["pkg"], required=True), - state = dict(default="present", choices=["present", "installed", "absent", "removed", "active", "inactive"]), - update_cache = dict(default="no", aliases=["update-cache"], type='bool') - ) - ) - - port_path = module.get_bin_path('port', True, ['/opt/local/bin']) - - p = module.params - - if p["update_cache"]: - update_package_db(module, port_path) - - pkgs = p["name"].split(",") - - if p["state"] in 
["present", "installed"]: - install_packages(module, port_path, pkgs) - - elif p["state"] in ["absent", "removed"]: - remove_packages(module, port_path, pkgs) - - elif p["state"] == "active": - activate_packages(module, port_path, pkgs) - - elif p["state"] == "inactive": - deactivate_packages(module, port_path, pkgs) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/packaging/npm b/library/packaging/npm deleted file mode 100644 index 1dd2e99849..0000000000 --- a/library/packaging/npm +++ /dev/null @@ -1,263 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Chris Hoffman -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: npm -short_description: Manage node.js packages with npm -description: - - Manage node.js packages with Node Package Manager (npm) -version_added: 1.2 -author: Chris Hoffman -options: - name: - description: - - The name of a node.js library to install - required: false - path: - description: - - The base path where to install the node.js libraries - required: false - version: - description: - - The version to be installed - required: false - global: - description: - - Install the node.js library globally - required: false - default: no - choices: [ "yes", "no" ] - executable: - description: - - The executable location for npm. 
- - This is useful if you are using a version manager, such as nvm - required: false - ignore_scripts: - description: - - Use the --ignore-scripts flag when installing. - required: false - choices: [ "yes", "no" ] - default: no - version_added: "1.8" - production: - description: - - Install dependencies in production mode, excluding devDependencies - required: false - choices: [ "yes", "no" ] - default: no - registry: - description: - - The registry to install modules from. - required: false - version_added: "1.6" - state: - description: - - The state of the node.js library - required: false - default: present - choices: [ "present", "absent", "latest" ] -''' - -EXAMPLES = ''' -description: Install "coffee-script" node.js package. -- npm: name=coffee-script path=/app/location - -description: Install "coffee-script" node.js package on version 1.6.1. -- npm: name=coffee-script version=1.6.1 path=/app/location - -description: Install "coffee-script" node.js package globally. -- npm: name=coffee-script global=yes - -description: Remove the globally package "coffee-script". -- npm: name=coffee-script global=yes state=absent - -description: Install "coffee-script" node.js package from custom registry. -- npm: name=coffee-script registry=http://registry.mysite.com - -description: Install packages based on package.json. -- npm: path=/app/location - -description: Update packages based on package.json to their latest version. -- npm: path=/app/location state=latest - -description: Install packages based on package.json using the npm installed with nvm v0.10.1. 
-- npm: path=/app/location executable=/opt/nvm/v0.10.1/bin/npm state=present -''' - -import os - -try: - import json -except ImportError: - import simplejson as json - -class Npm(object): - def __init__(self, module, **kwargs): - self.module = module - self.glbl = kwargs['glbl'] - self.name = kwargs['name'] - self.version = kwargs['version'] - self.path = kwargs['path'] - self.registry = kwargs['registry'] - self.production = kwargs['production'] - self.ignore_scripts = kwargs['ignore_scripts'] - - if kwargs['executable']: - self.executable = kwargs['executable'].split(' ') - else: - self.executable = [module.get_bin_path('npm', True)] - - if kwargs['version']: - self.name_version = self.name + '@' + self.version - else: - self.name_version = self.name - - def _exec(self, args, run_in_check_mode=False, check_rc=True): - if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): - cmd = self.executable + args - - if self.glbl: - cmd.append('--global') - if self.production: - cmd.append('--production') - if self.ignore_scripts: - cmd.append('--ignore-scripts') - if self.name: - cmd.append(self.name_version) - if self.registry: - cmd.append('--registry') - cmd.append(self.registry) - - #If path is specified, cd into that path and run the command. 
- cwd = None - if self.path: - if not os.path.exists(self.path): - os.makedirs(self.path) - if not os.path.isdir(self.path): - self.module.fail_json(msg="path %s is not a directory" % self.path) - cwd = self.path - - rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd) - return out - return '' - - def list(self): - cmd = ['list', '--json'] - - installed = list() - missing = list() - data = json.loads(self._exec(cmd, True, False)) - if 'dependencies' in data: - for dep in data['dependencies']: - if 'missing' in data['dependencies'][dep] and data['dependencies'][dep]['missing']: - missing.append(dep) - elif 'invalid' in data['dependencies'][dep] and data['dependencies'][dep]['invalid']: - missing.append(dep) - else: - installed.append(dep) - if self.name and self.name not in installed: - missing.append(self.name) - #Named dependency not installed - else: - missing.append(self.name) - - return installed, missing - - def install(self): - return self._exec(['install']) - - def update(self): - return self._exec(['update']) - - def uninstall(self): - return self._exec(['uninstall']) - - def list_outdated(self): - outdated = list() - data = self._exec(['outdated'], True, False) - for dep in data.splitlines(): - if dep: - # node.js v0.10.22 changed the `npm outdated` module separator - # from "@" to " ". Split on both for backwards compatibility. 
- pkg, other = re.split('\s|@', dep, 1) - outdated.append(pkg) - - return outdated - - -def main(): - arg_spec = dict( - name=dict(default=None), - path=dict(default=None), - version=dict(default=None), - production=dict(default='no', type='bool'), - executable=dict(default=None), - registry=dict(default=None), - state=dict(default='present', choices=['present', 'absent', 'latest']), - ignore_scripts=dict(default=False, type='bool'), - ) - arg_spec['global'] = dict(default='no', type='bool') - module = AnsibleModule( - argument_spec=arg_spec, - supports_check_mode=True - ) - - name = module.params['name'] - path = module.params['path'] - version = module.params['version'] - glbl = module.params['global'] - production = module.params['production'] - executable = module.params['executable'] - registry = module.params['registry'] - state = module.params['state'] - ignore_scripts = module.params['ignore_scripts'] - - if not path and not glbl: - module.fail_json(msg='path must be specified when not using global') - if state == 'absent' and not name: - module.fail_json(msg='uninstalling a package is only available for named packages') - - npm = Npm(module, name=name, path=path, version=version, glbl=glbl, production=production, \ - executable=executable, registry=registry, ignore_scripts=ignore_scripts) - - changed = False - if state == 'present': - installed, missing = npm.list() - if len(missing): - changed = True - npm.install() - elif state == 'latest': - installed, missing = npm.list() - outdated = npm.list_outdated() - if len(missing) or len(outdated): - changed = True - npm.install() - else: #absent - installed, missing = npm.list() - if name in installed: - changed = True - npm.uninstall() - - module.exit_json(changed=changed) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/packaging/openbsd_pkg b/library/packaging/openbsd_pkg deleted file mode 100644 index 790fa89fac..0000000000 --- 
a/library/packaging/openbsd_pkg +++ /dev/null @@ -1,373 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Patrik Lundin -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -import re -import shlex -import syslog - -DOCUMENTATION = ''' ---- -module: openbsd_pkg -author: Patrik Lundin -version_added: "1.1" -short_description: Manage packages on OpenBSD. -description: - - Manage packages on OpenBSD using the pkg tools. -options: - name: - required: true - description: - - Name of the package. - state: - required: true - choices: [ present, latest, absent ] - description: - - C(present) will make sure the package is installed. - C(latest) will make sure the latest version of the package is installed. - C(absent) will make sure the specified package is not installed. -''' - -EXAMPLES = ''' -# Make sure nmap is installed -- openbsd_pkg: name=nmap state=present - -# Make sure nmap is the latest version -- openbsd_pkg: name=nmap state=latest - -# Make sure nmap is not installed -- openbsd_pkg: name=nmap state=absent - -# Specify a pkg flavour with '--' -- openbsd_pkg: name=vim--nox11 state=present - -# Specify the default flavour to avoid ambiguity errors -- openbsd_pkg: name=vim-- state=present -''' - -# Control if we write debug information to syslog. -debug = False - -# Function used for executing commands. 
-def execute_command(cmd, module): - if debug: - syslog.syslog("execute_command(): cmd = %s" % cmd) - # Break command line into arguments. - # This makes run_command() use shell=False which we need to not cause shell - # expansion of special characters like '*'. - cmd_args = shlex.split(cmd) - return module.run_command(cmd_args) - -# Function used for getting the name of a currently installed package. -def get_current_name(name, pkg_spec, module): - info_cmd = 'pkg_info' - (rc, stdout, stderr) = execute_command("%s" % (info_cmd), module) - if rc != 0: - return (rc, stdout, stderr) - - if pkg_spec['version']: - pattern = "^%s" % name - elif pkg_spec['flavor']: - pattern = "^%s-.*-%s\s" % (pkg_spec['stem'], pkg_spec['flavor']) - else: - pattern = "^%s-" % pkg_spec['stem'] - - if debug: - syslog.syslog("get_current_name(): pattern = %s" % pattern) - - for line in stdout.splitlines(): - if debug: - syslog.syslog("get_current_name: line = %s" % line) - match = re.search(pattern, line) - if match: - current_name = line.split()[0] - - return current_name - -# Function used to find out if a package is currently installed. -def get_package_state(name, pkg_spec, module): - info_cmd = 'pkg_info -e' - - if pkg_spec['version']: - command = "%s %s" % (info_cmd, name) - elif pkg_spec['flavor']: - command = "%s %s-*-%s" % (info_cmd, pkg_spec['stem'], pkg_spec['flavor']) - else: - command = "%s %s-*" % (info_cmd, pkg_spec['stem']) - - rc, stdout, stderr = execute_command(command, module) - - if (stderr): - module.fail_json(msg="failed in get_package_state(): " + stderr) - - if rc == 0: - return True - else: - return False - -# Function used to make sure a package is present. 
-def package_present(name, installed_state, pkg_spec, module): - if module.check_mode: - install_cmd = 'pkg_add -Imn' - else: - install_cmd = 'pkg_add -Im' - - if installed_state is False: - - # Attempt to install the package - (rc, stdout, stderr) = execute_command("%s %s" % (install_cmd, name), module) - - # The behaviour of pkg_add is a bit different depending on if a - # specific version is supplied or not. - # - # When a specific version is supplied the return code will be 0 when - # a package is found and 1 when it is not, if a version is not - # supplied the tool will exit 0 in both cases: - if pkg_spec['version']: - # Depend on the return code. - if debug: - syslog.syslog("package_present(): depending on return code") - if rc: - changed=False - else: - # Depend on stderr instead. - if debug: - syslog.syslog("package_present(): depending on stderr") - if stderr: - # There is a corner case where having an empty directory in - # installpath prior to the right location will result in a - # "file:/local/package/directory/ is empty" message on stderr - # while still installing the package, so we need to look for - # for a message like "packagename-1.0: ok" just in case. - match = re.search("\W%s-[^:]+: ok\W" % name, stdout) - if match: - # It turns out we were able to install the package. - if debug: - syslog.syslog("package_present(): we were able to install package") - pass - else: - # We really did fail, fake the return code. - if debug: - syslog.syslog("package_present(): we really did fail") - rc = 1 - changed=False - else: - if debug: - syslog.syslog("package_present(): stderr was not set") - - if rc == 0: - if module.check_mode: - module.exit_json(changed=True) - - changed=True - - else: - rc = 0 - stdout = '' - stderr = '' - changed=False - - return (rc, stdout, stderr, changed) - -# Function used to make sure a package is the latest available version. 
-def package_latest(name, installed_state, pkg_spec, module): - if module.check_mode: - upgrade_cmd = 'pkg_add -umn' - else: - upgrade_cmd = 'pkg_add -um' - - pre_upgrade_name = '' - - if installed_state is True: - - # Fetch name of currently installed package. - pre_upgrade_name = get_current_name(name, pkg_spec, module) - - if debug: - syslog.syslog("package_latest(): pre_upgrade_name = %s" % pre_upgrade_name) - - # Attempt to upgrade the package. - (rc, stdout, stderr) = execute_command("%s %s" % (upgrade_cmd, name), module) - - # Look for output looking something like "nmap-6.01->6.25: ok" to see if - # something changed (or would have changed). Use \W to delimit the match - # from progress meter output. - match = re.search("\W%s->.+: ok\W" % pre_upgrade_name, stdout) - if match: - if module.check_mode: - module.exit_json(changed=True) - - changed = True - else: - changed = False - - # FIXME: This part is problematic. Based on the issues mentioned (and - # handled) in package_present() it is not safe to blindly trust stderr - # as an indicator that the command failed, and in the case with - # empty installpath directories this will break. - # - # For now keep this safeguard here, but ignore it if we managed to - # parse out a successful update above. This way we will report a - # successful run when we actually modify something but fail - # otherwise. - if changed != True: - if stderr: - rc=1 - - return (rc, stdout, stderr, changed) - - else: - # If package was not installed at all just make it present. - if debug: - syslog.syslog("package_latest(): package is not installed, calling package_present()") - return package_present(name, installed_state, pkg_spec, module) - -# Function used to make sure a package is not installed. -def package_absent(name, installed_state, module): - if module.check_mode: - remove_cmd = 'pkg_delete -In' - else: - remove_cmd = 'pkg_delete -I' - - if installed_state is True: - - # Attempt to remove the package. 
- rc, stdout, stderr = execute_command("%s %s" % (remove_cmd, name), module) - - if rc == 0: - if module.check_mode: - module.exit_json(changed=True) - - changed=True - else: - changed=False - - else: - rc = 0 - stdout = '' - stderr = '' - changed=False - - return (rc, stdout, stderr, changed) - -# Function used to parse the package name based on packages-specs(7) -# The general name structure is "stem-version[-flavors]" -def parse_package_name(name, pkg_spec, module): - # Do some initial matches so we can base the more advanced regex on that. - version_match = re.search("-[0-9]", name) - versionless_match = re.search("--", name) - - # Stop if someone is giving us a name that both has a version and is - # version-less at the same time. - if version_match and versionless_match: - module.fail_json(msg="Package name both has a version and is version-less: " + name) - - # If name includes a version. - if version_match: - match = re.search("^(?P.*)-(?P[0-9][^-]*)(?P-)?(?P[a-z].*)?$", name) - if match: - pkg_spec['stem'] = match.group('stem') - pkg_spec['version_separator'] = '-' - pkg_spec['version'] = match.group('version') - pkg_spec['flavor_separator'] = match.group('flavor_separator') - pkg_spec['flavor'] = match.group('flavor') - else: - module.fail_json(msg="Unable to parse package name at version_match: " + name) - - # If name includes no version but is version-less ("--"). - elif versionless_match: - match = re.search("^(?P.*)--(?P[a-z].*)?$", name) - if match: - pkg_spec['stem'] = match.group('stem') - pkg_spec['version_separator'] = '-' - pkg_spec['version'] = None - pkg_spec['flavor_separator'] = '-' - pkg_spec['flavor'] = match.group('flavor') - else: - module.fail_json(msg="Unable to parse package name at versionless_match: " + name) - - # If name includes no version, and is not version-less, it is all a stem. 
- else: - match = re.search("^(?P.*)$", name) - if match: - pkg_spec['stem'] = match.group('stem') - pkg_spec['version_separator'] = None - pkg_spec['version'] = None - pkg_spec['flavor_separator'] = None - pkg_spec['flavor'] = None - else: - module.fail_json(msg="Unable to parse package name at else: " + name) - - # Sanity check that there are no trailing dashes in flavor. - # Try to stop strange stuff early so we can be strict later. - if pkg_spec['flavor']: - match = re.search("-$", pkg_spec['flavor']) - if match: - module.fail_json(msg="Trailing dash in flavor: " + pkg_spec['flavor']) - -# =========================================== -# Main control flow - -def main(): - module = AnsibleModule( - argument_spec = dict( - name = dict(required=True), - state = dict(required=True, choices=['absent', 'installed', 'latest', 'present', 'removed']), - ), - supports_check_mode = True - ) - - name = module.params['name'] - state = module.params['state'] - - rc = 0 - stdout = '' - stderr = '' - result = {} - result['name'] = name - result['state'] = state - - # Parse package name and put results in the pkg_spec dictionary. - pkg_spec = {} - parse_package_name(name, pkg_spec, module) - - # Get package state. - installed_state = get_package_state(name, pkg_spec, module) - - # Perform requested action. 
- if state in ['installed', 'present']: - (rc, stdout, stderr, changed) = package_present(name, installed_state, pkg_spec, module) - elif state in ['absent', 'removed']: - (rc, stdout, stderr, changed) = package_absent(name, installed_state, module) - elif state == 'latest': - (rc, stdout, stderr, changed) = package_latest(name, installed_state, pkg_spec, module) - - if rc != 0: - if stderr: - module.fail_json(msg=stderr) - else: - module.fail_json(msg=stdout) - - result['changed'] = changed - - module.exit_json(**result) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/packaging/opkg b/library/packaging/opkg deleted file mode 100644 index 0187abe56a..0000000000 --- a/library/packaging/opkg +++ /dev/null @@ -1,150 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Patrick Pelletier -# Based on pacman (Afterburn) and pkgin (Shaun Zinck) modules -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . 
- -DOCUMENTATION = ''' ---- -module: opkg -author: Patrick Pelletier -short_description: Package manager for OpenWrt -description: - - Manages OpenWrt packages -version_added: "1.1" -options: - name: - description: - - name of package to install/remove - required: true - state: - description: - - state of the package - choices: [ 'present', 'absent' ] - required: false - default: present - update_cache: - description: - - update the package db first - required: false - default: "no" - choices: [ "yes", "no" ] -notes: [] -''' -EXAMPLES = ''' -- opkg: name=foo state=present -- opkg: name=foo state=present update_cache=yes -- opkg: name=foo state=absent -- opkg: name=foo,bar state=absent -''' - -import pipes - -def update_package_db(module, opkg_path): - """ Updates packages list. """ - - rc, out, err = module.run_command("%s update" % opkg_path) - - if rc != 0: - module.fail_json(msg="could not update package db") - - -def query_package(module, opkg_path, name, state="present"): - """ Returns whether a package is installed or not. """ - - if state == "present": - - rc, out, err = module.run_command("%s list-installed | grep -q ^%s" % (pipes.quote(opkg_path), pipes.quote(name)), use_unsafe_shell=True) - if rc == 0: - return True - - return False - - -def remove_packages(module, opkg_path, packages): - """ Uninstalls one or more packages if installed. 
""" - - remove_c = 0 - # Using a for loop incase of error, we can report the package that failed - for package in packages: - # Query the package first, to see if we even need to remove - if not query_package(module, opkg_path, package): - continue - - rc, out, err = module.run_command("%s remove %s" % (opkg_path, package)) - - if query_package(module, opkg_path, package): - module.fail_json(msg="failed to remove %s: %s" % (package, out)) - - remove_c += 1 - - if remove_c > 0: - - module.exit_json(changed=True, msg="removed %s package(s)" % remove_c) - - module.exit_json(changed=False, msg="package(s) already absent") - - -def install_packages(module, opkg_path, packages): - """ Installs one or more packages if not already installed. """ - - install_c = 0 - - for package in packages: - if query_package(module, opkg_path, package): - continue - - rc, out, err = module.run_command("%s install %s" % (opkg_path, package)) - - if not query_package(module, opkg_path, package): - module.fail_json(msg="failed to install %s: %s" % (package, out)) - - install_c += 1 - - if install_c > 0: - module.exit_json(changed=True, msg="installed %s package(s)" % (install_c)) - - module.exit_json(changed=False, msg="package(s) already present") - - -def main(): - module = AnsibleModule( - argument_spec = dict( - name = dict(aliases=["pkg"], required=True), - state = dict(default="present", choices=["present", "installed", "absent", "removed"]), - update_cache = dict(default="no", aliases=["update-cache"], type='bool') - ) - ) - - opkg_path = module.get_bin_path('opkg', True, ['/bin']) - - p = module.params - - if p["update_cache"]: - update_package_db(module, opkg_path) - - pkgs = p["name"].split(",") - - if p["state"] in ["present", "installed"]: - install_packages(module, opkg_path, pkgs) - - elif p["state"] in ["absent", "removed"]: - remove_packages(module, opkg_path, pkgs) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git 
a/library/packaging/pacman b/library/packaging/pacman deleted file mode 100644 index 0b23a2f93c..0000000000 --- a/library/packaging/pacman +++ /dev/null @@ -1,234 +0,0 @@ -#!/usr/bin/python -tt -# -*- coding: utf-8 -*- - -# (c) 2012, Afterburn -# (c) 2013, Aaron Bull Schaefer -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: pacman -short_description: Manage packages with I(pacman) -description: - - Manage packages with the I(pacman) package manager, which is used by - Arch Linux and its variants. -version_added: "1.0" -author: Afterburn -notes: [] -requirements: [] -options: - name: - description: - - Name of the package to install, upgrade, or remove. - required: false - default: null - - state: - description: - - Desired state of the package. - required: false - default: "present" - choices: ["present", "absent"] - - recurse: - description: - - When removing a package, also remove its dependencies, provided - that they are not required by other packages and were not - explicitly installed by a user. - required: false - default: "no" - choices: ["yes", "no"] - version_added: "1.3" - - update_cache: - description: - - Whether or not to refresh the master package lists. This can be - run as part of a package installation or as a separate step. 
- required: false - default: "no" - choices: ["yes", "no"] -''' - -EXAMPLES = ''' -# Install package foo -- pacman: name=foo state=present - -# Remove packages foo and bar -- pacman: name=foo,bar state=absent - -# Recursively remove package baz -- pacman: name=baz state=absent recurse=yes - -# Run the equivalent of "pacman -Syy" as a separate step -- pacman: update_cache=yes -''' - -import json -import shlex -import os -import re -import sys - -PACMAN_PATH = "/usr/bin/pacman" - -def query_package(module, name, state="present"): - # pacman -Q returns 0 if the package is installed, - # 1 if it is not installed - if state == "present": - cmd = "pacman -Q %s" % (name) - rc, stdout, stderr = module.run_command(cmd, check_rc=False) - - if rc == 0: - return True - - return False - - -def update_package_db(module): - cmd = "pacman -Syy" - rc, stdout, stderr = module.run_command(cmd, check_rc=False) - - if rc == 0: - return True - else: - module.fail_json(msg="could not update package db") - - -def remove_packages(module, packages): - if module.params["recurse"]: - args = "Rs" - else: - args = "R" - - remove_c = 0 - # Using a for loop incase of error, we can report the package that failed - for package in packages: - # Query the package first, to see if we even need to remove - if not query_package(module, package): - continue - - cmd = "pacman -%s %s --noconfirm" % (args, package) - rc, stdout, stderr = module.run_command(cmd, check_rc=False) - - if rc != 0: - module.fail_json(msg="failed to remove %s" % (package)) - - remove_c += 1 - - if remove_c > 0: - - module.exit_json(changed=True, msg="removed %s package(s)" % remove_c) - - module.exit_json(changed=False, msg="package(s) already absent") - - -def install_packages(module, packages, package_files): - install_c = 0 - - for i, package in enumerate(packages): - if query_package(module, package): - continue - - if package_files[i]: - params = '-U %s' % package_files[i] - else: - params = '-S %s' % package - - cmd = 
"pacman %s --noconfirm" % (params) - rc, stdout, stderr = module.run_command(cmd, check_rc=False) - - if rc != 0: - module.fail_json(msg="failed to install %s" % (package)) - - install_c += 1 - - if install_c > 0: - module.exit_json(changed=True, msg="installed %s package(s)" % (install_c)) - - module.exit_json(changed=False, msg="package(s) already installed") - - -def check_packages(module, packages, state): - would_be_changed = [] - for package in packages: - installed = query_package(module, package) - if ((state == "present" and not installed) or - (state == "absent" and installed)): - would_be_changed.append(package) - if would_be_changed: - if state == "absent": - state = "removed" - module.exit_json(changed=True, msg="%s package(s) would be %s" % ( - len(would_be_changed), state)) - else: - module.exit_json(change=False, msg="package(s) already %s" % state) - - -def main(): - module = AnsibleModule( - argument_spec = dict( - name = dict(aliases=['pkg']), - state = dict(default='present', choices=['present', 'installed', 'absent', 'removed']), - recurse = dict(default='no', choices=BOOLEANS, type='bool'), - update_cache = dict(default='no', aliases=['update-cache'], choices=BOOLEANS, type='bool')), - required_one_of = [['name', 'update_cache']], - supports_check_mode = True) - - if not os.path.exists(PACMAN_PATH): - module.fail_json(msg="cannot find pacman, looking for %s" % (PACMAN_PATH)) - - p = module.params - - # normalize the state parameter - if p['state'] in ['present', 'installed']: - p['state'] = 'present' - elif p['state'] in ['absent', 'removed']: - p['state'] = 'absent' - - if p["update_cache"] and not module.check_mode: - update_package_db(module) - if not p['name']: - module.exit_json(changed=True, msg='updated the package master lists') - - if p['update_cache'] and module.check_mode and not p['name']: - module.exit_json(changed=True, msg='Would have updated the package cache') - - if p['name']: - pkgs = p['name'].split(',') - - pkg_files = [] 
- for i, pkg in enumerate(pkgs): - if pkg.endswith('.pkg.tar.xz'): - # The package given is a filename, extract the raw pkg name from - # it and store the filename - pkg_files.append(pkg) - pkgs[i] = re.sub('-[0-9].*$', '', pkgs[i].split('/')[-1]) - else: - pkg_files.append(None) - - if module.check_mode: - check_packages(module, pkgs, p['state']) - - if p['state'] == 'present': - install_packages(module, pkgs, pkg_files) - elif p['state'] == 'absent': - remove_packages(module, pkgs) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/packaging/pip b/library/packaging/pip deleted file mode 100644 index 17f52c0039..0000000000 --- a/library/packaging/pip +++ /dev/null @@ -1,356 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Matt Wright -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -import tempfile -import os - -DOCUMENTATION = ''' ---- -module: pip -short_description: Manages Python library dependencies. -description: - - "Manage Python library dependencies. To use this module, one of the following keys is required: C(name) - or C(requirements)." -version_added: "0.7" -options: - name: - description: - - The name of a Python library to install or the url of the remote package. 
- required: false - default: null - version: - description: - - The version number to install of the Python library specified in the I(name) parameter - required: false - default: null - requirements: - description: - - The path to a pip requirements file - required: false - default: null - virtualenv: - description: - - An optional path to a I(virtualenv) directory to install into - required: false - default: null - virtualenv_site_packages: - version_added: "1.0" - description: - - Whether the virtual environment will inherit packages from the - global site-packages directory. Note that if this setting is - changed on an already existing virtual environment it will not - have any effect, the environment must be deleted and newly - created. - required: false - default: "no" - choices: [ "yes", "no" ] - virtualenv_command: - version_aded: "1.1" - description: - - The command or a pathname to the command to create the virtual - environment with. For example C(pyvenv), C(virtualenv), - C(virtualenv2), C(~/bin/virtualenv), C(/usr/local/bin/virtualenv). - required: false - default: virtualenv - state: - description: - - The state of module - required: false - default: present - choices: [ "present", "absent", "latest" ] - extra_args: - description: - - Extra arguments passed to pip. - required: false - default: null - version_added: "1.0" - chdir: - description: - - cd into this directory before running the command - version_added: "1.3" - required: false - default: null - executable: - description: - - The explicit executable or a pathname to the executable to be used to - run pip for a specific version of Python installed in the system. For - example C(pip-3.3), if there are both Python 2.7 and 3.3 installations - in the system and you want to run pip for the Python 3.3 installation. 
- version_added: "1.3" - required: false - default: null -notes: - - Please note that virtualenv (U(http://www.virtualenv.org/)) must be installed on the remote host if the virtualenv parameter is specified. -requirements: [ "virtualenv", "pip" ] -author: Matt Wright -''' - -EXAMPLES = ''' -# Install (Bottle) python package. -- pip: name=bottle - -# Install (Bottle) python package on version 0.11. -- pip: name=bottle version=0.11 - -# Install (MyApp) using one of the remote protocols (bzr+,hg+,git+,svn+). You do not have to supply '-e' option in extra_args. -- pip: name='svn+http://myrepo/svn/MyApp#egg=MyApp' - -# Install (Bottle) into the specified (virtualenv), inheriting none of the globally installed modules -- pip: name=bottle virtualenv=/my_app/venv - -# Install (Bottle) into the specified (virtualenv), inheriting globally installed modules -- pip: name=bottle virtualenv=/my_app/venv virtualenv_site_packages=yes - -# Install (Bottle) into the specified (virtualenv), using Python 2.7 -- pip: name=bottle virtualenv=/my_app/venv virtualenv_command=virtualenv-2.7 - -# Install specified python requirements. -- pip: requirements=/my_app/requirements.txt - -# Install specified python requirements in indicated (virtualenv). -- pip: requirements=/my_app/requirements.txt virtualenv=/my_app/venv - -# Install specified python requirements and custom Index URL. -- pip: requirements=/my_app/requirements.txt extra_args='-i https://example.com/pypi/simple' - -# Install (Bottle) for Python 3.3 specifically,using the 'pip-3.3' executable. 
-- pip: name=bottle executable=pip-3.3 -''' - -def _get_cmd_options(module, cmd): - thiscmd = cmd + " --help" - rc, stdout, stderr = module.run_command(thiscmd) - if rc != 0: - module.fail_json(msg="Could not get output from %s: %s" % (thiscmd, stdout + stderr)) - - words = stdout.strip().split() - cmd_options = [ x for x in words if x.startswith('--') ] - return cmd_options - - -def _get_full_name(name, version=None): - if version is None: - resp = name - else: - resp = name + '==' + version - return resp - -def _is_present(name, version, installed_pkgs): - for pkg in installed_pkgs: - if '==' not in pkg: - continue - - [pkg_name, pkg_version] = pkg.split('==') - - if pkg_name == name and (version is None or version == pkg_version): - return True - - return False - - - -def _get_pip(module, env=None, executable=None): - # On Debian and Ubuntu, pip is pip. - # On Fedora18 and up, pip is python-pip. - # On Fedora17 and below, CentOS and RedHat 6 and 5, pip is pip-python. - # On Fedora, CentOS, and RedHat, the exception is in the virtualenv. - # There, pip is just pip. - candidate_pip_basenames = ['pip', 'python-pip', 'pip-python'] - pip = None - if executable is not None: - if os.path.isabs(executable): - pip = executable - else: - # If you define your own executable that executable should be the only candidate. - candidate_pip_basenames = [executable] - if pip is None: - if env is None: - opt_dirs = [] - else: - # Try pip with the virtualenv directory first. - opt_dirs = ['%s/bin' % env] - for basename in candidate_pip_basenames: - pip = module.get_bin_path(basename, False, opt_dirs) - if pip is not None: - break - # pip should have been found by now. The final call to get_bin_path will - # trigger fail_json. 
- if pip is None: - basename = candidate_pip_basenames[0] - pip = module.get_bin_path(basename, True, opt_dirs) - return pip - - -def _fail(module, cmd, out, err): - msg = '' - if out: - msg += "stdout: %s" % (out, ) - if err: - msg += "\n:stderr: %s" % (err, ) - module.fail_json(cmd=cmd, msg=msg) - - -def main(): - state_map = dict( - present='install', - absent='uninstall -y', - latest='install -U', - ) - - module = AnsibleModule( - argument_spec=dict( - state=dict(default='present', choices=state_map.keys()), - name=dict(default=None, required=False), - version=dict(default=None, required=False, type='str'), - requirements=dict(default=None, required=False), - virtualenv=dict(default=None, required=False), - virtualenv_site_packages=dict(default='no', type='bool'), - virtualenv_command=dict(default='virtualenv', required=False), - use_mirrors=dict(default='yes', type='bool'), - extra_args=dict(default=None, required=False), - chdir=dict(default=None, required=False), - executable=dict(default=None, required=False), - ), - required_one_of=[['name', 'requirements']], - mutually_exclusive=[['name', 'requirements']], - supports_check_mode=True - ) - - state = module.params['state'] - name = module.params['name'] - version = module.params['version'] - requirements = module.params['requirements'] - extra_args = module.params['extra_args'] - chdir = module.params['chdir'] - - if state == 'latest' and version is not None: - module.fail_json(msg='version is incompatible with state=latest') - - err = '' - out = '' - - env = module.params['virtualenv'] - virtualenv_command = module.params['virtualenv_command'] - - if env: - env = os.path.expanduser(env) - virtualenv = os.path.expanduser(virtualenv_command) - if os.path.basename(virtualenv) == virtualenv: - virtualenv = module.get_bin_path(virtualenv_command, True) - if not os.path.exists(os.path.join(env, 'bin', 'activate')): - if module.check_mode: - module.exit_json(changed=True) - if 
module.params['virtualenv_site_packages']: - cmd = '%s --system-site-packages %s' % (virtualenv, env) - else: - cmd_opts = _get_cmd_options(module, virtualenv) - if '--no-site-packages' in cmd_opts: - cmd = '%s --no-site-packages %s' % (virtualenv, env) - else: - cmd = '%s %s' % (virtualenv, env) - this_dir = tempfile.gettempdir() - if chdir: - this_dir = os.path.join(this_dir, chdir) - rc, out_venv, err_venv = module.run_command(cmd, cwd=this_dir) - out += out_venv - err += err_venv - if rc != 0: - _fail(module, cmd, out, err) - - pip = _get_pip(module, env, module.params['executable']) - - cmd = '%s %s' % (pip, state_map[state]) - - # If there's a virtualenv we want things we install to be able to use other - # installations that exist as binaries within this virtualenv. Example: we - # install cython and then gevent -- gevent needs to use the cython binary, - # not just a python package that will be found by calling the right python. - # So if there's a virtualenv, we add that bin/ to the beginning of the PATH - # in run_command by setting path_prefix here. - path_prefix = None - if env: - path_prefix="/".join(pip.split('/')[:-1]) - - # Automatically apply -e option to extra_args when source is a VCS url. 
VCS - # includes those beginning with svn+, git+, hg+ or bzr+ - if name: - if name.startswith('svn+') or name.startswith('git+') or \ - name.startswith('hg+') or name.startswith('bzr+'): - args_list = [] # used if extra_args is not used at all - if extra_args: - args_list = extra_args.split(' ') - if '-e' not in args_list: - args_list.append('-e') - # Ok, we will reconstruct the option string - extra_args = ' '.join(args_list) - - if extra_args: - cmd += ' %s' % extra_args - if name: - cmd += ' %s' % _get_full_name(name, version) - elif requirements: - cmd += ' -r %s' % requirements - - this_dir = tempfile.gettempdir() - if chdir: - this_dir = os.path.join(this_dir, chdir) - - if module.check_mode: - if env or extra_args or requirements or state == 'latest' or not name: - module.exit_json(changed=True) - elif name.startswith('svn+') or name.startswith('git+') or \ - name.startswith('hg+') or name.startswith('bzr+'): - module.exit_json(changed=True) - - freeze_cmd = '%s freeze' % pip - rc, out_pip, err_pip = module.run_command(freeze_cmd, cwd=this_dir) - - if rc != 0: - module.exit_json(changed=True) - - out += out_pip - err += err_pip - - is_present = _is_present(name, version, out.split()) - - changed = (state == 'present' and not is_present) or (state == 'absent' and is_present) - module.exit_json(changed=changed, cmd=freeze_cmd, stdout=out, stderr=err) - - rc, out_pip, err_pip = module.run_command(cmd, path_prefix=path_prefix, cwd=this_dir) - out += out_pip - err += err_pip - if rc == 1 and state == 'absent' and 'not installed' in out_pip: - pass # rc is 1 when attempting to uninstall non-installed package - elif rc != 0: - _fail(module, cmd, out, err) - - if state == 'absent': - changed = 'Successfully uninstalled' in out_pip - else: - changed = 'Successfully installed' in out_pip - - module.exit_json(changed=changed, cmd=cmd, name=name, version=version, - state=state, requirements=requirements, virtualenv=env, stdout=out, stderr=err) - -# import module 
snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/packaging/pkgin b/library/packaging/pkgin deleted file mode 100755 index 866c9f76a4..0000000000 --- a/library/packaging/pkgin +++ /dev/null @@ -1,168 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Shaun Zinck -# Written by Shaun Zinck -# Based on pacman module written by Afterburn -# that was based on apt module written by Matthew Williams -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . 
- - -DOCUMENTATION = ''' ---- -module: pkgin -short_description: Package manager for SmartOS -description: - - Manages SmartOS packages -version_added: "1.0" -options: - name: - description: - - name of package to install/remove - required: true - state: - description: - - state of the package - choices: [ 'present', 'absent' ] - required: false - default: present -author: Shaun Zinck -notes: [] -''' - -EXAMPLES = ''' -# install package foo" -- pkgin: name=foo state=present - -# remove package foo -- pkgin: name=foo state=absent - -# remove packages foo and bar -- pkgin: name=foo,bar state=absent -''' - - -import json -import shlex -import os -import sys -import pipes - -def query_package(module, pkgin_path, name, state="present"): - - if state == "present": - - rc, out, err = module.run_command("%s -y list | grep ^%s" % (pipes.quote(pkgin_path), pipes.quote(name)), use_unsafe_shell=True) - - if rc == 0: - # At least one package with a package name that starts with ``name`` - # is installed. For some cases this is not sufficient to determine - # wether the queried package is installed. - # - # E.g. for ``name='gcc47'``, ``gcc47`` not being installed, but - # ``gcc47-libs`` being installed, ``out`` would be: - # - # gcc47-libs-4.7.2nb4 The GNU Compiler Collection (GCC) support shared libraries. - # - # Multiline output is also possible, for example with the same query - # and bot ``gcc47`` and ``gcc47-libs`` being installed: - # - # gcc47-libs-4.7.2nb4 The GNU Compiler Collection (GCC) support shared libraries. - # gcc47-4.7.2nb3 The GNU Compiler Collection (GCC) - 4.7 Release Series - - # Loop over lines in ``out`` - for line in out.split('\n'): - - # Strip description - # (results in sth. 
like 'gcc47-libs-4.7.2nb4') - pkgname_with_version = out.split(' ')[0] - - # Strip version - # (results in sth like 'gcc47-libs') - pkgname_without_version = '-'.join(pkgname_with_version.split('-')[:-1]) - - if name == pkgname_without_version: - return True - - return False - - -def remove_packages(module, pkgin_path, packages): - - remove_c = 0 - # Using a for loop incase of error, we can report the package that failed - for package in packages: - # Query the package first, to see if we even need to remove - if not query_package(module, pkgin_path, package): - continue - - rc, out, err = module.run_command("%s -y remove %s" % (pkgin_path, package)) - - if query_package(module, pkgin_path, package): - module.fail_json(msg="failed to remove %s: %s" % (package, out)) - - remove_c += 1 - - if remove_c > 0: - - module.exit_json(changed=True, msg="removed %s package(s)" % remove_c) - - module.exit_json(changed=False, msg="package(s) already absent") - - -def install_packages(module, pkgin_path, packages): - - install_c = 0 - - for package in packages: - if query_package(module, pkgin_path, package): - continue - - rc, out, err = module.run_command("%s -y install %s" % (pkgin_path, package)) - - if not query_package(module, pkgin_path, package): - module.fail_json(msg="failed to install %s: %s" % (package, out)) - - install_c += 1 - - if install_c > 0: - module.exit_json(changed=True, msg="present %s package(s)" % (install_c)) - - module.exit_json(changed=False, msg="package(s) already present") - - - -def main(): - module = AnsibleModule( - argument_spec = dict( - state = dict(default="present", choices=["present","absent"]), - name = dict(aliases=["pkg"], required=True))) - - pkgin_path = module.get_bin_path('pkgin', True, ['/opt/local/bin']) - - p = module.params - - pkgs = p["name"].split(",") - - if p["state"] == "present": - install_packages(module, pkgin_path, pkgs) - - elif p["state"] == "absent": - remove_packages(module, pkgin_path, pkgs) - -# import module 
snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/packaging/pkgng b/library/packaging/pkgng deleted file mode 100644 index a1f443fd4e..0000000000 --- a/library/packaging/pkgng +++ /dev/null @@ -1,301 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, bleader -# Written by bleader -# Based on pkgin module written by Shaun Zinck -# that was based on pacman module written by Afterburn -# that was based on apt module written by Matthew Williams -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . - - -DOCUMENTATION = ''' ---- -module: pkgng -short_description: Package manager for FreeBSD >= 9.0 -description: - - Manage binary packages for FreeBSD using 'pkgng' which - is available in versions after 9.0. -version_added: "1.2" -options: - name: - description: - - name of package to install/remove - required: true - state: - description: - - state of the package - choices: [ 'present', 'absent' ] - required: false - default: present - cached: - description: - - use local package base or try to fetch an updated one - choices: [ 'yes', 'no' ] - required: false - default: no - annotation: - description: - - a comma-separated list of keyvalue-pairs of the form - <+/-/:>[=]. A '+' denotes adding an annotation, a - '-' denotes removing an annotation, and ':' denotes modifying an - annotation. - If setting or modifying annotations, a value must be provided. 
- required: false - version_added: "1.6" - pkgsite: - description: - - for pkgng versions before 1.1.4, specify packagesite to use - for downloading packages, if not specified, use settings from - /usr/local/etc/pkg.conf - for newer pkgng versions, specify a the name of a repository - configured in /usr/local/etc/pkg/repos - required: false -author: bleader -notes: - - When using pkgsite, be careful that already in cache packages won't be downloaded again. -''' - -EXAMPLES = ''' -# Install package foo -- pkgng: name=foo state=present - -# Annotate package foo and bar -- pkgng: name=foo,bar annotation=+test1=baz,-test2,:test3=foobar - -# Remove packages foo and bar -- pkgng: name=foo,bar state=absent -''' - - -import json -import shlex -import os -import re -import sys - -def query_package(module, pkgng_path, name): - - rc, out, err = module.run_command("%s info -g -e %s" % (pkgng_path, name)) - - if rc == 0: - return True - - return False - -def pkgng_older_than(module, pkgng_path, compare_version): - - rc, out, err = module.run_command("%s -v" % pkgng_path) - version = map(lambda x: int(x), re.split(r'[\._]', out)) - - i = 0 - new_pkgng = True - while compare_version[i] == version[i]: - i += 1 - if i == min(len(compare_version), len(version)): - break - else: - if compare_version[i] > version[i]: - new_pkgng = False - return not new_pkgng - - -def remove_packages(module, pkgng_path, packages): - - remove_c = 0 - # Using a for loop incase of error, we can report the package that failed - for package in packages: - # Query the package first, to see if we even need to remove - if not query_package(module, pkgng_path, package): - continue - - if not module.check_mode: - rc, out, err = module.run_command("%s delete -y %s" % (pkgng_path, package)) - - if not module.check_mode and query_package(module, pkgng_path, package): - module.fail_json(msg="failed to remove %s: %s" % (package, out)) - - remove_c += 1 - - if remove_c > 0: - - return (True, "removed %s package(s)" % 
remove_c) - - return (False, "package(s) already absent") - - -def install_packages(module, pkgng_path, packages, cached, pkgsite): - - install_c = 0 - - # as of pkg-1.1.4, PACKAGESITE is deprecated in favor of repository definitions - # in /usr/local/etc/pkg/repos - old_pkgng = pkgng_older_than(module, pkgng_path, [1, 1, 4]) - if pkgsite != "": - if old_pkgng: - pkgsite = "PACKAGESITE=%s" % (pkgsite) - else: - pkgsite = "-r %s" % (pkgsite) - - if not module.check_mode and not cached: - if old_pkgng: - rc, out, err = module.run_command("%s %s update" % (pkgsite, pkgng_path)) - else: - rc, out, err = module.run_command("%s update" % (pkgng_path)) - if rc != 0: - module.fail_json(msg="Could not update catalogue") - - for package in packages: - if query_package(module, pkgng_path, package): - continue - - if not module.check_mode: - if old_pkgng: - rc, out, err = module.run_command("%s %s install -g -U -y %s" % (pkgsite, pkgng_path, package)) - else: - rc, out, err = module.run_command("%s install %s -g -U -y %s" % (pkgng_path, pkgsite, package)) - - if not module.check_mode and not query_package(module, pkgng_path, package): - module.fail_json(msg="failed to install %s: %s" % (package, out), stderr=err) - - install_c += 1 - - if install_c > 0: - return (True, "added %s package(s)" % (install_c)) - - return (False, "package(s) already present") - -def annotation_query(module, pkgng_path, package, tag): - rc, out, err = module.run_command("%s info -g -A %s" % (pkgng_path, package)) - match = re.search(r'^\s*(?P%s)\s*:\s*(?P\w+)' % tag, out, flags=re.MULTILINE) - if match: - return match.group('value') - return False - - -def annotation_add(module, pkgng_path, package, tag, value): - _value = annotation_query(module, pkgng_path, package, tag) - if not _value: - # Annotation does not exist, add it. 
- rc, out, err = module.run_command('%s annotate -y -A %s %s "%s"' - % (pkgng_path, package, tag, value)) - if rc != 0: - module.fail_json("could not annotate %s: %s" - % (package, out), stderr=err) - return True - elif _value != value: - # Annotation exists, but value differs - module.fail_json( - mgs="failed to annotate %s, because %s is already set to %s, but should be set to %s" - % (package, tag, _value, value)) - return False - else: - # Annotation exists, nothing to do - return False - -def annotation_delete(module, pkgng_path, package, tag, value): - _value = annotation_query(module, pkgng_path, package, tag) - if _value: - rc, out, err = module.run_command('%s annotate -y -D %s %s' - % (pkgng_path, package, tag)) - if rc != 0: - module.fail_json("could not delete annotation to %s: %s" - % (package, out), stderr=err) - return True - return False - -def annotation_modify(module, pkgng_path, package, tag, value): - _value = annotation_query(module, pkgng_path, package, tag) - if not value: - # No such tag - module.fail_json("could not change annotation to %s: tag %s does not exist" - % (package, tag)) - elif _value == value: - # No change in value - return False - else: - rc,out,err = module.run_command('%s annotate -y -M %s %s "%s"' - % (pkgng_path, package, tag, value)) - if rc != 0: - module.fail_json("could not change annotation annotation to %s: %s" - % (package, out), stderr=err) - return True - - -def annotate_packages(module, pkgng_path, packages, annotation): - annotate_c = 0 - annotations = map(lambda _annotation: - re.match(r'(?P[\+-:])(?P\w+)(=(?P\w+))?', - _annotation).groupdict(), - re.split(r',', annotation)) - - operation = { - '+': annotation_add, - '-': annotation_delete, - ':': annotation_modify - } - - for package in packages: - for _annotation in annotations: - annotate_c += ( 1 if operation[_annotation['operation']]( - module, pkgng_path, package, - _annotation['tag'], _annotation['value']) else 0 ) - - if annotate_c > 0: - return (True, 
"added %s annotations." % annotate_c) - return (False, "changed no annotations") - -def main(): - module = AnsibleModule( - argument_spec = dict( - state = dict(default="present", choices=["present","absent"], required=False), - name = dict(aliases=["pkg"], required=True), - cached = dict(default=False, type='bool'), - annotation = dict(default="", required=False), - pkgsite = dict(default="", required=False)), - supports_check_mode = True) - - pkgng_path = module.get_bin_path('pkg', True) - - p = module.params - - pkgs = p["name"].split(",") - - changed = False - msgs = [] - - if p["state"] == "present": - _changed, _msg = install_packages(module, pkgng_path, pkgs, p["cached"], p["pkgsite"]) - changed = changed or _changed - msgs.append(_msg) - - elif p["state"] == "absent": - _changed, _msg = remove_packages(module, pkgng_path, pkgs) - changed = changed or _changed - msgs.append(_msg) - - if p["annotation"]: - _changed, _msg = annotate_packages(module, pkgng_path, pkgs, p["annotation"]) - changed = changed or _changed - msgs.append(_msg) - - module.exit_json(changed=changed, msg=", ".join(msgs)) - - - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/packaging/pkgutil b/library/packaging/pkgutil deleted file mode 100644 index 78a7db72bf..0000000000 --- a/library/packaging/pkgutil +++ /dev/null @@ -1,179 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Alexander Winkler -# based on svr4pkg by -# Boyd Adamson (2012) -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -DOCUMENTATION = ''' ---- -module: pkgutil -short_description: Manage CSW-Packages on Solaris -description: - - Manages CSW packages (SVR4 format) on Solaris 10 and 11. - - These were the native packages on Solaris <= 10 and are available - as a legacy feature in Solaris 11. - - Pkgutil is an advanced packaging system, which resolves dependency on installation. - It is designed for CSW packages. -version_added: "1.3" -author: Alexander Winkler -options: - name: - description: - - Package name, e.g. (C(CSWnrpe)) - required: true - site: - description: - - Specifies the repository path to install the package from. - - Its global definition is done in C(/etc/opt/csw/pkgutil.conf). - state: - description: - - Whether to install (C(present)), or remove (C(absent)) a package. - - The upgrade (C(latest)) operation will update/install the package to the latest version available. - - "Note: The module has a limitation that (C(latest)) only works for one package, not lists of them." 
- required: true - choices: ["present", "absent", "latest"] -''' - -EXAMPLES = ''' -# Install a package -pkgutil: name=CSWcommon state=present - -# Install a package from a specific repository -pkgutil: name=CSWnrpe site='ftp://myinternal.repo/opencsw/kiel state=latest' -''' - -import os -import pipes - -def package_installed(module, name): - cmd = [module.get_bin_path('pkginfo', True)] - cmd.append('-q') - cmd.append(name) - rc, out, err = module.run_command(' '.join(cmd)) - if rc == 0: - return True - else: - return False - -def package_latest(module, name, site): - # Only supports one package - cmd = [ 'pkgutil', '--single', '-c' ] - if site is not None: - cmd += [ '-t', pipes.quote(site) ] - cmd.append(pipes.quote(name)) - cmd += [ '| tail -1 | grep -v SAME' ] - rc, out, err = module.run_command(' '.join(cmd), use_unsafe_shell=True) - if rc == 1: - return True - else: - return False - -def run_command(module, cmd): - progname = cmd[0] - cmd[0] = module.get_bin_path(progname, True) - return module.run_command(cmd) - -def package_install(module, state, name, site): - cmd = [ 'pkgutil', '-iy' ] - if site is not None: - cmd += [ '-t', site ] - if state == 'latest': - cmd += [ '-f' ] - cmd.append(name) - (rc, out, err) = run_command(module, cmd) - return (rc, out, err) - -def package_upgrade(module, name, site): - cmd = [ 'pkgutil', '-ufy' ] - if site is not None: - cmd += [ '-t', site ] - cmd.append(name) - (rc, out, err) = run_command(module, cmd) - return (rc, out, err) - -def package_uninstall(module, name): - cmd = [ 'pkgutil', '-ry', name] - (rc, out, err) = run_command(module, cmd) - return (rc, out, err) - -def main(): - module = AnsibleModule( - argument_spec = dict( - name = dict(required = True), - state = dict(required = True, choices=['present', 'absent','latest']), - site = dict(default = None), - ), - supports_check_mode=True - ) - name = module.params['name'] - state = module.params['state'] - site = module.params['site'] - rc = None - out = '' - err 
= '' - result = {} - result['name'] = name - result['state'] = state - - if state == 'present': - if not package_installed(module, name): - if module.check_mode: - module.exit_json(changed=True) - (rc, out, err) = package_install(module, state, name, site) - # Stdout is normally empty but for some packages can be - # very long and is not often useful - if len(out) > 75: - out = out[:75] + '...' - - elif state == 'latest': - if not package_installed(module, name): - if module.check_mode: - module.exit_json(changed=True) - (rc, out, err) = package_install(module, state, name, site) - else: - if not package_latest(module, name, site): - if module.check_mode: - module.exit_json(changed=True) - (rc, out, err) = package_upgrade(module, name, site) - if len(out) > 75: - out = out[:75] + '...' - - elif state == 'absent': - if package_installed(module, name): - if module.check_mode: - module.exit_json(changed=True) - (rc, out, err) = package_uninstall(module, name) - out = out[:75] - - if rc is None: - result['changed'] = False - else: - result['changed'] = True - - if out: - result['stdout'] = out - if err: - result['stderr'] = err - - module.exit_json(**result) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/packaging/portage b/library/packaging/portage deleted file mode 100644 index 85027bfc79..0000000000 --- a/library/packaging/portage +++ /dev/null @@ -1,405 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Yap Sok Ann -# Written by Yap Sok Ann -# Based on apt module written by Matthew Williams -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . - - -DOCUMENTATION = ''' ---- -module: portage -short_description: Package manager for Gentoo -description: - - Manages Gentoo packages -version_added: "1.6" - -options: - package: - description: - - Package atom or set, e.g. C(sys-apps/foo) or C(>foo-2.13) or C(@world) - required: false - default: null - - state: - description: - - State of the package atom - required: false - default: "present" - choices: [ "present", "installed", "emerged", "absent", "removed", "unmerged" ] - - update: - description: - - Update packages to the best version available (--update) - required: false - default: null - choices: [ "yes" ] - - deep: - description: - - Consider the entire dependency tree of packages (--deep) - required: false - default: null - choices: [ "yes" ] - - newuse: - description: - - Include installed packages where USE flags have changed (--newuse) - required: false - default: null - choices: [ "yes" ] - - changed_use: - description: - - Include installed packages where USE flags have changed, except when - - flags that the user has not enabled are added or removed - - (--changed-use) - required: false - default: null - choices: [ "yes" ] - version_added: 1.8 - - oneshot: - description: - - Do not add the packages to the world file (--oneshot) - required: false - default: null - choices: [ "yes" ] - - noreplace: - description: - - Do not re-emerge installed packages (--noreplace) - required: false - default: null - choices: [ "yes" ] - - nodeps: - description: - - Only merge packages but not their dependencies (--nodeps) - required: false - default: null - choices: [ "yes" ] - - onlydeps: - description: - 
- Only merge packages' dependencies but not the packages (--onlydeps) - required: false - default: null - choices: [ "yes" ] - - depclean: - description: - - Remove packages not needed by explicitly merged packages (--depclean) - - If no package is specified, clean up the world's dependencies - - Otherwise, --depclean serves as a dependency aware version of --unmerge - required: false - default: null - choices: [ "yes" ] - - quiet: - description: - - Run emerge in quiet mode (--quiet) - required: false - default: null - choices: [ "yes" ] - - verbose: - description: - - Run emerge in verbose mode (--verbose) - required: false - default: null - choices: [ "yes" ] - - sync: - description: - - Sync package repositories first - - If yes, perform "emerge --sync" - - If web, perform "emerge-webrsync" - required: false - default: null - choices: [ "yes", "web" ] - -requirements: [ gentoolkit ] -author: Yap Sok Ann -notes: [] -''' - -EXAMPLES = ''' -# Make sure package foo is installed -- portage: package=foo state=present - -# Make sure package foo is not installed -- portage: package=foo state=absent - -# Update package foo to the "best" version -- portage: package=foo update=yes - -# Sync repositories and update world -- portage: package=@world update=yes deep=yes sync=yes - -# Remove unneeded packages -- portage: depclean=yes - -# Remove package foo if it is not explicitly needed -- portage: package=foo state=absent depclean=yes -''' - - -import os -import pipes - - -def query_package(module, package, action): - if package.startswith('@'): - return query_set(module, package, action) - return query_atom(module, package, action) - - -def query_atom(module, atom, action): - cmd = '%s list %s' % (module.equery_path, atom) - - rc, out, err = module.run_command(cmd) - return rc == 0 - - -def query_set(module, set, action): - system_sets = [ - '@live-rebuild', - '@module-rebuild', - '@preserved-rebuild', - '@security', - '@selected', - '@system', - '@world', - 
'@x11-module-rebuild', - ] - - if set in system_sets: - if action == 'unmerge': - module.fail_json(msg='set %s cannot be removed' % set) - return False - - world_sets_path = '/var/lib/portage/world_sets' - if not os.path.exists(world_sets_path): - return False - - cmd = 'grep %s %s' % (set, world_sets_path) - - rc, out, err = module.run_command(cmd) - return rc == 0 - - -def sync_repositories(module, webrsync=False): - if module.check_mode: - module.exit_json(msg='check mode not supported by sync') - - if webrsync: - webrsync_path = module.get_bin_path('emerge-webrsync', required=True) - cmd = '%s --quiet' % webrsync_path - else: - cmd = '%s --sync --quiet' % module.emerge_path - - rc, out, err = module.run_command(cmd) - if rc != 0: - module.fail_json(msg='could not sync package repositories') - - -# Note: In the 3 functions below, equery is done one-by-one, but emerge is done -# in one go. If that is not desirable, split the packages into multiple tasks -# instead of joining them together with comma. 
- - -def emerge_packages(module, packages): - p = module.params - - if not (p['update'] or p['noreplace']): - for package in packages: - if not query_package(module, package, 'emerge'): - break - else: - module.exit_json(changed=False, msg='Packages already present.') - - args = [] - emerge_flags = { - 'update': '--update', - 'deep': '--deep', - 'newuse': '--newuse', - 'changed_use': '--changed-use', - 'oneshot': '--oneshot', - 'noreplace': '--noreplace', - 'nodeps': '--nodeps', - 'onlydeps': '--onlydeps', - 'quiet': '--quiet', - 'verbose': '--verbose', - } - for flag, arg in emerge_flags.iteritems(): - if p[flag]: - args.append(arg) - - cmd, (rc, out, err) = run_emerge(module, packages, *args) - if rc != 0: - module.fail_json( - cmd=cmd, rc=rc, stdout=out, stderr=err, - msg='Packages not installed.', - ) - - changed = True - for line in out.splitlines(): - if line.startswith('>>> Emerging (1 of'): - break - else: - changed = False - - module.exit_json( - changed=changed, cmd=cmd, rc=rc, stdout=out, stderr=err, - msg='Packages installed.', - ) - - -def unmerge_packages(module, packages): - p = module.params - - for package in packages: - if query_package(module, package, 'unmerge'): - break - else: - module.exit_json(changed=False, msg='Packages already absent.') - - args = ['--unmerge'] - - for flag in ['quiet', 'verbose']: - if p[flag]: - args.append('--%s' % flag) - - cmd, (rc, out, err) = run_emerge(module, packages, *args) - - if rc != 0: - module.fail_json( - cmd=cmd, rc=rc, stdout=out, stderr=err, - msg='Packages not removed.', - ) - - module.exit_json( - changed=True, cmd=cmd, rc=rc, stdout=out, stderr=err, - msg='Packages removed.', - ) - - -def cleanup_packages(module, packages): - p = module.params - - if packages: - for package in packages: - if query_package(module, package, 'unmerge'): - break - else: - module.exit_json(changed=False, msg='Packages already absent.') - - args = ['--depclean'] - - for flag in ['quiet', 'verbose']: - if p[flag]: - 
args.append('--%s' % flag) - - cmd, (rc, out, err) = run_emerge(module, packages, *args) - if rc != 0: - module.fail_json(cmd=cmd, rc=rc, stdout=out, stderr=err) - - removed = 0 - for line in out.splitlines(): - if not line.startswith('Number removed:'): - continue - parts = line.split(':') - removed = int(parts[1].strip()) - changed = removed > 0 - - module.exit_json( - changed=changed, cmd=cmd, rc=rc, stdout=out, stderr=err, - msg='Depclean completed.', - ) - - -def run_emerge(module, packages, *args): - args = list(args) - - if module.check_mode: - args.append('--pretend') - - cmd = [module.emerge_path] + args + packages - return cmd, module.run_command(cmd) - - -portage_present_states = ['present', 'emerged', 'installed'] -portage_absent_states = ['absent', 'unmerged', 'removed'] - - -def main(): - module = AnsibleModule( - argument_spec=dict( - package=dict(default=None, aliases=['name']), - state=dict( - default=portage_present_states[0], - choices=portage_present_states + portage_absent_states, - ), - update=dict(default=None, choices=['yes']), - deep=dict(default=None, choices=['yes']), - newuse=dict(default=None, choices=['yes']), - changed_use=dict(default=None, choices=['yes']), - oneshot=dict(default=None, choices=['yes']), - noreplace=dict(default=None, choices=['yes']), - nodeps=dict(default=None, choices=['yes']), - onlydeps=dict(default=None, choices=['yes']), - depclean=dict(default=None, choices=['yes']), - quiet=dict(default=None, choices=['yes']), - verbose=dict(default=None, choices=['yes']), - sync=dict(default=None, choices=['yes', 'web']), - ), - required_one_of=[['package', 'sync', 'depclean']], - mutually_exclusive=[['nodeps', 'onlydeps'], ['quiet', 'verbose']], - supports_check_mode=True, - ) - - module.emerge_path = module.get_bin_path('emerge', required=True) - module.equery_path = module.get_bin_path('equery', required=True) - - p = module.params - - if p['sync']: - sync_repositories(module, webrsync=(p['sync'] == 'web')) - if not 
p['package']: - module.exit_json(msg='Sync successfully finished.') - - packages = p['package'].split(',') if p['package'] else [] - - if p['depclean']: - if packages and p['state'] not in portage_absent_states: - module.fail_json( - msg='Depclean can only be used with package when the state is ' - 'one of: %s' % portage_absent_states, - ) - - cleanup_packages(module, packages) - - elif p['state'] in portage_present_states: - emerge_packages(module, packages) - - elif p['state'] in portage_absent_states: - unmerge_packages(module, packages) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/packaging/portinstall b/library/packaging/portinstall deleted file mode 100644 index 068f413af7..0000000000 --- a/library/packaging/portinstall +++ /dev/null @@ -1,207 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, berenddeboer -# Written by berenddeboer -# Based on pkgng module written by bleader -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . - - -DOCUMENTATION = ''' ---- -module: portinstall -short_description: Installing packages from FreeBSD's ports system -description: - - Manage packages for FreeBSD using 'portinstall'. 
-version_added: "1.3" -options: - name: - description: - - name of package to install/remove - required: true - state: - description: - - state of the package - choices: [ 'present', 'absent' ] - required: false - default: present - use_packages: - description: - - use packages instead of ports whenever available - choices: [ 'yes', 'no' ] - required: false - default: yes -author: berenddeboer -''' - -EXAMPLES = ''' -# Install package foo -- portinstall: name=foo state=present - -# Install package security/cyrus-sasl2-saslauthd -- portinstall: name=security/cyrus-sasl2-saslauthd state=present - -# Remove packages foo and bar -- portinstall: name=foo,bar state=absent -''' - - -import json -import shlex -import os -import sys - -def query_package(module, name): - - pkg_info_path = module.get_bin_path('pkg_info', False) - - # Assume that if we have pkg_info, we haven't upgraded to pkgng - if pkg_info_path: - pkgng = False - pkg_glob_path = module.get_bin_path('pkg_glob', True) - rc, out, err = module.run_command("%s -e `pkg_glob %s`" % (pkg_info_path, pipes.quote(name)), use_unsafe_shell=True) - else: - pkgng = True - pkg_info_path = module.get_bin_path('pkg', True) - pkg_info_path = pkg_info_path + " info" - rc, out, err = module.run_command("%s %s" % (pkg_info_path, name)) - - found = rc == 0 - - if not found: - # databases/mysql55-client installs as mysql-client, so try solving - # that the ugly way. 
Pity FreeBSD doesn't have a fool proof way of checking - # some package is installed - name_without_digits = re.sub('[0-9]', '', name) - if name != name_without_digits: - if pkgng: - rc, out, err = module.run_command("%s %s" % (pkg_info_path, name_without_digits)) - else: - rc, out, err = module.run_command("%s %s" % (pkg_info_path, name_without_digits)) - - found = rc == 0 - - return found - - -def matching_packages(module, name): - - ports_glob_path = module.get_bin_path('ports_glob', True) - rc, out, err = module.run_command("%s %s" % (ports_glob_path, name)) - #counts the numer of packages found - occurrences = out.count('\n') - if occurrences == 0: - name_without_digits = re.sub('[0-9]', '', name) - if name != name_without_digits: - rc, out, err = module.run_command("%s %s" % (ports_glob_path, name_without_digits)) - occurrences = out.count('\n') - return occurrences - - -def remove_packages(module, packages): - - remove_c = 0 - pkg_glob_path = module.get_bin_path('pkg_glob', True) - - # If pkg_delete not found, we assume pkgng - pkg_delete_path = module.get_bin_path('pkg_delete', False) - if not pkg_delete_path: - pkg_delete_path = module.get_bin_path('pkg', True) - pkg_delete_path = pkg_delete_path + " delete -y" - - # Using a for loop incase of error, we can report the package that failed - for package in packages: - # Query the package first, to see if we even need to remove - if not query_package(module, package): - continue - - rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path, pipes.quote(package)), use_unsafe_shell=True) - - if query_package(module, package): - name_without_digits = re.sub('[0-9]', '', package) - rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path, pipes.quote(name_without_digits)),use_unsafe_shell=True) - if query_package(module, package): - module.fail_json(msg="failed to remove %s: %s" % (package, out)) - - remove_c += 1 - - if remove_c > 0: - - 
module.exit_json(changed=True, msg="removed %s package(s)" % remove_c) - - module.exit_json(changed=False, msg="package(s) already absent") - - -def install_packages(module, packages, use_packages): - - install_c = 0 - - # If portinstall not found, automagically install - portinstall_path = module.get_bin_path('portinstall', False) - if not portinstall_path: - pkg_path = module.get_bin_path('pkg', False) - if pkg_path: - module.run_command("pkg install -y portupgrade") - portinstall_path = module.get_bin_path('portinstall', True) - - if use_packages == "yes": - portinstall_params="--use-packages" - else: - portinstall_params="" - - for package in packages: - if query_package(module, package): - continue - - # TODO: check how many match - matches = matching_packages(module, package) - if matches == 1: - rc, out, err = module.run_command("%s --batch %s %s" % (portinstall_path, portinstall_params, package)) - if not query_package(module, package): - module.fail_json(msg="failed to install %s: %s" % (package, out)) - elif matches == 0: - module.fail_json(msg="no matches for package %s" % (package)) - else: - module.fail_json(msg="%s matches found for package name %s" % (matches, package)) - - install_c += 1 - - if install_c > 0: - module.exit_json(changed=True, msg="present %s package(s)" % (install_c)) - - module.exit_json(changed=False, msg="package(s) already present") - - -def main(): - module = AnsibleModule( - argument_spec = dict( - state = dict(default="present", choices=["present","absent"]), - name = dict(aliases=["pkg"], required=True), - use_packages = dict(type='bool', default='yes'))) - - p = module.params - - pkgs = p["name"].split(",") - - if p["state"] == "present": - install_packages(module, pkgs, p["use_packages"]) - - elif p["state"] == "absent": - remove_packages(module, pkgs) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/packaging/redhat_subscription b/library/packaging/redhat_subscription 
deleted file mode 100644 index df1c043b89..0000000000 --- a/library/packaging/redhat_subscription +++ /dev/null @@ -1,396 +0,0 @@ -#!/usr/bin/python - -DOCUMENTATION = ''' ---- -module: redhat_subscription -short_description: Manage Red Hat Network registration and subscriptions using the C(subscription-manager) command -description: - - Manage registration and subscription to the Red Hat Network entitlement platform. -version_added: "1.2" -author: James Laska -notes: - - In order to register a system, subscription-manager requires either a username and password, or an activationkey. -requirements: - - subscription-manager -options: - state: - description: - - whether to register and subscribe (C(present)), or unregister (C(absent)) a system - required: false - choices: [ "present", "absent" ] - default: "present" - username: - description: - - Red Hat Network username - required: False - default: null - password: - description: - - Red Hat Network password - required: False - default: null - server_hostname: - description: - - Specify an alternative Red Hat Network server - required: False - default: Current value from C(/etc/rhsm/rhsm.conf) is the default - server_insecure: - description: - - Allow traffic over insecure http - required: False - default: Current value from C(/etc/rhsm/rhsm.conf) is the default - rhsm_baseurl: - description: - - Specify CDN baseurl - required: False - default: Current value from C(/etc/rhsm/rhsm.conf) is the default - autosubscribe: - description: - - Upon successful registration, auto-consume available subscriptions - required: False - default: False - activationkey: - description: - - supply an activation key for use with registration - required: False - default: null - pool: - description: - - Specify a subscription pool name to consume. Regular expressions accepted. - required: False - default: '^$' -''' - -EXAMPLES = ''' -# Register as user (joe_user) with password (somepass) and auto-subscribe to available content. 
-- redhat_subscription: action=register username=joe_user password=somepass autosubscribe=true - -# Register with activationkey (1-222333444) and consume subscriptions matching -# the names (Red hat Enterprise Server) and (Red Hat Virtualization) -- redhat_subscription: action=register - activationkey=1-222333444 - pool='^(Red Hat Enterprise Server|Red Hat Virtualization)$' -''' - -import os -import re -import types -import ConfigParser -import shlex - - -class RegistrationBase(object): - def __init__(self, module, username=None, password=None): - self.module = module - self.username = username - self.password = password - - def configure(self): - raise NotImplementedError("Must be implemented by a sub-class") - - def enable(self): - # Remove any existing redhat.repo - redhat_repo = '/etc/yum.repos.d/redhat.repo' - if os.path.isfile(redhat_repo): - os.unlink(redhat_repo) - - def register(self): - raise NotImplementedError("Must be implemented by a sub-class") - - def unregister(self): - raise NotImplementedError("Must be implemented by a sub-class") - - def unsubscribe(self): - raise NotImplementedError("Must be implemented by a sub-class") - - def update_plugin_conf(self, plugin, enabled=True): - plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin - if os.path.isfile(plugin_conf): - cfg = ConfigParser.ConfigParser() - cfg.read([plugin_conf]) - if enabled: - cfg.set('main', 'enabled', 1) - else: - cfg.set('main', 'enabled', 0) - fd = open(plugin_conf, 'rwa+') - cfg.write(fd) - fd.close() - - def subscribe(self, **kwargs): - raise NotImplementedError("Must be implemented by a sub-class") - - -class Rhsm(RegistrationBase): - def __init__(self, module, username=None, password=None): - RegistrationBase.__init__(self, module, username, password) - self.config = self._read_config() - self.module = module - - def _read_config(self, rhsm_conf='/etc/rhsm/rhsm.conf'): - ''' - Load RHSM configuration from /etc/rhsm/rhsm.conf. 
- Returns: - * ConfigParser object - ''' - - # Read RHSM defaults ... - cp = ConfigParser.ConfigParser() - cp.read(rhsm_conf) - - # Add support for specifying a default value w/o having to standup some configuration - # Yeah, I know this should be subclassed ... but, oh well - def get_option_default(self, key, default=''): - sect, opt = key.split('.', 1) - if self.has_section(sect) and self.has_option(sect, opt): - return self.get(sect, opt) - else: - return default - - cp.get_option = types.MethodType(get_option_default, cp, ConfigParser.ConfigParser) - - return cp - - def enable(self): - ''' - Enable the system to receive updates from subscription-manager. - This involves updating affected yum plugins and removing any - conflicting yum repositories. - ''' - RegistrationBase.enable(self) - self.update_plugin_conf('rhnplugin', False) - self.update_plugin_conf('subscription-manager', True) - - def configure(self, **kwargs): - ''' - Configure the system as directed for registration with RHN - Raises: - * Exception - if error occurs while running command - ''' - args = ['subscription-manager', 'config'] - - # Pass supplied **kwargs as parameters to subscription-manager. Ignore - # non-configuration parameters and replace '_' with '.'. For example, - # 'server_hostname' becomes '--system.hostname'. - for k,v in kwargs.items(): - if re.search(r'^(system|rhsm)_', k): - args.append('--%s=%s' % (k.replace('_','.'), v)) - - self.module.run_command(args, check_rc=True) - - @property - def is_registered(self): - ''' - Determine whether the current system - Returns: - * Boolean - whether the current system is currently registered to - RHN. - ''' - # Quick version... 
- if False: - return os.path.isfile('/etc/pki/consumer/cert.pem') and \ - os.path.isfile('/etc/pki/consumer/key.pem') - - args = ['subscription-manager', 'identity'] - rc, stdout, stderr = self.module.run_command(args, check_rc=False) - if rc == 0: - return True - else: - return False - - def register(self, username, password, autosubscribe, activationkey): - ''' - Register the current system to the provided RHN server - Raises: - * Exception - if error occurs while running command - ''' - args = ['subscription-manager', 'register'] - - # Generate command arguments - if activationkey: - args.append('--activationkey "%s"' % activationkey) - else: - if autosubscribe: - args.append('--autosubscribe') - if username: - args.extend(['--username', username]) - if password: - args.extend(['--password', password]) - - rc, stderr, stdout = self.module.run_command(args, check_rc=True) - - def unsubscribe(self): - ''' - Unsubscribe a system from all subscribed channels - Raises: - * Exception - if error occurs while running command - ''' - args = ['subscription-manager', 'unsubscribe', '--all'] - rc, stderr, stdout = self.module.run_command(args, check_rc=True) - - def unregister(self): - ''' - Unregister a currently registered system - Raises: - * Exception - if error occurs while running command - ''' - args = ['subscription-manager', 'unregister'] - rc, stderr, stdout = self.module.run_command(args, check_rc=True) - - def subscribe(self, regexp): - ''' - Subscribe current system to available pools matching the specified - regular expression - Raises: - * Exception - if error occurs while running command - ''' - - # Available pools ready for subscription - available_pools = RhsmPools(self.module) - - for pool in available_pools.filter(regexp): - pool.subscribe() - - -class RhsmPool(object): - ''' - Convenience class for housing subscription information - ''' - - def __init__(self, module, **kwargs): - self.module = module - for k,v in kwargs.items(): - setattr(self, k, v) - 
- def __str__(self): - return str(self.__getattribute__('_name')) - - def subscribe(self): - args = "subscription-manager subscribe --pool %s" % self.PoolId - rc, stdout, stderr = self.module.run_command(args, check_rc=True) - if rc == 0: - return True - else: - return False - - -class RhsmPools(object): - """ - This class is used for manipulating pools subscriptions with RHSM - """ - def __init__(self, module): - self.module = module - self.products = self._load_product_list() - - def __iter__(self): - return self.products.__iter__() - - def _load_product_list(self): - """ - Loads list of all available pools for system in data structure - """ - args = "subscription-manager list --available" - rc, stdout, stderr = self.module.run_command(args, check_rc=True) - - products = [] - for line in stdout.split('\n'): - # Remove leading+trailing whitespace - line = line.strip() - # An empty line implies the end of a output group - if len(line) == 0: - continue - # If a colon ':' is found, parse - elif ':' in line: - (key, value) = line.split(':',1) - key = key.strip().replace(" ", "") # To unify - value = value.strip() - if key in ['ProductName', 'SubscriptionName']: - # Remember the name for later processing - products.append(RhsmPool(self.module, _name=value, key=value)) - elif products: - # Associate value with most recently recorded product - products[-1].__setattr__(key, value) - # FIXME - log some warning? 
- #else: - # warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value)) - return products - - def filter(self, regexp='^$'): - ''' - Return a list of RhsmPools whose name matches the provided regular expression - ''' - r = re.compile(regexp) - for product in self.products: - if r.search(product._name): - yield product - - -def main(): - - # Load RHSM configuration from file - rhn = Rhsm(None) - - module = AnsibleModule( - argument_spec = dict( - state = dict(default='present', choices=['present', 'absent']), - username = dict(default=None, required=False), - password = dict(default=None, required=False), - server_hostname = dict(default=rhn.config.get_option('server.hostname'), required=False), - server_insecure = dict(default=rhn.config.get_option('server.insecure'), required=False), - rhsm_baseurl = dict(default=rhn.config.get_option('rhsm.baseurl'), required=False), - autosubscribe = dict(default=False, type='bool'), - activationkey = dict(default=None, required=False), - pool = dict(default='^$', required=False, type='str'), - ) - ) - - rhn.module = module - state = module.params['state'] - username = module.params['username'] - password = module.params['password'] - server_hostname = module.params['server_hostname'] - server_insecure = module.params['server_insecure'] - rhsm_baseurl = module.params['rhsm_baseurl'] - autosubscribe = module.params['autosubscribe'] == True - activationkey = module.params['activationkey'] - pool = module.params['pool'] - - # Ensure system is registered - if state == 'present': - - # Check for missing parameters ... 
- if not (activationkey or username or password): - module.fail_json(msg="Missing arguments, must supply an activationkey (%s) or username (%s) and password (%s)" % (activationkey, username, password)) - if not activationkey and not (username and password): - module.fail_json(msg="Missing arguments, If registering without an activationkey, must supply username or password") - - # Register system - if rhn.is_registered: - module.exit_json(changed=False, msg="System already registered.") - else: - try: - rhn.enable() - rhn.configure(**module.params) - rhn.register(username, password, autosubscribe, activationkey) - rhn.subscribe(pool) - except Exception, e: - module.fail_json(msg="Failed to register with '%s': %s" % (server_hostname, e)) - else: - module.exit_json(changed=True, msg="System successfully registered to '%s'." % server_hostname) - - # Ensure system is *not* registered - if state == 'absent': - if not rhn.is_registered: - module.exit_json(changed=False, msg="System already unregistered.") - else: - try: - rhn.unsubscribe() - rhn.unregister() - except Exception, e: - module.fail_json(msg="Failed to unregister: %s" % e) - else: - module.exit_json(changed=True, msg="System successfully unregistered from %s." % server_hostname) - - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/packaging/rhn_channel b/library/packaging/rhn_channel deleted file mode 100644 index 05a155f7ca..0000000000 --- a/library/packaging/rhn_channel +++ /dev/null @@ -1,169 +0,0 @@ -#!/usr/bin/python - -# (c) Vincent Van de Kussen -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: rhn_channel -short_description: Adds or removes Red Hat software channels -description: - - Adds or removes Red Hat software channels -version_added: "1.1" -author: Vincent Van der Kussen -notes: - - this module fetches the system id from RHN. -requirements: - - none -options: - name: - description: - - name of the software channel - required: true - default: null - sysname: - description: - - name of the system as it is known in RHN/Satellite - required: true - default: null - state: - description: - - whether the channel should be present or not - required: false - default: present - url: - description: - - The full url to the RHN/Satellite api - required: true - user: - description: - - RHN/Satellite user - required: true - password: - description: - - "the user's password" - required: true -''' - -EXAMPLES = ''' -- rhn_channel: name=rhel-x86_64-server-v2vwin-6 sysname=server01 url=https://rhn.redhat.com/rpc/api user=rhnuser password=guessme -''' - -import xmlrpclib -from operator import itemgetter -import re - - -# ------------------------------------------------------- # - -def get_systemid(client, session, sysname): - systems = client.system.listUserSystems(session) - for system in systems: - if system.get('name') == sysname: - idres = system.get('id') - idd = int(idres) - return idd - -# ------------------------------------------------------- # - -# unused: -# -#def get_localsystemid(): -# f = open("/etc/sysconfig/rhn/systemid", "r") -# content = f.read() -# loc_id = re.search(r'\b(ID-)(\d{10})' ,content) -# return loc_id.group(2) - -# 
------------------------------------------------------- # - -def subscribe_channels(channels, client, session, sysname, sys_id): - c = base_channels(client, session, sys_id) - c.append(channels) - return client.channel.software.setSystemChannels(session, sys_id, c) - -# ------------------------------------------------------- # - -def unsubscribe_channels(channels, client, session, sysname, sys_id): - c = base_channels(client, session, sys_id) - c.remove(channels) - return client.channel.software.setSystemChannels(session, sys_id, c) - -# ------------------------------------------------------- # - -def base_channels(client, session, sys_id): - basechan = client.channel.software.listSystemChannels(session, sys_id) - try: - chans = [item['label'] for item in basechan] - except KeyError: - chans = [item['channel_label'] for item in basechan] - return chans - -# ------------------------------------------------------- # - - -def main(): - - module = AnsibleModule( - argument_spec = dict( - state = dict(default='present', choices=['present', 'absent']), - name = dict(required=True), - sysname = dict(required=True), - url = dict(required=True), - user = dict(required=True), - password = dict(required=True, aliases=['pwd']), - ) -# supports_check_mode=True - ) - - state = module.params['state'] - channelname = module.params['name'] - systname = module.params['sysname'] - saturl = module.params['url'] - user = module.params['user'] - password = module.params['password'] - - #initialize connection - client = xmlrpclib.Server(saturl, verbose=0) - session = client.auth.login(user, password) - - # get systemid - sys_id = get_systemid(client, session, systname) - - # get channels for system - chans = base_channels(client, session, sys_id) - - - if state == 'present': - if channelname in chans: - module.exit_json(changed=False, msg="Channel %s already exists" % channelname) - else: - subscribe_channels(channelname, client, session, systname, sys_id) - 
module.exit_json(changed=True, msg="Channel %s added" % channelname) - - if state == 'absent': - if not channelname in chans: - module.exit_json(changed=False, msg="Not subscribed to channel %s." % channelname) - else: - unsubscribe_channels(channelname, client, session, systname, sys_id) - module.exit_json(changed=True, msg="Channel %s removed" % channelname) - - client.auth.logout(session) - - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/packaging/rhn_register b/library/packaging/rhn_register deleted file mode 100644 index 1e92405c82..0000000000 --- a/library/packaging/rhn_register +++ /dev/null @@ -1,336 +0,0 @@ -#!/usr/bin/python - -# (c) James Laska -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: rhn_register -short_description: Manage Red Hat Network registration using the C(rhnreg_ks) command -description: - - Manage registration to the Red Hat Network. -version_added: "1.2" -author: James Laska -notes: - - In order to register a system, rhnreg_ks requires either a username and password, or an activationkey. 
-requirements: - - rhnreg_ks -options: - state: - description: - - whether to register (C(present)), or unregister (C(absent)) a system - required: false - choices: [ "present", "absent" ] - default: "present" - username: - description: - - Red Hat Network username - required: False - default: null - password: - description: - - Red Hat Network password - required: False - default: null - server_url: - description: - - Specify an alternative Red Hat Network server URL - required: False - default: Current value of I(serverURL) from C(/etc/sysconfig/rhn/up2date) is the default - activationkey: - description: - - supply an activation key for use with registration - required: False - default: null - channels: - description: - - Optionally specify a list of comma-separated channels to subscribe to upon successful registration. - required: false - default: [] -''' - -EXAMPLES = ''' -# Unregister system from RHN. -- rhn_register: state=absent username=joe_user password=somepass - -# Register as user (joe_user) with password (somepass) and auto-subscribe to available content. -- rhn_register: state=present username=joe_user password=somepass - -# Register with activationkey (1-222333444) and enable extended update support. -- rhn_register: state=present activationkey=1-222333444 enable_eus=true - -# Register as user (joe_user) with password (somepass) against a satellite -# server specified by (server_url). -- rhn_register: > - state=present - username=joe_user - password=somepass - server_url=https://xmlrpc.my.satellite/XMLRPC - -# Register as user (joe_user) with password (somepass) and enable -# channels (rhel-x86_64-server-6-foo-1) and (rhel-x86_64-server-6-bar-1). 
-- rhn_register: state=present username=joe_user - password=somepass - channels=rhel-x86_64-server-6-foo-1,rhel-x86_64-server-6-bar-1 -''' - -import sys -import types -import xmlrpclib -import urlparse - -# Attempt to import rhn client tools -sys.path.insert(0, '/usr/share/rhn') -try: - import up2date_client - import up2date_client.config -except ImportError, e: - module.fail_json(msg="Unable to import up2date_client. Is 'rhn-client-tools' installed?\n%s" % e) - -# INSERT REDHAT SNIPPETS -from ansible.module_utils.redhat import * -# INSERT COMMON SNIPPETS -from ansible.module_utils.basic import * - -class Rhn(RegistrationBase): - - def __init__(self, username=None, password=None): - RegistrationBase.__init__(self, username, password) - self.config = self.load_config() - - def load_config(self): - ''' - Read configuration from /etc/sysconfig/rhn/up2date - ''' - self.config = up2date_client.config.initUp2dateConfig() - - # Add support for specifying a default value w/o having to standup some - # configuration. Yeah, I know this should be subclassed ... but, oh - # well - def get_option_default(self, key, default=''): - # ignore pep8 W601 errors for this line - # setting this to use 'in' does not work in the rhn library - if self.has_key(key): - return self[key] - else: - return default - - self.config.get_option = types.MethodType(get_option_default, self.config, up2date_client.config.Config) - - return self.config - - @property - def hostname(self): - ''' - Return the non-xmlrpc RHN hostname. This is a convenience method - used for displaying a more readable RHN hostname. - - Returns: str - ''' - url = urlparse.urlparse(self.config['serverURL']) - return url[1].replace('xmlrpc.','') - - @property - def systemid(self): - systemid = None - xpath_str = "//member[name='system_id']/value/string" - - if os.path.isfile(self.config['systemIdPath']): - fd = open(self.config['systemIdPath'], 'r') - xml_data = fd.read() - fd.close() - - # Ugh, xml parsing time ... 
- # First, try parsing with libxml2 ... - if systemid is None: - try: - import libxml2 - doc = libxml2.parseDoc(xml_data) - ctxt = doc.xpathNewContext() - systemid = ctxt.xpathEval(xpath_str)[0].content - doc.freeDoc() - ctxt.xpathFreeContext() - except ImportError: - pass - - # m-kay, let's try with lxml now ... - if systemid is None: - try: - from lxml import etree - root = etree.fromstring(xml_data) - systemid = root.xpath(xpath_str)[0].text - except ImportError: - pass - - # Strip the 'ID-' prefix - if systemid is not None and systemid.startswith('ID-'): - systemid = systemid[3:] - - return int(systemid) - - @property - def is_registered(self): - ''' - Determine whether the current system is registered. - - Returns: True|False - ''' - return os.path.isfile(self.config['systemIdPath']) - - def configure(self, server_url): - ''' - Configure system for registration - ''' - - self.config.set('serverURL', server_url) - self.config.save() - - def enable(self): - ''' - Prepare the system for RHN registration. This includes ... - * enabling the rhnplugin yum plugin - * disabling the subscription-manager yum plugin - ''' - RegistrationBase.enable(self) - self.update_plugin_conf('rhnplugin', True) - self.update_plugin_conf('subscription-manager', False) - - def register(self, enable_eus=False, activationkey=None): - ''' - Register system to RHN. If enable_eus=True, extended update - support will be requested. 
- ''' - register_cmd = "/usr/sbin/rhnreg_ks --username='%s' --password='%s' --force" % (self.username, self.password) - if self.module.params.get('server_url', None): - register_cmd += " --serverUrl=%s" % self.module.params.get('server_url') - if enable_eus: - register_cmd += " --use-eus-channel" - if activationkey is not None: - register_cmd += " --activationkey '%s'" % activationkey - # FIXME - support --profilename - # FIXME - support --systemorgid - rc, stdout, stderr = self.module.run_command(register_cmd, check_rc=True, use_unsafe_shell=True) - - def api(self, method, *args): - ''' - Convenience RPC wrapper - ''' - if not hasattr(self, 'server') or self.server is None: - if self.hostname != 'rhn.redhat.com': - url = "https://%s/rpc/api" % self.hostname - else: - url = "https://xmlrpc.%s/rpc/api" % self.hostname - self.server = xmlrpclib.Server(url, verbose=0) - self.session = self.server.auth.login(self.username, self.password) - - func = getattr(self.server, method) - return func(self.session, *args) - - def unregister(self): - ''' - Unregister a previously registered system - ''' - - # Initiate RPC connection - self.api('system.deleteSystems', [self.systemid]) - - # Remove systemid file - os.unlink(self.config['systemIdPath']) - - def subscribe(self, channels=[]): - if len(channels) <= 0: - return - current_channels = self.api('channel.software.listSystemChannels', self.systemid) - new_channels = [item['channel_label'] for item in current_channels] - new_channels.extend(channels) - return self.api('channel.software.setSystemChannels', self.systemid, new_channels) - - def _subscribe(self, channels=[]): - ''' - Subscribe to requested yum repositories using 'rhn-channel' command - ''' - rhn_channel_cmd = "rhn-channel --user='%s' --password='%s'" % (self.username, self.password) - rc, stdout, stderr = self.module.run_command(rhn_channel_cmd + " --available-channels", check_rc=True) - - # Enable requested repoid's - for wanted_channel in channels: - # Each 
inserted repo regexp will be matched. If no match, no success. - for available_channel in stdout.rstrip().split('\n'): # .rstrip() because of \n at the end -> empty string at the end - if re.search(wanted_repo, available_channel): - rc, stdout, stderr = self.module.run_command(rhn_channel_cmd + " --add --channel=%s" % available_channel, check_rc=True) - -def main(): - - # Read system RHN configuration - rhn = Rhn() - - module = AnsibleModule( - argument_spec = dict( - state = dict(default='present', choices=['present', 'absent']), - username = dict(default=None, required=False), - password = dict(default=None, required=False), - server_url = dict(default=rhn.config.get_option('serverURL'), required=False), - activationkey = dict(default=None, required=False), - enable_eus = dict(default=False, type='bool'), - channels = dict(default=[], type='list'), - ) - ) - - state = module.params['state'] - rhn.username = module.params['username'] - rhn.password = module.params['password'] - rhn.configure(module.params['server_url']) - activationkey = module.params['activationkey'] - channels = module.params['channels'] - rhn.module = module - - # Ensure system is registered - if state == 'present': - - # Check for missing parameters ... 
- if not (activationkey or rhn.username or rhn.password): - module.fail_json(msg="Missing arguments, must supply an activationkey (%s) or username (%s) and password (%s)" % (activationkey, rhn.username, rhn.password)) - if not activationkey and not (rhn.username and rhn.password): - module.fail_json(msg="Missing arguments, If registering without an activationkey, must supply username or password") - - # Register system - if rhn.is_registered: - module.exit_json(changed=False, msg="System already registered.") - else: - try: - rhn.enable() - rhn.register(module.params['enable_eus'] == True, activationkey) - rhn.subscribe(channels) - except Exception, e: - module.fail_json(msg="Failed to register with '%s': %s" % (rhn.hostname, e)) - - module.exit_json(changed=True, msg="System successfully registered to '%s'." % rhn.hostname) - - # Ensure system is *not* registered - if state == 'absent': - if not rhn.is_registered: - module.exit_json(changed=False, msg="System already unregistered.") - else: - try: - rhn.unregister() - except Exception, e: - module.fail_json(msg="Failed to unregister: %s" % e) - - module.exit_json(changed=True, msg="System successfully unregistered from %s." % rhn.hostname) - - -main() diff --git a/library/packaging/rpm_key b/library/packaging/rpm_key deleted file mode 100644 index f132d55250..0000000000 --- a/library/packaging/rpm_key +++ /dev/null @@ -1,206 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Ansible module to import third party repo keys to your rpm db -# (c) 2013, Héctor Acosta -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: rpm_key -author: Hector Acosta -short_description: Adds or removes a gpg key from the rpm db -description: - - Adds or removes (rpm --import) a gpg key to your rpm database. -version_added: "1.3" -options: - key: - required: true - default: null - aliases: [] - description: - - Key that will be modified. Can be a url, a file, or a keyid if the key already exists in the database. - state: - required: false - default: "present" - choices: [present, absent] - description: - - Wheather the key will be imported or removed from the rpm db. - validate_certs: - description: - - If C(no) and the C(key) is a url starting with https, SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. 
- required: false - default: 'yes' - choices: ['yes', 'no'] - -''' - -EXAMPLES = ''' -# Example action to import a key from a url -- rpm_key: state=present key=http://apt.sw.be/RPM-GPG-KEY.dag.txt - -# Example action to import a key from a file -- rpm_key: state=present key=/path/to/key.gpg - -# Example action to ensure a key is not present in the db -- rpm_key: state=absent key=DEADB33F -''' -import syslog -import os.path -import re -import tempfile - -def is_pubkey(string): - """Verifies if string is a pubkey""" - pgp_regex = ".*?(-----BEGIN PGP PUBLIC KEY BLOCK-----.*?-----END PGP PUBLIC KEY BLOCK-----).*" - return re.match(pgp_regex, string, re.DOTALL) - -class RpmKey: - - def __init__(self, module): - self.syslogging = False - # If the key is a url, we need to check if it's present to be idempotent, - # to do that, we need to check the keyid, which we can get from the armor. - keyfile = None - should_cleanup_keyfile = False - self.module = module - self.rpm = self.module.get_bin_path('rpm', True) - state = module.params['state'] - key = module.params['key'] - - if '://' in key: - keyfile = self.fetch_key(key) - keyid = self.getkeyid(keyfile) - should_cleanup_keyfile = True - elif self.is_keyid(key): - keyid = key - elif os.path.isfile(key): - keyfile = key - keyid = self.getkeyid(keyfile) - else: - self.module.fail_json(msg="Not a valid key %s" % key) - keyid = self.normalize_keyid(keyid) - - if state == 'present': - if self.is_key_imported(keyid): - module.exit_json(changed=False) - else: - if not keyfile: - self.module.fail_json(msg="When importing a key, a valid file must be given") - self.import_key(keyfile, dryrun=module.check_mode) - if should_cleanup_keyfile: - self.module.cleanup(keyfile) - module.exit_json(changed=True) - else: - if self.is_key_imported(keyid): - self.drop_key(keyid, dryrun=module.check_mode) - module.exit_json(changed=True) - else: - module.exit_json(changed=False) - - - def fetch_key(self, url): - """Downloads a key from url, 
returns a valid path to a gpg key""" - try: - rsp, info = fetch_url(self.module, url) - key = rsp.read() - if not is_pubkey(key): - self.module.fail_json(msg="Not a public key: %s" % url) - tmpfd, tmpname = tempfile.mkstemp() - tmpfile = os.fdopen(tmpfd, "w+b") - tmpfile.write(key) - tmpfile.close() - return tmpname - except urllib2.URLError, e: - self.module.fail_json(msg=str(e)) - - def normalize_keyid(self, keyid): - """Ensure a keyid doesn't have a leading 0x, has leading or trailing whitespace, and make sure is lowercase""" - ret = keyid.strip().lower() - if ret.startswith('0x'): - return ret[2:] - elif ret.startswith('0X'): - return ret[2:] - else: - return ret - - def getkeyid(self, keyfile): - gpg = self.module.get_bin_path('gpg', True) - stdout, stderr = self.execute_command([gpg, '--no-tty', '--batch', '--with-colons', '--fixed-list-mode', '--list-packets', keyfile]) - for line in stdout.splitlines(): - line = line.strip() - if line.startswith(':signature packet:'): - # We want just the last 8 characters of the keyid - keyid = line.split()[-1].strip()[8:] - return keyid - self.json_fail(msg="Unexpected gpg output") - - def is_keyid(self, keystr): - """Verifies if a key, as provided by the user is a keyid""" - return re.match('(0x)?[0-9a-f]{8}', keystr, flags=re.IGNORECASE) - - def execute_command(self, cmd): - if self.syslogging: - syslog.openlog('ansible-%s' % os.path.basename(__file__)) - syslog.syslog(syslog.LOG_NOTICE, 'Command %s' % '|'.join(cmd)) - rc, stdout, stderr = self.module.run_command(cmd) - if rc != 0: - self.module.fail_json(msg=stderr) - return stdout, stderr - - def is_key_imported(self, keyid): - stdout, stderr = self.execute_command([self.rpm, '-qa', 'gpg-pubkey']) - for line in stdout.splitlines(): - line = line.strip() - if not line: - continue - match = re.match('gpg-pubkey-([0-9a-f]+)-([0-9a-f]+)', line) - if not match: - self.module.fail_json(msg="rpm returned unexpected output [%s]" % line) - else: - if keyid == match.group(1): - 
return True - return False - - def import_key(self, keyfile, dryrun=False): - if not dryrun: - self.execute_command([self.rpm, '--import', keyfile]) - - def drop_key(self, key, dryrun=False): - if not dryrun: - self.execute_command([self.rpm, '--erase', '--allmatches', "gpg-pubkey-%s" % key]) - - -def main(): - module = AnsibleModule( - argument_spec = dict( - state=dict(default='present', choices=['present', 'absent'], type='str'), - key=dict(required=True, type='str'), - validate_certs=dict(default='yes', type='bool'), - ), - supports_check_mode=True - ) - - RpmKey(module) - - - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * -main() diff --git a/library/packaging/svr4pkg b/library/packaging/svr4pkg deleted file mode 100644 index e95d4d8643..0000000000 --- a/library/packaging/svr4pkg +++ /dev/null @@ -1,234 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Boyd Adamson -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -DOCUMENTATION = ''' ---- -module: svr4pkg -short_description: Manage Solaris SVR4 packages -description: - - Manages SVR4 packages on Solaris 10 and 11. - - These were the native packages on Solaris <= 10 and are available - as a legacy feature in Solaris 11. - - Note that this is a very basic packaging system. It will not enforce - dependencies on install or remove. 
-version_added: "0.9" -author: Boyd Adamson -options: - name: - description: - - Package name, e.g. C(SUNWcsr) - required: true - - state: - description: - - Whether to install (C(present)), or remove (C(absent)) a package. - - If the package is to be installed, then I(src) is required. - - The SVR4 package system doesn't provide an upgrade operation. You need to uninstall the old, then install the new package. - required: true - choices: ["present", "absent"] - - src: - description: - - Specifies the location to install the package from. Required when C(state=present). - - "Can be any path acceptable to the C(pkgadd) command's C(-d) option. e.g.: C(somefile.pkg), C(/dir/with/pkgs), C(http:/server/mypkgs.pkg)." - - If using a file or directory, they must already be accessible by the host. See the M(copy) module for a way to get them there. - proxy: - description: - - HTTP[s] proxy to be used if C(src) is a URL. - response_file: - description: - - Specifies the location of a response file to be used if package expects input on install. (added in Ansible 1.4) - required: false - zone: - description: - - Whether to install the package only in the current zone, or install it into all zones. - - The installation into all zones works only if you are working with the global zone. - required: false - default: "all" - choices: ["current", "all"] - version_added: "1.6" - category: - description: - - Install/Remove category instead of a single package. - required: false - choices: ["true", "false"] - version_added: "1.6" -''' - -EXAMPLES = ''' -# Install a package from an already copied file -- svr4pkg: name=CSWcommon src=/tmp/cswpkgs.pkg state=present - -# Install a package directly from an http site -- svr4pkg: name=CSWpkgutil src=http://get.opencsw.org/now state=present zone=current - -# Install a package with a response file -- svr4pkg: name=CSWggrep src=/tmp/third-party.pkg response_file=/tmp/ggrep.response state=present - -# Ensure that a package is not installed. 
-- svr4pkg: name=SUNWgnome-sound-recorder state=absent - -# Ensure that a category is not installed. -- svr4pkg: name=FIREFOX state=absent category=true -''' - - -import os -import tempfile - -def package_installed(module, name, category): - cmd = [module.get_bin_path('pkginfo', True)] - cmd.append('-q') - if category: - cmd.append('-c') - cmd.append(name) - rc, out, err = module.run_command(' '.join(cmd)) - if rc == 0: - return True - else: - return False - -def create_admin_file(): - (desc, filename) = tempfile.mkstemp(prefix='ansible_svr4pkg', text=True) - fullauto = ''' -mail= -instance=unique -partial=nocheck -runlevel=quit -idepend=nocheck -rdepend=nocheck -space=quit -setuid=nocheck -conflict=nocheck -action=nocheck -networktimeout=60 -networkretries=3 -authentication=quit -keystore=/var/sadm/security -proxy= -basedir=default -''' - os.write(desc, fullauto) - os.close(desc) - return filename - -def run_command(module, cmd): - progname = cmd[0] - cmd[0] = module.get_bin_path(progname, True) - return module.run_command(cmd) - -def package_install(module, name, src, proxy, response_file, zone, category): - adminfile = create_admin_file() - cmd = [ 'pkgadd', '-n'] - if zone == 'current': - cmd += [ '-G' ] - cmd += [ '-a', adminfile, '-d', src ] - if proxy is not None: - cmd += [ '-x', proxy ] - if response_file is not None: - cmd += [ '-r', response_file ] - if category: - cmd += [ '-Y' ] - cmd.append(name) - (rc, out, err) = run_command(module, cmd) - os.unlink(adminfile) - return (rc, out, err) - -def package_uninstall(module, name, src, category): - adminfile = create_admin_file() - if category: - cmd = [ 'pkgrm', '-na', adminfile, '-Y', name ] - else: - cmd = [ 'pkgrm', '-na', adminfile, name] - (rc, out, err) = run_command(module, cmd) - os.unlink(adminfile) - return (rc, out, err) - -def main(): - module = AnsibleModule( - argument_spec = dict( - name = dict(required = True), - state = dict(required = True, choices=['present', 'absent']), - src = 
dict(default = None), - proxy = dict(default = None), - response_file = dict(default = None), - zone = dict(required=False, default = 'all', choices=['current','all']), - category = dict(default=False, type='bool') - ), - supports_check_mode=True - ) - state = module.params['state'] - name = module.params['name'] - src = module.params['src'] - proxy = module.params['proxy'] - response_file = module.params['response_file'] - zone = module.params['zone'] - category = module.params['category'] - rc = None - out = '' - err = '' - result = {} - result['name'] = name - result['state'] = state - - if state == 'present': - if src is None: - module.fail_json(name=name, - msg="src is required when state=present") - if not package_installed(module, name, category): - if module.check_mode: - module.exit_json(changed=True) - (rc, out, err) = package_install(module, name, src, proxy, response_file, zone, category) - # Stdout is normally empty but for some packages can be - # very long and is not often useful - if len(out) > 75: - out = out[:75] + '...' 
- - elif state == 'absent': - if package_installed(module, name, category): - if module.check_mode: - module.exit_json(changed=True) - (rc, out, err) = package_uninstall(module, name, src, category) - out = out[:75] - - # Success, Warning, Interruption, Reboot all, Reboot this return codes - if rc in (0, 2, 3, 10, 20): - result['changed'] = True - # no install nor uninstall, or failed - else: - result['changed'] = False - - # Fatal error, Administration, Administration Interaction return codes - if rc in (1, 4 , 5): - result['failed'] = True - else: - result['failed'] = False - - if out: - result['stdout'] = out - if err: - result['stderr'] = err - - module.exit_json(**result) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/packaging/swdepot b/library/packaging/swdepot deleted file mode 100644 index b41a860531..0000000000 --- a/library/packaging/swdepot +++ /dev/null @@ -1,196 +0,0 @@ -#!/usr/bin/python -tt -# -*- coding: utf-8 -*- - -# (c) 2013, Raul Melo -# Written by Raul Melo -# Based on yum module written by Seth Vidal -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . 
- -import re -import pipes - -DOCUMENTATION = ''' ---- -module: swdepot -short_description: Manage packages with swdepot package manager (HP-UX) -description: - - Will install, upgrade and remove packages with swdepot package manager (HP-UX) -version_added: "1.4" -notes: [] -author: Raul Melo -options: - name: - description: - - package name. - required: true - default: null - choices: [] - aliases: [] - version_added: 1.4 - state: - description: - - whether to install (C(present), C(latest)), or remove (C(absent)) a package. - required: true - default: null - choices: [ 'present', 'latest', 'absent'] - aliases: [] - version_added: 1.4 - depot: - description: - - The source repository from which install or upgrade a package. - required: false - default: null - choices: [] - aliases: [] - version_added: 1.4 -''' - -EXAMPLES = ''' -- swdepot: name=unzip-6.0 state=installed depot=repository:/path -- swdepot: name=unzip state=latest depot=repository:/path -- swdepot: name=unzip state=absent -''' - -def compare_package(version1, version2): - """ Compare version packages. - Return values: - -1 first minor - 0 equal - 1 fisrt greater """ - - def normalize(v): - return [int(x) for x in re.sub(r'(\.0+)*$', '', v).split(".")] - return cmp(normalize(version1), normalize(version2)) - -def query_package(module, name, depot=None): - """ Returns whether a package is installed or not and version. """ - - cmd_list = '/usr/sbin/swlist -a revision -l product' - if depot: - rc, stdout, stderr = module.run_command("%s -s %s %s | grep %s" % (cmd_list, pipes.quote(depot), pipes.quote(name), pipes.quote(name)), use_unsafe_shell=True) - else: - rc, stdout, stderr = module.run_command("%s %s | grep %s" % (cmd_list, pipes.quote(name), pipes.quote(name)), use_unsafe_shell=True) - if rc == 0: - version = re.sub("\s\s+|\t" , " ", stdout).strip().split()[1] - else: - version = None - - return rc, version - -def remove_package(module, name): - """ Uninstall package if installed. 
""" - - cmd_remove = '/usr/sbin/swremove' - rc, stdout, stderr = module.run_command("%s %s" % (cmd_remove, name)) - - if rc == 0: - return rc, stdout - else: - return rc, stderr - -def install_package(module, depot, name): - """ Install package if not already installed """ - - cmd_install = '/usr/sbin/swinstall -x mount_all_filesystems=false' - rc, stdout, stderr = module.run_command("%s -s %s %s" % (cmd_install, depot, name)) - if rc == 0: - return rc, stdout - else: - return rc, stderr - -def main(): - module = AnsibleModule( - argument_spec = dict( - name = dict(aliases=['pkg'], required=True), - state = dict(choices=['present', 'absent', 'latest'], required=True), - depot = dict(default=None, required=False) - ), - supports_check_mode=True - ) - name = module.params['name'] - state = module.params['state'] - depot = module.params['depot'] - - changed = False - msg = "No changed" - rc = 0 - if ( state == 'present' or state == 'latest' ) and depot == None: - output = "depot parameter is mandatory in present or latest task" - module.fail_json(name=name, msg=output, rc=rc) - - - #Check local version - rc, version_installed = query_package(module, name) - if not rc: - installed = True - msg = "Already installed" - - else: - installed = False - - if ( state == 'present' or state == 'latest' ) and installed == False: - if module.check_mode: - module.exit_json(changed=True) - rc, output = install_package(module, depot, name) - - if not rc: - changed = True - msg = "Packaged installed" - - else: - module.fail_json(name=name, msg=output, rc=rc) - - elif state == 'latest' and installed == True: - #Check depot version - rc, version_depot = query_package(module, name, depot) - - if not rc: - if compare_package(version_installed,version_depot) == -1: - if module.check_mode: - module.exit_json(changed=True) - #Install new version - rc, output = install_package(module, depot, name) - - if not rc: - msg = "Packge upgraded, Before " + version_installed + " Now " + version_depot 
- changed = True - - else: - module.fail_json(name=name, msg=output, rc=rc) - - else: - output = "Software package not in repository " + depot - module.fail_json(name=name, msg=output, rc=rc) - - elif state == 'absent' and installed == True: - if module.check_mode: - module.exit_json(changed=True) - rc, output = remove_package(module, name) - if not rc: - changed = True - msg = "Package removed" - else: - module.fail_json(name=name, msg=output, rc=rc) - - if module.check_mode: - module.exit_json(changed=False) - - module.exit_json(changed=changed, name=name, state=state, msg=msg) - -# import module snippets -from ansible.module_utils.basic import * - -main() - diff --git a/library/packaging/urpmi b/library/packaging/urpmi deleted file mode 100644 index a42ee7b87f..0000000000 --- a/library/packaging/urpmi +++ /dev/null @@ -1,200 +0,0 @@ -#!/usr/bin/python -tt -# -*- coding: utf-8 -*- - -# (c) 2013, Philippe Makowski -# Written by Philippe Makowski -# Based on apt module written by Matthew Williams -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . - - -DOCUMENTATION = ''' ---- -module: urpmi -short_description: Urpmi manager -description: - - Manages packages with I(urpmi) (such as for Mageia or Mandriva) -version_added: "1.3.4" -options: - pkg: - description: - - name of package to install, upgrade or remove. 
- required: true - default: null - state: - description: - - Indicates the desired package state - required: false - default: present - choices: [ "absent", "present" ] - update_cache: - description: - - update the package database first C(urpmi.update -a). - required: false - default: no - choices: [ "yes", "no" ] - no-suggests: - description: - - Corresponds to the C(--no-suggests) option for I(urpmi). - required: false - default: yes - choices: [ "yes", "no" ] - force: - description: - - Corresponds to the C(--force) option for I(urpmi). - required: false - default: yes - choices: [ "yes", "no" ] -author: Philippe Makowski -notes: [] -''' - -EXAMPLES = ''' -# install package foo -- urpmi: pkg=foo state=present -# remove package foo -- urpmi: pkg=foo state=absent -# description: remove packages foo and bar -- urpmi: pkg=foo,bar state=absent -# description: update the package database (urpmi.update -a -q) and install bar (bar will be the updated if a newer version exists) -- urpmi: name=bar, state=present, update_cache=yes -''' - - -import json -import shlex -import os -import sys - -URPMI_PATH = '/usr/sbin/urpmi' -URPME_PATH = '/usr/sbin/urpme' - -def query_package(module, name): - # rpm -q returns 0 if the package is installed, - # 1 if it is not installed - cmd = "rpm -q %s" % (name) - rc, stdout, stderr = module.run_command(cmd, check_rc=False) - if rc == 0: - return True - else: - return False - -def query_package_provides(module, name): - # rpm -q returns 0 if the package is installed, - # 1 if it is not installed - cmd = "rpm -q --provides %s" % (name) - rc, stdout, stderr = module.run_command(cmd, check_rc=False) - return rc == 0 - - -def update_package_db(module): - cmd = "urpmi.update -a -q" - rc, stdout, stderr = module.run_command(cmd, check_rc=False) - if rc != 0: - module.fail_json(msg="could not update package db") - - -def remove_packages(module, packages): - - remove_c = 0 - # Using a for loop incase of error, we can report the package that failed 
- for package in packages: - # Query the package first, to see if we even need to remove - if not query_package(module, package): - continue - - cmd = "%s --auto %s" % (URPME_PATH, package) - rc, stdout, stderr = module.run_command(cmd, check_rc=False) - - if rc != 0: - module.fail_json(msg="failed to remove %s" % (package)) - - remove_c += 1 - - if remove_c > 0: - - module.exit_json(changed=True, msg="removed %s package(s)" % remove_c) - - module.exit_json(changed=False, msg="package(s) already absent") - - -def install_packages(module, pkgspec, force=True, no_suggests=True): - - packages = "" - for package in pkgspec: - if not query_package_provides(module, package): - packages += "'%s' " % package - - if len(packages) != 0: - if no_suggests: - no_suggests_yes = '--no-suggests' - else: - no_suggests_yes = '' - - if force: - force_yes = '--force' - else: - force_yes = '' - - cmd = ("%s --auto %s --quiet %s %s" % (URPMI_PATH, force_yes, no_suggests_yes, packages)) - - rc, out, err = module.run_command(cmd) - - installed = True - for packages in pkgspec: - if not query_package_provides(module, package): - installed = False - - # urpmi always have 0 for exit code if --force is used - if rc or not installed: - module.fail_json(msg="'urpmi %s' failed: %s" % (packages, err)) - else: - module.exit_json(changed=True, msg="%s present(s)" % packages) - else: - module.exit_json(changed=False) - - -def main(): - module = AnsibleModule( - argument_spec = dict( - state = dict(default='installed', choices=['installed', 'removed', 'absent', 'present']), - update_cache = dict(default=False, aliases=['update-cache'], type='bool'), - force = dict(default=True, type='bool'), - no_suggests = dict(default=True, aliases=['no-suggests'], type='bool'), - package = dict(aliases=['pkg', 'name'], required=True))) - - - if not os.path.exists(URPMI_PATH): - module.fail_json(msg="cannot find urpmi, looking for %s" % (URPMI_PATH)) - - p = module.params - - force_yes = p['force'] - no_suggest_yes 
= p['no_suggests'] - - if p['update_cache']: - update_package_db(module) - - packages = p['package'].split(',') - - if p['state'] in [ 'installed', 'present' ]: - install_packages(module, packages, force_yes, no_suggest_yes) - - elif p['state'] in [ 'removed', 'absent' ]: - remove_packages(module, packages) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/packaging/yum b/library/packaging/yum deleted file mode 100644 index c3158077d1..0000000000 --- a/library/packaging/yum +++ /dev/null @@ -1,838 +0,0 @@ -#!/usr/bin/python -tt -# -*- coding: utf-8 -*- - -# (c) 2012, Red Hat, Inc -# Written by Seth Vidal -# (c) 2014, Epic Games, Inc. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - - -import traceback -import os -import yum - -try: - from yum.misc import find_unfinished_transactions, find_ts_remaining - from rpmUtils.miscutils import splitFilename - transaction_helpers = True -except: - transaction_helpers = False - -DOCUMENTATION = ''' ---- -module: yum -version_added: historical -short_description: Manages packages with the I(yum) package manager -description: - - Installs, upgrade, removes, and lists packages and groups with the I(yum) package manager. -options: - name: - description: - - "Package name, or package specifier with version, like C(name-1.0). When using state=latest, this can be '*' which means run: yum -y update. 
You can also pass a url or a local path to a rpm file." - required: true - default: null - aliases: [] - list: - description: - - Various (non-idempotent) commands for usage with C(/usr/bin/ansible) and I(not) playbooks. See examples. - required: false - default: null - state: - description: - - Whether to install (C(present), C(latest)), or remove (C(absent)) a package. - required: false - choices: [ "present", "latest", "absent" ] - default: "present" - enablerepo: - description: - - I(Repoid) of repositories to enable for the install/update operation. - These repos will not persist beyond the transaction. - When specifying multiple repos, separate them with a ",". - required: false - version_added: "0.9" - default: null - aliases: [] - - disablerepo: - description: - - I(Repoid) of repositories to disable for the install/update operation. - These repos will not persist beyond the transaction. - When specifying multiple repos, separate them with a ",". - required: false - version_added: "0.9" - default: null - aliases: [] - - conf_file: - description: - - The remote yum configuration file to use for the transaction. - required: false - version_added: "0.6" - default: null - aliases: [] - - disable_gpg_check: - description: - - Whether to disable the GPG checking of signatures of packages being - installed. Has an effect only if state is I(present) or I(latest). 
- required: false - version_added: "1.2" - default: "no" - choices: ["yes", "no"] - aliases: [] - -notes: [] -# informational: requirements for nodes -requirements: [ yum, rpm ] -author: Seth Vidal -''' - -EXAMPLES = ''' -- name: install the latest version of Apache - yum: name=httpd state=latest - -- name: remove the Apache package - yum: name=httpd state=absent - -- name: install the latest version of Apache from the testing repo - yum: name=httpd enablerepo=testing state=present - -- name: upgrade all packages - yum: name=* state=latest - -- name: install the nginx rpm from a remote repo - yum: name=http://nginx.org/packages/centos/6/noarch/RPMS/nginx-release-centos-6-0.el6.ngx.noarch.rpm state=present - -- name: install nginx rpm from a local file - yum: name=/usr/local/src/nginx-release-centos-6-0.el6.ngx.noarch.rpm state=present - -- name: install the 'Development tools' package group - yum: name="@Development tools" state=present -''' - -def_qf = "%{name}-%{version}-%{release}.%{arch}" - -repoquery='/usr/bin/repoquery' -if not os.path.exists(repoquery): - repoquery = None - -yumbin='/usr/bin/yum' - -import syslog - -def log(msg): - syslog.openlog('ansible-yum', 0, syslog.LOG_USER) - syslog.syslog(syslog.LOG_NOTICE, msg) - -def yum_base(conf_file=None, cachedir=False): - - my = yum.YumBase() - my.preconf.debuglevel=0 - my.preconf.errorlevel=0 - if conf_file and os.path.exists(conf_file): - my.preconf.fn = conf_file - if cachedir or os.geteuid() != 0: - if hasattr(my, 'setCacheDir'): - my.setCacheDir() - else: - cachedir = yum.misc.getCacheDir() - my.repos.setCacheDir(cachedir) - my.conf.cache = 0 - - return my - -def install_yum_utils(module): - - if not module.check_mode: - yum_path = module.get_bin_path('yum') - if yum_path: - rc, so, se = module.run_command('%s -y install yum-utils' % yum_path) - if rc == 0: - this_path = module.get_bin_path('repoquery') - global repoquery - repoquery = this_path - -def po_to_nevra(po): - - if hasattr(po, 'ui_nevra'): - 
return po.ui_nevra - else: - return '%s-%s-%s.%s' % (po.name, po.version, po.release, po.arch) - -def is_installed(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=[], dis_repos=[], is_pkg=False): - - if not repoq: - - pkgs = [] - try: - my = yum_base(conf_file) - for rid in en_repos: - my.repos.enableRepo(rid) - for rid in dis_repos: - my.repos.disableRepo(rid) - - e,m,u = my.rpmdb.matchPackageNames([pkgspec]) - pkgs = e + m - if not pkgs: - pkgs.extend(my.returnInstalledPackagesByDep(pkgspec)) - except Exception, e: - module.fail_json(msg="Failure talking to yum: %s" % e) - - return [ po_to_nevra(p) for p in pkgs ] - - else: - - cmd = repoq + ["--disablerepo=*", "--pkgnarrow=installed", "--qf", qf, pkgspec] - rc,out,err = module.run_command(cmd) - if not is_pkg: - cmd = repoq + ["--disablerepo=*", "--pkgnarrow=installed", "--qf", qf, "--whatprovides", pkgspec] - rc2,out2,err2 = module.run_command(cmd) - else: - rc2,out2,err2 = (0, '', '') - - if rc == 0 and rc2 == 0: - out += out2 - return [ p for p in out.split('\n') if p.strip() ] - else: - module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err + err2)) - - return [] - -def is_available(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=[], dis_repos=[]): - - if not repoq: - - pkgs = [] - try: - my = yum_base(conf_file) - for rid in en_repos: - my.repos.enableRepo(rid) - for rid in dis_repos: - my.repos.disableRepo(rid) - - e,m,u = my.pkgSack.matchPackageNames([pkgspec]) - pkgs = e + m - if not pkgs: - pkgs.extend(my.returnPackagesByDep(pkgspec)) - except Exception, e: - module.fail_json(msg="Failure talking to yum: %s" % e) - - return [ po_to_nevra(p) for p in pkgs ] - - else: - myrepoq = list(repoq) - - for repoid in dis_repos: - r_cmd = ['--disablerepo', repoid] - myrepoq.extend(r_cmd) - - for repoid in en_repos: - r_cmd = ['--enablerepo', repoid] - myrepoq.extend(r_cmd) - - cmd = myrepoq + ["--qf", qf, pkgspec] - rc,out,err = module.run_command(cmd) - if rc == 0: - return [ p for p in 
out.split('\n') if p.strip() ] - else: - module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err)) - - - return [] - -def is_update(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=[], dis_repos=[]): - - if not repoq: - - retpkgs = [] - pkgs = [] - updates = [] - - try: - my = yum_base(conf_file) - for rid in en_repos: - my.repos.enableRepo(rid) - for rid in dis_repos: - my.repos.disableRepo(rid) - - pkgs = my.returnPackagesByDep(pkgspec) + my.returnInstalledPackagesByDep(pkgspec) - if not pkgs: - e,m,u = my.pkgSack.matchPackageNames([pkgspec]) - pkgs = e + m - updates = my.doPackageLists(pkgnarrow='updates').updates - except Exception, e: - module.fail_json(msg="Failure talking to yum: %s" % e) - - for pkg in pkgs: - if pkg in updates: - retpkgs.append(pkg) - - return set([ po_to_nevra(p) for p in retpkgs ]) - - else: - myrepoq = list(repoq) - for repoid in dis_repos: - r_cmd = ['--disablerepo', repoid] - myrepoq.extend(r_cmd) - - for repoid in en_repos: - r_cmd = ['--enablerepo', repoid] - myrepoq.extend(r_cmd) - - cmd = myrepoq + ["--pkgnarrow=updates", "--qf", qf, pkgspec] - rc,out,err = module.run_command(cmd) - - if rc == 0: - return set([ p for p in out.split('\n') if p.strip() ]) - else: - module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err)) - - return [] - -def what_provides(module, repoq, req_spec, conf_file, qf=def_qf, en_repos=[], dis_repos=[]): - - if not repoq: - - pkgs = [] - try: - my = yum_base(conf_file) - for rid in en_repos: - my.repos.enableRepo(rid) - for rid in dis_repos: - my.repos.disableRepo(rid) - - pkgs = my.returnPackagesByDep(req_spec) + my.returnInstalledPackagesByDep(req_spec) - if not pkgs: - e,m,u = my.pkgSack.matchPackageNames([req_spec]) - pkgs.extend(e) - pkgs.extend(m) - e,m,u = my.rpmdb.matchPackageNames([req_spec]) - pkgs.extend(e) - pkgs.extend(m) - except Exception, e: - module.fail_json(msg="Failure talking to yum: %s" % e) - - return set([ po_to_nevra(p) for p in pkgs ]) - - else: - myrepoq = 
list(repoq) - for repoid in dis_repos: - r_cmd = ['--disablerepo', repoid] - myrepoq.extend(r_cmd) - - for repoid in en_repos: - r_cmd = ['--enablerepo', repoid] - myrepoq.extend(r_cmd) - - cmd = myrepoq + ["--qf", qf, "--whatprovides", req_spec] - rc,out,err = module.run_command(cmd) - cmd = myrepoq + ["--qf", qf, req_spec] - rc2,out2,err2 = module.run_command(cmd) - if rc == 0 and rc2 == 0: - out += out2 - pkgs = set([ p for p in out.split('\n') if p.strip() ]) - if not pkgs: - pkgs = is_installed(module, repoq, req_spec, conf_file, qf=qf) - return pkgs - else: - module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err + err2)) - - return [] - -def transaction_exists(pkglist): - """ - checks the package list to see if any packages are - involved in an incomplete transaction - """ - - conflicts = [] - if not transaction_helpers: - return conflicts - - # first, we create a list of the package 'nvreas' - # so we can compare the pieces later more easily - pkglist_nvreas = [] - for pkg in pkglist: - pkglist_nvreas.append(splitFilename(pkg)) - - # next, we build the list of packages that are - # contained within an unfinished transaction - unfinished_transactions = find_unfinished_transactions() - for trans in unfinished_transactions: - steps = find_ts_remaining(trans) - for step in steps: - # the action is install/erase/etc., but we only - # care about the package spec contained in the step - (action, step_spec) = step - (n,v,r,e,a) = splitFilename(step_spec) - # and see if that spec is in the list of packages - # requested for installation/updating - for pkg in pkglist_nvreas: - # if the name and arch match, we're going to assume - # this package is part of a pending transaction - # the label is just for display purposes - label = "%s-%s" % (n,a) - if n == pkg[0] and a == pkg[4]: - if label not in conflicts: - conflicts.append("%s-%s" % (n,a)) - break - return conflicts - -def local_nvra(module, path): - """return nvra of a local rpm passed in""" - - cmd = 
['/bin/rpm', '-qp' ,'--qf', - '%{name}-%{version}-%{release}.%{arch}\n', path ] - rc, out, err = module.run_command(cmd) - if rc != 0: - return None - nvra = out.split('\n')[0] - return nvra - -def pkg_to_dict(pkgstr): - - if pkgstr.strip(): - n,e,v,r,a,repo = pkgstr.split('|') - else: - return {'error_parsing': pkgstr} - - d = { - 'name':n, - 'arch':a, - 'epoch':e, - 'release':r, - 'version':v, - 'repo':repo, - 'nevra': '%s:%s-%s-%s.%s' % (e,n,v,r,a) - } - - if repo == 'installed': - d['yumstate'] = 'installed' - else: - d['yumstate'] = 'available' - - return d - -def repolist(module, repoq, qf="%{repoid}"): - - cmd = repoq + ["--qf", qf, "-a"] - rc,out,err = module.run_command(cmd) - ret = [] - if rc == 0: - ret = set([ p for p in out.split('\n') if p.strip() ]) - return ret - -def list_stuff(module, conf_file, stuff): - - qf = "%{name}|%{epoch}|%{version}|%{release}|%{arch}|%{repoid}" - repoq = [repoquery, '--show-duplicates', '--plugins', '--quiet', '-q'] - if conf_file and os.path.exists(conf_file): - repoq += ['-c', conf_file] - - if stuff == 'installed': - return [ pkg_to_dict(p) for p in is_installed(module, repoq, '-a', conf_file, qf=qf) if p.strip() ] - elif stuff == 'updates': - return [ pkg_to_dict(p) for p in is_update(module, repoq, '-a', conf_file, qf=qf) if p.strip() ] - elif stuff == 'available': - return [ pkg_to_dict(p) for p in is_available(module, repoq, '-a', conf_file, qf=qf) if p.strip() ] - elif stuff == 'repos': - return [ dict(repoid=name, state='enabled') for name in repolist(module, repoq) if name.strip() ] - else: - return [ pkg_to_dict(p) for p in is_installed(module, repoq, stuff, conf_file, qf=qf) + is_available(module, repoq, stuff, conf_file, qf=qf) if p.strip() ] - -def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): - - res = {} - res['results'] = [] - res['msg'] = '' - res['rc'] = 0 - res['changed'] = False - - for spec in items: - pkg = None - - # check if pkgspec is installed (if possible for 
idempotence) - # localpkg - if spec.endswith('.rpm') and '://' not in spec: - # get the pkg name-v-r.arch - if not os.path.exists(spec): - res['msg'] += "No Package file matching '%s' found on system" % spec - module.fail_json(**res) - - nvra = local_nvra(module, spec) - # look for them in the rpmdb - if is_installed(module, repoq, nvra, conf_file, en_repos=en_repos, dis_repos=dis_repos): - # if they are there, skip it - continue - pkg = spec - - # URL - elif '://' in spec: - pkg = spec - - #groups :( - elif spec.startswith('@'): - # complete wild ass guess b/c it's a group - pkg = spec - - # range requires or file-requires or pkgname :( - else: - # most common case is the pkg is already installed and done - # short circuit all the bs - and search for it as a pkg in is_installed - # if you find it then we're done - if not set(['*','?']).intersection(set(spec)): - pkgs = is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos, is_pkg=True) - if pkgs: - res['results'].append('%s providing %s is already installed' % (pkgs[0], spec)) - continue - - # look up what pkgs provide this - pkglist = what_provides(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos) - if not pkglist: - res['msg'] += "No Package matching '%s' found available, installed or updated" % spec - module.fail_json(**res) - - # if any of the packages are involved in a transaction, fail now - # so that we don't hang on the yum operation later - conflicts = transaction_exists(pkglist) - if len(conflicts) > 0: - res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts) - module.fail_json(**res) - - # if any of them are installed - # then nothing to do - - found = False - for this in pkglist: - if is_installed(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos, is_pkg=True): - found = True - res['results'].append('%s providing %s is already installed' % (this, spec)) - break - - # if the version of the pkg 
you have installed is not in ANY repo, but there are - # other versions in the repos (both higher and lower) then the previous checks won't work. - # so we check one more time. This really only works for pkgname - not for file provides or virt provides - # but virt provides should be all caught in what_provides on its own. - # highly irritating - if not found: - if is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos): - found = True - res['results'].append('package providing %s is already installed' % (spec)) - - if found: - continue - - # if not - then pass in the spec as what to install - # we could get here if nothing provides it but that's not - # the error we're catching here - pkg = spec - - cmd = yum_basecmd + ['install', pkg] - - if module.check_mode: - module.exit_json(changed=True) - - changed = True - - rc, out, err = module.run_command(cmd) - - # Fail on invalid urls: - if (rc == 1 and '://' in spec and ('No package %s available.' % spec in out or 'Cannot open: %s. Skipping.' % spec in err)): - err = 'Package at %s could not be installed' % spec - module.fail_json(changed=False,msg=err,rc=1) - elif (rc != 0 and 'Nothing to do' in err) or 'Nothing to do' in out: - # avoid failing in the 'Nothing To Do' case - # this may happen with an URL spec. - # for an already installed group, - # we get rc = 0 and 'Nothing to do' in out, not in err. 
- rc = 0 - err = '' - out = '%s: Nothing to do' % spec - changed = False - - res['rc'] += rc - res['results'].append(out) - res['msg'] += err - - # FIXME - if we did an install - go and check the rpmdb to see if it actually installed - # look for the pkg in rpmdb - # look for the pkg via obsoletes - - # accumulate any changes - res['changed'] |= changed - - module.exit_json(**res) - - -def remove(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): - - res = {} - res['results'] = [] - res['msg'] = '' - res['changed'] = False - res['rc'] = 0 - - for pkg in items: - is_group = False - # group remove - this is doom on a stick - if pkg.startswith('@'): - is_group = True - else: - if not is_installed(module, repoq, pkg, conf_file, en_repos=en_repos, dis_repos=dis_repos): - res['results'].append('%s is not installed' % pkg) - continue - - # run an actual yum transaction - cmd = yum_basecmd + ["remove", pkg] - - if module.check_mode: - module.exit_json(changed=True) - - rc, out, err = module.run_command(cmd) - - res['rc'] += rc - res['results'].append(out) - res['msg'] += err - - # compile the results into one batch. If anything is changed - # then mark changed - # at the end - if we've end up failed then fail out of the rest - # of the process - - # at this point we should check to see if the pkg is no longer present - - if not is_group: # we can't sensibly check for a group being uninstalled reliably - # look to see if the pkg shows up from is_installed. 
If it doesn't - if not is_installed(module, repoq, pkg, conf_file, en_repos=en_repos, dis_repos=dis_repos): - res['changed'] = True - else: - module.fail_json(**res) - - if rc != 0: - module.fail_json(**res) - - module.exit_json(**res) - -def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): - - res = {} - res['results'] = [] - res['msg'] = '' - res['changed'] = False - res['rc'] = 0 - - for spec in items: - - pkg = None - basecmd = 'update' - cmd = '' - # groups, again - if spec.startswith('@'): - pkg = spec - - elif spec == '*': #update all - # use check-update to see if there is any need - rc,out,err = module.run_command(yum_basecmd + ['check-update']) - if rc == 100: - cmd = yum_basecmd + [basecmd] - else: - res['results'].append('All packages up to date') - continue - - # dep/pkgname - find it - else: - if is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos): - basecmd = 'update' - else: - basecmd = 'install' - - pkglist = what_provides(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos) - if not pkglist: - res['msg'] += "No Package matching '%s' found available, installed or updated" % spec - module.fail_json(**res) - - nothing_to_do = True - for this in pkglist: - if basecmd == 'install' and is_available(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos): - nothing_to_do = False - break - - if basecmd == 'update' and is_update(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos): - nothing_to_do = False - break - - if nothing_to_do: - res['results'].append("All packages providing %s are up to date" % spec) - continue - - # if any of the packages are involved in a transaction, fail now - # so that we don't hang on the yum operation later - conflicts = transaction_exists(pkglist) - if len(conflicts) > 0: - res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts) - module.fail_json(**res) - - pkg = spec - if not 
cmd: - cmd = yum_basecmd + [basecmd, pkg] - - if module.check_mode: - return module.exit_json(changed=True) - - rc, out, err = module.run_command(cmd) - - res['rc'] += rc - res['results'].append(out) - res['msg'] += err - - # FIXME if it is - update it and check to see if it applied - # check to see if there is no longer an update available for the pkgspec - - if rc: - res['failed'] = True - else: - res['changed'] = True - - module.exit_json(**res) - -def ensure(module, state, pkgspec, conf_file, enablerepo, disablerepo, - disable_gpg_check): - - # take multiple args comma separated - items = pkgspec.split(',') - - # need debug level 2 to get 'Nothing to do' for groupinstall. - yum_basecmd = [yumbin, '-d', '2', '-y'] - - - if not repoquery: - repoq = None - else: - repoq = [repoquery, '--show-duplicates', '--plugins', '--quiet', '-q'] - - if conf_file and os.path.exists(conf_file): - yum_basecmd += ['-c', conf_file] - if repoq: - repoq += ['-c', conf_file] - - dis_repos =[] - en_repos = [] - if disablerepo: - dis_repos = disablerepo.split(',') - if enablerepo: - en_repos = enablerepo.split(',') - - for repoid in dis_repos: - r_cmd = ['--disablerepo=%s' % repoid] - yum_basecmd.extend(r_cmd) - - for repoid in en_repos: - r_cmd = ['--enablerepo=%s' % repoid] - yum_basecmd.extend(r_cmd) - - if state in ['installed', 'present', 'latest']: - my = yum_base(conf_file) - try: - for r in dis_repos: - my.repos.disableRepo(r) - - current_repos = my.repos.repos.keys() - for r in en_repos: - try: - my.repos.enableRepo(r) - new_repos = my.repos.repos.keys() - for i in new_repos: - if not i in current_repos: - rid = my.repos.getRepo(i) - a = rid.repoXML.repoid - current_repos = new_repos - except yum.Errors.YumBaseError, e: - module.fail_json(msg="Error setting/accessing repo %s: %s" % (r, e)) - except yum.Errors.YumBaseError, e: - module.fail_json(msg="Error accessing repos: %s" % e) - - if state in ['installed', 'present']: - if disable_gpg_check: - 
yum_basecmd.append('--nogpgcheck') - install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos) - elif state in ['removed', 'absent']: - remove(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos) - elif state == 'latest': - if disable_gpg_check: - yum_basecmd.append('--nogpgcheck') - latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos) - - # should be caught by AnsibleModule argument_spec - return dict(changed=False, failed=True, results='', errors='unexpected state') - -def main(): - - # state=installed name=pkgspec - # state=removed name=pkgspec - # state=latest name=pkgspec - # - # informational commands: - # list=installed - # list=updates - # list=available - # list=repos - # list=pkgspec - - module = AnsibleModule( - argument_spec = dict( - name=dict(aliases=['pkg']), - # removed==absent, installed==present, these are accepted as aliases - state=dict(default='installed', choices=['absent','present','installed','removed','latest']), - enablerepo=dict(), - disablerepo=dict(), - list=dict(), - conf_file=dict(default=None), - disable_gpg_check=dict(required=False, default="no", type='bool'), - # this should not be needed, but exists as a failsafe - install_repoquery=dict(required=False, default="yes", type='bool'), - ), - required_one_of = [['name','list']], - mutually_exclusive = [['name','list']], - supports_check_mode = True - ) - - # this should not be needed, but exists as a failsafe - params = module.params - if params['install_repoquery'] and not repoquery and not module.check_mode: - install_yum_utils(module) - - if params['list']: - if not repoquery: - module.fail_json(msg="repoquery is required to use list= with this module. 
Please install the yum-utils package.") - results = dict(results=list_stuff(module, params['conf_file'], params['list'])) - module.exit_json(**results) - - else: - pkg = params['name'] - state = params['state'] - enablerepo = params.get('enablerepo', '') - disablerepo = params.get('disablerepo', '') - disable_gpg_check = params['disable_gpg_check'] - res = ensure(module, state, pkg, params['conf_file'], enablerepo, - disablerepo, disable_gpg_check) - module.fail_json(msg="we should never get here unless this all failed", **res) - -# import module snippets -from ansible.module_utils.basic import * -main() - diff --git a/library/packaging/zypper b/library/packaging/zypper deleted file mode 100644 index 73b1569480..0000000000 --- a/library/packaging/zypper +++ /dev/null @@ -1,260 +0,0 @@ -#!/usr/bin/python -tt -# -*- coding: utf-8 -*- - -# (c) 2013, Patrick Callahan -# based on -# openbsd_pkg -# (c) 2013 -# Patrik Lundin -# -# yum -# (c) 2012, Red Hat, Inc -# Written by Seth Vidal -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -import re - -DOCUMENTATION = ''' ---- -module: zypper -author: Patrick Callahan -version_added: "1.2" -short_description: Manage packages on SuSE and openSuSE -description: - - Manage packages on SuSE and openSuSE using the zypper and rpm tools. -options: - name: - description: - - package name or package specifier wth version C(name) or C(name-1.0). 
- required: true - aliases: [ 'pkg' ] - state: - description: - - C(present) will make sure the package is installed. - C(latest) will make sure the latest version of the package is installed. - C(absent) will make sure the specified package is not installed. - required: false - choices: [ present, latest, absent ] - default: "present" - disable_gpg_check: - description: - - Whether to disable to GPG signature checking of the package - signature being installed. Has an effect only if state is - I(present) or I(latest). - required: false - default: "no" - choices: [ "yes", "no" ] - aliases: [] - disable_recommends: - version_added: "1.8" - description: - - Corresponds to the C(--no-recommends) option for I(zypper). Default behavior (C(yes)) modifies zypper's default behavior; C(no) does install recommended packages. - required: false - default: "yes" - choices: [ "yes", "no" ] - -notes: [] -# informational: requirements for nodes -requirements: [ zypper, rpm ] -author: Patrick Callahan -''' - -EXAMPLES = ''' -# Install "nmap" -- zypper: name=nmap state=present - -# Install apache2 with recommended packages -- zypper: name=apache2 state=present disable_recommends=no - -# Remove the "nmap" package -- zypper: name=nmap state=absent -''' - -# Function used for getting versions of currently installed packages. -def get_current_version(m, name): - cmd = ['/bin/rpm', '-q', '--qf', '%{NAME} %{VERSION}-%{RELEASE}\n'] - cmd.extend(name) - (rc, stdout, stderr) = m.run_command(cmd) - - current_version = {} - rpmoutput_re = re.compile('^(\S+) (\S+)$') - for stdoutline, package in zip(stdout.splitlines(), name): - m = rpmoutput_re.match(stdoutline) - if m == None: - return None - rpmpackage = m.group(1) - rpmversion = m.group(2) - if package != rpmpackage: - return None - current_version[package] = rpmversion - - return current_version - -# Function used to find out if a package is currently installed. 
-def get_package_state(m, packages): - cmd = ['/bin/rpm', '--query', '--qf', 'package %{NAME} is installed\n'] - cmd.extend(packages) - - rc, stdout, stderr = m.run_command(cmd, check_rc=False) - - installed_state = {} - rpmoutput_re = re.compile('^package (\S+) (.*)$') - for stdoutline, name in zip(stdout.splitlines(), packages): - m = rpmoutput_re.match(stdoutline) - if m == None: - return None - package = m.group(1) - result = m.group(2) - if not name.startswith(package): - print name + ':' + package + ':' + stdoutline + '\n' - return None - if result == 'is installed': - installed_state[name] = True - else: - installed_state[name] = False - - return installed_state - -# Function used to make sure a package is present. -def package_present(m, name, installed_state, disable_gpg_check, disable_recommends): - packages = [] - for package in name: - if installed_state[package] is False: - packages.append(package) - if len(packages) != 0: - cmd = ['/usr/bin/zypper', '--non-interactive'] - # add global options before zypper command - if disable_gpg_check: - cmd.append('--no-gpg-check') - - cmd.extend(['install', '--auto-agree-with-licenses']) - # add install parameter - if disable_recommends: - cmd.append('--no-recommends') - cmd.extend(packages) - rc, stdout, stderr = m.run_command(cmd, check_rc=False) - - if rc == 0: - changed=True - else: - changed=False - else: - rc = 0 - stdout = '' - stderr = '' - changed=False - - return (rc, stdout, stderr, changed) - -# Function used to make sure a package is the latest available version. 
-def package_latest(m, name, installed_state, disable_gpg_check, disable_recommends): - - # first of all, make sure all the packages are installed - (rc, stdout, stderr, changed) = package_present(m, name, installed_state, disable_gpg_check) - - # if we've already made a change, we don't have to check whether a version changed - if not changed: - pre_upgrade_versions = get_current_version(m, name) - - cmd = ['/usr/bin/zypper', '--non-interactive', 'update', '--auto-agree-with-licenses'] - cmd.extend(name) - rc, stdout, stderr = m.run_command(cmd, check_rc=False) - - # if we've already made a change, we don't have to check whether a version changed - if not changed: - post_upgrade_versions = get_current_version(m, name) - if pre_upgrade_versions != post_upgrade_versions: - changed = True - - return (rc, stdout, stderr, changed) - -# Function used to make sure a package is not installed. -def package_absent(m, name, installed_state): - packages = [] - for package in name: - if installed_state[package] is True: - packages.append(package) - if len(packages) != 0: - cmd = ['/usr/bin/zypper', '--non-interactive', 'remove'] - cmd.extend(packages) - rc, stdout, stderr = m.run_command(cmd) - - if rc == 0: - changed=True - else: - changed=False - else: - rc = 0 - stdout = '' - stderr = '' - changed=False - - return (rc, stdout, stderr, changed) - -# =========================================== -# Main control flow - -def main(): - module = AnsibleModule( - argument_spec = dict( - name = dict(required=True, aliases=['pkg'], type='list'), - state = dict(required=False, default='present', choices=['absent', 'installed', 'latest', 'present', 'removed']), - disable_gpg_check = dict(required=False, default='no', type='bool'), - disable_recommends = dict(required=False, default='yes', type='bool'), - ), - supports_check_mode = False - ) - - - params = module.params - - name = params['name'] - state = params['state'] - disable_gpg_check = params['disable_gpg_check'] - 
disable_recommends = params['disable_recommends'] - - rc = 0 - stdout = '' - stderr = '' - result = {} - result['name'] = name - result['state'] = state - - # Get package state - installed_state = get_package_state(module, name) - - # Perform requested action - if state in ['installed', 'present']: - (rc, stdout, stderr, changed) = package_present(module, name, installed_state, disable_gpg_check, disable_recommends) - elif state in ['absent', 'removed']: - (rc, stdout, stderr, changed) = package_absent(module, name, installed_state) - elif state == 'latest': - (rc, stdout, stderr, changed) = package_latest(module, name, installed_state, disable_gpg_check, disable_recommends) - - if rc != 0: - if stderr: - module.fail_json(msg=stderr) - else: - module.fail_json(msg=stdout) - - result['changed'] = changed - - module.exit_json(**result) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/packaging/zypper_repository b/library/packaging/zypper_repository deleted file mode 100644 index 1eb4ffdb34..0000000000 --- a/library/packaging/zypper_repository +++ /dev/null @@ -1,221 +0,0 @@ -#!/usr/bin/python -# encoding: utf-8 - -# (c) 2013, Matthias Vogelgesang -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- - -DOCUMENTATION = ''' ---- -module: zypper_repository -author: Matthias Vogelgesang -version_added: "1.4" -short_description: Add and remove Zypper repositories -description: - - Add or remove Zypper repositories on SUSE and openSUSE -options: - name: - required: false - default: none - description: - - A name for the repository. Not required when adding repofiles. - repo: - required: false - default: none - description: - - URI of the repository or .repo file. Required when state=present. - state: - required: false - choices: [ "absent", "present" ] - default: "present" - description: - - A source string state. - description: - required: false - default: none - description: - - A description of the repository - disable_gpg_check: - description: - - Whether to disable GPG signature checking of - all packages. Has an effect only if state is - I(present). - required: false - default: "no" - choices: [ "yes", "no" ] - aliases: [] -notes: [] -requirements: [ zypper ] -''' - -EXAMPLES = ''' -# Add NVIDIA repository for graphics drivers -- zypper_repository: name=nvidia-repo repo='ftp://download.nvidia.com/opensuse/12.2' state=present - -# Remove NVIDIA repository -- zypper_repository: name=nvidia-repo repo='ftp://download.nvidia.com/opensuse/12.2' state=absent - -# Add python development repository -- zypper_repository: repo=http://download.opensuse.org/repositories/devel:/languages:/python/SLE_11_SP3/devel:languages:python.repo -''' -from xml.dom.minidom import parseString as parseXML - -REPO_OPTS = ['alias', 'name', 'priority', 'enabled', 'autorefresh', 'gpgcheck'] - - -def _parse_repos(module): - """parses the output of zypper -x lr and returns a parse repo dictionary""" - cmd = ['/usr/bin/zypper', '-x', 'lr'] - repos = [] - - rc, stdout, stderr = module.run_command(cmd, check_rc=True) - dom = parseXML(stdout) - repo_list = dom.getElementsByTagName('repo') - for repo in repo_list: - opts = {} - for o in REPO_OPTS: - opts[o] = repo.getAttribute(o) - opts['url'] = 
repo.getElementsByTagName('url')[0].firstChild.data - # A repo can be uniquely identified by an alias + url - repos.append(opts) - - return repos - - -def repo_exists(module, **kwargs): - - def repo_subset(realrepo, repocmp): - for k in repocmp: - if k not in realrepo: - return False - - for k, v in realrepo.items(): - if k in repocmp: - if v.rstrip("/") != repocmp[k].rstrip("/"): - return False - return True - - repos = _parse_repos(module) - - for repo in repos: - if repo_subset(repo, kwargs): - return True - return False - - -def add_repo(module, repo, alias, description, disable_gpg_check): - cmd = ['/usr/bin/zypper', 'ar', '--check', '--refresh'] - - if description: - cmd.extend(['--name', description]) - - if disable_gpg_check: - cmd.append('--no-gpgcheck') - - cmd.append(repo) - - if not repo.endswith('.repo'): - cmd.append(alias) - - rc, stdout, stderr = module.run_command(cmd, check_rc=False) - changed = rc == 0 - if rc == 0: - changed = True - elif 'already exists. Please use another alias' in stderr: - changed = False - else: - module.fail_json(msg=stderr if stderr else stdout) - - return changed - - -def remove_repo(module, repo, alias): - - cmd = ['/usr/bin/zypper', 'rr'] - if alias: - cmd.append(alias) - else: - cmd.append(repo) - - rc, stdout, stderr = module.run_command(cmd, check_rc=True) - changed = rc == 0 - return changed - - -def fail_if_rc_is_null(module, rc, stdout, stderr): - if rc != 0: - module.fail_json(msg=stderr if stderr else stdout) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(required=False), - repo=dict(required=False), - state=dict(choices=['present', 'absent'], default='present'), - description=dict(required=False), - disable_gpg_check = dict(required=False, default='no', type='bool'), - ), - supports_check_mode=False, - ) - - repo = module.params['repo'] - state = module.params['state'] - name = module.params['name'] - description = module.params['description'] - disable_gpg_check = 
module.params['disable_gpg_check'] - - def exit_unchanged(): - module.exit_json(changed=False, repo=repo, state=state, name=name) - - # Check run-time module parameters - if state == 'present' and not repo: - module.fail_json(msg='Module option state=present requires repo') - if state == 'absent' and not repo and not name: - module.fail_json(msg='Alias or repo parameter required when state=absent') - - if repo and repo.endswith('.repo'): - if name: - module.fail_json(msg='Incompatible option: \'name\'. Do not use name when adding repo files') - else: - if not name and state == "present": - module.fail_json(msg='Name required when adding non-repo files:') - - if repo and repo.endswith('.repo'): - exists = repo_exists(module, url=repo, alias=name) - elif repo: - exists = repo_exists(module, url=repo) - else: - exists = repo_exists(module, alias=name) - - if state == 'present': - if exists: - exit_unchanged() - - changed = add_repo(module, repo, name, description, disable_gpg_check) - elif state == 'absent': - if not exists: - exit_unchanged() - - changed = remove_repo(module, repo, name) - - module.exit_json(changed=changed, repo=repo, state=state) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/source_control/bzr b/library/source_control/bzr deleted file mode 100644 index 996150a39a..0000000000 --- a/library/source_control/bzr +++ /dev/null @@ -1,198 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, André Paramés -# Based on the Git module by Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = u''' ---- -module: bzr -author: André Paramés -version_added: "1.1" -short_description: Deploy software (or files) from bzr branches -description: - - Manage I(bzr) branches to deploy files or software. -options: - name: - required: true - aliases: [ 'parent' ] - description: - - SSH or HTTP protocol address of the parent branch. - dest: - required: true - description: - - Absolute path of where the branch should be cloned to. - version: - required: false - default: "head" - description: - - What version of the branch to clone. This can be the - bzr revno or revid. - force: - required: false - default: "yes" - choices: [ 'yes', 'no' ] - description: - - If C(yes), any modified files in the working - tree will be discarded. - executable: - required: false - default: null - version_added: "1.4" - description: - - Path to bzr executable to use. If not supplied, - the normal mechanism for resolving binary paths will be used. 
-''' - -EXAMPLES = ''' -# Example bzr checkout from Ansible Playbooks -- bzr: name=bzr+ssh://foosball.example.org/path/to/branch dest=/srv/checkout version=22 -''' - -import re - - -class Bzr(object): - def __init__(self, module, parent, dest, version, bzr_path): - self.module = module - self.parent = parent - self.dest = dest - self.version = version - self.bzr_path = bzr_path - - def _command(self, args_list, cwd=None, **kwargs): - (rc, out, err) = self.module.run_command([self.bzr_path] + args_list, cwd=cwd, **kwargs) - return (rc, out, err) - - def get_version(self): - '''samples the version of the bzr branch''' - - cmd = "%s revno" % self.bzr_path - rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest) - revno = stdout.strip() - return revno - - def clone(self): - '''makes a new bzr branch if it does not already exist''' - dest_dirname = os.path.dirname(self.dest) - try: - os.makedirs(dest_dirname) - except: - pass - if self.version.lower() != 'head': - args_list = ["branch", "-r", self.version, self.parent, self.dest] - else: - args_list = ["branch", self.parent, self.dest] - return self._command(args_list, check_rc=True, cwd=dest_dirname) - - def has_local_mods(self): - - cmd = "%s status -S" % self.bzr_path - rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest) - lines = stdout.splitlines() - - lines = filter(lambda c: not re.search('^\\?\\?.*$', c), lines) - return len(lines) > 0 - - def reset(self, force): - ''' - Resets the index and working tree to head. - Discards any changes to tracked files in the working - tree since that commit. 
- ''' - if not force and self.has_local_mods(): - self.module.fail_json(msg="Local modifications exist in branch (force=no).") - return self._command(["revert"], check_rc=True, cwd=self.dest) - - def fetch(self): - '''updates branch from remote sources''' - if self.version.lower() != 'head': - (rc, out, err) = self._command(["pull", "-r", self.version], cwd=self.dest) - else: - (rc, out, err) = self._command(["pull"], cwd=self.dest) - if rc != 0: - self.module.fail_json(msg="Failed to pull") - return (rc, out, err) - - def switch_version(self): - '''once pulled, switch to a particular revno or revid''' - if self.version.lower() != 'head': - args_list = ["revert", "-r", self.version] - else: - args_list = ["revert"] - return self._command(args_list, check_rc=True, cwd=self.dest) - -# =========================================== - -def main(): - module = AnsibleModule( - argument_spec = dict( - dest=dict(required=True), - name=dict(required=True, aliases=['parent']), - version=dict(default='head'), - force=dict(default='yes', type='bool'), - executable=dict(default=None), - ) - ) - - dest = os.path.abspath(os.path.expanduser(module.params['dest'])) - parent = module.params['name'] - version = module.params['version'] - force = module.params['force'] - bzr_path = module.params['executable'] or module.get_bin_path('bzr', True) - - bzrconfig = os.path.join(dest, '.bzr', 'branch', 'branch.conf') - - rc, out, err, status = (0, None, None, None) - - bzr = Bzr(module, parent, dest, version, bzr_path) - - # if there is no bzr configuration, do a branch operation - # else pull and switch the version - before = None - local_mods = False - if not os.path.exists(bzrconfig): - (rc, out, err) = bzr.clone() - - else: - # else do a pull - local_mods = bzr.has_local_mods() - before = bzr.get_version() - (rc, out, err) = bzr.reset(force) - if rc != 0: - module.fail_json(msg=err) - (rc, out, err) = bzr.fetch() - if rc != 0: - module.fail_json(msg=err) - - # switch to version specified 
regardless of whether - # we cloned or pulled - (rc, out, err) = bzr.switch_version() - - # determine if we changed anything - after = bzr.get_version() - changed = False - - if before != after or local_mods: - changed = True - - module.exit_json(changed=changed, before=before, after=after) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/source_control/git b/library/source_control/git deleted file mode 100644 index a5d94e3dbb..0000000000 --- a/library/source_control/git +++ /dev/null @@ -1,607 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: git -author: Michael DeHaan -version_added: "0.0.1" -short_description: Deploy software (or files) from git checkouts -description: - - Manage I(git) checkouts of repositories to deploy files or software. -options: - repo: - required: true - aliases: [ name ] - description: - - git, SSH, or HTTP protocol address of the git repository. - dest: - required: false - description: - - Absolute path of where the repository should be checked out to. - This parameter is required, unless C(update) is set to C(no) - This change was made in version 1.8. Prior to this version, the - C(dest) parameter was always required. 
- version: - required: false - default: "HEAD" - description: - - What version of the repository to check out. This can be the - full 40-character I(SHA-1) hash, the literal string C(HEAD), a - branch name, or a tag name. - accept_hostkey: - required: false - default: "no" - choices: [ "yes", "no" ] - version_added: "1.5" - description: - - if C(yes), adds the hostkey for the repo url if not already - added. If ssh_args contains "-o StrictHostKeyChecking=no", - this parameter is ignored. - ssh_opts: - required: false - default: None - version_added: "1.5" - description: - - Creates a wrapper script and exports the path as GIT_SSH - which git then automatically uses to override ssh arguments. - An example value could be "-o StrictHostKeyChecking=no" - key_file: - required: false - default: None - version_added: "1.5" - description: - - Specify an optional private key file to use for the checkout. - reference: - required: false - default: null - version_added: "1.4" - description: - - Reference repository (see "git clone --reference ...") - remote: - required: false - default: "origin" - description: - - Name of the remote. - force: - required: false - default: "yes" - choices: [ "yes", "no" ] - version_added: "0.7" - description: - - If C(yes), any modified files in the working - repository will be discarded. Prior to 0.7, this was always - 'yes' and could not be disabled. - depth: - required: false - default: null - version_added: "1.2" - description: - - Create a shallow clone with a history truncated to the specified - number or revisions. The minimum possible value is C(1), otherwise - ignored. - update: - required: false - default: "yes" - choices: [ "yes", "no" ] - version_added: "1.2" - description: - - If C(no), just returns information about the repository without updating. - executable: - required: false - default: null - version_added: "1.4" - description: - - Path to git executable to use. 
If not supplied, - the normal mechanism for resolving binary paths will be used. - bare: - required: false - default: "no" - choices: [ "yes", "no" ] - version_added: "1.4" - description: - - if C(yes), repository will be created as a bare repo, otherwise - it will be a standard repo with a workspace. - - recursive: - required: false - default: "yes" - choices: [ "yes", "no" ] - version_added: "1.6" - description: - - if C(no), repository will be cloned without the --recursive - option, skipping sub-modules. -notes: - - "If the task seems to be hanging, first verify remote host is in C(known_hosts). - SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt, - one solution is to add the remote host public key in C(/etc/ssh/ssh_known_hosts) before calling - the git module, with the following command: ssh-keyscan -H remote_host.com >> /etc/ssh/ssh_known_hosts." -''' - -EXAMPLES = ''' -# Example git checkout from Ansible Playbooks -- git: repo=git://foosball.example.org/path/to/repo.git - dest=/srv/checkout - version=release-0.22 - -# Example read-write git checkout from github -- git: repo=ssh://git@github.com/mylogin/hello.git dest=/home/mylogin/hello - -# Example just ensuring the repo checkout exists -- git: repo=git://foosball.example.org/path/to/repo.git dest=/srv/checkout update=no -''' - -import re -import tempfile - -def get_submodule_update_params(module, git_path, cwd): - - #or: git submodule [--quiet] update [--init] [-N|--no-fetch] - #[-f|--force] [--rebase] [--reference ] [--merge] - #[--recursive] [--] [...] 
- - params = [] - - # run a bad submodule command to get valid params - cmd = "%s submodule update --help" % (git_path) - rc, stdout, stderr = module.run_command(cmd, cwd=cwd) - lines = stderr.split('\n') - update_line = None - for line in lines: - if 'git submodule [--quiet] update ' in line: - update_line = line - if update_line: - update_line = update_line.replace('[','') - update_line = update_line.replace(']','') - update_line = update_line.replace('|',' ') - parts = shlex.split(update_line) - for part in parts: - if part.startswith('--'): - part = part.replace('--', '') - params.append(part) - - return params - -def write_ssh_wrapper(): - module_dir = get_module_path() - try: - # make sure we have full permission to the module_dir, which - # may not be the case if we're sudo'ing to a non-root user - if os.access(module_dir, os.W_OK|os.R_OK|os.X_OK): - fd, wrapper_path = tempfile.mkstemp(prefix=module_dir + '/') - else: - raise OSError - except (IOError, OSError): - fd, wrapper_path = tempfile.mkstemp() - fh = os.fdopen(fd, 'w+b') - template = """#!/bin/sh -if [ -z "$GIT_SSH_OPTS" ]; then - BASEOPTS="" -else - BASEOPTS=$GIT_SSH_OPTS -fi - -if [ -z "$GIT_KEY" ]; then - ssh $BASEOPTS "$@" -else - ssh -i "$GIT_KEY" $BASEOPTS "$@" -fi -""" - fh.write(template) - fh.close() - st = os.stat(wrapper_path) - os.chmod(wrapper_path, st.st_mode | stat.S_IEXEC) - return wrapper_path - -def set_git_ssh(ssh_wrapper, key_file, ssh_opts): - - if os.environ.get("GIT_SSH"): - del os.environ["GIT_SSH"] - os.environ["GIT_SSH"] = ssh_wrapper - - if os.environ.get("GIT_KEY"): - del os.environ["GIT_KEY"] - - if key_file: - os.environ["GIT_KEY"] = key_file - - if os.environ.get("GIT_SSH_OPTS"): - del os.environ["GIT_SSH_OPTS"] - - if ssh_opts: - os.environ["GIT_SSH_OPTS"] = ssh_opts - -def get_version(module, git_path, dest, ref="HEAD"): - ''' samples the version of the git repo ''' - - cmd = "%s rev-parse %s" % (git_path, ref) - rc, stdout, stderr = module.run_command(cmd, cwd=dest) 
- sha = stdout.rstrip('\n') - return sha - -def clone(git_path, module, repo, dest, remote, depth, version, bare, - reference, recursive): - ''' makes a new git repo if it does not already exist ''' - dest_dirname = os.path.dirname(dest) - try: - os.makedirs(dest_dirname) - except: - pass - cmd = [ git_path, 'clone' ] - if bare: - cmd.append('--bare') - else: - cmd.extend([ '--origin', remote ]) - if recursive: - cmd.extend([ '--recursive' ]) - if is_remote_branch(git_path, module, dest, repo, version) \ - or is_remote_tag(git_path, module, dest, repo, version): - cmd.extend([ '--branch', version ]) - if depth: - cmd.extend([ '--depth', str(depth) ]) - if reference: - cmd.extend([ '--reference', str(reference) ]) - cmd.extend([ repo, dest ]) - module.run_command(cmd, check_rc=True, cwd=dest_dirname) - if bare: - if remote != 'origin': - module.run_command([git_path, 'remote', 'add', remote, repo], check_rc=True, cwd=dest) - -def has_local_mods(module, git_path, dest, bare): - if bare: - return False - - cmd = "%s status -s" % (git_path) - rc, stdout, stderr = module.run_command(cmd, cwd=dest) - lines = stdout.splitlines() - lines = filter(lambda c: not re.search('^\\?\\?.*$', c), lines) - - return len(lines) > 0 - -def reset(git_path, module, dest): - ''' - Resets the index and working tree to HEAD. - Discards any changes to tracked files in working - tree since that commit. 
- ''' - cmd = "%s reset --hard HEAD" % (git_path,) - return module.run_command(cmd, check_rc=True, cwd=dest) - -def get_remote_head(git_path, module, dest, version, remote, bare): - cloning = False - cwd = None - if remote == module.params['repo']: - cloning = True - else: - cwd = dest - if version == 'HEAD': - if cloning: - # cloning the repo, just get the remote's HEAD version - cmd = '%s ls-remote %s -h HEAD' % (git_path, remote) - else: - head_branch = get_head_branch(git_path, module, dest, remote, bare) - cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, head_branch) - elif is_remote_branch(git_path, module, dest, remote, version): - cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, version) - elif is_remote_tag(git_path, module, dest, remote, version): - cmd = '%s ls-remote %s -t refs/tags/%s' % (git_path, remote, version) - else: - # appears to be a sha1. return as-is since it appears - # cannot check for a specific sha1 on remote - return version - (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=cwd) - if len(out) < 1: - module.fail_json(msg="Could not determine remote revision for %s" % version) - rev = out.split()[0] - return rev - -def is_remote_tag(git_path, module, dest, remote, version): - cmd = '%s ls-remote %s -t refs/tags/%s' % (git_path, remote, version) - (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest) - if version in out: - return True - else: - return False - -def get_branches(git_path, module, dest): - branches = [] - cmd = '%s branch -a' % (git_path,) - (rc, out, err) = module.run_command(cmd, cwd=dest) - if rc != 0: - module.fail_json(msg="Could not determine branch data - received %s" % out) - for line in out.split('\n'): - branches.append(line.strip()) - return branches - -def get_tags(git_path, module, dest): - tags = [] - cmd = '%s tag' % (git_path,) - (rc, out, err) = module.run_command(cmd, cwd=dest) - if rc != 0: - module.fail_json(msg="Could not determine tag data - received %s" 
% out) - for line in out.split('\n'): - tags.append(line.strip()) - return tags - -def is_remote_branch(git_path, module, dest, remote, version): - cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, version) - (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest) - if version in out: - return True - else: - return False - -def is_local_branch(git_path, module, dest, branch): - branches = get_branches(git_path, module, dest) - lbranch = '%s' % branch - if lbranch in branches: - return True - elif '* %s' % branch in branches: - return True - else: - return False - -def is_not_a_branch(git_path, module, dest): - branches = get_branches(git_path, module, dest) - for b in branches: - if b.startswith('* ') and 'no branch' in b: - return True - return False - -def get_head_branch(git_path, module, dest, remote, bare=False): - ''' - Determine what branch HEAD is associated with. This is partly - taken from lib/ansible/utils/__init__.py. It finds the correct - path to .git/HEAD and reads from that file the branch that HEAD is - associated with. In the case of a detached HEAD, this will look - up the branch in .git/refs/remotes//HEAD. - ''' - if bare: - repo_path = dest - else: - repo_path = os.path.join(dest, '.git') - # Check if the .git is a file. If it is a file, it means that we are in a submodule structure. - if os.path.isfile(repo_path): - try: - gitdir = yaml.safe_load(open(repo_path)).get('gitdir') - # There is a posibility the .git file to have an absolute path. - if os.path.isabs(gitdir): - repo_path = gitdir - else: - repo_path = os.path.join(repo_path.split('.git')[0], gitdir) - except (IOError, AttributeError): - return '' - # Read .git/HEAD for the name of the branch. 
- # If we're in a detached HEAD state, look up the branch associated with - # the remote HEAD in .git/refs/remotes//HEAD - f = open(os.path.join(repo_path, "HEAD")) - if is_not_a_branch(git_path, module, dest): - f.close() - f = open(os.path.join(repo_path, 'refs', 'remotes', remote, 'HEAD')) - branch = f.readline().split('/')[-1].rstrip("\n") - f.close() - return branch - -def fetch(git_path, module, repo, dest, version, remote, bare): - ''' updates repo from remote sources ''' - (rc, out0, err0) = module.run_command([git_path, 'remote', 'set-url', remote, repo], cwd=dest) - if rc != 0: - module.fail_json(msg="Failed to set a new url %s for %s: %s" % (repo, remote, out0 + err0)) - if bare: - (rc, out1, err1) = module.run_command([git_path, 'fetch', remote, '+refs/heads/*:refs/heads/*'], cwd=dest) - else: - (rc, out1, err1) = module.run_command("%s fetch %s" % (git_path, remote), cwd=dest) - if rc != 0: - module.fail_json(msg="Failed to download remote objects and refs") - - if bare: - (rc, out2, err2) = module.run_command([git_path, 'fetch', remote, '+refs/tags/*:refs/tags/*'], cwd=dest) - else: - (rc, out2, err2) = module.run_command("%s fetch --tags %s" % (git_path, remote), cwd=dest) - if rc != 0: - module.fail_json(msg="Failed to download remote objects and refs") - (rc, out3, err3) = submodule_update(git_path, module, dest) - return (rc, out1 + out2 + out3, err1 + err2 + err3) - -def submodule_update(git_path, module, dest): - ''' init and update any submodules ''' - - # get the valid submodule params - params = get_submodule_update_params(module, git_path, dest) - - # skip submodule commands if .gitmodules is not present - if not os.path.exists(os.path.join(dest, '.gitmodules')): - return (0, '', '') - cmd = [ git_path, 'submodule', 'sync' ] - (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest) - if 'remote' in params: - cmd = [ git_path, 'submodule', 'update', '--init', '--recursive' ,'--remote' ] - else: - cmd = [ git_path, 'submodule', 
'update', '--init', '--recursive' ] - (rc, out, err) = module.run_command(cmd, cwd=dest) - if rc != 0: - module.fail_json(msg="Failed to init/update submodules: %s" % out + err) - return (rc, out, err) - -def switch_version(git_path, module, dest, remote, version, recursive): - ''' once pulled, switch to a particular SHA, tag, or branch ''' - cmd = '' - if version != 'HEAD': - if is_remote_branch(git_path, module, dest, remote, version): - if not is_local_branch(git_path, module, dest, version): - cmd = "%s checkout --track -b %s %s/%s" % (git_path, version, remote, version) - else: - (rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, version), cwd=dest) - if rc != 0: - module.fail_json(msg="Failed to checkout branch %s" % version) - cmd = "%s reset --hard %s/%s" % (git_path, remote, version) - else: - cmd = "%s checkout --force %s" % (git_path, version) - else: - branch = get_head_branch(git_path, module, dest, remote) - (rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, branch), cwd=dest) - if rc != 0: - module.fail_json(msg="Failed to checkout branch %s" % branch) - cmd = "%s reset --hard %s" % (git_path, remote) - (rc, out1, err1) = module.run_command(cmd, cwd=dest) - if rc != 0: - if version != 'HEAD': - module.fail_json(msg="Failed to checkout %s" % (version)) - else: - module.fail_json(msg="Failed to checkout branch %s" % (branch)) - if recursive: - (rc, out2, err2) = submodule_update(git_path, module, dest) - out1 += out2 - err1 += err1 - return (rc, out1, err1) - -# =========================================== - -def main(): - module = AnsibleModule( - argument_spec = dict( - dest=dict(), - repo=dict(required=True, aliases=['name']), - version=dict(default='HEAD'), - remote=dict(default='origin'), - reference=dict(default=None), - force=dict(default='yes', type='bool'), - depth=dict(default=None, type='int'), - update=dict(default='yes', type='bool'), - accept_hostkey=dict(default='no', type='bool'), - 
key_file=dict(default=None, required=False), - ssh_opts=dict(default=None, required=False), - executable=dict(default=None), - bare=dict(default='no', type='bool'), - recursive=dict(default='yes', type='bool'), - ), - supports_check_mode=True - ) - - dest = module.params['dest'] - repo = module.params['repo'] - version = module.params['version'] - remote = module.params['remote'] - force = module.params['force'] - depth = module.params['depth'] - update = module.params['update'] - bare = module.params['bare'] - reference = module.params['reference'] - git_path = module.params['executable'] or module.get_bin_path('git', True) - key_file = module.params['key_file'] - ssh_opts = module.params['ssh_opts'] - - gitconfig = None - if not dest and update: - module.fail_json(msg="the destination directory must be specified unless update=no") - elif dest: - dest = os.path.abspath(os.path.expanduser(dest)) - if bare: - gitconfig = os.path.join(dest, 'config') - else: - gitconfig = os.path.join(dest, '.git', 'config') - - # create a wrapper script and export - # GIT_SSH= as an environment variable - # for git to use the wrapper script - ssh_wrapper = None - if key_file or ssh_opts: - ssh_wrapper = write_ssh_wrapper() - set_git_ssh(ssh_wrapper, key_file, ssh_opts) - module.add_cleanup_file(path=ssh_wrapper) - - # add the git repo's hostkey - if module.params['ssh_opts'] is not None: - if not "-o StrictHostKeyChecking=no" in module.params['ssh_opts']: - add_git_host_key(module, repo, accept_hostkey=module.params['accept_hostkey']) - else: - add_git_host_key(module, repo, accept_hostkey=module.params['accept_hostkey']) - - recursive = module.params['recursive'] - - rc, out, err, status = (0, None, None, None) - - before = None - local_mods = False - if gitconfig and not os.path.exists(gitconfig) or not gitconfig and not update: - # if there is no git configuration, do a clone operation unless the - # user requested no updates or we're doing a check mode test (in - # which case we 
do a ls-remote), otherwise clone the repo - if module.check_mode or not update: - remote_head = get_remote_head(git_path, module, dest, version, repo, bare) - module.exit_json(changed=True, before=before, after=remote_head) - # there's no git config, so clone - clone(git_path, module, repo, dest, remote, depth, version, bare, reference, recursive) - elif not update: - # Just return having found a repo already in the dest path - # this does no checking that the repo is the actual repo - # requested. - before = get_version(module, git_path, dest) - module.exit_json(changed=False, before=before, after=before) - else: - # else do a pull - local_mods = has_local_mods(module, git_path, dest, bare) - before = get_version(module, git_path, dest) - if local_mods: - # failure should happen regardless of check mode - if not force: - module.fail_json(msg="Local modifications exist in repository (force=no).") - # if force and in non-check mode, do a reset - if not module.check_mode: - reset(git_path, module, dest) - # exit if already at desired sha version - remote_head = get_remote_head(git_path, module, dest, version, remote, bare) - if before == remote_head: - if local_mods: - module.exit_json(changed=True, before=before, after=remote_head, - msg="Local modifications exist") - elif is_remote_tag(git_path, module, dest, repo, version): - # if the remote is a tag and we have the tag locally, exit early - if version in get_tags(git_path, module, dest): - module.exit_json(changed=False, before=before, after=remote_head) - else: - module.exit_json(changed=False, before=before, after=remote_head) - if module.check_mode: - module.exit_json(changed=True, before=before, after=remote_head) - fetch(git_path, module, repo, dest, version, remote, bare) - - # switch to version specified regardless of whether - # we cloned or pulled - if not bare: - switch_version(git_path, module, dest, remote, version, recursive) - - # determine if we changed anything - after = get_version(module, 
git_path, dest) - changed = False - - if before != after or local_mods: - changed = True - - # cleanup the wrapper script - if ssh_wrapper: - os.remove(ssh_wrapper) - - module.exit_json(changed=changed, before=before, after=after) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.known_hosts import * - -main() diff --git a/library/source_control/github_hooks b/library/source_control/github_hooks deleted file mode 100644 index 6a8d1ced93..0000000000 --- a/library/source_control/github_hooks +++ /dev/null @@ -1,178 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Phillip Gentry -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. - -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -import json -import base64 - -DOCUMENTATION = ''' ---- -module: github_hooks -short_description: Manages github service hooks. -description: - - Adds service hooks and removes service hooks that have an error status. -version_added: "1.4" -options: - user: - description: - - Github username. - required: true - oauthkey: - description: - - The oauth key provided by github. It can be found/generated on github under "Edit Your Profile" >> "Applications" >> "Personal Access Tokens" - required: true - repo: - description: - - "This is the API url for the repository you want to manage hooks for. It should be in the form of: https://api.github.com/repos/user:/repo:. 
Note this is different than the normal repo url." - required: true - hookurl: - description: - - When creating a new hook, this is the url that you want github to post to. It is only required when creating a new hook. - required: false - action: - description: - - This tells the githooks module what you want it to do. - required: true - choices: [ "create", "cleanall" ] - validate_certs: - description: - - If C(no), SSL certificates for the target repo will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - required: false - default: 'yes' - choices: ['yes', 'no'] - -author: Phillip Gentry, CX Inc -''' - -EXAMPLES = ''' -# Example creating a new service hook. It ignores duplicates. -- github_hooks: action=create hookurl=http://11.111.111.111:2222 user={{ gituser }} oauthkey={{ oauthkey }} repo=https://api.github.com/repos/pcgentry/Github-Auto-Deploy - -# Cleaning all hooks for this repo that had an error on the last update. Since this works for all hooks in a repo it is probably best that this would be called from a handler. 
-- local_action: github_hooks action=cleanall user={{ gituser }} oauthkey={{ oauthkey }} repo={{ repo }} -''' - -def list(module, hookurl, oauthkey, repo, user): - url = "%s/hooks" % repo - auth = base64.encodestring('%s:%s' % (user, oauthkey)).replace('\n', '') - headers = { - 'Authorization': 'Basic %s' % auth, - } - response, info = fetch_url(module, url, headers=headers) - if info['status'] != 200: - return False, '' - else: - return False, response.read() - -def clean504(module, hookurl, oauthkey, repo, user): - current_hooks = list(hookurl, oauthkey, repo, user)[1] - decoded = json.loads(current_hooks) - - for hook in decoded: - if hook['last_response']['code'] == 504: - # print "Last response was an ERROR for hook:" - # print hook['id'] - delete(module, hookurl, oauthkey, repo, user, hook['id']) - - return 0, current_hooks - -def cleanall(module, hookurl, oauthkey, repo, user): - current_hooks = list(hookurl, oauthkey, repo, user)[1] - decoded = json.loads(current_hooks) - - for hook in decoded: - if hook['last_response']['code'] != 200: - # print "Last response was an ERROR for hook:" - # print hook['id'] - delete(module, hookurl, oauthkey, repo, user, hook['id']) - - return 0, current_hooks - -def create(module, hookurl, oauthkey, repo, user): - url = "%s/hooks" % repo - values = { - "active": True, - "name": "web", - "config": { - "url": "%s" % hookurl, - "content_type": "json" - } - } - data = json.dumps(values) - auth = base64.encodestring('%s:%s' % (user, oauthkey)).replace('\n', '') - headers = { - 'Authorization': 'Basic %s' % auth, - } - response, info = fetch_url(module, url, data=data, headers=headers) - if info['status'] != 200: - return 0, '[]' - else: - return 0, response.read() - -def delete(module, hookurl, oauthkey, repo, user, hookid): - url = "%s/hooks/%s" % (repo, hookid) - auth = base64.encodestring('%s:%s' % (user, oauthkey)).replace('\n', '') - headers = { - 'Authorization': 'Basic %s' % auth, - } - response, info = fetch_url(module, 
url, data=data, headers=headers, method='DELETE') - return response.read() - -def main(): - module = AnsibleModule( - argument_spec=dict( - action=dict(required=True), - hookurl=dict(required=False), - oauthkey=dict(required=True), - repo=dict(required=True), - user=dict(required=True), - validate_certs=dict(default='yes', type='bool'), - ) - ) - - action = module.params['action'] - hookurl = module.params['hookurl'] - oauthkey = module.params['oauthkey'] - repo = module.params['repo'] - user = module.params['user'] - - if action == "list": - (rc, out) = list(module, hookurl, oauthkey, repo, user) - - if action == "clean504": - (rc, out) = clean504(module, hookurl, oauthkey, repo, user) - - if action == "cleanall": - (rc, out) = cleanall(module, hookurl, oauthkey, repo, user) - - if action == "create": - (rc, out) = create(module, hookurl, oauthkey, repo, user) - - if rc != 0: - module.fail_json(msg="failed", result=out) - - module.exit_json(msg="success", result=out) - - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * - -main() diff --git a/library/source_control/hg b/library/source_control/hg deleted file mode 100644 index 1b95bcd5ac..0000000000 --- a/library/source_control/hg +++ /dev/null @@ -1,238 +0,0 @@ -#!/usr/bin/python -#-*- coding: utf-8 -*- - -# (c) 2013, Yeukhon Wong -# -# This module was originally inspired by Brad Olson's ansible-module-mercurial -# . This module tends -# to follow the git module implementation. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -import ConfigParser - -DOCUMENTATION = ''' ---- -module: hg -short_description: Manages Mercurial (hg) repositories. -description: - - Manages Mercurial (hg) repositories. Supports SSH, HTTP/S and local address. -version_added: "1.0" -author: Yeukhon Wong -options: - repo: - description: - - The repository address. - required: true - default: null - aliases: [ name ] - dest: - description: - - Absolute path of where the repository should be cloned to. - required: true - default: null - revision: - description: - - Equivalent C(-r) option in hg command which could be the changeset, revision number, - branch name or even tag. - required: false - default: "default" - aliases: [ version ] - force: - description: - - Discards uncommitted changes. Runs C(hg update -C). - required: false - default: "yes" - choices: [ "yes", "no" ] - purge: - description: - - Deletes untracked files. Runs C(hg purge). - required: false - default: "no" - choices: [ "yes", "no" ] - executable: - required: false - default: null - version_added: "1.4" - description: - - Path to hg executable to use. If not supplied, - the normal mechanism for resolving binary paths will be used. -notes: - - "If the task seems to be hanging, first verify remote host is in C(known_hosts). - SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt, - one solution is to add the remote host public key in C(/etc/ssh/ssh_known_hosts) before calling - the hg module, with the following command: ssh-keyscan remote_host.com >> /etc/ssh/ssh_known_hosts." -requirements: [ ] -''' - -EXAMPLES = ''' -# Ensure the current working copy is inside the stable branch and deletes untracked files if any. 
-- hg: repo=https://bitbucket.org/user/repo1 dest=/home/user/repo1 revision=stable purge=yes -''' - -class Hg(object): - - def __init__(self, module, dest, repo, revision, hg_path): - self.module = module - self.dest = dest - self.repo = repo - self.revision = revision - self.hg_path = hg_path - - def _command(self, args_list): - (rc, out, err) = self.module.run_command([self.hg_path] + args_list) - return (rc, out, err) - - def _list_untracked(self): - args = ['purge', '--config', 'extensions.purge=', '-R', self.dest, '--print'] - return self._command(args) - - def get_revision(self): - """ - hg id -b -i -t returns a string in the format: - "[+] " - This format lists the state of the current working copy, - and indicates whether there are uncommitted changes by the - plus sign. Otherwise, the sign is omitted. - - Read the full description via hg id --help - """ - (rc, out, err) = self._command(['id', '-b', '-i', '-t', '-R', self.dest]) - if rc != 0: - self.module.fail_json(msg=err) - else: - return out.strip('\n') - - def has_local_mods(self): - now = self.get_revision() - if '+' in now: - return True - else: - return False - - def discard(self): - before = self.has_local_mods() - if not before: - return False - - (rc, out, err) = self._command(['update', '-C', '-R', self.dest]) - if rc != 0: - self.module.fail_json(msg=err) - - after = self.has_local_mods() - if before != after and not after: # no more local modification - return True - - def purge(self): - # before purge, find out if there are any untracked files - (rc1, out1, err1) = self._list_untracked() - if rc1 != 0: - self.module.fail_json(msg=err1) - - # there are some untrackd files - if out1 != '': - args = ['purge', '--config', 'extensions.purge=', '-R', self.dest] - (rc2, out2, err2) = self._command(args) - if rc2 != 0: - self.module.fail_json(msg=err2) - return True - else: - return False - - def cleanup(self, force, purge): - discarded = False - purged = False - - if force: - discarded = 
self.discard() - if purge: - purged = self.purge() - if discarded or purged: - return True - else: - return False - - def pull(self): - return self._command( - ['pull', '-R', self.dest, self.repo]) - - def update(self): - return self._command(['update', '-R', self.dest]) - - def clone(self): - return self._command(['clone', self.repo, self.dest, '-r', self.revision]) - - def switch_version(self): - return self._command(['update', '-r', self.revision, '-R', self.dest]) - -# =========================================== - -def main(): - module = AnsibleModule( - argument_spec = dict( - repo = dict(required=True, aliases=['name']), - dest = dict(required=True), - revision = dict(default="default", aliases=['version']), - force = dict(default='yes', type='bool'), - purge = dict(default='no', type='bool'), - executable = dict(default=None), - ), - ) - repo = module.params['repo'] - dest = os.path.expanduser(module.params['dest']) - revision = module.params['revision'] - force = module.params['force'] - purge = module.params['purge'] - hg_path = module.params['executable'] or module.get_bin_path('hg', True) - hgrc = os.path.join(dest, '.hg/hgrc') - - # initial states - before = '' - changed = False - cleaned = False - - hg = Hg(module, dest, repo, revision, hg_path) - - # If there is no hgrc file, then assume repo is absent - # and perform clone. Otherwise, perform pull and update. 
- if not os.path.exists(hgrc): - (rc, out, err) = hg.clone() - if rc != 0: - module.fail_json(msg=err) - else: - # get the current state before doing pulling - before = hg.get_revision() - - # can perform force and purge - cleaned = hg.cleanup(force, purge) - - (rc, out, err) = hg.pull() - if rc != 0: - module.fail_json(msg=err) - - (rc, out, err) = hg.update() - if rc != 0: - module.fail_json(msg=err) - - hg.switch_version() - after = hg.get_revision() - if before != after or cleaned: - changed = True - module.exit_json(before=before, after=after, changed=changed, cleaned=cleaned) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/source_control/subversion b/library/source_control/subversion deleted file mode 100644 index 6709a8c393..0000000000 --- a/library/source_control/subversion +++ /dev/null @@ -1,231 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: subversion -short_description: Deploys a subversion repository. -description: - - Deploy given repository URL / revision to dest. If dest exists, update to the specified revision, otherwise perform a checkout. -version_added: "0.7" -author: Dane Summers, njharman@gmail.com -notes: - - Requires I(svn) to be installed on the client. 
-requirements: [] -options: - repo: - description: - - The subversion URL to the repository. - required: true - aliases: [ name, repository ] - default: null - dest: - description: - - Absolute path where the repository should be deployed. - required: true - default: null - revision: - description: - - Specific revision to checkout. - required: false - default: HEAD - aliases: [ version ] - force: - description: - - If C(yes), modified files will be discarded. If C(no), module will fail if it encounters modified files. - required: false - default: "yes" - choices: [ "yes", "no" ] - username: - description: - - --username parameter passed to svn. - required: false - default: null - password: - description: - - --password parameter passed to svn. - required: false - default: null - executable: - required: false - default: null - version_added: "1.4" - description: - - Path to svn executable to use. If not supplied, - the normal mechanism for resolving binary paths will be used. - export: - required: false - default: "no" - choices: [ "yes", "no" ] - version_added: "1.6" - description: - - If C(yes), do export instead of checkout/update. -''' - -EXAMPLES = ''' -# Checkout subversion repository to specified folder. 
-- subversion: repo=svn+ssh://an.example.org/path/to/repo dest=/src/checkout - -# Export subversion directory to folder -- subversion: repo=svn+ssh://an.example.org/path/to/repo dest=/src/export export=True -''' - -import re -import tempfile - - -class Subversion(object): - def __init__( - self, module, dest, repo, revision, username, password, svn_path): - self.module = module - self.dest = dest - self.repo = repo - self.revision = revision - self.username = username - self.password = password - self.svn_path = svn_path - - def _exec(self, args): - bits = [ - self.svn_path, - '--non-interactive', - '--trust-server-cert', - '--no-auth-cache', - ] - if self.username: - bits.extend(["--username", self.username]) - if self.password: - bits.extend(["--password", self.password]) - bits.extend(args) - rc, out, err = self.module.run_command(bits, check_rc=True) - return out.splitlines() - - def checkout(self): - '''Creates new svn working directory if it does not already exist.''' - self._exec(["checkout", "-r", self.revision, self.repo, self.dest]) - - def export(self, force=False): - '''Export svn repo to directory''' - self._exec(["export", "-r", self.revision, self.repo, self.dest]) - - def switch(self): - '''Change working directory's repo.''' - # switch to ensure we are pointing at correct repo. - self._exec(["switch", self.repo, self.dest]) - - def update(self): - '''Update existing svn working directory.''' - self._exec(["update", "-r", self.revision, self.dest]) - - def revert(self): - '''Revert svn working directory.''' - self._exec(["revert", "-R", self.dest]) - - def get_revision(self): - '''Revision and URL of subversion working directory.''' - text = '\n'.join(self._exec(["info", self.dest])) - rev = re.search(r'^Revision:.*$', text, re.MULTILINE).group(0) - url = re.search(r'^URL:.*$', text, re.MULTILINE).group(0) - return rev, url - - def has_local_mods(self): - '''True if revisioned files have been added or modified. 
Unrevisioned files are ignored.''' - lines = self._exec(["status", self.dest]) - # Match only revisioned files, i.e. ignore status '?'. - regex = re.compile(r'^[^?]') - # Has local mods if more than 0 modifed revisioned files. - return len(filter(regex.match, lines)) > 0 - - def needs_update(self): - curr, url = self.get_revision() - out2 = '\n'.join(self._exec(["info", "-r", "HEAD", self.dest])) - head = re.search(r'^Revision:.*$', out2, re.MULTILINE).group(0) - rev1 = int(curr.split(':')[1].strip()) - rev2 = int(head.split(':')[1].strip()) - change = False - if rev1 < rev2: - change = True - return change, curr, head - - -# =========================================== - -def main(): - module = AnsibleModule( - argument_spec=dict( - dest=dict(required=True), - repo=dict(required=True, aliases=['name', 'repository']), - revision=dict(default='HEAD', aliases=['rev', 'version']), - force=dict(default='yes', type='bool'), - username=dict(required=False), - password=dict(required=False), - executable=dict(default=None), - export=dict(default=False, required=False, type='bool'), - ), - supports_check_mode=True - ) - - dest = os.path.expanduser(module.params['dest']) - repo = module.params['repo'] - revision = module.params['revision'] - force = module.params['force'] - username = module.params['username'] - password = module.params['password'] - svn_path = module.params['executable'] or module.get_bin_path('svn', True) - export = module.params['export'] - - os.environ['LANG'] = 'C' - svn = Subversion(module, dest, repo, revision, username, password, svn_path) - - if not os.path.exists(dest): - before = None - local_mods = False - if module.check_mode: - module.exit_json(changed=True) - if not export: - svn.checkout() - else: - svn.export() - elif os.path.exists("%s/.svn" % (dest, )): - # Order matters. Need to get local mods before switch to avoid false - # positives. Need to switch before revert to ensure we are reverting to - # correct repo. 
- if module.check_mode: - check, before, after = svn.needs_update() - module.exit_json(changed=check, before=before, after=after) - before = svn.get_revision() - local_mods = svn.has_local_mods() - svn.switch() - if local_mods: - if force: - svn.revert() - else: - module.fail_json(msg="ERROR: modified files exist in the repository.") - svn.update() - else: - module.fail_json(msg="ERROR: %s folder already exists, but its not a subversion repository." % (dest, )) - - after = svn.get_revision() - changed = before != after or local_mods - module.exit_json(changed=changed, before=before, after=after) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/system/alternatives b/library/system/alternatives deleted file mode 100755 index b80ffab944..0000000000 --- a/library/system/alternatives +++ /dev/null @@ -1,140 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -""" -Ansible module to manage symbolic link alternatives. -(c) 2014, Gabe Mulley - -This file is part of Ansible - -Ansible is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -Ansible is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with Ansible. If not, see . -""" - -DOCUMENTATION = ''' ---- -module: alternatives -short_description: Manages alternative programs for common commands -description: - - Manages symbolic links using the 'update-alternatives' tool provided on debian-like systems. - - Useful when multiple programs are installed but provide similar functionality (e.g. different editors). 
-version_added: "1.6" -options: - name: - description: - - The generic name of the link. - required: true - path: - description: - - The path to the real executable that the link should point to. - required: true - link: - description: - - The path to the symbolic link that should point to the real executable. - required: false -requirements: [ update-alternatives ] -''' - -EXAMPLES = ''' -- name: correct java version selected - alternatives: name=java path=/usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java - -- name: alternatives link created - alternatives: name=hadoop-conf link=/etc/hadoop/conf path=/etc/hadoop/conf.ansible -''' - -DEFAULT_LINK_PRIORITY = 50 - -def main(): - - module = AnsibleModule( - argument_spec = dict( - name = dict(required=True), - path = dict(required=True), - link = dict(required=False), - ) - ) - - params = module.params - name = params['name'] - path = params['path'] - link = params['link'] - - UPDATE_ALTERNATIVES = module.get_bin_path('update-alternatives',True) - - current_path = None - all_alternatives = [] - - (rc, query_output, query_error) = module.run_command( - [UPDATE_ALTERNATIVES, '--query', name] - ) - - # Gather the current setting and all alternatives from the query output. 
- # Query output should look something like this: - - # Name: java - # Link: /usr/bin/java - # Slaves: - # java.1.gz /usr/share/man/man1/java.1.gz - # Status: manual - # Best: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java - # Value: /usr/lib/jvm/java-6-openjdk-amd64/jre/bin/java - - # Alternative: /usr/lib/jvm/java-6-openjdk-amd64/jre/bin/java - # Priority: 1061 - # Slaves: - # java.1.gz /usr/lib/jvm/java-6-openjdk-amd64/jre/man/man1/java.1.gz - - # Alternative: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java - # Priority: 1071 - # Slaves: - # java.1.gz /usr/lib/jvm/java-7-openjdk-amd64/jre/man/man1/java.1.gz - - if rc == 0: - for line in query_output.splitlines(): - split_line = line.split(':') - if len(split_line) == 2: - key = split_line[0] - value = split_line[1].strip() - if key == 'Value': - current_path = value - elif key == 'Alternative': - all_alternatives.append(value) - elif key == 'Link' and not link: - link = value - - if current_path != path: - try: - # install the requested path if necessary - if path not in all_alternatives: - module.run_command( - [UPDATE_ALTERNATIVES, '--install', link, name, path, str(DEFAULT_LINK_PRIORITY)], - check_rc=True - ) - - # select the requested path - module.run_command( - [UPDATE_ALTERNATIVES, '--set', name, path], - check_rc=True - ) - - module.exit_json(changed=True) - except subprocess.CalledProcessError, cpe: - module.fail_json(msg=str(dir(cpe))) - else: - module.exit_json(changed=False) - - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/system/at b/library/system/at deleted file mode 100644 index c63527563f..0000000000 --- a/library/system/at +++ /dev/null @@ -1,200 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2014, Richard Isaacson -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either 
version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: at -short_description: Schedule the execution of a command or script file via the at command. -description: - - Use this module to schedule a command or script file to run once in the future. - - All jobs are executed in the 'a' queue. -version_added: "1.5" -options: - command: - description: - - A command to be executed in the future. - required: false - default: null - script_file: - description: - - An existing script file to be executed in the future. - required: false - default: null - count: - description: - - The count of units in the future to execute the command or script file. - required: true - units: - description: - - The type of units in the future to execute the command or script file. - required: true - choices: ["minutes", "hours", "days", "weeks"] - state: - description: - - The state dictates if the command or script file should be evaluated as present(added) or absent(deleted). - required: false - choices: ["present", "absent"] - default: "present" - unique: - description: - - If a matching job is present a new job will not be added. - required: false - default: false -requirements: - - at -author: Richard Isaacson -''' - -EXAMPLES = ''' -# Schedule a command to execute in 20 minutes as root. -- at: command="ls -d / > /dev/null" count=20 units="minutes" - -# Match a command to an existing job and delete the job. -- at: command="ls -d / > /dev/null" state="absent" - -# Schedule a command to execute in 20 minutes making sure it is unique in the queue. 
-- at: command="ls -d / > /dev/null" unique=true count=20 units="minutes" -''' - -import os -import tempfile - - -def add_job(module, result, at_cmd, count, units, command, script_file): - at_command = "%s now + %s %s -f %s" % (at_cmd, count, units, script_file) - rc, out, err = module.run_command(at_command, check_rc=True) - if command: - os.unlink(script_file) - result['changed'] = True - - -def delete_job(module, result, at_cmd, command, script_file): - for matching_job in get_matching_jobs(module, at_cmd, script_file): - at_command = "%s -d %s" % (at_cmd, matching_job) - rc, out, err = module.run_command(at_command, check_rc=True) - result['changed'] = True - if command: - os.unlink(script_file) - module.exit_json(**result) - - -def get_matching_jobs(module, at_cmd, script_file): - matching_jobs = [] - - atq_cmd = module.get_bin_path('atq', True) - - # Get list of job numbers for the user. - atq_command = "%s" % atq_cmd - rc, out, err = module.run_command(atq_command, check_rc=True) - current_jobs = out.splitlines() - if len(current_jobs) == 0: - return matching_jobs - - # Read script_file into a string. - script_file_string = open(script_file).read().strip() - - # Loop through the jobs. - # If the script text is contained in a job add job number to list. - for current_job in current_jobs: - split_current_job = current_job.split() - at_command = "%s -c %s" % (at_cmd, split_current_job[0]) - rc, out, err = module.run_command(at_command, check_rc=True) - if script_file_string in out: - matching_jobs.append(split_current_job[0]) - - # Return the list. 
- return matching_jobs - - -def create_tempfile(command): - filed, script_file = tempfile.mkstemp(prefix='at') - fileh = os.fdopen(filed, 'w') - fileh.write(command) - fileh.close() - return script_file - - -def main(): - - module = AnsibleModule( - argument_spec = dict( - command=dict(required=False, - type='str'), - script_file=dict(required=False, - type='str'), - count=dict(required=False, - type='int'), - units=dict(required=False, - default=None, - choices=['minutes', 'hours', 'days', 'weeks'], - type='str'), - state=dict(required=False, - default='present', - choices=['present', 'absent'], - type='str'), - unique=dict(required=False, - default=False, - type='bool') - ), - mutually_exclusive=[['command', 'script_file']], - required_one_of=[['command', 'script_file']], - supports_check_mode=False - ) - - at_cmd = module.get_bin_path('at', True) - - command = module.params['command'] - script_file = module.params['script_file'] - count = module.params['count'] - units = module.params['units'] - state = module.params['state'] - unique = module.params['unique'] - - if (state == 'present') and (not count or not units): - module.fail_json(msg="present state requires count and units") - - result = {'state': state, 'changed': False} - - # If command transform it into a script_file - if command: - script_file = create_tempfile(command) - - # if absent remove existing and return - if state == 'absent': - delete_job(module, result, at_cmd, command, script_file) - - # if unique if existing return unchanged - if unique: - if len(get_matching_jobs(module, at_cmd, script_file)) != 0: - if command: - os.unlink(script_file) - module.exit_json(**result) - - result['script_file'] = script_file - result['count'] = count - result['units'] = units - - add_job(module, result, at_cmd, count, units, command, script_file) - - module.exit_json(**result) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/system/authorized_key 
b/library/system/authorized_key deleted file mode 100644 index f964113127..0000000000 --- a/library/system/authorized_key +++ /dev/null @@ -1,421 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -""" -Ansible module to add authorized_keys for ssh logins. -(c) 2012, Brad Olson - -This file is part of Ansible - -Ansible is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -Ansible is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with Ansible. If not, see . -""" - -DOCUMENTATION = ''' ---- -module: authorized_key -short_description: Adds or removes an SSH authorized key -description: - - Adds or removes an SSH authorized key for a user from a remote host. -version_added: "0.5" -options: - user: - description: - - The username on the remote host whose authorized_keys file will be modified - required: true - default: null - aliases: [] - key: - description: - - The SSH public key, as a string - required: true - default: null - path: - description: - - Alternate path to the authorized_keys file - required: false - default: "(homedir)+/.ssh/authorized_keys" - version_added: "1.2" - manage_dir: - description: - - Whether this module should manage the directory of the authorized key file. If - set, the module will create the directory, as well as set the owner and permissions - of an existing directory. Be sure to - set C(manage_dir=no) if you are using an alternate directory for - authorized_keys, as set with C(path), since you could lock yourself out of - SSH access. See the example below. 
- required: false - choices: [ "yes", "no" ] - default: "yes" - version_added: "1.2" - state: - description: - - Whether the given key (with the given key_options) should or should not be in the file - required: false - choices: [ "present", "absent" ] - default: "present" - key_options: - description: - - A string of ssh key options to be prepended to the key in the authorized_keys file - required: false - default: null - version_added: "1.4" -description: - - "Adds or removes authorized keys for particular user accounts" -author: Brad Olson -''' - -EXAMPLES = ''' -# Example using key data from a local file on the management machine -- authorized_key: user=charlie key="{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}" - -# Using alternate directory locations: -- authorized_key: user=charlie - key="{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}" - path='/etc/ssh/authorized_keys/charlie' - manage_dir=no - -# Using with_file -- name: Set up authorized_keys for the deploy user - authorized_key: user=deploy - key="{{ item }}" - with_file: - - public_keys/doe-jane - - public_keys/doe-john - -# Using key_options: -- authorized_key: user=charlie - key="{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}" - key_options='no-port-forwarding,host="10.0.1.1"' -''' - -# Makes sure the public key line is present or absent in the user's .ssh/authorized_keys. 
-# -# Arguments -# ========= -# user = username -# key = line to add to authorized_keys for user -# path = path to the user's authorized_keys file (default: ~/.ssh/authorized_keys) -# manage_dir = whether to create, and control ownership of the directory (default: true) -# state = absent|present (default: present) -# -# see example in examples/playbooks - -import sys -import os -import pwd -import os.path -import tempfile -import re -import shlex - -class keydict(dict): - - """ a dictionary that maintains the order of keys as they are added """ - - # http://stackoverflow.com/questions/2328235/pythonextend-the-dict-class - - def __init__(self, *args, **kw): - super(keydict,self).__init__(*args, **kw) - self.itemlist = super(keydict,self).keys() - def __setitem__(self, key, value): - self.itemlist.append(key) - super(keydict,self).__setitem__(key, value) - def __iter__(self): - return iter(self.itemlist) - def keys(self): - return self.itemlist - def values(self): - return [self[key] for key in self] - def itervalues(self): - return (self[key] for key in self) - -def keyfile(module, user, write=False, path=None, manage_dir=True): - """ - Calculate name of authorized keys file, optionally creating the - directories and file, properly setting permissions. 
- - :param str user: name of user in passwd file - :param bool write: if True, write changes to authorized_keys file (creating directories if needed) - :param str path: if not None, use provided path rather than default of '~user/.ssh/authorized_keys' - :param bool manage_dir: if True, create and set ownership of the parent dir of the authorized_keys file - :return: full path string to authorized_keys for user - """ - - try: - user_entry = pwd.getpwnam(user) - except KeyError, e: - module.fail_json(msg="Failed to lookup user %s: %s" % (user, str(e))) - if path is None: - homedir = user_entry.pw_dir - sshdir = os.path.join(homedir, ".ssh") - keysfile = os.path.join(sshdir, "authorized_keys") - else: - sshdir = os.path.dirname(path) - keysfile = path - - if not write: - return keysfile - - uid = user_entry.pw_uid - gid = user_entry.pw_gid - - if manage_dir: - if not os.path.exists(sshdir): - os.mkdir(sshdir, 0700) - if module.selinux_enabled(): - module.set_default_selinux_context(sshdir, False) - os.chown(sshdir, uid, gid) - os.chmod(sshdir, 0700) - - if not os.path.exists(keysfile): - basedir = os.path.dirname(keysfile) - if not os.path.exists(basedir): - os.makedirs(basedir) - try: - f = open(keysfile, "w") #touches file so we can set ownership and perms - finally: - f.close() - if module.selinux_enabled(): - module.set_default_selinux_context(keysfile, False) - - try: - os.chown(keysfile, uid, gid) - os.chmod(keysfile, 0600) - except OSError: - pass - - return keysfile - -def parseoptions(module, options): - ''' - reads a string containing ssh-key options - and returns a dictionary of those options - ''' - options_dict = keydict() #ordered dict - if options: - try: - # the following regex will split on commas while - # ignoring those commas that fall within quotes - regex = re.compile(r'''((?:[^,"']|"[^"]*"|'[^']*')+)''') - parts = regex.split(options)[1:-1] - for part in parts: - if "=" in part: - (key, value) = part.split("=", 1) - options_dict[key] = value - 
elif part != ",": - options_dict[part] = None - except: - module.fail_json(msg="invalid option string: %s" % options) - - return options_dict - -def parsekey(module, raw_key): - ''' - parses a key, which may or may not contain a list - of ssh-key options at the beginning - ''' - - VALID_SSH2_KEY_TYPES = [ - 'ssh-ed25519', - 'ecdsa-sha2-nistp256', - 'ecdsa-sha2-nistp384', - 'ecdsa-sha2-nistp521', - 'ssh-dss', - 'ssh-rsa', - ] - - options = None # connection options - key = None # encrypted key string - key_type = None # type of ssh key - type_index = None # index of keytype in key string|list - - # remove comment yaml escapes - raw_key = raw_key.replace('\#', '#') - - # split key safely - lex = shlex.shlex(raw_key) - lex.quotes = [] - lex.commenters = '' #keep comment hashes - lex.whitespace_split = True - key_parts = list(lex) - - for i in range(0, len(key_parts)): - if key_parts[i] in VALID_SSH2_KEY_TYPES: - type_index = i - key_type = key_parts[i] - break - - # check for options - if type_index is None: - return None - elif type_index > 0: - options = " ".join(key_parts[:type_index]) - - # parse the options (if any) - options = parseoptions(module, options) - - # get key after the type index - key = key_parts[(type_index + 1)] - - # set comment to everything after the key - if len(key_parts) > (type_index + 1): - comment = " ".join(key_parts[(type_index + 2):]) - - return (key, key_type, options, comment) - -def readkeys(module, filename): - - if not os.path.isfile(filename): - return {} - - keys = {} - f = open(filename) - for line in f.readlines(): - key_data = parsekey(module, line) - if key_data: - # use key as identifier - keys[key_data[0]] = key_data - else: - # for an invalid line, just append the line - # to the array so it will be re-output later - keys[line] = line - f.close() - return keys - -def writekeys(module, filename, keys): - - fd, tmp_path = tempfile.mkstemp('', 'tmp', os.path.dirname(filename)) - f = open(tmp_path,"w") - try: - for index, key 
in keys.items(): - try: - (keyhash,type,options,comment) = key - option_str = "" - if options: - option_strings = [] - for option_key in options.keys(): - if options[option_key]: - option_strings.append("%s=%s" % (option_key, options[option_key])) - else: - option_strings.append("%s" % option_key) - - option_str = ",".join(option_strings) - option_str += " " - key_line = "%s%s %s %s\n" % (option_str, type, keyhash, comment) - except: - key_line = key - f.writelines(key_line) - except IOError, e: - module.fail_json(msg="Failed to write to file %s: %s" % (tmp_path, str(e))) - f.close() - module.atomic_move(tmp_path, filename) - -def enforce_state(module, params): - """ - Add or remove key. - """ - - user = params["user"] - key = params["key"] - path = params.get("path", None) - manage_dir = params.get("manage_dir", True) - state = params.get("state", "present") - key_options = params.get("key_options", None) - - # extract indivial keys into an array, skipping blank lines and comments - key = [s for s in key.splitlines() if s and not s.startswith('#')] - - - # check current state -- just get the filename, don't create file - do_write = False - params["keyfile"] = keyfile(module, user, do_write, path, manage_dir) - existing_keys = readkeys(module, params["keyfile"]) - - # Check our new keys, if any of them exist we'll continue. - for new_key in key: - parsed_new_key = parsekey(module, new_key) - if key_options is not None: - parsed_options = parseoptions(module, key_options) - parsed_new_key = (parsed_new_key[0], parsed_new_key[1], parsed_options, parsed_new_key[3]) - - if not parsed_new_key: - module.fail_json(msg="invalid key specified: %s" % new_key) - - present = False - matched = False - non_matching_keys = [] - - if parsed_new_key[0] in existing_keys: - present = True - # Then we check if everything matches, including - # the key type and options. 
If not, we append this - # existing key to the non-matching list - # We only want it to match everything when the state - # is present - if parsed_new_key != existing_keys[parsed_new_key[0]] and state == "present": - non_matching_keys.append(existing_keys[parsed_new_key[0]]) - else: - matched = True - - - # handle idempotent state=present - if state=="present": - if len(non_matching_keys) > 0: - for non_matching_key in non_matching_keys: - if non_matching_key[0] in existing_keys: - del existing_keys[non_matching_key[0]] - do_write = True - - if not matched: - existing_keys[parsed_new_key[0]] = parsed_new_key - do_write = True - - elif state=="absent": - if not matched: - continue - del existing_keys[parsed_new_key[0]] - do_write = True - - if do_write: - if module.check_mode: - module.exit_json(changed=True) - writekeys(module, keyfile(module, user, do_write, path, manage_dir), existing_keys) - params['changed'] = True - else: - if module.check_mode: - module.exit_json(changed=False) - - return params - -def main(): - - module = AnsibleModule( - argument_spec = dict( - user = dict(required=True, type='str'), - key = dict(required=True, type='str'), - path = dict(required=False, type='str'), - manage_dir = dict(required=False, type='bool', default=True), - state = dict(default='present', choices=['absent','present']), - key_options = dict(required=False, type='str'), - unique = dict(default=False, type='bool'), - ), - supports_check_mode=True - ) - - results = enforce_state(module, module.params) - module.exit_json(**results) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/system/capabilities b/library/system/capabilities deleted file mode 100644 index f4a9f62c0d..0000000000 --- a/library/system/capabilities +++ /dev/null @@ -1,187 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, Nate Coraor -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# 
it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -DOCUMENTATION = ''' ---- -module: capabilities -short_description: Manage Linux capabilities -description: - - This module manipulates files privileges using the Linux capabilities(7) system. -version_added: "1.6" -options: - path: - description: - - Specifies the path to the file to be managed. - required: true - default: null - capability: - description: - - Desired capability to set (with operator and flags, if state is C(present)) or remove (if state is C(absent)) - required: true - default: null - aliases: [ 'cap' ] - state: - description: - - Whether the entry should be present or absent in the file's capabilities. - choices: [ "present", "absent" ] - default: present -notes: - - The capabilities system will automatically transform operators and flags - into the effective set, so (for example, cap_foo=ep will probably become - cap_foo+ep). This module does not attempt to determine the final operator - and flags to compare, so you will want to ensure that your capabilities - argument matches the final capabilities. 
-requirements: [] -author: Nate Coraor -''' - -EXAMPLES = ''' -# Set cap_sys_chroot+ep on /foo -- capabilities: path=/foo capability=cap_sys_chroot+ep state=present - -# Remove cap_net_bind_service from /bar -- capabilities: path=/bar capability=cap_net_bind_service state=absent -''' - - -OPS = ( '=', '-', '+' ) - -# ============================================================== - -import os -import tempfile -import re - -class CapabilitiesModule(object): - - platform = 'Linux' - distribution = None - - def __init__(self, module): - self.module = module - self.path = module.params['path'].strip() - self.capability = module.params['capability'].strip().lower() - self.state = module.params['state'] - self.getcap_cmd = module.get_bin_path('getcap', required=True) - self.setcap_cmd = module.get_bin_path('setcap', required=True) - self.capability_tup = self._parse_cap(self.capability, op_required=self.state=='present') - - self.run() - - def run(self): - - current = self.getcap(self.path) - caps = [ cap[0] for cap in current ] - - if self.state == 'present' and self.capability_tup not in current: - # need to add capability - if self.module.check_mode: - self.module.exit_json(changed=True, msg='capabilities changed') - else: - # remove from current cap list if it's already set (but op/flags differ) - current = filter(lambda x: x[0] != self.capability_tup[0], current) - # add new cap with correct op/flags - current.append( self.capability_tup ) - self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current)) - elif self.state == 'absent' and self.capability_tup[0] in caps: - # need to remove capability - if self.module.check_mode: - self.module.exit_json(changed=True, msg='capabilities changed') - else: - # remove from current cap list and then set current list - current = filter(lambda x: x[0] != self.capability_tup[0], current) - self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', 
stdout=self.setcap(self.path, current)) - self.module.exit_json(changed=False, state=self.state) - - def getcap(self, path): - rval = [] - cmd = "%s -v %s" % (self.getcap_cmd, path) - rc, stdout, stderr = self.module.run_command(cmd) - # If file xattrs are set but no caps are set the output will be: - # '/foo =' - # If file xattrs are unset the output will be: - # '/foo' - # If the file does not eixst the output will be (with rc == 0...): - # '/foo (No such file or directory)' - if rc != 0 or (stdout.strip() != path and stdout.count(' =') != 1): - self.module.fail_json(msg="Unable to get capabilities of %s" % path, stdout=stdout.strip(), stderr=stderr) - if stdout.strip() != path: - caps = stdout.split(' =')[1].strip().split() - for cap in caps: - cap = cap.lower() - # getcap condenses capabilities with the same op/flags into a - # comma-separated list, so we have to parse that - if ',' in cap: - cap_group = cap.split(',') - cap_group[-1], op, flags = self._parse_cap(cap_group[-1]) - for subcap in cap_group: - rval.append( ( subcap, op, flags ) ) - else: - rval.append(self._parse_cap(cap)) - return rval - - def setcap(self, path, caps): - caps = ' '.join([ ''.join(cap) for cap in caps ]) - cmd = "%s '%s' %s" % (self.setcap_cmd, caps, path) - rc, stdout, stderr = self.module.run_command(cmd) - if rc != 0: - self.module.fail_json(msg="Unable to set capabilities of %s" % path, stdout=stdout, stderr=stderr) - else: - return stdout - - def _parse_cap(self, cap, op_required=True): - opind = -1 - try: - i = 0 - while opind == -1: - opind = cap.find(OPS[i]) - i += 1 - except: - if op_required: - self.module.fail_json(msg="Couldn't find operator (one of: %s)" % str(OPS)) - else: - return (cap, None, None) - op = cap[opind] - cap, flags = cap.split(op) - return (cap, op, flags) - -# ============================================================== -# main - -def main(): - - # defining module - module = AnsibleModule( - argument_spec = dict( - path = dict(aliases=['key'], 
required=True), - capability = dict(aliases=['cap'], required=True), - state = dict(default='present', choices=['present', 'absent']), - ), - supports_check_mode=True - ) - - CapabilitiesModule(module) - - sys.exit(0) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/system/cron b/library/system/cron deleted file mode 100644 index d14f36253c..0000000000 --- a/library/system/cron +++ /dev/null @@ -1,524 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2012, Dane Summers -# (c) 2013, Mike Grozak -# (c) 2013, Patrick Callahan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# -# Cron Plugin: The goal of this plugin is to provide an indempotent method for -# setting up cron jobs on a host. The script will play well with other manually -# entered crons. Each cron job entered will be preceded with a comment -# describing the job so that it can be found later, which is required to be -# present in order for this plugin to find/modify the job. -# -# This module is based on python-crontab by Martin Owens. -# - -DOCUMENTATION = """ ---- -module: cron -short_description: Manage cron.d and crontab entries. -description: - - Use this module to manage crontab entries. This module allows you to create named - crontab entries, update, or delete them. 
- - 'The module includes one line with the description of the crontab entry C("#Ansible: ") - corresponding to the "name" passed to the module, which is used by future ansible/module calls - to find/check the state. The "name" parameter should be unique, and changing the "name" value - will result in a new cron task being created (or a different one being removed)' -version_added: "0.9" -options: - name: - description: - - Description of a crontab entry. - default: null - required: true - user: - description: - - The specific user whose crontab should be modified. - required: false - default: root - job: - description: - - The command to execute. Required if state=present. - required: false - default: null - state: - description: - - Whether to ensure the job is present or absent. - required: false - default: present - choices: [ "present", "absent" ] - cron_file: - description: - - If specified, uses this file in cron.d instead of an individual user's crontab. - required: false - default: null - backup: - description: - - If set, create a backup of the crontab before it is modified. - The location of the backup is returned in the C(backup) variable by this module. - required: false - default: false - minute: - description: - - Minute when the job should run ( 0-59, *, */2, etc ) - required: false - default: "*" - hour: - description: - - Hour when the job should run ( 0-23, *, */2, etc ) - required: false - default: "*" - day: - description: - - Day of the month the job should run ( 1-31, *, */2, etc ) - required: false - default: "*" - aliases: [ "dom" ] - month: - description: - - Month of the year the job should run ( 1-12, *, */2, etc ) - required: false - default: "*" - weekday: - description: - - Day of the week that the job should run ( 0-6 for Sunday-Saturday, *, etc ) - required: false - default: "*" - aliases: [ "dow" ] - reboot: - description: - - If the job should be run at reboot. This option is deprecated. Users should use special_time. 
- version_added: "1.0" - required: false - default: "no" - choices: [ "yes", "no" ] - special_time: - description: - - Special time specification nickname. - version_added: "1.3" - required: false - default: null - choices: [ "reboot", "yearly", "annually", "monthly", "weekly", "daily", "hourly" ] -requirements: - - cron -author: Dane Summers -updates: [ 'Mike Grozak', 'Patrick Callahan' ] -""" - -EXAMPLES = ''' -# Ensure a job that runs at 2 and 5 exists. -# Creates an entry like "* 5,2 * * ls -alh > /dev/null" -- cron: name="check dirs" hour="5,2" job="ls -alh > /dev/null" - -# Ensure an old job is no longer present. Removes any job that is prefixed -# by "#Ansible: an old job" from the crontab -- cron: name="an old job" state=absent - -# Creates an entry like "@reboot /some/job.sh" -- cron: name="a job for reboot" special_time=reboot job="/some/job.sh" - -# Creates a cron file under /etc/cron.d -- cron: name="yum autoupdate" weekday="2" minute=0 hour=12 - user="root" job="YUMINTERACTIVE=0 /usr/sbin/yum-autoupdate" - cron_file=ansible_yum-autoupdate - -# Removes a cron file from under /etc/cron.d -- cron: cron_file=ansible_yum-autoupdate state=absent -''' - -import os -import re -import tempfile -import platform -import pipes - -CRONCMD = "/usr/bin/crontab" - -class CronTabError(Exception): - pass - -class CronTab(object): - """ - CronTab object to write time based crontab file - - user - the user of the crontab (defaults to root) - cron_file - a cron file under /etc/cron.d - """ - def __init__(self, module, user=None, cron_file=None): - self.module = module - self.user = user - self.root = (os.getuid() == 0) - self.lines = None - self.ansible = "#Ansible: " - - # select whether we dump additional debug info through syslog - self.syslogging = False - - if cron_file: - self.cron_file = '/etc/cron.d/%s' % cron_file - else: - self.cron_file = None - - self.read() - - def read(self): - # Read in the crontab from the system - self.lines = [] - if self.cron_file: - # 
read the cronfile - try: - f = open(self.cron_file, 'r') - self.lines = f.read().splitlines() - f.close() - except IOError, e: - # cron file does not exist - return - except: - raise CronTabError("Unexpected error:", sys.exc_info()[0]) - else: - # using safely quoted shell for now, but this really should be two non-shell calls instead. FIXME - (rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True) - - if rc != 0 and rc != 1: # 1 can mean that there are no jobs. - raise CronTabError("Unable to read crontab") - - lines = out.splitlines() - count = 0 - for l in lines: - if count > 2 or (not re.match( r'# DO NOT EDIT THIS FILE - edit the master and reinstall.', l) and - not re.match( r'# \(/tmp/.*installed on.*\)', l) and - not re.match( r'# \(.*version.*\)', l)): - self.lines.append(l) - count += 1 - - def log_message(self, message): - if self.syslogging: - syslog.syslog(syslog.LOG_NOTICE, 'ansible: "%s"' % message) - - def is_empty(self): - if len(self.lines) == 0: - return True - else: - return False - - def write(self, backup_file=None): - """ - Write the crontab to the system. Saves all information. - """ - if backup_file: - fileh = open(backup_file, 'w') - elif self.cron_file: - fileh = open(self.cron_file, 'w') - else: - filed, path = tempfile.mkstemp(prefix='crontab') - fileh = os.fdopen(filed, 'w') - - fileh.write(self.render()) - fileh.close() - - # return if making a backup - if backup_file: - return - - # Add the entire crontab back to the user crontab - if not self.cron_file: - # quoting shell args for now but really this should be two non-shell calls. 
FIXME - (rc, out, err) = self.module.run_command(self._write_execute(path), use_unsafe_shell=True) - os.unlink(path) - - if rc != 0: - self.module.fail_json(msg=err) - - def add_job(self, name, job): - # Add the comment - self.lines.append("%s%s" % (self.ansible, name)) - - # Add the job - self.lines.append("%s" % (job)) - - def update_job(self, name, job): - return self._update_job(name, job, self.do_add_job) - - def do_add_job(self, lines, comment, job): - lines.append(comment) - - lines.append("%s" % (job)) - - def remove_job(self, name): - return self._update_job(name, "", self.do_remove_job) - - def do_remove_job(self, lines, comment, job): - return None - - def remove_job_file(self): - try: - os.unlink(self.cron_file) - return True - except OSError, e: - # cron file does not exist - return False - except: - raise CronTabError("Unexpected error:", sys.exc_info()[0]) - - def find_job(self, name): - comment = None - for l in self.lines: - if comment is not None: - if comment == name: - return [comment, l] - else: - comment = None - elif re.match( r'%s' % self.ansible, l): - comment = re.sub( r'%s' % self.ansible, '', l) - - return [] - - def get_cron_job(self,minute,hour,day,month,weekday,job,special): - if special: - if self.cron_file: - return "@%s %s %s" % (special, self.user, job) - else: - return "@%s %s" % (special, job) - else: - if self.cron_file: - return "%s %s %s %s %s %s %s" % (minute,hour,day,month,weekday,self.user,job) - else: - return "%s %s %s %s %s %s" % (minute,hour,day,month,weekday,job) - - return None - - def get_jobnames(self): - jobnames = [] - - for l in self.lines: - if re.match( r'%s' % self.ansible, l): - jobnames.append(re.sub( r'%s' % self.ansible, '', l)) - - return jobnames - - def _update_job(self, name, job, addlinesfunction): - ansiblename = "%s%s" % (self.ansible, name) - newlines = [] - comment = None - - for l in self.lines: - if comment is not None: - addlinesfunction(newlines, comment, job) - comment = None - elif l == 
ansiblename: - comment = l - else: - newlines.append(l) - - self.lines = newlines - - if len(newlines) == 0: - return True - else: - return False # TODO add some more error testing - - def render(self): - """ - Render this crontab as it would be in the crontab. - """ - crons = [] - for cron in self.lines: - crons.append(cron) - - result = '\n'.join(crons) - if result and result[-1] not in ['\n', '\r']: - result += '\n' - return result - - def _read_user_execute(self): - """ - Returns the command line for reading a crontab - """ - user = '' - if self.user: - if platform.system() == 'SunOS': - return "su %s -c '%s -l'" % (pipes.quote(self.user), pipes.quote(CRONCMD)) - elif platform.system() == 'AIX': - return "%s -l %s" % (pipes.quote(CRONCMD), pipes.quote(self.user)) - elif platform.system() == 'HP-UX': - return "%s %s %s" % (CRONCMD , '-l', pipes.quote(self.user)) - else: - user = '-u %s' % pipes.quote(self.user) - return "%s %s %s" % (CRONCMD , user, '-l') - - def _write_execute(self, path): - """ - Return the command line for writing a crontab - """ - user = '' - if self.user: - if platform.system() in ['SunOS', 'HP-UX', 'AIX']: - return "chown %s %s ; su '%s' -c '%s %s'" % (pipes.quote(self.user), pipes.quote(path), pipes.quote(self.user), CRONCMD, pipes.quote(path)) - else: - user = '-u %s' % pipes.quote(self.user) - return "%s %s %s" % (CRONCMD , user, pipes.quote(path)) - - - -#================================================== - -def main(): - # The following example playbooks: - # - # - cron: name="check dirs" hour="5,2" job="ls -alh > /dev/null" - # - # - name: do the job - # cron: name="do the job" hour="5,2" job="/some/dir/job.sh" - # - # - name: no job - # cron: name="an old job" state=absent - # - # Would produce: - # # Ansible: check dirs - # * * 5,2 * * ls -alh > /dev/null - # # Ansible: do the job - # * * 5,2 * * /some/dir/job.sh - - module = AnsibleModule( - argument_spec = dict( - name=dict(required=True), - user=dict(required=False), - 
job=dict(required=False), - cron_file=dict(required=False), - state=dict(default='present', choices=['present', 'absent']), - backup=dict(default=False, type='bool'), - minute=dict(default='*'), - hour=dict(default='*'), - day=dict(aliases=['dom'], default='*'), - month=dict(default='*'), - weekday=dict(aliases=['dow'], default='*'), - reboot=dict(required=False, default=False, type='bool'), - special_time=dict(required=False, - default=None, - choices=["reboot", "yearly", "annually", "monthly", "weekly", "daily", "hourly"], - type='str') - ), - supports_check_mode = False, - ) - - name = module.params['name'] - user = module.params['user'] - job = module.params['job'] - cron_file = module.params['cron_file'] - state = module.params['state'] - backup = module.params['backup'] - minute = module.params['minute'] - hour = module.params['hour'] - day = module.params['day'] - month = module.params['month'] - weekday = module.params['weekday'] - reboot = module.params['reboot'] - special_time = module.params['special_time'] - do_install = state == 'present' - - changed = False - res_args = dict() - - # Ensure all files generated are only writable by the owning user. Primarily relevant for the cron_file option. - os.umask(022) - crontab = CronTab(module, user, cron_file) - - if crontab.syslogging: - syslog.openlog('ansible-%s' % os.path.basename(__file__)) - syslog.syslog(syslog.LOG_NOTICE, 'cron instantiated - name: "%s"' % name) - - # --- user input validation --- - - if (special_time or reboot) and \ - (True in [(x != '*') for x in [minute, hour, day, month, weekday]]): - module.fail_json(msg="You must specify time and date fields or special time.") - - if cron_file and do_install: - if not user: - module.fail_json(msg="To use cron_file=... parameter you must specify user=... 
as well") - - if reboot and special_time: - module.fail_json(msg="reboot and special_time are mutually exclusive") - - if name is None and do_install: - module.fail_json(msg="You must specify 'name' to install a new cron job") - - if job is None and do_install: - module.fail_json(msg="You must specify 'job' to install a new cron job") - - if job and name is None and not do_install: - module.fail_json(msg="You must specify 'name' to remove a cron job") - - if reboot: - if special_time: - module.fail_json(msg="reboot and special_time are mutually exclusive") - else: - special_time = "reboot" - - # if requested make a backup before making a change - if backup: - (backuph, backup_file) = tempfile.mkstemp(prefix='crontab') - crontab.write(backup_file) - - if crontab.cron_file and not name and not do_install: - changed = crontab.remove_job_file() - module.exit_json(changed=changed,cron_file=cron_file,state=state) - - job = crontab.get_cron_job(minute, hour, day, month, weekday, job, special_time) - old_job = crontab.find_job(name) - - if do_install: - if len(old_job) == 0: - crontab.add_job(name, job) - changed = True - if len(old_job) > 0 and old_job[1] != job: - crontab.update_job(name, job) - changed = True - else: - if len(old_job) > 0: - crontab.remove_job(name) - changed = True - - res_args = dict( - jobs = crontab.get_jobnames(), changed = changed - ) - - if changed: - crontab.write() - - # retain the backup only if crontab or cron file have changed - if backup: - if changed: - res_args['backup_file'] = backup_file - else: - os.unlink(backup_file) - - if cron_file: - res_args['cron_file'] = cron_file - - module.exit_json(**res_args) - - # --- should never get here - module.exit_json(msg="Unable to execute cron task.") - -# import module snippets -from ansible.module_utils.basic import * - -main() - diff --git a/library/system/debconf b/library/system/debconf deleted file mode 100644 index 7f5ea0368c..0000000000 --- a/library/system/debconf +++ /dev/null @@ -1,170 
+0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -""" -Ansible module to configure .deb packages. -(c) 2014, Brian Coca - -This file is part of Ansible - -Ansible is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -Ansible is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with Ansible. If not, see . -""" - -DOCUMENTATION = ''' ---- -module: debconf -short_description: Configure a .deb package -description: - - Configure a .deb package using debconf-set-selections. Or just query - existing selections. -version_added: "1.6" -notes: - - This module requires the command line debconf tools. - - A number of questions have to be answered (depending on the package). - Use 'debconf-show ' on any Debian or derivative with the package - installed to see questions/settings available. -requirements: [ debconf, debconf-utils ] -options: - name: - description: - - Name of package to configure. 
- required: true - default: null - aliases: ['pkg'] - question: - description: - - A debconf configuration setting - required: false - default: null - aliases: ['setting', 'selection'] - vtype: - description: - - The type of the value supplied - required: false - default: null - choices: [string, password, boolean, select, multiselect, note, error, title, text] - aliases: [] - value: - description: - - Value to set the configuration to - required: false - default: null - aliases: ['answer'] - unseen: - description: - - Do not set 'seen' flag when pre-seeding - required: false - default: False - aliases: [] -author: Brian Coca - -''' - -EXAMPLES = ''' -# Set default locale to fr_FR.UTF-8 -debconf: name=locales question='locales/default_environment_locale' value=fr_FR.UTF-8 vtype='select' - -# set to generate locales: -debconf: name=locales question='locales/locales_to_be_generated' value='en_US.UTF-8 UTF-8, fr_FR.UTF-8 UTF-8' vtype='multiselect' - -# Accept oracle license -debconf: name='oracle-java7-installer' question='shared/accepted-oracle-license-v1-1' value='true' vtype='select' - -# Specifying package you can register/return the list of questions and current values -debconf: name='tzdata' -''' - -import pipes - -def get_selections(module, pkg): - cmd = [module.get_bin_path('debconf-show', True), pkg] - rc, out, err = module.run_command(' '.join(cmd)) - - if rc != 0: - module.fail_json(msg=err) - - selections = {} - - for line in out.splitlines(): - (key, value) = line.split(':', 1) - selections[ key.strip('*').strip() ] = value.strip() - - return selections - - -def set_selection(module, pkg, question, vtype, value, unseen): - - data = ' '.join([ question, vtype, value ]) - - setsel = module.get_bin_path('debconf-set-selections', True) - cmd = ["echo %s %s |" % (pipes.quote(pkg), pipes.quote(data)), setsel] - if unseen: - cmd.append('-u') - - return module.run_command(' '.join(cmd), use_unsafe_shell=True) - -def main(): - - module = AnsibleModule( - 
argument_spec = dict( - name = dict(required=True, aliases=['pkg'], type='str'), - question = dict(required=False, aliases=['setting', 'selection'], type='str'), - vtype = dict(required=False, type='str', choices=['string', 'password', 'boolean', 'select', 'multiselect', 'note', 'error', 'title', 'text']), - value= dict(required=False, type='str'), - unseen = dict(required=False, type='bool'), - ), - required_together = ( ['question','vtype', 'value'],), - supports_check_mode=True, - ) - - #TODO: enable passing array of options and/or debconf file from get-selections dump - pkg = module.params["name"] - question = module.params["question"] - vtype = module.params["vtype"] - value = module.params["value"] - unseen = module.params["unseen"] - - prev = get_selections(module, pkg) - diff = '' - - changed = False - msg = "" - - if question is not None: - if vtype is None or value is None: - module.fail_json(msg="when supplying a question you must supply a valid vtype and value") - - if not question in prev or prev[question] != value: - changed = True - - if changed: - if not module.check_mode: - rc, msg, e = set_selection(module, pkg, question, vtype, value, unseen) - if rc: - module.fail_json(msg=e) - - curr = { question: value } - if question in prev: - prev = {question: prev[question]} - else: - prev[question] = '' - - module.exit_json(changed=changed, msg=msg, current=curr, previous=prev) - - module.exit_json(changed=changed, msg=msg, current=prev) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/system/facter b/library/system/facter deleted file mode 100644 index a72cdc6536..0000000000 --- a/library/system/facter +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software 
Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - - -DOCUMENTATION = ''' ---- -module: facter -short_description: Runs the discovery program I(facter) on the remote system -description: - - Runs the I(facter) discovery program - (U(https://github.com/puppetlabs/facter)) on the remote system, returning - JSON data that can be useful for inventory purposes. -version_added: "0.2" -options: {} -notes: [] -requirements: [ "facter", "ruby-json" ] -author: Michael DeHaan -''' - -EXAMPLES = ''' -# Example command-line invocation -ansible www.example.net -m facter -''' - -def main(): - module = AnsibleModule( - argument_spec = dict() - ) - - cmd = ["/usr/bin/env", "facter", "--json"] - rc, out, err = module.run_command(cmd, check_rc=True) - module.exit_json(**json.loads(out)) - -# import module snippets -from ansible.module_utils.basic import * - -main() - diff --git a/library/system/filesystem b/library/system/filesystem deleted file mode 100644 index 064c0d0af8..0000000000 --- a/library/system/filesystem +++ /dev/null @@ -1,119 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Alexander Bulimov -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -author: Alexander Bulimov -module: filesystem -short_description: Makes file system on block device -description: - - This module creates file system. -version_added: "1.2" -options: - fstype: - description: - - File System type to be created. - required: true - dev: - description: - - Target block device. - required: true - force: - choices: [ "yes", "no" ] - default: "no" - description: - - If yes, allows to create new filesystem on devices that already has filesystem. - required: false - opts: - description: - - List of options to be passed to mkfs command. -notes: - - uses mkfs command -''' - -EXAMPLES = ''' -# Create a ext2 filesystem on /dev/sdb1. -- filesystem: fstype=ext2 dev=/dev/sdb1 - -# Create a ext4 filesystem on /dev/sdb1 and check disk blocks. -- filesystem: fstype=ext4 dev=/dev/sdb1 opts="-cc" -''' - -def main(): - module = AnsibleModule( - argument_spec = dict( - fstype=dict(required=True, aliases=['type']), - dev=dict(required=True, aliases=['device']), - opts=dict(), - force=dict(type='bool', default='no'), - ), - supports_check_mode=True, - ) - - dev = module.params['dev'] - fstype = module.params['fstype'] - opts = module.params['opts'] - force = module.boolean(module.params['force']) - - changed = False - - if not os.path.exists(dev): - module.fail_json(msg="Device %s not found."%dev) - - cmd = module.get_bin_path('blkid', required=True) - - rc,raw_fs,err = module.run_command("%s -c /dev/null -o value -s TYPE %s" % (cmd, dev)) - fs = raw_fs.strip() - - - if fs == fstype: - module.exit_json(changed=False) - elif fs and not force: - module.fail_json(msg="'%s' is already used as %s, use force=yes to overwrite"%(dev,fs), rc=rc, err=err) - - ### create fs - - if module.check_mode: - changed = True - else: - mkfs = module.get_bin_path('mkfs', required=True) - 
cmd = None - if fstype in ['ext2', 'ext3', 'ext4', 'ext4dev']: - force_flag="-F" - elif fstype in ['btrfs']: - force_flag="-f" - else: - force_flag="" - - if opts is None: - cmd = "%s -t %s %s '%s'" % (mkfs, fstype, force_flag, dev) - else: - cmd = "%s -t %s %s %s '%s'" % (mkfs, fstype, force_flag, opts, dev) - rc,_,err = module.run_command(cmd) - if rc == 0: - changed = True - else: - module.fail_json(msg="Creating filesystem %s on device '%s' failed"%(fstype,dev), rc=rc, err=err) - - module.exit_json(changed=changed) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/system/firewalld b/library/system/firewalld deleted file mode 100644 index 22db165aad..0000000000 --- a/library/system/firewalld +++ /dev/null @@ -1,398 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Adam Miller (maxamillion@fedoraproject.org) -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -DOCUMENTATION = ''' ---- -module: firewalld -short_description: Manage arbitrary ports/services with firewalld -description: - - This module allows for addition or deletion of services and ports either tcp or udp in either running or permanent firewalld rules -version_added: "1.4" -options: - service: - description: - - "Name of a service to add/remove to/from firewalld - service must be listed in /etc/services" - required: false - default: null - port: - description: - - "Name of a port to add/remove to/from firewalld must be in the form PORT/PROTOCOL" - required: false - default: null - rich_rule: - description: - - "Rich rule to add/remove to/from firewalld" - required: false - default: null - zone: - description: - - 'The firewalld zone to add/remove to/from (NOTE: default zone can be configured per system but "public" is default from upstream. Available choices can be extended based on per-system configs, listed here are "out of the box" defaults).' - required: false - default: system-default(public) - choices: [ "work", "drop", "internal", "external", "trusted", "home", "dmz", "public", "block"] - permanent: - description: - - "Should this configuration be in the running firewalld configuration or persist across reboots" - required: true - default: true - state: - description: - - "Should this port accept(enabled) or reject(disabled) connections" - required: true - default: enabled - timeout: - description: - - "The amount of time the rule should be in effect for when non-permanent" - required: false - default: 0 -notes: - - Not tested on any debian based system -requirements: [ firewalld >= 0.2.11 ] -author: Adam Miller -''' - -EXAMPLES = ''' -- firewalld: service=https permanent=true state=enabled -- firewalld: port=8081/tcp permanent=true state=disabled -- firewalld: zone=dmz service=http permanent=true state=enabled -- firewalld: rich_rule='rule service name="ftp" audit limit value="1/m" accept' permanent=true state=enabled -''' - -import os -import re 
-import sys - -try: - import firewall.config - FW_VERSION = firewall.config.VERSION - - from firewall.client import FirewallClient - fw = FirewallClient() - if not fw.connected: - raise Exception('failed to connect to the firewalld daemon') -except ImportError: - print "failed=True msg='firewalld required for this module'" - sys.exit(1) -except Exception, e: - print "failed=True msg='%s'" % str(e) - sys.exit(1) - -################ -# port handling -# -def get_port_enabled(zone, port_proto): - if port_proto in fw.getPorts(zone): - return True - else: - return False - -def set_port_enabled(zone, port, protocol, timeout): - fw.addPort(zone, port, protocol, timeout) - -def set_port_disabled(zone, port, protocol): - fw.removePort(zone, port, protocol) - -def get_port_enabled_permanent(zone, port_proto): - fw_zone = fw.config().getZoneByName(zone) - fw_settings = fw_zone.getSettings() - if tuple(port_proto) in fw_settings.getPorts(): - return True - else: - return False - -def set_port_enabled_permanent(zone, port, protocol): - fw_zone = fw.config().getZoneByName(zone) - fw_settings = fw_zone.getSettings() - fw_settings.addPort(port, protocol) - fw_zone.update(fw_settings) - -def set_port_disabled_permanent(zone, port, protocol): - fw_zone = fw.config().getZoneByName(zone) - fw_settings = fw_zone.getSettings() - fw_settings.removePort(port, protocol) - fw_zone.update(fw_settings) - - -#################### -# service handling -# -def get_service_enabled(zone, service): - if service in fw.getServices(zone): - return True - else: - return False - -def set_service_enabled(zone, service, timeout): - fw.addService(zone, service, timeout) - -def set_service_disabled(zone, service): - fw.removeService(zone, service) - -def get_service_enabled_permanent(zone, service): - fw_zone = fw.config().getZoneByName(zone) - fw_settings = fw_zone.getSettings() - if service in fw_settings.getServices(): - return True - else: - return False - -def set_service_enabled_permanent(zone, service): 
- fw_zone = fw.config().getZoneByName(zone) - fw_settings = fw_zone.getSettings() - fw_settings.addService(service) - fw_zone.update(fw_settings) - -def set_service_disabled_permanent(zone, service): - fw_zone = fw.config().getZoneByName(zone) - fw_settings = fw_zone.getSettings() - fw_settings.removeService(service) - fw_zone.update(fw_settings) - - -#################### -# rich rule handling -# -def get_rich_rule_enabled(zone, rule): - if rule in fw.getRichRules(zone): - return True - else: - return False - -def set_rich_rule_enabled(zone, rule, timeout): - fw.addRichRule(zone, rule, timeout) - -def set_rich_rule_disabled(zone, rule): - fw.removeRichRule(zone, rule) - -def get_rich_rule_enabled_permanent(zone, rule): - fw_zone = fw.config().getZoneByName(zone) - fw_settings = fw_zone.getSettings() - if rule in fw_settings.getRichRules(): - return True - else: - return False - -def set_rich_rule_enabled_permanent(zone, rule): - fw_zone = fw.config().getZoneByName(zone) - fw_settings = fw_zone.getSettings() - fw_settings.addRichRule(rule) - fw_zone.update(fw_settings) - -def set_rich_rule_disabled_permanent(zone, rule): - fw_zone = fw.config().getZoneByName(zone) - fw_settings = fw_zone.getSettings() - fw_settings.removeRichRule(rule) - fw_zone.update(fw_settings) - - -def main(): - - module = AnsibleModule( - argument_spec = dict( - service=dict(required=False,default=None), - port=dict(required=False,default=None), - rich_rule=dict(required=False,default=None), - zone=dict(required=False,default=None), - permanent=dict(type='bool',required=True), - state=dict(choices=['enabled', 'disabled'], required=True), - timeout=dict(type='int',required=False,default=0), - ), - supports_check_mode=True - ) - - ## Pre-run version checking - if FW_VERSION < "0.2.11": - module.fail_json(msg='unsupported version of firewalld, requires >= 2.0.11') - - ## Global Vars - changed=False - msgs = [] - service = module.params['service'] - rich_rule = module.params['rich_rule'] - - if 
module.params['port'] != None: - port, protocol = module.params['port'].split('/') - if protocol == None: - module.fail_json(msg='improper port format (missing protocol?)') - else: - port = None - - if module.params['zone'] != None: - zone = module.params['zone'] - else: - zone = fw.getDefaultZone() - - permanent = module.params['permanent'] - desired_state = module.params['state'] - timeout = module.params['timeout'] - - ## Check for firewalld running - try: - if fw.connected == False: - module.fail_json(msg='firewalld service must be running') - except AttributeError: - module.fail_json(msg="firewalld connection can't be established,\ - version likely too old. Requires firewalld >= 2.0.11") - - modification_count = 0 - if service != None: - modification_count += 1 - if port != None: - modification_count += 1 - if rich_rule != None: - modification_count += 1 - - if modification_count > 1: - module.fail_json(msg='can only operate on port, service or rich_rule at once') - - if service != None: - if permanent: - is_enabled = get_service_enabled_permanent(zone, service) - msgs.append('Permanent operation') - - if desired_state == "enabled": - if is_enabled == False: - if module.check_mode: - module.exit_json(changed=True) - - set_service_enabled_permanent(zone, service) - changed=True - elif desired_state == "disabled": - if is_enabled == True: - if module.check_mode: - module.exit_json(changed=True) - - set_service_disabled_permanent(zone, service) - changed=True - else: - is_enabled = get_service_enabled(zone, service) - msgs.append('Non-permanent operation') - - - if desired_state == "enabled": - if is_enabled == False: - if module.check_mode: - module.exit_json(changed=True) - - set_service_enabled(zone, service, timeout) - changed=True - elif desired_state == "disabled": - if is_enabled == True: - if module.check_mode: - module.exit_json(changed=True) - - set_service_disabled(zone, service) - changed=True - - if changed == True: - msgs.append("Changed service %s 
to %s" % (service, desired_state)) - - if port != None: - if permanent: - is_enabled = get_port_enabled_permanent(zone, [port, protocol]) - msgs.append('Permanent operation') - - if desired_state == "enabled": - if is_enabled == False: - if module.check_mode: - module.exit_json(changed=True) - - set_port_enabled_permanent(zone, port, protocol) - changed=True - elif desired_state == "disabled": - if is_enabled == True: - if module.check_mode: - module.exit_json(changed=True) - - set_port_disabled_permanent(zone, port, protocol) - changed=True - else: - is_enabled = get_port_enabled(zone, [port,protocol]) - msgs.append('Non-permanent operation') - - if desired_state == "enabled": - if is_enabled == False: - if module.check_mode: - module.exit_json(changed=True) - - set_port_enabled(zone, port, protocol, timeout) - changed=True - elif desired_state == "disabled": - if is_enabled == True: - if module.check_mode: - module.exit_json(changed=True) - - set_port_disabled(zone, port, protocol) - changed=True - - if changed == True: - msgs.append("Changed port %s to %s" % ("%s/%s" % (port, protocol), \ - desired_state)) - - if rich_rule != None: - if permanent: - is_enabled = get_rich_rule_enabled_permanent(zone, rich_rule) - msgs.append('Permanent operation') - - if desired_state == "enabled": - if is_enabled == False: - if module.check_mode: - module.exit_json(changed=True) - - set_rich_rule_enabled_permanent(zone, rich_rule) - changed=True - elif desired_state == "disabled": - if is_enabled == True: - if module.check_mode: - module.exit_json(changed=True) - - set_rich_rule_disabled_permanent(zone, rich_rule) - changed=True - else: - is_enabled = get_rich_rule_enabled(zone, rich_rule) - msgs.append('Non-permanent operation') - - if desired_state == "enabled": - if is_enabled == False: - if module.check_mode: - module.exit_json(changed=True) - - set_rich_rule_enabled(zone, rich_rule, timeout) - changed=True - elif desired_state == "disabled": - if is_enabled == True: - if 
module.check_mode: - module.exit_json(changed=True) - - set_rich_rule_disabled(zone, rich_rule) - changed=True - - if changed == True: - msgs.append("Changed rich_rule %s to %s" % (rich_rule, desired_state)) - - module.exit_json(changed=changed, msg=', '.join(msgs)) - - -################################################# -# import module snippets -from ansible.module_utils.basic import * - -main() - diff --git a/library/system/getent b/library/system/getent deleted file mode 100644 index 0173618f69..0000000000 --- a/library/system/getent +++ /dev/null @@ -1,143 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, Brian Coca -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - - -DOCUMENTATION = ''' ---- -module: getent -short_description: a wrapper to the unix getent utility -description: - - Runs getent against one of it's various databases and returns information into - the host's facts -version_added: "1.8" -options: - database: - required: True - description: - - the name of a getent database supported by the target system (passwd, group, - hosts, etc). - key: - required: False - default: '' - description: - - key from which to return values from the specified database, otherwise the - full contents are returned. 
- split: - required: False - default: None - description: - - "character used to split the database values into lists/arrays such as ':' or '\t', otherwise it will try to pick one depending on the database" - fail_key: - required: False - default: True - description: - - If a supplied key is missing this will make the task fail if True - -notes: - - "Not all databases support enumeration, check system documentation for details" -requirements: [ ] -author: Brian Coca -''' - -EXAMPLES = ''' -# get root user info -- getent: database=passwd key=root - register: root_info - -# get all groups -- getent: database=group split=':' - register: groups - -# get all hosts, split by tab -- getent: database=hosts - register: hosts - -# get http service info, no error if missing -- getent: database=services key=http fail_key=False - register: http_info - -# get user password hash (requires sudo/root) -- getent: database=shadow key=www-data split=: - register: pw_hash - -''' - -def main(): - module = AnsibleModule( - argument_spec = dict( - database = dict(required=True), - key = dict(required=False, default=None), - split = dict(required=False, default=None), - fail_key = dict(required=False, default=True), - ), - supports_check_mode = True, - ) - - colon = [ 'passwd', 'shadow', 'group', 'gshadow' ] - - database = module.params['database'] - key = module.params.get('key') - split = module.params.get('split') - fail_key = module.params.get('fail_key') - - getent_bin = module.get_bin_path('getent', True) - - if key is not None: - cmd = [ getent_bin, database, key ] - else: - cmd = [ getent_bin, database ] - - if split is None and database in colon: - split = ':' - - try: - rc, out, err = module.run_command(cmd) - except Exception, e: - module.fail_json(msg=str(e)) - - msg = "Unexpected failure!" 
- dbtree = 'getent_%s' % database - results = { dbtree: {} } - - if rc == 0: - for line in out.splitlines(): - record = line.split(split) - results[dbtree][record[0]] = record[1:] - - module.exit_json(ansible_facts=results) - - elif rc == 1: - msg = "Missing arguments, or database unknown." - elif rc == 2: - msg = "One or more supplied key could not be found in the database." - if not fail_key: - results[dbtree][key] = None - module.exit_json(ansible_facts=results, msg=msg) - elif rc == 3: - msg = "Enumeration not supported on this database." - - module.fail_json(msg=msg) - -# import module snippets -from ansible.module_utils.basic import * - -main() - diff --git a/library/system/group b/library/system/group deleted file mode 100644 index 617de7c285..0000000000 --- a/library/system/group +++ /dev/null @@ -1,403 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Stephen Fromm -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: group -author: Stephen Fromm -version_added: "0.0.2" -short_description: Add or remove groups -requirements: [ groupadd, groupdel, groupmod ] -description: - - Manage presence of groups on a host. -options: - name: - required: true - description: - - Name of the group to manage. - gid: - required: false - description: - - Optional I(GID) to set for the group. 
- state: - required: false - default: "present" - choices: [ present, absent ] - description: - - Whether the group should be present or not on the remote host. - system: - required: false - default: "no" - choices: [ "yes", "no" ] - description: - - If I(yes), indicates that the group created is a system group. - -''' - -EXAMPLES = ''' -# Example group command from Ansible Playbooks -- group: name=somegroup state=present -''' - -import grp -import syslog -import platform - -class Group(object): - """ - This is a generic Group manipulation class that is subclassed - based on platform. - - A subclass may wish to override the following action methods:- - - group_del() - - group_add() - - group_mod() - - All subclasses MUST define platform and distribution (which may be None). - """ - - platform = 'Generic' - distribution = None - GROUPFILE = '/etc/group' - - def __new__(cls, *args, **kwargs): - return load_platform_subclass(Group, args, kwargs) - - def __init__(self, module): - self.module = module - self.state = module.params['state'] - self.name = module.params['name'] - self.gid = module.params['gid'] - self.system = module.params['system'] - self.syslogging = False - - def execute_command(self, cmd): - if self.syslogging: - syslog.openlog('ansible-%s' % os.path.basename(__file__)) - syslog.syslog(syslog.LOG_NOTICE, 'Command %s' % '|'.join(cmd)) - - return self.module.run_command(cmd) - - def group_del(self): - cmd = [self.module.get_bin_path('groupdel', True), self.name] - return self.execute_command(cmd) - - def group_add(self, **kwargs): - cmd = [self.module.get_bin_path('groupadd', True)] - for key in kwargs: - if key == 'gid' and kwargs[key] is not None: - cmd.append('-g') - cmd.append(kwargs[key]) - elif key == 'system' and kwargs[key] == True: - cmd.append('-r') - cmd.append(self.name) - return self.execute_command(cmd) - - def group_mod(self, **kwargs): - cmd = [self.module.get_bin_path('groupmod', True)] - info = self.group_info() - for key in kwargs: - 
if key == 'gid': - if kwargs[key] is not None and info[2] != int(kwargs[key]): - cmd.append('-g') - cmd.append(kwargs[key]) - if len(cmd) == 1: - return (None, '', '') - if self.module.check_mode: - return (0, '', '') - cmd.append(self.name) - return self.execute_command(cmd) - - def group_exists(self): - try: - if grp.getgrnam(self.name): - return True - except KeyError: - return False - - def group_info(self): - if not self.group_exists(): - return False - try: - info = list(grp.getgrnam(self.name)) - except KeyError: - return False - return info - -# =========================================== - -class SunOS(Group): - """ - This is a SunOS Group manipulation class. Solaris doesn't have - the 'system' group concept. - - This overrides the following methods from the generic class:- - - group_add() - """ - - platform = 'SunOS' - distribution = None - GROUPFILE = '/etc/group' - - def group_add(self, **kwargs): - cmd = [self.module.get_bin_path('groupadd', True)] - for key in kwargs: - if key == 'gid' and kwargs[key] is not None: - cmd.append('-g') - cmd.append(kwargs[key]) - cmd.append(self.name) - return self.execute_command(cmd) - - -# =========================================== - -class AIX(Group): - """ - This is a AIX Group manipulation class. 
- - This overrides the following methods from the generic class:- - - group_del() - - group_add() - - group_mod() - """ - - platform = 'AIX' - distribution = None - GROUPFILE = '/etc/group' - - def group_del(self): - cmd = [self.module.get_bin_path('rmgroup', True), self.name] - return self.execute_command(cmd) - - def group_add(self, **kwargs): - cmd = [self.module.get_bin_path('mkgroup', True)] - for key in kwargs: - if key == 'gid' and kwargs[key] is not None: - cmd.append('id='+kwargs[key]) - elif key == 'system' and kwargs[key] == True: - cmd.append('-a') - cmd.append(self.name) - return self.execute_command(cmd) - - def group_mod(self, **kwargs): - cmd = [self.module.get_bin_path('chgroup', True)] - info = self.group_info() - for key in kwargs: - if key == 'gid': - if kwargs[key] is not None and info[2] != int(kwargs[key]): - cmd.append('id='+kwargs[key]) - if len(cmd) == 1: - return (None, '', '') - if self.module.check_mode: - return (0, '', '') - cmd.append(self.name) - return self.execute_command(cmd) - -# =========================================== - -class FreeBsdGroup(Group): - """ - This is a FreeBSD Group manipulation class. 
- - This overrides the following methods from the generic class:- - - group_del() - - group_add() - - group_mod() - """ - - platform = 'FreeBSD' - distribution = None - GROUPFILE = '/etc/group' - - def group_del(self): - cmd = [self.module.get_bin_path('pw', True), 'groupdel', self.name] - return self.execute_command(cmd) - - def group_add(self, **kwargs): - cmd = [self.module.get_bin_path('pw', True), 'groupadd', self.name] - if self.gid is not None: - cmd.append('-g %d' % int(self.gid)) - return self.execute_command(cmd) - - def group_mod(self, **kwargs): - cmd = [self.module.get_bin_path('pw', True), 'groupmod', self.name] - info = self.group_info() - cmd_len = len(cmd) - if self.gid is not None and int(self.gid) != info[2]: - cmd.append('-g %d' % int(self.gid)) - # modify the group if cmd will do anything - if cmd_len != len(cmd): - if self.module.check_mode: - return (0, '', '') - return self.execute_command(cmd) - return (None, '', '') - -# =========================================== - -class OpenBsdGroup(Group): - """ - This is a OpenBSD Group manipulation class. 
- - This overrides the following methods from the generic class:- - - group_del() - - group_add() - - group_mod() - """ - - platform = 'OpenBSD' - distribution = None - GROUPFILE = '/etc/group' - - def group_del(self): - cmd = [self.module.get_bin_path('groupdel', True), self.name] - return self.execute_command(cmd) - - def group_add(self, **kwargs): - cmd = [self.module.get_bin_path('groupadd', True)] - if self.gid is not None: - cmd.append('-g') - cmd.append('%d' % int(self.gid)) - cmd.append(self.name) - return self.execute_command(cmd) - - def group_mod(self, **kwargs): - cmd = [self.module.get_bin_path('groupmod', True)] - info = self.group_info() - cmd_len = len(cmd) - if self.gid is not None and int(self.gid) != info[2]: - cmd.append('-g') - cmd.append('%d' % int(self.gid)) - if len(cmd) == 1: - return (None, '', '') - if self.module.check_mode: - return (0, '', '') - cmd.append(self.name) - return self.execute_command(cmd) - -# =========================================== - -class NetBsdGroup(Group): - """ - This is a NetBSD Group manipulation class. 
- - This overrides the following methods from the generic class:- - - group_del() - - group_add() - - group_mod() - """ - - platform = 'NetBSD' - distribution = None - GROUPFILE = '/etc/group' - - def group_del(self): - cmd = [self.module.get_bin_path('groupdel', True), self.name] - return self.execute_command(cmd) - - def group_add(self, **kwargs): - cmd = [self.module.get_bin_path('groupadd', True)] - if self.gid is not None: - cmd.append('-g') - cmd.append('%d' % int(self.gid)) - cmd.append(self.name) - return self.execute_command(cmd) - - def group_mod(self, **kwargs): - cmd = [self.module.get_bin_path('groupmod', True)] - info = self.group_info() - cmd_len = len(cmd) - if self.gid is not None and int(self.gid) != info[2]: - cmd.append('-g') - cmd.append('%d' % int(self.gid)) - if len(cmd) == 1: - return (None, '', '') - if self.module.check_mode: - return (0, '', '') - cmd.append(self.name) - return self.execute_command(cmd) - -# =========================================== - -def main(): - module = AnsibleModule( - argument_spec = dict( - state=dict(default='present', choices=['present', 'absent'], type='str'), - name=dict(required=True, type='str'), - gid=dict(default=None, type='str'), - system=dict(default=False, type='bool'), - ), - supports_check_mode=True - ) - - group = Group(module) - - if group.syslogging: - syslog.openlog('ansible-%s' % os.path.basename(__file__)) - syslog.syslog(syslog.LOG_NOTICE, 'Group instantiated - platform %s' % group.platform) - if user.distribution: - syslog.syslog(syslog.LOG_NOTICE, 'Group instantiated - distribution %s' % group.distribution) - - rc = None - out = '' - err = '' - result = {} - result['name'] = group.name - result['state'] = group.state - - if group.state == 'absent': - - if group.group_exists(): - if module.check_mode: - module.exit_json(changed=True) - (rc, out, err) = group.group_del() - if rc != 0: - module.fail_json(name=group.name, msg=err) - - elif group.state == 'present': - - if not 
group.group_exists(): - if module.check_mode: - module.exit_json(changed=True) - (rc, out, err) = group.group_add(gid=group.gid, system=group.system) - else: - (rc, out, err) = group.group_mod(gid=group.gid) - - if rc is not None and rc != 0: - module.fail_json(name=group.name, msg=err) - - if rc is None: - result['changed'] = False - else: - result['changed'] = True - if out: - result['stdout'] = out - if err: - result['stderr'] = err - - if group.group_exists(): - info = group.group_info() - result['system'] = group.system - result['gid'] = info[2] - - module.exit_json(**result) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/system/hostname b/library/system/hostname deleted file mode 100755 index a426b59136..0000000000 --- a/library/system/hostname +++ /dev/null @@ -1,445 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Hiroaki Nakamura -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: hostname -author: Hiroaki Nakamura -version_added: "1.4" -short_description: Manage hostname -requirements: [ hostname ] -description: - - Set system's hostname - - Currently implemented on Debian, Ubuntu, Fedora, RedHat, openSUSE, Linaro, ScientificLinux, Arch, CentOS, AMI. 
-options: - name: - required: true - description: - - Name of the host -''' - -EXAMPLES = ''' -- hostname: name=web01 -''' - -from distutils.version import LooseVersion - -# import module snippets -from ansible.module_utils.basic import * - - -# wrap get_distribution_version in case it returns a string -def _get_distribution_version(): - distribution_version = get_distribution_version() - - if type(distribution_version) is str: - distribution_version = 0 - elif type(distribution_version) is None: - distribution_version = 0 - - return distribution_version - - -class UnimplementedStrategy(object): - def __init__(self, module): - self.module = module - - def get_current_hostname(self): - self.unimplemented_error() - - def set_current_hostname(self, name): - self.unimplemented_error() - - def get_permanent_hostname(self): - self.unimplemented_error() - - def set_permanent_hostname(self, name): - self.unimplemented_error() - - def unimplemented_error(self): - platform = get_platform() - distribution = get_distribution() - if distribution is not None: - msg_platform = '%s (%s)' % (platform, distribution) - else: - msg_platform = platform - self.module.fail_json( - msg='hostname module cannot be used on platform %s' % msg_platform) - -class Hostname(object): - """ - This is a generic Hostname manipulation class that is subclassed - based on platform. - - A subclass may wish to set different strategy instance to self.strategy. - - All subclasses MUST define platform and distribution (which may be None). 
- """ - - platform = 'Generic' - distribution = None - strategy_class = UnimplementedStrategy - - def __new__(cls, *args, **kwargs): - return load_platform_subclass(Hostname, args, kwargs) - - def __init__(self, module): - self.module = module - self.name = module.params['name'] - self.strategy = self.strategy_class(module) - - def get_current_hostname(self): - return self.strategy.get_current_hostname() - - def set_current_hostname(self, name): - self.strategy.set_current_hostname(name) - - def get_permanent_hostname(self): - return self.strategy.get_permanent_hostname() - - def set_permanent_hostname(self, name): - self.strategy.set_permanent_hostname(name) - -class GenericStrategy(object): - """ - This is a generic Hostname manipulation strategy class. - - A subclass may wish to override some or all of these methods. - - get_current_hostname() - - get_permanent_hostname() - - set_current_hostname(name) - - set_permanent_hostname(name) - """ - def __init__(self, module): - self.module = module - - HOSTNAME_CMD = '/bin/hostname' - - def get_current_hostname(self): - cmd = [self.HOSTNAME_CMD] - rc, out, err = self.module.run_command(cmd) - if rc != 0: - self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % - (rc, out, err)) - return out.strip() - - def set_current_hostname(self, name): - cmd = [self.HOSTNAME_CMD, name] - rc, out, err = self.module.run_command(cmd) - if rc != 0: - self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % - (rc, out, err)) - - def get_permanent_hostname(self): - return None - - def set_permanent_hostname(self, name): - pass - - -# =========================================== - -class DebianStrategy(GenericStrategy): - """ - This is a Debian family Hostname manipulation strategy class - it edits - the /etc/hostname file. 
- """ - - HOSTNAME_FILE = '/etc/hostname' - - def get_permanent_hostname(self): - if not os.path.isfile(self.HOSTNAME_FILE): - try: - open(self.HOSTNAME_FILE, "a").write("") - except IOError, err: - self.module.fail_json(msg="failed to write file: %s" % - str(err)) - try: - f = open(self.HOSTNAME_FILE) - try: - return f.read().strip() - finally: - f.close() - except Exception, err: - self.module.fail_json(msg="failed to read hostname: %s" % - str(err)) - - def set_permanent_hostname(self, name): - try: - f = open(self.HOSTNAME_FILE, 'w+') - try: - f.write("%s\n" % name) - finally: - f.close() - except Exception, err: - self.module.fail_json(msg="failed to update hostname: %s" % - str(err)) - - -# =========================================== - -class RedHatStrategy(GenericStrategy): - """ - This is a Redhat Hostname strategy class - it edits the - /etc/sysconfig/network file. - """ - NETWORK_FILE = '/etc/sysconfig/network' - - def get_permanent_hostname(self): - try: - f = open(self.NETWORK_FILE, 'rb') - try: - for line in f.readlines(): - if line.startswith('HOSTNAME'): - k, v = line.split('=') - return v.strip() - finally: - f.close() - except Exception, err: - self.module.fail_json(msg="failed to read hostname: %s" % - str(err)) - - def set_permanent_hostname(self, name): - try: - lines = [] - found = False - f = open(self.NETWORK_FILE, 'rb') - try: - for line in f.readlines(): - if line.startswith('HOSTNAME'): - lines.append("HOSTNAME=%s\n" % name) - found = True - else: - lines.append(line) - finally: - f.close() - if not found: - lines.append("HOSTNAME=%s\n" % name) - f = open(self.NETWORK_FILE, 'w+') - try: - f.writelines(lines) - finally: - f.close() - except Exception, err: - self.module.fail_json(msg="failed to update hostname: %s" % - str(err)) - - -# =========================================== - -class FedoraStrategy(GenericStrategy): - """ - This is a Fedora family Hostname manipulation strategy class - it uses - the hostnamectl command. 
- """ - - def get_current_hostname(self): - cmd = ['hostname'] - rc, out, err = self.module.run_command(cmd) - if rc != 0: - self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % - (rc, out, err)) - return out.strip() - - def set_current_hostname(self, name): - cmd = ['hostnamectl', '--transient', 'set-hostname', name] - rc, out, err = self.module.run_command(cmd) - if rc != 0: - self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % - (rc, out, err)) - - def get_permanent_hostname(self): - cmd = 'hostnamectl --static status' - rc, out, err = self.module.run_command(cmd, use_unsafe_shell=True) - if rc != 0: - self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % - (rc, out, err)) - return out.strip() - - def set_permanent_hostname(self, name): - cmd = ['hostnamectl', '--pretty', 'set-hostname', name] - rc, out, err = self.module.run_command(cmd) - if rc != 0: - self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % - (rc, out, err)) - cmd = ['hostnamectl', '--static', 'set-hostname', name] - rc, out, err = self.module.run_command(cmd) - if rc != 0: - self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % - (rc, out, err)) - - -# =========================================== - -class OpenRCStrategy(GenericStrategy): - """ - This is a Gentoo (OpenRC) Hostname manipulation strategy class - it edits - the /etc/conf.d/hostname file. 
- """ - - HOSTNAME_FILE = '/etc/conf.d/hostname' - - def get_permanent_hostname(self): - try: - with open(self.HOSTNAME_FILE, 'r') as f: - for line in f: - line = line.strip() - if line.startswith('hostname='): - return line[10:].strip('"') - return None - except Exception, err: - self.module.fail_json(msg="failed to read hostname: %s" % - str(err)) - - def set_permanent_hostname(self, name): - try: - with open(self.HOSTNAME_FILE, 'r') as f: - lines = [x.strip() for x in f] - - for i, line in enumerate(lines): - if line.startswith('hostname='): - lines[i] = 'hostname="%s"' % name - break - - with open(self.HOSTNAME_FILE, 'w') as f: - f.write('\n'.join(lines) + '\n') - except Exception, err: - self.module.fail_json(msg="failed to update hostname: %s" % - str(err)) - -# =========================================== - -class FedoraHostname(Hostname): - platform = 'Linux' - distribution = 'Fedora' - strategy_class = FedoraStrategy - -class OpenSUSEHostname(Hostname): - platform = 'Linux' - distribution = 'Opensuse ' - strategy_class = FedoraStrategy - -class ArchHostname(Hostname): - platform = 'Linux' - distribution = 'Arch' - strategy_class = FedoraStrategy - -class RedHat5Hostname(Hostname): - platform = 'Linux' - distribution = 'Redhat' - strategy_class = RedHatStrategy - -class RedHatServerHostname(Hostname): - platform = 'Linux' - distribution = 'Red hat enterprise linux server' - distribution_version = _get_distribution_version() - if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): - strategy_class = FedoraStrategy - else: - strategy_class = RedHatStrategy - -class RedHatWorkstationHostname(Hostname): - platform = 'Linux' - distribution = 'Red hat enterprise linux workstation' - distribution_version = _get_distribution_version() - if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): - strategy_class = FedoraStrategy - else: - strategy_class = RedHatStrategy - -class CentOSHostname(Hostname): - 
platform = 'Linux' - distribution = 'Centos' - distribution_version = _get_distribution_version() - if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): - strategy_class = FedoraStrategy - else: - strategy_class = RedHatStrategy - -class CentOSLinuxHostname(Hostname): - platform = 'Linux' - distribution = 'Centos linux' - distribution_version = _get_distribution_version() - if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): - strategy_class = FedoraStrategy - else: - strategy_class = RedHatStrategy - -class ScientificHostname(Hostname): - platform = 'Linux' - distribution = 'Scientific' - strategy_class = RedHatStrategy - -class ScientificLinuxHostname(Hostname): - platform = 'Linux' - distribution = 'Scientific linux' - strategy_class = RedHatStrategy - -class AmazonLinuxHostname(Hostname): - platform = 'Linux' - distribution = 'Amazon' - strategy_class = RedHatStrategy - -class DebianHostname(Hostname): - platform = 'Linux' - distribution = 'Debian' - strategy_class = DebianStrategy - -class UbuntuHostname(Hostname): - platform = 'Linux' - distribution = 'Ubuntu' - strategy_class = DebianStrategy - -class LinaroHostname(Hostname): - platform = 'Linux' - distribution = 'Linaro' - strategy_class = DebianStrategy - -class GentooHostname(Hostname): - platform = 'Linux' - distribution = 'Gentoo base system' - strategy_class = OpenRCStrategy - -# =========================================== - -def main(): - module = AnsibleModule( - argument_spec = dict( - name=dict(required=True, type='str') - ) - ) - - hostname = Hostname(module) - - changed = False - name = module.params['name'] - current_name = hostname.get_current_hostname() - if current_name != name: - hostname.set_current_hostname(name) - changed = True - - permanent_name = hostname.get_permanent_hostname() - if permanent_name != name: - hostname.set_permanent_hostname(name) - changed = True - - module.exit_json(changed=changed, name=name) - 
-main() diff --git a/library/system/kernel_blacklist b/library/system/kernel_blacklist deleted file mode 100644 index 6af08c0788..0000000000 --- a/library/system/kernel_blacklist +++ /dev/null @@ -1,141 +0,0 @@ -#!/usr/bin/python -# encoding: utf-8 -*- - -# (c) 2013, Matthias Vogelgesang -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -import os -import re - - -DOCUMENTATION = ''' ---- -module: kernel_blacklist -author: Matthias Vogelgesang -version_added: 1.4 -short_description: Blacklist kernel modules -description: - - Add or remove kernel modules from blacklist. -options: - name: - required: true - description: - - Name of kernel module to black- or whitelist. - state: - required: false - default: "present" - choices: [ present, absent ] - description: - - Whether the module should be present in the blacklist or absent. - blacklist_file: - required: false - description: - - If specified, use this blacklist file instead of - C(/etc/modprobe.d/blacklist-ansible.conf). 
- default: null -requirements: [] -''' - -EXAMPLES = ''' -# Blacklist the nouveau driver module -- kernel_blacklist: name=nouveau state=present -''' - - -class Blacklist(object): - def __init__(self, module, filename): - if not os.path.exists(filename): - open(filename, 'a').close() - - self.filename = filename - self.module = module - - def get_pattern(self): - return '^blacklist\s*' + self.module + '$' - - def readlines(self): - f = open(self.filename, 'r') - lines = f.readlines() - f.close() - return lines - - def module_listed(self): - lines = self.readlines() - pattern = self.get_pattern() - - for line in lines: - stripped = line.strip() - if stripped.startswith('#'): - continue - - if re.match(pattern, stripped): - return True - - return False - - def remove_module(self): - lines = self.readlines() - pattern = self.get_pattern() - - f = open(self.filename, 'w') - - for line in lines: - if not re.match(pattern, line.strip()): - f.write(line) - - f.close() - - def add_module(self): - f = open(self.filename, 'a') - f.write('blacklist %s\n' % self.module) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True), - state=dict(required=False, choices=['present', 'absent'], - default='present'), - blacklist_file=dict(required=False, default=None) - ), - supports_check_mode=False, - ) - - args = dict(changed=False, failed=False, - name=module.params['name'], state=module.params['state']) - - filename = '/etc/modprobe.d/blacklist-ansible.conf' - - if module.params['blacklist_file']: - filename = module.params['blacklist_file'] - - blacklist = Blacklist(args['name'], filename) - - if blacklist.module_listed(): - if args['state'] == 'absent': - blacklist.remove_module() - args['changed'] = True - else: - if args['state'] == 'present': - blacklist.add_module() - args['changed'] = True - - module.exit_json(**args) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/system/locale_gen 
b/library/system/locale_gen deleted file mode 100644 index 12eab8dbc8..0000000000 --- a/library/system/locale_gen +++ /dev/null @@ -1,151 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -import os -import os.path -from subprocess import Popen, PIPE, call - -DOCUMENTATION = ''' ---- -module: locale_gen -short_description: Creates of removes locales. -description: - - Manages locales by editing /etc/locale.gen and invoking locale-gen. -version_added: "1.6" -options: - name: - description: - - Name and encoding of the locale, such as "en_GB.UTF-8". - required: true - default: null - aliases: [] - state: - description: - - Whether the locale shall be present. - required: false - choices: ["present", "absent"] - default: "present" -''' - -EXAMPLES = ''' -# Ensure a locale exists. -- locale_gen: name=de_CH.UTF-8 state=present -''' - -# =========================================== -# location module specific support methods. -# - -def is_present(name): - """Checks if the given locale is currently installed.""" - output = Popen(["locale", "-a"], stdout=PIPE).communicate()[0] - return any(fix_case(name) == fix_case(line) for line in output.splitlines()) - -def fix_case(name): - """locale -a might return the encoding in either lower or upper case. - Passing through this function makes them uniform for comparisons.""" - return name.replace(".utf8", ".UTF-8") - -def replace_line(existing_line, new_line): - """Replaces lines in /etc/locale.gen""" - with open("/etc/locale.gen", "r") as f: - lines = [line.replace(existing_line, new_line) for line in f] - with open("/etc/locale.gen", "w") as f: - f.write("".join(lines)) - -def apply_change(targetState, name, encoding): - """Create or remove locale. - - Keyword arguments: - targetState -- Desired state, either present or absent. - name -- Name including encoding such as de_CH.UTF-8. - encoding -- Encoding such as UTF-8. - """ - if targetState=="present": - # Create locale. 
- replace_line("# "+name+" "+encoding, name+" "+encoding) - else: - # Delete locale. - replace_line(name+" "+encoding, "# "+name+" "+encoding) - - localeGenExitValue = call("locale-gen") - if localeGenExitValue!=0: - raise EnvironmentError(localeGenExitValue, "locale.gen failed to execute, it returned "+str(localeGenExitValue)) - -def apply_change_ubuntu(targetState, name, encoding): - """Create or remove locale. - - Keyword arguments: - targetState -- Desired state, either present or absent. - name -- Name including encoding such as de_CH.UTF-8. - encoding -- Encoding such as UTF-8. - """ - if targetState=="present": - # Create locale. - # Ubuntu's patched locale-gen automatically adds the new locale to /var/lib/locales/supported.d/local - localeGenExitValue = call(["locale-gen", name]) - else: - # Delete locale involves discarding the locale from /var/lib/locales/supported.d/local and regenerating all locales. - with open("/var/lib/locales/supported.d/local", "r") as f: - content = f.readlines() - with open("/var/lib/locales/supported.d/local", "w") as f: - for line in content: - if line!=(name+" "+encoding+"\n"): - f.write(line) - # Purge locales and regenerate. - # Please provide a patch if you know how to avoid regenerating the locales to keep! - localeGenExitValue = call(["locale-gen", "--purge"]) - - if localeGenExitValue!=0: - raise EnvironmentError(localeGenExitValue, "locale.gen failed to execute, it returned "+str(localeGenExitValue)) - -# ============================================================== -# main - -def main(): - - module = AnsibleModule( - argument_spec = dict( - name = dict(required=True), - state = dict(choices=['present','absent'], required=True), - ), - supports_check_mode=True - ) - - name = module.params['name'] - if not "." in name: - module.fail_json(msg="Locale does not match pattern. 
Did you specify the encoding?") - state = module.params['state'] - - if not os.path.exists("/etc/locale.gen"): - if os.path.exists("/var/lib/locales/supported.d/local"): - # Ubuntu created its own system to manage locales. - ubuntuMode = True - else: - module.fail_json(msg="/etc/locale.gen and /var/lib/locales/supported.d/local are missing. Is the package “locales†installed?") - else: - # We found the common way to manage locales. - ubuntuMode = False - - prev_state = "present" if is_present(name) else "absent" - changed = (prev_state!=state) - - if module.check_mode: - module.exit_json(changed=changed) - else: - encoding = name.split(".")[1] - if changed: - try: - if ubuntuMode==False: - apply_change(state, name, encoding) - else: - apply_change_ubuntu(state, name, encoding) - except EnvironmentError as e: - module.fail_json(msg=e.strerror, exitValue=e.errno) - - module.exit_json(name=name, changed=changed, msg="OK") - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/system/lvg b/library/system/lvg deleted file mode 100644 index b7a86a2720..0000000000 --- a/library/system/lvg +++ /dev/null @@ -1,253 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Alexander Bulimov -# based on lvol module by Jeroen Hoekx -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -DOCUMENTATION = ''' ---- -author: Alexander Bulimov -module: lvg -short_description: Configure LVM volume groups -description: - - This module creates, removes or resizes volume groups. -version_added: "1.1" -options: - vg: - description: - - The name of the volume group. - required: true - pvs: - description: - - List of comma-separated devices to use as physical devices in this volume group. Required when creating or resizing volume group. - required: false - pesize: - description: - - The size of the physical extent in megabytes. Must be a power of 2. - default: 4 - required: false - vg_options: - description: - - Additional options to pass to C(vgcreate) when creating the volume group. - default: null - required: false - version_added: "1.6" - state: - choices: [ "present", "absent" ] - default: present - description: - - Control if the volume group exists. - required: false - force: - choices: [ "yes", "no" ] - default: "no" - description: - - If yes, allows to remove volume group with logical volumes. - required: false -notes: - - module does not modify PE size for already present volume group -''' - -EXAMPLES = ''' -# Create a volume group on top of /dev/sda1 with physical extent size = 32MB. -- lvg: vg=vg.services pvs=/dev/sda1 pesize=32 - -# Create or resize a volume group on top of /dev/sdb1 and /dev/sdc5. -# If, for example, we already have VG vg.services on top of /dev/sdb1, -# this VG will be extended by /dev/sdc5. Or if vg.services was created on -# top of /dev/sda5, we first extend it with /dev/sdb1 and /dev/sdc5, -# and then reduce by /dev/sda5. -- lvg: vg=vg.services pvs=/dev/sdb1,/dev/sdc5 - -# Remove a volume group with name vg.services. 
-- lvg: vg=vg.services state=absent -''' - -def parse_vgs(data): - vgs = [] - for line in data.splitlines(): - parts = line.strip().split(';') - vgs.append({ - 'name': parts[0], - 'pv_count': int(parts[1]), - 'lv_count': int(parts[2]), - }) - return vgs - -def find_mapper_device_name(module, dm_device): - dmsetup_cmd = module.get_bin_path('dmsetup', True) - mapper_prefix = '/dev/mapper/' - rc, dm_name, err = module.run_command("%s info -C --noheadings -o name %s" % (dmsetup_cmd, dm_device)) - if rc != 0: - module.fail_json(msg="Failed executing dmsetup command.", rc=rc, err=err) - mapper_device = mapper_prefix + dm_name.rstrip() - return mapper_device - -def parse_pvs(module, data): - pvs = [] - dm_prefix = '/dev/dm-' - for line in data.splitlines(): - parts = line.strip().split(';') - if parts[0].startswith(dm_prefix): - parts[0] = find_mapper_device_name(module, parts[0]) - pvs.append({ - 'name': parts[0], - 'vg_name': parts[1], - }) - return pvs - -def main(): - module = AnsibleModule( - argument_spec = dict( - vg=dict(required=True), - pvs=dict(type='list'), - pesize=dict(type='int', default=4), - vg_options=dict(default=''), - state=dict(choices=["absent", "present"], default='present'), - force=dict(type='bool', default='no'), - ), - supports_check_mode=True, - ) - - vg = module.params['vg'] - state = module.params['state'] - force = module.boolean(module.params['force']) - pesize = module.params['pesize'] - vgoptions = module.params['vg_options'].split() - - if module.params['pvs']: - dev_string = ' '.join(module.params['pvs']) - dev_list = module.params['pvs'] - elif state == 'present': - module.fail_json(msg="No physical volumes given.") - - - - if state=='present': - ### check given devices - for test_dev in dev_list: - if not os.path.exists(test_dev): - module.fail_json(msg="Device %s not found."%test_dev) - - ### get pv list - pvs_cmd = module.get_bin_path('pvs', True) - rc,current_pvs,err = module.run_command("%s --noheadings -o pv_name,vg_name 
--separator ';'" % pvs_cmd) - if rc != 0: - module.fail_json(msg="Failed executing pvs command.",rc=rc, err=err) - - ### check pv for devices - pvs = parse_pvs(module, current_pvs) - used_pvs = [ pv for pv in pvs if pv['name'] in dev_list and pv['vg_name'] and pv['vg_name'] != vg ] - if used_pvs: - module.fail_json(msg="Device %s is already in %s volume group."%(used_pvs[0]['name'],used_pvs[0]['vg_name'])) - - vgs_cmd = module.get_bin_path('vgs', True) - rc,current_vgs,err = module.run_command("%s --noheadings -o vg_name,pv_count,lv_count --separator ';'" % vgs_cmd) - - if rc != 0: - module.fail_json(msg="Failed executing vgs command.",rc=rc, err=err) - - changed = False - - vgs = parse_vgs(current_vgs) - - for test_vg in vgs: - if test_vg['name'] == vg: - this_vg = test_vg - break - else: - this_vg = None - - if this_vg is None: - if state == 'present': - ### create VG - if module.check_mode: - changed = True - else: - ### create PV - pvcreate_cmd = module.get_bin_path('pvcreate', True) - for current_dev in dev_list: - rc,_,err = module.run_command("%s %s" % (pvcreate_cmd,current_dev)) - if rc == 0: - changed = True - else: - module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err) - vgcreate_cmd = module.get_bin_path('vgcreate') - rc,_,err = module.run_command([vgcreate_cmd] + vgoptions + ['-s', str(pesize), vg, dev_string]) - if rc == 0: - changed = True - else: - module.fail_json(msg="Creating volume group '%s' failed"%vg, rc=rc, err=err) - else: - if state == 'absent': - if module.check_mode: - module.exit_json(changed=True) - else: - if this_vg['lv_count'] == 0 or force: - ### remove VG - vgremove_cmd = module.get_bin_path('vgremove', True) - rc,_,err = module.run_command("%s --force %s" % (vgremove_cmd, vg)) - if rc == 0: - module.exit_json(changed=True) - else: - module.fail_json(msg="Failed to remove volume group %s"%(vg),rc=rc, err=err) - else: - module.fail_json(msg="Refuse to remove non-empty volume group %s without 
force=yes"%(vg)) - - ### resize VG - current_devs = [ pv['name'] for pv in pvs if pv['vg_name'] == vg ] - devs_to_remove = list(set(current_devs) - set(dev_list)) - devs_to_add = list(set(dev_list) - set(current_devs)) - - if devs_to_add or devs_to_remove: - if module.check_mode: - changed = True - else: - if devs_to_add: - devs_to_add_string = ' '.join(devs_to_add) - ### create PV - pvcreate_cmd = module.get_bin_path('pvcreate', True) - for current_dev in devs_to_add: - rc,_,err = module.run_command("%s %s" % (pvcreate_cmd, current_dev)) - if rc == 0: - changed = True - else: - module.fail_json(msg="Creating physical volume '%s' failed"%current_dev, rc=rc, err=err) - ### add PV to our VG - vgextend_cmd = module.get_bin_path('vgextend', True) - rc,_,err = module.run_command("%s %s %s" % (vgextend_cmd, vg, devs_to_add_string)) - if rc == 0: - changed = True - else: - module.fail_json(msg="Unable to extend %s by %s."%(vg, devs_to_add_string),rc=rc,err=err) - - ### remove some PV from our VG - if devs_to_remove: - devs_to_remove_string = ' '.join(devs_to_remove) - vgreduce_cmd = module.get_bin_path('vgreduce', True) - rc,_,err = module.run_command("%s --force %s %s" % (vgreduce_cmd, vg, devs_to_remove_string)) - if rc == 0: - changed = True - else: - module.fail_json(msg="Unable to reduce %s by %s."%(vg, devs_to_remove_string),rc=rc,err=err) - - module.exit_json(changed=changed) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/system/lvol b/library/system/lvol deleted file mode 100644 index 96f1b846e2..0000000000 --- a/library/system/lvol +++ /dev/null @@ -1,235 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Jeroen Hoekx , Alexander Bulimov -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your 
option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -author: Jeroen Hoekx -module: lvol -short_description: Configure LVM logical volumes -description: - - This module creates, removes or resizes logical volumes. -version_added: "1.1" -options: - vg: - description: - - The volume group this logical volume is part of. - required: true - lv: - description: - - The name of the logical volume. - required: true - size: - description: - - The size of the logical volume, according to lvcreate(8) --size, by - default in megabytes or optionally with one of [bBsSkKmMgGtTpPeE] units; or - according to lvcreate(8) --extents as a percentage of [VG|PVS|FREE]; - resizing is not supported with percentages. - state: - choices: [ "present", "absent" ] - default: present - description: - - Control if the logical volume exists. - required: false - force: - version_added: "1.5" - choices: [ "yes", "no" ] - default: "no" - description: - - Shrink or remove operations of volumes requires this switch. Ensures that - that filesystems get never corrupted/destroyed by mistake. - required: false -notes: - - Filesystems on top of the volume are not resized. -''' - -EXAMPLES = ''' -# Create a logical volume of 512m. -- lvol: vg=firefly lv=test size=512 - -# Create a logical volume of 512g. -- lvol: vg=firefly lv=test size=512g - -# Create a logical volume the size of all remaining space in the volume group -- lvol: vg=firefly lv=test size=100%FREE - -# Extend the logical volume to 1024m. -- lvol: vg=firefly lv=test size=1024 - -# Reduce the logical volume to 512m -- lvol: vg=firefly lv=test size=512 force=yes - -# Remove the logical volume. 
-- lvol: vg=firefly lv=test state=absent force=yes -''' - -import re - -decimal_point = re.compile(r"(\.|,)") - - -def parse_lvs(data): - lvs = [] - for line in data.splitlines(): - parts = line.strip().split(';') - lvs.append({ - 'name': parts[0], - 'size': int(decimal_point.split(parts[1])[0]), - }) - return lvs - - -def main(): - module = AnsibleModule( - argument_spec=dict( - vg=dict(required=True), - lv=dict(required=True), - size=dict(), - state=dict(choices=["absent", "present"], default='present'), - force=dict(type='bool', default='no'), - ), - supports_check_mode=True, - ) - - vg = module.params['vg'] - lv = module.params['lv'] - size = module.params['size'] - state = module.params['state'] - force = module.boolean(module.params['force']) - size_opt = 'L' - size_unit = 'm' - - if size: - # LVCREATE(8) -l --extents option with percentage - if '%' in size: - size_parts = size.split('%', 1) - size_percent = int(size_parts[0]) - if size_percent > 100: - module.fail_json(msg="Size percentage cannot be larger than 100%") - size_whole = size_parts[1] - if size_whole == 'ORIGIN': - module.fail_json(msg="Snapshot Volumes are not supported") - elif size_whole not in ['VG', 'PVS', 'FREE']: - module.fail_json(msg="Specify extents as a percentage of VG|PVS|FREE") - size_opt = 'l' - size_unit = '' - - # LVCREATE(8) -L --size option unit - elif size[-1].isalpha(): - if size[-1] in 'bBsSkKmMgGtTpPeE': - size_unit = size[-1] - if size[0:-1].isdigit(): - size = int(size[0:-1]) - else: - module.fail_json(msg="Bad size specification for unit %s" % size_unit) - size_opt = 'L' - else: - module.fail_json(msg="Size unit should be one of [bBsSkKmMgGtTpPeE]") - # when no unit, megabytes by default - elif size.isdigit(): - size = int(size) - else: - module.fail_json(msg="Bad size specification") - - if size_opt == 'l': - unit = 'm' - else: - unit = size_unit - - rc, current_lvs, err = module.run_command( - "lvs --noheadings -o lv_name,size --units %s --separator ';' %s" % (unit, 
vg)) - - if rc != 0: - if state == 'absent': - module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg, stderr=False) - else: - module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err) - - changed = False - - lvs = parse_lvs(current_lvs) - - for test_lv in lvs: - if test_lv['name'] == lv: - this_lv = test_lv - break - else: - this_lv = None - - if state == 'present' and not size: - if this_lv is None: - module.fail_json(msg="No size given.") - else: - module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size']) - - msg = '' - if this_lv is None: - if state == 'present': - ### create LV - if module.check_mode: - changed = True - else: - rc, _, err = module.run_command("lvcreate -n %s -%s %s%s %s" % (lv, size_opt, size, size_unit, vg)) - if rc == 0: - changed = True - else: - module.fail_json(msg="Creating logical volume '%s' failed" % lv, rc=rc, err=err) - else: - if state == 'absent': - ### remove LV - if module.check_mode: - module.exit_json(changed=True) - if not force: - module.fail_json(msg="Sorry, no removal of logical volume %s without force=yes." % (this_lv['name'])) - rc, _, err = module.run_command("lvremove --force %s/%s" % (vg, this_lv['name'])) - if rc == 0: - module.exit_json(changed=True) - else: - module.fail_json(msg="Failed to remove logical volume %s" % (lv), rc=rc, err=err) - - elif size_opt == 'l': - module.exit_json(changed=False, msg="Resizing extents with percentage not supported.") - else: - ### resize LV - tool = None - if size > this_lv['size']: - tool = 'lvextend' - elif size < this_lv['size']: - if not force: - module.fail_json(msg="Sorry, no shrinking of %s without force=yes." 
% (this_lv['name'])) - tool = 'lvreduce --force' - - if tool: - if module.check_mode: - changed = True - else: - rc, _, err = module.run_command("%s -%s %s%s %s/%s" % (tool, size_opt, size, size_unit, vg, this_lv['name'])) - if rc == 0: - changed = True - elif "matches existing size" in err: - module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size']) - else: - module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err) - - module.exit_json(changed=changed, msg=msg) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/system/modprobe b/library/system/modprobe deleted file mode 100644 index 50c8f72fb2..0000000000 --- a/library/system/modprobe +++ /dev/null @@ -1,115 +0,0 @@ -#!/usr/bin/python -#coding: utf-8 -*- - -# (c) 2013, David Stygstra -# -# This file is part of Ansible -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . - - -DOCUMENTATION = ''' ---- -module: modprobe -short_description: Add or remove kernel modules -requirements: [] -version_added: 1.4 -author: David Stygstra, Julien Dauphant, Matt Jeffery -description: - - Add or remove kernel modules. -options: - name: - required: true - description: - - Name of kernel module to manage. - state: - required: false - default: "present" - choices: [ present, absent ] - description: - - Whether the module should be present or absent. 
- params: - required: false - default: "" - version_added: "1.6" - description: - - Modules parameters. -''' - -EXAMPLES = ''' -# Add the 802.1q module -- modprobe: name=8021q state=present -# Add the dummy module -- modprobe: name=dummy state=present params="numdummies=2" -''' - -def main(): - module = AnsibleModule( - argument_spec={ - 'name': {'required': True}, - 'state': {'default': 'present', 'choices': ['present', 'absent']}, - 'params': {'default': ''}, - }, - supports_check_mode=True, - ) - args = { - 'changed': False, - 'failed': False, - 'name': module.params['name'], - 'state': module.params['state'], - 'params': module.params['params'], - } - - # Check if module is present - try: - modules = open('/proc/modules') - present = False - module_name = args['name'].replace('-', '_') + ' ' - for line in modules: - if line.startswith(module_name): - present = True - break - modules.close() - except IOError, e: - module.fail_json(msg=str(e), **args) - - # Check only; don't modify - if module.check_mode: - if args['state'] == 'present' and not present: - changed = True - elif args['state'] == 'absent' and present: - changed = True - else: - changed = False - module.exit_json(changed=changed) - - # Add/remove module as needed - if args['state'] == 'present': - if not present: - rc, _, err = module.run_command(['modprobe', args['name'], args['params']]) - if rc != 0: - module.fail_json(msg=err, **args) - args['changed'] = True - elif args['state'] == 'absent': - if present: - rc, _, err = module.run_command(['rmmod', args['name']]) - if rc != 0: - module.fail_json(msg=err, **args) - args['changed'] = True - - module.exit_json(**args) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/system/mount b/library/system/mount deleted file mode 100755 index 9dc6fbe7b8..0000000000 --- a/library/system/mount +++ /dev/null @@ -1,338 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Red Hat, inc -# Written by 
Seth Vidal -# based on the mount modules from salt and puppet -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: mount -short_description: Control active and configured mount points -description: - - This module controls active and configured mount points in C(/etc/fstab). -version_added: "0.6" -options: - name: - description: - - "path to the mount point, eg: C(/mnt/files)" - required: true - default: null - aliases: [] - src: - description: - - device to be mounted on I(name). - required: true - default: null - fstype: - description: - - file-system type - required: true - default: null - opts: - description: - - mount options (see fstab(8)) - required: false - default: null - dump: - description: - - dump (see fstab(8)) - required: false - default: null - passno: - description: - - passno (see fstab(8)) - required: false - default: null - state: - description: - - If C(mounted) or C(unmounted), the device will be actively mounted or unmounted - as needed and appropriately configured in I(fstab). - C(absent) and C(present) only deal with - I(fstab) but will not affect current mounting. If specifying C(mounted) and the mount - point is not present, the mount point will be created. Similarly, specifying C(absent) will remove the mount point directory. 
- required: true - choices: [ "present", "absent", "mounted", "unmounted" ] - default: null - fstab: - description: - - file to use instead of C(/etc/fstab). You shouldn't use that option - unless you really know what you are doing. This might be useful if - you need to configure mountpoints in a chroot environment. - required: false - default: /etc/fstab - -notes: [] -requirements: [] -author: Seth Vidal -''' -EXAMPLES = ''' -# Mount DVD read-only -- mount: name=/mnt/dvd src=/dev/sr0 fstype=iso9660 opts=ro state=present - -# Mount up device by label -- mount: name=/srv/disk src='LABEL=SOME_LABEL' fstype=ext4 state=present - -# Mount up device by UUID -- mount: name=/home src='UUID=b3e48f45-f933-4c8e-a700-22a159ec9077' fstype=xfs opts=noatime state=present -''' - - -def write_fstab(lines, dest): - - fs_w = open(dest, 'w') - for l in lines: - fs_w.write(l) - - fs_w.flush() - fs_w.close() - -def set_mount(**kwargs): - """ set/change a mount point location in fstab """ - - # kwargs: name, src, fstype, opts, dump, passno, state, fstab=/etc/fstab - args = dict( - opts = 'defaults', - dump = '0', - passno = '0', - fstab = '/etc/fstab' - ) - args.update(kwargs) - - new_line = '%(src)s %(name)s %(fstype)s %(opts)s %(dump)s %(passno)s\n' - - to_write = [] - exists = False - changed = False - for line in open(args['fstab'], 'r').readlines(): - if not line.strip(): - to_write.append(line) - continue - if line.strip().startswith('#'): - to_write.append(line) - continue - if len(line.split()) != 6: - # not sure what this is or why it is here - # but it is not our fault so leave it be - to_write.append(line) - continue - - ld = {} - ld['src'], ld['name'], ld['fstype'], ld['opts'], ld['dump'], ld['passno'] = line.split() - - if ld['name'] != args['name']: - to_write.append(line) - continue - - # it exists - now see if what we have is different - exists = True - for t in ('src', 'fstype','opts', 'dump', 'passno'): - if ld[t] != args[t]: - changed = True - ld[t] = args[t] - - if 
changed: - to_write.append(new_line % ld) - else: - to_write.append(line) - - if not exists: - to_write.append(new_line % args) - changed = True - - if changed: - write_fstab(to_write, args['fstab']) - - return (args['name'], changed) - - -def unset_mount(**kwargs): - """ remove a mount point from fstab """ - - # kwargs: name, src, fstype, opts, dump, passno, state, fstab=/etc/fstab - args = dict( - opts = 'default', - dump = '0', - passno = '0', - fstab = '/etc/fstab' - ) - args.update(kwargs) - - to_write = [] - changed = False - for line in open(args['fstab'], 'r').readlines(): - if not line.strip(): - to_write.append(line) - continue - if line.strip().startswith('#'): - to_write.append(line) - continue - if len(line.split()) != 6: - # not sure what this is or why it is here - # but it is not our fault so leave it be - to_write.append(line) - continue - - ld = {} - ld['src'], ld['name'], ld['fstype'], ld['opts'], ld['dump'], ld['passno'] = line.split() - - if ld['name'] != args['name']: - to_write.append(line) - continue - - # if we got here we found a match - continue and mark changed - changed = True - - if changed: - write_fstab(to_write, args['fstab']) - - return (args['name'], changed) - - -def mount(module, **kwargs): - """ mount up a path or remount if needed """ - mount_bin = module.get_bin_path('mount') - - name = kwargs['name'] - if os.path.ismount(name): - cmd = [ mount_bin , '-o', 'remount', name ] - else: - cmd = [ mount_bin, name ] - - rc, out, err = module.run_command(cmd) - if rc == 0: - return 0, '' - else: - return rc, out+err - -def umount(module, **kwargs): - """ unmount a path """ - - umount_bin = module.get_bin_path('umount') - name = kwargs['name'] - cmd = [umount_bin, name] - - rc, out, err = module.run_command(cmd) - if rc == 0: - return 0, '' - else: - return rc, out+err - -def main(): - - module = AnsibleModule( - argument_spec = dict( - state = dict(required=True, choices=['present', 'absent', 'mounted', 'unmounted']), - name = 
dict(required=True), - opts = dict(default=None), - passno = dict(default=None), - dump = dict(default=None), - src = dict(required=True), - fstype = dict(required=True), - fstab = dict(default='/etc/fstab') - ) - ) - - - changed = False - rc = 0 - args = { - 'name': module.params['name'], - 'src': module.params['src'], - 'fstype': module.params['fstype'] - } - if module.params['passno'] is not None: - args['passno'] = module.params['passno'] - if module.params['opts'] is not None: - args['opts'] = module.params['opts'] - if ' ' in args['opts']: - module.fail_json(msg="unexpected space in 'opts' parameter") - if module.params['dump'] is not None: - args['dump'] = module.params['dump'] - if module.params['fstab'] is not None: - args['fstab'] = module.params['fstab'] - - # if fstab file does not exist, we first need to create it. This mainly - # happens when fstab optin is passed to the module. - if not os.path.exists(args['fstab']): - if not os.path.exists(os.path.dirname(args['fstab'])): - os.makedirs(os.path.dirname(args['fstab'])) - open(args['fstab'],'a').close() - - # absent == remove from fstab and unmounted - # unmounted == do not change fstab state, but unmount - # present == add to fstab, do not change mount state - # mounted == add to fstab if not there and make sure it is mounted, if it has changed in fstab then remount it - - state = module.params['state'] - name = module.params['name'] - if state == 'absent': - name, changed = unset_mount(**args) - if changed: - if os.path.ismount(name): - res,msg = umount(module, **args) - if res: - module.fail_json(msg="Error unmounting %s: %s" % (name, msg)) - - if os.path.exists(name): - try: - os.rmdir(name) - except (OSError, IOError), e: - module.fail_json(msg="Error rmdir %s: %s" % (name, str(e))) - - module.exit_json(changed=changed, **args) - - if state == 'unmounted': - if os.path.ismount(name): - res,msg = umount(module, **args) - if res: - module.fail_json(msg="Error unmounting %s: %s" % (name, msg)) - 
changed = True - - module.exit_json(changed=changed, **args) - - if state in ['mounted', 'present']: - if state == 'mounted': - if not os.path.exists(name): - try: - os.makedirs(name) - except (OSError, IOError), e: - module.fail_json(msg="Error making dir %s: %s" % (name, str(e))) - - name, changed = set_mount(**args) - if state == 'mounted': - res = 0 - if os.path.ismount(name): - if changed: - res,msg = mount(module, **args) - else: - changed = True - res,msg = mount(module, **args) - - if res: - module.fail_json(msg="Error mounting %s: %s" % (name, msg)) - - - module.exit_json(changed=changed, **args) - - module.fail_json(msg='Unexpected position reached') - sys.exit(0) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/system/ohai b/library/system/ohai deleted file mode 100644 index b50abc9db0..0000000000 --- a/library/system/ohai +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -DOCUMENTATION = ''' ---- -module: ohai -short_description: Returns inventory data from I(Ohai) -description: - - Similar to the M(facter) module, this runs the I(Ohai) discovery program - (U(http://wiki.opscode.com/display/chef/Ohai)) on the remote host and - returns JSON inventory data. - I(Ohai) data is a bit more verbose and nested than I(facter). 
-version_added: "0.6" -options: {} -notes: [] -requirements: [ "ohai" ] -author: Michael DeHaan -''' - -EXAMPLES = ''' -# Retrieve (ohai) data from all Web servers and store in one-file per host -ansible webservers -m ohai --tree=/tmp/ohaidata -''' - -def main(): - module = AnsibleModule( - argument_spec = dict() - ) - cmd = ["/usr/bin/env", "ohai"] - rc, out, err = module.run_command(cmd, check_rc=True) - module.exit_json(**json.loads(out)) - -# import module snippets -from ansible.module_utils.basic import * - -main() - - diff --git a/library/system/open_iscsi b/library/system/open_iscsi deleted file mode 100644 index c661a723d7..0000000000 --- a/library/system/open_iscsi +++ /dev/null @@ -1,379 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Serge van Ginderachter -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: open_iscsi -author: Serge van Ginderachter -version_added: "1.4" -short_description: Manage iscsi targets with open-iscsi -description: - - Discover targets on given portal, (dis)connect targets, mark targets to - manually or auto start, return device nodes of connected targets. 
-requirements: - - open_iscsi library and tools (iscsiadm) -options: - portal: - required: false - aliases: [ip] - description: - - the ip address of the iscsi target - port: - required: false - default: 3260 - description: - - the port on which the iscsi target process listens - target: - required: false - aliases: [name, targetname] - description: - - the iscsi target name - login: - required: false - choices: [true, false] - description: - - whether the target node should be connected - node_auth: - required: false - default: CHAP - description: - - discovery.sendtargets.auth.authmethod - node_user: - required: false - description: - - discovery.sendtargets.auth.username - node_pass: - required: false - description: - - discovery.sendtargets.auth.password - auto_node_startup: - aliases: [automatic] - required: false - choices: [true, false] - description: - - whether the target node should be automatically connected at startup - discover: - required: false - choices: [true, false] - description: - - whether the list of target nodes on the portal should be - (re)discovered and added to the persistent iscsi database. - Keep in mind that iscsiadm discovery resets configurtion, like node.startup - to manual, hence combined with auto_node_startup=yes will allways return - a changed state. 
- show_nodes: - required: false - choices: [true, false] - description: - - whether the list of nodes in the persistent iscsi database should be - returned by the module - -examples: - - description: perform a discovery on 10.1.2.3 and show available target - nodes - code: > - open_iscsi: show_nodes=yes discover=yes portal=10.1.2.3 - - description: discover targets on portal and login to the one available - (only works if exactly one target is exported to the initiator) - code: > - open_iscsi: portal={{iscsi_target}} login=yes discover=yes - - description: connect to the named target, after updating the local - persistent database (cache) - code: > - open_iscsi: login=yes target=iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d - - description: discconnect from the cached named target - code: > - open_iscsi: login=no target=iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d" -''' - -import glob -import time - -ISCSIADM = 'iscsiadm' - -def compare_nodelists(l1, l2): - - l1.sort() - l2.sort() - return l1 == l2 - - -def iscsi_get_cached_nodes(module, portal=None): - - cmd = '%s --mode node' % iscsiadm_cmd - (rc, out, err) = module.run_command(cmd) - - if rc == 0: - lines = out.splitlines() - nodes = [] - for line in lines: - # line format is "ip:port,target_portal_group_tag targetname" - parts = line.split() - if len(parts) > 2: - module.fail_json(msg='error parsing output', cmd=cmd) - target = parts[1] - parts = parts[0].split(':') - target_portal = parts[0] - - if portal is None or portal == target_portal: - nodes.append(target) - - # older versions of scsiadm don't have nice return codes - # for newer versions see iscsiadm(8); also usr/iscsiadm.c for details - # err can contain [N|n]o records... 
- elif rc == 21 or (rc == 255 and "o records found" in err): - nodes = [] - else: - module.fail_json(cmd=cmd, rc=rc, msg=err) - - return nodes - - -def iscsi_discover(module, portal, port): - - cmd = '%s --mode discovery --type sendtargets --portal %s:%s' % (iscsiadm_cmd, portal, port) - (rc, out, err) = module.run_command(cmd) - - if rc > 0: - module.fail_json(cmd=cmd, rc=rc, msg=err) - - -def target_loggedon(module, target): - - cmd = '%s --mode session' % iscsiadm_cmd - (rc, out, err) = module.run_command(cmd) - - if rc == 0: - return target in out - elif rc == 21: - return False - else: - module.fail_json(cmd=cmd, rc=rc, msg=err) - - -def target_login(module, target): - - node_auth = module.params['node_auth'] - node_user = module.params['node_user'] - node_pass = module.params['node_pass'] - - if node_user: - params = [('node.session.auth.authmethod', node_auth), - ('node.session.auth.username', node_user), - ('node.session.auth.password', node_pass)] - for (name, value) in params: - cmd = '%s --mode node --targetname %s --op=update --name %s --value %s' % (iscsiadm_cmd, target, name, value) - (rc, out, err) = module.run_command(cmd) - if rc > 0: - module.fail_json(cmd=cmd, rc=rc, msg=err) - - cmd = '%s --mode node --targetname %s --login' % (iscsiadm_cmd, target) - (rc, out, err) = module.run_command(cmd) - - if rc > 0: - module.fail_json(cmd=cmd, rc=rc, msg=err) - - -def target_logout(module, target): - - cmd = '%s --mode node --targetname %s --logout' % (iscsiadm_cmd, target) - (rc, out, err) = module.run_command(cmd) - - if rc > 0: - module.fail_json(cmd=cmd, rc=rc, msg=err) - - -def target_device_node(module, target): - - # if anyone know a better way to find out which devicenodes get created for - # a given target... 
- - devices = glob.glob('/dev/disk/by-path/*%s*' % target) - if len(devices) == 0: - return None - else: - devdisks = [] - for dev in devices: - # exclude partitions - if "-part" not in dev: - devdisk = os.path.realpath(dev) - # only add once (multi-path?) - if devdisk not in devdisks: - devdisks.append(devdisk) - return devdisks - - -def target_isauto(module, target): - - cmd = '%s --mode node --targetname %s' % (iscsiadm_cmd, target) - (rc, out, err) = module.run_command(cmd) - - if rc == 0: - lines = out.splitlines() - for line in lines: - if 'node.startup' in line: - return 'automatic' in line - return False - else: - module.fail_json(cmd=cmd, rc=rc, msg=err) - - -def target_setauto(module, target): - - cmd = '%s --mode node --targetname %s --op=update --name node.startup --value automatic' % (iscsiadm_cmd, target) - (rc, out, err) = module.run_command(cmd) - - if rc > 0: - module.fail_json(cmd=cmd, rc=rc, msg=err) - - -def target_setmanual(module, target): - - cmd = '%s --mode node --targetname %s --op=update --name node.startup --value manual' % (iscsiadm_cmd, target) - (rc, out, err) = module.run_command(cmd) - - if rc > 0: - module.fail_json(cmd=cmd, rc=rc, msg=err) - - -def main(): - - # load ansible module object - module = AnsibleModule( - argument_spec = dict( - - # target - portal = dict(required=False, aliases=['ip']), - port = dict(required=False, default=3260), - target = dict(required=False, aliases=['name', 'targetname']), - node_auth = dict(required=False, default='CHAP'), - node_user = dict(required=False), - node_pass = dict(required=False), - - # actions - login = dict(type='bool', aliases=['state']), - auto_node_startup = dict(type='bool', aliases=['automatic']), - discover = dict(type='bool', default=False), - show_nodes = dict(type='bool', default=False) - ), - - required_together=[['discover_user', 'discover_pass'], - ['node_user', 'node_pass']], - supports_check_mode=True - ) - - global iscsiadm_cmd - iscsiadm_cmd = 
module.get_bin_path('iscsiadm', required=True) - - # parameters - portal = module.params['portal'] - target = module.params['target'] - port = module.params['port'] - login = module.params['login'] - automatic = module.params['auto_node_startup'] - discover = module.params['discover'] - show_nodes = module.params['show_nodes'] - - check = module.check_mode - - cached = iscsi_get_cached_nodes(module, portal) - - # return json dict - result = {} - result['changed'] = False - - if discover: - if portal is None: - module.fail_json(msg = "Need to specify at least the portal (ip) to discover") - elif check: - nodes = cached - else: - iscsi_discover(module, portal, port) - nodes = iscsi_get_cached_nodes(module, portal) - if not compare_nodelists(cached, nodes): - result['changed'] |= True - result['cache_updated'] = True - else: - nodes = cached - - if login is not None or automatic is not None: - if target is None: - if len(nodes) > 1: - module.fail_json(msg = "Need to specify a target") - else: - target = nodes[0] - else: - # check given target is in cache - check_target = False - for node in nodes: - if node == target: - check_target = True - break - if not check_target: - module.fail_json(msg = "Specified target not found") - - if show_nodes: - result['nodes'] = nodes - - if login is not None: - loggedon = target_loggedon(module,target) - if (login and loggedon) or (not login and not loggedon): - result['changed'] |= False - if login: - result['devicenodes'] = target_device_node(module,target) - elif not check: - if login: - target_login(module, target) - # give udev some time - time.sleep(1) - result['devicenodes'] = target_device_node(module,target) - else: - target_logout(module, target) - result['changed'] |= True - result['connection_changed'] = True - else: - result['changed'] |= True - result['connection_changed'] = True - - if automatic is not None: - isauto = target_isauto(module, target) - if (automatic and isauto) or (not automatic and not isauto): - 
result['changed'] |= False - result['automatic_changed'] = False - elif not check: - if automatic: - target_setauto(module, target) - else: - target_setmanual(module, target) - result['changed'] |= True - result['automatic_changed'] = True - else: - result['changed'] |= True - result['automatic_changed'] = True - - module.exit_json(**result) - - - -# import module snippets -from ansible.module_utils.basic import * - -main() - diff --git a/library/system/ping b/library/system/ping deleted file mode 100644 index b098d0054c..0000000000 --- a/library/system/ping +++ /dev/null @@ -1,59 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - - -DOCUMENTATION = ''' ---- -module: ping -version_added: historical -short_description: Try to connect to host and return C(pong) on success. -description: - - A trivial test module, this module always returns C(pong) on successful - contact. 
It does not make sense in playbooks, but it is useful from - C(/usr/bin/ansible) -options: {} -author: Michael DeHaan -''' - -EXAMPLES = ''' -# Test 'webservers' status -ansible webservers -m ping -''' - -import exceptions - -def main(): - module = AnsibleModule( - argument_spec = dict( - data=dict(required=False, default=None), - ), - supports_check_mode = True - ) - result = dict(ping='pong') - if module.params['data']: - if module.params['data'] == 'crash': - raise exceptions.Exception("boom") - result['ping'] = module.params['data'] - module.exit_json(**result) - -from ansible.module_utils.basic import * - -main() - diff --git a/library/system/seboolean b/library/system/seboolean deleted file mode 100644 index 9799e71636..0000000000 --- a/library/system/seboolean +++ /dev/null @@ -1,212 +0,0 @@ -#!/usr/bin/python - -# (c) 2012, Stephen Fromm -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: seboolean -short_description: Toggles SELinux booleans. -description: - - Toggles SELinux booleans. 
-version_added: "0.7" -options: - name: - description: - - Name of the boolean to configure - required: true - default: null - persistent: - description: - - Set to C(yes) if the boolean setting should survive a reboot - required: false - default: no - choices: [ "yes", "no" ] - state: - description: - - Desired boolean value - required: true - default: null - choices: [ 'yes', 'no' ] -notes: - - Not tested on any debian based system -requirements: [ ] -author: Stephen Fromm -''' - -EXAMPLES = ''' -# Set (httpd_can_network_connect) flag on and keep it persistent across reboots -- seboolean: name=httpd_can_network_connect state=yes persistent=yes -''' - -try: - import selinux - HAVE_SELINUX=True -except ImportError: - HAVE_SELINUX=False - -try: - import semanage - HAVE_SEMANAGE=True -except ImportError: - HAVE_SEMANAGE=False - -def has_boolean_value(module, name): - bools = [] - try: - rc, bools = selinux.security_get_boolean_names() - except OSError, e: - module.fail_json(msg="Failed to get list of boolean names") - if name in bools: - return True - else: - return False - -def get_boolean_value(module, name): - state = 0 - try: - state = selinux.security_get_boolean_active(name) - except OSError, e: - module.fail_json(msg="Failed to determine current state for boolean %s" % name) - if state == 1: - return True - else: - return False - -# The following method implements what setsebool.c does to change -# a boolean and make it persist after reboot.. 
-def semanage_boolean_value(module, name, state): - rc = 0 - value = 0 - if state: - value = 1 - handle = semanage.semanage_handle_create() - if handle is None: - module.fail_json(msg="Failed to create semanage library handle") - try: - managed = semanage.semanage_is_managed(handle) - if managed < 0: - module.fail_json(msg="Failed to determine whether policy is manage") - if managed == 0: - if os.getuid() == 0: - module.fail_json(msg="Cannot set persistent booleans without managed policy") - else: - module.fail_json(msg="Cannot set persistent booleans; please try as root") - if semanage.semanage_connect(handle) < 0: - module.fail_json(msg="Failed to connect to semanage") - - if semanage.semanage_begin_transaction(handle) < 0: - module.fail_json(msg="Failed to begin semanage transaction") - - rc, sebool = semanage.semanage_bool_create(handle) - if rc < 0: - module.fail_json(msg="Failed to create seboolean with semanage") - if semanage.semanage_bool_set_name(handle, sebool, name) < 0: - module.fail_json(msg="Failed to set seboolean name with semanage") - semanage.semanage_bool_set_value(sebool, value) - - rc, boolkey = semanage.semanage_bool_key_extract(handle, sebool) - if rc < 0: - module.fail_json(msg="Failed to extract boolean key with semanage") - - if semanage.semanage_bool_modify_local(handle, boolkey, sebool) < 0: - module.fail_json(msg="Failed to modify boolean key with semanage") - - if semanage.semanage_bool_set_active(handle, boolkey, sebool) < 0: - module.fail_json(msg="Failed to set boolean key active with semanage") - - semanage.semanage_bool_key_free(boolkey) - semanage.semanage_bool_free(sebool) - - semanage.semanage_set_reload(handle, 0) - if semanage.semanage_commit(handle) < 0: - module.fail_json(msg="Failed to commit changes to semanage") - - semanage.semanage_disconnect(handle) - semanage.semanage_handle_destroy(handle) - except Exception, e: - module.fail_json(msg="Failed to manage policy for boolean %s: %s" % (name, str(e))) - return True - 
-def set_boolean_value(module, name, state): - rc = 0 - value = 0 - if state: - value = 1 - try: - rc = selinux.security_set_boolean(name, value) - except OSError, e: - module.fail_json(msg="Failed to set boolean %s to %s" % (name, value)) - if rc == 0: - return True - else: - return False - -def main(): - module = AnsibleModule( - argument_spec = dict( - name=dict(required=True), - persistent=dict(default='no', type='bool'), - state=dict(required=True, type='bool') - ), - supports_check_mode=True - ) - - if not HAVE_SELINUX: - module.fail_json(msg="This module requires libselinux-python support") - - if not HAVE_SEMANAGE: - module.fail_json(msg="This module requires libsemanage-python support") - - if not selinux.is_selinux_enabled(): - module.fail_json(msg="SELinux is disabled on this host.") - - name = module.params['name'] - persistent = module.params['persistent'] - state = module.params['state'] - result = {} - result['name'] = name - - if not has_boolean_value(module, name): - module.fail_json(msg="SELinux boolean %s does not exist." 
% name) - - cur_value = get_boolean_value(module, name) - - if cur_value == state: - result['state'] = cur_value - result['changed'] = False - module.exit_json(**result) - - if module.check_mode: - module.exit_json(changed=True) - if persistent: - r = semanage_boolean_value(module, name, state) - else: - r = set_boolean_value(module, name, state) - - result['changed'] = r - if not r: - module.fail_json(msg="Failed to set boolean %s to %s" % (name, value)) - try: - selinux.security_commit_booleans() - except: - module.fail_json(msg="Failed to commit pending boolean %s value" % name) - module.exit_json(**result) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/system/selinux b/library/system/selinux deleted file mode 100644 index 53e53d1d49..0000000000 --- a/library/system/selinux +++ /dev/null @@ -1,203 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Derek Carter -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: selinux -short_description: Change policy and state of SELinux -description: - - Configures the SELinux mode and policy. A reboot may be required after usage. Ansible will not issue this reboot but will let you know when it is required. 
-version_added: "0.7" -options: - policy: - description: - - "name of the SELinux policy to use (example: C(targeted)) will be required if state is not C(disabled)" - required: false - default: null - state: - description: - - The SELinux mode - required: true - default: null - choices: [ "enforcing", "permissive", "disabled" ] - conf: - description: - - path to the SELinux configuration file, if non-standard - required: false - default: "/etc/selinux/config" -notes: - - Not tested on any debian based system -requirements: [ libselinux-python ] -author: Derek Carter -''' - -EXAMPLES = ''' -- selinux: policy=targeted state=enforcing -- selinux: policy=targeted state=permissive -- selinux: state=disabled -''' - -import os -import re -import sys - -try: - import selinux -except ImportError: - print "failed=True msg='libselinux-python required for this module'" - sys.exit(1) - -# getter subroutines -def get_config_state(configfile): - myfile = open(configfile, "r") - lines = myfile.readlines() - myfile.close() - for line in lines: - stateline = re.match('^SELINUX=.*$', line) - if (stateline): - return(line.split('=')[1].strip()) - -def get_config_policy(configfile): - myfile = open(configfile, "r") - lines = myfile.readlines() - myfile.close() - for line in lines: - stateline = re.match('^SELINUXTYPE=.*$', line) - if (stateline): - return(line.split('=')[1].strip()) - -# setter subroutines -def set_config_state(state, configfile): - #SELINUX=permissive - # edit config file with state value - stateline='SELINUX=%s' % state - myfile = open(configfile, "r") - lines = myfile.readlines() - myfile.close() - myfile = open(configfile, "w") - for line in lines: - myfile.write(re.sub(r'^SELINUX=.*', stateline, line)) - myfile.close() - -def set_state(state): - if (state == 'enforcing'): - selinux.security_setenforce(1) - elif (state == 'permissive'): - selinux.security_setenforce(0) - elif (state == 'disabled'): - pass - else: - msg = 'trying to set invalid runtime state %s' % 
state - module.fail_json(msg=msg) - -def set_config_policy(policy, configfile): - # edit config file with state value - #SELINUXTYPE=targeted - policyline='SELINUXTYPE=%s' % policy - myfile = open(configfile, "r") - lines = myfile.readlines() - myfile.close() - myfile = open(configfile, "w") - for line in lines: - myfile.write(re.sub(r'^SELINUXTYPE=.*', policyline, line)) - myfile.close() - -def main(): - - module = AnsibleModule( - argument_spec = dict( - policy=dict(required=False), - state=dict(choices=['enforcing', 'permissive', 'disabled'], required=True), - configfile=dict(aliases=['conf','file'], default='/etc/selinux/config') - ), - supports_check_mode=True - ) - - # global vars - changed=False - msgs = [] - configfile = module.params['configfile'] - policy = module.params['policy'] - state = module.params['state'] - runtime_enabled = selinux.is_selinux_enabled() - runtime_policy = selinux.selinux_getpolicytype()[1] - runtime_state = 'disabled' - if (runtime_enabled): - # enabled means 'enforcing' or 'permissive' - if (selinux.security_getenforce()): - runtime_state = 'enforcing' - else: - runtime_state = 'permissive' - config_policy = get_config_policy(configfile) - config_state = get_config_state(configfile) - - # check to see if policy is set if state is not 'disabled' - if (state != 'disabled'): - if not policy: - module.fail_json(msg='policy is required if state is not \'disabled\'') - else: - if not policy: - policy = config_policy - - # check changed values and run changes - if (policy != runtime_policy): - if module.check_mode: - module.exit_json(changed=True) - # cannot change runtime policy - msgs.append('reboot to change the loaded policy') - changed=True - - if (policy != config_policy): - if module.check_mode: - module.exit_json(changed=True) - msgs.append('config policy changed from \'%s\' to \'%s\'' % (config_policy, policy)) - set_config_policy(policy, configfile) - changed=True - - if (state != runtime_state): - if module.check_mode: - 
module.exit_json(changed=True) - if (state == 'disabled'): - msgs.append('state change will take effect next reboot') - else: - if (runtime_enabled): - set_state(state) - msgs.append('runtime state changed from \'%s\' to \'%s\'' % (runtime_state, state)) - else: - msgs.append('state change will take effect next reboot') - changed=True - - if (state != config_state): - if module.check_mode: - module.exit_json(changed=True) - msgs.append('config state changed from \'%s\' to \'%s\'' % (config_state, state)) - set_config_state(state, configfile) - changed=True - - module.exit_json(changed=changed, msg=', '.join(msgs), - configfile=configfile, - policy=policy, state=state) - -################################################# -# import module snippets -from ansible.module_utils.basic import * - -main() - diff --git a/library/system/service b/library/system/service deleted file mode 100644 index b235ee25c5..0000000000 --- a/library/system/service +++ /dev/null @@ -1,1328 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: service -author: Michael DeHaan -version_added: "0.1" -short_description: Manage services. -description: - - Controls services on remote hosts. -options: - name: - required: true - description: - - Name of the service. 
- state: - required: false - choices: [ started, stopped, restarted, reloaded ] - description: - - C(started)/C(stopped) are idempotent actions that will not run - commands unless necessary. C(restarted) will always bounce the - service. C(reloaded) will always reload. B(At least one of state - and enabled are required.) - sleep: - required: false - version_added: "1.3" - description: - - If the service is being C(restarted) then sleep this many seconds - between the stop and start command. This helps to workaround badly - behaving init scripts that exit immediately after signaling a process - to stop. - pattern: - required: false - version_added: "0.7" - description: - - If the service does not respond to the status command, name a - substring to look for as would be found in the output of the I(ps) - command as a stand-in for a status result. If the string is found, - the service will be assumed to be running. - enabled: - required: false - choices: [ "yes", "no" ] - description: - - Whether the service should start on boot. B(At least one of state and - enabled are required.) - - runlevel: - required: false - default: 'default' - description: - - "For OpenRC init scripts (ex: Gentoo) only. The runlevel that this service belongs to." 
- arguments: - description: - - Additional arguments provided on the command line - aliases: [ 'args' ] -''' - -EXAMPLES = ''' -# Example action to start service httpd, if not running -- service: name=httpd state=started - -# Example action to stop service httpd, if running -- service: name=httpd state=stopped - -# Example action to restart service httpd, in all cases -- service: name=httpd state=restarted - -# Example action to reload service httpd, in all cases -- service: name=httpd state=reloaded - -# Example action to enable service httpd, and not touch the running state -- service: name=httpd enabled=yes - -# Example action to start service foo, based on running process /usr/bin/foo -- service: name=foo pattern=/usr/bin/foo state=started - -# Example action to restart network service for interface eth0 -- service: name=network state=restarted args=eth0 -''' - -import platform -import os -import re -import tempfile -import shlex -import select -import time -import string - -from distutils.version import LooseVersion - -class Service(object): - """ - This is the generic Service manipulation class that is subclassed - based on platform. - - A subclass should override the following action methods:- - - get_service_tools - - service_enable - - get_service_status - - service_control - - All subclasses MUST define platform and distribution (which may be None). 
- """ - - platform = 'Generic' - distribution = None - - def __new__(cls, *args, **kwargs): - return load_platform_subclass(Service, args, kwargs) - - def __init__(self, module): - self.module = module - self.name = module.params['name'] - self.state = module.params['state'] - self.sleep = module.params['sleep'] - self.pattern = module.params['pattern'] - self.enable = module.params['enabled'] - self.runlevel = module.params['runlevel'] - self.changed = False - self.running = None - self.crashed = None - self.action = None - self.svc_cmd = None - self.svc_initscript = None - self.svc_initctl = None - self.enable_cmd = None - self.arguments = module.params.get('arguments', '') - self.rcconf_file = None - self.rcconf_key = None - self.rcconf_value = None - self.svc_change = False - - # select whether we dump additional debug info through syslog - self.syslogging = False - - # =========================================== - # Platform specific methods (must be replaced by subclass). - - def get_service_tools(self): - self.module.fail_json(msg="get_service_tools not implemented on target platform") - - def service_enable(self): - self.module.fail_json(msg="service_enable not implemented on target platform") - - def get_service_status(self): - self.module.fail_json(msg="get_service_status not implemented on target platform") - - def service_control(self): - self.module.fail_json(msg="service_control not implemented on target platform") - - # =========================================== - # Generic methods that should be used on all platforms. - - def execute_command(self, cmd, daemonize=False): - if self.syslogging: - syslog.openlog('ansible-%s' % os.path.basename(__file__)) - syslog.syslog(syslog.LOG_NOTICE, 'Command %s, daemonize %r' % (cmd, daemonize)) - - # Most things don't need to be daemonized - if not daemonize: - return self.module.run_command(cmd) - - # This is complex because daemonization is hard for people. 
- # What we do is daemonize a part of this module, the daemon runs the - # command, picks up the return code and output, and returns it to the - # main process. - pipe = os.pipe() - pid = os.fork() - if pid == 0: - os.close(pipe[0]) - # Set stdin/stdout/stderr to /dev/null - fd = os.open(os.devnull, os.O_RDWR) - if fd != 0: - os.dup2(fd, 0) - if fd != 1: - os.dup2(fd, 1) - if fd != 2: - os.dup2(fd, 2) - if fd not in (0, 1, 2): - os.close(fd) - - # Make us a daemon. Yes, that's all it takes. - pid = os.fork() - if pid > 0: - os._exit(0) - os.setsid() - os.chdir("/") - pid = os.fork() - if pid > 0: - os._exit(0) - - # Start the command - if isinstance(cmd, basestring): - cmd = shlex.split(cmd) - p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=lambda: os.close(pipe[1])) - stdout = "" - stderr = "" - fds = [p.stdout, p.stderr] - # Wait for all output, or until the main process is dead and its output is done. - while fds: - rfd, wfd, efd = select.select(fds, [], fds, 1) - if not (rfd + wfd + efd) and p.poll() is not None: - break - if p.stdout in rfd: - dat = os.read(p.stdout.fileno(), 4096) - if not dat: - fds.remove(p.stdout) - stdout += dat - if p.stderr in rfd: - dat = os.read(p.stderr.fileno(), 4096) - if not dat: - fds.remove(p.stderr) - stderr += dat - p.wait() - # Return a JSON blob to parent - os.write(pipe[1], json.dumps([p.returncode, stdout, stderr])) - os.close(pipe[1]) - os._exit(0) - elif pid == -1: - self.module.fail_json(msg="unable to fork") - else: - os.close(pipe[1]) - os.waitpid(pid, 0) - # Wait for data from daemon process and process it. 
- data = "" - while True: - rfd, wfd, efd = select.select([pipe[0]], [], [pipe[0]]) - if pipe[0] in rfd: - dat = os.read(pipe[0], 4096) - if not dat: - break - data += dat - return json.loads(data) - - def check_ps(self): - # Set ps flags - if platform.system() == 'SunOS': - psflags = '-ef' - else: - psflags = 'auxww' - - # Find ps binary - psbin = self.module.get_bin_path('ps', True) - - (rc, psout, pserr) = self.execute_command('%s %s' % (psbin, psflags)) - # If rc is 0, set running as appropriate - if rc == 0: - self.running = False - lines = psout.split("\n") - for line in lines: - if self.pattern in line and not "pattern=" in line: - # so as to not confuse ./hacking/test-module - self.running = True - break - - def check_service_changed(self): - if self.state and self.running is None: - self.module.fail_json(msg="failed determining service state, possible typo of service name?") - # Find out if state has changed - if not self.running and self.state in ["started", "running", "reloaded"]: - self.svc_change = True - elif self.running and self.state in ["stopped","reloaded"]: - self.svc_change = True - elif self.state == "restarted": - self.svc_change = True - if self.module.check_mode and self.svc_change: - self.module.exit_json(changed=True, msg='service state changed') - - def modify_service_state(self): - - # Only do something if state will change - if self.svc_change: - # Control service - if self.state in ['started', 'running']: - self.action = "start" - elif not self.running and self.state == 'reloaded': - self.action = "start" - elif self.state == 'stopped': - self.action = "stop" - elif self.state == 'reloaded': - self.action = "reload" - elif self.state == 'restarted': - self.action = "restart" - - if self.module.check_mode: - self.module.exit_json(changed=True, msg='changing service state') - - return self.service_control() - - else: - # If nothing needs to change just say all is well - rc = 0 - err = '' - out = '' - return rc, out, err - - def 
service_enable_rcconf(self): - if self.rcconf_file is None or self.rcconf_key is None or self.rcconf_value is None: - self.module.fail_json(msg="service_enable_rcconf() requires rcconf_file, rcconf_key and rcconf_value") - - self.changed = None - entry = '%s="%s"\n' % (self.rcconf_key, self.rcconf_value) - RCFILE = open(self.rcconf_file, "r") - new_rc_conf = [] - - # Build a list containing the possibly modified file. - for rcline in RCFILE: - # Parse line removing whitespaces, quotes, etc. - rcarray = shlex.split(rcline, comments=True) - if len(rcarray) >= 1 and '=' in rcarray[0]: - (key, value) = rcarray[0].split("=", 1) - if key == self.rcconf_key: - if value.upper() == self.rcconf_value: - # Since the proper entry already exists we can stop iterating. - self.changed = False - break - else: - # We found the key but the value is wrong, replace with new entry. - rcline = entry - self.changed = True - - # Add line to the list. - new_rc_conf.append(rcline) - - # We are done with reading the current rc.conf, close it. - RCFILE.close() - - # If we did not see any trace of our entry we need to add it. - if self.changed is None: - new_rc_conf.append(entry) - self.changed = True - - if self.changed is True: - - if self.module.check_mode: - self.module.exit_json(changed=True, msg="changing service enablement") - - # Create a temporary file next to the current rc.conf (so we stay on the same filesystem). - # This way the replacement operation is atomic. - rcconf_dir = os.path.dirname(self.rcconf_file) - rcconf_base = os.path.basename(self.rcconf_file) - (TMP_RCCONF, tmp_rcconf_file) = tempfile.mkstemp(dir=rcconf_dir, prefix="%s-" % rcconf_base) - - # Write out the contents of the list into our temporary file. - for rcline in new_rc_conf: - os.write(TMP_RCCONF, rcline) - - # Close temporary file. - os.close(TMP_RCCONF) - - # Replace previous rc.conf. 
- self.module.atomic_move(tmp_rcconf_file, self.rcconf_file) - -# =========================================== -# Subclass: Linux - -class LinuxService(Service): - """ - This is the Linux Service manipulation class - it is currently supporting - a mixture of binaries and init scripts for controlling services started at - boot, as well as for controlling the current state. - """ - - platform = 'Linux' - distribution = None - - def get_service_tools(self): - - paths = [ '/sbin', '/usr/sbin', '/bin', '/usr/bin' ] - binaries = [ 'service', 'chkconfig', 'update-rc.d', 'rc-service', 'rc-update', 'initctl', 'systemctl', 'start', 'stop', 'restart' ] - initpaths = [ '/etc/init.d' ] - location = dict() - - for binary in binaries: - location[binary] = self.module.get_bin_path(binary) - - def check_systemd(name): - # verify service is managed by systemd - if not location.get('systemctl', None): - return False - - # default to .service if the unit type is not specified - if name.find('.') > 0: - unit_name, unit_type = name.rsplit('.', 1) - if unit_type not in ("service", "socket", "device", "mount", "automount", - "swap", "target", "path", "timer", "snapshot"): - name = "%s.service" % name - else: - name = "%s.service" % name - - rc, out, err = self.execute_command("%s list-unit-files" % (location['systemctl'])) - - # adjust the service name to account for template service unit files - index = name.find('@') - if index != -1: - template_name = name[:index+1] - else: - template_name = name - - self.__systemd_unit = None - for line in out.splitlines(): - if line.startswith(template_name): - self.__systemd_unit = name - return True - return False - - # Locate a tool for enable options - if location.get('chkconfig', None) and os.path.exists("/etc/init.d/%s" % self.name): - if check_systemd(self.name): - # service is managed by systemd - self.enable_cmd = location['systemctl'] - else: - # we are using a standard SysV service - self.enable_cmd = location['chkconfig'] - elif 
location.get('update-rc.d', None): - if check_systemd(self.name): - # service is managed by systemd - self.enable_cmd = location['systemctl'] - elif location['initctl'] and os.path.exists("/etc/init/%s.conf" % self.name): - # service is managed by upstart - self.enable_cmd = location['initctl'] - elif location['update-rc.d'] and os.path.exists("/etc/init.d/%s" % self.name): - # service is managed by with SysV init scripts, but with update-rc.d - self.enable_cmd = location['update-rc.d'] - else: - self.module.fail_json(msg="service not found: %s" % self.name) - elif location.get('rc-service', None) and not location.get('systemctl', None): - # service is managed by OpenRC - self.svc_cmd = location['rc-service'] - self.enable_cmd = location['rc-update'] - return - elif check_systemd(self.name): - # service is managed by systemd - self.enable_cmd = location['systemctl'] - elif location['initctl'] and os.path.exists("/etc/init/%s.conf" % self.name): - # service is managed by upstart - self.enable_cmd = location['initctl'] - - # if this service is managed via upstart, get the current upstart version - if self.enable_cmd == location['initctl']: - # default the upstart version to something we can compare against - self.upstart_version = LooseVersion('0.0.0') - try: - # set the upstart version based on the output of 'initctl version' - version_re = re.compile(r'\(upstart (.*)\)') - rc,stdout,stderr = self.module.run_command('initctl version') - if rc == 0: - res = version_re.search(stdout) - if res: - self.upstart_version = LooseVersion(res.groups()[0]) - except: - # we'll use the default of 0.0.0 since we couldn't - # detect the current upstart version above - pass - - # Locate a tool for runtime service management (start, stop etc.) 
- if location.get('service', None) and os.path.exists("/etc/init.d/%s" % self.name): - # SysV init script - self.svc_cmd = location['service'] - elif location.get('start', None) and os.path.exists("/etc/init/%s.conf" % self.name): - # upstart -- rather than being managed by one command, start/stop/restart are actual commands - self.svc_cmd = '' - else: - # still a SysV init script, but /sbin/service isn't installed - for initdir in initpaths: - initscript = "%s/%s" % (initdir,self.name) - if os.path.isfile(initscript): - self.svc_initscript = initscript - - # couldn't find anything yet, assume systemd - if self.svc_cmd is None and self.svc_initscript is None: - if location.get('systemctl'): - self.svc_cmd = location['systemctl'] - - if self.svc_cmd is None and not self.svc_initscript: - self.module.fail_json(msg='cannot find \'service\' binary or init script for service, possible typo in service name?, aborting') - - if location.get('initctl', None): - self.svc_initctl = location['initctl'] - - def get_systemd_status_dict(self): - (rc, out, err) = self.execute_command("%s show %s" % (self.enable_cmd, self.__systemd_unit,)) - if rc != 0: - self.module.fail_json(msg='failure %d running systemctl show for %r: %s' % (rc, self.__systemd_unit, err)) - return dict(line.split('=', 1) for line in out.splitlines()) - - def get_systemd_service_status(self): - d = self.get_systemd_status_dict() - if d.get('ActiveState') == 'active': - # run-once services (for which a single successful exit indicates - # that they are running as designed) should not be restarted here. - # Thus, we are not checking d['SubState']. 
- self.running = True - self.crashed = False - elif d.get('ActiveState') == 'failed': - self.running = False - self.crashed = True - elif d.get('ActiveState') is None: - self.module.fail_json(msg='No ActiveState value in systemctl show output for %r' % (self.__systemd_unit,)) - else: - self.running = False - self.crashed = False - return self.running - - def get_service_status(self): - if self.svc_cmd and self.svc_cmd.endswith('systemctl'): - return self.get_systemd_service_status() - - self.action = "status" - rc, status_stdout, status_stderr = self.service_control() - - # if we have decided the service is managed by upstart, we check for some additional output... - if self.svc_initctl and self.running is None: - # check the job status by upstart response - initctl_rc, initctl_status_stdout, initctl_status_stderr = self.execute_command("%s status %s" % (self.svc_initctl, self.name)) - if "stop/waiting" in initctl_status_stdout: - self.running = False - elif "start/running" in initctl_status_stdout: - self.running = True - - if self.svc_cmd and self.svc_cmd.endswith("rc-service") and self.running is None: - openrc_rc, openrc_status_stdout, openrc_status_stderr = self.execute_command("%s %s status" % (self.svc_cmd, self.name)) - self.running = "started" in openrc_status_stdout - self.crashed = "crashed" in openrc_status_stderr - - # if the job status is still not known check it by status output keywords - # Only check keywords if there's only one line of output (some init - # scripts will output verbosely in case of error and those can emit - # keywords that are picked up as false positives - if self.running is None and status_stdout.count('\n') <= 1: - # first transform the status output that could irritate keyword matching - cleanout = status_stdout.lower().replace(self.name.lower(), '') - if "stop" in cleanout: - self.running = False - elif "run" in cleanout and "not" in cleanout: - self.running = False - elif "run" in cleanout and "not" not in cleanout: - 
self.running = True - elif "start" in cleanout and "not" not in cleanout: - self.running = True - elif 'could not access pid file' in cleanout: - self.running = False - elif 'is dead and pid file exists' in cleanout: - self.running = False - elif 'dead but subsys locked' in cleanout: - self.running = False - elif 'dead but pid file exists' in cleanout: - self.running = False - - # if the job status is still not known check it by response code - # For reference, see: - # http://refspecs.linuxbase.org/LSB_4.1.0/LSB-Core-generic/LSB-Core-generic/iniscrptact.html - if self.running is None: - if rc in [1, 2, 3, 4, 69]: - self.running = False - elif rc == 0: - self.running = True - - # if the job status is still not known check it by special conditions - if self.running is None: - if self.name == 'iptables' and "ACCEPT" in status_stdout: - # iptables status command output is lame - # TODO: lookup if we can use a return code for this instead? - self.running = True - - return self.running - - - def service_enable(self): - - if self.enable_cmd is None: - self.module.fail_json(msg='cannot detect command to enable service %s, typo or init system potentially unknown' % self.name) - - self.changed = True - action = None - - # FIXME: we use chkconfig or systemctl - # to decide whether to run the command here but need something - # similar for upstart - - # - # Upstart's initctl - # - if self.enable_cmd.endswith("initctl"): - def write_to_override_file(file_name, file_contents, ): - override_file = open(file_name, 'w') - override_file.write(file_contents) - override_file.close() - - initpath = '/etc/init' - if self.upstart_version >= LooseVersion('0.6.7'): - manreg = re.compile('^manual\s*$', re.M | re.I) - config_line = 'manual\n' - else: - manreg = re.compile('^start on manual\s*$', re.M | re.I) - config_line = 'start on manual\n' - conf_file_name = "%s/%s.conf" % (initpath, self.name) - override_file_name = "%s/%s.override" % (initpath, self.name) - - # Check to see if files 
contain the manual line in .conf and fail if True - if manreg.search(open(conf_file_name).read()): - self.module.fail_json(msg="manual stanza not supported in a .conf file") - - self.changed = False - if os.path.exists(override_file_name): - override_file_contents = open(override_file_name).read() - # Remove manual stanza if present and service enabled - if self.enable and manreg.search(override_file_contents): - self.changed = True - override_state = manreg.sub('', override_file_contents) - # Add manual stanza if not present and service disabled - elif not (self.enable) and not (manreg.search(override_file_contents)): - self.changed = True - override_state = '\n'.join((override_file_contents, config_line)) - # service already in desired state - else: - pass - # Add file with manual stanza if service disabled - elif not (self.enable): - self.changed = True - override_state = config_line - else: - # service already in desired state - pass - - if self.module.check_mode: - self.module.exit_json(changed=self.changed) - - # The initctl method of enabling and disabling services is much - # different than for the other service methods. 
So actually - # committing the change is done in this conditional and then we - # skip the boilerplate at the bottom of the method - if self.changed: - try: - write_to_override_file(override_file_name, override_state) - except: - self.module.fail_json(msg='Could not modify override file') - - return - - # - # SysV's chkconfig - # - if self.enable_cmd.endswith("chkconfig"): - if self.enable: - action = 'on' - else: - action = 'off' - - (rc, out, err) = self.execute_command("%s --list %s" % (self.enable_cmd, self.name)) - if 'chkconfig --add %s' % self.name in err: - self.execute_command("%s --add %s" % (self.enable_cmd, self.name)) - (rc, out, err) = self.execute_command("%s --list %s" % (self.enable_cmd, self.name)) - if not self.name in out: - self.module.fail_json(msg="service %s does not support chkconfig" % self.name) - state = out.split()[-1] - - # Check if we're already in the correct state - if "3:%s" % action in out and "5:%s" % action in out: - self.changed = False - return - - # - # Systemd's systemctl - # - if self.enable_cmd.endswith("systemctl"): - if self.enable: - action = 'enable' - else: - action = 'disable' - - # Check if we're already in the correct state - d = self.get_systemd_status_dict() - if "UnitFileState" in d: - if self.enable and d["UnitFileState"] == "enabled": - self.changed = False - elif not self.enable and d["UnitFileState"] == "disabled": - self.changed = False - elif not self.enable: - self.changed = False - - if not self.changed: - return - - # - # OpenRC's rc-update - # - if self.enable_cmd.endswith("rc-update"): - if self.enable: - action = 'add' - else: - action = 'delete' - - (rc, out, err) = self.execute_command("%s show" % self.enable_cmd) - for line in out.splitlines(): - service_name, runlevels = line.split('|') - service_name = service_name.strip() - if service_name != self.name: - continue - runlevels = re.split(r'\s+', runlevels) - # service already enabled for the runlevel - if self.enable and self.runlevel in 
runlevels: - self.changed = False - # service already disabled for the runlevel - elif not self.enable and self.runlevel not in runlevels: - self.changed = False - break - else: - # service already disabled altogether - if not self.enable: - self.changed = False - - if not self.changed: - return - - # - # update-rc.d style - # - if self.enable_cmd.endswith("update-rc.d"): - if self.enable: - action = 'enable' - else: - action = 'disable' - - if self.enable: - # make sure the init.d symlinks are created - # otherwise enable might not work - (rc, out, err) = self.execute_command("%s %s defaults" \ - % (self.enable_cmd, self.name)) - if rc != 0: - if err: - self.module.fail_json(msg=err) - else: - self.module.fail_json(msg=out) - - (rc, out, err) = self.execute_command("%s -n %s %s" \ - % (self.enable_cmd, self.name, action)) - self.changed = False - for line in out.splitlines(): - if line.startswith('rename'): - self.changed = True - break - elif self.enable and 'do not exist' in line: - self.changed = True - break - elif not self.enable and 'already exist' in line: - self.changed = True - break - - # Debian compatibility - for line in err.splitlines(): - if self.enable and 'no runlevel symlinks to modify' in line: - self.changed = True - break - - if not self.changed: - return - - # - # If we've gotten to the end, the service needs to be updated - # - self.changed = True - - # we change argument order depending on real binary used: - # rc-update and systemctl need the argument order reversed - - if self.enable_cmd.endswith("rc-update"): - args = (self.enable_cmd, action, self.name + " " + self.runlevel) - elif self.enable_cmd.endswith("systemctl"): - args = (self.enable_cmd, action, self.__systemd_unit) - else: - args = (self.enable_cmd, self.name, action) - - if self.module.check_mode: - self.module.exit_json(changed=self.changed) - - (rc, out, err) = self.execute_command("%s %s %s" % args) - if rc != 0: - if err: - self.module.fail_json(msg=err) - else: - 
self.module.fail_json(msg=out) - - return (rc, out, err) - - - def service_control(self): - - # Decide what command to run - svc_cmd = '' - arguments = self.arguments - if self.svc_cmd: - if not self.svc_cmd.endswith("systemctl"): - # SysV and OpenRC take the form - svc_cmd = "%s %s" % (self.svc_cmd, self.name) - else: - # systemd commands take the form - svc_cmd = self.svc_cmd - arguments = "%s %s" % (self.__systemd_unit, arguments) - elif self.svc_initscript: - # upstart - svc_cmd = "%s" % self.svc_initscript - - # In OpenRC, if a service crashed, we need to reset its status to - # stopped with the zap command, before we can start it back. - if self.svc_cmd and self.svc_cmd.endswith('rc-service') and self.action == 'start' and self.crashed: - self.execute_command("%s zap" % svc_cmd, daemonize=True) - - if self.action is not "restart": - if svc_cmd != '': - # upstart or systemd or OpenRC - rc_state, stdout, stderr = self.execute_command("%s %s %s" % (svc_cmd, self.action, arguments), daemonize=True) - else: - # SysV - rc_state, stdout, stderr = self.execute_command("%s %s %s" % (self.action, self.name, arguments), daemonize=True) - elif self.svc_cmd and self.svc_cmd.endswith('rc-service'): - # All services in OpenRC support restart. - rc_state, stdout, stderr = self.execute_command("%s %s %s" % (svc_cmd, self.action, arguments), daemonize=True) - else: - # In other systems, not all services support restart. Do it the hard way. 
- if svc_cmd != '': - # upstart or systemd - rc1, stdout1, stderr1 = self.execute_command("%s %s %s" % (svc_cmd, 'stop', arguments), daemonize=True) - else: - # SysV - rc1, stdout1, stderr1 = self.execute_command("%s %s %s" % ('stop', self.name, arguments), daemonize=True) - - if self.sleep: - time.sleep(self.sleep) - - if svc_cmd != '': - # upstart or systemd - rc2, stdout2, stderr2 = self.execute_command("%s %s %s" % (svc_cmd, 'start', arguments), daemonize=True) - else: - # SysV - rc2, stdout2, stderr2 = self.execute_command("%s %s %s" % ('start', self.name, arguments), daemonize=True) - - # merge return information - if rc1 != 0 and rc2 == 0: - rc_state = rc2 - stdout = stdout2 - stderr = stderr2 - else: - rc_state = rc1 + rc2 - stdout = stdout1 + stdout2 - stderr = stderr1 + stderr2 - - return(rc_state, stdout, stderr) - -# =========================================== -# Subclass: FreeBSD - -class FreeBsdService(Service): - """ - This is the FreeBSD Service manipulation class - it uses the /etc/rc.conf - file for controlling services started at boot and the 'service' binary to - check status and perform direct service manipulation. 
- """ - - platform = 'FreeBSD' - distribution = None - - def get_service_tools(self): - self.svc_cmd = self.module.get_bin_path('service', True) - - if not self.svc_cmd: - self.module.fail_json(msg='unable to find service binary') - - def get_service_status(self): - rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.svc_cmd, self.name, 'onestatus', self.arguments)) - if rc == 1: - self.running = False - elif rc == 0: - self.running = True - - def service_enable(self): - if self.enable: - self.rcconf_value = "YES" - else: - self.rcconf_value = "NO" - - rcfiles = [ '/etc/rc.conf','/etc/rc.conf.local', '/usr/local/etc/rc.conf' ] - for rcfile in rcfiles: - if os.path.isfile(rcfile): - self.rcconf_file = rcfile - - rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.svc_cmd, self.name, 'rcvar', self.arguments)) - cmd = "%s %s %s %s" % (self.svc_cmd, self.name, 'rcvar', self.arguments) - rcvars = shlex.split(stdout, comments=True) - - if not rcvars: - self.module.fail_json(msg="unable to determine rcvar", stdout=stdout, stderr=stderr) - - # In rare cases, i.e. sendmail, rcvar can return several key=value pairs - # Usually there is just one, however. In other rare cases, i.e. uwsgi, - # rcvar can return extra uncommented data that is not at all related to - # the rcvar. We will just take the first key=value pair we come across - # and hope for the best. 
- for rcvar in rcvars: - if '=' in rcvar: - self.rcconf_key = rcvar.split('=')[0] - break - - if self.rcconf_key is None: - self.module.fail_json(msg="unable to determine rcvar", stdout=stdout, stderr=stderr) - - try: - return self.service_enable_rcconf() - except: - self.module.fail_json(msg='unable to set rcvar') - - def service_control(self): - - if self.action is "start": - self.action = "onestart" - if self.action is "stop": - self.action = "onestop" - if self.action is "reload": - self.action = "onereload" - - return self.execute_command("%s %s %s %s" % (self.svc_cmd, self.name, self.action, self.arguments)) - -# =========================================== -# Subclass: OpenBSD - -class OpenBsdService(Service): - """ - This is the OpenBSD Service manipulation class - it uses /etc/rc.d for - service control. Enabling a service is currently not supported because the - _flags variable is not boolean, you should supply a rc.conf.local - file in some other way. - """ - - platform = 'OpenBSD' - distribution = None - - def get_service_tools(self): - rcdir = '/etc/rc.d' - - rc_script = "%s/%s" % (rcdir, self.name) - if os.path.isfile(rc_script): - self.svc_cmd = rc_script - - if not self.svc_cmd: - self.module.fail_json(msg='unable to find rc.d script') - - def get_service_status(self): - rc, stdout, stderr = self.execute_command("%s %s" % (self.svc_cmd, 'check')) - if rc == 1: - self.running = False - elif rc == 0: - self.running = True - - def service_control(self): - return self.execute_command("%s %s" % (self.svc_cmd, self.action)) - -# =========================================== -# Subclass: NetBSD - -class NetBsdService(Service): - """ - This is the NetBSD Service manipulation class - it uses the /etc/rc.conf - file for controlling services started at boot, check status and perform - direct service manipulation. Init scripts in /etc/rcd are used for - controlling services (start/stop) as well as for controlling the current - state. 
- """ - - platform = 'NetBSD' - distribution = None - - def get_service_tools(self): - initpaths = [ '/etc/rc.d' ] # better: $rc_directories - how to get in here? Run: sh -c '. /etc/rc.conf ; echo $rc_directories' - - for initdir in initpaths: - initscript = "%s/%s" % (initdir,self.name) - if os.path.isfile(initscript): - self.svc_initscript = initscript - - if not self.svc_initscript: - self.module.fail_json(msg='unable to find rc.d script') - - def service_enable(self): - if self.enable: - self.rcconf_value = "YES" - else: - self.rcconf_value = "NO" - - rcfiles = [ '/etc/rc.conf' ] # Overkill? - for rcfile in rcfiles: - if os.path.isfile(rcfile): - self.rcconf_file = rcfile - - self.rcconf_key = "%s" % string.replace(self.name,"-","_") - - return self.service_enable_rcconf() - - def get_service_status(self): - self.svc_cmd = "%s" % self.svc_initscript - rc, stdout, stderr = self.execute_command("%s %s" % (self.svc_cmd, 'onestatus')) - if rc == 1: - self.running = False - elif rc == 0: - self.running = True - - def service_control(self): - if self.action is "start": - self.action = "onestart" - if self.action is "stop": - self.action = "onestop" - - self.svc_cmd = "%s" % self.svc_initscript - return self.execute_command("%s %s" % (self.svc_cmd, self.action), daemonize=True) - -# =========================================== -# Subclass: SunOS -class SunOSService(Service): - """ - This is the SunOS Service manipulation class - it uses the svcadm - command for controlling services, and svcs command for checking status. - It also tries to be smart about taking the service out of maintenance - state if necessary. 
- """ - platform = 'SunOS' - distribution = None - - def get_service_tools(self): - self.svcs_cmd = self.module.get_bin_path('svcs', True) - - if not self.svcs_cmd: - self.module.fail_json(msg='unable to find svcs binary') - - self.svcadm_cmd = self.module.get_bin_path('svcadm', True) - - if not self.svcadm_cmd: - self.module.fail_json(msg='unable to find svcadm binary') - - def get_service_status(self): - status = self.get_sunos_svcs_status() - # Only 'online' is considered properly running. Everything else is off - # or has some sort of problem. - if status == 'online': - self.running = True - else: - self.running = False - - def get_sunos_svcs_status(self): - rc, stdout, stderr = self.execute_command("%s %s" % (self.svcs_cmd, self.name)) - if rc == 1: - if stderr: - self.module.fail_json(msg=stderr) - else: - self.module.fail_json(msg=stdout) - - lines = stdout.rstrip("\n").split("\n") - status = lines[-1].split(" ")[0] - # status is one of: online, offline, degraded, disabled, maintenance, uninitialized - # see man svcs(1) - return status - - def service_enable(self): - # Get current service enablement status - rc, stdout, stderr = self.execute_command("%s -l %s" % (self.svcs_cmd, self.name)) - - if rc != 0: - if stderr: - self.module.fail_json(msg=stderr) - else: - self.module.fail_json(msg=stdout) - - enabled = False - temporary = False - - # look for enabled line, which could be one of: - # enabled true (temporary) - # enabled false (temporary) - # enabled true - # enabled false - for line in stdout.split("\n"): - if line.startswith("enabled"): - if "true" in line: - enabled = True - if "temporary" in line: - temporary = True - - startup_enabled = (enabled and not temporary) or (not enabled and temporary) - - if self.enable and startup_enabled: - return - elif (not self.enable) and (not startup_enabled): - return - - # Mark service as started or stopped (this will have the side effect of - # actually stopping or starting the service) - if self.enable: - 
subcmd = "enable -rs" - else: - subcmd = "disable -s" - - rc, stdout, stderr = self.execute_command("%s %s %s" % (self.svcadm_cmd, subcmd, self.name)) - - if rc != 0: - if stderr: - self.module.fail_json(msg=stderr) - else: - self.module.fail_json(msg=stdout) - - self.changed = True - - - def service_control(self): - status = self.get_sunos_svcs_status() - - # if starting or reloading, clear maintenace states - if self.action in ['start', 'reload', 'restart'] and status in ['maintenance', 'degraded']: - rc, stdout, stderr = self.execute_command("%s clear %s" % (self.svcadm_cmd, self.name)) - if rc != 0: - return rc, stdout, stderr - status = self.get_sunos_svcs_status() - - if status in ['maintenance', 'degraded']: - self.module.fail_json(msg="Failed to bring service out of %s status." % status) - - if self.action == 'start': - subcmd = "enable -rst" - elif self.action == 'stop': - subcmd = "disable -st" - elif self.action == 'reload': - subcmd = "refresh" - elif self.action == 'restart' and status == 'online': - subcmd = "restart" - elif self.action == 'restart' and status != 'online': - subcmd = "enable -rst" - - return self.execute_command("%s %s %s" % (self.svcadm_cmd, subcmd, self.name)) - -# =========================================== -# Subclass: AIX - -class AIX(Service): - """ - This is the AIX Service (SRC) manipulation class - it uses lssrc, startsrc, stopsrc - and refresh for service control. Enabling a service is currently not supported. 
- Would require to add an entry in the /etc/inittab file (mkitab, chitab and rmitab - commands) - """ - - platform = 'AIX' - distribution = None - - def get_service_tools(self): - self.lssrc_cmd = self.module.get_bin_path('lssrc', True) - - if not self.lssrc_cmd: - self.module.fail_json(msg='unable to find lssrc binary') - - self.startsrc_cmd = self.module.get_bin_path('startsrc', True) - - if not self.startsrc_cmd: - self.module.fail_json(msg='unable to find startsrc binary') - - self.stopsrc_cmd = self.module.get_bin_path('stopsrc', True) - - if not self.stopsrc_cmd: - self.module.fail_json(msg='unable to find stopsrc binary') - - self.refresh_cmd = self.module.get_bin_path('refresh', True) - - if not self.refresh_cmd: - self.module.fail_json(msg='unable to find refresh binary') - - - def get_service_status(self): - status = self.get_aix_src_status() - # Only 'active' is considered properly running. Everything else is off - # or has some sort of problem. - if status == 'active': - self.running = True - else: - self.running = False - - def get_aix_src_status(self): - rc, stdout, stderr = self.execute_command("%s -s %s" % (self.lssrc_cmd, self.name)) - if rc == 1: - if stderr: - self.module.fail_json(msg=stderr) - else: - self.module.fail_json(msg=stdout) - - lines = stdout.rstrip("\n").split("\n") - status = lines[-1].split(" ")[-1] - # status is one of: active, inoperative - return status - - def service_control(self): - if self.action == 'start': - srccmd = self.startsrc_cmd - elif self.action == 'stop': - srccmd = self.stopsrc_cmd - elif self.action == 'reload': - srccmd = self.refresh_cmd - elif self.action == 'restart': - self.execute_command("%s -s %s" % (self.stopsrc_cmd, self.name)) - srccmd = self.startsrc_cmd - - if self.arguments and self.action == 'start': - return self.execute_command("%s -a \"%s\" -s %s" % (srccmd, self.arguments, self.name)) - else: - return self.execute_command("%s -s %s" % (srccmd, self.name)) - - -# 
=========================================== -# Main control flow - -def main(): - module = AnsibleModule( - argument_spec = dict( - name = dict(required=True), - state = dict(choices=['running', 'started', 'stopped', 'restarted', 'reloaded']), - sleep = dict(required=False, type='int', default=None), - pattern = dict(required=False, default=None), - enabled = dict(type='bool'), - runlevel = dict(required=False, default='default'), - arguments = dict(aliases=['args'], default=''), - ), - supports_check_mode=True - ) - if module.params['state'] is None and module.params['enabled'] is None: - module.fail_json(msg="Neither 'state' nor 'enabled' set") - - service = Service(module) - - if service.syslogging: - syslog.openlog('ansible-%s' % os.path.basename(__file__)) - syslog.syslog(syslog.LOG_NOTICE, 'Service instantiated - platform %s' % service.platform) - if service.distribution: - syslog.syslog(syslog.LOG_NOTICE, 'Service instantiated - distribution %s' % service.distribution) - - rc = 0 - out = '' - err = '' - result = {} - result['name'] = service.name - - # Find service management tools - service.get_service_tools() - - # Enable/disable service startup at boot if requested - if service.module.params['enabled'] is not None: - # FIXME: ideally this should detect if we need to toggle the enablement state, though - # it's unlikely the changed handler would need to fire in this case so it's a minor thing. - service.service_enable() - result['enabled'] = service.enable - - if module.params['state'] is None: - # Not changing the running state, so bail out now. 
- result['changed'] = service.changed - module.exit_json(**result) - - result['state'] = service.state - - # Collect service status - if service.pattern: - service.check_ps() - else: - service.get_service_status() - - # Calculate if request will change service state - service.check_service_changed() - - # Modify service state if necessary - (rc, out, err) = service.modify_service_state() - - if rc != 0: - if err and "Job is already running" in err: - # upstart got confused, one such possibility is MySQL on Ubuntu 12.04 - # where status may report it has no start/stop links and we could - # not get accurate status - pass - else: - if err: - module.fail_json(msg=err) - else: - module.fail_json(msg=out) - - result['changed'] = service.changed | service.svc_change - if service.module.params['enabled'] is not None: - result['enabled'] = service.module.params['enabled'] - - if not service.module.params['state']: - status = service.get_service_status() - if status is None: - result['state'] = 'absent' - elif status is False: - result['state'] = 'started' - else: - result['state'] = 'stopped' - else: - # as we may have just bounced the service the service command may not - # report accurate state at this moment so just show what we ran - if service.module.params['state'] in ['started','restarted','running','reloaded']: - result['state'] = 'started' - else: - result['state'] = 'stopped' - - module.exit_json(**result) - -from ansible.module_utils.basic import * -main() diff --git a/library/system/setup b/library/system/setup deleted file mode 100644 index 486304230b..0000000000 --- a/library/system/setup +++ /dev/null @@ -1,146 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any 
later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: setup -version_added: historical -short_description: Gathers facts about remote hosts -options: - filter: - version_added: "1.1" - description: - - if supplied, only return facts that match this shell-style (fnmatch) wildcard. - required: false - default: '*' - fact_path: - version_added: "1.3" - description: - - path used for local ansible facts (*.fact) - files in this dir - will be run (if executable) and their results be added to ansible_local facts - if a file is not executable it is read. - File/results format can be json or ini-format - required: false - default: '/etc/ansible/facts.d' -description: - - This module is automatically called by playbooks to gather useful - variables about remote hosts that can be used in playbooks. It can also be - executed directly by C(/usr/bin/ansible) to check what variables are - available to a host. Ansible provides many I(facts) about the system, - automatically. -notes: - - More ansible facts will be added with successive releases. If I(facter) or - I(ohai) are installed, variables from these programs will also be snapshotted - into the JSON file for usage in templating. These variables are prefixed - with C(facter_) and C(ohai_) so it's easy to tell their source. All variables are - bubbled up to the caller. Using the ansible facts and choosing to not - install I(facter) and I(ohai) means you can avoid Ruby-dependencies on your - remote systems. (See also M(facter) and M(ohai).) - - The filter option filters only the first level subkey below ansible_facts. 
- - If the target host is Windows, you will not currently have the ability to use - C(fact_path) or C(filter) as this is provided by a simpler implementation of the module. - Different facts are returned for Windows hosts. -author: Michael DeHaan -''' - -EXAMPLES = """ -# Display facts from all hosts and store them indexed by I(hostname) at C(/tmp/facts). -ansible all -m setup --tree /tmp/facts - -# Display only facts regarding memory found by ansible on all hosts and output them. -ansible all -m setup -a 'filter=ansible_*_mb' - -# Display only facts returned by facter. -ansible all -m setup -a 'filter=facter_*' - -# Display only facts about certain interfaces. -ansible all -m setup -a 'filter=ansible_eth[0-2]' -""" - - -def run_setup(module): - - setup_options = dict(module_setup=True) - facts = ansible_facts(module) - - for (k, v) in facts.items(): - setup_options["ansible_%s" % k.replace('-', '_')] = v - - # Look for the path to the facter and ohai binary and set - # the variable to that path. 
- facter_path = module.get_bin_path('facter') - ohai_path = module.get_bin_path('ohai') - - # if facter is installed, and we can use --json because - # ruby-json is ALSO installed, include facter data in the JSON - if facter_path is not None: - rc, out, err = module.run_command(facter_path + " --puppet --json") - facter = True - try: - facter_ds = json.loads(out) - except: - facter = False - if facter: - for (k,v) in facter_ds.items(): - setup_options["facter_%s" % k] = v - - # ditto for ohai - if ohai_path is not None: - rc, out, err = module.run_command(ohai_path) - ohai = True - try: - ohai_ds = json.loads(out) - except: - ohai = False - if ohai: - for (k,v) in ohai_ds.items(): - k2 = "ohai_%s" % k.replace('-', '_') - setup_options[k2] = v - - setup_result = { 'ansible_facts': {} } - - for (k,v) in setup_options.items(): - if module.params['filter'] == '*' or fnmatch.fnmatch(k, module.params['filter']): - setup_result['ansible_facts'][k] = v - - # hack to keep --verbose from showing all the setup module results - setup_result['verbose_override'] = True - - return setup_result - -def main(): - global module - module = AnsibleModule( - argument_spec = dict( - filter=dict(default="*", required=False), - fact_path=dict(default='/etc/ansible/facts.d', required=False), - ), - supports_check_mode = True, - ) - data = run_setup(module) - module.exit_json(**data) - -# import module snippets - -from ansible.module_utils.basic import * - -from ansible.module_utils.facts import * - -main() diff --git a/library/system/sysctl b/library/system/sysctl deleted file mode 100644 index acf6395f07..0000000000 --- a/library/system/sysctl +++ /dev/null @@ -1,334 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, David "DaviXX" CHANIAL -# (c) 2014, James Tanner -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, 
either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -DOCUMENTATION = ''' ---- -module: sysctl -short_description: Manage entries in sysctl.conf. -description: - - This module manipulates sysctl entries and optionally performs a C(/sbin/sysctl -p) after changing them. -version_added: "1.0" -options: - name: - description: - - The dot-separated path (aka I(key)) specifying the sysctl variable. - required: true - default: null - aliases: [ 'key' ] - value: - description: - - Desired value of the sysctl key. - required: false - default: null - aliases: [ 'val' ] - state: - description: - - Whether the entry should be present or absent in the sysctl file. - choices: [ "present", "absent" ] - default: present - ignoreerrors: - description: - - Use this option to ignore errors about unknown keys. - choices: [ "yes", "no" ] - default: no - reload: - description: - - If C(yes), performs a I(/sbin/sysctl -p) if the C(sysctl_file) is - updated. If C(no), does not reload I(sysctl) even if the - C(sysctl_file) is updated. - choices: [ "yes", "no" ] - default: "yes" - sysctl_file: - description: - - Specifies the absolute path to C(sysctl.conf), if not C(/etc/sysctl.conf). 
- required: false - default: /etc/sysctl.conf - sysctl_set: - description: - - Verify token value with the sysctl command and set with -w if necessary - choices: [ "yes", "no" ] - required: false - version_added: 1.5 - default: False -notes: [] -requirements: [] -author: David "DaviXX" CHANIAL -''' - -EXAMPLES = ''' -# Set vm.swappiness to 5 in /etc/sysctl.conf -- sysctl: name=vm.swappiness value=5 state=present - -# Remove kernel.panic entry from /etc/sysctl.conf -- sysctl: name=kernel.panic state=absent sysctl_file=/etc/sysctl.conf - -# Set kernel.panic to 3 in /tmp/test_sysctl.conf -- sysctl: name=kernel.panic value=3 sysctl_file=/tmp/test_sysctl.conf reload=no - -# Set ip fowarding on in /proc and do not reload the sysctl file -- sysctl: name="net.ipv4.ip_forward" value=1 sysctl_set=yes - -# Set ip forwarding on in /proc and in the sysctl file and reload if necessary -- sysctl: name="net.ipv4.ip_forward" value=1 sysctl_set=yes state=present reload=yes -''' - -# ============================================================== - -import os -import tempfile -import re - -class SysctlModule(object): - - def __init__(self, module): - self.module = module - self.args = self.module.params - - self.sysctl_cmd = self.module.get_bin_path('sysctl', required=True) - self.sysctl_file = self.args['sysctl_file'] - - self.proc_value = None # current token value in proc fs - self.file_value = None # current token value in file - self.file_lines = [] # all lines in the file - self.file_values = {} # dict of token values - - self.changed = False # will change occur - self.set_proc = False # does sysctl need to set value - self.write_file = False # does the sysctl file need to be reloaded - - self.process() - - # ============================================================== - # LOGIC - # ============================================================== - - def process(self): - - # Whitespace is bad - self.args['name'] = self.args['name'].strip() - self.args['value'] = 
self._parse_value(self.args['value']) - - thisname = self.args['name'] - - # get the current proc fs value - self.proc_value = self.get_token_curr_value(thisname) - - # get the currect sysctl file value - self.read_sysctl_file() - if thisname not in self.file_values: - self.file_values[thisname] = None - - # update file contents with desired token/value - self.fix_lines() - - # what do we need to do now? - if self.file_values[thisname] is None and self.args['state'] == "present": - self.changed = True - self.write_file = True - elif self.file_values[thisname] is None and self.args['state'] == "absent": - self.changed = False - elif self.file_values[thisname] != self.args['value']: - self.changed = True - self.write_file = True - - # use the sysctl command or not? - if self.args['sysctl_set']: - if self.proc_value is None: - self.changed = True - elif not self._values_is_equal(self.proc_value, self.args['value']): - self.changed = True - self.set_proc = True - - # Do the work - if not self.module.check_mode: - if self.write_file: - self.write_sysctl() - if self.write_file and self.args['reload']: - self.reload_sysctl() - if self.set_proc: - self.set_token_value(self.args['name'], self.args['value']) - - def _values_is_equal(self, a, b): - """Expects two string values. It will split the string by whitespace - and compare each value. 
It will return True if both lists are the same, - contain the same elements and the same order.""" - if a is None or b is None: - return False - - a = a.split() - b = b.split() - - if len(a) != len(b): - return False - - return len([i for i, j in zip(a, b) if i == j]) == len(a) - - def _parse_value(self, value): - if value is None: - return '' - elif value.lower() in BOOLEANS_TRUE: - return '1' - elif value.lower() in BOOLEANS_FALSE: - return '0' - else: - return value.strip() - - # ============================================================== - # SYSCTL COMMAND MANAGEMENT - # ============================================================== - - # Use the sysctl command to find the current value - def get_token_curr_value(self, token): - thiscmd = "%s -e -n %s" % (self.sysctl_cmd, token) - rc,out,err = self.module.run_command(thiscmd) - if rc != 0: - return None - else: - return out - - # Use the sysctl command to set the current value - def set_token_value(self, token, value): - if len(value.split()) > 0: - value = '"' + value + '"' - thiscmd = "%s -w %s=%s" % (self.sysctl_cmd, token, value) - rc,out,err = self.module.run_command(thiscmd) - if rc != 0: - self.module.fail_json(msg='setting %s failed: %s' % (token, out + err)) - else: - return rc - - # Run sysctl -p - def reload_sysctl(self): - # do it - if get_platform().lower() == 'freebsd': - # freebsd doesn't support -p, so reload the sysctl service - rc,out,err = self.module.run_command('/etc/rc.d/sysctl reload') - else: - # system supports reloading via the -p flag to sysctl, so we'll use that - sysctl_args = [self.sysctl_cmd, '-p', self.sysctl_file] - if self.args['ignoreerrors']: - sysctl_args.insert(1, '-e') - - rc,out,err = self.module.run_command(sysctl_args) - - if rc != 0: - self.module.fail_json(msg="Failed to reload sysctl: %s" % str(out) + str(err)) - - # ============================================================== - # SYSCTL FILE MANAGEMENT - # 
============================================================== - - # Get the token value from the sysctl file - def read_sysctl_file(self): - - lines = [] - if os.path.isfile(self.sysctl_file): - try: - f = open(self.sysctl_file, "r") - lines = f.readlines() - f.close() - except IOError, e: - self.module.fail_json(msg="Failed to open %s: %s" % (self.sysctl_file, str(e))) - - for line in lines: - line = line.strip() - self.file_lines.append(line) - - # don't split empty lines or comments - if not line or line.startswith("#"): - continue - - k, v = line.split('=',1) - k = k.strip() - v = v.strip() - self.file_values[k] = v.strip() - - # Fix the value in the sysctl file content - def fix_lines(self): - checked = [] - self.fixed_lines = [] - for line in self.file_lines: - if not line.strip() or line.strip().startswith("#"): - self.fixed_lines.append(line) - continue - tmpline = line.strip() - k, v = line.split('=',1) - k = k.strip() - v = v.strip() - if k not in checked: - checked.append(k) - if k == self.args['name']: - if self.args['state'] == "present": - new_line = "%s = %s\n" % (k, self.args['value']) - self.fixed_lines.append(new_line) - else: - new_line = "%s = %s\n" % (k, v) - self.fixed_lines.append(new_line) - - if self.args['name'] not in checked and self.args['state'] == "present": - new_line = "%s=%s\n" % (self.args['name'], self.args['value']) - self.fixed_lines.append(new_line) - - # Completely rewrite the sysctl file - def write_sysctl(self): - # open a tmp file - fd, tmp_path = tempfile.mkstemp('.conf', '.ansible_m_sysctl_', os.path.dirname(self.sysctl_file)) - f = open(tmp_path,"w") - try: - for l in self.fixed_lines: - f.write(l.strip() + "\n") - except IOError, e: - self.module.fail_json(msg="Failed to write to file %s: %s" % (tmp_path, str(e))) - f.flush() - f.close() - - # replace the real one - self.module.atomic_move(tmp_path, self.sysctl_file) - - -# ============================================================== -# main - -def main(): - - # 
defining module - module = AnsibleModule( - argument_spec = dict( - name = dict(aliases=['key'], required=True), - value = dict(aliases=['val'], required=False), - state = dict(default='present', choices=['present', 'absent']), - reload = dict(default=True, type='bool'), - sysctl_set = dict(default=False, type='bool'), - ignoreerrors = dict(default=False, type='bool'), - sysctl_file = dict(default='/etc/sysctl.conf') - ), - supports_check_mode=True - ) - - result = SysctlModule(module) - - module.exit_json(changed=result.changed) - sys.exit(0) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/system/ufw b/library/system/ufw deleted file mode 100644 index e917a3bc74..0000000000 --- a/library/system/ufw +++ /dev/null @@ -1,269 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, Ahti Kitsik -# (c) 2014, Jarno Keskikangas -# (c) 2013, Aleksey Ovcharenko -# (c) 2013, James Martin -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: ufw -short_description: Manage firewall with UFW -description: - - Manage firewall with UFW. -version_added: 1.6 -author: Aleksey Ovcharenko, Jarno Keskikangas, Ahti Kitsik -notes: - - See C(man ufw) for more examples. -requirements: - - C(ufw) package -options: - state: - description: - - C(enabled) reloads firewall and enables firewall on boot. 
- - C(disabled) unloads firewall and disables firewall on boot. - - C(reloaded) reloads firewall. - - C(reset) disables and resets firewall to installation defaults. - required: false - choices: ['enabled', 'disabled', 'reloaded', 'reset'] - policy: - description: - - Change the default policy for incoming or outgoing traffic. - required: false - alias: default - choices: ['allow', 'deny', 'reject'] - direction: - description: - - Select direction for a rule or default policy command. - required: false - choices: ['in', 'out', 'incoming', 'outgoing'] - logging: - description: - - Toggles logging. Logged packets use the LOG_KERN syslog facility. - choices: ['on', 'off', 'low', 'medium', 'high', 'full'] - required: false - insert: - description: - - Insert the corresponding rule as rule number NUM - required: false - rule: - description: - - Add firewall rule - required: false - choices: ['allow', 'deny', 'reject', 'limit'] - log: - description: - - Log new connections matched to this rule - required: false - choices: ['yes', 'no'] - from_ip: - description: - - Source IP address. - required: false - aliases: ['from', 'src'] - default: 'any' - from_port: - description: - - Source port. - required: false - to_ip: - description: - - Destination IP address. - required: false - aliases: ['to', 'dest'] - default: 'any' - to_port: - description: - - Destination port. - required: false - aliases: ['port'] - proto: - description: - - TCP/IP protocol. - choices: ['any', 'tcp', 'udp', 'ipv6', 'esp', 'ah'] - required: false - name: - description: - - Use profile located in C(/etc/ufw/applications.d) - required: false - aliases: ['app'] - delete: - description: - - Delete rule. - required: false - choices: ['yes', 'no'] - interface: - description: - - Specify interface for rule. 
- required: false - aliases: ['if'] -''' - -EXAMPLES = ''' -# Allow everything and enable UFW -ufw: state=enabled policy=allow - -# Set logging -ufw: logging=on - -# Sometimes it is desirable to let the sender know when traffic is -# being denied, rather than simply ignoring it. In these cases, use -# reject instead of deny. In addition, log rejected connections: -ufw: rule=reject port=auth log=yes - -# ufw supports connection rate limiting, which is useful for protecting -# against brute-force login attacks. ufw will deny connections if an IP -# address has attempted to initiate 6 or more connections in the last -# 30 seconds. See http://www.debian-administration.org/articles/187 -# for details. Typical usage is: -ufw: rule=limit port=ssh proto=tcp - -# Allow OpenSSH -ufw: rule=allow name=OpenSSH - -# Delete OpenSSH rule -ufw: rule=allow name=OpenSSH delete=yes - -# Deny all access to port 53: -ufw: rule=deny port=53 - -# Allow all access to tcp port 80: -ufw: rule=allow port=80 proto=tcp - -# Allow all access from RFC1918 networks to this host: -ufw: rule=allow src={{ item }} -with_items: -- 10.0.0.0/8 -- 172.16.0.0/12 -- 192.168.0.0/16 - -# Deny access to udp port 514 from host 1.2.3.4: -ufw: rule=deny proto=udp src=1.2.3.4 port=514 - -# Allow incoming access to eth0 from 1.2.3.5 port 5469 to 1.2.3.4 port 5469 -ufw: rule=allow interface=eth0 direction=in proto=udp src=1.2.3.5 from_port=5469 dest=1.2.3.4 to_port=5469 - -# Deny all traffic from the IPv6 2001:db8::/32 to tcp port 25 on this host. -# Note that IPv6 must be enabled in /etc/default/ufw for IPv6 firewalling to work. 
-ufw: rule=deny proto=tcp src=2001:db8::/32 port=25 -''' - -from operator import itemgetter - - -def main(): - module = AnsibleModule( - argument_spec = dict( - state = dict(default=None, choices=['enabled', 'disabled', 'reloaded', 'reset']), - default = dict(default=None, aliases=['policy'], choices=['allow', 'deny', 'reject']), - logging = dict(default=None, choices=['on', 'off', 'low', 'medium', 'high', 'full']), - direction = dict(default=None, choices=['in', 'incoming', 'out', 'outgoing']), - delete = dict(default=False, type='bool'), - insert = dict(default=None), - rule = dict(default=None, choices=['allow', 'deny', 'reject', 'limit']), - interface = dict(default=None, aliases=['if']), - log = dict(default=False, type='bool'), - from_ip = dict(default='any', aliases=['src', 'from']), - from_port = dict(default=None), - to_ip = dict(default='any', aliases=['dest', 'to']), - to_port = dict(default=None, aliases=['port']), - proto = dict(default=None, aliases=['protocol'], choices=['any', 'tcp', 'udp', 'ipv6', 'esp', 'ah']), - app = dict(default=None, aliases=['name']) - ), - supports_check_mode = True, - mutually_exclusive = [['app', 'proto', 'logging']] - ) - - cmds = [] - - def execute(cmd): - cmd = ' '.join(map(itemgetter(-1), filter(itemgetter(0), cmd))) - - cmds.append(cmd) - (rc, out, err) = module.run_command(cmd) - - if rc != 0: - module.fail_json(msg=err or out) - - params = module.params - - # Ensure at least one of the command arguments are given - command_keys = ['state', 'default', 'rule', 'logging'] - commands = dict((key, params[key]) for key in command_keys if params[key]) - - if len(commands) < 1: - module.fail_json(msg="Not any of the command arguments %s given" % commands) - - if('interface' in params and 'direction' not in params): - module.fail_json(msg="Direction must be specified when creating a rule on an interface") - - # Ensure ufw is available - ufw_bin = module.get_bin_path('ufw', True) - - # Save the pre state and rules in order to 
recognize changes - (_, pre_state, _) = module.run_command(ufw_bin + ' status verbose') - (_, pre_rules, _) = module.run_command("grep '^### tuple' /lib/ufw/user*.rules") - - # Execute commands - for (command, value) in commands.iteritems(): - cmd = [[ufw_bin], [module.check_mode, '--dry-run']] - - if command == 'state': - states = { 'enabled': 'enable', 'disabled': 'disable', - 'reloaded': 'reload', 'reset': 'reset' } - execute(cmd + [['-f'], [states[value]]]) - - elif command == 'logging': - execute(cmd + [[command], [value]]) - - elif command == 'default': - execute(cmd + [[command], [value], [params['direction']]]) - - elif command == 'rule': - # Rules are constructed according to the long format - # - # ufw [--dry-run] [delete] [insert NUM] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \ - # [from ADDRESS [port PORT]] [to ADDRESS [port PORT]] \ - # [proto protocol] [app application] - cmd.append([module.boolean(params['delete']), 'delete']) - cmd.append([params['insert'], "insert %s" % params['insert']]) - cmd.append([value]) - cmd.append([module.boolean(params['log']), 'log']) - - for (key, template) in [('direction', "%s" ), ('interface', "on %s" ), - ('from_ip', "from %s" ), ('from_port', "port %s" ), - ('to_ip', "to %s" ), ('to_port', "port %s" ), - ('proto', "proto %s"), ('app', "app '%s'")]: - - value = params[key] - cmd.append([value, template % (value)]) - - execute(cmd) - - # Get the new state - (_, post_state, _) = module.run_command(ufw_bin + ' status verbose') - (_, post_rules, _) = module.run_command("grep '^### tuple' /lib/ufw/user*.rules") - changed = (pre_state != post_state) or (pre_rules != post_rules) - - return module.exit_json(changed=changed, commands=cmds, msg=post_state.rstrip()) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/system/user b/library/system/user deleted file mode 100644 index 551384a7a6..0000000000 --- a/library/system/user +++ /dev/null @@ -1,1584 +0,0 @@ 
-#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Stephen Fromm -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: user -author: Stephen Fromm -version_added: "0.2" -short_description: Manage user accounts -requirements: [ useradd, userdel, usermod ] -description: - - Manage user accounts and user attributes. -options: - name: - required: true - aliases: [ "user" ] - description: - - Name of the user to create, remove or modify. - comment: - required: false - description: - - Optionally sets the description (aka I(GECOS)) of user account. - uid: - required: false - description: - - Optionally sets the I(UID) of the user. - non_unique: - required: false - default: "no" - choices: [ "yes", "no" ] - description: - - Optionally when used with the -u option, this option allows to - change the user ID to a non-unique value. - version_added: "1.1" - group: - required: false - description: - - Optionally sets the user's primary group (takes a group name). - groups: - required: false - description: - - Puts the user in this comma-delimited list of groups. When set to - the empty string ('groups='), the user is removed from all groups - except the primary group. - append: - required: false - default: "no" - choices: [ "yes", "no" ] - description: - - If C(yes), will only add groups, not set them to just the list - in I(groups). 
- shell: - required: false - description: - - Optionally set the user's shell. - home: - required: false - description: - - Optionally set the user's home directory. - password: - required: false - description: - - Optionally set the user's password to this crypted value. See - the user example in the github examples directory for what this looks - like in a playbook. The `FAQ `_ - contains details on various ways to generate these password values. - state: - required: false - default: "present" - choices: [ present, absent ] - description: - - Whether the account should exist. When C(absent), removes - the user account. - createhome: - required: false - default: "yes" - choices: [ "yes", "no" ] - description: - - Unless set to C(no), a home directory will be made for the user - when the account is created or if the home directory does not - exist. - move_home: - required: false - default: "no" - choices: [ "yes", "no" ] - description: - - If set to C(yes) when used with C(home=), attempt to move the - user's home directory to the specified directory if it isn't there - already. - system: - required: false - default: "no" - choices: [ "yes", "no" ] - description: - - When creating an account, setting this to C(yes) makes the user a - system account. This setting cannot be changed on existing users. - force: - required: false - default: "no" - choices: [ "yes", "no" ] - description: - - When used with C(state=absent), behavior is as with - C(userdel --force). - login_class: - required: false - description: - - Optionally sets the user's login class for FreeBSD, OpenBSD and NetBSD systems. - remove: - required: false - default: "no" - choices: [ "yes", "no" ] - description: - - When used with C(state=absent), behavior is as with - C(userdel --remove). - generate_ssh_key: - required: false - default: "no" - choices: [ "yes", "no" ] - version_added: "0.9" - description: - - Whether to generate a SSH key for the user in question. 
- This will B(not) overwrite an existing SSH key. - ssh_key_bits: - required: false - default: 2048 - version_added: "0.9" - description: - - Optionally specify number of bits in SSH key to create. - ssh_key_type: - required: false - default: rsa - version_added: "0.9" - description: - - Optionally specify the type of SSH key to generate. - Available SSH key types will depend on implementation - present on target host. - ssh_key_file: - required: false - default: $HOME/.ssh/id_rsa - version_added: "0.9" - description: - - Optionally specify the SSH key filename. - ssh_key_comment: - required: false - default: ansible-generated - version_added: "0.9" - description: - - Optionally define the comment for the SSH key. - ssh_key_passphrase: - required: false - version_added: "0.9" - description: - - Set a passphrase for the SSH key. If no - passphrase is provided, the SSH key will default to - having no passphrase. - update_password: - required: false - default: always - choices: ['always', 'on_create'] - version_added: "1.3" - description: - - C(always) will update passwords if they differ. C(on_create) will only set the password for newly created users. -''' - -EXAMPLES = ''' -# Add the user 'johnd' with a specific uid and a primary group of 'admin' -- user: name=johnd comment="John Doe" uid=1040 group=admin - -# Add the user 'james' with a bash shell, appending the group 'admins' and 'developers' to the user's groups -- user: name=james shell=/bin/bash groups=admins,developers append=yes - -# Remove the user 'johnd' -- user: name=johnd state=absent remove=yes - -# Create a 2048-bit SSH key for user jsmith -- user: name=jsmith generate_ssh_key=yes ssh_key_bits=2048 -''' - -import os -import pwd -import grp -import syslog -import platform - -try: - import spwd - HAVE_SPWD=True -except: - HAVE_SPWD=False - - -class User(object): - """ - This is a generic User manipulation class that is subclassed - based on platform. 
- - A subclass may wish to override the following action methods:- - - create_user() - - remove_user() - - modify_user() - - ssh_key_gen() - - ssh_key_fingerprint() - - user_exists() - - All subclasses MUST define platform and distribution (which may be None). - """ - - platform = 'Generic' - distribution = None - SHADOWFILE = '/etc/shadow' - - def __new__(cls, *args, **kwargs): - return load_platform_subclass(User, args, kwargs) - - def __init__(self, module): - self.module = module - self.state = module.params['state'] - self.name = module.params['name'] - self.uid = module.params['uid'] - self.non_unique = module.params['non_unique'] - self.group = module.params['group'] - self.groups = module.params['groups'] - self.comment = module.params['comment'] - self.home = module.params['home'] - self.shell = module.params['shell'] - self.password = module.params['password'] - self.force = module.params['force'] - self.remove = module.params['remove'] - self.createhome = module.params['createhome'] - self.move_home = module.params['move_home'] - self.system = module.params['system'] - self.login_class = module.params['login_class'] - self.append = module.params['append'] - self.sshkeygen = module.params['generate_ssh_key'] - self.ssh_bits = module.params['ssh_key_bits'] - self.ssh_type = module.params['ssh_key_type'] - self.ssh_comment = module.params['ssh_key_comment'] - self.ssh_passphrase = module.params['ssh_key_passphrase'] - self.update_password = module.params['update_password'] - if module.params['ssh_key_file'] is not None: - self.ssh_file = module.params['ssh_key_file'] - else: - self.ssh_file = os.path.join('.ssh', 'id_%s' % self.ssh_type) - - # select whether we dump additional debug info through syslog - self.syslogging = False - - def execute_command(self, cmd): - if self.syslogging: - syslog.openlog('ansible-%s' % os.path.basename(__file__)) - syslog.syslog(syslog.LOG_NOTICE, 'Command %s' % '|'.join(cmd)) - - return self.module.run_command(cmd) - - def 
remove_user_userdel(self): - cmd = [self.module.get_bin_path('userdel', True)] - if self.force: - cmd.append('-f') - if self.remove: - cmd.append('-r') - cmd.append(self.name) - - return self.execute_command(cmd) - - def create_user_useradd(self, command_name='useradd'): - cmd = [self.module.get_bin_path(command_name, True)] - - if self.uid is not None: - cmd.append('-u') - cmd.append(self.uid) - - if self.non_unique: - cmd.append('-o') - - if self.group is not None: - if not self.group_exists(self.group): - self.module.fail_json(msg="Group %s does not exist" % self.group) - cmd.append('-g') - cmd.append(self.group) - elif self.group_exists(self.name): - # use the -N option (no user group) if a group already - # exists with the same name as the user to prevent - # errors from useradd trying to create a group when - # USERGROUPS_ENAB is set in /etc/login.defs. - cmd.append('-N') - - if self.groups is not None and len(self.groups): - groups = self.get_groups_set() - cmd.append('-G') - cmd.append(','.join(groups)) - - if self.comment is not None: - cmd.append('-c') - cmd.append(self.comment) - - if self.home is not None: - cmd.append('-d') - cmd.append(self.home) - - if self.shell is not None: - cmd.append('-s') - cmd.append(self.shell) - - if self.password is not None: - cmd.append('-p') - cmd.append(self.password) - - if self.createhome: - cmd.append('-m') - else: - cmd.append('-M') - - if self.system: - cmd.append('-r') - - cmd.append(self.name) - return self.execute_command(cmd) - - - def _check_usermod_append(self): - # check if this version of usermod can append groups - usermod_path = self.module.get_bin_path('usermod', True) - - # for some reason, usermod --help cannot be used by non root - # on RH/Fedora, due to lack of execute bit for others - if not os.access(usermod_path, os.X_OK): - return False - - cmd = [usermod_path] - cmd.append('--help') - rc, data1, data2 = self.execute_command(cmd) - helpout = data1 + data2 - - # check if --append exists - lines = 
helpout.split('\n') - for line in lines: - if line.strip().startswith('-a, --append'): - return True - - return False - - - - def modify_user_usermod(self): - cmd = [self.module.get_bin_path('usermod', True)] - info = self.user_info() - has_append = self._check_usermod_append() - - if self.uid is not None and info[2] != int(self.uid): - cmd.append('-u') - cmd.append(self.uid) - - if self.non_unique: - cmd.append('-o') - - if self.group is not None: - if not self.group_exists(self.group): - self.module.fail_json(msg="Group %s does not exist" % self.group) - ginfo = self.group_info(self.group) - if info[3] != ginfo[2]: - cmd.append('-g') - cmd.append(self.group) - - if self.groups is not None: - current_groups = self.user_group_membership() - groups_need_mod = False - groups = [] - - if self.groups == '': - if current_groups and not self.append: - groups_need_mod = True - else: - groups = self.get_groups_set(remove_existing=False) - group_diff = set(current_groups).symmetric_difference(groups) - - if group_diff: - if self.append: - for g in groups: - if g in group_diff: - if has_append: - cmd.append('-a') - groups_need_mod = True - break - else: - groups_need_mod = True - - if groups_need_mod: - if self.append and not has_append: - cmd.append('-A') - cmd.append(','.join(group_diff)) - else: - cmd.append('-G') - cmd.append(','.join(groups)) - - - if self.comment is not None and info[4] != self.comment: - cmd.append('-c') - cmd.append(self.comment) - - if self.home is not None and info[5] != self.home: - cmd.append('-d') - cmd.append(self.home) - if self.move_home: - cmd.append('-m') - - if self.shell is not None and info[6] != self.shell: - cmd.append('-s') - cmd.append(self.shell) - - if self.update_password == 'always' and self.password is not None and info[1] != self.password: - cmd.append('-p') - cmd.append(self.password) - - # skip if no changes to be made - if len(cmd) == 1: - return (None, '', '') - elif self.module.check_mode: - return (0, '', '') - - 
cmd.append(self.name) - return self.execute_command(cmd) - - def group_exists(self,group): - try: - if group.isdigit(): - if grp.getgrgid(int(group)): - return True - else: - if grp.getgrnam(group): - return True - except KeyError: - return False - - def group_info(self,group): - if not self.group_exists(group): - return False - if group.isdigit(): - return list(grp.getgrgid(group)) - else: - return list(grp.getgrnam(group)) - - def get_groups_set(self, remove_existing=True): - if self.groups is None: - return None - info = self.user_info() - groups = set(filter(None, self.groups.split(','))) - for g in set(groups): - if not self.group_exists(g): - self.module.fail_json(msg="Group %s does not exist" % (g)) - if info and remove_existing and self.group_info(g)[2] == info[3]: - groups.remove(g) - return groups - - def user_group_membership(self): - groups = [] - info = self.get_pwd_info() - for group in grp.getgrall(): - if self.name in group.gr_mem and not info[3] == group.gr_gid: - groups.append(group[0]) - return groups - - def user_exists(self): - try: - if pwd.getpwnam(self.name): - return True - except KeyError: - return False - - def get_pwd_info(self): - if not self.user_exists(): - return False - return list(pwd.getpwnam(self.name)) - - def user_info(self): - if not self.user_exists(): - return False - info = self.get_pwd_info() - if len(info[1]) == 1 or len(info[1]) == 0: - info[1] = self.user_password() - return info - - def user_password(self): - passwd = '' - if HAVE_SPWD: - try: - passwd = spwd.getspnam(self.name)[1] - except KeyError: - return passwd - if not self.user_exists(): - return passwd - else: - # Read shadow file for user's encrypted password string - if os.path.exists(self.SHADOWFILE) and os.access(self.SHADOWFILE, os.R_OK): - for line in open(self.SHADOWFILE).readlines(): - if line.startswith('%s:' % self.name): - passwd = line.split(':')[1] - return passwd - - def get_ssh_key_path(self): - info = self.user_info() - if 
os.path.isabs(self.ssh_file): - ssh_key_file = self.ssh_file - else: - ssh_key_file = os.path.join(info[5], self.ssh_file) - return ssh_key_file - - def ssh_key_gen(self): - info = self.user_info() - if not os.path.exists(info[5]): - return (1, '', 'User %s home directory does not exist' % self.name) - ssh_key_file = self.get_ssh_key_path() - ssh_dir = os.path.dirname(ssh_key_file) - if not os.path.exists(ssh_dir): - try: - os.mkdir(ssh_dir, 0700) - os.chown(ssh_dir, info[2], info[3]) - except OSError, e: - return (1, '', 'Failed to create %s: %s' % (ssh_dir, str(e))) - if os.path.exists(ssh_key_file): - return (None, 'Key already exists', '') - cmd = [self.module.get_bin_path('ssh-keygen', True)] - cmd.append('-t') - cmd.append(self.ssh_type) - cmd.append('-b') - cmd.append(self.ssh_bits) - cmd.append('-C') - cmd.append(self.ssh_comment) - cmd.append('-f') - cmd.append(ssh_key_file) - cmd.append('-N') - if self.ssh_passphrase is not None: - cmd.append(self.ssh_passphrase) - else: - cmd.append('') - - (rc, out, err) = self.execute_command(cmd) - if rc == 0: - # If the keys were successfully created, we should be able - # to tweak ownership. 
- os.chown(ssh_key_file, info[2], info[3]) - os.chown('%s.pub' % ssh_key_file, info[2], info[3]) - return (rc, out, err) - - def ssh_key_fingerprint(self): - ssh_key_file = self.get_ssh_key_path() - if not os.path.exists(ssh_key_file): - return (1, 'SSH Key file %s does not exist' % ssh_key_file, '') - cmd = [ self.module.get_bin_path('ssh-keygen', True) ] - cmd.append('-l') - cmd.append('-f') - cmd.append(ssh_key_file) - - return self.execute_command(cmd) - - def get_ssh_public_key(self): - ssh_public_key_file = '%s.pub' % self.get_ssh_key_path() - try: - f = open(ssh_public_key_file) - ssh_public_key = f.read().strip() - f.close() - except IOError: - return None - return ssh_public_key - - def create_user(self): - # by default we use the create_user_useradd method - return self.create_user_useradd() - - def remove_user(self): - # by default we use the remove_user_userdel method - return self.remove_user_userdel() - - def modify_user(self): - # by default we use the modify_user_usermod method - return self.modify_user_usermod() - - def create_homedir(self, path): - if not os.path.exists(path): - # use /etc/skel if possible - if os.path.exists('/etc/skel'): - try: - shutil.copytree('/etc/skel', path, symlinks=True) - except OSError, e: - self.module.exit_json(failed=True, msg="%s" % e) - else: - try: - os.makedirs(path) - except OSError, e: - self.module.exit_json(failed=True, msg="%s" % e) - - def chown_homedir(self, uid, gid, path): - try: - os.chown(path, uid, gid) - for root, dirs, files in os.walk(path): - for d in dirs: - os.chown(path, uid, gid) - for f in files: - os.chown(os.path.join(root, f), uid, gid) - except OSError, e: - self.module.exit_json(failed=True, msg="%s" % e) - - -# =========================================== - -class FreeBsdUser(User): - """ - This is a FreeBSD User manipulation class - it uses the pw command - to manipulate the user database, followed by the chpass command - to change the password. 
- - This overrides the following methods from the generic class:- - - create_user() - - remove_user() - - modify_user() - """ - - platform = 'FreeBSD' - distribution = None - SHADOWFILE = '/etc/master.passwd' - - def remove_user(self): - cmd = [ - self.module.get_bin_path('pw', True), - 'userdel', - '-n', - self.name - ] - if self.remove: - cmd.append('-r') - - return self.execute_command(cmd) - - def create_user(self): - cmd = [ - self.module.get_bin_path('pw', True), - 'useradd', - '-n', - self.name, - ] - - if self.uid is not None: - cmd.append('-u') - cmd.append(self.uid) - - if self.non_unique: - cmd.append('-o') - - if self.comment is not None: - cmd.append('-c') - cmd.append(self.comment) - - if self.home is not None: - cmd.append('-d') - cmd.append(self.home) - - if self.group is not None: - if not self.group_exists(self.group): - self.module.fail_json(msg="Group %s does not exist" % self.group) - cmd.append('-g') - cmd.append(self.group) - - if self.groups is not None: - groups = self.get_groups_set() - cmd.append('-G') - cmd.append(','.join(groups)) - - if self.createhome: - cmd.append('-m') - - if self.shell is not None: - cmd.append('-s') - cmd.append(self.shell) - - if self.login_class is not None: - cmd.append('-L') - cmd.append(self.login_class) - - # system cannot be handled currently - should we error if its requested? 
- # create the user - (rc, out, err) = self.execute_command(cmd) - if rc is not None and rc != 0: - self.module.fail_json(name=self.name, msg=err, rc=rc) - - # we have to set the password in a second command - if self.password is not None: - cmd = [ - self.module.get_bin_path('chpass', True), - '-p', - self.password, - self.name - ] - return self.execute_command(cmd) - - return (rc, out, err) - - def modify_user(self): - cmd = [ - self.module.get_bin_path('pw', True), - 'usermod', - '-n', - self.name - ] - cmd_len = len(cmd) - info = self.user_info() - - if self.uid is not None and info[2] != int(self.uid): - cmd.append('-u') - cmd.append(self.uid) - - if self.non_unique: - cmd.append('-o') - - if self.comment is not None and info[4] != self.comment: - cmd.append('-c') - cmd.append(self.comment) - - if self.home is not None and info[5] != self.home: - if self.move_home: - cmd.append('-m') - cmd.append('-d') - cmd.append(self.home) - - if self.group is not None: - if not self.group_exists(self.group): - self.module.fail_json(msg="Group %s does not exist" % self.group) - ginfo = self.group_info(self.group) - if info[3] != ginfo[2]: - cmd.append('-g') - cmd.append(self.group) - - if self.shell is not None and info[6] != self.shell: - cmd.append('-s') - cmd.append(self.shell) - - if self.login_class is not None: - cmd.append('-L') - cmd.append(self.login_class) - - if self.groups is not None: - current_groups = self.user_group_membership() - groups = self.get_groups_set() - - group_diff = set(current_groups).symmetric_difference(groups) - groups_need_mod = False - - if group_diff: - if self.append: - for g in groups: - if g in group_diff: - groups_need_mod = True - break - else: - groups_need_mod = True - - if groups_need_mod: - cmd.append('-G') - new_groups = groups - if self.append: - new_groups = groups | set(current_groups) - cmd.append(','.join(new_groups)) - - # modify the user if cmd will do anything - if cmd_len != len(cmd): - (rc, out, err) = 
self.execute_command(cmd) - if rc is not None and rc != 0: - self.module.fail_json(name=self.name, msg=err, rc=rc) - else: - (rc, out, err) = (None, '', '') - - # we have to set the password in a second command - if self.update_password == 'always' and self.password is not None and info[1] != self.password: - cmd = [ - self.module.get_bin_path('chpass', True), - '-p', - self.password, - self.name - ] - return self.execute_command(cmd) - - return (rc, out, err) - -# =========================================== - -class OpenBSDUser(User): - """ - This is a OpenBSD User manipulation class. - Main differences are that OpenBSD:- - - has no concept of "system" account. - - has no force delete user - - This overrides the following methods from the generic class:- - - create_user() - - remove_user() - - modify_user() - """ - - platform = 'OpenBSD' - distribution = None - SHADOWFILE = '/etc/master.passwd' - - def create_user(self): - cmd = [self.module.get_bin_path('useradd', True)] - - if self.uid is not None: - cmd.append('-u') - cmd.append(self.uid) - - if self.non_unique: - cmd.append('-o') - - if self.group is not None: - if not self.group_exists(self.group): - self.module.fail_json(msg="Group %s does not exist" % self.group) - cmd.append('-g') - cmd.append(self.group) - - if self.groups is not None: - groups = self.get_groups_set() - cmd.append('-G') - cmd.append(','.join(groups)) - - if self.comment is not None: - cmd.append('-c') - cmd.append(self.comment) - - if self.home is not None: - cmd.append('-d') - cmd.append(self.home) - - if self.shell is not None: - cmd.append('-s') - cmd.append(self.shell) - - if self.login_class is not None: - cmd.append('-L') - cmd.append(self.login_class) - - if self.password is not None: - cmd.append('-p') - cmd.append(self.password) - - if self.createhome: - cmd.append('-m') - - cmd.append(self.name) - return self.execute_command(cmd) - - def remove_user_userdel(self): - cmd = [self.module.get_bin_path('userdel', True)] - if 
self.remove: - cmd.append('-r') - cmd.append(self.name) - return self.execute_command(cmd) - - def modify_user(self): - cmd = [self.module.get_bin_path('usermod', True)] - info = self.user_info() - - if self.uid is not None and info[2] != int(self.uid): - cmd.append('-u') - cmd.append(self.uid) - - if self.non_unique: - cmd.append('-o') - - if self.group is not None: - if not self.group_exists(self.group): - self.module.fail_json(msg="Group %s does not exist" % self.group) - ginfo = self.group_info(self.group) - if info[3] != ginfo[2]: - cmd.append('-g') - cmd.append(self.group) - - if self.groups is not None: - current_groups = self.user_group_membership() - groups_need_mod = False - groups_option = '-G' - groups = [] - - if self.groups == '': - if current_groups and not self.append: - groups_need_mod = True - else: - groups = self.get_groups_set() - group_diff = set(current_groups).symmetric_difference(groups) - - if group_diff: - if self.append: - for g in groups: - if g in group_diff: - groups_option = '-S' - groups_need_mod = True - break - else: - groups_need_mod = True - - if groups_need_mod: - cmd.append(groups_option) - cmd.append(','.join(groups)) - - if self.comment is not None and info[4] != self.comment: - cmd.append('-c') - cmd.append(self.comment) - - if self.home is not None and info[5] != self.home: - if self.move_home: - cmd.append('-m') - cmd.append('-d') - cmd.append(self.home) - - if self.shell is not None and info[6] != self.shell: - cmd.append('-s') - cmd.append(self.shell) - - if self.login_class is not None: - # find current login class - user_login_class = None - userinfo_cmd = [self.module.get_bin_path('userinfo', True), self.name] - (rc, out, err) = self.execute_command(userinfo_cmd) - - for line in out.splitlines(): - tokens = line.split() - - if tokens[0] == 'class' and len(tokens) == 2: - user_login_class = tokens[1] - - # act only if login_class change - if self.login_class != user_login_class: - cmd.append('-L') - 
cmd.append(self.login_class) - - if self.update_password == 'always' and self.password is not None and info[1] != self.password: - cmd.append('-p') - cmd.append(self.password) - - # skip if no changes to be made - if len(cmd) == 1: - return (None, '', '') - elif self.module.check_mode: - return (0, '', '') - - cmd.append(self.name) - return self.execute_command(cmd) - - -# =========================================== - -class NetBSDUser(User): - """ - This is a NetBSD User manipulation class. - Main differences are that NetBSD:- - - has no concept of "system" account. - - has no force delete user - - - This overrides the following methods from the generic class:- - - create_user() - - remove_user() - - modify_user() - """ - - platform = 'NetBSD' - distribution = None - SHADOWFILE = '/etc/master.passwd' - - def create_user(self): - cmd = [self.module.get_bin_path('useradd', True)] - - if self.uid is not None: - cmd.append('-u') - cmd.append(self.uid) - - if self.non_unique: - cmd.append('-o') - - if self.group is not None: - if not self.group_exists(self.group): - self.module.fail_json(msg="Group %s does not exist" % self.group) - cmd.append('-g') - cmd.append(self.group) - - if self.groups is not None: - groups = self.get_groups_set() - if len(groups) > 16: - self.module.fail_json(msg="Too many groups (%d) NetBSD allows for 16 max." 
% len(groups)) - cmd.append('-G') - cmd.append(','.join(groups)) - - if self.comment is not None: - cmd.append('-c') - cmd.append(self.comment) - - if self.home is not None: - cmd.append('-d') - cmd.append(self.home) - - if self.shell is not None: - cmd.append('-s') - cmd.append(self.shell) - - if self.login_class is not None: - cmd.append('-L') - cmd.append(self.login_class) - - if self.password is not None: - cmd.append('-p') - cmd.append(self.password) - - if self.createhome: - cmd.append('-m') - - cmd.append(self.name) - return self.execute_command(cmd) - - def remove_user_userdel(self): - cmd = [self.module.get_bin_path('userdel', True)] - if self.remove: - cmd.append('-r') - cmd.append(self.name) - return self.execute_command(cmd) - - def modify_user(self): - cmd = [self.module.get_bin_path('usermod', True)] - info = self.user_info() - - if self.uid is not None and info[2] != int(self.uid): - cmd.append('-u') - cmd.append(self.uid) - - if self.non_unique: - cmd.append('-o') - - if self.group is not None: - if not self.group_exists(self.group): - self.module.fail_json(msg="Group %s does not exist" % self.group) - ginfo = self.group_info(self.group) - if info[3] != ginfo[2]: - cmd.append('-g') - cmd.append(self.group) - - if self.groups is not None: - current_groups = self.user_group_membership() - groups_need_mod = False - groups = [] - - if self.groups == '': - if current_groups and not self.append: - groups_need_mod = True - else: - groups = self.get_groups_set() - group_diff = set(current_groups).symmetric_difference(groups) - - if group_diff: - if self.append: - for g in groups: - if g in group_diff: - groups = set(current_groups).union(groups) - groups_need_mod = True - break - else: - groups_need_mod = True - - if groups_need_mod: - if len(groups) > 16: - self.module.fail_json(msg="Too many groups (%d) NetBSD allows for 16 max." 
% len(groups)) - cmd.append('-G') - cmd.append(','.join(groups)) - - if self.comment is not None and info[4] != self.comment: - cmd.append('-c') - cmd.append(self.comment) - - if self.home is not None and info[5] != self.home: - if self.move_home: - cmd.append('-m') - cmd.append('-d') - cmd.append(self.home) - - if self.shell is not None and info[6] != self.shell: - cmd.append('-s') - cmd.append(self.shell) - - if self.login_class is not None: - cmd.append('-L') - cmd.append(self.login_class) - - if self.update_password == 'always' and self.password is not None and info[1] != self.password: - cmd.append('-p') - cmd.append(self.password) - - # skip if no changes to be made - if len(cmd) == 1: - return (None, '', '') - elif self.module.check_mode: - return (0, '', '') - - cmd.append(self.name) - return self.execute_command(cmd) - - -# =========================================== - -class SunOS(User): - """ - This is a SunOS User manipulation class - The main difference between - this class and the generic user class is that Solaris-type distros - don't support the concept of a "system" account and we need to - edit the /etc/shadow file manually to set a password. 
(Ugh) - - This overrides the following methods from the generic class:- - - create_user() - - remove_user() - - modify_user() - """ - - platform = 'SunOS' - distribution = None - SHADOWFILE = '/etc/shadow' - - def remove_user(self): - cmd = [self.module.get_bin_path('userdel', True)] - if self.remove: - cmd.append('-r') - cmd.append(self.name) - - return self.execute_command(cmd) - - def create_user(self): - cmd = [self.module.get_bin_path('useradd', True)] - - if self.uid is not None: - cmd.append('-u') - cmd.append(self.uid) - - if self.non_unique: - cmd.append('-o') - - if self.group is not None: - if not self.group_exists(self.group): - self.module.fail_json(msg="Group %s does not exist" % self.group) - cmd.append('-g') - cmd.append(self.group) - - if self.groups is not None: - groups = self.get_groups_set() - cmd.append('-G') - cmd.append(','.join(groups)) - - if self.comment is not None: - cmd.append('-c') - cmd.append(self.comment) - - if self.home is not None: - cmd.append('-d') - cmd.append(self.home) - - if self.shell is not None: - cmd.append('-s') - cmd.append(self.shell) - - if self.createhome: - cmd.append('-m') - - cmd.append(self.name) - - if self.module.check_mode: - return (0, '', '') - else: - (rc, out, err) = self.execute_command(cmd) - if rc is not None and rc != 0: - self.module.fail_json(name=self.name, msg=err, rc=rc) - - # we have to set the password by editing the /etc/shadow file - if self.password is not None: - try: - lines = [] - for line in open(self.SHADOWFILE, 'rb').readlines(): - fields = line.strip().split(':') - if not fields[0] == self.name: - lines.append(line) - continue - fields[1] = self.password - fields[2] = str(int(time.time() / 86400)) - line = ':'.join(fields) - lines.append('%s\n' % line) - open(self.SHADOWFILE, 'w+').writelines(lines) - except Exception, err: - self.module.fail_json(msg="failed to update users password: %s" % str(err)) - - return (rc, out, err) - - def modify_user_usermod(self): - cmd = 
[self.module.get_bin_path('usermod', True)] - cmd_len = len(cmd) - info = self.user_info() - - if self.uid is not None and info[2] != int(self.uid): - cmd.append('-u') - cmd.append(self.uid) - - if self.non_unique: - cmd.append('-o') - - if self.group is not None: - if not self.group_exists(self.group): - self.module.fail_json(msg="Group %s does not exist" % self.group) - ginfo = self.group_info(self.group) - if info[3] != ginfo[2]: - cmd.append('-g') - cmd.append(self.group) - - if self.groups is not None: - current_groups = self.user_group_membership() - groups = self.get_groups_set() - group_diff = set(current_groups).symmetric_difference(groups) - groups_need_mod = False - - if group_diff: - if self.append: - for g in groups: - if g in group_diff: - groups_need_mod = True - break - else: - groups_need_mod = True - - if groups_need_mod: - cmd.append('-G') - new_groups = groups - if self.append: - new_groups.extend(current_groups) - cmd.append(','.join(new_groups)) - - if self.comment is not None and info[4] != self.comment: - cmd.append('-c') - cmd.append(self.comment) - - if self.home is not None and info[5] != self.home: - if self.move_home: - cmd.append('-m') - cmd.append('-d') - cmd.append(self.home) - - if self.shell is not None and info[6] != self.shell: - cmd.append('-s') - cmd.append(self.shell) - - if self.module.check_mode: - return (0, '', '') - else: - # modify the user if cmd will do anything - if cmd_len != len(cmd): - cmd.append(self.name) - (rc, out, err) = self.execute_command(cmd) - if rc is not None and rc != 0: - self.module.fail_json(name=self.name, msg=err, rc=rc) - else: - (rc, out, err) = (None, '', '') - - # we have to set the password by editing the /etc/shadow file - if self.update_password == 'always' and self.password is not None and info[1] != self.password: - try: - lines = [] - for line in open(self.SHADOWFILE, 'rb').readlines(): - fields = line.strip().split(':') - if not fields[0] == self.name: - lines.append(line) - continue - 
fields[1] = self.password - fields[2] = str(int(time.time() / 86400)) - line = ':'.join(fields) - lines.append('%s\n' % line) - open(self.SHADOWFILE, 'w+').writelines(lines) - rc = 0 - except Exception, err: - self.module.fail_json(msg="failed to update users password: %s" % str(err)) - - return (rc, out, err) - -# =========================================== - -class AIX(User): - """ - This is a AIX User manipulation class. - - This overrides the following methods from the generic class:- - - create_user() - - remove_user() - - modify_user() - """ - - platform = 'AIX' - distribution = None - SHADOWFILE = '/etc/security/passwd' - - def remove_user(self): - cmd = [self.module.get_bin_path('userdel', True)] - if self.remove: - cmd.append('-r') - cmd.append(self.name) - - return self.execute_command(cmd) - - def create_user_useradd(self, command_name='useradd'): - cmd = [self.module.get_bin_path(command_name, True)] - - if self.uid is not None: - cmd.append('-u') - cmd.append(self.uid) - - if self.group is not None: - if not self.group_exists(self.group): - self.module.fail_json(msg="Group %s does not exist" % self.group) - cmd.append('-g') - cmd.append(self.group) - - if self.groups is not None and len(self.groups): - groups = self.get_groups_set() - cmd.append('-G') - cmd.append(','.join(groups)) - - if self.comment is not None: - cmd.append('-c') - cmd.append(self.comment) - - if self.home is not None: - cmd.append('-d') - cmd.append(self.home) - - if self.shell is not None: - cmd.append('-s') - cmd.append(self.shell) - - if self.createhome: - cmd.append('-m') - - cmd.append(self.name) - (rc, out, err) = self.execute_command(cmd) - - # set password with chpasswd - if self.password is not None: - cmd = [] - cmd.append('echo "'+self.name+':'+self.password+'" |') - cmd.append(self.module.get_bin_path('chpasswd', True)) - cmd.append('-e') - cmd.append('-c') - self.execute_command(' '.join(cmd)) - - return (rc, out, err) - - def modify_user_usermod(self): - cmd = 
[self.module.get_bin_path('usermod', True)] - info = self.user_info() - - if self.uid is not None and info[2] != int(self.uid): - cmd.append('-u') - cmd.append(self.uid) - - if self.group is not None: - if not self.group_exists(self.group): - self.module.fail_json(msg="Group %s does not exist" % self.group) - ginfo = self.group_info(self.group) - if info[3] != ginfo[2]: - cmd.append('-g') - cmd.append(self.group) - - if self.groups is not None: - current_groups = self.user_group_membership() - groups_need_mod = False - groups = [] - - if self.groups == '': - if current_groups and not self.append: - groups_need_mod = True - else: - groups = self.get_groups_set() - group_diff = set(current_groups).symmetric_difference(groups) - - if group_diff: - if self.append: - for g in groups: - if g in group_diff: - groups_need_mod = True - break - else: - groups_need_mod = True - - if groups_need_mod: - cmd.append('-G') - cmd.append(','.join(groups)) - - if self.comment is not None and info[4] != self.comment: - cmd.append('-c') - cmd.append(self.comment) - - if self.home is not None and info[5] != self.home: - if self.move_home: - cmd.append('-m') - cmd.append('-d') - cmd.append(self.home) - - if self.shell is not None and info[6] != self.shell: - cmd.append('-s') - cmd.append(self.shell) - - - # skip if no changes to be made - if len(cmd) == 1: - (rc, out, err) = (None, '', '') - elif self.module.check_mode: - return (True, '', '') - else: - cmd.append(self.name) - (rc, out, err) = self.execute_command(cmd) - - # set password with chpasswd - if self.update_password == 'always' and self.password is not None and info[1] != self.password: - cmd = [] - cmd.append('echo "'+self.name+':'+self.password+'" |') - cmd.append(self.module.get_bin_path('chpasswd', True)) - cmd.append('-e') - cmd.append('-c') - (rc2, out2, err2) = self.execute_command(' '.join(cmd)) - else: - (rc2, out2, err2) = (None, '', '') - - if rc != None: - return (rc, out+out2, err+err2) - else: - return (rc2, 
out+out2, err+err2) - -# =========================================== - -def main(): - ssh_defaults = { - 'bits': '2048', - 'type': 'rsa', - 'passphrase': None, - 'comment': 'ansible-generated' - } - module = AnsibleModule( - argument_spec = dict( - state=dict(default='present', choices=['present', 'absent'], type='str'), - name=dict(required=True, aliases=['user'], type='str'), - uid=dict(default=None, type='str'), - non_unique=dict(default='no', type='bool'), - group=dict(default=None, type='str'), - groups=dict(default=None, type='str'), - comment=dict(default=None, type='str'), - home=dict(default=None, type='str'), - shell=dict(default=None, type='str'), - password=dict(default=None, type='str'), - login_class=dict(default=None, type='str'), - # following options are specific to userdel - force=dict(default='no', type='bool'), - remove=dict(default='no', type='bool'), - # following options are specific to useradd - createhome=dict(default='yes', type='bool'), - system=dict(default='no', type='bool'), - # following options are specific to usermod - move_home=dict(default='no', type='bool'), - append=dict(default='no', type='bool'), - # following are specific to ssh key generation - generate_ssh_key=dict(type='bool'), - ssh_key_bits=dict(default=ssh_defaults['bits'], type='str'), - ssh_key_type=dict(default=ssh_defaults['type'], type='str'), - ssh_key_file=dict(default=None, type='str'), - ssh_key_comment=dict(default=ssh_defaults['comment'], type='str'), - ssh_key_passphrase=dict(default=None, type='str'), - update_password=dict(default='always',choices=['always','on_create'],type='str') - ), - supports_check_mode=True - ) - - user = User(module) - - if user.syslogging: - syslog.openlog('ansible-%s' % os.path.basename(__file__)) - syslog.syslog(syslog.LOG_NOTICE, 'User instantiated - platform %s' % user.platform) - if user.distribution: - syslog.syslog(syslog.LOG_NOTICE, 'User instantiated - distribution %s' % user.distribution) - - rc = None - out = '' - err = 
'' - result = {} - result['name'] = user.name - result['state'] = user.state - if user.state == 'absent': - if user.user_exists(): - if module.check_mode: - module.exit_json(changed=True) - (rc, out, err) = user.remove_user() - if rc != 0: - module.fail_json(name=user.name, msg=err, rc=rc) - result['force'] = user.force - result['remove'] = user.remove - elif user.state == 'present': - if not user.user_exists(): - if module.check_mode: - module.exit_json(changed=True) - (rc, out, err) = user.create_user() - result['system'] = user.system - result['createhome'] = user.createhome - else: - # modify user (note: this function is check mode aware) - (rc, out, err) = user.modify_user() - result['append'] = user.append - result['move_home'] = user.move_home - if rc is not None and rc != 0: - module.fail_json(name=user.name, msg=err, rc=rc) - if user.password is not None: - result['password'] = 'NOT_LOGGING_PASSWORD' - - if rc is None: - result['changed'] = False - else: - result['changed'] = True - if out: - result['stdout'] = out - if err: - result['stderr'] = err - - if user.user_exists(): - info = user.user_info() - if info == False: - result['msg'] = "failed to look up user name: %s" % user.name - result['failed'] = True - result['uid'] = info[2] - result['group'] = info[3] - result['comment'] = info[4] - result['home'] = info[5] - result['shell'] = info[6] - result['uid'] = info[2] - if user.groups is not None: - result['groups'] = user.groups - - # deal with ssh key - if user.sshkeygen: - (rc, out, err) = user.ssh_key_gen() - if rc is not None and rc != 0: - module.fail_json(name=user.name, msg=err, rc=rc) - if rc == 0: - result['changed'] = True - (rc, out, err) = user.ssh_key_fingerprint() - if rc == 0: - result['ssh_fingerprint'] = out.strip() - else: - result['ssh_fingerprint'] = err.strip() - result['ssh_key_file'] = user.get_ssh_key_path() - result['ssh_public_key'] = user.get_ssh_public_key() - - # handle missing homedirs - info = user.user_info() - if 
user.home is None: - user.home = info[5] - if not os.path.exists(user.home) and user.createhome: - if not module.check_mode: - user.create_homedir(user.home) - user.chown_homedir(info[2], info[3], user.home) - result['changed'] = True - - module.exit_json(**result) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/system/zfs b/library/system/zfs deleted file mode 100644 index 9324889705..0000000000 --- a/library/system/zfs +++ /dev/null @@ -1,417 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Johan Wiren -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -DOCUMENTATION = ''' ---- -module: zfs -short_description: Manage zfs -description: - - Manages ZFS file systems on Solaris and FreeBSD. Can manage file systems, volumes and snapshots. See zfs(1M) for more information about the properties. -version_added: "1.1" -options: - name: - description: - - File system, snapshot or volume name e.g. C(rpool/myfs) - required: true - state: - description: - - Whether to create (C(present)), or remove (C(absent)) a file system, snapshot or volume. - required: true - choices: [present, absent] - aclinherit: - description: - - The aclinherit property. - required: False - choices: [discard,noallow,restricted,passthrough,passthrough-x] - aclmode: - description: - - The aclmode property. 
- required: False - choices: [discard,groupmask,passthrough] - atime: - description: - - The atime property. - required: False - choices: ['on','off'] - canmount: - description: - - The canmount property. - required: False - choices: ['on','off','noauto'] - casesensitivity: - description: - - The casesensitivity property. - required: False - choices: [sensitive,insensitive,mixed] - checksum: - description: - - The checksum property. - required: False - choices: ['on','off',fletcher2,fletcher4,sha256] - compression: - description: - - The compression property. - required: False - choices: ['on','off',lzjb,gzip,gzip-1,gzip-2,gzip-3,gzip-4,gzip-5,gzip-6,gzip-7,gzip-8,gzip-9,lz4,zle] - copies: - description: - - The copies property. - required: False - choices: [1,2,3] - dedup: - description: - - The dedup property. - required: False - choices: ['on','off'] - devices: - description: - - The devices property. - required: False - choices: ['on','off'] - exec: - description: - - The exec property. - required: False - choices: ['on','off'] - jailed: - description: - - The jailed property. - required: False - choices: ['on','off'] - logbias: - description: - - The logbias property. - required: False - choices: [latency,throughput] - mountpoint: - description: - - The mountpoint property. - required: False - nbmand: - description: - - The nbmand property. - required: False - choices: ['on','off'] - normalization: - description: - - The normalization property. - required: False - choices: [none,formC,formD,formKC,formKD] - primarycache: - description: - - The primarycache property. - required: False - choices: [all,none,metadata] - quota: - description: - - The quota property. - required: False - readonly: - description: - - The readonly property. - required: False - choices: ['on','off'] - recordsize: - description: - - The recordsize property. - required: False - refquota: - description: - - The refquota property. 
- required: False - refreservation: - description: - - The refreservation property. - required: False - reservation: - description: - - The reservation property. - required: False - secondarycache: - description: - - The secondarycache property. - required: False - choices: [all,none,metadata] - setuid: - description: - - The setuid property. - required: False - choices: ['on','off'] - shareiscsi: - description: - - The shareiscsi property. - required: False - choices: ['on','off'] - sharenfs: - description: - - The sharenfs property. - required: False - sharesmb: - description: - - The sharesmb property. - required: False - snapdir: - description: - - The snapdir property. - required: False - choices: [hidden,visible] - sync: - description: - - The sync property. - required: False - choices: ['on','off'] - utf8only: - description: - - The utf8only property. - required: False - choices: ['on','off'] - volsize: - description: - - The volsize property. - required: False - volblocksize: - description: - - The volblocksize property. - required: False - vscan: - description: - - The vscan property. - required: False - choices: ['on','off'] - xattr: - description: - - The xattr property. - required: False - choices: ['on','off'] - zoned: - description: - - The zoned property. - required: False - choices: ['on','off'] -author: Johan Wiren -''' - -EXAMPLES = ''' -# Create a new file system called myfs in pool rpool -- zfs: name=rpool/myfs state=present - -# Create a new volume called myvol in pool rpool. -- zfs: name=rpool/myvol state=present volsize=10M - -# Create a snapshot of rpool/myfs file system. 
-- zfs: name=rpool/myfs@mysnapshot state=present - -# Create a new file system called myfs2 with snapdir enabled -- zfs: name=rpool/myfs2 state=present snapdir=enabled -''' - - -import os - -class Zfs(object): - def __init__(self, module, name, properties): - self.module = module - self.name = name - self.properties = properties - self.changed = False - - self.immutable_properties = [ 'casesensitivity', 'normalization', 'utf8only' ] - - def exists(self): - cmd = [self.module.get_bin_path('zfs', True)] - cmd.append('list') - cmd.append('-t all') - cmd.append(self.name) - (rc, out, err) = self.module.run_command(' '.join(cmd)) - if rc == 0: - return True - else: - return False - - def create(self): - if self.module.check_mode: - self.changed = True - return - properties=self.properties - volsize = properties.pop('volsize', None) - volblocksize = properties.pop('volblocksize', None) - if "@" in self.name: - action = 'snapshot' - else: - action = 'create' - - cmd = [self.module.get_bin_path('zfs', True)] - cmd.append(action) - if volblocksize: - cmd.append('-b %s' % volblocksize) - if properties: - for prop, value in properties.iteritems(): - cmd.append('-o %s="%s"' % (prop, value)) - if volsize: - cmd.append('-V') - cmd.append(volsize) - cmd.append(self.name) - (rc, err, out) = self.module.run_command(' '.join(cmd)) - if rc == 0: - self.changed=True - else: - self.module.fail_json(msg=out) - - def destroy(self): - if self.module.check_mode: - self.changed = True - return - cmd = [self.module.get_bin_path('zfs', True)] - cmd.append('destroy') - cmd.append(self.name) - (rc, err, out) = self.module.run_command(' '.join(cmd)) - if rc == 0: - self.changed = True - else: - self.module.fail_json(msg=out) - - def set_property(self, prop, value): - if self.module.check_mode: - self.changed = True - return - cmd = self.module.get_bin_path('zfs', True) - args = [cmd, 'set', prop + '=' + value, self.name] - (rc, err, out) = self.module.run_command(args) - if rc == 0: - 
self.changed = True - else: - self.module.fail_json(msg=out) - - def set_properties_if_changed(self): - current_properties = self.get_current_properties() - for prop, value in self.properties.iteritems(): - if current_properties[prop] != value: - if prop in self.immutable_properties: - self.module.fail_json(msg='Cannot change property %s after creation.' % prop) - else: - self.set_property(prop, value) - - def get_current_properties(self): - def get_properties_by_name(propname): - cmd = [self.module.get_bin_path('zfs', True)] - cmd += ['get', '-H', propname, self.name] - rc, out, err = self.module.run_command(cmd) - return [l.split('\t')[1:3] for l in out.splitlines()] - properties = dict(get_properties_by_name('all')) - if 'share.*' in properties: - # Some ZFS pools list the sharenfs and sharesmb properties - # hierarchically as share.nfs and share.smb respectively. - del properties['share.*'] - for p, v in get_properties_by_name('share.all'): - alias = p.replace('.', '') # share.nfs -> sharenfs (etc) - properties[alias] = v - return properties - - def run_command(self, cmd): - progname = cmd[0] - cmd[0] = module.get_bin_path(progname, True) - return module.run_command(cmd) - -def main(): - - # FIXME: should use dict() constructor like other modules, required=False is default - module = AnsibleModule( - argument_spec = { - 'name': {'required': True}, - 'state': {'required': True, 'choices':['present', 'absent']}, - 'aclinherit': {'required': False, 'choices':['discard', 'noallow', 'restricted', 'passthrough', 'passthrough-x']}, - 'aclmode': {'required': False, 'choices':['discard', 'groupmask', 'passthrough']}, - 'atime': {'required': False, 'choices':['on', 'off']}, - 'canmount': {'required': False, 'choices':['on', 'off', 'noauto']}, - 'casesensitivity': {'required': False, 'choices':['sensitive', 'insensitive', 'mixed']}, - 'checksum': {'required': False, 'choices':['on', 'off', 'fletcher2', 'fletcher4', 'sha256']}, - 'compression': {'required': False, 
'choices':['on', 'off', 'lzjb', 'gzip', 'gzip-1', 'gzip-2', 'gzip-3', 'gzip-4', 'gzip-5', 'gzip-6', 'gzip-7', 'gzip-8', 'gzip-9', 'lz4', 'zle']}, - 'copies': {'required': False, 'choices':['1', '2', '3']}, - 'dedup': {'required': False, 'choices':['on', 'off']}, - 'devices': {'required': False, 'choices':['on', 'off']}, - 'exec': {'required': False, 'choices':['on', 'off']}, - # Not supported - #'groupquota': {'required': False}, - 'jailed': {'required': False, 'choices':['on', 'off']}, - 'logbias': {'required': False, 'choices':['latency', 'throughput']}, - 'mountpoint': {'required': False}, - 'nbmand': {'required': False, 'choices':['on', 'off']}, - 'normalization': {'required': False, 'choices':['none', 'formC', 'formD', 'formKC', 'formKD']}, - 'primarycache': {'required': False, 'choices':['all', 'none', 'metadata']}, - 'quota': {'required': False}, - 'readonly': {'required': False, 'choices':['on', 'off']}, - 'recordsize': {'required': False}, - 'refquota': {'required': False}, - 'refreservation': {'required': False}, - 'reservation': {'required': False}, - 'secondarycache': {'required': False, 'choices':['all', 'none', 'metadata']}, - 'setuid': {'required': False, 'choices':['on', 'off']}, - 'shareiscsi': {'required': False, 'choices':['on', 'off']}, - 'sharenfs': {'required': False}, - 'sharesmb': {'required': False}, - 'snapdir': {'required': False, 'choices':['hidden', 'visible']}, - 'sync': {'required': False, 'choices':['on', 'off']}, - # Not supported - #'userquota': {'required': False}, - 'utf8only': {'required': False, 'choices':['on', 'off']}, - 'volsize': {'required': False}, - 'volblocksize': {'required': False}, - 'vscan': {'required': False, 'choices':['on', 'off']}, - 'xattr': {'required': False, 'choices':['on', 'off']}, - 'zoned': {'required': False, 'choices':['on', 'off']}, - }, - supports_check_mode=True - ) - - state = module.params.pop('state') - name = module.params.pop('name') - - # Get all valid zfs-properties - properties = dict() - 
for prop, value in module.params.iteritems(): - if prop in ['CHECKMODE']: - continue - if value: - properties[prop] = value - - result = {} - result['name'] = name - result['state'] = state - - zfs=Zfs(module, name, properties) - - if state == 'present': - if zfs.exists(): - zfs.set_properties_if_changed() - else: - zfs.create() - - elif state == 'absent': - if zfs.exists(): - zfs.destroy() - - result.update(zfs.properties) - result['changed'] = zfs.changed - module.exit_json(**result) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/utilities/accelerate b/library/utilities/accelerate deleted file mode 100644 index bd62471316..0000000000 --- a/library/utilities/accelerate +++ /dev/null @@ -1,727 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, James Cammarata -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: accelerate -short_description: Enable accelerated mode on remote node -description: - - This modules launches an ephemeral I(accelerate) daemon on the remote node which - Ansible can use to communicate with nodes at high speed. - - The daemon listens on a configurable port for a configurable amount of time. 
- - Fireball mode is AES encrypted -version_added: "1.3" -options: - port: - description: - - TCP port for the socket connection - required: false - default: 5099 - aliases: [] - timeout: - description: - - The number of seconds the socket will wait for data. If none is received when the timeout value is reached, the connection will be closed. - required: false - default: 300 - aliases: [] - minutes: - description: - - The I(accelerate) listener daemon is started on nodes and will stay around for - this number of minutes before turning itself off. - required: false - default: 30 - ipv6: - description: - - The listener daemon on the remote host will bind to the ipv6 localhost socket - if this parameter is set to true. - required: false - default: false - multi_key: - description: - - When enabled, the daemon will open a local socket file which can be used by future daemon executions to - upload a new key to the already running daemon, so that multiple users can connect using different keys. - This access still requires an ssh connection as the uid for which the daemon is currently running. - required: false - default: no - version_added: "1.6" -notes: - - See the advanced playbooks chapter for more about using accelerated mode. -requirements: [ "python-keyczar" ] -author: James Cammarata -''' - -EXAMPLES = ''' -# To use accelerate mode, simply add "accelerate: true" to your play. 
The initial -# key exchange and starting up of the daemon will occur over SSH, but all commands and -# subsequent actions will be conducted over the raw socket connection using AES encryption - -- hosts: devservers - accelerate: true - tasks: - - command: /usr/bin/anything -''' - -import base64 -import errno -import getpass -import json -import os -import os.path -import pwd -import signal -import socket -import struct -import sys -import syslog -import tempfile -import time -import traceback - -import SocketServer - -from datetime import datetime -from threading import Thread, Lock - -# import module snippets -# we must import this here at the top so we can use get_module_path() -from ansible.module_utils.basic import * - -syslog.openlog('ansible-%s' % os.path.basename(__file__)) - -# the chunk size to read and send, assuming mtu 1500 and -# leaving room for base64 (+33%) encoding and header (100 bytes) -# 4 * (975/3) + 100 = 1400 -# which leaves room for the TCP/IP header -CHUNK_SIZE=10240 - -# FIXME: this all should be moved to module_common, as it's -# pretty much a copy from the callbacks/util code -DEBUG_LEVEL=0 -def log(msg, cap=0): - global DEBUG_LEVEL - if DEBUG_LEVEL >= cap: - syslog.syslog(syslog.LOG_NOTICE|syslog.LOG_DAEMON, msg) - -def v(msg): - log(msg, cap=1) - -def vv(msg): - log(msg, cap=2) - -def vvv(msg): - log(msg, cap=3) - -def vvvv(msg): - log(msg, cap=4) - - -HAS_KEYCZAR = False -try: - from keyczar.keys import AesKey - HAS_KEYCZAR = True -except ImportError: - pass - -SOCKET_FILE = os.path.join(get_module_path(), '.ansible-accelerate', ".local.socket") - -def get_pid_location(module): - """ - Try to find a pid directory in the common locations, falling - back to the user's home directory if no others exist - """ - for dir in ['/var/run', '/var/lib/run', '/run', os.path.expanduser("~/")]: - try: - if os.path.isdir(dir) and os.access(dir, os.R_OK|os.W_OK): - return os.path.join(dir, '.accelerate.pid') - except: - pass - 
module.fail_json(msg="couldn't find any valid directory to use for the accelerate pid file") - - -# NOTE: this shares a fair amount of code in common with async_wrapper, if async_wrapper were a new module we could move -# this into utils.module_common and probably should anyway - -def daemonize_self(module, password, port, minutes, pid_file): - # daemonizing code: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012 - try: - pid = os.fork() - if pid > 0: - vvv("exiting pid %s" % pid) - # exit first parent - module.exit_json(msg="daemonized accelerate on port %s for %s minutes with pid %s" % (port, minutes, str(pid))) - except OSError, e: - log("fork #1 failed: %d (%s)" % (e.errno, e.strerror)) - sys.exit(1) - - # decouple from parent environment - os.chdir("/") - os.setsid() - os.umask(022) - - # do second fork - try: - pid = os.fork() - if pid > 0: - log("daemon pid %s, writing %s" % (pid, pid_file)) - pid_file = open(pid_file, "w") - pid_file.write("%s" % pid) - pid_file.close() - vvv("pid file written") - sys.exit(0) - except OSError, e: - log("fork #2 failed: %d (%s)" % (e.errno, e.strerror)) - sys.exit(1) - - dev_null = file('/dev/null','rw') - os.dup2(dev_null.fileno(), sys.stdin.fileno()) - os.dup2(dev_null.fileno(), sys.stdout.fileno()) - os.dup2(dev_null.fileno(), sys.stderr.fileno()) - log("daemonizing successful") - -class LocalSocketThread(Thread): - server = None - terminated = False - - def __init__(self, group=None, target=None, name=None, args=(), kwargs={}, Verbose=None): - self.server = kwargs.get('server') - Thread.__init__(self, group, target, name, args, kwargs, Verbose) - - def run(self): - try: - if os.path.exists(SOCKET_FILE): - os.remove(SOCKET_FILE) - else: - dir = os.path.dirname(SOCKET_FILE) - if os.path.exists(dir): - if not os.path.isdir(dir): - log("The socket file path (%s) exists, but is not a directory. 
No local connections will be available" % dir) - return - else: - # make sure the directory is accessible only to this - # user, as socket files derive their permissions from - # the directory that contains them - os.chmod(dir, 0700) - elif not os.path.exists(dir): - os.makedirs(dir, 0700) - except OSError: - pass - self.s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - self.s.bind(SOCKET_FILE) - self.s.listen(5) - while not self.terminated: - try: - conn, addr = self.s.accept() - vv("received local connection") - data = "" - while "\n" not in data: - data += conn.recv(2048) - try: - new_key = AesKey.Read(data.strip()) - found = False - for key in self.server.key_list: - try: - new_key.Decrypt(key.Encrypt("foo")) - found = True - break - except: - pass - if not found: - vv("adding new key to the key list") - self.server.key_list.append(new_key) - conn.sendall("OK\n") - else: - vv("key already exists in the key list, ignoring") - conn.sendall("EXISTS\n") - - # update the last event time so the server doesn't - # shutdown sooner than expected for new cliets - try: - self.server.last_event_lock.acquire() - self.server.last_event = datetime.now() - finally: - self.server.last_event_lock.release() - except Exception, e: - vv("key loaded locally was invalid, ignoring (%s)" % e) - conn.sendall("BADKEY\n") - finally: - try: - conn.close() - except: - pass - except: - pass - - def terminate(self): - self.terminated = True - self.s.shutdown(socket.SHUT_RDWR) - self.s.close() - -class ThreadWithReturnValue(Thread): - def __init__(self, group=None, target=None, name=None, args=(), kwargs={}, Verbose=None): - Thread.__init__(self, group, target, name, args, kwargs, Verbose) - self._return = None - - def run(self): - if self._Thread__target is not None: - self._return = self._Thread__target(*self._Thread__args, - **self._Thread__kwargs) - - def join(self,timeout=None): - Thread.join(self, timeout=timeout) - return self._return - -class 
ThreadedTCPServer(SocketServer.ThreadingTCPServer): - key_list = [] - last_event = datetime.now() - last_event_lock = Lock() - def __init__(self, server_address, RequestHandlerClass, module, password, timeout, use_ipv6=False): - self.module = module - self.key_list.append(AesKey.Read(password)) - self.allow_reuse_address = True - self.timeout = timeout - - if use_ipv6: - self.address_family = socket.AF_INET6 - - if self.module.params.get('multi_key', False): - vv("starting thread to handle local connections for multiple keys") - self.local_thread = LocalSocketThread(kwargs=dict(server=self)) - self.local_thread.start() - - SocketServer.ThreadingTCPServer.__init__(self, server_address, RequestHandlerClass) - - def shutdown(self): - self.local_thread.terminate() - self.running = False - SocketServer.ThreadingTCPServer.shutdown(self) - -class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler): - # the key to use for this connection - active_key = None - - def send_data(self, data): - try: - self.server.last_event_lock.acquire() - self.server.last_event = datetime.now() - finally: - self.server.last_event_lock.release() - - packed_len = struct.pack('!Q', len(data)) - return self.request.sendall(packed_len + data) - - def recv_data(self): - header_len = 8 # size of a packed unsigned long long - data = "" - vvvv("in recv_data(), waiting for the header") - while len(data) < header_len: - try: - d = self.request.recv(header_len - len(data)) - if not d: - vvv("received nothing, bailing out") - return None - data += d - except: - # probably got a connection reset - vvvv("exception received while waiting for recv(), returning None") - return None - vvvv("in recv_data(), got the header, unpacking") - data_len = struct.unpack('!Q',data[:header_len])[0] - data = data[header_len:] - vvvv("data received so far (expecting %d): %d" % (data_len,len(data))) - while len(data) < data_len: - try: - d = self.request.recv(data_len - len(data)) - if not d: - vvv("received nothing, 
bailing out") - return None - data += d - vvvv("data received so far (expecting %d): %d" % (data_len,len(data))) - except: - # probably got a connection reset - vvvv("exception received while waiting for recv(), returning None") - return None - vvvv("received all of the data, returning") - - try: - self.server.last_event_lock.acquire() - self.server.last_event = datetime.now() - finally: - self.server.last_event_lock.release() - - return data - - def handle(self): - try: - while True: - vvvv("waiting for data") - data = self.recv_data() - if not data: - vvvv("received nothing back from recv_data(), breaking out") - break - vvvv("got data, decrypting") - if not self.active_key: - for key in self.server.key_list: - try: - data = key.Decrypt(data) - self.active_key = key - break - except: - pass - else: - vv("bad decrypt, exiting the connection handler") - return - else: - try: - data = self.active_key.Decrypt(data) - except: - vv("bad decrypt, exiting the connection handler") - return - - vvvv("decryption done, loading json from the data") - data = json.loads(data) - - mode = data['mode'] - response = {} - last_pong = datetime.now() - if mode == 'command': - vvvv("received a command request, running it") - twrv = ThreadWithReturnValue(target=self.command, args=(data,)) - twrv.start() - response = None - while twrv.is_alive(): - if (datetime.now() - last_pong).seconds >= 15: - last_pong = datetime.now() - vvvv("command still running, sending keepalive packet") - data2 = json.dumps(dict(pong=True)) - data2 = self.active_key.Encrypt(data2) - self.send_data(data2) - time.sleep(0.1) - response = twrv._return - vvvv("thread is done, response from join was %s" % response) - elif mode == 'put': - vvvv("received a put request, putting it") - response = self.put(data) - elif mode == 'fetch': - vvvv("received a fetch request, getting it") - response = self.fetch(data) - elif mode == 'validate_user': - vvvv("received a request to validate the user id") - response = 
self.validate_user(data) - - vvvv("response result is %s" % str(response)) - json_response = json.dumps(response) - vvvv("dumped json is %s" % json_response) - data2 = self.active_key.Encrypt(json_response) - vvvv("sending the response back to the controller") - self.send_data(data2) - vvvv("done sending the response") - - if mode == 'validate_user' and response.get('rc') == 1: - vvvv("detected a uid mismatch, shutting down") - self.server.shutdown() - except: - tb = traceback.format_exc() - log("encountered an unhandled exception in the handle() function") - log("error was:\n%s" % tb) - if self.active_key: - data2 = json.dumps(dict(rc=1, failed=True, msg="unhandled error in the handle() function")) - data2 = self.active_key.Encrypt(data2) - self.send_data(data2) - - def validate_user(self, data): - if 'username' not in data: - return dict(failed=True, msg='No username specified') - - vvvv("validating we're running as %s" % data['username']) - - # get the current uid - c_uid = os.getuid() - try: - # the target uid - t_uid = pwd.getpwnam(data['username']).pw_uid - except: - vvvv("could not find user %s" % data['username']) - return dict(failed=True, msg='could not find user %s' % data['username']) - - # and return rc=0 for success, rc=1 for failure - if c_uid == t_uid: - return dict(rc=0) - else: - return dict(rc=1) - - def command(self, data): - if 'cmd' not in data: - return dict(failed=True, msg='internal error: cmd is required') - if 'tmp_path' not in data: - return dict(failed=True, msg='internal error: tmp_path is required') - - vvvv("executing: %s" % data['cmd']) - - use_unsafe_shell = False - executable = data.get('executable') - if executable: - use_unsafe_shell = True - - rc, stdout, stderr = self.server.module.run_command(data['cmd'], executable=executable, use_unsafe_shell=use_unsafe_shell, close_fds=True) - if stdout is None: - stdout = '' - if stderr is None: - stderr = '' - vvvv("got stdout: %s" % stdout) - vvvv("got stderr: %s" % stderr) - - return 
dict(rc=rc, stdout=stdout, stderr=stderr) - - def fetch(self, data): - if 'in_path' not in data: - return dict(failed=True, msg='internal error: in_path is required') - - try: - fd = file(data['in_path'], 'rb') - fstat = os.stat(data['in_path']) - vvv("FETCH file is %d bytes" % fstat.st_size) - while fd.tell() < fstat.st_size: - data = fd.read(CHUNK_SIZE) - last = False - if fd.tell() >= fstat.st_size: - last = True - data = dict(data=base64.b64encode(data), last=last) - data = json.dumps(data) - data = self.active_key.Encrypt(data) - - if self.send_data(data): - return dict(failed=True, stderr="failed to send data") - - response = self.recv_data() - if not response: - log("failed to get a response, aborting") - return dict(failed=True, stderr="Failed to get a response from %s" % self.host) - response = self.active_key.Decrypt(response) - response = json.loads(response) - - if response.get('failed',False): - log("got a failed response from the master") - return dict(failed=True, stderr="Master reported failure, aborting transfer") - except Exception, e: - fd.close() - tb = traceback.format_exc() - log("failed to fetch the file: %s" % tb) - return dict(failed=True, stderr="Could not fetch the file: %s" % str(e)) - - fd.close() - return dict() - - def put(self, data): - if 'data' not in data: - return dict(failed=True, msg='internal error: data is required') - if 'out_path' not in data: - return dict(failed=True, msg='internal error: out_path is required') - - final_path = None - if 'user' in data and data.get('user') != getpass.getuser(): - vvv("the target user doesn't match this user, we'll move the file into place via sudo") - tmp_path = os.path.expanduser('~/.ansible/tmp/') - if not os.path.exists(tmp_path): - try: - os.makedirs(tmp_path, 0700) - except: - return dict(failed=True, msg='could not create a temporary directory at %s' % tmp_path) - (fd,out_path) = tempfile.mkstemp(prefix='ansible.', dir=tmp_path) - out_fd = os.fdopen(fd, 'w', 0) - final_path = 
data['out_path'] - else: - out_path = data['out_path'] - out_fd = open(out_path, 'w') - - try: - bytes=0 - while True: - out = base64.b64decode(data['data']) - bytes += len(out) - out_fd.write(out) - response = json.dumps(dict()) - response = self.active_key.Encrypt(response) - self.send_data(response) - if data['last']: - break - data = self.recv_data() - if not data: - raise "" - data = self.active_key.Decrypt(data) - data = json.loads(data) - except: - out_fd.close() - tb = traceback.format_exc() - log("failed to put the file: %s" % tb) - return dict(failed=True, stdout="Could not write the file") - - vvvv("wrote %d bytes" % bytes) - out_fd.close() - - if final_path: - vvv("moving %s to %s" % (out_path, final_path)) - self.server.module.atomic_move(out_path, final_path) - return dict() - -def daemonize(module, password, port, timeout, minutes, use_ipv6, pid_file): - try: - daemonize_self(module, password, port, minutes, pid_file) - - def timer_handler(signum, _): - try: - server.last_event_lock.acquire() - td = datetime.now() - server.last_event - # older python timedelta objects don't have total_seconds(), - # so we use the formula from the docs to calculate it - total_seconds = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6 - if total_seconds >= minutes * 60: - log("server has been idle longer than the timeout, shutting down") - server.running = False - server.shutdown() - else: - # reschedule the check - vvvv("daemon idle for %d seconds (timeout=%d)" % (total_seconds,minutes*60)) - signal.alarm(30) - except: - pass - finally: - server.last_event_lock.release() - - signal.signal(signal.SIGALRM, timer_handler) - signal.alarm(30) - - tries = 5 - while tries > 0: - try: - if use_ipv6: - address = ("::", port) - else: - address = ("0.0.0.0", port) - server = ThreadedTCPServer(address, ThreadedTCPRequestHandler, module, password, timeout, use_ipv6=use_ipv6) - server.allow_reuse_address = True - break - except Exception, e: - vv("Failed to 
create the TCP server (tries left = %d) (error: %s) " % (tries,e)) - tries -= 1 - time.sleep(0.2) - - if tries == 0: - vv("Maximum number of attempts to create the TCP server reached, bailing out") - raise Exception("max # of attempts to serve reached") - - # run the server in a separate thread to make signal handling work - server_thread = Thread(target=server.serve_forever, kwargs=dict(poll_interval=0.1)) - server_thread.start() - server.running = True - - v("serving!") - while server.running: - time.sleep(1) - - # wait for the thread to exit fully - server_thread.join() - - v("server thread terminated, exiting!") - sys.exit(0) - except Exception, e: - tb = traceback.format_exc() - log("exception caught, exiting accelerated mode: %s\n%s" % (e, tb)) - sys.exit(0) - -def main(): - global DEBUG_LEVEL - module = AnsibleModule( - argument_spec = dict( - port=dict(required=False, default=5099), - ipv6=dict(required=False, default=False, type='bool'), - multi_key=dict(required=False, default=False, type='bool'), - timeout=dict(required=False, default=300), - password=dict(required=True), - minutes=dict(required=False, default=30), - debug=dict(required=False, default=0, type='int') - ), - supports_check_mode=True - ) - - password = base64.b64decode(module.params['password']) - port = int(module.params['port']) - timeout = int(module.params['timeout']) - minutes = int(module.params['minutes']) - debug = int(module.params['debug']) - ipv6 = module.params['ipv6'] - multi_key = module.params['multi_key'] - - if not HAS_KEYCZAR: - module.fail_json(msg="keyczar is not installed (on the remote side)") - - DEBUG_LEVEL=debug - pid_file = get_pid_location(module) - - daemon_pid = None - daemon_running = False - if os.path.exists(pid_file): - try: - daemon_pid = int(open(pid_file).read()) - try: - # sending signal 0 doesn't do anything to the - # process, other than tell the calling program - # whether other signals can be sent - os.kill(daemon_pid, 0) - except OSError, e: - if 
e.errno == errno.EPERM: - # no permissions means the pid is probably - # running, but as a different user, so fail - module.fail_json(msg="the accelerate daemon appears to be running as a different user that this user cannot access (pid=%d)" % daemon_pid) - else: - daemon_running = True - except ValueError: - # invalid pid file, unlink it - otherwise we don't care - try: - os.unlink(pid_file) - except: - pass - - if daemon_running and multi_key: - # try to connect to the file socket for the daemon if it exists - s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - try: - s.connect(SOCKET_FILE) - s.sendall(password + '\n') - data = "" - while '\n' not in data: - data += s.recv(2048) - res = data.strip() - except: - module.fail_json(msg="failed to connect to the local socket file") - finally: - try: - s.close() - except: - pass - - if res in ("OK", "EXISTS"): - module.exit_json(msg="transferred new key to the existing daemon") - else: - module.fail_json(msg="could not transfer new key: %s" % data.strip()) - else: - # try to start up the daemon - daemonize(module, password, port, timeout, minutes, ipv6, pid_file) - -main() diff --git a/library/utilities/assert b/library/utilities/assert deleted file mode 100644 index f5963d60cd..0000000000 --- a/library/utilities/assert +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright 2012 Dag Wieers -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: assert -short_description: Fail with custom message -description: - - This module asserts that a given expression is true and can be a simpler alternative to the 'fail' module in some cases. -version_added: "1.5" -options: - that: - description: - - "A string expression of the same form that can be passed to the 'when' statement" - - "Alternatively, a list of string expressions" - required: true -author: Michael DeHaan -''' - -EXAMPLES = ''' -- assert: { that: "ansible_os_family != 'RedHat'" } - -- assert: - that: - - "'foo' in some_command_result.stdout" - - "number_of_the_counting == 3" -''' diff --git a/library/utilities/debug b/library/utilities/debug deleted file mode 100644 index 2df68ca083..0000000000 --- a/library/utilities/debug +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright 2012 Dag Wieers -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: debug -short_description: Print statements during execution -description: - - This module prints statements during execution and can be useful - for debugging variables or expressions without necessarily halting - the playbook. Useful for debugging together with the 'when:' directive. 
- -version_added: "0.8" -options: - msg: - description: - - The customized message that is printed. If omitted, prints a generic - message. - required: false - default: "Hello world!" - var: - description: - - A variable name to debug. Mutually exclusive with the 'msg' option. -author: Dag Wieers, Michael DeHaan -''' - -EXAMPLES = ''' -# Example that prints the loopback address and gateway for each host -- debug: msg="System {{ inventory_hostname }} has uuid {{ ansible_product_uuid }}" - -- debug: msg="System {{ inventory_hostname }} has gateway {{ ansible_default_ipv4.gateway }}" - when: ansible_default_ipv4.gateway is defined - -- shell: /usr/bin/uptime - register: result - -- debug: var=result - -- name: Display all variables/facts known for a host - debug: var=hostvars[inventory_hostname] -''' diff --git a/library/utilities/fail b/library/utilities/fail deleted file mode 100644 index 23f5b83668..0000000000 --- a/library/utilities/fail +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright 2012 Dag Wieers -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: fail -short_description: Fail with custom message -description: - - This module fails the progress with a custom message. It can be - useful for bailing out when a certain condition is met using C(when). 
-version_added: "0.8" -options: - msg: - description: - - The customized message used for failing execution. If omitted, - fail will simple bail out with a generic message. - required: false - default: "'Failed as requested from task'" - -author: Dag Wieers -''' - -EXAMPLES = ''' -# Example playbook using fail and when together -- fail: msg="The system may not be provisioned according to the CMDB status." - when: cmdb_status != "to-be-staged" -''' diff --git a/library/utilities/fireball b/library/utilities/fireball deleted file mode 100644 index 43760969a8..0000000000 --- a/library/utilities/fireball +++ /dev/null @@ -1,280 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: fireball -short_description: Enable fireball mode on remote node -description: - - This modules launches an ephemeral I(fireball) ZeroMQ message bus daemon on the remote node which - Ansible can use to communicate with nodes at high speed. - - The daemon listens on a configurable port for a configurable amount of time. - - Starting a new fireball as a given user terminates any existing user fireballs. 
- - Fireball mode is AES encrypted -version_added: "0.9" -options: - port: - description: - - TCP port for ZeroMQ - required: false - default: 5099 - aliases: [] - minutes: - description: - - The I(fireball) listener daemon is started on nodes and will stay around for - this number of minutes before turning itself off. - required: false - default: 30 -notes: - - See the advanced playbooks chapter for more about using fireball mode. -requirements: [ "zmq", "keyczar" ] -author: Michael DeHaan -''' - -EXAMPLES = ''' -# This example playbook has two plays: the first launches 'fireball' mode on all hosts via SSH, and -# the second actually starts using it for subsequent management over the fireball connection - -- hosts: devservers - gather_facts: false - connection: ssh - sudo: yes - tasks: - - action: fireball - -- hosts: devservers - connection: fireball - tasks: - - command: /usr/bin/anything -''' - -import os -import sys -import shutil -import time -import base64 -import syslog -import signal -import time -import signal -import traceback - -syslog.openlog('ansible-%s' % os.path.basename(__file__)) -PIDFILE = os.path.expanduser("~/.fireball.pid") - -def log(msg): - syslog.syslog(syslog.LOG_NOTICE, msg) - -if os.path.exists(PIDFILE): - try: - data = int(open(PIDFILE).read()) - try: - os.kill(data, signal.SIGKILL) - except OSError: - pass - except ValueError: - pass - os.unlink(PIDFILE) - -HAS_ZMQ = False -try: - import zmq - HAS_ZMQ = True -except ImportError: - pass - -HAS_KEYCZAR = False -try: - from keyczar.keys import AesKey - HAS_KEYCZAR = True -except ImportError: - pass - -# NOTE: this shares a fair amount of code in common with async_wrapper, if async_wrapper were a new module we could move -# this into utils.module_common and probably should anyway - -def daemonize_self(module, password, port, minutes): - # daemonizing code: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012 - try: - pid = os.fork() - if pid > 0: - log("exiting pid %s" % pid) - # 
exit first parent - module.exit_json(msg="daemonized fireball on port %s for %s minutes" % (port, minutes)) - except OSError, e: - log("fork #1 failed: %d (%s)" % (e.errno, e.strerror)) - sys.exit(1) - - # decouple from parent environment - os.chdir("/") - os.setsid() - os.umask(022) - - # do second fork - try: - pid = os.fork() - if pid > 0: - log("daemon pid %s, writing %s" % (pid, PIDFILE)) - pid_file = open(PIDFILE, "w") - pid_file.write("%s" % pid) - pid_file.close() - log("pidfile written") - sys.exit(0) - except OSError, e: - log("fork #2 failed: %d (%s)" % (e.errno, e.strerror)) - sys.exit(1) - - dev_null = file('/dev/null','rw') - os.dup2(dev_null.fileno(), sys.stdin.fileno()) - os.dup2(dev_null.fileno(), sys.stdout.fileno()) - os.dup2(dev_null.fileno(), sys.stderr.fileno()) - log("daemonizing successful (%s,%s)" % (password, port)) - -def command(module, data): - if 'cmd' not in data: - return dict(failed=True, msg='internal error: cmd is required') - if 'tmp_path' not in data: - return dict(failed=True, msg='internal error: tmp_path is required') - if 'executable' not in data: - return dict(failed=True, msg='internal error: executable is required') - - log("executing: %s" % data['cmd']) - rc, stdout, stderr = module.run_command(data['cmd'], executable=data['executable'], close_fds=True) - if stdout is None: - stdout = '' - if stderr is None: - stderr = '' - log("got stdout: %s" % stdout) - - return dict(rc=rc, stdout=stdout, stderr=stderr) - -def fetch(data): - if 'in_path' not in data: - return dict(failed=True, msg='internal error: in_path is required') - - # FIXME: should probably support chunked file transfer for binary files - # at some point. For now, just base64 encodes the file - # so don't use it to move ISOs, use rsync. 
- - fh = open(data['in_path']) - data = base64.b64encode(fh.read()) - return dict(data=data) - -def put(data): - - if 'data' not in data: - return dict(failed=True, msg='internal error: data is required') - if 'out_path' not in data: - return dict(failed=True, msg='internal error: out_path is required') - - # FIXME: should probably support chunked file transfer for binary files - # at some point. For now, just base64 encodes the file - # so don't use it to move ISOs, use rsync. - - fh = open(data['out_path'], 'w') - fh.write(base64.b64decode(data['data'])) - fh.close() - - return dict() - -def serve(module, password, port, minutes): - - - log("serving") - context = zmq.Context() - socket = context.socket(zmq.REP) - addr = "tcp://*:%s" % port - log("zmq serving on %s" % addr) - socket.bind(addr) - - # password isn't so much a password but a serialized AesKey object that we xferred over SSH - # password as a variable in ansible is never logged though, so it serves well - - key = AesKey.Read(password) - - while True: - - data = socket.recv() - - try: - data = key.Decrypt(data) - except: - continue - - data = json.loads(data) - - mode = data['mode'] - response = {} - - if mode == 'command': - response = command(module, data) - elif mode == 'put': - response = put(data) - elif mode == 'fetch': - response = fetch(data) - - data2 = json.dumps(response) - data2 = key.Encrypt(data2) - socket.send(data2) - -def daemonize(module, password, port, minutes): - - try: - daemonize_self(module, password, port, minutes) - - def catcher(signum, _): - module.exit_json(msg='timer expired') - - signal.signal(signal.SIGALRM, catcher) - signal.setitimer(signal.ITIMER_REAL, 60 * minutes) - - - serve(module, password, port, minutes) - except Exception, e: - tb = traceback.format_exc() - log("exception caught, exiting fireball mode: %s\n%s" % (e, tb)) - sys.exit(0) - -def main(): - - module = AnsibleModule( - argument_spec = dict( - port=dict(required=False, default=5099), - 
password=dict(required=True), - minutes=dict(required=False, default=30), - ), - supports_check_mode=True - ) - - password = base64.b64decode(module.params['password']) - port = module.params['port'] - minutes = int(module.params['minutes']) - - if not HAS_ZMQ: - module.fail_json(msg="zmq is not installed") - if not HAS_KEYCZAR: - module.fail_json(msg="keyczar is not installed") - - daemonize(module, password, port, minutes) - - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/utilities/include_vars b/library/utilities/include_vars deleted file mode 100644 index 4c7c39d903..0000000000 --- a/library/utilities/include_vars +++ /dev/null @@ -1,39 +0,0 @@ -# -*- mode: python -*- - -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -author: Benno Joy -module: include_vars -short_description: Load variables from files, dynamically within a task. -description: - - Loads variables from a YAML file dynamically during task runtime. It can work with conditionals, or use host specific variables to determine the path name to load from. -options: - free-form: - description: - - The file name from which variables should be loaded, if called from a role it will look for - the file in vars/ subdirectory of the role, otherwise the path would be relative to playbook. An absolute path can also be provided. - required: true -version_added: "1.4" -''' - -EXAMPLES = """ -# Conditionally decide to load in variables when x is 0, otherwise do not. -- include_vars: contingency_plan.yml - when: x == 0 - -# Load a variable file based on the OS type, or a default if not found. 
-- include_vars: "{{ item }}" - with_first_found: - - "{{ ansible_distribution }}.yml" - - "{{ ansible_os_family }}.yml" - - "default.yml" - -""" diff --git a/library/utilities/pause b/library/utilities/pause deleted file mode 100644 index 6e8a83afe6..0000000000 --- a/library/utilities/pause +++ /dev/null @@ -1,40 +0,0 @@ -# -*- mode: python -*- - -DOCUMENTATION = ''' ---- -module: pause -short_description: Pause playbook execution -description: - - Pauses playbook execution for a set amount of time, or until a prompt is acknowledged. All parameters are optional. The default behavior is to pause with a prompt. - - "You can use C(ctrl+c) if you wish to advance a pause earlier than it is set to expire or if you need to abort a playbook run entirely. To continue early: press C(ctrl+c) and then C(c). To abort a playbook: press C(ctrl+c) and then C(a)." - - "The pause module integrates into async/parallelized playbooks without any special considerations (see also: Rolling Updates). When using pauses with the C(serial) playbook parameter (as in rolling updates) you are only prompted once for the current group of hosts." -version_added: "0.8" -options: - minutes: - description: - - Number of minutes to pause for. - required: false - default: null - seconds: - description: - - Number of seconds to pause for. - required: false - default: null - prompt: - description: - - Optional text to use for the prompt message. - required: false - default: null -author: Tim Bielawa -''' - -EXAMPLES = ''' -# Pause for 5 minutes to build app cache. -- pause: minutes=5 - -# Pause until you can verify updates to an application were successful. -- pause: - -# A helpful reminder of what to look out for post-update. 
-- pause: prompt="Make sure org.foo.FooOverload exception is not present" -''' diff --git a/library/utilities/set_fact b/library/utilities/set_fact deleted file mode 100644 index ea67cc43a3..0000000000 --- a/library/utilities/set_fact +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright 2013 Dag Wieers -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -author: Dag Wieers -module: set_fact -short_description: Set host facts from a task -description: - - This module allows setting new variables. Variables are set on a host-by-host basis - just like facts discovered by the setup module. - - These variables will survive between plays. -options: - key_value: - description: - - The C(set_fact) module takes key=value pairs as variables to set - in the playbook scope. Or alternatively, accepts complex arguments - using the C(args:) statement. 
- required: true - default: null -version_added: "1.2" -''' - -EXAMPLES = ''' -# Example setting host facts using key=value pairs -- set_fact: one_fact="something" other_fact="{{ local_var * 2 }}" - -# Example setting host facts using complex arguments -- set_fact: - one_fact: something - other_fact: "{{ local_var * 2 }}" - -# As of 1.8, Ansible will convert boolean strings ('true', 'false', 'yes', 'no') -# to proper boolean values when using the key=value syntax, however it is still -# recommended that booleans be set using the complex argument style: -- set_fact: - one_fact: true - other_fact: false - -''' diff --git a/library/utilities/wait_for b/library/utilities/wait_for deleted file mode 100644 index 5e02712ddf..0000000000 --- a/library/utilities/wait_for +++ /dev/null @@ -1,462 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Jeroen Hoekx -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -import socket -import datetime -import time -import sys -import re -import binascii - -HAS_PSUTIL = False -try: - import psutil - HAS_PSUTIL = True - # just because we can import it on Linux doesn't mean we will use it -except ImportError: - pass - -DOCUMENTATION = ''' ---- -module: wait_for -short_description: Waits for a condition before continuing. 
-description: - - Waiting for a port to become available is useful for when services - are not immediately available after their init scripts return - - which is true of certain Java application servers. It is also - useful when starting guests with the M(virt) module and - needing to pause until they are ready. - - This module can also be used to wait for a regex match a string to be present in a file. - - In 1.6 and later, this module can - also be used to wait for a file to be available or absent on the - filesystem. - - In 1.8 and later, this module can also be used to wait for active - connections to be closed before continuing, useful if a node - is being rotated out of a load balancer pool. -version_added: "0.7" -options: - host: - description: - - hostname or IP address to wait for - required: false - default: "127.0.0.1" - aliases: [] - timeout: - description: - - maximum number of seconds to wait for - required: false - default: 300 - delay: - description: - - number of seconds to wait before starting to poll - required: false - default: 0 - port: - description: - - port number to poll - required: false - state: - description: - - either C(present), C(started), or C(stopped), C(absent), or C(drained) - - When checking a port C(started) will ensure the port is open, C(stopped) will check that it is closed, C(drained) will check for active connections - - When checking for a file or a search string C(present) or C(started) will ensure that the file or string is present before continuing, C(absent) will check that file is absent or removed - choices: [ "present", "started", "stopped", "absent", "drained" ] - default: "started" - path: - version_added: "1.4" - required: false - description: - - path to a file on the filesytem that must exist before continuing - search_regex: - version_added: "1.4" - required: false - description: - - Can be used to match a string in either a file or a socket connection. Defaults to a multiline regex. 
- exclude_hosts: - version_added: "1.8" - required: false - description: - - list of hosts or IPs to ignore when looking for active TCP connections for C(drained) state -notes: - - The ability to use search_regex with a port connection was added in 1.7. -requirements: [] -author: Jeroen Hoekx, John Jarvis, Andrii Radyk -''' - -EXAMPLES = ''' - -# wait 300 seconds for port 8000 to become open on the host, don't start checking for 10 seconds -- wait_for: port=8000 delay=10 - -# wait 300 seconds for port 8000 of any IP to close active connections, don't start checking for 10 seconds -- wait_for: host=0.0.0.0 port=8000 delay=10 state=drained - -# wait 300 seconds for port 8000 of any IP to close active connections, ignoring connections for specified hosts -- wait_for: host=0.0.0.0 port=8000 state=drained exclude_hosts=10.2.1.2,10.2.1.3 - -# wait until the file /tmp/foo is present before continuing -- wait_for: path=/tmp/foo - -# wait until the string "completed" is in the file /tmp/foo before continuing -- wait_for: path=/tmp/foo search_regex=completed - -# wait until the lock file is removed -- wait_for: path=/var/lock/file.lock state=absent - -# wait until the process is finished and pid was destroyed -- wait_for: path=/proc/3466/status state=absent - -# Wait 300 seconds for port 22 to become open and contain "OpenSSH", don't start checking for 10 seconds -- local_action: wait_for port=22 host="{{ inventory_hostname }}" search_regex=OpenSSH delay=10 - -''' - -class TCPConnectionInfo(object): - """ - This is a generic TCP Connection Info strategy class that relies - on the psutil module, which is not ideal for targets, but necessary - for cross platform support. - - A subclass may wish to override some or all of these methods. - - _get_exclude_ips() - - get_active_connections() - - All subclasses MUST define platform and distribution (which may be None). 
- """ - platform = 'Generic' - distribution = None - - match_all_ips = { - socket.AF_INET: '0.0.0.0', - socket.AF_INET6: '::', - } - connection_states = { - '01': 'ESTABLISHED', - '02': 'SYN_SENT', - '03': 'SYN_RECV', - '04': 'FIN_WAIT1', - '05': 'FIN_WAIT2', - '06': 'TIME_WAIT', - } - - def __new__(cls, *args, **kwargs): - return load_platform_subclass(TCPConnectionInfo, args, kwargs) - - def __init__(self, module): - self.module = module - (self.family, self.ip) = _convert_host_to_ip(self.module.params['host']) - self.port = int(self.module.params['port']) - self.exclude_ips = self._get_exclude_ips() - if not HAS_PSUTIL: - module.fail_json(msg="psutil module required for wait_for") - - def _get_exclude_ips(self): - if self.module.params['exclude_hosts'] is None: - return [] - exclude_hosts = self.module.params['exclude_hosts'].split(',') - return [ _convert_host_to_hex(h)[1] for h in exclude_hosts ] - - def get_active_connections_count(self): - active_connections = 0 - for p in psutil.process_iter(): - connections = p.get_connections(kind='inet') - for conn in connections: - if conn.status not in self.connection_states.values(): - continue - (local_ip, local_port) = conn.local_address - if self.port == local_port and self.ip in [self.match_all_ips[self.family], local_ip]: - (remote_ip, remote_port) = conn.remote_address - if remote_ip not in self.exclude_ips: - active_connections += 1 - return active_connections - - -# =========================================== -# Subclass: Linux - -class LinuxTCPConnectionInfo(TCPConnectionInfo): - """ - This is a TCP Connection Info evaluation strategy class - that utilizes information from Linux's procfs. While less universal, - does allow Linux targets to not require an additional library. 
- """ - platform = 'Linux' - distribution = None - - source_file = { - socket.AF_INET: '/proc/net/tcp', - socket.AF_INET6: '/proc/net/tcp6' - } - match_all_ips = { - socket.AF_INET: '00000000', - socket.AF_INET6: '00000000000000000000000000000000', - } - local_address_field = 1 - remote_address_field = 2 - connection_state_field = 3 - - def __init__(self, module): - self.module = module - (self.family, self.ip) = _convert_host_to_hex(module.params['host']) - self.port = "%0.4X" % int(module.params['port']) - self.exclude_ips = self._get_exclude_ips() - - def _get_exclude_ips(self): - if self.module.params['exclude_hosts'] is None: - return [] - exclude_hosts = self.module.params['exclude_hosts'].split(',') - return [ _convert_host_to_hex(h) for h in exclude_hosts ] - - def get_active_connections_count(self): - active_connections = 0 - f = open(self.source_file[self.family]) - for tcp_connection in f.readlines(): - tcp_connection = tcp_connection.strip().split(' ') - if tcp_connection[self.local_address_field] == 'local_address': - continue - if tcp_connection[self.connection_state_field] not in self.connection_states: - continue - (local_ip, local_port) = tcp_connection[self.local_address_field].split(':') - if self.port == local_port and self.ip in [self.match_all_ips[self.family], local_ip]: - (remote_ip, remote_port) = tcp_connection[self.remote_address_field].split(':') - if remote_ip not in self.exclude_ips: - active_connections += 1 - f.close() - return active_connections - - -def _convert_host_to_ip(host): - """ - Perform forward DNS resolution on host, IP will give the same IP - - Args: - host: String with either hostname, IPv4, or IPv6 address - - Returns: - Tuple containing address family and IP - """ - addrinfo = socket.getaddrinfo(host, 80, 0, 0, socket.SOL_TCP)[0] - return (addrinfo[0], addrinfo[4][0]) - -def _convert_host_to_hex(host): - """ - Convert the provided host to the format in /proc/net/tcp* - - /proc/net/tcp uses little-endian four byte hex 
for ipv4 - /proc/net/tcp6 uses little-endian per 4B word for ipv6 - - Args: - host: String with either hostname, IPv4, or IPv6 address - - Returns: - Tuple containing address family and the little-endian converted host - """ - (family, ip) = _convert_host_to_ip(host) - hexed = binascii.hexlify(socket.inet_pton(family, ip)).upper() - if family == socket.AF_INET: - hexed = _little_endian_convert_32bit(hexed) - elif family == socket.AF_INET6: - # xrange loops through each 8 character (4B) set in the 128bit total - hexed = "".join([ _little_endian_convert_32bit(hexed[x:x+8]) for x in xrange(0, 32, 8) ]) - return (family, hexed) - -def _little_endian_convert_32bit(block): - """ - Convert to little-endian, effectively transposing - the order of the four byte word - 12345678 -> 78563412 - - Args: - block: String containing a 4 byte hex representation - - Returns: - String containing the little-endian converted block - """ - # xrange starts at 6, and increments by -2 until it reaches -2 - # which lets us start at the end of the string block and work to the begining - return "".join([ block[x:x+2] for x in xrange(6, -2, -2) ]) - -def main(): - - module = AnsibleModule( - argument_spec = dict( - host=dict(default='127.0.0.1'), - timeout=dict(default=300), - connect_timeout=dict(default=5), - delay=dict(default=0), - port=dict(default=None), - path=dict(default=None), - search_regex=dict(default=None), - state=dict(default='started', choices=['started', 'stopped', 'present', 'absent', 'drained']), - exclude_hosts=dict(default=None, type='list') - ), - ) - - params = module.params - - host = params['host'] - timeout = int(params['timeout']) - connect_timeout = int(params['connect_timeout']) - delay = int(params['delay']) - if params['port']: - port = int(params['port']) - else: - port = None - state = params['state'] - path = params['path'] - search_regex = params['search_regex'] - if params['exclude_hosts']: - exclude_hosts = params['exclude_hosts'].split(',') - else: - 
exclude_hosts = [] - - if port and path: - module.fail_json(msg="port and path parameter can not both be passed to wait_for") - if path and state == 'stopped': - module.fail_json(msg="state=stopped should only be used for checking a port in the wait_for module") - if path and state == 'drained': - module.fail_json(msg="state=drained should only be used for checking a port in the wait_for module") - if exclude_hosts and state != 'drained': - module.fail_json(msg="exclude_hosts should only be with state=drained") - - start = datetime.datetime.now() - - if delay: - time.sleep(delay) - - if state in [ 'stopped', 'absent' ]: - ### first wait for the stop condition - end = start + datetime.timedelta(seconds=timeout) - - while datetime.datetime.now() < end: - if path: - try: - f = open(path) - f.close() - time.sleep(1) - pass - except IOError: - break - elif port: - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - s.settimeout(connect_timeout) - try: - s.connect( (host, port) ) - s.shutdown(socket.SHUT_RDWR) - s.close() - time.sleep(1) - except: - break - else: - elapsed = datetime.datetime.now() - start - if port: - module.fail_json(msg="Timeout when waiting for %s:%s to stop." % (host, port), elapsed=elapsed.seconds) - elif path: - module.fail_json(msg="Timeout when waiting for %s to be absent." 
% (path), elapsed=elapsed.seconds) - - elif state in ['started', 'present']: - ### wait for start condition - end = start + datetime.timedelta(seconds=timeout) - while datetime.datetime.now() < end: - if path: - try: - os.stat(path) - if search_regex: - try: - f = open(path) - try: - if re.search(search_regex, f.read(), re.MULTILINE): - break - else: - time.sleep(1) - finally: - f.close() - except IOError: - time.sleep(1) - pass - else: - break - except OSError, e: - # File not present - if e.errno == 2: - time.sleep(1) - else: - elapsed = datetime.datetime.now() - start - module.fail_json(msg="Failed to stat %s, %s" % (path, e.strerror), elapsed=elapsed.seconds) - elif port: - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - s.settimeout(connect_timeout) - try: - s.connect( (host, port) ) - if search_regex: - data = '' - matched = False - while 1: - data += s.recv(1024) - if not data: - break - elif re.search(search_regex, data, re.MULTILINE): - matched = True - break - if matched: - s.shutdown(socket.SHUT_RDWR) - s.close() - break - else: - s.shutdown(socket.SHUT_RDWR) - s.close() - break - except: - time.sleep(1) - pass - else: - elapsed = datetime.datetime.now() - start - if port: - if search_regex: - module.fail_json(msg="Timeout when waiting for search string %s in %s:%s" % (search_regex, host, port), elapsed=elapsed.seconds) - else: - module.fail_json(msg="Timeout when waiting for %s:%s" % (host, port), elapsed=elapsed.seconds) - elif path: - if search_regex: - module.fail_json(msg="Timeout when waiting for search string %s in %s" % (search_regex, path), elapsed=elapsed.seconds) - else: - module.fail_json(msg="Timeout when waiting for file %s" % (path), elapsed=elapsed.seconds) - - elif state == 'drained': - ### wait until all active connections are gone - end = start + datetime.timedelta(seconds=timeout) - tcpconns = TCPConnectionInfo(module) - while datetime.datetime.now() < end: - try: - if tcpconns.get_active_connections_count() == 0: - break - 
except IOError: - pass - time.sleep(1) - else: - elapsed = datetime.datetime.now() - start - module.fail_json(msg="Timeout when waiting for %s:%s to drain" % (host, port), elapsed=elapsed.seconds) - - elapsed = datetime.datetime.now() - start - module.exit_json(state=state, port=port, search_regex=search_regex, path=path, elapsed=elapsed.seconds) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/web_infrastructure/apache2_module b/library/web_infrastructure/apache2_module deleted file mode 100644 index 3935148208..0000000000 --- a/library/web_infrastructure/apache2_module +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/python -#coding: utf-8 -*- - -# (c) 2013-2014, Christian Berendt -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . - -DOCUMENTATION = ''' ---- -module: apache2_module -version_added: 1.6 -short_description: enables/disables a module of the Apache2 webserver -description: - - Enables or disables a specified module of the Apache2 webserver. 
-options: - name: - description: - - name of the module to enable/disable - required: true - state: - description: - - indicate the desired state of the resource - choices: ['present', 'absent'] - default: present - -''' - -EXAMPLES = ''' -# enables the Apache2 module "wsgi" -- apache2_module: state=present name=wsgi - -# disables the Apache2 module "wsgi" -- apache2_module: state=absent name=wsgi -''' - -import re - -def _disable_module(module): - name = module.params['name'] - a2dismod_binary = module.get_bin_path("a2dismod") - result, stdout, stderr = module.run_command("%s %s" % (a2dismod_binary, name)) - - if re.match(r'.*' + name + r' already disabled.*', stdout, re.S): - module.exit_json(changed = False, result = "Success") - elif result != 0: - module.fail_json(msg="Failed to disable module %s: %s" % (name, stdout)) - else: - module.exit_json(changed = True, result = "Disabled") - -def _enable_module(module): - name = module.params['name'] - a2enmod_binary = module.get_bin_path("a2enmod") - result, stdout, stderr = module.run_command("%s %s" % (a2enmod_binary, name)) - - if re.match(r'.*' + name + r' already enabled.*', stdout, re.S): - module.exit_json(changed = False, result = "Success") - elif result != 0: - module.fail_json(msg="Failed to enable module %s: %s" % (name, stdout)) - else: - module.exit_json(changed = True, result = "Enabled") - -def main(): - module = AnsibleModule( - argument_spec = dict( - name = dict(required=True), - state = dict(default='present', choices=['absent', 'present']) - ), - ) - - if module.params['state'] == 'present': - _enable_module(module) - - if module.params['state'] == 'absent': - _disable_module(module) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/web_infrastructure/django_manage b/library/web_infrastructure/django_manage deleted file mode 100644 index 580cc63c2d..0000000000 --- a/library/web_infrastructure/django_manage +++ /dev/null @@ -1,281 +0,0 @@ 
-#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Scott Anderson -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -DOCUMENTATION = ''' ---- -module: django_manage -short_description: Manages a Django application. -description: - - Manages a Django application using the I(manage.py) application frontend to I(django-admin). With the I(virtualenv) parameter, all management commands will be executed by the given I(virtualenv) installation. -version_added: "1.1" -options: - command: - choices: [ 'cleanup', 'collectstatic', 'flush', 'loaddata', 'migrate', 'runfcgi', 'syncdb', 'test', 'validate', ] - description: - - The name of the Django management command to run. Built in commands are cleanup, collectstatic, flush, loaddata, migrate, runfcgi, syncdb, test, and validate. Other commands can be entered, but will fail if they're unknown to Django. - required: true - app_path: - description: - - The path to the root of the Django application where B(manage.py) lives. - required: true - settings: - description: - - The Python path to the application's settings module, such as 'myapp.settings'. - required: false - pythonpath: - description: - - A directory to add to the Python path. Typically used to include the settings module if it is located external to the application directory. 
- required: false - virtualenv: - description: - - An optional path to a I(virtualenv) installation to use while running the manage application. - required: false - apps: - description: - - A list of space-delimited apps to target. Used by the 'test' command. - required: false - cache_table: - description: - - The name of the table used for database-backed caching. Used by the 'createcachetable' command. - required: false - database: - description: - - The database to target. Used by the 'createcachetable', 'flush', 'loaddata', and 'syncdb' commands. - required: false - failfast: - description: - - Fail the command immediately if a test fails. Used by the 'test' command. - required: false - default: "no" - choices: [ "yes", "no" ] - fixtures: - description: - - A space-delimited list of fixture file names to load in the database. B(Required) by the 'loaddata' command. - required: false - skip: - description: - - Will skip over out-of-order missing migrations, you can only use this parameter with I(migrate) - required: false - version_added: "1.3" - merge: - description: - - Will run out-of-order or missing migrations as they are not rollback migrations, you can only use this parameter with 'migrate' command - required: false - version_added: "1.3" - link: - description: - - Will create links to the files instead of copying them, you can only use this parameter with 'collectstatic' command - required: false - version_added: "1.3" -notes: - - I(virtualenv) (U(http://www.virtualenv.org)) must be installed on the remote host if the virtualenv parameter is specified. - - This module will create a virtualenv if the virtualenv parameter is specified and a virtualenv does not already exist at the given location. - - This module assumes English error messages for the 'createcachetable' command to detect table existence, unfortunately. 
- - To be able to use the migrate command, you must have south installed and added as an app in your settings - - To be able to use the collectstatic command, you must have enabled staticfiles in your settings -requirements: [ "virtualenv", "django" ] -author: Scott Anderson -''' - -EXAMPLES = """ -# Run cleanup on the application installed in 'django_dir'. -- django_manage: command=cleanup app_path={{ django_dir }} - -# Load the initial_data fixture into the application -- django_manage: command=loaddata app_path={{ django_dir }} fixtures={{ initial_data }} - -#Run syncdb on the application -- django_manage: > - command=syncdb - app_path={{ django_dir }} - settings={{ settings_app_name }} - pythonpath={{ settings_dir }} - virtualenv={{ virtualenv_dir }} - -#Run the SmokeTest test case from the main app. Useful for testing deploys. -- django_manage: command=test app_path=django_dir apps=main.SmokeTest -""" - - -import os - -def _fail(module, cmd, out, err, **kwargs): - msg = '' - if out: - msg += "stdout: %s" % (out, ) - if err: - msg += "\n:stderr: %s" % (err, ) - module.fail_json(cmd=cmd, msg=msg, **kwargs) - - -def _ensure_virtualenv(module): - - venv_param = module.params['virtualenv'] - if venv_param is None: - return - - vbin = os.path.join(os.path.expanduser(venv_param), 'bin') - activate = os.path.join(vbin, 'activate') - - if not os.path.exists(activate): - virtualenv = module.get_bin_path('virtualenv', True) - vcmd = '%s %s' % (virtualenv, venv_param) - vcmd = [virtualenv, venv_param] - rc, out_venv, err_venv = module.run_command(vcmd) - if rc != 0: - _fail(module, vcmd, out_venv, err_venv) - - os.environ["PATH"] = "%s:%s" % (vbin, os.environ["PATH"]) - os.environ["VIRTUAL_ENV"] = venv_param - -def createcachetable_filter_output(line): - return "Already exists" not in line - -def flush_filter_output(line): - return "Installed" in line and "Installed 0 object" not in line - -def loaddata_filter_output(line): - return "Installed" in line and "Installed 0 
object" not in line - -def syncdb_filter_output(line): - return ("Creating table " in line) or ("Installed" in line and "Installed 0 object" not in line) - -def migrate_filter_output(line): - return ("Migrating forwards " in line) or ("Installed" in line and "Installed 0 object" not in line) - -def main(): - command_allowed_param_map = dict( - cleanup=(), - createcachetable=('cache_table', 'database', ), - flush=('database', ), - loaddata=('database', 'fixtures', ), - syncdb=('database', ), - test=('failfast', 'testrunner', 'liveserver', 'apps', ), - validate=(), - migrate=('apps', 'skip', 'merge'), - collectstatic=('link', ), - ) - - command_required_param_map = dict( - loaddata=('fixtures', ), - createcachetable=('cache_table', ), - ) - - # forces --noinput on every command that needs it - noinput_commands = ( - 'flush', - 'syncdb', - 'migrate', - 'test', - 'collectstatic', - ) - - # These params are allowed for certain commands only - specific_params = ('apps', 'database', 'failfast', 'fixtures', 'liveserver', 'testrunner') - - # These params are automatically added to the command if present - general_params = ('settings', 'pythonpath', 'database',) - specific_boolean_params = ('failfast', 'skip', 'merge', 'link') - end_of_command_params = ('apps', 'cache_table', 'fixtures') - - module = AnsibleModule( - argument_spec=dict( - command = dict(default=None, required=True), - app_path = dict(default=None, required=True), - settings = dict(default=None, required=False), - pythonpath = dict(default=None, required=False, aliases=['python_path']), - virtualenv = dict(default=None, required=False, aliases=['virtual_env']), - - apps = dict(default=None, required=False), - cache_table = dict(default=None, required=False), - database = dict(default=None, required=False), - failfast = dict(default='no', required=False, type='bool', aliases=['fail_fast']), - fixtures = dict(default=None, required=False), - liveserver = dict(default=None, required=False, 
aliases=['live_server']), - testrunner = dict(default=None, required=False, aliases=['test_runner']), - skip = dict(default=None, required=False, type='bool'), - merge = dict(default=None, required=False, type='bool'), - link = dict(default=None, required=False, type='bool'), - ), - ) - - command = module.params['command'] - app_path = module.params['app_path'] - virtualenv = module.params['virtualenv'] - - for param in specific_params: - value = module.params[param] - if param in specific_boolean_params: - value = module.boolean(value) - if value and param not in command_allowed_param_map[command]: - module.fail_json(msg='%s param is incompatible with command=%s' % (param, command)) - - for param in command_required_param_map.get(command, ()): - if not module.params[param]: - module.fail_json(msg='%s param is required for command=%s' % (param, command)) - - venv = module.params['virtualenv'] - - _ensure_virtualenv(module) - - cmd = "python manage.py %s" % (command, ) - - if command in noinput_commands: - cmd = '%s --noinput' % cmd - - for param in general_params: - if module.params[param]: - cmd = '%s --%s=%s' % (cmd, param, module.params[param]) - - for param in specific_boolean_params: - if module.boolean(module.params[param]): - cmd = '%s --%s' % (cmd, param) - - # these params always get tacked on the end of the command - for param in end_of_command_params: - if module.params[param]: - cmd = '%s %s' % (cmd, module.params[param]) - - rc, out, err = module.run_command(cmd, cwd=app_path) - if rc != 0: - if command == 'createcachetable' and 'table' in err and 'already exists' in err: - out = 'Already exists.' 
- else: - if "Unknown command:" in err: - _fail(module, cmd, err, "Unknown django command: %s" % command) - _fail(module, cmd, out, err, path=os.environ["PATH"], syspath=sys.path) - - changed = False - - lines = out.split('\n') - filt = globals().get(command + "_filter_output", None) - if filt: - filtered_output = filter(filt, out.split('\n')) - if len(filtered_output): - changed = filtered_output - - module.exit_json(changed=changed, out=out, cmd=cmd, app_path=app_path, virtualenv=virtualenv, - settings=module.params['settings'], pythonpath=module.params['pythonpath']) - -# import module snippets -from ansible.module_utils.basic import * - -main() diff --git a/library/web_infrastructure/ejabberd_user b/library/web_infrastructure/ejabberd_user deleted file mode 100755 index d8b0384679..0000000000 --- a/library/web_infrastructure/ejabberd_user +++ /dev/null @@ -1,214 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (C) 2013, Peter Sprygada -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
-# -DOCUMENTATION = ''' ---- -module: ejabberd_user -version_added: "1.5" -author: Peter Sprygada -short_description: Manages users for ejabberd servers -requirements: - - ejabberd with mod_admin_extra -description: - - This module provides user management for ejabberd servers -options: - username: - description: - - the name of the user to manage - required: true - host: - description: - - the ejabberd host associated with this username - required: true - password: - description: - - the password to assign to the username - required: false - logging: - description: - - enables or disables the local syslog facility for this module - required: false - default: false - choices: [ 'true', 'false', 'yes', 'no' ] - state: - description: - - describe the desired state of the user to be managed - required: false - default: 'present' - choices: [ 'present', 'absent' ] -notes: - - Password parameter is required for state == present only - - Passwords must be stored in clear text for this release - - The ejabberd configuration file must include mod_admin_extra as a module. -''' -EXAMPLES = ''' -Example playbook entries using the ejabberd_user module to manage users state. - - tasks: - - - name: create a user if it does not exists - action: ejabberd_user username=test host=server password=password - - - name: delete a user if it exists - action: ejabberd_user username=test host=server state=absent -''' -import syslog - -class EjabberdUserException(Exception): - """ Base exeption for EjabberdUser class object """ - pass - -class EjabberdUser(object): - """ This object represents a user resource for an ejabberd server. The - object manages user creation and deletion using ejabberdctl. 
    The following commands are currently supported:

    * ejabberdctl register
    * ejabberdctl deregister
    """

    def __init__(self, module):
        # Cache the AnsibleModule handle and the parameters every
        # ejabberdctl invocation needs.
        self.module = module
        self.logging = module.params.get('logging')
        self.state = module.params.get('state')
        self.host = module.params.get('host')
        self.user = module.params.get('username')
        self.pwd = module.params.get('password')

    @property
    def changed(self):
        """Return the exit status of ``ejabberdctl check_password``.

        NOTE(review): this returns the raw return code (0 when the stored
        password already matches, non-zero otherwise), not a strict bool;
        callers rely only on its truthiness.
        """
        try:
            options = [self.user, self.host, self.pwd]
            (rc, out, err) = self.run_command('check_password', options)
        except EjabberdUserException, e:
            # A falsy option means a required attribute was not supplied.
            (rc, out, err) = (1, None, "required attribute(s) missing")
        return rc

    @property
    def exists(self):
        """Return True when ``ejabberdctl check_account`` reports that the
        supplied username exists for the configured host, else False.
        """
        try:
            options = [self.user, self.host]
            (rc, out, err) = self.run_command('check_account', options)
        except EjabberdUserException, e:
            (rc, out, err) = (1, None, "required attribute(s) missing")
        return True if rc == 0 else False

    def log(self, entry):
        """Log *entry* to the local syslog facility (only when the module
        was invoked with logging=yes)."""
        if self.logging:
            syslog.openlog('ansible-%s' % os.path.basename(__file__))
            syslog.syslog(syslog.LOG_NOTICE, entry)

    def run_command(self, cmd, options):
        """Run ``ejabberdctl <cmd> <options...>`` via the Ansible helper.

        Raises EjabberdUserException when any option is falsy (missing
        required attribute).  Returns the (rc, stdout, stderr) triple.
        """
        if not all(options):
            raise EjabberdUserException

        cmd = 'ejabberdctl %s ' % cmd
        cmd += " ".join(options)
        self.log('command: %s' % cmd)
        return self.module.run_command(cmd.split())

    def update(self):
        """Change the user's password via ``ejabberdctl change_password``.

        Returns the (rc, stdout, stderr) triple.
        """
        try:
            options = [self.user, self.host, self.pwd]
            (rc, out, err) = self.run_command('change_password', options)
        except EjabberdUserException, e:
            (rc, out, err) = (1, None, "required attribute(s) missing")
        return (rc, out, err)

    def create(self):
        """Register a new user on the host via ``ejabberdctl register``.

        Returns the (rc, stdout, stderr) triple.
        """
        try:
            options = [self.user, self.host, self.pwd]
            (rc, out, err) = self.run_command('register', options)
        except EjabberdUserException, e:
            (rc, out, err) = (1, None, "required attribute(s) missing")
        return (rc, out, err)

    def delete(self):
        """Remove the user from the host via ``ejabberdctl unregister``.

        Returns the (rc, stdout, stderr) triple.
        """
        try:
            options = [self.user, self.host]
            (rc, out, err) = self.run_command('unregister', options)
        except EjabberdUserException, e:
            (rc, out, err) = (1, None, "required attribute(s) missing")
        return (rc, out, err)


def main():
    # Entry point: dispatch on desired state, honouring check mode.
    module = AnsibleModule(
        argument_spec = dict(
            host=dict(default=None, type='str'),
            username=dict(default=None, type='str'),
            password=dict(default=None, type='str'),
            state=dict(default='present', choices=['present', 'absent']),
            logging=dict(default=False, type='bool')
        ),
        supports_check_mode = True
    )

    obj = EjabberdUser(module)

    # rc stays None when no command was run; that maps to changed=False.
    rc = None
    result = dict()

    if obj.state == 'absent':
        if obj.exists:
            if module.check_mode:
                module.exit_json(changed=True)
            (rc, out, err) = obj.delete()
            if rc != 0:
                module.fail_json(msg=err, rc=rc)

    elif obj.state == 'present':
        if not obj.exists:
            if module.check_mode:
                module.exit_json(changed=True)
            (rc, out, err) = obj.create()
        elif obj.changed:
            # Account exists but the password no longer matches.
            if module.check_mode:
                module.exit_json(changed=True)
            (rc, out, err) = obj.update()
        if rc is not None and rc != 0:
            module.fail_json(msg=err, rc=rc)

    if rc is None:
        result['changed'] = False
    else:
        result['changed'] = True

    module.exit_json(**result)


# import module snippets
from ansible.module_utils.basic import *
main()
b/library/web_infrastructure/htpasswd deleted file mode 100644 index 4a72ea37fe..0000000000 --- a/library/web_infrastructure/htpasswd +++ /dev/null @@ -1,219 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Nimbis Services, Inc. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# -DOCUMENTATION = """ -module: htpasswd -version_added: "1.3" -short_description: manage user files for basic authentication -description: - - Add and remove username/password entries in a password file using htpasswd. - - This is used by web servers such as Apache and Nginx for basic authentication. -options: - path: - required: true - aliases: [ dest, destfile ] - description: - - Path to the file that contains the usernames and passwords - name: - required: true - aliases: [ username ] - description: - - User name to add or remove - password: - required: false - description: - - Password associated with user. - - Must be specified if user does not exist yet. - crypt_scheme: - required: false - choices: ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"] - default: "apr_md5_crypt" - description: - - Encryption scheme to be used. - state: - required: false - choices: [ present, absent ] - default: "present" - description: - - Whether the user entry should be present or not - create: - required: false - choices: [ "yes", "no" ] - default: "yes" - description: - - Used with C(state=present). 
If specified, the file will be created - if it does not already exist. If set to "no", will fail if the - file does not exist -notes: - - "This module depends on the I(passlib) Python library, which needs to be installed on all target systems." - - "On Debian, Ubuntu, or Fedora: install I(python-passlib)." - - "On RHEL or CentOS: Enable EPEL, then install I(python-passlib)." -requires: [ passlib>=1.6 ] -author: Lorin Hochstein -""" - -EXAMPLES = """ -# Add a user to a password file and ensure permissions are set -- htpasswd: path=/etc/nginx/passwdfile name=janedoe password=9s36?;fyNp owner=root group=www-data mode=0640 -# Remove a user from a password file -- htpasswd: path=/etc/apache2/passwdfile name=foobar state=absent -""" - - -import os -from distutils.version import StrictVersion - -try: - from passlib.apache import HtpasswdFile - import passlib -except ImportError: - passlib_installed = False -else: - passlib_installed = True - - -def create_missing_directories(dest): - destpath = os.path.dirname(dest) - if not os.path.exists(destpath): - os.makedirs(destpath) - - -def present(dest, username, password, crypt_scheme, create, check_mode): - """ Ensures user is present - - Returns (msg, changed) """ - if not os.path.exists(dest): - if not create: - raise ValueError('Destination %s does not exist' % dest) - if check_mode: - return ("Create %s" % dest, True) - create_missing_directories(dest) - if StrictVersion(passlib.__version__) >= StrictVersion('1.6'): - ht = HtpasswdFile(dest, new=True, default_scheme=crypt_scheme) - else: - ht = HtpasswdFile(dest, autoload=False, default=crypt_scheme) - if getattr(ht, 'set_password', None): - ht.set_password(username, password) - else: - ht.update(username, password) - ht.save() - return ("Created %s and added %s" % (dest, username), True) - else: - if StrictVersion(passlib.__version__) >= StrictVersion('1.6'): - ht = HtpasswdFile(dest, new=False, default_scheme=crypt_scheme) - else: - ht = HtpasswdFile(dest, 
default=crypt_scheme) - - found = None - if getattr(ht, 'check_password', None): - found = ht.check_password(username, password) - else: - found = ht.verify(username, password) - - if found: - return ("%s already present" % username, False) - else: - if not check_mode: - if getattr(ht, 'set_password', None): - ht.set_password(username, password) - else: - ht.update(username, password) - ht.save() - return ("Add/update %s" % username, True) - - -def absent(dest, username, check_mode): - """ Ensures user is absent - - Returns (msg, changed) """ - if not os.path.exists(dest): - raise ValueError("%s does not exists" % dest) - - if StrictVersion(passlib.__version__) >= StrictVersion('1.6'): - ht = HtpasswdFile(dest, new=False) - else: - ht = HtpasswdFile(dest) - - if username not in ht.users(): - return ("%s not present" % username, False) - else: - if not check_mode: - ht.delete(username) - ht.save() - return ("Remove %s" % username, True) - - -def check_file_attrs(module, changed, message): - - file_args = module.load_file_common_arguments(module.params) - if module.set_fs_attributes_if_different(file_args, False): - - if changed: - message += " and " - changed = True - message += "ownership, perms or SE linux context changed" - - return message, changed - - -def main(): - arg_spec = dict( - path=dict(required=True, aliases=["dest", "destfile"]), - name=dict(required=True, aliases=["username"]), - password=dict(required=False, default=None), - crypt_scheme=dict(required=False, default=None), - state=dict(required=False, default="present"), - create=dict(type='bool', default='yes'), - - ) - module = AnsibleModule(argument_spec=arg_spec, - add_file_common_args=True, - supports_check_mode=True) - - path = module.params['path'] - username = module.params['name'] - password = module.params['password'] - crypt_scheme = module.params['crypt_scheme'] - state = module.params['state'] - create = module.params['create'] - check_mode = module.check_mode - - if not 
passlib_installed: - module.fail_json(msg="This module requires the passlib Python library") - - try: - if state == 'present': - (msg, changed) = present(path, username, password, crypt_scheme, create, check_mode) - elif state == 'absent': - (msg, changed) = absent(path, username, check_mode) - else: - module.fail_json(msg="Invalid state: %s" % state) - - check_file_attrs(module, changed, msg) - module.exit_json(msg=msg, changed=changed) - except Exception, e: - module.fail_json(msg=str(e)) - - -# import module snippets -from ansible.module_utils.basic import * - -if __name__ == '__main__': - main() diff --git a/library/web_infrastructure/jboss b/library/web_infrastructure/jboss deleted file mode 100644 index 9478235698..0000000000 --- a/library/web_infrastructure/jboss +++ /dev/null @@ -1,140 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Jeroen Hoekx -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -DOCUMENTATION = """ -module: jboss -version_added: "1.4" -short_description: deploy applications to JBoss -description: - - Deploy applications to JBoss standalone using the filesystem -options: - deployment: - required: true - description: - - The name of the deployment - src: - required: false - description: - - The remote path of the application ear or war to deploy - deploy_path: - required: false - default: /var/lib/jbossas/standalone/deployments - description: - - The location in the filesystem where the deployment scanner listens - state: - required: false - choices: [ present, absent ] - default: "present" - description: - - Whether the application should be deployed or undeployed -notes: - - "The JBoss standalone deployment-scanner has to be enabled in standalone.xml" - - "Ensure no identically named application is deployed through the JBoss CLI" -author: Jeroen Hoekx -""" - -EXAMPLES = """ -# Deploy a hello world application -- jboss: src=/tmp/hello-1.0-SNAPSHOT.war deployment=hello.war state=present -# Update the hello world application -- jboss: src=/tmp/hello-1.1-SNAPSHOT.war deployment=hello.war state=present -# Undeploy the hello world application -- jboss: deployment=hello.war state=absent -""" - -import os -import shutil -import time - -def is_deployed(deploy_path, deployment): - return os.path.exists(os.path.join(deploy_path, "%s.deployed"%(deployment))) - -def is_undeployed(deploy_path, deployment): - return os.path.exists(os.path.join(deploy_path, "%s.undeployed"%(deployment))) - -def is_failed(deploy_path, deployment): - return os.path.exists(os.path.join(deploy_path, "%s.failed"%(deployment))) - -def main(): - module = AnsibleModule( - argument_spec = dict( - src=dict(), - deployment=dict(required=True), - deploy_path=dict(default='/var/lib/jbossas/standalone/deployments'), - state=dict(choices=['absent', 'present'], default='present'), - ), - ) - - changed = False - - src = module.params['src'] - deployment = module.params['deployment'] - 
deploy_path = module.params['deploy_path'] - state = module.params['state'] - - if state == 'present' and not src: - module.fail_json(msg="Argument 'src' required.") - - if not os.path.exists(deploy_path): - module.fail_json(msg="deploy_path does not exist.") - - deployed = is_deployed(deploy_path, deployment) - - if state == 'present' and not deployed: - if not os.path.exists(src): - module.fail_json(msg='Source file %s does not exist.'%(src)) - if is_failed(deploy_path, deployment): - ### Clean up old failed deployment - os.remove(os.path.join(deploy_path, "%s.failed"%(deployment))) - - shutil.copyfile(src, os.path.join(deploy_path, deployment)) - while not deployed: - deployed = is_deployed(deploy_path, deployment) - if is_failed(deploy_path, deployment): - module.fail_json(msg='Deploying %s failed.'%(deployment)) - time.sleep(1) - changed = True - - if state == 'present' and deployed: - if module.md5(src) != module.md5(os.path.join(deploy_path, deployment)): - os.remove(os.path.join(deploy_path, "%s.deployed"%(deployment))) - shutil.copyfile(src, os.path.join(deploy_path, deployment)) - deployed = False - while not deployed: - deployed = is_deployed(deploy_path, deployment) - if is_failed(deploy_path, deployment): - module.fail_json(msg='Deploying %s failed.'%(deployment)) - time.sleep(1) - changed = True - - if state == 'absent' and deployed: - os.remove(os.path.join(deploy_path, "%s.deployed"%(deployment))) - while deployed: - deployed = not is_undeployed(deploy_path, deployment) - if is_failed(deploy_path, deployment): - module.fail_json(msg='Undeploying %s failed.'%(deployment)) - time.sleep(1) - changed = True - - module.exit_json(changed=changed) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/library/web_infrastructure/jira b/library/web_infrastructure/jira deleted file mode 100644 index 950fc3dbfc..0000000000 --- a/library/web_infrastructure/jira +++ /dev/null @@ -1,347 +0,0 @@ -#!/usr/bin/python -# -*- coding: 
# (c) 2014, Steve Smith <ssmith@atlassian.com>
# Atlassian open-source approval reference OSR-76.
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
#

DOCUMENTATION = """
module: jira
version_added: "1.6"
short_description: create and modify issues in a JIRA instance
description:
  - Create and modify issues in a JIRA instance.

options:
  uri:
    required: true
    description:
      - Base URI for the JIRA instance

  operation:
    required: true
    aliases: [ command ]
    choices: [ create, comment, edit, fetch, transition ]
    description:
      - The operation to perform.

  username:
    required: true
    description:
      - The username to log-in with.

  password:
    required: true
    description:
      - The password to log-in with.

  project:
    aliases: [ prj ]
    required: false
    description:
      - The project for this operation. Required for issue creation.

  summary:
    required: false
    description:
      - The issue summary, where appropriate.

  description:
    required: false
    description:
      - The issue description, where appropriate.

  issuetype:
    required: false
    description:
      - The issue type, for issue creation.

  issue:
    required: false
    description:
      - An existing issue key to operate on.

  comment:
    required: false
    description:
      - The comment text to add.

  status:
    required: false
    description:
      - The desired status; only relevant for the transition operation.

  assignee:
    required: false
    description:
      - Sets the assignee on create or transition operations. Note not all transitions will allow this.

  fields:
    required: false
    description:
      - This is a free-form data structure that can contain arbitrary data. This is passed directly to the JIRA REST API (possibly after merging with other required data, as when passed to create). See examples for more information, and the JIRA REST API for the structure required for various fields.

notes:
  - "Currently this only works with basic-auth."

author: Steve Smith
"""

EXAMPLES = """
# Create a new issue and add a comment to it:
- name: Create an issue
  jira: uri={{server}} username={{user}} password={{pass}}
        project=ANS operation=create
        summary="Example Issue" description="Created using Ansible" issuetype=Task
  register: issue

- name: Comment on issue
  jira: uri={{server}} username={{user}} password={{pass}}
        issue={{issue.meta.key}} operation=comment
        comment="A comment added by Ansible"

# Assign an existing issue using edit
- name: Assign an issue using free-form fields
  jira: uri={{server}} username={{user}} password={{pass}}
        issue={{issue.meta.key}} operation=edit
        assignee=ssmith

# Create an issue with an existing assignee
- name: Create an assigned issue
  jira: uri={{server}} username={{user}} password={{pass}}
        project=ANS operation=create
        summary="Assigned issue" description="Created and assigned using Ansible"
        issuetype=Task assignee=ssmith

# Edit an issue using free-form fields
- name: Set the labels on an issue using free-form fields
  jira: uri={{server}} username={{user}} password={{pass}}
        issue={{issue.meta.key}} operation=edit
  args: { fields: {labels: ["autocreated", "ansible"]}}

- name: Set the labels on an issue, YAML version
  jira: uri={{server}} username={{user}} password={{pass}}
        issue={{issue.meta.key}} operation=edit
  args:
    fields:
      labels:
        - "autocreated"
        - "ansible"
        - "yaml"

# Retrieve metadata for an issue and use it to create an account
- name: Get an issue
  jira: uri={{server}} username={{user}} password={{pass}}
        project=ANS operation=fetch issue="ANS-63"
  register: issue

- name: Create a unix account for the reporter
  sudo: true
  user: name="{{issue.meta.fields.creator.name}}" comment="{{issue.meta.fields.creator.displayName}}"

# Transition an issue by target status
- name: Close the issue
  jira: uri={{server}} username={{user}} password={{pass}}
        issue={{issue.meta.key}} operation=transition status="Done"
"""

import sys
import json
import base64


def request(url, user, passwd, data=None, method=None):
    """Issue an authenticated JSON request against the JIRA REST API.

    Returns the decoded response body (or {} for an empty body); fails the
    module on any non-200/204 HTTP status.
    """
    if data:
        data = json.dumps(data)

    # NOTE: fetch_url uses a password manager, which follows the
    # standard request-then-challenge basic-auth semantics. However as
    # JIRA allows some unauthorised operations it doesn't necessarily
    # send the challenge, so the request occurs as the anonymous user,
    # resulting in unexpected results. To work around this we manually
    # inject the basic-auth header up-front to ensure that JIRA treats
    # the requests as authorized for this user.
    # base64.encodestring() was removed in Python 3.9; b64encode() is the
    # portable replacement and emits no trailing newline.
    auth = base64.b64encode(('%s:%s' % (user, passwd)).encode('utf-8')).decode('ascii')
    response, info = fetch_url(module, url, data=data, method=method,
                               headers={'Content-Type': 'application/json',
                                        'Authorization': "Basic %s" % auth})

    if info['status'] not in (200, 204):
        module.fail_json(msg=info['msg'])

    body = response.read()

    if body:
        return json.loads(body)
    else:
        return {}


def post(url, user, passwd, data):
    """POST helper around request()."""
    return request(url, user, passwd, data=data, method='POST')


def put(url, user, passwd, data):
    """PUT helper around request()."""
    return request(url, user, passwd, data=data, method='PUT')


def get(url, user, passwd):
    """GET helper around request()."""
    return request(url, user, passwd)


def create(restbase, user, passwd, params):
    """Create a new issue from project/summary/description/issuetype."""
    createfields = {
        'project': {'key': params['project']},
        'summary': params['summary'],
        'description': params['description'],
        'issuetype': {'name': params['issuetype']}}

    # Merge in any additional or overridden fields
    if params['fields']:
        createfields.update(params['fields'])

    data = {'fields': createfields}

    url = restbase + '/issue/'

    ret = post(url, user, passwd, data)

    return ret


def comment(restbase, user, passwd, params):
    """Add a comment to an existing issue."""
    data = {
        'body': params['comment']
    }

    url = restbase + '/issue/' + params['issue'] + '/comment'

    ret = post(url, user, passwd, data)

    return ret


def edit(restbase, user, passwd, params):
    """Update fields on an existing issue."""
    data = {
        'fields': params['fields']
    }

    url = restbase + '/issue/' + params['issue']

    ret = put(url, user, passwd, data)

    return ret


def fetch(restbase, user, passwd, params):
    """Retrieve an existing issue."""
    url = restbase + '/issue/' + params['issue']
    ret = get(url, user, passwd)
    return ret


def transition(restbase, user, passwd, params):
    """Move an issue to the named target status.

    Looks up the transition id for *status* first, then performs it.
    Raises ValueError when no transition to that status exists.
    """
    # Find the transition id
    turl = restbase + '/issue/' + params['issue'] + "/transitions"
    tmeta = get(turl, user, passwd)

    target = params['status']
    tid = None
    for t in tmeta['transitions']:
        if t['name'] == target:
            tid = t['id']
            break

    if not tid:
        raise ValueError("Failed to find a valid transition for '%s'" % target)

    # Perform it
    url = restbase + '/issue/' + params['issue'] + "/transitions"
    data = {'transition': {"id": tid},
            'fields': params['fields']}

    ret = post(url, user, passwd, data)

    return ret


# Some parameters are required depending on the operation.
# BUG FIX: edit and transition both build their URL from params['issue'],
# so 'issue' is required for them as well.
OP_REQUIRED = dict(create=['project', 'issuetype', 'summary', 'description'],
                   comment=['issue', 'comment'],
                   edit=['issue'],
                   fetch=['issue'],
                   transition=['issue', 'status'])


def main():
    """Entry point: validate per-operation parameters and dispatch."""
    global module
    module = AnsibleModule(
        argument_spec=dict(
            uri=dict(required=True),
            operation=dict(choices=['create', 'comment', 'edit', 'fetch', 'transition'],
                           aliases=['command'], required=True),
            username=dict(required=True),
            password=dict(required=True),
            project=dict(),
            summary=dict(),
            description=dict(),
            issuetype=dict(),
            issue=dict(aliases=['ticket']),
            comment=dict(),
            status=dict(),
            assignee=dict(),
            fields=dict(default={})
        ),
        supports_check_mode=False
    )

    op = module.params['operation']

    # Check we have the necessary per-operation parameters
    missing = []
    for parm in OP_REQUIRED[op]:
        if not module.params[parm]:
            missing.append(parm)
    if missing:
        module.fail_json(msg="Operation %s require the following missing parameters: %s" % (op, ",".join(missing)))

    # Handle rest of parameters
    uri = module.params['uri']
    user = module.params['username']
    passwd = module.params['password']
    if module.params['assignee']:
        module.params['fields']['assignee'] = {'name': module.params['assignee']}

    if not uri.endswith('/'):
        uri = uri + '/'
    restbase = uri + 'rest/api/2'

    # Dispatch
    try:

        # Lookup the corresponding method for this operation. This is
        # safe as the AnsibleModule should remove any unknown operations.
        thismod = sys.modules[__name__]
        method = getattr(thismod, op)

        ret = method(restbase, user, passwd, module.params)

    except Exception as e:
        # str(e) instead of e.message: the .message attribute was removed
        # from exceptions in Python 3.
        return module.fail_json(msg=str(e))

    module.exit_json(changed=True, meta=ret)


# Guarded so the module can be imported without executing.
if __name__ == '__main__':
    from ansible.module_utils.basic import *
    from ansible.module_utils.urls import *
    main()
    required: true
    default: null
  config:
    description:
      - The supervisor configuration file path
    required: false
    default: null
    version_added: "1.3"
  server_url:
    description:
      - URL on which supervisord server is listening
    required: false
    default: null
    version_added: "1.3"
  username:
    description:
      - username to use for authentication
    required: false
    default: null
    version_added: "1.3"
  password:
    description:
      - password to use for authentication
    required: false
    default: null
    version_added: "1.3"
  state:
    description:
      - The desired state of program/group.
    required: true
    default: null
    choices: [ "present", "started", "stopped", "restarted" ]
  supervisorctl_path:
    description:
      - path to supervisorctl executable
    required: false
    default: null
    version_added: "1.4"
notes:
  - When C(state) = I(present), the module will call C(supervisorctl reread) then C(supervisorctl add) if the program/group does not exist.
  - When C(state) = I(restarted), the module will call C(supervisorctl update) then call C(supervisorctl restart).
requirements: [ "supervisorctl" ]
author: Matt Wright, Aaron Wang
'''

EXAMPLES = '''
# Manage the state of program to be in 'started' state.
- supervisorctl: name=my_app state=started

# Manage the state of program group to be in 'started' state.
- supervisorctl: name='my_apps:' state=started

# Restart my_app, reading supervisorctl configuration from a specified file.
- supervisorctl: name=my_app state=restarted config=/var/opt/my_project/supervisord.conf

# Restart my_app, connecting to supervisord with credentials and server URL.
- supervisorctl: name=my_app state=restarted username=test password=testpass server_url=http://localhost:9001
'''


def main():
    """Entry point: drive supervisorctl to reach the requested state."""
    arg_spec = dict(
        name=dict(required=True),
        config=dict(required=False),
        server_url=dict(required=False),
        username=dict(required=False),
        password=dict(required=False),
        supervisorctl_path=dict(required=False),
        state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped'])
    )

    module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)

    name = module.params['name']
    # A trailing colon means the name refers to a process group.
    is_group = False
    if name.endswith(':'):
        is_group = True
        name = name.rstrip(':')
    state = module.params['state']
    config = module.params.get('config')
    server_url = module.params.get('server_url')
    username = module.params.get('username')
    password = module.params.get('password')
    supervisorctl_path = module.params.get('supervisorctl_path')

    if supervisorctl_path:
        supervisorctl_path = os.path.expanduser(supervisorctl_path)
        if os.path.exists(supervisorctl_path) and module.is_executable(supervisorctl_path):
            supervisorctl_args = [supervisorctl_path]
        else:
            module.fail_json(
                msg="Provided path to supervisorctl does not exist or isn't executable: %s" % supervisorctl_path)
    else:
        supervisorctl_args = [module.get_bin_path('supervisorctl', True)]

    # Global connection options precede the subcommand.
    if config:
        supervisorctl_args.extend(['-c', os.path.expanduser(config)])
    if server_url:
        supervisorctl_args.extend(['-s', server_url])
    if username:
        supervisorctl_args.extend(['-u', username])
    if password:
        supervisorctl_args.extend(['-p', password])

    def run_supervisorctl(cmd, name=None, **kwargs):
        # Build and run "supervisorctl <global opts> <cmd> [name]".
        args = list(supervisorctl_args)  # copy the master args
        args.append(cmd)
        if name:
            args.append(name)
        return module.run_command(args, **kwargs)

    def get_matched_processes():
        # Parse `supervisorctl status` output into (process_name, status)
        # pairs matching the requested program or group.
        matched = []
        rc, out, err = run_supervisorctl('status')
        for line in out.splitlines():
            # One status line may look like one of these two:
            # process not in group:
            #   echo_date_lonely RUNNING pid 7680, uptime 13:22:18
            # process in group:
            #   echo_date_group:echo_date_00 RUNNING pid 7681, uptime 13:22:18
            fields = [field for field in line.split(' ') if field != '']
            process_name = fields[0]
            status = fields[1]

            if is_group:
                # If there is ':', this process must be in a group.
                if ':' in process_name:
                    group = process_name.split(':')[0]
                    if group != name:
                        continue
                else:
                    continue
            else:
                if process_name != name:
                    continue

            matched.append((process_name, status))
        return matched

    def take_action_on_processes(processes, status_filter, action, expected_result):
        # Run <action> on every matched process whose status passes the
        # filter; exits the module with the aggregate result.
        to_take_action_on = []
        for process_name, status in processes:
            if status_filter(status):
                to_take_action_on.append(process_name)

        if len(to_take_action_on) == 0:
            module.exit_json(changed=False, name=name, state=state)
        if module.check_mode:
            module.exit_json(changed=True)
        for process_name in to_take_action_on:
            rc, out, err = run_supervisorctl(action, process_name)
            if '%s: %s' % (process_name, expected_result) not in out:
                module.fail_json(msg=out)

        module.exit_json(changed=True, name=name, state=state, affected=to_take_action_on)

    if state == 'restarted':
        # Pick up config changes before restarting everything matched.
        rc, out, err = run_supervisorctl('update')
        processes = get_matched_processes()
        take_action_on_processes(processes, lambda s: True, 'restart', 'started')

    processes = get_matched_processes()

    if state == 'present':
        if len(processes) > 0:
            module.exit_json(changed=False, name=name, state=state)

        if module.check_mode:
            module.exit_json(changed=True)
        run_supervisorctl('reread', check_rc=True)
        rc, out, err = run_supervisorctl('add', name)
        if '%s: added process group' % name in out:
            module.exit_json(changed=True, name=name, state=state)
        else:
            module.fail_json(msg=out, name=name, state=state)

    if state == 'started':
        take_action_on_processes(processes, lambda s: s != 'RUNNING', 'start', 'started')

    if state == 'stopped':
        take_action_on_processes(processes, lambda s: s == 'RUNNING', 'stop', 'stopped')

# import module snippets
from ansible.module_utils.basic import *

main()
$formattednetcfg

# Basic host identity facts.
Set-Attr $result.ansible_facts "ansible_hostname" $env:COMPUTERNAME;
Set-Attr $result.ansible_facts "ansible_fqdn" "$([System.Net.Dns]::GetHostByName((hostname)).HostName)"
Set-Attr $result.ansible_facts "ansible_system" $osversion.Platform.ToString()
Set-Attr $result.ansible_facts "ansible_os_family" "Windows"
Set-Attr $result.ansible_facts "ansible_distribution" $osversion.VersionString
Set-Attr $result.ansible_facts "ansible_distribution_version" $osversion.Version.ToString()

Set-Attr $result.ansible_facts "ansible_totalmem" $capacity

# Collect every bound IP address across all network adapters.
$ips = @()
Foreach ($ip in $netcfg.IPAddress) { If ($ip) { $ips += $ip } }
Set-Attr $result.ansible_facts "ansible_ip_addresses" $ips

$psversion = $PSVersionTable.PSVersion.Major
Set-Attr $result.ansible_facts "ansible_powershell_version" $psversion

# Walk the WSMan listener tree looking for an HTTPS transport entry; from
# it, derive the listener path, then its certificate thumbprint, then the
# certificate's expiry date (each step guarded in case the previous one
# found nothing).
$winrm_https_listener_parent_path = Get-ChildItem -Path WSMan:\localhost\Listener -Recurse | Where-Object {$_.PSChildName -eq "Transport" -and $_.Value -eq "HTTPS"} | select PSParentPath

if ($winrm_https_listener_parent_path ) {
    $winrm_https_listener_path = $winrm_https_listener_parent_path.PSParentPath.Substring($winrm_https_listener_parent_path.PSParentPath.LastIndexOf("\"))
}

if ($winrm_https_listener_path)
{
    $https_listener = Get-ChildItem -Path "WSMan:\localhost\Listener$winrm_https_listener_path"
}

if ($https_listener)
{
    $winrm_cert_thumbprint = $https_listener | where {$_.Name -EQ "CertificateThumbprint" } | select Value
}

if ($winrm_cert_thumbprint)
{
    # Cert store lookups are case-sensitive on the thumbprint.
    $uppercase_cert_thumbprint = $winrm_cert_thumbprint.Value.ToString().ToUpper()
}

$winrm_cert_expiry = Get-ChildItem -Path Cert:\LocalMachine\My | where Thumbprint -EQ $uppercase_cert_thumbprint | select NotAfter

if ($winrm_cert_expiry)
{
    Set-Attr $result.ansible_facts "ansible_winrm_certificate_expires" $winrm_cert_expiry.NotAfter.ToString("yyyy-MM-dd HH:mm:ss")
}

Exit-Json $result;
deleted file mode 100644 index edf1da7635..0000000000 --- a/library/windows/slurp.ps1 +++ /dev/null @@ -1,46 +0,0 @@ -#!powershell -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# WANT_JSON -# POWERSHELL_COMMON - -$params = Parse-Args $args; - -$src = Get-Attr $params "src" (Get-Attr $params "path" $FALSE); -If (-not $src) -{ - Fail-Json (New-Object psobject) "missing required argument: src"; -} - -If (Test-Path -PathType Leaf $src) -{ - $bytes = [System.IO.File]::ReadAllBytes($src); - $content = [System.Convert]::ToBase64String($bytes); - $result = New-Object psobject @{ - changed = $false - encoding = "base64" - content = $content - }; - Exit-Json $result; -} -ElseIf (Test-Path -PathType Container $src) -{ - Fail-Json (New-Object psobject) ("is a directory: " + $src); -} -Else -{ - Fail-Json (New-Object psobject) ("file not found: " + $src); -} diff --git a/library/windows/win_feature b/library/windows/win_feature deleted file mode 100644 index ef344ee3b2..0000000000 --- a/library/windows/win_feature +++ /dev/null @@ -1,97 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, Paul Durivage , Trond Hindenes and others -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your 
option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# this is a windows documentation stub. actual code lives in the .ps1 -# file of the same name - -DOCUMENTATION = ''' ---- -module: win_feature -version_added: "1.7" -short_description: Installs and uninstalls Windows Features -description: - - Installs or uninstalls Windows Roles or Features -options: - name: - description: - - Names of roles or features to install as a single feature or a comma-separated list of features - required: true - default: null - aliases: [] - state: - description: - - State of the features or roles on the system - required: false - choices: - - present - - absent - default: present - aliases: [] - restart: - description: - - Restarts the computer automatically when installation is complete, if restarting is required by the roles or features installed. - choices: - - yes - - no - default: null - aliases: [] - include_sub_features: - description: - - Adds all subfeatures of the specified feature - choices: - - yes - - no - default: null - aliases: [] - include_management_tools: - description: - - Adds the corresponding management tools to the specified feature - choices: - - yes - - no - default: null - aliases: [] -author: Paul Durivage / Trond Hindenes -''' - -EXAMPLES = ''' -# This installs IIS. 
-# The names of features available for install can be run by running the following Powershell Command: -# PS C:\Users\Administrator> Import-Module ServerManager; Get-WindowsFeature -$ ansible -i hosts -m win_feature -a "name=Web-Server" all -$ ansible -i hosts -m win_feature -a "name=Web-Server,Web-Common-Http" all - - -# Playbook example ---- -- name: Install IIS - hosts: all - gather_facts: false - tasks: - - name: Install IIS - win_feature: - name: "Web-Server" - state: absent - restart: yes - include_sub_features: yes - include_management_tools: yes - - -''' diff --git a/library/windows/win_feature.ps1 b/library/windows/win_feature.ps1 deleted file mode 100644 index a0776a4bf1..0000000000 --- a/library/windows/win_feature.ps1 +++ /dev/null @@ -1,122 +0,0 @@ -#!powershell -# This file is part of Ansible. -# -# Copyright 2014, Paul Durivage -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -# WANT_JSON -# POWERSHELL_COMMON - -Import-Module Servermanager; - -$params = Parse-Args $args; - -$result = New-Object psobject @{ - changed = $false -} - -If ($params.name) { - $name = $params.name -} -Else { - Fail-Json $result "mising required argument: name" -} - -If ($params.state) { - $state = $params.state.ToString().ToLower() - If (($state -ne 'present') -and ($state -ne 'absent')) { - Fail-Json $result "state is '$state'; must be 'present' or 'absent'" - } -} -Elseif (!$params.state) { - $state = "present" -} - -If ($params.restart) { - $restart = $params.restart | ConvertTo-Bool -} -Else -{ - $restart = $false -} - -if ($params.include_sub_features) -{ - $includesubfeatures = $params.include_sub_features | ConvertTo-Bool -} -Else -{ - $includesubfeatures = $false -} - -if ($params.include_management_tools) -{ - $includemanagementtools = $params.include_management_tools | ConvertTo-Bool -} -Else -{ - $includemanagementtools = $false -} - - - -If ($state -eq "present") { - try { - $featureresult = Add-WindowsFeature -Name $name -Restart:$restart -IncludeAllSubFeature:$includesubfeatures -IncludeManagementTools:$includemanagementtools - } - catch { - Fail-Json $result $_.Exception.Message - } -} -Elseif ($state -eq "absent") { - try { - $featureresult = Remove-WindowsFeature -Name $name -Restart:$restart - } - catch { - Fail-Json $result $_.Exception.Message - } -} - -# Loop through results and create a hash containing details about -# each role/feature that is installed/removed -$installed_features = @() -#$featureresult.featureresult is filled if anything was changed -if ($featureresult.FeatureResult) -{ - ForEach ($item in $featureresult.FeatureResult) { - $installed_features += New-Object psobject @{ - id = $item.id.ToString() - display_name = $item.DisplayName - message = $item.Message.ToString() - restart_needed = $item.RestartNeeded.ToString() - skip_reason = $item.SkipReason.ToString() - success = $item.Success.ToString() - } - } - Set-Attr 
$result "feature_result" $installed_features - - - $result.changed = $true -} -Else -{ - Set-Attr $result "feature_result" $null -} - -Set-Attr $result "feature_success" $featureresult.Success.ToString() -Set-Attr $result "feature_exitcode" $featureresult.ExitCode.ToString() -Set-Attr $result "feature_restart_needed" $featureresult.RestartNeeded.ToString() - -Exit-Json $result; diff --git a/library/windows/win_get_url b/library/windows/win_get_url deleted file mode 100644 index 10910cf605..0000000000 --- a/library/windows/win_get_url +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, Paul Durivage , and others -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# this is a windows documentation stub. actual code lives in the .ps1 -# file of the same name - -DOCUMENTATION = ''' ---- -module: win_get_url -version_added: "1.7" -short_description: Fetches a file from a given URL -description: - - Fetches a file from a URL and saves to locally -options: - url: - description: - - The full URL of a file to download - required: true - default: null - aliases: [] - dest: - description: - - The absolute path of the location to save the file at the URL. Be sure to include a filename and extension as appropriate. 
- required: false - default: yes - aliases: [] -author: Paul Durivage -''' - -EXAMPLES = ''' -# Downloading a JPEG and saving it to a file with the ansible command. -# Note the "dest" is quoted rather instead of escaping the backslashes -$ ansible -i hosts -c winrm -m win_get_url -a "url=http://www.example.com/earthrise.jpg dest='C:\Users\Administrator\earthrise.jpg'" all - -# Playbook example -- name: Download earthrise.jpg to 'C:\Users\RandomUser\earthrise.jpg' - win_get_url: - url: 'http://www.example.com/earthrise.jpg' - dest: 'C:\Users\RandomUser\earthrise.jpg' -''' diff --git a/library/windows/win_get_url.ps1 b/library/windows/win_get_url.ps1 deleted file mode 100644 index b555cc7a52..0000000000 --- a/library/windows/win_get_url.ps1 +++ /dev/null @@ -1,56 +0,0 @@ -#!powershell -# This file is part of Ansible. -# -# Copyright 2014, Paul Durivage -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -# WANT_JSON -# POWERSHELL_COMMON - -$params = Parse-Args $args; - -$result = New-Object psobject @{ - win_get_url = New-Object psobject - changed = $false -} - -If ($params.url) { - $url = $params.url -} -Else { - Fail-Json $result "mising required argument: url" -} - -If ($params.dest) { - $dest = $params.dest -} -Else { - Fail-Json $result "missing required argument: dest" -} - -$client = New-Object System.Net.WebClient - -Try { - $client.DownloadFile($url, $dest) - $result.changed = $true -} -Catch { - Fail-Json $result "Error downloading $url to $dest" -} - -Set-Attr $result.win_get_url "url" $url -Set-Attr $result.win_get_url "dest" $dest - -Exit-Json $result; diff --git a/library/windows/win_group b/library/windows/win_group deleted file mode 100644 index 2013b52be5..0000000000 --- a/library/windows/win_group +++ /dev/null @@ -1,67 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, Chris Hoffman -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# this is a windows documentation stub. 
actual code lives in the .ps1 -# file of the same name - -DOCUMENTATION = ''' ---- -module: win_group -version_added: "1.7" -short_description: Add and remove local groups -description: - - Add and remove local groups -options: - name: - description: - - Name of the group - required: true - default: null - aliases: [] - description: - description: - - Description of the group - required: false - default: null - aliases: [] - state: - description: - - Create or remove the group - required: false - choices: - - present - - absent - default: present - aliases: [] -author: Chris Hoffman -''' - -EXAMPLES = ''' - # Create a new group - win_group: - name: deploy - description: Deploy Group - state: present - - # Remove a group - win_group: - name: deploy - state: absent -''' diff --git a/library/windows/win_group.ps1 b/library/windows/win_group.ps1 deleted file mode 100644 index febaf47d01..0000000000 --- a/library/windows/win_group.ps1 +++ /dev/null @@ -1,70 +0,0 @@ -#!powershell -# This file is part of Ansible -# -# Copyright 2014, Chris Hoffman -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -# WANT_JSON -# POWERSHELL_COMMON - -$params = Parse-Args $args; - -$result = New-Object PSObject; -Set-Attr $result "changed" $false; - -If (-not $params.name.GetType) { - Fail-Json $result "missing required arguments: name" -} - -If ($params.state) { - $state = $params.state.ToString().ToLower() - If (($state -ne "present") -and ($state -ne "absent")) { - Fail-Json $result "state is '$state'; must be 'present' or 'absent'" - } -} -Elseif (-not $params.state) { - $state = "present" -} - -$adsi = [ADSI]"WinNT://$env:COMPUTERNAME" -$group = $adsi.Children | Where-Object {$_.SchemaClassName -eq 'group' -and $_.Name -eq $params.name } - -try { - If ($state -eq "present") { - If (-not $group) { - $group = $adsi.Create("Group", $params.name) - $group.SetInfo() - - Set-Attr $result "changed" $true - } - - If ($params.description.GetType) { - IF (-not $group.description -or $group.description -ne $params.description) { - $group.description = $params.description - $group.SetInfo() - Set-Attr $result "changed" $true - } - } - } - ElseIf ($state -eq "absent" -and $group) { - $adsi.delete("Group", $group.Name.Value) - Set-Attr $result "changed" $true - } -} -catch { - Fail-Json $result $_.Exception.Message -} - -Exit-Json $result diff --git a/library/windows/win_msi b/library/windows/win_msi deleted file mode 100644 index 9eb6f1bafa..0000000000 --- a/library/windows/win_msi +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, Matt Martz , and others -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# this is a windows documentation stub. actual code lives in the .ps1 -# file of the same name - -DOCUMENTATION = ''' ---- -module: win_msi -version_added: "1.7" -short_description: Installs and uninstalls Windows MSI files -description: - - Installs or uninstalls a Windows MSI file that is already located on the - target server -options: - path: - description: - - File system path to the MSI file to install - required: true - state: - description: - - Whether the MSI file should be installed or uninstalled - choices: - - present - - absent - default: present - creates: - description: - - Path to a file created by installing the MSI to prevent from - attempting to reinstall the package on every run -author: Matt Martz -''' - -EXAMPLES = ''' -# Install an MSI file -- win_msi: path=C:\\\\7z920-x64.msi - -# Uninstall an MSI file -- win_msi: path=C:\\\\7z920-x64.msi state=absent -''' - diff --git a/library/windows/win_msi.ps1 b/library/windows/win_msi.ps1 deleted file mode 100644 index 1c2bc8a301..0000000000 --- a/library/windows/win_msi.ps1 +++ /dev/null @@ -1,63 +0,0 @@ -#!powershell -# (c) 2014, Matt Martz , and others -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -# WANT_JSON -# POWERSHELL_COMMON - -$params = Parse-Args $args; - -$result = New-Object psobject; -Set-Attr $result "changed" $false; - -If (-not $params.path.GetType) -{ - Fail-Json $result "missing required arguments: path" -} - -$extra_args = "" -If ($params.extra_args.GetType) -{ - $extra_args = $params.extra_args; -} - -If ($params.creates.GetType -and $params.state.GetType -and $params.state -ne "absent") -{ - If (Test-File $creates) - { - Exit-Json $result; - } -} - -$logfile = [IO.Path]::GetTempFileName(); -if ($params.state.GetType -and $params.state -eq "absent") -{ - msiexec.exe /x $params.path /qb /l $logfile $extra_args; -} -Else -{ - msiexec.exe /i $params.path /qb /l $logfile $extra_args; -} - -Set-Attr $result "changed" $true; - -$logcontents = Get-Content $logfile; -Remove-Item $logfile; - -Set-Attr $result "log" $logcontents; - -Exit-Json $result; diff --git a/library/windows/win_ping b/library/windows/win_ping deleted file mode 100644 index de32877d61..0000000000 --- a/library/windows/win_ping +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan , and others -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# this is a windows documentation stub. 
actual code lives in the .ps1 -# file of the same name - -DOCUMENTATION = ''' ---- -module: win_ping -version_added: "1.7" -short_description: A windows version of the classic ping module. -description: - - Checks management connectivity of a windows host -options: - data: - description: - - Alternate data to return instead of 'pong' - required: false - default: 'pong' - aliases: [] -author: Chris Church -''' - -EXAMPLES = ''' -# Test connectivity to a windows host -ansible winserver -m win_ping - -# Example from an Ansible Playbook -- action: win_ping -''' - diff --git a/library/windows/win_ping.ps1 b/library/windows/win_ping.ps1 deleted file mode 100644 index 98f1415e29..0000000000 --- a/library/windows/win_ping.ps1 +++ /dev/null @@ -1,29 +0,0 @@ -#!powershell -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -# WANT_JSON -# POWERSHELL_COMMON - -$params = Parse-Args $args; - -$data = Get-Attr $params "data" "pong"; - -$result = New-Object psobject @{ - changed = $false - ping = $data -}; - -Exit-Json $result; diff --git a/library/windows/win_service b/library/windows/win_service deleted file mode 100644 index c378be120b..0000000000 --- a/library/windows/win_service +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, Chris Hoffman -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# this is a windows documentation stub. actual code lives in the .ps1 -# file of the same name - -DOCUMENTATION = ''' ---- -module: win_service -version_added: "1.7" -short_description: Manages Windows services -description: - - Manages Windows services -options: - name: - description: - - Name of the service - required: true - default: null - aliases: [] - start_mode: - description: - - Set the startup type for the service - required: false - choices: - - auto - - manual - - disabled - state: - description: - - C(started)/C(stopped) are idempotent actions that will not run - commands unless necessary. C(restarted) will always bounce the - service. 
- required: false - choices: - - started - - stopped - - restarted - default: null - aliases: [] -author: Chris Hoffman -''' - -EXAMPLES = ''' - # Restart a service - win_service: - name: spooler - state: restarted - - # Set service startup mode to auto and ensure it is started - win_service: - name: spooler - start_mode: auto - state: started -''' diff --git a/library/windows/win_service.ps1 b/library/windows/win_service.ps1 deleted file mode 100644 index a70d82a4ef..0000000000 --- a/library/windows/win_service.ps1 +++ /dev/null @@ -1,106 +0,0 @@ -#!powershell -# This file is part of Ansible -# -# Copyright 2014, Chris Hoffman -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -# WANT_JSON -# POWERSHELL_COMMON - -$params = Parse-Args $args; - -$result = New-Object PSObject; -Set-Attr $result "changed" $false; - -If (-not $params.name.GetType) -{ - Fail-Json $result "missing required arguments: name" -} - -If ($params.state) { - $state = $params.state.ToString().ToLower() - If (($state -ne 'started') -and ($state -ne 'stopped') -and ($state -ne 'restarted')) { - Fail-Json $result "state is '$state'; must be 'started', 'stopped', or 'restarted'" - } -} - -If ($params.start_mode) { - $startMode = $params.start_mode.ToString().ToLower() - If (($startMode -ne 'auto') -and ($startMode -ne 'manual') -and ($startMode -ne 'disabled')) { - Fail-Json $result "start mode is '$startMode'; must be 'auto', 'manual', or 'disabled'" - } -} - -$svcName = $params.name -$svc = Get-Service -Name $svcName -ErrorAction SilentlyContinue -If (-not $svc) { - Fail-Json $result "Service '$svcName' not installed" -} -# Use service name instead of display name for remaining actions. -If ($svcName -ne $svc.ServiceName) { - $svcName = $svc.ServiceName -} - -Set-Attr $result "name" $svc.ServiceName -Set-Attr $result "display_name" $svc.DisplayName - -$svcMode = Get-WmiObject -Class Win32_Service -Property StartMode -Filter "Name='$svcName'" -If ($startMode) { - If ($svcMode.StartMode.ToLower() -ne $startMode) { - Set-Service -Name $svcName -StartupType $startMode - Set-Attr $result "changed" $true - Set-Attr $result "start_mode" $startMode - } - Else { - Set-Attr $result "start_mode" $svcMode.StartMode.ToLower() - } -} -Else { - Set-Attr $result "start_mode" $svcMode.StartMode.ToLower() -} - -If ($state) { - If ($state -eq "started" -and $svc.Status -ne "Running") { - try { - Start-Service -Name $svcName -ErrorAction Stop - } - catch { - Fail-Json $result $_.Exception.Message - } - Set-Attr $result "changed" $true; - } - ElseIf ($state -eq "stopped" -and $svc.Status -ne "Stopped") { - try { - Stop-Service -Name $svcName -ErrorAction Stop - } - catch { - Fail-Json 
$result $_.Exception.Message - } - Set-Attr $result "changed" $true; - } - ElseIf ($state -eq "restarted") { - try { - Restart-Service -Name $svcName -ErrorAction Stop - } - catch { - Fail-Json $result $_.Exception.Message - } - Set-Attr $result "changed" $true; - } -} -$svc.Refresh() -Set-Attr $result "state" $svc.Status.ToString().ToLower() - -Exit-Json $result; diff --git a/library/windows/win_stat b/library/windows/win_stat deleted file mode 100644 index c98cd55f59..0000000000 --- a/library/windows/win_stat +++ /dev/null @@ -1,52 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# this is a windows documentation stub, actual code lives in the .ps1 -# file of the same name - -DOCUMENTATION = ''' ---- -module: win_stat -version_added: "1.7" -short_description: returns information about a Windows file -description: - - Returns information about a Windows file -options: - path: - description: - - The full path of the file/object to get the facts of; both forward and - back slashes are accepted. 
- required: true - default: null - aliases: [] - get_md5: - description: - - Whether to return the md5 sum of the file - required: false - default: yes - aliases: [] -author: Chris Church -''' - -EXAMPLES = ''' -# Obtain information about a file - -- win_stat: path=C:\\foo.ini - register: file_info - -- debug: var=file_info -''' - diff --git a/library/windows/win_stat.ps1 b/library/windows/win_stat.ps1 deleted file mode 100644 index 4e4c55b2aa..0000000000 --- a/library/windows/win_stat.ps1 +++ /dev/null @@ -1,63 +0,0 @@ -#!powershell -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# WANT_JSON -# POWERSHELL_COMMON - -$params = Parse-Args $args; - -$path = Get-Attr $params "path" $FALSE; -If ($path -eq $FALSE) -{ - Fail-Json (New-Object psobject) "missing required argument: path"; -} - -$get_md5 = Get-Attr $params "get_md5" $TRUE | ConvertTo-Bool; - -$result = New-Object psobject @{ - stat = New-Object psobject - changed = $false -}; - -If (Test-Path $path) -{ - Set-Attr $result.stat "exists" $TRUE; - $info = Get-Item $path; - If ($info.Directory) # Only files have the .Directory attribute. 
- { - Set-Attr $result.stat "isdir" $FALSE; - Set-Attr $result.stat "size" $info.Length; - } - Else - { - Set-Attr $result.stat "isdir" $TRUE; - } -} -Else -{ - Set-Attr $result.stat "exists" $FALSE; -} - -If ($get_md5 -and $result.stat.exists -and -not $result.stat.isdir) -{ - $sp = new-object -TypeName System.Security.Cryptography.MD5CryptoServiceProvider; - $fp = [System.IO.File]::Open($path, [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read); - $hash = [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower(); - $fp.Dispose(); - Set-Attr $result.stat "md5" $hash; -} - -Exit-Json $result; diff --git a/library/windows/win_user b/library/windows/win_user deleted file mode 100644 index e2da6a1ddb..0000000000 --- a/library/windows/win_user +++ /dev/null @@ -1,71 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2014, Matt Martz , and others -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# this is a windows documentation stub. 
actual code lives in the .ps1 -# file of the same name - -DOCUMENTATION = ''' ---- -module: win_user -version_added: "1.7" -short_description: Manages local Windows user accounts -description: - - Manages local Windows user accounts -options: - name: - description: - - Username of the user to manage - required: true - default: null - aliases: [] - password: - description: - - Password for the user (plain text) - required: true - default: null - aliases: [] - state: - description: - - Whether to create or delete a user - required: false - choices: - - present - - absent - default: present - aliases: [] -author: Paul Durivage -''' - -EXAMPLES = ''' -# Ad-hoc example -$ ansible -i hosts -m win_user -a "name=bob password=Password12345" all -$ ansible -i hosts -m win_user -a "name=bob password=Password12345 state=absent" all - -# Playbook example ---- -- name: Add a user - hosts: all - gather_facts: false - tasks: - - name: Add User - win_user: - name: ansible - password: "@ns1bl3" -''' diff --git a/library/windows/win_user.ps1 b/library/windows/win_user.ps1 deleted file mode 100644 index 306d7a0db2..0000000000 --- a/library/windows/win_user.ps1 +++ /dev/null @@ -1,116 +0,0 @@ -#!powershell -# This file is part of Ansible -# -# Copyright 2014, Paul Durivage -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -# WANT_JSON -# POWERSHELL_COMMON - -######## -$adsi = [ADSI]"WinNT://$env:COMPUTERNAME" - -function Get-User($user) { - $adsi.Children | where {$_.SchemaClassName -eq 'user' -and $_.Name -eq $user } - return -} - -function Create-User([string]$user, [string]$passwd) { - $adsiuser = $adsi.Create("User", $user) - $adsiuser.SetPassword($passwd) - $adsiuser.SetInfo() - $adsiuser - return -} - -function Update-Password($user, [string]$passwd) { - $user.SetPassword($passwd) - $user.SetInfo() -} - -function Delete-User($user) { - $adsi.delete("user", $user.Name.Value) -} -######## - -$params = Parse-Args $args; - -$result = New-Object psobject @{ - changed = $false -}; - -If (-not $params.name.GetType) -{ - Fail-Json $result "missing required arguments: name" -} - -If ($params.state) { - $state = $params.state.ToString().ToLower() - If (($state -ne 'present') -and ($state -ne 'absent')) { - Fail-Json $result "state is '$state'; must be 'present' or 'absent'" - } -} -Elseif (!$params.state) { - $state = "present" -} - -If ((-not $params.password.GetType) -and ($state -eq 'present')) -{ - Fail-Json $result "missing required arguments: password" -} - -$username = Get-Attr $params "name" -$password = Get-Attr $params "password" - -$user_obj = Get-User $username - -if ($state -eq 'present') { - # Add or update user - try { - if ($user_obj.GetType) { - Update-Password $user_obj $password - } - else { - Create-User $username $password - } - $result.changed = $true - $user_obj = Get-User $username - } - catch { - Fail-Json $result $_.Exception.Message - } -} -else { - # Remove user - try { - if ($user_obj.GetType) { - Delete-User $user_obj - $result.changed = $true - } - else { - Set-Attr $result "msg" "User '$username' was not found" - } - } - catch { - Fail-Json $result $_.Exception.Message - } -} - -# Set-Attr $result "user" $user_obj -Set-Attr $result "user_name" $user_obj.Name -Set-Attr $result "user_fullname" $user_obj.FullName -Set-Attr $result "user_path" 
$user_obj.Path - -Exit-Json $result; diff --git a/setup.py b/setup.py index de4b251fee..872dbefadb 100644 --- a/setup.py +++ b/setup.py @@ -13,20 +13,6 @@ except ImportError: "Install it using your package manager (usually python-setuptools) or via pip (pip install setuptools)." sys.exit(1) -# find library modules -from ansible.constants import DEFAULT_MODULE_PATH -module_paths = DEFAULT_MODULE_PATH.split(os.pathsep) -# always install in /usr/share/ansible if specified -# otherwise use the first module path listed -if '/usr/share/ansible' in module_paths: - install_path = '/usr/share/ansible' -else: - install_path = module_paths[0] -dirs=os.listdir("./library/") -data_files = [] -for i in dirs: - data_files.append((os.path.join(install_path, i), glob('./library/' + i + '/*'))) - setup(name='ansible', version=__version__, description='Radically simple IT automation', @@ -64,5 +50,5 @@ setup(name='ansible', 'bin/ansible-galaxy', 'bin/ansible-vault', ], - data_files=data_files + data_files=[], ) From bceb0026a53cfa6fcf8f9e96fa694a01f2d22a1c Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Fri, 26 Sep 2014 17:10:13 -0400 Subject: [PATCH 088/813] Updating the module formatter to deal with the new repo structure. 
--- Makefile | 4 ++-- docsite/Makefile | 2 +- hacking/module_formatter.py | 17 +++++++++++------ lib/ansible/modules/core | 2 +- 4 files changed, 15 insertions(+), 10 deletions(-) diff --git a/Makefile b/Makefile index afd7162f96..d1e1e02e8a 100644 --- a/Makefile +++ b/Makefile @@ -91,7 +91,7 @@ NOSETESTS ?= nosetests all: clean python tests: - PYTHONPATH=./lib ANSIBLE_LIBRARY=./library $(NOSETESTS) -d -w test/units -v + PYTHONPATH=./lib ANSIBLE_LIBRARY=./lib/ansible/modules $(NOSETESTS) -d -w test/units -v authors: sh hacking/authors.sh @@ -114,7 +114,7 @@ pep8: @echo "# Running PEP8 Compliance Tests" @echo "#############################################" -pep8 -r --ignore=E501,E221,W291,W391,E302,E251,E203,W293,E231,E303,E201,E225,E261,E241 lib/ bin/ - -pep8 -r --ignore=E501,E221,W291,W391,E302,E251,E203,W293,E231,E303,E201,E225,E261,E241 --filename "*" library/ + # -pep8 -r --ignore=E501,E221,W291,W391,E302,E251,E203,W293,E231,E303,E201,E225,E261,E241 --filename "*" library/ pyflakes: pyflakes lib/ansible/*.py lib/ansible/*/*.py bin/* diff --git a/docsite/Makefile b/docsite/Makefile index f5d1b10c12..92129f7851 100644 --- a/docsite/Makefile +++ b/docsite/Makefile @@ -40,7 +40,7 @@ clean: .PHONEY: docs clean modules: $(FORMATTER) ../hacking/templates/rst.j2 - PYTHONPATH=../lib $(FORMATTER) -t rst --template-dir=../hacking/templates --module-dir=../library -o rst/ + PYTHONPATH=../lib $(FORMATTER) -t rst --template-dir=../hacking/templates --module-dir=../lib/ansible/modules -o rst/ staticmin: cat _themes/srtd/static/css/theme.css | sed -e 's/^[ \t]*//g; s/[ \t]*$$//g; s/\([:{;,]\) /\1/g; s/ {/{/g; s/\/\*.*\*\///g; /^$$/d' | sed -e :a -e '$$!N; s/\n\(.\)/\1/; ta' > _themes/srtd/static/css/theme.min.css diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py index f74d09ad72..c8c077a631 100755 --- a/hacking/module_formatter.py +++ b/hacking/module_formatter.py @@ -1,5 +1,6 @@ #!/usr/bin/env python # (c) 2012, Jan-Piet Mens +# (c) 2012-2014, Michael 
DeHaan and others # # This file is part of Ansible # @@ -44,7 +45,7 @@ TO_OLD_TO_BE_NOTABLE = 1.0 # Get parent directory of the directory this script lives in MODULEDIR=os.path.abspath(os.path.join( - os.path.dirname(os.path.realpath(__file__)), os.pardir, 'library' + os.path.dirname(os.path.realpath(__file__)), os.pardir, 'lib', 'ansible', 'modules' )) # The name of the DOCUMENTATION template @@ -106,7 +107,9 @@ def write_data(text, options, outputname, module): ''' dumps module output to a file or the screen, as requested ''' if options.output_dir is not None: - f = open(os.path.join(options.output_dir, outputname % module), 'w') + fname = os.path.join(options.output_dir, outputname % module) + fname = fname.replace(".py","") + f = open(fname, 'w') f.write(text.encode('utf-8')) f.close() else: @@ -114,23 +117,24 @@ def write_data(text, options, outputname, module): ##################################################################################### + def list_modules(module_dir): ''' returns a hash of categories, each category being a hash of module names to file paths ''' categories = dict(all=dict()) - files = glob.glob("%s/*" % module_dir) + files = glob.glob("%s/*/*" % module_dir) for d in files: if os.path.isdir(d): files2 = glob.glob("%s/*" % d) for f in files2: - if f.endswith(".ps1"): + if not f.endswith(".py") or f.endswith('__init__.py'): # windows powershell modules have documentation stubs in python docstring # format (they are not executed) so skip the ps1 format files continue tokens = f.split("/") - module = tokens[-1] + module = tokens[-1].replace(".py","") category = tokens[-2] if not category in categories: categories[category] = {} @@ -191,7 +195,7 @@ def process_module(module, options, env, template, outputname, module_map): fname = module_map[module] # ignore files with extensions - if "." 
in os.path.basename(fname): + if not os.path.basename(fname).endswith(".py"): return # use ansible core library to parse out doc metadata YAML and plaintext examples @@ -201,6 +205,7 @@ def process_module(module, options, env, template, outputname, module_map): if doc is None and module not in ansible.utils.module_docs.BLACKLIST_MODULES: sys.stderr.write("*** ERROR: CORE MODULE MISSING DOCUMENTATION: %s, %s ***\n" % (fname, module)) sys.exit(1) + if doc is None: return "SKIPPED" diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 385a037cd6..617a52b20d 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 385a037cd6bc42fc64e387973c0e7ef539b04df7 +Subproject commit 617a52b20d512a4eb5e88fdc76658b220ff80266 From e8fe306cef083e19e73b9669ba9bb8afea765c01 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Fri, 26 Sep 2014 17:52:50 -0400 Subject: [PATCH 089/813] Some various comments about the new repos, more to likely come. --- docsite/rst/community.rst | 16 +++++++----- docsite/rst/developing_modules.rst | 41 +++++++++++++++++++++++++----- docsite/rst/developing_test_pr.rst | 6 ++++- docsite/rst/intro_installation.rst | 8 +++++- hacking/module_formatter.py | 8 ++++++ hacking/templates/rst.j2 | 28 ++++++++++++++++++++ 6 files changed, 91 insertions(+), 16 deletions(-) diff --git a/docsite/rst/community.rst b/docsite/rst/community.rst index d75ec8d0bb..9448b7d1ea 100644 --- a/docsite/rst/community.rst +++ b/docsite/rst/community.rst @@ -62,11 +62,14 @@ I'd Like To Report A Bugs Ansible practices responsible disclosure - if this is a security related bug, email `security@ansible.com `_ instead of filing a ticket or posting to the Google Group and you will receive a prompt response. -Bugs should be reported to `github.com/ansible/ansible `_ after +Bugs related to the core language should be reported to `github.com/ansible/ansible `_ after signing up for a free github account. 
Before reporting a bug, please use the bug/issue search to see if the issue has already been reported. -When filing a bug, please use the `issue template `_ to provide all relevant information. +MODULE related bugs however should go to `ansible-modules-core `_ or `ansible-modules-extras `_ based on the classification of the module. This is listed on the bottom of the docs page for any module. + +When filing a bug, please use the `issue template `_ to provide all relevant information, regardless of what repo you are filing a ticket against. + Knowing your ansible version and the exact commands you are running, and what you expect, saves time and helps us help everyone with their issues more quickly. @@ -102,8 +105,7 @@ documenting a new feature, submit a github pull request to the code that lives in the “docsite/rst†subdirectory of the project for most pages, and there is an "Edit on GitHub" link up on those. -Module documentation is generated from a DOCUMENTATION structure embedded in the source code of each module -in the library/ directory. +Module documentation is generated from a DOCUMENTATION structure embedded in the source code of each module, which is in either the ansible-modules-core or ansible-modules-extra repos on github, depending on the module. Information about this is always listed on the bottom of the web documentation for each module. Aside from modules, the main docs are in restructured text format. @@ -131,9 +133,9 @@ Contributing Code (Features or Bugfixes) ---------------------------------------- The Ansible project keeps it’s source on github at -`github.com/ansible/ansible `_ - -and takes contributions through +`github.com/ansible/ansible `_ for the core application, and two sub repos ansible/ansible-modules-core and ansible/ansible-modules-extras for module related items. If you need to know if a module is in 'core' or 'extras', consult the web documentation page for that module. 
+ +The project takes contributions through `github pull requests `_. It is usually a good idea to join the ansible-devel list to discuss any large features prior to submission, and this especially helps in avoiding duplicate work or efforts where we decide, upon seeing a pull request for the first time, that revisions are needed. (This is not usually needed for module development, but can be nice for large changes). diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index 0b1695c90f..9fa35f4d3e 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -11,9 +11,17 @@ See :doc:`modules` for a list of various ones developed in core. Modules can be written in any language and are found in the path specified by `ANSIBLE_LIBRARY` or the ``--module-path`` command line option. +By default, everything that ships with ansible is pulled from its source tree, but +additional paths can be added. + +The directory "./library", alongside your top level playbooks, is also automatically +added as a search directory. + Should you develop an interesting Ansible module, consider sending a pull request to the -`github project `_ to see about getting your module -included in the core project. +`module-extras project `_. There's also a core +repo for more established and widely used modules. "Extras" modules may be promoted to core periodically, +but there's no fundamental difference in the end - both ship with ansible, all in one package, regardless +of how you acquire ansible. .. 
_module_dev_tutorial: @@ -59,7 +67,7 @@ Testing Modules There's a useful test script in the source checkout for ansible:: - git clone git@github.com:ansible/ansible.git + git clone git@github.com:ansible/ansible.git --recursive source ansible/hacking/env-setup chmod +x ansible/hacking/test-module @@ -78,6 +86,7 @@ If you did not, you might have a typo in your module, so recheck it and try agai Reading Input ````````````` + Let's modify the module to allow setting the current time. We'll do this by seeing if a key value pair in the form `time=` is passed in to the module. @@ -428,15 +437,33 @@ built and appear in the 'docsite/' directory. .. _module_contribution: -Getting Your Module Into Core -````````````````````````````` +Module Paths +```````````` + +If you are having trouble getting your module "found" by ansible, be sure it is in the ANSIBLE_LIBRARY_PATH. + +If you have a fork of one of the ansible module projects, do something like this:: + + ANSIBLE_LIBRARY=~/ansible-module-core:~/ansible-module-extras + +And this will make the items in your fork be loaded ahead of what ships with Ansible. Just be sure +to make sure you're not reporting bugs on versions from your fork! + +To be safe, if you're working on a variant on something in Ansible's normal distribution, it's not +a bad idea to give it a new name while you are working on it, to be sure you know you're pulling +your version. + +Getting Your Module Into Ansible +```````````````````````````````` High-quality modules with minimal dependencies -can be included in the core, but core modules (just due to the programming +can be included in the ansible, but modules (just due to the programming preferences of the developers) will need to be implemented in Python and use the AnsibleModule common code, and should generally use consistent arguments with the rest of the program. Stop by the mailing list to inquire about requirements if you like, and submit -a github pull request to the main project. 
+a github pull request to the `extras `_ project. +Included modules will ship with ansible, and also have a change to be promoted to 'core' status, which +gives them slightly higher development priority (though they'll work in exactly the same way). .. seealso:: diff --git a/docsite/rst/developing_test_pr.rst b/docsite/rst/developing_test_pr.rst index b19bf06493..76b0a53eef 100644 --- a/docsite/rst/developing_test_pr.rst +++ b/docsite/rst/developing_test_pr.rst @@ -45,12 +45,16 @@ If you want to run the full integration test suite you'll also need the followin Second, if you haven't already, clone the Ansible source code from GitHub:: - git clone https://github.com/ansible/ansible.git + git clone https://github.com/ansible/ansible.git --recursive cd ansible/ .. note:: If you have previously forked the repository on GitHub, you could also clone it from there. +.. note:: + If updating your repo for testing something module related, use "git rebase origin/devel" and then "git submodule update" to fetch + the latest development versions of modules. Skipping the "git submodule update" step will result in versions that will be stale. + Activating The Source Checkout ++++++++++++++++++++++++++++++ diff --git a/docsite/rst/intro_installation.rst b/docsite/rst/intro_installation.rst index 410284ab7d..a03ccdb0fb 100644 --- a/docsite/rst/intro_installation.rst +++ b/docsite/rst/intro_installation.rst @@ -107,7 +107,7 @@ To install from source. .. code-block:: bash - $ git clone git://github.com/ansible/ansible.git + $ git clone git://github.com/ansible/ansible.git --recursive $ cd ./ansible $ source ./hacking/env-setup @@ -119,6 +119,12 @@ Ansible also uses the following Python modules that need to be installed:: $ sudo pip install paramiko PyYAML jinja2 httplib2 +Note when updating ansible, be sure to not only update the source tree, but also the "submodules" in git +which point at Ansible's own modules (not the same kind of modules, alas). 
+ + $ git pull --rebase + $ git submodule update + Once running the env-setup script you'll be running from checkout and the default inventory file will be /etc/ansible/hosts. You can optionally specify an inventory file (see :doc:`intro_inventory`) other than /etc/ansible/hosts: diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py index c8c077a631..3a6d372631 100755 --- a/hacking/module_formatter.py +++ b/hacking/module_formatter.py @@ -192,6 +192,7 @@ def process_module(module, options, env, template, outputname, module_map): print "rendering: %s" % module + fname = module_map[module] # ignore files with extensions @@ -209,6 +210,13 @@ def process_module(module, options, env, template, outputname, module_map): if doc is None: return "SKIPPED" + if "core" in module: + print "CORE" + doc['core'] = True + else: + doc['core'] = False + + all_keys = [] if not 'version_added' in doc: diff --git a/hacking/templates/rst.j2 b/hacking/templates/rst.j2 index 54fbafefc1..93213f47dd 100644 --- a/hacking/templates/rst.j2 +++ b/hacking/templates/rst.j2 @@ -101,3 +101,31 @@ Examples {% endfor %} {% endif %} +Developers, Testers, and Bug Reporting +-------------------------------------- + +{% set repo = "https://github.com/ansible/ansible-modules-extras" %} +{% set tracker = "https://github.com/ansible/ansible-modules-extras/issues" %} +{% if core %} + {% set repo = "https://github.com/ansible/ansible-modules-core" %} + {% set tracker = "https://github.com/ansible/ansible-modules-core/issues" %} +{% endif %} + +The source of this module is hosted on GitHub in the `{{ repo }} <{{ repo }}>`_ repo. + +If you believe you have found a bug in this module, and are already running the latest stable or development version of Ansible, first look in the issue tracker at `{{ tracker }} <{{ tracker }}>`_ to see if a bug has already been filed. If not, we would be grateful if you would file one. 
+ +Should you have a question rather than a bug report, inquiries are welcome on the `ansible-project google group ` or on Ansible's "#ansible" channel, located on irc.freenode.net. Development oriented topics should instead use the similar `ansible-devel google group `_. + +Documentation updates for this module can also be edited directly by submitting a pull request to the module source code, just look for the "DOCUMENTATION" block in the source tree. + +{% if not core %} + +Note that this module is designated an "extras" module. Non-core modules are still fully usable, but may receive slightly lower response rates for issues and pull requests. +Popular "extras" modules may be promoted to core modules over time. + +{% endif %} + +For help in developing on modules, should you be so inclined, please read :doc:`community`, :doc:`developing_test_pr` and :doc:`developing_modules`. + + From 16c4c378bd925fccac17a12d3e74d63f7e4156a6 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Fri, 26 Sep 2014 17:56:21 -0400 Subject: [PATCH 090/813] README update --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index cfb6fc4891..99ba9701a2 100644 --- a/README.md +++ b/README.md @@ -36,6 +36,7 @@ Branch Info * Releases are named after Van Halen songs. * The devel branch corresponds to the release actively under development. + * Note that modules are contained in two different git submodule projects, under lib/ansible/modules/{core,extras} * Various release-X.Y branches exist for previous releases. * We'd love to have your contributions, read "CONTRIBUTING.md" for process notes. From 7f6ab89b5b9ad4f84b19d1b14c2d03cb91e720bf Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Fri, 26 Sep 2014 17:59:46 -0400 Subject: [PATCH 091/813] hacking/env-setup no longer needs to set library since modules appear as git submodules. 
--- hacking/env-setup | 3 --- 1 file changed, 3 deletions(-) diff --git a/hacking/env-setup b/hacking/env-setup index 6e4de1af72..4fed169097 100755 --- a/hacking/env-setup +++ b/hacking/env-setup @@ -21,8 +21,6 @@ PREFIX_MANPATH="$ANSIBLE_HOME/docs/man" [[ $PYTHONPATH != ${PREFIX_PYTHONPATH}* ]] && export PYTHONPATH=$PREFIX_PYTHONPATH:$PYTHONPATH [[ $PATH != ${PREFIX_PATH}* ]] && export PATH=$PREFIX_PATH:$PATH -unset ANSIBLE_LIBRARY -export ANSIBLE_LIBRARY="$ANSIBLE_HOME/library:`python $HACKING_DIR/get_library.py`" [[ $MANPATH != ${PREFIX_MANPATH}* ]] && export MANPATH=$PREFIX_MANPATH:$MANPATH # Print out values unless -q is set @@ -33,7 +31,6 @@ if [ $# -eq 0 -o "$1" != "-q" ] ; then echo "" echo "PATH=$PATH" echo "PYTHONPATH=$PYTHONPATH" - echo "ANSIBLE_LIBRARY=$ANSIBLE_LIBRARY" echo "MANPATH=$MANPATH" echo "" From 7e520c4574390250bf5d2f63e4824d2954dea12d Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Fri, 26 Sep 2014 18:16:32 -0400 Subject: [PATCH 092/813] Removing legacy test dir --- legacy/gce_tests.py | 748 -------------------------------------------- 1 file changed, 748 deletions(-) delete mode 100644 legacy/gce_tests.py diff --git a/legacy/gce_tests.py b/legacy/gce_tests.py deleted file mode 100644 index 3f0a4273b0..0000000000 --- a/legacy/gce_tests.py +++ /dev/null @@ -1,748 +0,0 @@ -#!/usr/bin/env python -# Copyright 2013 Google Inc. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. 
If not, see . - -# This is a custom functional test script for the Google Compute Engine -# ansible modules. In order to run these tests, you must: -# 1) Create a Google Cloud Platform account and enable the Google -# Compute Engine service and billing -# 2) Download, install, and configure 'gcutil' -# see [https://developers.google.com/compute/docs/gcutil/] -# 3) Convert your GCE Service Account private key from PKCS12 to PEM format -# $ openssl pkcs12 -in pkey.pkcs12 -passin pass:notasecret \ -# > -nodes -nocerts | openssl rsa -out pkey.pem -# 4) Make sure you have libcloud 0.13.3 or later installed. -# 5) Make sure you have a libcloud 'secrets.py' file in your PYTHONPATH -# 6) Set GCE_PARAMS and GCE_KEYWORD_PARMS in your 'secrets.py' file. -# 7) Set up a simple hosts file -# $ echo 127.0.0.1 > ~/ansible_hosts -# $ echo "export ANSIBLE_HOSTS='~/ansible_hosts'" >> ~/.bashrc -# $ . ~/.bashrc -# 8) Set up your ansible 'hacking' environment -# $ cd ~/ansible -# $ . hacking/env-setup -# $ export ANSIBLE_HOST_KEY_CHECKING=no -# $ ansible all -m ping -# 9) Set your PROJECT variable below -# 10) Run and time the tests and log output, take ~30 minutes to run -# $ time stdbuf -oL python test/gce_tests.py 2>&1 | tee log -# -# Last update: gcutil-1.11.0 and v1beta16 - -# Set this to your test Project ID -PROJECT="google.com:erjohnso" - -# debugging -DEBUG=False # lots of debugging output -VERBOSE=True # on failure, display ansible command and expected/actual result - -# location - note that some tests rely on the module's 'default' -# region/zone, which should match the settings below. -REGION="us-central1" -ZONE="%s-a" % REGION - -# Peeking is a way to trigger looking at a specified set of resources -# before and/or after a test run. The 'test_cases' data structure below -# has a few tests with 'peek_before' and 'peek_after'. When those keys -# are set and PEEKING_ENABLED is True, then these steps will be executed -# to aid in debugging tests. Normally, this is not needed. 
-PEEKING_ENABLED=False - -# disks -DNAME="aaaaa-ansible-disk" -DNAME2="aaaaa-ansible-disk2" -DNAME6="aaaaa-ansible-inst6" -DNAME7="aaaaa-ansible-inst7" -USE_PD="true" -KERNEL="https://www.googleapis.com/compute/v1beta16/projects/google/global/kernels/gce-no-conn-track-v20130813" - -# instances -INAME="aaaaa-ansible-inst" -INAME2="aaaaa-ansible-inst2" -INAME3="aaaaa-ansible-inst3" -INAME4="aaaaa-ansible-inst4" -INAME5="aaaaa-ansible-inst5" -INAME6="aaaaa-ansible-inst6" -INAME7="aaaaa-ansible-inst7" -TYPE="n1-standard-1" -IMAGE="https://www.googleapis.com/compute/v1beta16/projects/debian-cloud/global/images/debian-7-wheezy-v20131014" -NETWORK="default" -SCOPES="https://www.googleapis.com/auth/userinfo.email,https://www.googleapis.com/auth/compute,https://www.googleapis.com/auth/devstorage.full_control" - -# networks / firewalls -NETWK1="ansible-network1" -NETWK2="ansible-network2" -NETWK3="ansible-network3" -CIDR1="10.240.16.0/24" -CIDR2="10.240.32.0/24" -CIDR3="10.240.64.0/24" -GW1="10.240.16.1" -GW2="10.240.32.1" -FW1="ansible-fwrule1" -FW2="ansible-fwrule2" -FW3="ansible-fwrule3" -FW4="ansible-fwrule4" - -# load-balancer tests -HC1="ansible-hc1" -HC2="ansible-hc2" -HC3="ansible-hc3" -LB1="ansible-lb1" -LB2="ansible-lb2" - -from commands import getstatusoutput as run -import sys - -test_cases = [ - {'id': '01', 'desc': 'Detach / Delete disk tests', - 'setup': ['gcutil addinstance "%s" --wait_until_running --zone=%s --machine_type=%s --network=%s --service_account_scopes="%s" --image="%s" --persistent_boot_disk=%s' % (INAME, ZONE, TYPE, NETWORK, SCOPES, IMAGE, USE_PD), - 'gcutil adddisk "%s" --size_gb=2 --zone=%s --wait_until_complete' % (DNAME, ZONE)], - - 'tests': [ - {'desc': 'DETACH_ONLY but disk not found [success]', - 'm': 'gce_pd', - 'a': 'name=%s instance_name=%s zone=%s detach_only=yes state=absent' % ("missing-disk", INAME, ZONE), - 'r': '127.0.0.1 | success >> {"changed": false, "detach_only": true, "detached_from_instance": "%s", "name": "missing-disk", 
"state": "absent", "zone": "%s"}' % (INAME, ZONE), - }, - {'desc': 'DETACH_ONLY but instance not found [success]', - 'm': 'gce_pd', - 'a': 'name=%s instance_name=%s zone=%s detach_only=yes state=absent' % (DNAME, "missing-instance", ZONE), - 'r': '127.0.0.1 | success >> {"changed": false, "detach_only": true, "detached_from_instance": "missing-instance", "name": "%s", "size_gb": 2, "state": "absent", "zone": "%s"}' % (DNAME, ZONE), - }, - {'desc': 'DETACH_ONLY but neither disk nor instance exists [success]', - 'm': 'gce_pd', - 'a': 'name=%s instance_name=%s zone=%s detach_only=yes state=absent' % ("missing-disk", "missing-instance", ZONE), - 'r': '127.0.0.1 | success >> {"changed": false, "detach_only": true, "detached_from_instance": "missing-instance", "name": "missing-disk", "state": "absent", "zone": "%s"}' % (ZONE), - }, - {'desc': 'DETACH_ONLY but disk is not currently attached [success]', - 'm': 'gce_pd', - 'a': 'name=%s instance_name=%s zone=%s detach_only=yes state=absent' % (DNAME, INAME, ZONE), - 'r': '127.0.0.1 | success >> {"changed": false, "detach_only": true, "detached_from_instance": "%s", "name": "%s", "size_gb": 2, "state": "absent", "zone": "%s"}' % (INAME, DNAME, ZONE), - }, - {'desc': 'DETACH_ONLY disk is attached and should be detached [success]', - 'setup': ['gcutil attachdisk --disk="%s,mode=READ_ONLY" --zone=%s %s' % (DNAME, ZONE, INAME), 'sleep 10'], - 'm': 'gce_pd', - 'a': 'name=%s instance_name=%s zone=%s detach_only=yes state=absent' % (DNAME, INAME, ZONE), - 'r': '127.0.0.1 | success >> {"attached_mode": "READ_ONLY", "attached_to_instance": "%s", "changed": true, "detach_only": true, "detached_from_instance": "%s", "name": "%s", "size_gb": 2, "state": "absent", "zone": "%s"}' % (INAME, INAME, DNAME, ZONE), - 'teardown': ['gcutil detachdisk --zone=%s --device_name=%s %s' % (ZONE, DNAME, INAME)], - }, - {'desc': 'DETACH_ONLY but not instance specified [FAIL]', - 'm': 'gce_pd', - 'a': 'name=%s zone=%s detach_only=yes state=absent' % 
(DNAME, ZONE), - 'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Must specify an instance name when detaching a disk"}', - }, - {'desc': 'DELETE but disk not found [success]', - 'm': 'gce_pd', - 'a': 'name=%s zone=%s state=absent' % ("missing-disk", ZONE), - 'r': '127.0.0.1 | success >> {"changed": false, "name": "missing-disk", "state": "absent", "zone": "%s"}' % (ZONE), - }, - {'desc': 'DELETE but disk is attached [FAIL]', - 'setup': ['gcutil attachdisk --disk="%s,mode=READ_ONLY" --zone=%s %s' % (DNAME, ZONE, INAME), 'sleep 10'], - 'm': 'gce_pd', - 'a': 'name=%s zone=%s state=absent' % (DNAME, ZONE), - 'r': "127.0.0.1 | FAILED >> {\"changed\": false, \"failed\": true, \"msg\": \"The disk resource 'projects/%s/zones/%s/disks/%s' is already being used by 'projects/%s/zones/%s/instances/%s'\"}" % (PROJECT, ZONE, DNAME, PROJECT, ZONE, INAME), - 'teardown': ['gcutil detachdisk --zone=%s --device_name=%s %s' % (ZONE, DNAME, INAME)], - }, - {'desc': 'DELETE disk [success]', - 'm': 'gce_pd', - 'a': 'name=%s zone=%s state=absent' % (DNAME, ZONE), - 'r': '127.0.0.1 | success >> {"changed": true, "name": "%s", "size_gb": 2, "state": "absent", "zone": "%s"}' % (DNAME, ZONE), - }, - ], - 'teardown': ['gcutil deleteinstance -f "%s" --zone=%s' % (INAME, ZONE), - 'sleep 15', - 'gcutil deletedisk -f "%s" --zone=%s' % (INAME, ZONE), - 'sleep 10', - 'gcutil deletedisk -f "%s" --zone=%s' % (DNAME, ZONE), - 'sleep 10'], - }, - - {'id': '02', 'desc': 'Create disk but do not attach (e.g. 
no instance_name param)', - 'setup': [], - 'tests': [ - {'desc': 'CREATE_NO_ATTACH "string" for size_gb [FAIL]', - 'm': 'gce_pd', - 'a': 'name=%s size_gb="foo" zone=%s' % (DNAME, ZONE), - 'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Must supply a size_gb larger than 1 GB"}', - }, - {'desc': 'CREATE_NO_ATTACH negative size_gb [FAIL]', - 'm': 'gce_pd', - 'a': 'name=%s size_gb=-2 zone=%s' % (DNAME, ZONE), - 'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Must supply a size_gb larger than 1 GB"}', - }, - {'desc': 'CREATE_NO_ATTACH size_gb exceeds quota [FAIL]', - 'm': 'gce_pd', - 'a': 'name=%s size_gb=9999 zone=%s' % ("big-disk", ZONE), - 'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Requested disk size exceeds quota"}', - }, - {'desc': 'CREATE_NO_ATTACH create the disk [success]', - 'm': 'gce_pd', - 'a': 'name=%s zone=%s' % (DNAME, ZONE), - 'r': '127.0.0.1 | success >> {"changed": true, "name": "%s", "size_gb": 10, "state": "present", "zone": "%s"}' % (DNAME, ZONE), - }, - {'desc': 'CREATE_NO_ATTACH but disk already exists [success]', - 'm': 'gce_pd', - 'a': 'name=%s zone=%s' % (DNAME, ZONE), - 'r': '127.0.0.1 | success >> {"changed": false, "name": "%s", "size_gb": 10, "state": "present", "zone": "%s"}' % (DNAME, ZONE), - }, - ], - 'teardown': ['gcutil deletedisk -f "%s" --zone=%s' % (DNAME, ZONE), - 'sleep 10'], - }, - - {'id': '03', 'desc': 'Create and attach disk', - 'setup': ['gcutil addinstance "%s" --zone=%s --machine_type=%s --network=%s --service_account_scopes="%s" --image="%s" --persistent_boot_disk=%s' % (INAME2, ZONE, TYPE, NETWORK, SCOPES, IMAGE, USE_PD), - 'gcutil addinstance "%s" --zone=%s --machine_type=%s --network=%s --service_account_scopes="%s" --image="%s" --persistent_boot_disk=%s' % (INAME, ZONE, "g1-small", NETWORK, SCOPES, IMAGE, USE_PD), - 'gcutil adddisk "%s" --size_gb=2 --zone=%s' % (DNAME, ZONE), - 'gcutil adddisk "%s" --size_gb=2 --zone=%s --wait_until_complete' % 
(DNAME2, ZONE),], - 'tests': [ - {'desc': 'CREATE_AND_ATTACH "string" for size_gb [FAIL]', - 'm': 'gce_pd', - 'a': 'name=%s size_gb="foo" instance_name=%s zone=%s' % (DNAME, INAME, ZONE), - 'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Must supply a size_gb larger than 1 GB"}', - }, - {'desc': 'CREATE_AND_ATTACH negative size_gb [FAIL]', - 'm': 'gce_pd', - 'a': 'name=%s size_gb=-2 instance_name=%s zone=%s' % (DNAME, INAME, ZONE), - 'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Must supply a size_gb larger than 1 GB"}', - }, - {'desc': 'CREATE_AND_ATTACH size_gb exceeds quota [FAIL]', - 'm': 'gce_pd', - 'a': 'name=%s size_gb=9999 instance_name=%s zone=%s' % ("big-disk", INAME, ZONE), - 'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Requested disk size exceeds quota"}', - }, - {'desc': 'CREATE_AND_ATTACH missing instance [FAIL]', - 'm': 'gce_pd', - 'a': 'name=%s instance_name=%s zone=%s' % (DNAME, "missing-instance", ZONE), - 'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Instance %s does not exist in zone %s"}' % ("missing-instance", ZONE), - }, - {'desc': 'CREATE_AND_ATTACH disk exists but not attached [success]', - 'peek_before': ["gcutil --format=csv listinstances --zone=%s --filter=\"name eq 'aaaa.*'\"" % (ZONE)], - 'm': 'gce_pd', - 'a': 'name=%s instance_name=%s zone=%s' % (DNAME, INAME, ZONE), - 'r': '127.0.0.1 | success >> {"attached_mode": "READ_ONLY", "attached_to_instance": "%s", "changed": true, "name": "%s", "size_gb": 2, "state": "present", "zone": "%s"}' % (INAME, DNAME, ZONE), - 'peek_after': ["gcutil --format=csv listinstances --zone=%s --filter=\"name eq 'aaaa.*'\"" % (ZONE)], - }, - {'desc': 'CREATE_AND_ATTACH disk exists already attached [success]', - 'm': 'gce_pd', - 'a': 'name=%s instance_name=%s zone=%s' % (DNAME, INAME, ZONE), - 'r': '127.0.0.1 | success >> {"attached_mode": "READ_ONLY", "attached_to_instance": "%s", "changed": false, "name": 
"%s", "size_gb": 2, "state": "present", "zone": "%s"}' % (INAME, DNAME, ZONE), - }, - {'desc': 'CREATE_AND_ATTACH attached RO, attempt RO to 2nd inst [success]', - 'peek_before': ["gcutil --format=csv listinstances --zone=%s --filter=\"name eq 'aaaa.*'\"" % (ZONE)], - 'm': 'gce_pd', - 'a': 'name=%s instance_name=%s zone=%s' % (DNAME, INAME2, ZONE), - 'r': '127.0.0.1 | success >> {"attached_mode": "READ_ONLY", "attached_to_instance": "%s", "changed": true, "name": "%s", "size_gb": 2, "state": "present", "zone": "%s"}' % (INAME2, DNAME, ZONE), - 'peek_after': ["gcutil --format=csv listinstances --zone=%s --filter=\"name eq 'aaaa.*'\"" % (ZONE)], - }, - {'desc': 'CREATE_AND_ATTACH attached RO, attach RW to self [FAILED no-op]', - 'peek_before': ["gcutil --format=csv listinstances --zone=%s --filter=\"name eq 'aaaa.*'\"" % (ZONE)], - 'm': 'gce_pd', - 'a': 'name=%s instance_name=%s zone=%s mode=READ_WRITE' % (DNAME, INAME, ZONE), - 'r': '127.0.0.1 | success >> {"attached_mode": "READ_ONLY", "attached_to_instance": "%s", "changed": false, "name": "%s", "size_gb": 2, "state": "present", "zone": "%s"}' % (INAME, DNAME, ZONE), - }, - {'desc': 'CREATE_AND_ATTACH attached RW, attach RW to other [FAIL]', - 'setup': ['gcutil attachdisk --disk=%s,mode=READ_WRITE --zone=%s %s' % (DNAME2, ZONE, INAME), 'sleep 10'], - 'peek_before': ["gcutil --format=csv listinstances --zone=%s --filter=\"name eq 'aaaa.*'\"" % (ZONE)], - 'm': 'gce_pd', - 'a': 'name=%s instance_name=%s zone=%s mode=READ_WRITE' % (DNAME2, INAME2, ZONE), - 'r': "127.0.0.1 | FAILED >> {\"changed\": false, \"failed\": true, \"msg\": \"Unexpected response: HTTP return_code[200], API error code[RESOURCE_IN_USE] and message: The disk resource 'projects/%s/zones/%s/disks/%s' is already being used in read-write mode\"}" % (PROJECT, ZONE, DNAME2), - 'peek_after': ["gcutil --format=csv listinstances --zone=%s --filter=\"name eq 'aaaa.*'\"" % (ZONE)], - }, - {'desc': 'CREATE_AND_ATTACH attach too many disks to inst [FAIL]', - 
'setup': ['gcutil adddisk aa-disk-dummy --size_gb=2 --zone=%s' % (ZONE), - 'gcutil adddisk aa-disk-dummy2 --size_gb=2 --zone=%s --wait_until_complete' % (ZONE), - 'gcutil attachdisk --disk=aa-disk-dummy --zone=%s %s' % (ZONE, INAME), - 'sleep 5'], - 'peek_before': ["gcutil --format=csv listinstances --zone=%s --filter=\"name eq 'aaaa.*'\"" % (ZONE)], - 'm': 'gce_pd', - 'a': 'name=%s instance_name=%s zone=%s' % ("aa-disk-dummy2", INAME, ZONE), - 'r': "127.0.0.1 | FAILED >> {\"changed\": false, \"failed\": true, \"msg\": \"Unexpected response: HTTP return_code[200], API error code[LIMIT_EXCEEDED] and message: Exceeded limit 'maximum_persistent_disks' on resource 'projects/%s/zones/%s/instances/%s'. Limit: 4\"}" % (PROJECT, ZONE, INAME), - 'teardown': ['gcutil detachdisk --device_name=aa-disk-dummy --zone=%s %s' % (ZONE, INAME), - 'sleep 3', - 'gcutil deletedisk -f aa-disk-dummy --zone=%s' % (ZONE), - 'sleep 10', - 'gcutil deletedisk -f aa-disk-dummy2 --zone=%s' % (ZONE), - 'sleep 10'], - }, - ], - 'teardown': ['gcutil deleteinstance -f "%s" --zone=%s' % (INAME2, ZONE), - 'sleep 15', - 'gcutil deleteinstance -f "%s" --zone=%s' % (INAME, ZONE), - 'sleep 15', - 'gcutil deletedisk -f "%s" --zone=%s' % (INAME, ZONE), - 'sleep 10', - 'gcutil deletedisk -f "%s" --zone=%s' % (INAME2, ZONE), - 'sleep 10', - 'gcutil deletedisk -f "%s" --zone=%s' % (DNAME, ZONE), - 'sleep 10', - 'gcutil deletedisk -f "%s" --zone=%s' % (DNAME2, ZONE), - 'sleep 10'], - }, - - {'id': '04', 'desc': 'Delete / destroy instances', - 'setup': ['gcutil addinstance "%s" --zone=%s --machine_type=%s --image="%s" --persistent_boot_disk=false' % (INAME, ZONE, TYPE, IMAGE), - 'gcutil addinstance "%s" --zone=%s --machine_type=%s --image="%s" --persistent_boot_disk=false' % (INAME2, ZONE, TYPE, IMAGE), - 'gcutil addinstance "%s" --zone=%s --machine_type=%s --image="%s" --persistent_boot_disk=false' % (INAME3, ZONE, TYPE, IMAGE), - 'gcutil addinstance "%s" --zone=%s --machine_type=%s --image="%s" 
--persistent_boot_disk=false' % (INAME4, ZONE, TYPE, IMAGE), - 'gcutil addinstance "%s" --wait_until_running --zone=%s --machine_type=%s --image="%s" --persistent_boot_disk=false' % (INAME5, ZONE, TYPE, IMAGE)], - 'tests': [ - {'desc': 'DELETE instance, bad zone param [FAIL]', - 'm': 'gce', - 'a': 'name=missing-inst zone=bogus state=absent', - 'r': '127.0.0.1 | FAILED >> {"failed": true, "msg": "value of zone must be one of: us-central1-a,us-central1-b,us-central2-a,europe-west1-a,europe-west1-b, got: bogus"}', - }, - {'desc': 'DELETE non-existent instance, no-op [success]', - 'm': 'gce', - 'a': 'name=missing-inst zone=%s state=absent' % (ZONE), - 'r': '127.0.0.1 | success >> {"changed": false, "name": "missing-inst", "state": "absent", "zone": "%s"}' % (ZONE), - }, - {'desc': 'DELETE an existing named instance [success]', - 'm': 'gce', - 'a': 'name=%s zone=%s state=absent' % (INAME, ZONE), - 'r': '127.0.0.1 | success >> {"changed": true, "name": "%s", "state": "absent", "zone": "%s"}' % (INAME, ZONE), - }, - {'desc': 'DELETE list of instances with a non-existent one [success]', - 'm': 'gce', - 'a': 'instance_names=%s,missing,%s zone=%s state=absent' % (INAME2,INAME3, ZONE), - 'r': '127.0.0.1 | success >> {"changed": true, "instance_names": ["%s", "%s"], "state": "absent", "zone": "%s"}' % (INAME2, INAME3, ZONE), - }, - {'desc': 'DELETE list of instances all pre-exist [success]', - 'm': 'gce', - 'a': 'instance_names=%s,%s zone=%s state=absent' % (INAME4,INAME5, ZONE), - 'r': '127.0.0.1 | success >> {"changed": true, "instance_names": ["%s", "%s"], "state": "absent", "zone": "%s"}' % (INAME4, INAME5, ZONE), - }, - ], - 'teardown': ['gcutil deleteinstance -f "%s" --zone=%s' % (INAME, ZONE), - 'gcutil deleteinstance -f "%s" --zone=%s' % (INAME2, ZONE), - 'gcutil deleteinstance -f "%s" --zone=%s' % (INAME3, ZONE), - 'gcutil deleteinstance -f "%s" --zone=%s' % (INAME4, ZONE), - 'gcutil deleteinstance -f "%s" --zone=%s' % (INAME5, ZONE), - 'sleep 10'], - }, - - {'id': 
'05', 'desc': 'Create instances', - 'setup': ['gcutil adddisk --source_image=%s --zone=%s %s --wait_until_complete' % (IMAGE, ZONE, DNAME7), - 'gcutil addinstance boo --wait_until_running --zone=%s --machine_type=%s --network=%s --disk=%s,mode=READ_WRITE,boot --kernel=%s' % (ZONE,TYPE,NETWORK,DNAME7,KERNEL), - ], - 'tests': [ - {'desc': 'CREATE_INSTANCE invalid image arg [FAIL]', - 'm': 'gce', - 'a': 'name=foo image=foo', - 'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Missing required create instance variable"}', - }, - {'desc': 'CREATE_INSTANCE metadata a list [FAIL]', - 'strip_numbers': True, - 'm': 'gce', - 'a': 'name=%s zone=%s metadata=\'[\\"foo\\":\\"bar\\",\\"baz\\":1]\'' % (INAME,ZONE), - 'r': '127.0.0.1 | FAILED >> {"failed": true, "msg": "bad metadata syntax"}', - }, - {'desc': 'CREATE_INSTANCE metadata not a dict [FAIL]', - 'strip_numbers': True, - 'm': 'gce', - 'a': 'name=%s zone=%s metadata=\\"foo\\":\\"bar\\",\\"baz\\":1' % (INAME,ZONE), - 'r': '127.0.0.1 | FAILED >> {"failed": true, "msg": "bad metadata syntax"}', - }, - {'desc': 'CREATE_INSTANCE with metadata form1 [FAIL]', - 'strip_numbers': True, - 'm': 'gce', - 'a': 'name=%s zone=%s metadata=\'{"foo":"bar","baz":1}\'' % (INAME,ZONE), - 'r': '127.0.0.1 | FAILED >> {"failed": true, "msg": "bad metadata: malformed string"}', - }, - {'desc': 'CREATE_INSTANCE with metadata form2 [FAIL]', - 'strip_numbers': True, - 'm': 'gce', - 'a': 'name=%s zone=%s metadata={\'foo\':\'bar\',\'baz\':1}' % (INAME,ZONE), - 'r': '127.0.0.1 | FAILED >> {"failed": true, "msg": "bad metadata: malformed string"}', - }, - {'desc': 'CREATE_INSTANCE with metadata form3 [FAIL]', - 'strip_numbers': True, - 'm': 'gce', - 'a': 'name=%s zone=%s metadata="foo:bar" '% (INAME,ZONE), - 'r': '127.0.0.1 | FAILED >> {"failed": true, "msg": "bad metadata syntax"}', - }, - {'desc': 'CREATE_INSTANCE with metadata form4 [FAIL]', - 'strip_numbers': True, - 'm': 'gce', - 'a': 'name=%s zone=%s 
metadata="{\'foo\':\'bar\'}"'% (INAME,ZONE), - 'r': '127.0.0.1 | FAILED >> {"failed": true, "msg": "bad metadata: malformed string"}', - }, - {'desc': 'CREATE_INSTANCE invalid image arg [FAIL]', - 'm': 'gce', - 'a': 'instance_names=foo,bar image=foo', - 'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Missing required create instance variable"}', - }, - {'desc': 'CREATE_INSTANCE single inst, using defaults [success]', - 'strip_numbers': True, - 'm': 'gce', - 'a': 'name=%s' % (INAME), - 'r': '127.0.0.1 | success >> {"changed": true, "instance_data": [{"image": "debian-7-wheezy-v20130816", "machine_type": "n1-standard-1", "metadata": {}, "name": "%s", "network": "default", "private_ip": "10.240.175.15", "public_ip": "173.255.120.190", "status": "RUNNING", "tags": [], "zone": "%s"}], "name": "%s", "state": "present", "zone": "%s"}' % (INAME, ZONE, INAME, ZONE), - }, - {'desc': 'CREATE_INSTANCE the same instance again, no-op [success]', - 'strip_numbers': True, - 'm': 'gce', - 'a': 'name=%s' % (INAME), - 'r': '127.0.0.1 | success >> {"changed": false, "instance_data": [{"image": "debian-7-wheezy-v20130816", "machine_type": "n1-standard-1", "metadata": {}, "name": "%s", "network": "default", "private_ip": "10.240.175.15", "public_ip": "173.255.120.190", "status": "RUNNING", "tags": [], "zone": "%s"}], "name": "%s", "state": "present", "zone": "%s"}' % (INAME, ZONE, INAME, ZONE), - }, - {'desc': 'CREATE_INSTANCE instance with alt type [success]', - 'strip_numbers': True, - 'm': 'gce', - 'a': 'name=%s machine_type=n1-standard-2' % (INAME2), - 'r': '127.0.0.1 | success >> {"changed": true, "instance_data": [{"image": "debian-7-wheezy-v20130816", "machine_type": "n1-standard-2", "metadata": {}, "name": "%s", "network": "default", "private_ip": "10.240.192.227", "public_ip": "173.255.121.233", "status": "RUNNING", "tags": [], "zone": "%s"}], "name": "%s", "state": "present", "zone": "%s"}' % (INAME2, ZONE, INAME2, ZONE), - }, - {'desc': 'CREATE_INSTANCE 
instance with root pd [success]', - 'strip_numbers': True, - 'm': 'gce', - 'a': 'name=%s persistent_boot_disk=yes' % (INAME3), - 'r': '127.0.0.1 | success >> {"changed": true, "instance_data": [{"image": null, "machine_type": "n1-standard-1", "metadata": {}, "name": "%s", "network": "default", "private_ip": "10.240.178.140", "public_ip": "173.255.121.176", "status": "RUNNING", "tags": [], "zone": "%s"}], "name": "%s", "state": "present", "zone": "%s"}' % (INAME3, ZONE, INAME3, ZONE), - }, - {'desc': 'CREATE_INSTANCE instance with root pd, that already exists [success]', - 'setup': ['gcutil adddisk --source_image=%s --zone=%s %s --wait_until_complete' % (IMAGE, ZONE, DNAME6),], - 'strip_numbers': True, - 'm': 'gce', - 'a': 'name=%s zone=%s persistent_boot_disk=yes' % (INAME6, ZONE), - 'r': '127.0.0.1 | success >> {"changed": true, "instance_data": [{"image": null, "machine_type": "n1-standard-1", "metadata": {}, "name": "%s", "network": "default", "private_ip": "10.240.178.140", "public_ip": "173.255.121.176", "status": "RUNNING", "tags": [], "zone": "%s"}], "name": "%s", "state": "present", "zone": "%s"}' % (INAME6, ZONE, INAME6, ZONE), - }, - {'desc': 'CREATE_INSTANCE instance with root pd attached to other inst [FAIL]', - 'm': 'gce', - 'a': 'name=%s zone=%s persistent_boot_disk=yes' % (INAME7, ZONE), - 'r': '127.0.0.1 | FAILED >> {"failed": true, "msg": "Unexpected error attempting to create instance %s, error: The disk resource \'projects/%s/zones/%s/disks/%s\' is already being used in read-write mode"}' % (INAME7,PROJECT,ZONE,DNAME7), - }, - {'desc': 'CREATE_INSTANCE use *all* the options! 
[success]', - 'strip_numbers': True, - 'm': 'gce', - 'a': 'instance_names=%s,%s metadata=\'{\\"foo\\":\\"bar\\", \\"baz\\":1}\' tags=t1,t2,t3 zone=%s image=centos-6-v20130731 persistent_boot_disk=yes' % (INAME4,INAME5,ZONE), - 'r': '127.0.0.1 | success >> {"changed": true, "instance_data": [{"image": null, "machine_type": "n1-standard-1", "metadata": {"baz": "1", "foo": "bar"}, "name": "%s", "network": "default", "private_ip": "10.240.130.4", "public_ip": "173.255.121.97", "status": "RUNNING", "tags": ["t1", "t2", "t3"], "zone": "%s"}, {"image": null, "machine_type": "n1-standard-1", "metadata": {"baz": "1", "foo": "bar"}, "name": "%s", "network": "default", "private_ip": "10.240.207.226", "public_ip": "173.255.121.85", "status": "RUNNING", "tags": ["t1", "t2", "t3"], "zone": "%s"}], "instance_names": ["%s", "%s"], "state": "present", "zone": "%s"}' % (INAME4, ZONE, INAME5, ZONE, INAME4, INAME5, ZONE), - }, - ], - 'teardown': ['gcutil deleteinstance -f "%s" --zone=%s' % (INAME, ZONE), - 'gcutil deleteinstance -f "%s" --zone=%s' % (INAME2, ZONE), - 'gcutil deleteinstance -f "%s" --zone=%s' % (INAME3, ZONE), - 'gcutil deleteinstance -f "%s" --zone=%s' % (INAME4, ZONE), - 'gcutil deleteinstance -f "%s" --zone=%s' % (INAME5, ZONE), - 'gcutil deleteinstance -f "%s" --zone=%s' % (INAME6, ZONE), - 'gcutil deleteinstance -f "%s" --zone=%s' % (INAME7, ZONE), - 'gcutil deleteinstance -f boo --zone=%s' % (ZONE), - 'sleep 10', - 'gcutil deletedisk -f "%s" --zone=%s' % (INAME3, ZONE), - 'gcutil deletedisk -f "%s" --zone=%s' % (INAME4, ZONE), - 'gcutil deletedisk -f "%s" --zone=%s' % (INAME5, ZONE), - 'gcutil deletedisk -f "%s" --zone=%s' % (INAME6, ZONE), - 'gcutil deletedisk -f "%s" --zone=%s' % (INAME7, ZONE), - 'sleep 10'], - }, - - {'id': '06', 'desc': 'Delete / destroy networks and firewall rules', - 'setup': ['gcutil addnetwork --range="%s" --gateway="%s" %s' % (CIDR1, GW1, NETWK1), - 'gcutil addnetwork --range="%s" --gateway="%s" %s' % (CIDR2, GW2, NETWK2), - 'sleep 5', 
- 'gcutil addfirewall --allowed="tcp:80" --network=%s %s' % (NETWK1, FW1), - 'gcutil addfirewall --allowed="tcp:80" --network=%s %s' % (NETWK2, FW2), - 'sleep 5'], - 'tests': [ - {'desc': 'DELETE bogus named firewall [success]', - 'm': 'gce_net', - 'a': 'fwname=missing-fwrule state=absent', - 'r': '127.0.0.1 | success >> {"changed": false, "fwname": "missing-fwrule", "state": "absent"}', - }, - {'desc': 'DELETE bogus named network [success]', - 'm': 'gce_net', - 'a': 'name=missing-network state=absent', - 'r': '127.0.0.1 | success >> {"changed": false, "name": "missing-network", "state": "absent"}', - }, - {'desc': 'DELETE named firewall rule [success]', - 'm': 'gce_net', - 'a': 'fwname=%s state=absent' % (FW1), - 'r': '127.0.0.1 | success >> {"changed": true, "fwname": "%s", "state": "absent"}' % (FW1), - 'teardown': ['sleep 5'], # pause to give GCE time to delete fwrule - }, - {'desc': 'DELETE unused named network [success]', - 'm': 'gce_net', - 'a': 'name=%s state=absent' % (NETWK1), - 'r': '127.0.0.1 | success >> {"changed": true, "name": "%s", "state": "absent"}' % (NETWK1), - }, - {'desc': 'DELETE named network *and* fwrule [success]', - 'm': 'gce_net', - 'a': 'name=%s fwname=%s state=absent' % (NETWK2, FW2), - 'r': '127.0.0.1 | success >> {"changed": true, "fwname": "%s", "name": "%s", "state": "absent"}' % (FW2, NETWK2), - }, - ], - 'teardown': ['gcutil deletenetwork -f %s' % (NETWK1), - 'gcutil deletenetwork -f %s' % (NETWK2), - 'sleep 5', - 'gcutil deletefirewall -f %s' % (FW1), - 'gcutil deletefirewall -f %s' % (FW2)], - }, - - {'id': '07', 'desc': 'Create networks and firewall rules', - 'setup': ['gcutil addnetwork --range="%s" --gateway="%s" %s' % (CIDR1, GW1, NETWK1), - 'sleep 5', - 'gcutil addfirewall --allowed="tcp:80" --network=%s %s' % (NETWK1, FW1), - 'sleep 5'], - 'tests': [ - {'desc': 'CREATE network without specifying ipv4_range [FAIL]', - 'm': 'gce_net', - 'a': 'name=fail', - 'r': "127.0.0.1 | FAILED >> {\"changed\": false, \"failed\": true, 
\"msg\": \"Missing required 'ipv4_range' parameter\"}", - }, - {'desc': 'CREATE network with specifying bad ipv4_range [FAIL]', - 'm': 'gce_net', - 'a': 'name=fail ipv4_range=bad_value', - 'r': "127.0.0.1 | FAILED >> {\"changed\": false, \"failed\": true, \"msg\": \"Unexpected response: HTTP return_code[400], API error code[None] and message: Invalid value for field 'resource.IPv4Range': 'bad_value'. Must be a CIDR address range that is contained in the RFC1918 private address blocks: [10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16]\"}", - }, - {'desc': 'CREATE existing network, not changed [success]', - 'm': 'gce_net', - 'a': 'name=%s ipv4_range=%s' % (NETWK1, CIDR1), - 'r': '127.0.0.1 | success >> {"changed": false, "ipv4_range": "%s", "name": "%s", "state": "present"}' % (CIDR1, NETWK1), - }, - {'desc': 'CREATE new network, changed [success]', - 'm': 'gce_net', - 'a': 'name=%s ipv4_range=%s' % (NETWK2, CIDR2), - 'r': '127.0.0.1 | success >> {"changed": true, "ipv4_range": "10.240.32.0/24", "name": "%s", "state": "present"}' % (NETWK2), - }, - {'desc': 'CREATE new fw rule missing params [FAIL]', - 'm': 'gce_net', - 'a': 'name=%s fwname=%s' % (NETWK1, FW1), - 'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Missing required firewall rule parameter(s)"}', - }, - {'desc': 'CREATE new fw rule bad params [FAIL]', - 'm': 'gce_net', - 'a': 'name=%s fwname=broken allowed=blah src_tags="one,two"' % (NETWK1), - 'r': "127.0.0.1 | FAILED >> {\"changed\": false, \"failed\": true, \"msg\": \"Unexpected response: HTTP return_code[400], API error code[None] and message: Invalid value for field 'resource.allowed[0].IPProtocol': 'blah'. 
Must be one of [\\\"tcp\\\", \\\"udp\\\", \\\"icmp\\\"] or an IP protocol number between 0 and 255\"}", - }, - {'desc': 'CREATE existing fw rule [success]', - 'm': 'gce_net', - 'a': 'name=%s fwname=%s allowed="tcp:80" src_tags="one,two"' % (NETWK1, FW1), - 'r': '127.0.0.1 | success >> {"allowed": "tcp:80", "changed": false, "fwname": "%s", "ipv4_range": "%s", "name": "%s", "src_range": null, "src_tags": ["one", "two"], "state": "present"}' % (FW1, CIDR1, NETWK1), - }, - {'desc': 'CREATE new fw rule [success]', - 'm': 'gce_net', - 'a': 'name=%s fwname=%s allowed="tcp:80" src_tags="one,two"' % (NETWK1, FW3), - 'r': '127.0.0.1 | success >> {"allowed": "tcp:80", "changed": true, "fwname": "%s", "ipv4_range": "%s", "name": "%s", "src_range": null, "src_tags": ["one", "two"], "state": "present"}' % (FW3, CIDR1, NETWK1), - }, - {'desc': 'CREATE new network *and* fw rule [success]', - 'm': 'gce_net', - 'a': 'name=%s ipv4_range=%s fwname=%s allowed="tcp:80" src_tags="one,two"' % (NETWK3, CIDR3, FW4), - 'r': '127.0.0.1 | success >> {"allowed": "tcp:80", "changed": true, "fwname": "%s", "ipv4_range": "%s", "name": "%s", "src_range": null, "src_tags": ["one", "two"], "state": "present"}' % (FW4, CIDR3, NETWK3), - }, - ], - 'teardown': ['gcutil deletefirewall -f %s' % (FW1), - 'gcutil deletefirewall -f %s' % (FW2), - 'gcutil deletefirewall -f %s' % (FW3), - 'gcutil deletefirewall -f %s' % (FW4), - 'sleep 5', - 'gcutil deletenetwork -f %s' % (NETWK1), - 'gcutil deletenetwork -f %s' % (NETWK2), - 'gcutil deletenetwork -f %s' % (NETWK3), - 'sleep 5'], - }, - - {'id': '08', 'desc': 'Create load-balancer resources', - 'setup': ['gcutil addinstance "%s" --zone=%s --machine_type=%s --network=%s --service_account_scopes="%s" --image="%s" --nopersistent_boot_disk' % (INAME, ZONE, TYPE, NETWORK, SCOPES, IMAGE), - 'gcutil addinstance "%s" --wait_until_running --zone=%s --machine_type=%s --network=%s --service_account_scopes="%s" --image="%s" --nopersistent_boot_disk' % (INAME2, ZONE, 
TYPE, NETWORK, SCOPES, IMAGE), - ], - 'tests': [ - {'desc': 'Do nothing [FAIL]', - 'm': 'gce_lb', - 'a': 'httphealthcheck_port=7', - 'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Nothing to do, please specify a \\\"name\\\" or \\\"httphealthcheck_name\\\" parameter"}', - }, - {'desc': 'CREATE_HC create basic http healthcheck [success]', - 'm': 'gce_lb', - 'a': 'httphealthcheck_name=%s' % (HC1), - 'r': '127.0.0.1 | success >> {"changed": true, "httphealthcheck_healthy_count": 2, "httphealthcheck_host": null, "httphealthcheck_interval": 5, "httphealthcheck_name": "%s", "httphealthcheck_path": "/", "httphealthcheck_port": 80, "httphealthcheck_timeout": 5, "httphealthcheck_unhealthy_count": 2, "name": null, "state": "present"}' % (HC1), - }, - {'desc': 'CREATE_HC (repeat, no-op) create basic http healthcheck [success]', - 'm': 'gce_lb', - 'a': 'httphealthcheck_name=%s' % (HC1), - 'r': '127.0.0.1 | success >> {"changed": false, "httphealthcheck_healthy_count": 2, "httphealthcheck_host": null, "httphealthcheck_interval": 5, "httphealthcheck_name": "%s", "httphealthcheck_path": "/", "httphealthcheck_port": 80, "httphealthcheck_timeout": 5, "httphealthcheck_unhealthy_count": 2, "name": null, "state": "present"}' % (HC1), - }, - {'desc': 'CREATE_HC create custom http healthcheck [success]', - 'm': 'gce_lb', - 'a': 'httphealthcheck_name=%s httphealthcheck_port=1234 httphealthcheck_path="/whatup" httphealthcheck_host="foo" httphealthcheck_interval=300' % (HC2), - 'r': '127.0.0.1 | success >> {"changed": true, "httphealthcheck_healthy_count": 2, "httphealthcheck_host": "foo", "httphealthcheck_interval": 300, "httphealthcheck_name": "%s", "httphealthcheck_path": "/whatup", "httphealthcheck_port": 1234, "httphealthcheck_timeout": 5, "httphealthcheck_unhealthy_count": 2, "name": null, "state": "present"}' % (HC2), - }, - {'desc': 'CREATE_HC create (broken) custom http healthcheck [FAIL]', - 'm': 'gce_lb', - 'a': 'httphealthcheck_name=%s 
httphealthcheck_port="string" httphealthcheck_path=7' % (HC3), - 'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Unexpected response: HTTP return_code[400], API error code[None] and message: Invalid value for: Expected a signed integer, got \'string\' (class java.lang.String)"}', - }, - {'desc': 'CREATE_LB create lb, missing region [FAIL]', - 'm': 'gce_lb', - 'a': 'name=%s' % (LB1), - 'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Missing required region name"}', - }, - {'desc': 'CREATE_LB create lb, bogus region [FAIL]', - 'm': 'gce_lb', - 'a': 'name=%s region=bogus' % (LB1), - 'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Unexpected response: HTTP return_code[404], API error code[None] and message: The resource \'projects/%s/regions/bogus\' was not found"}' % (PROJECT), - }, - {'desc': 'CREATE_LB create lb, minimal params [success]', - 'strip_numbers': True, - 'm': 'gce_lb', - 'a': 'name=%s region=%s' % (LB1, REGION), - 'r': '127.0.0.1 | success >> {"changed": true, "external_ip": "173.255.123.245", "httphealthchecks": [], "members": [], "name": "%s", "port_range": "1-65535", "protocol": "tcp", "region": "%s", "state": "present"}' % (LB1, REGION), - }, - {'desc': 'CREATE_LB create lb full params [success]', - 'strip_numbers': True, - 'm': 'gce_lb', - 'a': 'httphealthcheck_name=%s httphealthcheck_port=5055 httphealthcheck_path="/howami" name=%s port_range=8000-8888 region=%s members=%s/%s,%s/%s' % (HC3,LB2,REGION,ZONE,INAME,ZONE,INAME2), - 'r': '127.0.0.1 | success >> {"changed": true, "external_ip": "173.255.126.81", "httphealthcheck_healthy_count": 2, "httphealthcheck_host": null, "httphealthcheck_interval": 5, "httphealthcheck_name": "%s", "httphealthcheck_path": "/howami", "httphealthcheck_port": 5055, "httphealthcheck_timeout": 5, "httphealthcheck_unhealthy_count": 2, "httphealthchecks": ["%s"], "members": ["%s/%s", "%s/%s"], "name": "%s", "port_range": "8000-8888", "protocol": "tcp", 
"region": "%s", "state": "present"}' % (HC3,HC3,ZONE,INAME,ZONE,INAME2,LB2,REGION), - }, - ], - 'teardown': [ - 'gcutil deleteinstance --zone=%s -f %s %s' % (ZONE, INAME, INAME2), - 'gcutil deleteforwardingrule --region=%s -f %s %s' % (REGION, LB1, LB2), - 'sleep 10', - 'gcutil deletetargetpool --region=%s -f %s-tp %s-tp' % (REGION, LB1, LB2), - 'sleep 10', - 'gcutil deletehttphealthcheck -f %s %s %s' % (HC1, HC2, HC3), - ], - }, - - {'id': '09', 'desc': 'Destroy load-balancer resources', - 'setup': ['gcutil addhttphealthcheck %s' % (HC1), - 'sleep 5', - 'gcutil addhttphealthcheck %s' % (HC2), - 'sleep 5', - 'gcutil addtargetpool --health_checks=%s --region=%s %s-tp' % (HC1, REGION, LB1), - 'sleep 5', - 'gcutil addforwardingrule --target=%s-tp --region=%s %s' % (LB1, REGION, LB1), - 'sleep 5', - 'gcutil addtargetpool --region=%s %s-tp' % (REGION, LB2), - 'sleep 5', - 'gcutil addforwardingrule --target=%s-tp --region=%s %s' % (LB2, REGION, LB2), - 'sleep 5', - ], - 'tests': [ - {'desc': 'DELETE_LB: delete a non-existent LB [success]', - 'm': 'gce_lb', - 'a': 'name=missing state=absent', - 'r': '127.0.0.1 | success >> {"changed": false, "name": "missing", "state": "absent"}', - }, - {'desc': 'DELETE_LB: delete a non-existent LB+HC [success]', - 'm': 'gce_lb', - 'a': 'name=missing httphealthcheck_name=alsomissing state=absent', - 'r': '127.0.0.1 | success >> {"changed": false, "httphealthcheck_name": "alsomissing", "name": "missing", "state": "absent"}', - }, - {'desc': 'DELETE_LB: destroy standalone healthcheck [success]', - 'm': 'gce_lb', - 'a': 'httphealthcheck_name=%s state=absent' % (HC2), - 'r': '127.0.0.1 | success >> {"changed": true, "httphealthcheck_name": "%s", "name": null, "state": "absent"}' % (HC2), - }, - {'desc': 'DELETE_LB: destroy standalone balancer [success]', - 'm': 'gce_lb', - 'a': 'name=%s state=absent' % (LB2), - 'r': '127.0.0.1 | success >> {"changed": true, "name": "%s", "state": "absent"}' % (LB2), - }, - {'desc': 'DELETE_LB: destroy LB+HC 
[success]', - 'm': 'gce_lb', - 'a': 'name=%s httphealthcheck_name=%s state=absent' % (LB1, HC1), - 'r': '127.0.0.1 | success >> {"changed": true, "httphealthcheck_name": "%s", "name": "%s", "state": "absent"}' % (HC1,LB1), - }, - ], - 'teardown': [ - 'gcutil deleteforwardingrule --region=%s -f %s %s' % (REGION, LB1, LB2), - 'sleep 10', - 'gcutil deletetargetpool --region=%s -f %s-tp %s-tp' % (REGION, LB1, LB2), - 'sleep 10', - 'gcutil deletehttphealthcheck -f %s %s' % (HC1, HC2), - ], - }, -] - -def main(tests_to_run=[]): - for test in test_cases: - if tests_to_run and test['id'] not in tests_to_run: - continue - print "=> starting/setup '%s:%s'"% (test['id'], test['desc']) - if DEBUG: print "=debug>", test['setup'] - for c in test['setup']: - (s,o) = run(c) - test_i = 1 - for t in test['tests']: - if DEBUG: print "=>debug>", test_i, t['desc'] - # run any test-specific setup commands - if t.has_key('setup'): - for setup in t['setup']: - (status, output) = run(setup) - - # run any 'peek_before' commands - if t.has_key('peek_before') and PEEKING_ENABLED: - for setup in t['peek_before']: - (status, output) = run(setup) - - # run the ansible test if 'a' exists, otherwise - # an empty 'a' directive allows test to run - # setup/teardown for a subsequent test. 
- if t['a']: - if DEBUG: print "=>debug>", t['m'], t['a'] - acmd = "ansible all -o -m %s -a \"%s\"" % (t['m'],t['a']) - #acmd = "ANSIBLE_KEEP_REMOTE_FILES=1 ansible all -vvv -m %s -a \"%s\"" % (t['m'],t['a']) - (s,o) = run(acmd) - - # check expected output - if DEBUG: print "=debug>", o.strip(), "!=", t['r'] - print "=> %s.%02d '%s':" % (test['id'], test_i, t['desc']), - if t.has_key('strip_numbers'): - # strip out all numbers so we don't trip over different - # IP addresses - is_good = (o.strip().translate(None, "0123456789") == t['r'].translate(None, "0123456789")) - else: - is_good = (o.strip() == t['r']) - - if is_good: - print "PASS" - else: - print "FAIL" - if VERBOSE: - print "=>", acmd - print "=> Expected:", t['r'] - print "=> Got:", o.strip() - - # run any 'peek_after' commands - if t.has_key('peek_after') and PEEKING_ENABLED: - for setup in t['peek_after']: - (status, output) = run(setup) - - # run any test-specific teardown commands - if t.has_key('teardown'): - for td in t['teardown']: - (status, output) = run(td) - test_i += 1 - - print "=> completing/teardown '%s:%s'" % (test['id'], test['desc']) - if DEBUG: print "=debug>", test['teardown'] - for c in test['teardown']: - (s,o) = run(c) - - -if __name__ == '__main__': - tests_to_run = [] - if len(sys.argv) == 2: - if sys.argv[1] in ["--help", "--list"]: - print "usage: %s [id1,id2,...,idN]" % sys.argv[0] - print " * An empty argument list will execute all tests" - print " * Do not need to specify tests in numerical order" - print " * List test categories with --list or --help" - print "" - for test in test_cases: - print "\t%s:%s" % (test['id'], test['desc']) - sys.exit(0) - else: - tests_to_run = sys.argv[1].split(',') - main(tests_to_run) From ffee9a8fe03ca7605c8f6afefda9b73e4634fab4 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Fri, 26 Sep 2014 18:23:57 -0400 Subject: [PATCH 093/813] Docsite formatting --- hacking/module_formatter.py | 3 +-- hacking/templates/rst.j2 | 31 
++++++++++++++++++++----------- 2 files changed, 21 insertions(+), 13 deletions(-) diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py index 3a6d372631..f7d8570e93 100755 --- a/hacking/module_formatter.py +++ b/hacking/module_formatter.py @@ -210,8 +210,7 @@ def process_module(module, options, env, template, outputname, module_map): if doc is None: return "SKIPPED" - if "core" in module: - print "CORE" + if "/core/" in fname: doc['core'] = True else: doc['core'] = False diff --git a/hacking/templates/rst.j2 b/hacking/templates/rst.j2 index 93213f47dd..ad7040a820 100644 --- a/hacking/templates/rst.j2 +++ b/hacking/templates/rst.j2 @@ -101,25 +101,34 @@ Examples {% endfor %} {% endif %} -Developers, Testers, and Bug Reporting --------------------------------------- -{% set repo = "https://github.com/ansible/ansible-modules-extras" %} -{% set tracker = "https://github.com/ansible/ansible-modules-extras/issues" %} {% if core %} - {% set repo = "https://github.com/ansible/ansible-modules-core" %} - {% set tracker = "https://github.com/ansible/ansible-modules-core/issues" %} -{% endif %} + +This is a Core Module +--------------------- + +This source of this module is hosted on GitHub in the `ansible-modules-core `_ repo. -This source of this module is hosted on GitHub in the `{{ repo }} <{{ repo }}>`_ repo. - -If you believe you have found a bug in this module, and are already running the latest stable or development version of Ansbile, first look in the issue tracker at `{{ tracker }} <{{ tracker }}>`_ to see if a bug has already been filed. If not, we would be greatful if you would file one. +If you believe you have found a bug in this module, and are already running the latest stable or development version of Ansbile, first look in the `issue tracker at github.com/ansible/ansible-modules-core `_ to see if a bug has already been filed. If not, we would be greatful if you would file one. 
Should you have a question rather than a bug report, inquries are welcome on the `ansible-project google group ` or on Ansible's "#ansible" channel, located on irc.freenode.net. Development oriented topics should instead use the similar `ansible-devel google group `_. Documentation updates for this module can also be edited directly by submitting a pull request to the module source code, just look for the "DOCUMENTATION" block in the source tree. -{% if not core %} +This is a "core" ansible module, which means it will receive slightly higher priority for all requests than those in the "extras" repos. + +{% else %} + +This is an Extras Module +------------------------ + +This source of this module is hosted on GitHub in the `ansible-modules-extras `_ repo. + +If you believe you have found a bug in this module, and are already running the latest stable or development version of Ansible, first look in the `issue tracker at github.com/ansible/ansible-modules-extras `_ to see if a bug has already been filed. If not, we would be grateful if you would file one. + +Should you have a question rather than a bug report, inquiries are welcome on the `ansible-project google group ` or on Ansible's "#ansible" channel, located on irc.freenode.net. Development oriented topics should instead use the similar `ansible-devel google group `_. + +Documentation updates for this module can also be edited directly by submitting a pull request to the module source code, just look for the "DOCUMENTATION" block in the source tree. Note that this module is designated an "extras" module. Non-core modules are still fully usable, but may receive slightly lower response rates for issues and pull requests. Popular "extras" modules may be promoted to core modules over time.
From a25da4af05f9e7f2b628bd3804540994f005d5b1 Mon Sep 17 00:00:00 2001 From: Bruno BAILLUET Date: Sat, 27 Sep 2014 17:39:49 +0200 Subject: [PATCH 094/813] Add a new inventory parameter (ansible_sudo_exe) to specify sudo command path. --- docsite/rst/intro_inventory.rst | 2 ++ lib/ansible/runner/__init__.py | 3 +++ lib/ansible/runner/connection_plugins/accelerate.py | 2 +- lib/ansible/runner/connection_plugins/local.py | 2 +- lib/ansible/runner/connection_plugins/paramiko_ssh.py | 2 +- lib/ansible/runner/connection_plugins/ssh.py | 2 +- lib/ansible/utils/__init__.py | 4 ++-- test/units/TestUtils.py | 2 +- 8 files changed, 12 insertions(+), 7 deletions(-) diff --git a/docsite/rst/intro_inventory.rst b/docsite/rst/intro_inventory.rst index a70f2b059c..16360b1c91 100644 --- a/docsite/rst/intro_inventory.rst +++ b/docsite/rst/intro_inventory.rst @@ -207,6 +207,8 @@ mentioned:: The ssh password to use (this is insecure, we strongly recommend using --ask-pass or SSH keys) ansible_sudo_pass The sudo password to use (this is insecure, we strongly recommend using --ask-sudo-pass) + ansible_sudo_exe + The sudo command path. ansible_connection Connection type of the host. Candidates are local, ssh or paramiko. The default is paramiko before Ansible 1.2, and 'smart' afterwards which detects whether usage of 'ssh' would be feasible based on whether ControlPersist is supported. 
ansible_ssh_private_key_file diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index a1133fdbad..944e574e6a 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -154,6 +154,7 @@ class Runner(object): run_hosts=None, # an optional list of pre-calculated hosts to run on no_log=False, # option to enable/disable logging for a given task run_once=False, # option to enable/disable host bypass loop for a given task + sudo_exe=C.DEFAULT_SUDO_EXE, # ex: /usr/local/bin/sudo ): # used to lock multiprocess inputs and outputs at various levels @@ -212,6 +213,7 @@ class Runner(object): self.vault_pass = vault_pass self.no_log = no_log self.run_once = run_once + self.sudo_exe = sudo_exe if self.transport == 'smart': # if the transport is 'smart' see if SSH can support ControlPersist if not use paramiko @@ -810,6 +812,7 @@ class Runner(object): self.sudo_pass = inject.get('ansible_sudo_pass', self.sudo_pass) self.su = inject.get('ansible_su', self.su) self.su_pass = inject.get('ansible_su_pass', self.su_pass) + self.sudo_exe = inject.get('ansible_sudo_exe', self.sudo_exe) # select default root user in case self.sudo requested # but no user specified; happens e.g. 
in host vars when diff --git a/lib/ansible/runner/connection_plugins/accelerate.py b/lib/ansible/runner/connection_plugins/accelerate.py index 8277d805de..a31124e119 100644 --- a/lib/ansible/runner/connection_plugins/accelerate.py +++ b/lib/ansible/runner/connection_plugins/accelerate.py @@ -239,7 +239,7 @@ class Connection(object): executable = constants.DEFAULT_EXECUTABLE if self.runner.sudo and sudoable and sudo_user: - cmd, prompt, success_key = utils.make_sudo_cmd(sudo_user, executable, cmd) + cmd, prompt, success_key = utils.make_sudo_cmd(self.runner.sudo_exe, sudo_user, executable, cmd) vvv("EXEC COMMAND %s" % cmd) diff --git a/lib/ansible/runner/connection_plugins/local.py b/lib/ansible/runner/connection_plugins/local.py index ec57afcecf..e282076ee1 100644 --- a/lib/ansible/runner/connection_plugins/local.py +++ b/lib/ansible/runner/connection_plugins/local.py @@ -57,7 +57,7 @@ class Connection(object): else: local_cmd = cmd else: - local_cmd, prompt, success_key = utils.make_sudo_cmd(sudo_user, executable, cmd) + local_cmd, prompt, success_key = utils.make_sudo_cmd(self.runner.sudo_exe, sudo_user, executable, cmd) executable = executable.split()[0] if executable else None vvv("EXEC %s" % (local_cmd), host=self.host) diff --git a/lib/ansible/runner/connection_plugins/paramiko_ssh.py b/lib/ansible/runner/connection_plugins/paramiko_ssh.py index dc02b047f8..59932ebb7d 100644 --- a/lib/ansible/runner/connection_plugins/paramiko_ssh.py +++ b/lib/ansible/runner/connection_plugins/paramiko_ssh.py @@ -225,7 +225,7 @@ class Connection(object): width=int(os.getenv('COLUMNS', 0)), height=int(os.getenv('LINES', 0))) if self.runner.sudo or sudoable: - shcmd, prompt, success_key = utils.make_sudo_cmd(sudo_user, executable, cmd) + shcmd, prompt, success_key = utils.make_sudo_cmd(self.runner.sudo_exe, sudo_user, executable, cmd) elif self.runner.su or su: shcmd, prompt, success_key = utils.make_su_cmd(su_user, executable, cmd) diff --git 
a/lib/ansible/runner/connection_plugins/ssh.py b/lib/ansible/runner/connection_plugins/ssh.py index cbba765903..e8e431f401 100644 --- a/lib/ansible/runner/connection_plugins/ssh.py +++ b/lib/ansible/runner/connection_plugins/ssh.py @@ -283,7 +283,7 @@ class Connection(object): else: ssh_cmd.append(cmd) else: - sudocmd, prompt, success_key = utils.make_sudo_cmd(sudo_user, executable, cmd) + sudocmd, prompt, success_key = utils.make_sudo_cmd(self.runner.sudo_exe, sudo_user, executable, cmd) ssh_cmd.append(sudocmd) vvv("EXEC %s" % ' '.join(ssh_cmd), host=self.host) diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index 9ad1254899..f40a83eec0 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -1148,7 +1148,7 @@ def boolean(value): else: return False -def make_sudo_cmd(sudo_user, executable, cmd): +def make_sudo_cmd(sudo_exe, sudo_user, executable, cmd): """ helper function for connection plugins to create sudo commands """ @@ -1163,7 +1163,7 @@ def make_sudo_cmd(sudo_user, executable, cmd): prompt = '[sudo via ansible, key=%s] password: ' % randbits success_key = 'SUDO-SUCCESS-%s' % randbits sudocmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % ( - C.DEFAULT_SUDO_EXE, C.DEFAULT_SUDO_EXE, C.DEFAULT_SUDO_FLAGS, + sudo_exe, sudo_exe, C.DEFAULT_SUDO_FLAGS, prompt, sudo_user, executable or '$SHELL', pipes.quote('echo %s; %s' % (success_key, cmd))) return ('/bin/sh -c ' + pipes.quote(sudocmd), prompt, success_key) diff --git a/test/units/TestUtils.py b/test/units/TestUtils.py index 73ef979674..7920423890 100644 --- a/test/units/TestUtils.py +++ b/test/units/TestUtils.py @@ -471,7 +471,7 @@ class TestUtils(unittest.TestCase): self.assertEqual(ansible.utils.boolean("foo"), False) def test_make_sudo_cmd(self): - cmd = ansible.utils.make_sudo_cmd('root', '/bin/sh', '/bin/ls') + cmd = ansible.utils.make_sudo_cmd(C.DEFAULT_SUDO_EXE, 'root', '/bin/sh', '/bin/ls') self.assertTrue(isinstance(cmd, tuple)) 
self.assertEqual(len(cmd), 3) self.assertTrue('-u root' in cmd[0]) From 3513c9adc15c9da721f78abd22dc2547fa43b75f Mon Sep 17 00:00:00 2001 From: Will Thames Date: Sun, 28 Sep 2014 10:15:05 +1000 Subject: [PATCH 095/813] Updated installation instructions for submodules Added `--init --recursive` to installation instructions as per @bcoca's suggestion on ansible-project mailing list --- docsite/rst/intro_installation.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/intro_installation.rst b/docsite/rst/intro_installation.rst index a03ccdb0fb..daae340947 100644 --- a/docsite/rst/intro_installation.rst +++ b/docsite/rst/intro_installation.rst @@ -123,7 +123,7 @@ Note when updating ansible, be sure to not only update the source tree, but also which point at Ansible's own modules (not the same kind of modules, alas). $ git pull --rebase - $ git submodule update + $ git submodule update --init --recursive Once running the env-setup script you'll be running from checkout and the default inventory file will be /etc/ansible/hosts. You can optionally specify an inventory file (see :doc:`intro_inventory`) From 07287a2eaa8d12ccb9bec22a742c6d5b166930da Mon Sep 17 00:00:00 2001 From: Nitin Madhok Date: Sun, 28 Sep 2014 08:21:04 -0400 Subject: [PATCH 096/813] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 99ba9701a2..150c56b1bb 100644 --- a/README.md +++ b/README.md @@ -9,7 +9,7 @@ Ansible is a radically simple configuration-management, application deployment, Read the documentation and more at http://ansible.com/ Many users run straight from the development branch (it's generally fine to do so), but you might also wish to consume a release. You can find -instructions [here](http://docs.ansible.com/intro_getting_started.html) for a variety of platforms. 
If you want a tarball of the last release, go to [releases.ansible.com](http://releases.ansible.com/ansible) and you can also install with pip. +instructions [here](http://docs.ansible.com/intro_getting_started.html) for a variety of platforms. If you want to download a tarball of a previous/latest release, go to [releases.ansible.com](http://releases.ansible.com/ansible). You can also install Ansible using pip. Design Principles ================= From 88e08cb54da10b045a32defa6a6d827701195bee Mon Sep 17 00:00:00 2001 From: Nitin Madhok Date: Sun, 28 Sep 2014 08:25:08 -0400 Subject: [PATCH 097/813] Doc fix in intro_installation.rst Missing code block --- docsite/rst/intro_installation.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docsite/rst/intro_installation.rst b/docsite/rst/intro_installation.rst index a03ccdb0fb..26fde51fe6 100644 --- a/docsite/rst/intro_installation.rst +++ b/docsite/rst/intro_installation.rst @@ -122,6 +122,8 @@ Ansible also uses the following Python modules that need to be installed:: Note when updating ansible, be sure to not only update the source tree, but also the "submodules" in git which point at Ansible's own modules (not the same kind of modules, alas). +.. code-block:: bash + $ git pull --rebase $ git submodule update From f811a0f89a88b64ae317d9d3d4344d61ee2330ba Mon Sep 17 00:00:00 2001 From: Nitin Madhok Date: Sun, 28 Sep 2014 08:30:35 -0400 Subject: [PATCH 098/813] Update intro_configuration.rst --- docsite/rst/intro_configuration.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst index eb57b2bf18..cf4b9b6122 100644 --- a/docsite/rst/intro_configuration.rst +++ b/docsite/rst/intro_configuration.rst @@ -154,9 +154,9 @@ command module appear to be simplified by using a default Ansible module instead. This can include reminders to use the 'git' module instead of shell commands to execute 'git'. 
Using modules when possible over arbitrary shell commands can lead to more reliable and consistent playbook runs, and -also easier to maintain playbooks. +also easier to maintain playbooks:: - command_warnings=False + command_warnings = False These warnings can be silenced by adjusting the following setting or adding warn=yes or warn=no to the end of the command line From dc750e652623383946380593c56b38b708fae244 Mon Sep 17 00:00:00 2001 From: Martin Ueding Date: Sun, 28 Sep 2014 15:27:08 +0200 Subject: [PATCH 099/813] Fix link in reST template There was a missing trailing underscore (`_`) that would have marked the content in the backticks as a link. This adds it and fixes the link on every core module page. --- hacking/templates/rst.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hacking/templates/rst.j2 b/hacking/templates/rst.j2 index ad7040a820..917c6ba754 100644 --- a/hacking/templates/rst.j2 +++ b/hacking/templates/rst.j2 @@ -111,7 +111,7 @@ This source of this module is hosted on GitHub in the `ansible-modules-core `_ to see if a bug has already been filed. If not, we would be greatful if you would file one. -Should you have a question rather than a bug report, inquries are welcome on the `ansible-project google group ` or on Ansible's "#ansible" channel, located on irc.freenode.net. Development oriented topics should instead use the similar `ansible-devel google group `_. +Should you have a question rather than a bug report, inquries are welcome on the `ansible-project google group `_ or on Ansible's "#ansible" channel, located on irc.freenode.net. Development oriented topics should instead use the similar `ansible-devel google group `_. Documentation updates for this module can also be edited directly by submitting a pull request to the module source code, just look for the "DOCUMENTATION" block in the source tree. 
From d1476aeb01497cf2749c65a76792c8b951846157 Mon Sep 17 00:00:00 2001 From: Will Thames Date: Sun, 28 Sep 2014 11:03:47 +1000 Subject: [PATCH 100/813] Updated version info to include submodule information `ansible --version` etc. now include information about submodules ``` ansible 1.8 (submodule_ansible_version ffee9a8fe0) last updated 2014/09/28 11:03:14 (GMT +1000) lib/ansible/modules/core: (ec2_snapshot_remove 3a77c31ecb) last updated 2014/09/27 18:23:31 (GMT +1000) lib/ansible/modules/extras: (detached HEAD 110250d344) last updated 2014/09/27 14:33:42 (GMT +1000) ``` Also improved handling of detached HEAD when printing out version information. --- lib/ansible/utils/__init__.py | 41 ++++++++++++++++++++++++++--------- 1 file changed, 31 insertions(+), 10 deletions(-) diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index 9ad1254899..3f26e0779f 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -832,11 +832,10 @@ def default(value, function): return function() return value -def _gitinfo(): + +def _gitrepoinfo(repo_path): ''' returns a string containing git branch, commit id and commit date ''' result = None - repo_path = os.path.join(os.path.dirname(__file__), '..', '..', '..', '.git') - if os.path.exists(repo_path): # Check if the .git is a file. If it is a file, it means that we are in a submodule structure. 
if os.path.isfile(repo_path): @@ -857,17 +856,39 @@ def _gitinfo(): f = open(branch_path) commit = f.readline()[:10] f.close() - date = time.localtime(os.stat(branch_path).st_mtime) - if time.daylight == 0: - offset = time.timezone - else: - offset = time.altzone - result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit, - time.strftime("%Y/%m/%d %H:%M:%S", date), offset / -36) + else: + # detached HEAD + commit = branch[:10] + branch = 'detached HEAD' + branch_path = os.path.join(repo_path, "HEAD") + + date = time.localtime(os.stat(branch_path).st_mtime) + if time.daylight == 0: + offset = time.timezone + else: + offset = time.altzone + result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit, + time.strftime("%Y/%m/%d %H:%M:%S", date), offset / -36) else: result = '' return result + +def _gitinfo(): + basedir = os.path.join(os.path.dirname(__file__), '..', '..', '..') + repo_path = os.path.join(basedir, '.git') + result = _gitrepoinfo(repo_path) + submodules = os.path.join(basedir, '.gitmodules') + f = open(submodules) + for line in f: + tokens = line.strip().split(' ') + if tokens[0] == 'path': + repo_path = tokens[2] + result += "\n {0}: {1}".format(repo_path, _gitrepoinfo(os.path.join(basedir, repo_path, '.git'))) + f.close() + return result + + def version(prog): result = "{0} {1}".format(prog, __version__) gitinfo = _gitinfo() From 952a36920c745b94e3ef1d3ae41e81364456f296 Mon Sep 17 00:00:00 2001 From: Will Thames Date: Sun, 28 Sep 2014 13:19:56 +1000 Subject: [PATCH 101/813] Cater for uninitialized submodules Output a useful message if `git submodule update --init --recursive` not yet performed ``` $ ansible --version ansible 1.8 (submodule_ansible_version 59ae596484) last updated 2014/09/28 13:20:51 (GMT +1000) lib/ansible/modules/core: (detached HEAD 617a52b20d) last updated 2014/09/28 13:15:25 (GMT +1000) lib/ansible/modules/extras: not found - use git submodule update --init lib/ansible/modules/extras ``` --- 
lib/ansible/utils/__init__.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index 3f26e0779f..ba65028f9f 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -883,8 +883,11 @@ def _gitinfo(): for line in f: tokens = line.strip().split(' ') if tokens[0] == 'path': - repo_path = tokens[2] - result += "\n {0}: {1}".format(repo_path, _gitrepoinfo(os.path.join(basedir, repo_path, '.git'))) + submodule_path = tokens[2] + submodule_info =_gitrepoinfo(os.path.join(basedir, submodule_path, '.git')) + if not submodule_info: + submodule_info = ' not found - use git submodule update --init ' + submodule_path + result += "\n {0}: {1}".format(submodule_path, submodule_info) f.close() return result From 21c3784a43baaece7582b67c86827da00acedc80 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Sun, 28 Sep 2014 11:24:32 -0400 Subject: [PATCH 102/813] If submodules are not found, don't error out. 
--- lib/ansible/utils/__init__.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index ba65028f9f..811eb7eefc 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -833,7 +833,7 @@ def default(value, function): return value -def _gitrepoinfo(repo_path): +def _git_repo_info(repo_path): ''' returns a string containing git branch, commit id and commit date ''' result = None if os.path.exists(repo_path): @@ -877,14 +877,16 @@ def _gitrepoinfo(repo_path): def _gitinfo(): basedir = os.path.join(os.path.dirname(__file__), '..', '..', '..') repo_path = os.path.join(basedir, '.git') - result = _gitrepoinfo(repo_path) + result = _git_repo_info(repo_path) submodules = os.path.join(basedir, '.gitmodules') + if not os.path.exists(submodules): + return result f = open(submodules) for line in f: tokens = line.strip().split(' ') if tokens[0] == 'path': submodule_path = tokens[2] - submodule_info =_gitrepoinfo(os.path.join(basedir, submodule_path, '.git')) + submodule_info =_git_repo_info(os.path.join(basedir, submodule_path, '.git')) if not submodule_info: submodule_info = ' not found - use git submodule update --init ' + submodule_path result += "\n {0}: {1}".format(submodule_path, submodule_info) From 91c58902479aeeaaab94739d2b2bcedff4577b6e Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Sun, 28 Sep 2014 11:31:51 -0400 Subject: [PATCH 103/813] Add some helpful detection if core modules are not found for those running on the development branch. 
--- lib/ansible/runner/__init__.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index f727bc6e4e..014cb6276a 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -1215,7 +1215,11 @@ class Runner(object): module_suffixes = getattr(conn, 'default_suffixes', None) module_path = utils.plugins.module_finder.find_plugin(module_name, module_suffixes) if module_path is None: - raise errors.AnsibleFileNotFound("module %s not found in configured module paths" % (module_name)) + module_path2 = utils.plugins.module_finder.find_plugin('ping', module_suffixes) + if module_path2 is not None: + raise errors.AnsibleFileNotFound("module %s not found in configured module paths" % (module_name)) + else: + raise errors.AnsibleFileNotFound("module %s not found in configured module paths. Additionally, core modules are missing. If this is a checkout, run 'git submodule update --init --recursive' to correct this problem." % (module_name)) # insert shared code and arguments into the module From 1d17881960ee3251b082daaf6d3a1c2bcaf99133 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Sun, 28 Sep 2014 11:39:04 -0400 Subject: [PATCH 104/813] Add module search path to --version output. 
--- lib/ansible/utils/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index 811eb7eefc..fb43005885 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -899,6 +899,7 @@ def version(prog): gitinfo = _gitinfo() if gitinfo: result = result + " {0}".format(gitinfo) + result = result + "\n configured module search path = %s" % C.DEFAULT_MODULE_PATH return result def version_info(gitinfo=False): From 1b7f7aa597d4d8d22da0e759e2afb6de4a57ceb6 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Sun, 28 Sep 2014 11:57:06 -0400 Subject: [PATCH 105/813] Adding a stub for responding to new tickets that should be filed in the module repos. --- ticket_stubs/module_repo.md | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 ticket_stubs/module_repo.md diff --git a/ticket_stubs/module_repo.md b/ticket_stubs/module_repo.md new file mode 100644 index 0000000000..7cfbf6c1de --- /dev/null +++ b/ticket_stubs/module_repo.md @@ -0,0 +1,29 @@ +Module Repo Information +======================= + +Hi! + +Thanks very much for your interest in Ansible. It sincerely means a lot to us. + +This appears to be a submission about a module, and aside from action_plugins, if you know what those are, the modules +in ansible are now moved to two separate repos. We would appreciate if you can submit this there instead.
+ +If this is about a new module, submit pull requests or ideas to: + + * https://github.com/ansible/ansible-modules-extras + +If this is about an existing module: + + * Find the module at http://docs.ansible.com/list_of_all_modules.html + * Open the documentation page for that module + * If the bottom of the docs say "This is an extras module", submit your ticket to https://github.com/ansible/ansible-modules-extras + * Otherwise, submit your module to https://github.com/ansible/ansible-modules-core + +Additionally, should you need more help with this, you can ask questions on: + + * IRC: #ansible on irc.freenode.net + * mailing list: https://groups.google.com/forum/#!forum/ansible-project + +Thanks! + + From 3908d50b03f7036267848b2cab228cf69d0094a9 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Sun, 28 Sep 2014 12:09:53 -0400 Subject: [PATCH 106/813] Tolerate no module search path. --- lib/ansible/utils/plugins.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/lib/ansible/utils/plugins.py b/lib/ansible/utils/plugins.py index 9349f133c4..816bf19c69 100644 --- a/lib/ansible/utils/plugins.py +++ b/lib/ansible/utils/plugins.py @@ -126,15 +126,16 @@ class PluginLoader(object): ret.append(fullpath) # look in any configured plugin paths, allow one level deep for subcategories - configured_paths = self.config.split(os.pathsep) - for path in configured_paths: - path = os.path.realpath(os.path.expanduser(path)) - contents = glob.glob("%s/*" % path) - for c in contents: - if os.path.isdir(c) and c not in ret: - ret.append(c) - if path not in ret: - ret.append(path) + if self.config is not None: + configured_paths = self.config.split(os.pathsep) + for path in configured_paths: + path = os.path.realpath(os.path.expanduser(path)) + contents = glob.glob("%s/*" % path) + for c in contents: + if os.path.isdir(c) and c not in ret: + ret.append(c) + if path not in ret: + ret.append(path) # look for any plugins installed in the package 
subtree ret.extend(self._get_package_paths()) From c02e8d8c8060acbb462438b57d4712a243841402 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Sun, 28 Sep 2014 12:17:03 -0400 Subject: [PATCH 107/813] Don't search powershell modules unless using the winrm connection. --- lib/ansible/modules/core | 2 +- lib/ansible/runner/__init__.py | 2 +- lib/ansible/utils/plugins.py | 7 +++++-- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 617a52b20d..9b35a39121 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 617a52b20d512a4eb5e88fdc76658b220ff80266 +Subproject commit 9b35a391213fe87834af5ebc907109de2bc0005f diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 014cb6276a..a76e6863ba 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -1213,7 +1213,7 @@ class Runner(object): # Search module path(s) for named module. module_suffixes = getattr(conn, 'default_suffixes', None) - module_path = utils.plugins.module_finder.find_plugin(module_name, module_suffixes) + module_path = utils.plugins.module_finder.find_plugin(module_name, module_suffixes, transport=self.transport) if module_path is None: module_path2 = utils.plugins.module_finder.find_plugin('ping', module_suffixes) if module_path2 is not None: diff --git a/lib/ansible/utils/plugins.py b/lib/ansible/utils/plugins.py index 816bf19c69..800802bc15 100644 --- a/lib/ansible/utils/plugins.py +++ b/lib/ansible/utils/plugins.py @@ -159,14 +159,17 @@ class PluginLoader(object): if directory not in self._extra_dirs: self._extra_dirs.append(directory) - def find_plugin(self, name, suffixes=None): + def find_plugin(self, name, suffixes=None, transport=''): ''' Find a plugin named name ''' if not suffixes: if self.class_name: suffixes = ['.py'] else: - suffixes = ['', '.ps1', '.py'] + if transport == 'winrm': + suffixes = ['.ps1', ''] + else: + suffixes = 
['.py', ''] for suffix in suffixes: full_name = '%s%s' % (name, suffix) From 56b90390f4db137c1604b756d1c7102f25812d78 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Sun, 28 Sep 2014 15:22:33 -0400 Subject: [PATCH 108/813] Update README.md --- README.md | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index 150c56b1bb..383b3a8caf 100644 --- a/README.md +++ b/README.md @@ -4,22 +4,25 @@ Ansible ======= -Ansible is a radically simple configuration-management, application deployment, task-execution, and multinode orchestration engine. +Ansible is a radically simple IT automation system. It handles configuration-management, application deployment, cloud provisioning, ad-hoc task-execution, and multinode orchestration - including trivializing things like zero downtime rolling updates with load balancers. Read the documentation and more at http://ansible.com/ -Many users run straight from the development branch (it's generally fine to do so), but you might also wish to consume a release. You can find -instructions [here](http://docs.ansible.com/intro_getting_started.html) for a variety of platforms. If you want to download a tarball of a previous/latest release, go to [releases.ansible.com](http://releases.ansible.com/ansible). You can also install Ansible using pip. +Many users run straight from the development branch (it's generally fine to do so), but you might also wish to consume a release. + +You can find instructions [here](http://docs.ansible.com/intro_getting_started.html) for a variety of platforms. If you decide to go with the development branch, be sure to run "git submodule update --init --recursive" after doing a checkout. + +If you want to download a tarball of a release, go to [releases.ansible.com](http://releases.ansible.com/ansible), though most users use yum (using the EPEL instructions linked above), apt (using the PPA instructions linked above), or "pip install ansible". 
Design Principles ================= * Have a dead simple setup process and a minimal learning curve - * Be super fast & parallel by default - * Require no server or client daemons; use existing SSHd - * Use a language that is both machine and human friendly + * Manage machines very quickly and in parallel + * Avoid custom-agents and additional open ports, be agentless by leveraging the existing SSH daemon + * Describe infrastructure in a language that is both machine and human friendly * Focus on security and easy auditability/review/rewriting of content - * Manage remote machines instantly, without bootstrapping + * Manage new remote machines instantly, without bootstrapping any software * Allow module development in any dynamic language, not just Python * Be usable as non-root * Be the easiest IT automation system to use, ever. @@ -27,8 +30,11 @@ Design Principles Get Involved ============ - * Read [Contributing.md](https://github.com/ansible/ansible/blob/devel/CONTRIBUTING.md) for all kinds of ways to contribute to and interact with the project, including mailing list information and how to submit bug reports and code to Ansible. + * Read [Community Information](http://docs.ansible.com/community.html) for all kinds of ways to contribute to and interact with the project, including mailing list information and how to submit bug reports and code to Ansible. * All code submissions are done through pull requests. Take care to make sure no merge commits are in the submission, and use "git rebase" vs "git merge" for this reason. If submitting a large code change (other than modules), it's probably a good idea to join ansible-devel and talk about what you would like to do or add first and to avoid duplicate efforts. This not only helps everyone know what's going on, it also helps save time and effort if we decide some changes are needed. 
+ * Users list: [ansible-project](http://groups.google.com/group/ansible-project) + * Development list: [ansible-devel](http://groups.google.com/group/ansible-devel) + * Announcement list: [ansible-announce](http://groups.google.com/group/ansible-announce) - read only * irc.freenode.net: #ansible Branch Info @@ -36,9 +42,9 @@ Branch Info * Releases are named after Van Halen songs. * The devel branch corresponds to the release actively under development. - * Note that modules are contained in two different git submodule projects, under lib/ansible/modules/{core,extras} + * As of 1.8, modules are kept in different repos, you'll want to follow [core](https://github.com/ansible/ansible-modules-core) and [extras](https://github.com/ansible/ansible-modules-extras) * Various release-X.Y branches exist for previous releases. - * We'd love to have your contributions, read "CONTRIBUTING.md" for process notes. + * We'd love to have your contributions, read [Community Information](http://docs.ansible.com/community.html) for notes on how to get started. Author ====== From 0354d19f73ace24bf39e77be402dceb601cdb64d Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Sun, 28 Sep 2014 15:24:44 -0400 Subject: [PATCH 109/813] Update README.md --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 383b3a8caf..ab08cf027f 100644 --- a/README.md +++ b/README.md @@ -46,11 +46,11 @@ Branch Info * Various release-X.Y branches exist for previous releases. * We'd love to have your contributions, read [Community Information](http://docs.ansible.com/community.html) for notes on how to get started. -Author -====== +Authors +======= -Ansible was created by Michael DeHaan (michael@ansible.com) and has contributions from over +Ansible was created by [Michael DeHaan](github.com/mpdehaan) (michael@ansible.com) and has contributions from over 800 users (and growing). Thanks everyone! 
-[Ansible, Inc](http://ansible.com) +Ansible is sponsored by [Ansible, Inc](http://ansible.com) From df78f51b78654c80ae78de1d8281c5a3264d6ecb Mon Sep 17 00:00:00 2001 From: Vladimir Khramtsov Date: Sun, 28 Sep 2014 23:34:43 +0300 Subject: [PATCH 110/813] Fix grammar in password --- docsite/rst/intro_windows.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/intro_windows.rst b/docsite/rst/intro_windows.rst index 7774a6ce0c..262fb7f0f0 100644 --- a/docsite/rst/intro_windows.rst +++ b/docsite/rst/intro_windows.rst @@ -45,7 +45,7 @@ In group_vars/windows.yml, define the following inventory variables:: # ansible-vault edit group_vars/windows.yml ansible_ssh_user: Administrator - ansible_ssh_pass: SekritPasswordGoesHere + ansible_ssh_pass: SecretPasswordGoesHere ansible_ssh_port: 5986 ansible_connection: winrm From ae2d198342c9bbb691647b897b69b9d9f26522db Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Mon, 29 Sep 2014 08:46:35 -0400 Subject: [PATCH 111/813] Remove install references to library/ --- MANIFEST.in | 2 -- packaging/debian/ansible.install | 1 - packaging/rpm/ansible.spec | 3 --- 3 files changed, 6 deletions(-) diff --git a/MANIFEST.in b/MANIFEST.in index ff3a022108..9db0472b36 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,8 +1,6 @@ include README.md packaging/rpm/ansible.spec COPYING include examples/hosts include examples/ansible.cfg -graft examples/playbooks -include packaging/distutils/setup.py include lib/ansible/module_utils/powershell.ps1 recursive-include docs * include Makefile diff --git a/packaging/debian/ansible.install b/packaging/debian/ansible.install index 30aef22b87..2caf1452fe 100644 --- a/packaging/debian/ansible.install +++ b/packaging/debian/ansible.install @@ -1,5 +1,4 @@ examples/hosts etc/ansible -library/* usr/share/ansible docs/man/man1/*.1 usr/share/man/man1 bin/* usr/bin examples/ansible.cfg etc/ansible diff --git a/packaging/rpm/ansible.spec b/packaging/rpm/ansible.spec index 
4483e7da5d..79e1dd03c7 100644 --- a/packaging/rpm/ansible.spec +++ b/packaging/rpm/ansible.spec @@ -95,7 +95,6 @@ cp examples/ansible.cfg %{buildroot}/etc/ansible/ mkdir -p %{buildroot}/%{_mandir}/man1/ cp -v docs/man/man1/*.1 %{buildroot}/%{_mandir}/man1/ mkdir -p %{buildroot}/%{_datadir}/ansible -cp -rv library/* %{buildroot}/%{_datadir}/ansible/ %clean rm -rf %{buildroot} @@ -110,8 +109,6 @@ rm -rf %{buildroot} %config(noreplace) %{_sysconfdir}/ansible %doc README.md PKG-INFO COPYING %doc %{_mandir}/man1/ansible* -%doc examples/playbooks - %changelog From 459722899d8dd618cd5bdd2ea11ed0c2810e1d5e Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Mon, 29 Sep 2014 09:12:50 -0400 Subject: [PATCH 112/813] Update core link --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 9b35a39121..db5668b84c 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 9b35a391213fe87834af5ebc907109de2bc0005f +Subproject commit db5668b84c3a19498b843d0bfe34574aef40c193 From a0fecd61010b6958aae46f04ce8013d3698e4482 Mon Sep 17 00:00:00 2001 From: James Laska Date: Mon, 29 Sep 2014 09:27:25 -0400 Subject: [PATCH 113/813] Fix packaging to work with new module location Changes include: * Remove references to old module dir from .spec * Use setuptools find_packages find all ansible packages --- MANIFEST.in | 1 + packaging/rpm/ansible.spec | 2 -- setup.py | 20 ++------------------ 3 files changed, 3 insertions(+), 20 deletions(-) diff --git a/MANIFEST.in b/MANIFEST.in index 9db0472b36..5fdfe50f34 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -2,6 +2,7 @@ include README.md packaging/rpm/ansible.spec COPYING include examples/hosts include examples/ansible.cfg include lib/ansible/module_utils/powershell.ps1 +recursive-include lib/ansible/modules * recursive-include docs * include Makefile include VERSION diff --git a/packaging/rpm/ansible.spec 
b/packaging/rpm/ansible.spec index 79e1dd03c7..c6b85fd1ab 100644 --- a/packaging/rpm/ansible.spec +++ b/packaging/rpm/ansible.spec @@ -104,8 +104,6 @@ rm -rf %{buildroot} %{python_sitelib}/ansible* %{_bindir}/ansible* %dir %{_datadir}/ansible -%dir %{_datadir}/ansible/* -%{_datadir}/ansible/*/* %config(noreplace) %{_sysconfdir}/ansible %doc README.md PKG-INFO COPYING %doc %{_mandir}/man1/ansible* diff --git a/setup.py b/setup.py index 872dbefadb..fd3fb0a8a3 100644 --- a/setup.py +++ b/setup.py @@ -7,7 +7,7 @@ from glob import glob sys.path.insert(0, os.path.abspath('lib')) from ansible import __version__, __author__ try: - from setuptools import setup + from setuptools import setup, find_packages except ImportError: print "Ansible now needs setuptools in order to build. " \ "Install it using your package manager (usually python-setuptools) or via pip (pip install setuptools)." @@ -22,23 +22,7 @@ setup(name='ansible', license='GPLv3', install_requires=['paramiko', 'jinja2', "PyYAML", 'setuptools', 'pycrypto >= 2.6'], package_dir={ 'ansible': 'lib/ansible' }, - packages=[ - 'ansible', - 'ansible.cache', - 'ansible.utils', - 'ansible.utils.module_docs_fragments', - 'ansible.inventory', - 'ansible.inventory.vars_plugins', - 'ansible.playbook', - 'ansible.runner', - 'ansible.runner.action_plugins', - 'ansible.runner.lookup_plugins', - 'ansible.runner.connection_plugins', - 'ansible.runner.shell_plugins', - 'ansible.runner.filter_plugins', - 'ansible.callback_plugins', - 'ansible.module_utils' - ], + packages=find_packages('lib'), package_data={ '': ['module_utils/*.ps1'], }, From 4e5dc754fa45ff451bc57c499fee859ac769dbdc Mon Sep 17 00:00:00 2001 From: Bruno BAILLUET Date: Mon, 29 Sep 2014 19:46:41 +0200 Subject: [PATCH 114/813] Added a comment to mention that ansible_sudo_exe is available from version 1.8 and beyond --- docsite/rst/intro_inventory.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/intro_inventory.rst 
b/docsite/rst/intro_inventory.rst index 16360b1c91..5b409e8e65 100644 --- a/docsite/rst/intro_inventory.rst +++ b/docsite/rst/intro_inventory.rst @@ -207,7 +207,7 @@ mentioned:: The ssh password to use (this is insecure, we strongly recommend using --ask-pass or SSH keys) ansible_sudo_pass The sudo password to use (this is insecure, we strongly recommend using --ask-sudo-pass) - ansible_sudo_exe + ansible_sudo_exe (new in version 1.8) The sudo command path. ansible_connection Connection type of the host. Candidates are local, ssh or paramiko. The default is paramiko before Ansible 1.2, and 'smart' afterwards which detects whether usage of 'ssh' would be feasible based on whether ControlPersist is supported. From 8cadfc2743dc9a6870d96bf976856b436fc2fbcb Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Mon, 29 Sep 2014 15:13:00 -0400 Subject: [PATCH 115/813] Add ticket stubs to be used in the great ticket move. --- ticket_stubs/_module_issue_move.md | 35 ++++++++++++++++++++++++ ticket_stubs/_module_pr_move.md | 44 ++++++++++++++++++++++++++++++ 2 files changed, 79 insertions(+) create mode 100644 ticket_stubs/_module_issue_move.md create mode 100644 ticket_stubs/_module_pr_move.md diff --git a/ticket_stubs/_module_issue_move.md b/ticket_stubs/_module_issue_move.md new file mode 100644 index 0000000000..9153946dac --- /dev/null +++ b/ticket_stubs/_module_issue_move.md @@ -0,0 +1,35 @@ +Module Repo Information +======================= + +Hi! + +Thanks very much for your interest in Ansible. It sincerely means a lot to us. + +On September 26, 2014, due to enormous levels of contribution to the project Ansible decided to reorganize module repos, making it easier +for developers to work on the project and for us to more easily manage new contributions and tickets. 
+ +We split modules from the main project off into two repos, http://github.com/ansible/ansible-modules-core and http://github.com/ansible/ansible-modules-extras + +If you would still like this ticket attended to, we will need your help in having it reopened in one of the two new repos, and instructions are provided below. + +We apologize that we are not able to make this transition happen seamlessly, though this is a one-time change and your help is greatly appreciated -- +this will greatly improve velocity going forward. + +Both sets of modules will ship with Ansible, though they'll recieve slightly different ticket handling. + +To locate where a module lives between 'core' and 'extras' + + * Find the module at http://docs.ansible.com/list_of_all_modules.html + * Open the documentation page for that module + * If the bottom of the docs say "This is an extras module", submit your ticket to https://github.com/ansible/ansible-modules-extras + * Otherwise, submit your pull request to update the existing module to https://github.com/ansible/ansible-modules-core + * action_plugins (modules with server side components) still live in the main repo. If your ticket affects both, open the ticket + on the module repo just the same. + +Additionally, should you need more help with this, you can ask questions on: + + * the ansible-project mailing list: https://groups.google.com/forum/#!forum/ansible-project + +Thanks you very much! + + diff --git a/ticket_stubs/_module_pr_move.md b/ticket_stubs/_module_pr_move.md new file mode 100644 index 0000000000..67ccb8065e --- /dev/null +++ b/ticket_stubs/_module_pr_move.md @@ -0,0 +1,44 @@ +Module Repo Information +======================= + +Hi! + +Thanks very much for your interest in Ansible. It sincerely means a lot to us. 
+ +On September 26, 2014, due to enormous levels of contribution to the project Ansible decided to reorganize module repos, making it easier +for developers to work on the project and for us to more easily manage new contributions and tickets. + +We split modules from the main project off into two repos, http://github.com/ansible/ansible-modules-core and http://github.com/ansible/ansible-modules-extras + +If you still would like this pull request merged, we will need your help making this target the new repo. If you do not take any action, this +pull request unfortunately cannot be applied. + +We apologize that we are not able to make this transition happen seamlessly, though this is a one-time change and your help is greatly appreciated -- +this will greatly improve velocity going forward. + +Both sets of modules will ship with Ansible, though they'll recieve slightly different ticket handling. + +To locate where a module lives between 'core' and 'extras' + + * Find the module at http://docs.ansible.com/list_of_all_modules.html + * Open the documentation page for that module + * If the bottom of the docs say "This is an extras module", submit your ticket to https://github.com/ansible/ansible-modules-extras + * Otherwise, submit your pull request to update the existing module to https://github.com/ansible/ansible-modules-core + * Note that python modules in ansible now also end in ".py" and this extension is required for new contributions. + * action_plugins (modules with server side components) still live in the main repo. If your pull request touches both, which should be + exceedingly rare, submit two new pull requests and make sure to mention the links to each other in the comments. 
+ +Otherwise, if this is a new module: + + * Submit your pull request to add a module to https://github.com/ansible/ansible-modules-extras + +It may be possible to re-patriate your pull requests automatically, one user-submitted approach for advanced git users +has been suggested at https://gist.github.com/willthames/afbaaab0c9681ed45619 + +Additionally, should you need more help with this, you can ask questions on: + + * the development mailing list: https://groups.google.com/forum/#!forum/ansible-devel + +Thanks you very much! + + From 92f4b09a9beca63d35cbc73028464cb82149b23e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 29 Sep 2014 15:30:09 -0400 Subject: [PATCH 116/813] Spelling in ticket stubs --- ticket_stubs/_module_issue_move.md | 2 +- ticket_stubs/_module_pr_move.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ticket_stubs/_module_issue_move.md b/ticket_stubs/_module_issue_move.md index 9153946dac..3fe51bdb07 100644 --- a/ticket_stubs/_module_issue_move.md +++ b/ticket_stubs/_module_issue_move.md @@ -15,7 +15,7 @@ If you would still like this ticket attended to, we will need your help in havin We apologize that we are not able to make this transition happen seamlessly, though this is a one-time change and your help is greatly appreciated -- this will greatly improve velocity going forward. -Both sets of modules will ship with Ansible, though they'll recieve slightly different ticket handling. +Both sets of modules will ship with Ansible, though they'll receive slightly different ticket handling. To locate where a module lives between 'core' and 'extras' diff --git a/ticket_stubs/_module_pr_move.md b/ticket_stubs/_module_pr_move.md index 67ccb8065e..f2ed8537c8 100644 --- a/ticket_stubs/_module_pr_move.md +++ b/ticket_stubs/_module_pr_move.md @@ -16,7 +16,7 @@ pull request unfortunately cannot be applied. 
We apologize that we are not able to make this transition happen seamlessly, though this is a one-time change and your help is greatly appreciated -- this will greatly improve velocity going forward. -Both sets of modules will ship with Ansible, though they'll recieve slightly different ticket handling. +Both sets of modules will ship with Ansible, though they'll receive slightly different ticket handling. To locate where a module lives between 'core' and 'extras' From 9bc114e94887dfd1f491972dabb10bdaae0deae9 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 29 Sep 2014 15:36:48 -0400 Subject: [PATCH 117/813] Minor grammar fixes --- ticket_stubs/bug_confirmed_p1_or_p2.md | 2 +- ticket_stubs/great_idea.md | 4 ++-- ticket_stubs/needs_info.md | 4 ++-- ticket_stubs/pr_cleanup_commits.md | 4 ++-- ticket_stubs/pr_needs_rebase.md | 4 ++-- ticket_stubs/pr_needs_revision.md | 4 ++-- ticket_stubs/thanks.md | 4 ++-- 7 files changed, 13 insertions(+), 13 deletions(-) diff --git a/ticket_stubs/bug_confirmed_p1_or_p2.md b/ticket_stubs/bug_confirmed_p1_or_p2.md index deb287b84d..463a867586 100644 --- a/ticket_stubs/bug_confirmed_p1_or_p2.md +++ b/ticket_stubs/bug_confirmed_p1_or_p2.md @@ -12,7 +12,7 @@ Additionally: * INSERT REASONS! We will definitely see your comments on this issue when reading this ticket, but may not be able to reply promptly. You may also wish to join one of our two mailing lists -which is very active: +which are very active: * https://groups.google.com/forum/#!forum/ansible-project - for user questions, tips, and tricks * https://groups.google.com/forum/#!forum/ansible-devel - for strategy, future planning, and questions about writing code diff --git a/ticket_stubs/great_idea.md b/ticket_stubs/great_idea.md index d9e36173ae..4ad794e797 100644 --- a/ticket_stubs/great_idea.md +++ b/ticket_stubs/great_idea.md @@ -13,10 +13,10 @@ It might take us a little while to get to this one. 
Just as a quick reminder of we assign things a priority between P1 (highest) and P5. We may also ask some questions and it may be a while before we can get to this, but we'd like to thank you very much for your time! -We'll work things in priority order, so just wanted you to be aware of the queue and know we haven't forgot about you! +We'll work things in priority order, so just wanted you to be aware of the queue and know we haven't forgotten about you! We will definitely see your comments on this issue when reading this ticket, but may not be able to reply promptly. You may also wish to join one of our two mailing lists -which is very active: +which are very active: * https://groups.google.com/forum/#!forum/ansible-project - for user questions, tips, and tricks * https://groups.google.com/forum/#!forum/ansible-devel - for strategy, future planning, and questions about writing code diff --git a/ticket_stubs/needs_info.md b/ticket_stubs/needs_info.md index 4e0353a26d..24ec4017ac 100644 --- a/ticket_stubs/needs_info.md +++ b/ticket_stubs/needs_info.md @@ -11,10 +11,10 @@ We have some questions we'd like to know about before we can get this request qu Just as a quick reminder of things, this is a really busy project. We have over 800 contributors and to manage the queue effectively we assign things a priority between P1 (highest) and P5. We'd like to thank you very much for your time! -We'll work things in priority order, so just wanted you to be aware of the queue and know we haven't forgot about you! +We'll work things in priority order, so just wanted you to be aware of the queue and know we haven't forgotten about you! We will definitely see your comments on this issue when reading this ticket, but may not be able to reply promptly. 
You may also wish to join one of our two mailing lists -which is very active: +which are very active: * https://groups.google.com/forum/#!forum/ansible-project - for user questions, tips, and tricks * https://groups.google.com/forum/#!forum/ansible-devel - for strategy, future planning, and questions about writing code diff --git a/ticket_stubs/pr_cleanup_commits.md b/ticket_stubs/pr_cleanup_commits.md index b55b70177d..a06cb6973e 100644 --- a/ticket_stubs/pr_cleanup_commits.md +++ b/ticket_stubs/pr_cleanup_commits.md @@ -14,10 +14,10 @@ To resolve this problem, it may be helpful to create a new branch and cherry-pic Just as a quick reminder of things, this is a really busy project. We have over 800 contributors and to manage the queue effectively we assign things a priority between P1 (highest) and P5. We'd like to thank you very much for your time! -We'll work things in priority order, so just wanted you to be aware of the queue and know we haven't forgot about you! +We'll work things in priority order, so just wanted you to be aware of the queue and know we haven't forgotten about you! We will definitely see your comments on this issue when reading this ticket, but may not be able to reply promptly. You may also wish to join one of our two mailing lists -which is very active: +which are very active: * https://groups.google.com/forum/#!forum/ansible-project - for user questions, tips, and tricks * https://groups.google.com/forum/#!forum/ansible-devel - for strategy, future planning, and questions about writing code diff --git a/ticket_stubs/pr_needs_rebase.md b/ticket_stubs/pr_needs_rebase.md index 679cff7906..f90af9940c 100644 --- a/ticket_stubs/pr_needs_rebase.md +++ b/ticket_stubs/pr_needs_rebase.md @@ -13,10 +13,10 @@ It looks like the code underneath has changed since this was submitted. Can you Just as a quick reminder of things, this is a really busy project. 
We have over 800 contributors and to manage the queue effectively we assign things a priority between P1 (highest) and P5. We'd like to thank you very much for your time! -We'll work things in priority order, so just wanted you to be aware of the queue and know we haven't forgot about you! +We'll work things in priority order, so just wanted you to be aware of the queue and know we haven't forgotten about you! We will definitely see your comments on this issue when reading this ticket, but may not be able to reply promptly. You may also wish to join one of our two mailing lists -which is very active: +which are very active: * https://groups.google.com/forum/#!forum/ansible-project - for user questions, tips, and tricks * https://groups.google.com/forum/#!forum/ansible-devel - for strategy, future planning, and questions about writing code diff --git a/ticket_stubs/pr_needs_revision.md b/ticket_stubs/pr_needs_revision.md index 36e41184a2..64590cd7d8 100644 --- a/ticket_stubs/pr_needs_revision.md +++ b/ticket_stubs/pr_needs_revision.md @@ -11,10 +11,10 @@ We'd like to see a few things tweaked if you don't mind. If you can help resolv Just as a quick reminder of things, this is a really busy project. We have over 800 contributors and to manage the queue effectively we assign things a priority between P1 (highest) and P5. We'd like to thank you very much for your time! -We'll work things in priority order, so just wanted you to be aware of the queue and know we haven't forgot about you! +We'll work things in priority order, so just wanted you to be aware of the queue and know we haven't forgotten about you! We will definitely see your comments on this issue when reading this ticket, but may not be able to reply promptly. 
You may also wish to join one of our two mailing lists -which is very active: +which are very active: * https://groups.google.com/forum/#!forum/ansible-project - for user questions, tips, and tricks * https://groups.google.com/forum/#!forum/ansible-devel - for strategy, future planning, and questions about writing code diff --git a/ticket_stubs/thanks.md b/ticket_stubs/thanks.md index f86fdd9afa..c77019889a 100644 --- a/ticket_stubs/thanks.md +++ b/ticket_stubs/thanks.md @@ -9,10 +9,10 @@ Just as a quick reminder of things, this is a really busy project. We have over we assign things a priority between P1 (highest) and P5. We may also ask some questions and it may be a while before we can get to this, but we'd like to thank you very much for your time! -We'll work things in priority order, so just wanted you to be aware of the queue and know we haven't forgot about you! +We'll work things in priority order, so just wanted you to be aware of the queue and know we haven't forgotten about you! We will definitely see your comments on this issue when reading this ticket, but may not be able to reply promptly. 
You may also wish to join one of our two mailing lists -which is very active: +which are very active: * https://groups.google.com/forum/#!forum/ansible-project - for user questions, tips, and tricks * https://groups.google.com/forum/#!forum/ansible-devel - for strategy, future planning, and questions about writing code From 92dd3cb84035d587a959b6bf54f1dbd362676985 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Mon, 29 Sep 2014 16:14:31 -0400 Subject: [PATCH 118/813] Update _module_issue_move.md --- ticket_stubs/_module_issue_move.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ticket_stubs/_module_issue_move.md b/ticket_stubs/_module_issue_move.md index 3fe51bdb07..173b3932ed 100644 --- a/ticket_stubs/_module_issue_move.md +++ b/ticket_stubs/_module_issue_move.md @@ -30,6 +30,6 @@ Additionally, should you need more help with this, you can ask questions on: * the ansible-project mailing list: https://groups.google.com/forum/#!forum/ansible-project -Thanks you very much! +Thank you very much! From 5e0121e11bc3ec66827a894a194eb1cd7b2f7fb6 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Mon, 29 Sep 2014 16:14:41 -0400 Subject: [PATCH 119/813] Update _module_pr_move.md --- ticket_stubs/_module_pr_move.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ticket_stubs/_module_pr_move.md b/ticket_stubs/_module_pr_move.md index f2ed8537c8..59eb6cce7b 100644 --- a/ticket_stubs/_module_pr_move.md +++ b/ticket_stubs/_module_pr_move.md @@ -39,6 +39,6 @@ Additionally, should you need more help with this, you can ask questions on: * the development mailing list: https://groups.google.com/forum/#!forum/ansible-devel -Thanks you very much! +Thank you very much! 
From a1809a371a7d0f4dc13616ae25b9312e3cecbe8e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 29 Sep 2014 17:54:15 -0400 Subject: [PATCH 120/813] Target unnecessary templating warning at lines consisting of a single variable Fixes #6407 --- lib/ansible/playbook/task.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index 1570b88a4d..5e38f63cc9 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -84,8 +84,11 @@ class Task(object): # code to allow "with_glob" and to reference a lookup plugin named glob elif x.startswith("with_"): - - if isinstance(ds[x], basestring) and ds[x].lstrip().startswith("{{"): + # Only a variable, no logic + if (isinstance(ds[x], basestring) and + ds[x].startswith('{{') and + ds[x].find('}}') == len(ds[x]) - 2 and + find ('|') == -1): utils.warning("It is unnecessary to use '{{' in loops, leave variables in loop expressions bare.") plugin_name = x.replace("with_","") @@ -97,7 +100,11 @@ class Task(object): raise errors.AnsibleError("cannot find lookup plugin named %s for usage in with_%s" % (plugin_name, plugin_name)) elif x in [ 'changed_when', 'failed_when', 'when']: - if isinstance(ds[x], basestring) and ds[x].lstrip().startswith("{{"): + # Only a variable, no logic + if (isinstance(ds[x], basestring) and + ds[x].startswith('{{') and + ds[x].find('}}') == len(ds[x]) - 2 and + find ('|') == -1): utils.warning("It is unnecessary to use '{{' in conditionals, leave variables in loop expressions bare.") elif x.startswith("when_"): utils.deprecated("The 'when_' conditional has been removed. 
Switch to using the regular unified 'when' statements as described on docs.ansible.com.","1.5", removed=True) From 2211ae113c3bace4aa4f2f5b3462d8b90542dd68 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Mon, 29 Sep 2014 18:01:16 -0400 Subject: [PATCH 121/813] Update rst.j2 typo fix --- hacking/templates/rst.j2 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hacking/templates/rst.j2 b/hacking/templates/rst.j2 index 917c6ba754..f02b32fd11 100644 --- a/hacking/templates/rst.j2 +++ b/hacking/templates/rst.j2 @@ -109,7 +109,7 @@ This is a Core Module This source of this module is hosted on GitHub in the `ansible-modules-core `_ repo. -If you believe you have found a bug in this module, and are already running the latest stable or development version of Ansbile, first look in the `issue tracker at github.com/ansible/ansible-modules-core `_ to see if a bug has already been filed. If not, we would be greatful if you would file one. +If you believe you have found a bug in this module, and are already running the latest stable or development version of Ansible, first look in the `issue tracker at github.com/ansible/ansible-modules-core `_ to see if a bug has already been filed. If not, we would be greatful if you would file one. Should you have a question rather than a bug report, inquries are welcome on the `ansible-project google group `_ or on Ansible's "#ansible" channel, located on irc.freenode.net. Development oriented topics should instead use the similar `ansible-devel google group `_. @@ -124,7 +124,7 @@ This is an Extras Module This source of this module is hosted on GitHub in the `ansible-modules-extras `_ repo. -If you believe you have found a bug in this module, and are already running the latest stable or development version of Ansbile, first look in the `issue tracker at github.com/ansible/ansible-modules-extras `_ to see if a bug has already been filed. If not, we would be greatful if you would file one. 
+If you believe you have found a bug in this module, and are already running the latest stable or development version of Ansible, first look in the `issue tracker at github.com/ansible/ansible-modules-extras `_ to see if a bug has already been filed. If not, we would be greatful if you would file one. Should you have a question rather than a bug report, inquries are welcome on the `ansible-project google group ` or on Ansible's "#ansible" channel, located on irc.freenode.net. Development oriented topics should instead use the similar `ansible-devel google group `_. From cf548baeabf25086cf42b1f335a6ebd32da4504b Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 29 Sep 2014 18:06:48 -0400 Subject: [PATCH 122/813] Fix missing strip() in a1809a371a7d0f4dc13616ae25b9312e3cecbe8e --- lib/ansible/playbook/task.py | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index 5e38f63cc9..db10f7c494 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -84,12 +84,13 @@ class Task(object): # code to allow "with_glob" and to reference a lookup plugin named glob elif x.startswith("with_"): - # Only a variable, no logic - if (isinstance(ds[x], basestring) and - ds[x].startswith('{{') and - ds[x].find('}}') == len(ds[x]) - 2 and - find ('|') == -1): - utils.warning("It is unnecessary to use '{{' in loops, leave variables in loop expressions bare.") + if isinstance(ds[x], basestring): + param = ds[x].strip() + # Only a variable, no logic + if (param.startswith('{{') and + param.find('}}') == len(ds[x]) - 2 and + param.find('|') == -1): + utils.warning("It is unnecessary to use '{{' in loops, leave variables in loop expressions bare.") plugin_name = x.replace("with_","") if plugin_name in utils.plugins.lookup_loader: @@ -100,12 +101,13 @@ class Task(object): raise errors.AnsibleError("cannot find lookup plugin named %s for usage in with_%s" % 
(plugin_name, plugin_name)) elif x in [ 'changed_when', 'failed_when', 'when']: - # Only a variable, no logic - if (isinstance(ds[x], basestring) and - ds[x].startswith('{{') and - ds[x].find('}}') == len(ds[x]) - 2 and - find ('|') == -1): - utils.warning("It is unnecessary to use '{{' in conditionals, leave variables in loop expressions bare.") + if isinstance(ds[x], basestring): + param = ds[x].strip() + # Only a variable, no logic + if (param.startswith('{{') and + param.find('}}') == len(ds[x]) - 2 and + param.find('|') == -1): + utils.warning("It is unnecessary to use '{{' in conditionals, leave variables in loop expressions bare.") elif x.startswith("when_"): utils.deprecated("The 'when_' conditional has been removed. Switch to using the regular unified 'when' statements as described on docs.ansible.com.","1.5", removed=True) From fafa44a375dbe63803ac0c66f92cc7e1ace2cc77 Mon Sep 17 00:00:00 2001 From: Dick Davies Date: Tue, 30 Sep 2014 00:34:02 +0100 Subject: [PATCH 123/813] Update guide_gce.rst Clarify which of the options to pick. --- docsite/rst/guide_gce.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/guide_gce.rst b/docsite/rst/guide_gce.rst index f9e498ac0a..a777b53944 100644 --- a/docsite/rst/guide_gce.rst +++ b/docsite/rst/guide_gce.rst @@ -22,7 +22,7 @@ The GCE modules all require the apache-libcloud module, which you can install fr Credentials ----------- -To work with the GCE modules, you'll first need to get some credentials. You can create new one from the `console `_ by going to the "APIs and Auth" section. Once you've created a new client ID and downloaded the generated private key (in the `pkcs12 format `_), you'll need to convert the key by running the following command: +To work with the GCE modules, you'll first need to get some credentials. You can create new one from the `console `_ by going to the "APIs and Auth" section and choosing to create a new client ID for a service account. 
Once you've created a new client ID and downloaded the generated private key (in the `pkcs12 format `_), you'll need to convert the key by running the following command: .. code-block:: bash From cc6f10b6ac7735ea9211c85df99c8a49a72a8460 Mon Sep 17 00:00:00 2001 From: Franck Cuny Date: Sun, 28 Sep 2014 20:18:39 -0700 Subject: [PATCH 124/813] Update the GCE guide with a working configuration. Also remove a few whitespaces, improve the bash script and fix a typo. --- docsite/rst/guide_gce.rst | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/docsite/rst/guide_gce.rst b/docsite/rst/guide_gce.rst index f9e498ac0a..c93db36771 100644 --- a/docsite/rst/guide_gce.rst +++ b/docsite/rst/guide_gce.rst @@ -136,15 +136,15 @@ For the following use case, let's use this small shell script as a wrapper. #!/bin/bash PLAYBOOK="$1" - if [ -z $PLAYBOOK ]; then - echo "You need to pass a playback as argument to this script." + if [[ -z $PLAYBOOK ]]; then + echo "You need to pass a playbook as argument to this script." exit 1 fi export SSL_CERT_FILE=$(pwd)/cacert.cer export ANSIBLE_HOST_KEY_CHECKING=False - if [ ! -f "$SSL_CERT_FILE" ]; then + if [[ ! 
-f "$SSL_CERT_FILE" ]]; then curl -O http://curl.haxx.se/ca/cacert.pem fi @@ -175,11 +175,11 @@ A playbook would looks like this: tasks: - name: Launch instances gce: - instance_names: dev - machine_type: "{{ machine_type }}" - image: "{{ image }}" - service_account_email: "{{ service_account_email }}" - pem_file: "{{ pem_file }}" + instance_names: dev + machine_type: "{{ machine_type }}" + image: "{{ image }}" + service_account_email: "{{ service_account_email }}" + pem_file: "{{ pem_file }}" project_id: "{{ project_id }}" tags: webserver register: gce @@ -188,15 +188,18 @@ A playbook would looks like this: wait_for: host={{ item.public_ip }} port=22 delay=10 timeout=60 with_items: gce.instance_data - - name: add_host hostname={{ item.public_ip }} groupname=new_instances + - name: Add host to groupname + add_host: hostname={{ item.public_ip }} groupname=new_instances + with_items: gce.instance_data - name: Manage new instances hosts: new_instances connection: ssh + sudo: True roles: - base_configuration - production_server - + Note that use of the "add_host" module above creates a temporary, in-memory group. This means that a play in the same playbook can then manage machines in the 'new_instances' group, if so desired. Any sort of arbitrary configuration is possible at this point. From f6bd0d4841e470e0b4258be2743929faa91aee19 Mon Sep 17 00:00:00 2001 From: Nitin Madhok Date: Tue, 30 Sep 2014 04:57:01 -0400 Subject: [PATCH 125/813] Update community.rst Fixes https://github.com/ansible/ansible/issues/9202 --- docsite/rst/community.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/community.rst b/docsite/rst/community.rst index 9448b7d1ea..ad992286ab 100644 --- a/docsite/rst/community.rst +++ b/docsite/rst/community.rst @@ -57,7 +57,7 @@ feature development, so clearing bugs out of the way is one of the best things y If you're not a developer, helping test pull requests for bug fixes and features is still immensely valuable. 
You can do this by checking out ansible, making a test branch off the main one, merging a GitHub issue, testing, and then commenting on that particular issue on GitHub. -I'd Like To Report A Bugs +I'd Like To Report A Bug ------------------------------------ Ansible practices responsible disclosure - if this is a security related bug, email `security@ansible.com `_ instead of filing a ticket or posting to the Google Group and you will receive a prompt response. From 0a0b741c327517dbdcf21f34d9167cd3b68357ea Mon Sep 17 00:00:00 2001 From: Nitin Madhok Date: Tue, 30 Sep 2014 05:12:44 -0400 Subject: [PATCH 126/813] Update playbooks_roles.rst Fixes https://github.com/ansible/ansible/issues/7241 --- docsite/rst/playbooks_roles.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_roles.rst b/docsite/rst/playbooks_roles.rst index fc347e39a0..2742936cf8 100644 --- a/docsite/rst/playbooks_roles.rst +++ b/docsite/rst/playbooks_roles.rst @@ -153,7 +153,7 @@ Roles .. versionadded:: 1.2 -Now that you have learned about vars_files, tasks, and handlers, what is the best way to organize your playbooks? +Now that you have learned about `vars_files `_, tasks, and handlers, what is the best way to organize your playbooks? The short answer is to use roles! Roles are ways of automatically loading certain vars_files, tasks, and handlers based on a known file structure. Grouping content by roles also allows easy sharing of roles with other users. From 684352926bc53fc3b397726f4d45949bc09fe188 Mon Sep 17 00:00:00 2001 From: James Laska Date: Tue, 30 Sep 2014 10:33:42 -0400 Subject: [PATCH 127/813] Resolve issue where repo_path contains multiple '.git' strings If the repo_path contained multiple '.git' strings, the _git_repo_info() call resulted in a traceback. This change removes the trailing '.git' and resolves the traceback. 
--- lib/ansible/utils/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index fb43005885..f18ba876b1 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -845,7 +845,7 @@ def _git_repo_info(repo_path): if os.path.isabs(gitdir): repo_path = gitdir else: - repo_path = os.path.join(repo_path.split('.git')[0], gitdir) + repo_path = os.path.join(repo_path[:-4], gitdir) except (IOError, AttributeError): return '' f = open(os.path.join(repo_path, "HEAD")) From 05994cd9006eb59ccc280c71c9202410ea907463 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 30 Sep 2014 09:43:40 -0500 Subject: [PATCH 128/813] Don't clear paths when adding a new directory path in PluginLoader --- lib/ansible/utils/plugins.py | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/ansible/utils/plugins.py b/lib/ansible/utils/plugins.py index 800802bc15..09aaa5b3ba 100644 --- a/lib/ansible/utils/plugins.py +++ b/lib/ansible/utils/plugins.py @@ -150,7 +150,6 @@ class PluginLoader(object): def add_directory(self, directory, with_subdir=False): ''' Adds an additional directory to the search path ''' - self._paths = None directory = os.path.realpath(directory) if directory is not None: From c070ab69f70152e6a7339af3677649e05bc94018 Mon Sep 17 00:00:00 2001 From: Dave Rawks Date: Tue, 30 Sep 2014 11:24:47 -0700 Subject: [PATCH 129/813] Resolve #9223 Fix test for python 2.6 * trivial test refactor to allow for python 2.6 compat --- test/units/TestUtils.py | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/test/units/TestUtils.py b/test/units/TestUtils.py index 73ef979674..3be2668fa9 100644 --- a/test/units/TestUtils.py +++ b/test/units/TestUtils.py @@ -687,14 +687,8 @@ class TestUtils(unittest.TestCase): ) # invalid quote detection - try: - with self.assertRaises(Exception): - split_args('hey I started a quote"') - with self.assertRaises(Exception): - 
split_args('hey I started a\' quote') - except TypeError: - # you must be on Python 2.6 still, FIXME - pass + self.assertRaises(Exception, split_args, 'hey I started a quote"') + self.assertRaises(Exception, split_args, 'hey I started a\' quote') # jinja2 loop blocks with lots of complexity _test_combo( From 664fa1e942ab5004b99f0dfd83f126c9555ec747 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 30 Sep 2014 15:05:03 -0400 Subject: [PATCH 130/813] Changelog entry for #9182 specifying the sudo command. --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0b84708e22..c443a5552c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -56,6 +56,8 @@ Some other notable changes: * various parser improvements * produce a friendly error message if the SSH key is too permissive * ec2_ami_search: support for SSD and IOPS provisioned EBS images +* can set ansible_sudo_exe as an inventory variable which allows specifying + a different sudo (or equivalent) command And various other bug fixes and improvements ... From c6bfd05c810d49dbcc2f180460a48b45b4a57c34 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Tue, 30 Sep 2014 15:17:28 -0400 Subject: [PATCH 131/813] Document pattern ranges on the patterns page. --- docsite/rst/intro_patterns.rst | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/docsite/rst/intro_patterns.rst b/docsite/rst/intro_patterns.rst index 1dd1935f06..41e9affc81 100644 --- a/docsite/rst/intro_patterns.rst +++ b/docsite/rst/intro_patterns.rst @@ -68,6 +68,14 @@ It's also ok to mix wildcard patterns and groups at the same time:: one*.com:dbservers +As an advanced usage, you can also select the numbered server in a group:: + + webservers[0] + +Or a portion of servers in a group:: + + webservers[0:25] + Most people don't specify patterns as regular expressions, but you can. 
Just start the pattern with a '~':: ~(web|db).*\.example\.com From 997ea78b1fd2d1863c241849f0f5bb93478ecbe0 Mon Sep 17 00:00:00 2001 From: Chris Church Date: Tue, 30 Sep 2014 16:20:10 -0400 Subject: [PATCH 132/813] Add integration tests for the win_feature module. --- .../roles/test_win_feature/defaults/main.yml | 4 + .../roles/test_win_feature/tasks/main.yml | 131 ++++++++++++++++++ test/integration/test_winrm.yml | 1 + 3 files changed, 136 insertions(+) create mode 100644 test/integration/roles/test_win_feature/defaults/main.yml create mode 100644 test/integration/roles/test_win_feature/tasks/main.yml diff --git a/test/integration/roles/test_win_feature/defaults/main.yml b/test/integration/roles/test_win_feature/defaults/main.yml new file mode 100644 index 0000000000..e1833cd8a8 --- /dev/null +++ b/test/integration/roles/test_win_feature/defaults/main.yml @@ -0,0 +1,4 @@ +--- + +# Feature not normally installed by default. +test_win_feature_name: Telnet-Client diff --git a/test/integration/roles/test_win_feature/tasks/main.yml b/test/integration/roles/test_win_feature/tasks/main.yml new file mode 100644 index 0000000000..a49622c232 --- /dev/null +++ b/test/integration/roles/test_win_feature/tasks/main.yml @@ -0,0 +1,131 @@ +# test code for the win_feature module +# (c) 2014, Chris Church + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ + +- name: start with feature absent + win_feature: + name: "{{ test_win_feature_name }}" + state: absent + +- name: install feature + win_feature: + name: "{{ test_win_feature_name }}" + state: present + restart: no + include_sub_features: yes + include_management_tools: yes + register: win_feature_install_result + +- name: check result of installing feature + assert: + that: + - "win_feature_install_result|changed" + - "win_feature_install_result.success" + - "win_feature_install_result.exitcode == 'Success'" + - "not win_feature_install_result.restart_needed" + - "win_feature_install_result.feature_result|length == 1" + - "win_feature_install_result.feature_result[0].id" + - "win_feature_install_result.feature_result[0].display_name" + - "win_feature_install_result.feature_result[0].message is defined" + - "win_feature_install_result.feature_result[0].restart_needed is defined" + - "win_feature_install_result.feature_result[0].skip_reason" + - "win_feature_install_result.feature_result[0].success is defined" + +- name: install feature again + win_feature: + name: "{{ test_win_feature_name }}" + state: present + restart: no + include_sub_features: yes + include_management_tools: yes + register: win_feature_install_again_result + +- name: check result of installing feature again + assert: + that: + - "not win_feature_install_again_result|changed" + - "win_feature_install_again_result.success" + - "win_feature_install_again_result.exitcode == 'NoChangeNeeded'" + - "not win_feature_install_again_result.restart_needed" + - "win_feature_install_again_result.feature_result == []" + +- name: remove feature + win_feature: + name: "{{ test_win_feature_name }}" + state: absent + register: win_feature_remove_result + +- name: check result of removing feature + assert: + that: + - "win_feature_remove_result|changed" + - "win_feature_remove_result.success" + - "win_feature_remove_result.exitcode == 'Success'" + - "not win_feature_remove_result.restart_needed" + - 
"win_feature_remove_result.feature_result|length == 1" + - "win_feature_remove_result.feature_result[0].id" + - "win_feature_remove_result.feature_result[0].display_name" + - "win_feature_remove_result.feature_result[0].message is defined" + - "win_feature_remove_result.feature_result[0].restart_needed is defined" + - "win_feature_remove_result.feature_result[0].skip_reason" + - "win_feature_remove_result.feature_result[0].success is defined" + +- name: remove feature again + win_feature: + name: "{{ test_win_feature_name }}" + state: absent + register: win_feature_remove_again_result + +- name: check result of removing feature again + assert: + that: + - "not win_feature_remove_again_result|changed" + - "win_feature_remove_again_result.success" + - "win_feature_remove_again_result.exitcode == 'NoChangeNeeded'" + - "not win_feature_remove_again_result.restart_needed" + - "win_feature_remove_again_result.feature_result == []" + +- name: try to install an invalid feature name + win_feature: + name: "Microsoft-Bob" + state: present + register: win_feature_install_invalid_result + ignore_errors: true + +- name: check result of installing invalid feature name + assert: + that: + - "win_feature_install_invalid_result|failed" + - "not win_feature_install_invalid_result|changed" + - "win_feature_install_invalid_result.msg" + - "win_feature_install_invalid_result.exitcode == 'InvalidArgs'" + +- name: try to remove an invalid feature name + win_feature: + name: "Microsoft-Bob" + state: absent + register: win_feature_remove_invalid_result + ignore_errors: true + +- name: check result of removing invalid feature name + assert: + that: + - "win_feature_remove_invalid_result|failed" + - "not win_feature_remove_invalid_result|changed" + - "win_feature_remove_invalid_result.msg" + - "win_feature_remove_invalid_result.exitcode == 'InvalidArgs'" diff --git a/test/integration/test_winrm.yml b/test/integration/test_winrm.yml index c05a130831..415f381d46 100644 --- 
a/test/integration/test_winrm.yml +++ b/test/integration/test_winrm.yml @@ -29,3 +29,4 @@ - { role: test_win_get_url, tags: test_win_get_url } - { role: test_win_msi, tags: test_win_msi } - { role: test_win_service, tags: test_win_service } + - { role: test_win_feature, tags: test_win_feature } From 5f54b57b6d55a2e6d3d8ffe8fe5f403f8d392147 Mon Sep 17 00:00:00 2001 From: Dave Rawks Date: Tue, 30 Sep 2014 13:37:47 -0700 Subject: [PATCH 133/813] Corrected indentation --- test/units/TestUtils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/units/TestUtils.py b/test/units/TestUtils.py index 3be2668fa9..b16584199f 100644 --- a/test/units/TestUtils.py +++ b/test/units/TestUtils.py @@ -687,8 +687,8 @@ class TestUtils(unittest.TestCase): ) # invalid quote detection - self.assertRaises(Exception, split_args, 'hey I started a quote"') - self.assertRaises(Exception, split_args, 'hey I started a\' quote') + self.assertRaises(Exception, split_args, 'hey I started a quote"') + self.assertRaises(Exception, split_args, 'hey I started a\' quote') # jinja2 loop blocks with lots of complexity _test_combo( From 313f26f866cec92e4dbe0e1d713a0ab0da647c98 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 30 Sep 2014 15:50:46 -0500 Subject: [PATCH 134/813] Fix issue with vars precedence from roles Fixes #9219 --- lib/ansible/playbook/play.py | 2 +- lib/ansible/runner/__init__.py | 12 ++++++++++-- .../roles/test_var_precedence_role1/tasks/main.yml | 2 +- .../roles/test_var_precedence_role2/tasks/main.yml | 2 +- 4 files changed, 13 insertions(+), 5 deletions(-) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index 6f51ac832b..ad57ebd9df 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -372,7 +372,7 @@ class Play(object): # flush handlers after pre_tasks new_tasks.append(dict(meta='flush_handlers')) - roles = self._build_role_dependencies(roles, [], self.vars) + roles = 
self._build_role_dependencies(roles, [], {}) # give each role an uuid and # make role_path available as variable to the task diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 1469fbb726..19c90ba529 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -600,6 +600,14 @@ class Runner(object): module_vars_inject = utils.combine_vars(self.module_vars, module_vars_inject) module_vars = template.template(self.basedir, self.module_vars, module_vars_inject) + # remove bad variables from the module vars, which may be in there due + # the way role declarations are specified in playbooks + if 'tags' in module_vars: + del module_vars['tags'] + if 'when' in module_vars: + del module_vars['when'] + + # start building the dictionary of injected variables inject = {} # default vars are the lowest priority @@ -608,10 +616,10 @@ class Runner(object): inject = utils.combine_vars(inject, host_variables) # then the setup_cache which contains facts gathered inject = utils.combine_vars(inject, self.setup_cache.get(host, {})) - # then come the module variables - inject = utils.combine_vars(inject, module_vars) # followed by vars (vars, vars_files, vars/main.yml) inject = utils.combine_vars(inject, self.vars_cache.get(host, {})) + # then come the module variables + inject = utils.combine_vars(inject, module_vars) # and finally -e vars are the highest priority inject = utils.combine_vars(inject, self.extra_vars) # and then special vars diff --git a/test/integration/roles/test_var_precedence_role1/tasks/main.yml b/test/integration/roles/test_var_precedence_role1/tasks/main.yml index 410e01b570..95b2a0bb5a 100644 --- a/test/integration/roles/test_var_precedence_role1/tasks/main.yml +++ b/test/integration/roles/test_var_precedence_role1/tasks/main.yml @@ -10,5 +10,5 @@ - 'param_var == "param_var_role1"' - 'vars_var == "vars_var"' - 'vars_files_var == "vars_files_var"' - - 'vars_files_var_role == "vars_files_var_role3"' + - 
'vars_files_var_role == "vars_files_var_role1"' - 'defaults_file_var_role1 == "defaults_file_var_role1"' diff --git a/test/integration/roles/test_var_precedence_role2/tasks/main.yml b/test/integration/roles/test_var_precedence_role2/tasks/main.yml index 96551a8e9c..a862389cd3 100644 --- a/test/integration/roles/test_var_precedence_role2/tasks/main.yml +++ b/test/integration/roles/test_var_precedence_role2/tasks/main.yml @@ -10,5 +10,5 @@ - 'param_var == "param_var_role2"' - 'vars_var == "vars_var"' - 'vars_files_var == "vars_files_var"' - - 'vars_files_var_role == "vars_files_var_role3"' + - 'vars_files_var_role == "vars_files_var_role2"' - 'defaults_file_var_role2 == "overridden by role vars"' From d5bab777cbb78ce85a8fb2e4ef1691a1c8b25f2b Mon Sep 17 00:00:00 2001 From: wsuff Date: Wed, 1 Oct 2014 00:30:37 -0400 Subject: [PATCH 135/813] Small typo in galaxy.rst other SCM sourced in line 96 --- docsite/rst/galaxy.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/galaxy.rst b/docsite/rst/galaxy.rst index e7963a3e7e..2929292b08 100644 --- a/docsite/rst/galaxy.rst +++ b/docsite/rst/galaxy.rst @@ -93,7 +93,7 @@ And here's an example showing some specific version downloads from multiple sour As you can see in the above, there are a large amount of controls available to customize where roles can be pulled from, and what to save roles as. -Roles pulled from galaxy work as with othe SCM sourced roles above. To download a role with dependencies, and automatically install those dependencies, the role must be uploaded to the Ansible Galaxy website. +Roles pulled from galaxy work as with other SCM sourced roles above. To download a role with dependencies, and automatically install those dependencies, the role must be uploaded to the Ansible Galaxy website. .. 
seealso:: From adc238c80be322a2373db7d86a03e8d1a9d817bc Mon Sep 17 00:00:00 2001 From: William Jimenez Date: Tue, 30 Sep 2014 22:23:08 -0700 Subject: [PATCH 136/813] broken link in developing_modules.rst link to core modules source is broken since restructuring of public ansible repos --- docsite/rst/developing_modules.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index 9fa35f4d3e..a1628763c4 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -231,7 +231,7 @@ As mentioned, if you are writing a module in Python, there are some very powerfu Modules are still transferred as one file, but an arguments file is no longer needed, so these are not only shorter in terms of code, they are actually FASTER in terms of execution time. -Rather than mention these here, the best way to learn is to read some of the `source of the modules `_ that come with Ansible. +Rather than mention these here, the best way to learn is to read some of the `source of the modules `_ that come with Ansible. The 'group' and 'user' modules are reasonably non-trivial and showcase what this looks like. From 4dea5c34abf1b36fe75b4ccffb099e1ab70d3911 Mon Sep 17 00:00:00 2001 From: gamename Date: Wed, 1 Oct 2014 09:54:48 -0500 Subject: [PATCH 137/813] Update developing_modules.rst The existing description does not make 2 things clear. 1) This is an environment variable 2) It must be set on the _controlling_ host, not the remote host. --- docsite/rst/developing_modules.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index 9fa35f4d3e..b34843a4cd 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -432,7 +432,7 @@ built and appear in the 'docsite/' directory. .. 
tip:: - You can use ANSIBLE_KEEP_REMOTE_FILES=1 to prevent ansible from + You can set the environment variable ANSIBLE_KEEP_REMOTE_FILES=1 on the controlling host to prevent ansible from deleting the remote files so you can debug your module. .. _module_contribution: From e00d657362497bd5fe0b4aad036d04d72999b42b Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 1 Oct 2014 11:50:45 -0400 Subject: [PATCH 138/813] Make unittest compatible with python <= 2.6 --- test/units/TestModuleUtilsBasic.py | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/test/units/TestModuleUtilsBasic.py b/test/units/TestModuleUtilsBasic.py index 2ffb310b95..ceba17be4f 100644 --- a/test/units/TestModuleUtilsBasic.py +++ b/test/units/TestModuleUtilsBasic.py @@ -294,12 +294,20 @@ class TestModuleUtilsBasicHelpers(unittest.TestCase): ssh_output = self.module._heuristic_log_sanitize(ssh_data) # Basic functionality: Successfully hid the password - self.assertNotIn('pas:word', url_output) - self.assertNotIn('pas:word', ssh_output) + try: + self.assertNotIn('pas:word', url_output) + self.assertNotIn('pas:word', ssh_output) - # Slightly more advanced, we hid all of the password despite the ":" - self.assertNotIn('pas', url_output) - self.assertNotIn('pas', ssh_output) + # Slightly more advanced, we hid all of the password despite the ":" + self.assertNotIn('pas', url_output) + self.assertNotIn('pas', ssh_output) + except AttributeError: + # python2.6 or less's unittest + self.assertFalse('pas:word' in url_output, '%s is present in %s' % ('"pas:word"', url_output)) + self.assertFalse('pas:word' in ssh_output, '%s is present in %s' % ('"pas:word"', ssh_output)) + + self.assertFalse('pas' in url_output, '%s is present in %s' % ('"pas"', url_output)) + self.assertFalse('pas' in ssh_output, '%s is present in %s' % ('"pas"', ssh_output)) # In this implementation we replace the password with 8 "*" which is # also the length of our password. 
The url fields should be able to @@ -313,7 +321,11 @@ class TestModuleUtilsBasicHelpers(unittest.TestCase): # the data, though: self.assertTrue(ssh_output.startswith("{'")) self.assertTrue(ssh_output.endswith("'}}}}")) - self.assertIn(":********@foo.com/data',", ssh_output) + try: + self.assertIn(":********@foo.com/data',", ssh_output) + except AttributeError: + # python2.6 or less's unittest + self.assertTrue(":********@foo.com/data'," in ssh_output, '%s is not present in %s' % (":********@foo.com/data',", ssh_output)) # The overzealous-ness here may lead to us changing the algorithm in # the future. We could make it consume less of the data (with the From 93ea3695402db3654fd277300ba00376afc3a851 Mon Sep 17 00:00:00 2001 From: Jim Kleckner Date: Wed, 1 Oct 2014 16:49:33 -0700 Subject: [PATCH 139/813] Add doc of @filename in intro_patterns.rst I noticed that the @filename notation for the --limit argument was not conveniently described. This patch adds a simple example to the intro_patterns.rst file. --- docsite/rst/intro_patterns.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docsite/rst/intro_patterns.rst b/docsite/rst/intro_patterns.rst index 41e9affc81..7830c97c49 100644 --- a/docsite/rst/intro_patterns.rst +++ b/docsite/rst/intro_patterns.rst @@ -84,6 +84,10 @@ While we're jumping a bit ahead, additionally, you can add an exclusion criteria ansible-playbook site.yml --limit datacenter2 +And if you want to read the list of hosts from a file, prefix the file name with '@'. Since Ansible 1.2:: + + ansible-playbook site.yml --limit @retry_hosts.txt + Easy enough. See :doc:`intro_adhoc` and then :doc:`playbooks` for how to apply this knowledge. .. seealso:: From 93e398e447ee5689352e69adb323051aeba6e510 Mon Sep 17 00:00:00 2001 From: Brendan Jurd Date: Thu, 2 Oct 2014 16:52:29 +1000 Subject: [PATCH 140/813] Fix several it's/its errors in the documentation. 
--- docsite/rst/community.rst | 2 +- docsite/rst/developing_modules.rst | 2 +- docsite/rst/guide_aws.rst | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docsite/rst/community.rst b/docsite/rst/community.rst index ad992286ab..70e65cdf20 100644 --- a/docsite/rst/community.rst +++ b/docsite/rst/community.rst @@ -132,7 +132,7 @@ Modules are some of the easiest places to get started. Contributing Code (Features or Bugfixes) ---------------------------------------- -The Ansible project keeps it’s source on github at +The Ansible project keeps its source on github at `github.com/ansible/ansible `_ for the core application, and two sub repos ansible/ansible-modules-core and ansible/ansible-modules-extras for module related items. If you need to know if a module is in 'core' or 'extras', consult the web documentation page for that module. The project takes contributions through diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index 4ee2140f29..b50c4f415f 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -11,7 +11,7 @@ See :doc:`modules` for a list of various ones developed in core. Modules can be written in any language and are found in the path specified by `ANSIBLE_LIBRARY` or the ``--module-path`` command line option. -By default, everything that ships with ansible is pulled from it's source tree, but +By default, everything that ships with ansible is pulled from its source tree, but additional paths can be added. 
The directory "./library", alongside your top level playbooks, is also automatically diff --git a/docsite/rst/guide_aws.rst b/docsite/rst/guide_aws.rst index deecca2bda..3456a2f4bc 100644 --- a/docsite/rst/guide_aws.rst +++ b/docsite/rst/guide_aws.rst @@ -271,7 +271,7 @@ Example 4 # Using the instanceid, call the ec2 module # locally to remove the instance by declaring - # it's state is "absent" + # its state is "absent" - hosts: ec2hosts gather_facts: True From daab8e7ad487444ee9f8511e3f8f898413d232a9 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 2 Oct 2014 12:07:05 -0500 Subject: [PATCH 141/813] Creating initial framework for refactoring core codebase --- lib/v2/__init__.py | 19 ++++++ lib/v2/cache/__init__.py | 17 +++++ lib/v2/config/__init__.py | 17 +++++ lib/v2/errors/__init__.py | 19 ++++++ lib/v2/inventory/__init__.py | 26 ++++++++ lib/v2/inventory/group.py | 44 +++++++++++++ lib/v2/inventory/host.py | 27 ++++++++ lib/v2/inventory/loaders/__init__.py | 17 +++++ lib/v2/inventory/loaders/dir.py | 17 +++++ lib/v2/inventory/loaders/ini.py | 17 +++++ lib/v2/inventory/loaders/script.py | 17 +++++ lib/v2/inventory/pattern.py | 36 +++++++++++ lib/v2/modules/__init__.py | 17 +++++ lib/v2/modules/docs/__init__.py | 17 +++++ lib/v2/modules/docs/fragments/__init__.py | 17 +++++ lib/v2/modules/utils/__init__.py | 17 +++++ lib/v2/playbook/__init__.py | 30 +++++++++ lib/v2/playbook/base.py | 28 ++++++++ lib/v2/playbook/block.py | 30 +++++++++ lib/v2/playbook/conditional.py | 78 +++++++++++++++++++++++ lib/v2/playbook/handler.py | 39 ++++++++++++ lib/v2/playbook/play.py | 17 +++++ lib/v2/playbook/playbook_include.py | 17 +++++ lib/v2/playbook/role.py | 55 ++++++++++++++++ lib/v2/playbook/tag.py | 45 +++++++++++++ lib/v2/playbook/task.py | 38 +++++++++++ lib/v2/playbook/task_include.py | 17 +++++ lib/v2/plugins/__init__.py | 17 +++++ lib/v2/plugins/action/__init__.py | 17 +++++ lib/v2/plugins/callback/__init__.py | 17 +++++ lib/v2/plugins/connections/__init__.py 
| 17 +++++ lib/v2/plugins/filter/__init__.py | 17 +++++ lib/v2/plugins/inventory/__init__.py | 17 +++++ lib/v2/plugins/lookup/__init__.py | 17 +++++ lib/v2/plugins/shell/__init__.py | 17 +++++ lib/v2/plugins/vars/__init__.py | 17 +++++ lib/v2/runner/__init__.py | 36 +++++++++++ lib/v2/utils/__init__.py | 17 +++++ 38 files changed, 941 insertions(+) create mode 100644 lib/v2/__init__.py create mode 100644 lib/v2/cache/__init__.py create mode 100644 lib/v2/config/__init__.py create mode 100644 lib/v2/errors/__init__.py create mode 100644 lib/v2/inventory/__init__.py create mode 100644 lib/v2/inventory/group.py create mode 100644 lib/v2/inventory/host.py create mode 100644 lib/v2/inventory/loaders/__init__.py create mode 100644 lib/v2/inventory/loaders/dir.py create mode 100644 lib/v2/inventory/loaders/ini.py create mode 100644 lib/v2/inventory/loaders/script.py create mode 100644 lib/v2/inventory/pattern.py create mode 100644 lib/v2/modules/__init__.py create mode 100644 lib/v2/modules/docs/__init__.py create mode 100644 lib/v2/modules/docs/fragments/__init__.py create mode 100644 lib/v2/modules/utils/__init__.py create mode 100644 lib/v2/playbook/__init__.py create mode 100644 lib/v2/playbook/base.py create mode 100644 lib/v2/playbook/block.py create mode 100644 lib/v2/playbook/conditional.py create mode 100644 lib/v2/playbook/handler.py create mode 100644 lib/v2/playbook/play.py create mode 100644 lib/v2/playbook/playbook_include.py create mode 100644 lib/v2/playbook/role.py create mode 100644 lib/v2/playbook/tag.py create mode 100644 lib/v2/playbook/task.py create mode 100644 lib/v2/playbook/task_include.py create mode 100644 lib/v2/plugins/__init__.py create mode 100644 lib/v2/plugins/action/__init__.py create mode 100644 lib/v2/plugins/callback/__init__.py create mode 100644 lib/v2/plugins/connections/__init__.py create mode 100644 lib/v2/plugins/filter/__init__.py create mode 100644 lib/v2/plugins/inventory/__init__.py create mode 100644 
lib/v2/plugins/lookup/__init__.py create mode 100644 lib/v2/plugins/shell/__init__.py create mode 100644 lib/v2/plugins/vars/__init__.py create mode 100644 lib/v2/runner/__init__.py create mode 100644 lib/v2/utils/__init__.py diff --git a/lib/v2/__init__.py b/lib/v2/__init__.py new file mode 100644 index 0000000000..05b82a40c7 --- /dev/null +++ b/lib/v2/__init__.py @@ -0,0 +1,19 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +__version__ = '1.8' +__author__ = 'Michael DeHaan' diff --git a/lib/v2/cache/__init__.py b/lib/v2/cache/__init__.py new file mode 100644 index 0000000000..d6c11ffa74 --- /dev/null +++ b/lib/v2/cache/__init__.py @@ -0,0 +1,17 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ diff --git a/lib/v2/config/__init__.py b/lib/v2/config/__init__.py new file mode 100644 index 0000000000..d6c11ffa74 --- /dev/null +++ b/lib/v2/config/__init__.py @@ -0,0 +1,17 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + diff --git a/lib/v2/errors/__init__.py b/lib/v2/errors/__init__.py new file mode 100644 index 0000000000..8b250383d5 --- /dev/null +++ b/lib/v2/errors/__init__.py @@ -0,0 +1,19 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
# lib/v2/errors/__init__.py
class AnsibleError(Exception):
    """Base exception for all errors raised by the v2 Ansible code."""
    pass


# lib/v2/inventory/__init__.py
class Inventory(object):
    """Container for the hosts and groups that make up an inventory (stub)."""

    def __init__(self):
        pass

    def get_hosts(self):
        """Return the known hosts; the stub tracks none yet."""
        return []

    def get_groups(self):
        """Return the known groups; the stub tracks none yet."""
        return []
# lib/v2/inventory/group.py
class Group(object):
    """A named collection of hosts, arranged in a parent/child hierarchy."""

    def __init__(self, name, hosts=None):
        # BUG FIX: the original used `hosts=[]` -- a mutable default argument
        # is evaluated once, so every Group created without an explicit host
        # list would share the same underlying list object.
        self.name = name
        self.hosts = hosts if hosts is not None else []
        self.parents = []
        self.children = []

    def get_vars(self):
        # Group-level variables; not implemented yet in the v2 stub.
        return dict()

    def get_hosts(self):
        return self.hosts

    def get_direct_subgroups(self):
        """Return the names of this group's immediate child groups."""
        return [child.name for child in self.children]

    def get_all_subgroups(self):
        """Return the names of all descendant groups, depth-first.

        BUG FIX: the original only recursed into children and never recorded
        the children themselves, so it always returned an empty list.
        """
        all_children = []
        for child in self.children:
            all_children.append(child.name)
            all_children.extend(child.get_all_subgroups())
        return all_children

    def get_parent_groups(self):
        return self.parents
# lib/v2/inventory/host.py
class Host(object):
    """A single managed machine, identified by its inventory name."""

    def __init__(self, name):
        self.name = name
        self.groups = []

    def get_vars(self):
        # Host-level variables; not implemented yet in the v2 stub.
        return dict()

    def get_groups(self):
        """Return the groups this host belongs to."""
        return self.groups
+ diff --git a/lib/v2/inventory/loaders/ini.py b/lib/v2/inventory/loaders/ini.py new file mode 100644 index 0000000000..d6c11ffa74 --- /dev/null +++ b/lib/v2/inventory/loaders/ini.py @@ -0,0 +1,17 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + diff --git a/lib/v2/inventory/loaders/script.py b/lib/v2/inventory/loaders/script.py new file mode 100644 index 0000000000..d6c11ffa74 --- /dev/null +++ b/lib/v2/inventory/loaders/script.py @@ -0,0 +1,17 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
# lib/v2/inventory/pattern.py
# BUG FIX: Host and Group are defined in v2.inventory.host and
# v2.inventory.group; the package __init__ only defines Inventory, so the
# original `from v2.inventory import Host, Group` raised ImportError.
from v2.inventory.host import Host
from v2.inventory.group import Group

class HostPattern(object):
    """Matches hosts or groups against an inventory host pattern."""

    def __init__(self, pattern):
        self.pattern = pattern

    def match(self, thing):
        '''
        Return a list of matches from `thing`, which may be a single Host
        or a Group (matched against the group's member hosts).

        BUG FIX: the original signature was `def match(thing):` with no
        `self`, so calling pattern.match(x) raised TypeError.
        Matching itself is not implemented yet in this stub.
        '''
        matches = []
        if isinstance(thing, Host):
            # simple match against a single host
            pass
        elif isinstance(thing, Group):
            # match against the list of hosts in the group
            pass
        return matches
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + diff --git a/lib/v2/modules/docs/__init__.py b/lib/v2/modules/docs/__init__.py new file mode 100644 index 0000000000..d6c11ffa74 --- /dev/null +++ b/lib/v2/modules/docs/__init__.py @@ -0,0 +1,17 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + diff --git a/lib/v2/modules/docs/fragments/__init__.py b/lib/v2/modules/docs/fragments/__init__.py new file mode 100644 index 0000000000..d6c11ffa74 --- /dev/null +++ b/lib/v2/modules/docs/fragments/__init__.py @@ -0,0 +1,17 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ diff --git a/lib/v2/modules/utils/__init__.py b/lib/v2/modules/utils/__init__.py new file mode 100644 index 0000000000..d6c11ffa74 --- /dev/null +++ b/lib/v2/modules/utils/__init__.py @@ -0,0 +1,17 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + diff --git a/lib/v2/playbook/__init__.py b/lib/v2/playbook/__init__.py new file mode 100644 index 0000000000..0671c261ff --- /dev/null +++ b/lib/v2/playbook/__init__.py @@ -0,0 +1,30 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
# lib/v2/playbook/__init__.py
import v2.utils

class Playbook(object):
    """Top-level playbook: a YAML file parsed into a list of plays."""

    def __init__(self, filename):
        # Raw data structure parsed from the YAML file.
        self.ds = v2.utils.load_yaml_from_file(filename)
        self.plays = []

    def load(self):
        # loads a list of plays from the parsed ds (stub: not implemented)
        self.plays = []

    def get_plays(self):
        return self.plays


# lib/v2/playbook/base.py
# BUG FIX: Tag and Conditional are defined in v2.playbook.tag and
# v2.playbook.conditional; they are not re-exported from the v2.playbook
# package __init__, so the original `from v2.playbook import Tag,
# Conditional` raised ImportError.
from v2.playbook.tag import Tag
from v2.playbook.conditional import Conditional

class PlaybookBase(Tag, Conditional):
    '''
    Implements a common object, which supports filtering based on
    both tags and when: conditional statements
    '''

    def __init__(self):
        pass
# lib/v2/playbook/block.py
from v2.playbook.base import PlaybookBase

class Block(PlaybookBase):
    """A block of plays built from a raw data structure (stub)."""

    def __init__(self):
        self.ds, self.plays = None, []

    def load(self, ds):
        """Remember the raw data structure; play parsing is not implemented."""
        self.ds = ds
        self.plays = []

    def get_plays(self):
        """Return the plays parsed so far (always empty in this stub)."""
        return self.plays
# lib/v2/playbook/conditional.py
import v2.config as C
# BUG FIX: merge() and _do_evaluate() raise AnsibleError, but the original
# file never imported it (and _do_evaluate referenced an undefined
# `errors` module).
from v2.errors import AnsibleError
from v2.utils import template
from v2.utils import list_union

class Conditional(object):
    """A set of `when:` conditional expressions evaluated against variables."""

    def __init__(self, basedir, conditionals=None):
        # BUG FIX: the original used `conditionals=[]` -- a mutable default
        # argument shared across all instances created without arguments.
        self.basedir = basedir
        self.conditionals = conditionals if conditionals is not None else []

    def push(self, conditional):
        """Append a conditional expression if it is not already present."""
        if conditional not in self.conditionals:
            self.conditionals.append(conditional)

    def get_conditionals(self):
        # return a full slice to make sure the reference
        # doesn't get mangled by other users of the result
        return self.conditionals[:]

    def merge(self, conditional):
        """Union another conditional (string, list, or Conditional) into this one."""
        if isinstance(conditional, basestring):
            conditional = Conditional(self.basedir, [conditional])
        elif isinstance(conditional, list):
            conditional = Conditional(self.basedir, conditional)
        elif not isinstance(conditional, Conditional):
            raise AnsibleError('expected a Conditional() class, instead got a %s' % type(conditional))
        # BUG FIX: the original called get_conditonals() (typo).
        self.conditionals = list_union(self.conditionals, conditional.get_conditionals())

    def evaluate(self, inject):
        """Return True only if every stored conditional evaluates true."""
        # BUG FIX: the original iterated `self.conditional` (missing 's'),
        # which would raise AttributeError.
        for conditional in self.conditionals:
            if not self._do_evaluate(conditional, inject):
                return False
        return True

    def _do_evaluate(self, conditional, inject):
        # allow bare variable names to stand in for their values
        if conditional in inject and '-' not in str(inject[conditional]):
            conditional = inject[conditional]
        conditional = template.template(self.basedir, conditional, inject, fail_on_undefined=C.fail_on_undefined)
        original = str(conditional).replace("jinja2_compare ","")
        # a Jinja2 evaluation that results in something Python can eval!
        presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
        conditional = template.template(self.basedir, presented, inject)
        val = conditional.strip()
        if val == presented:
            # the templating failed, meaning most likely a
            # variable was undefined. If we happened to be
            # looking for an undefined variable, return True,
            # otherwise fail
            if "is undefined" in conditional:
                return True
            elif "is defined" in conditional:
                return False
            else:
                raise AnsibleError("error while evaluating conditional: %s" % original)
        elif val == "True":
            return True
        elif val == "False":
            return False
        else:
            raise AnsibleError("unable to evaluate conditional: %s" % original)
# lib/v2/playbook/handler.py
from v2.errors import AnsibleError
# BUG FIX: Host and Task live in v2.inventory.host and v2.playbook.task;
# they are not re-exported by their package __init__ modules, so the
# original imports raised ImportError.
from v2.inventory.host import Host
from v2.playbook.task import Task

class Handler(Task):
    """A task that only runs when notified (triggered) by another task."""

    def __init__(self):
        # BUG FIX: initialize the Task base so its ds/block/role attributes
        # exist on handlers too; the original skipped it.
        super(Handler, self).__init__()
        self.triggered = False
        self.triggered_by = []

    def flag_for_host(self, host):
        """Record that `host` notified this handler."""
        if not isinstance(host, Host):
            raise AnsibleError('handlers expected to be triggered by a Host(), instead got %s' % type(host))
        if host.name not in self.triggered_by:
            # BUG FIX: the original appended to a bare, undefined name
            # `triggered_by` instead of the instance attribute (NameError).
            self.triggered_by.append(host.name)

    def get_has_triggered(self):
        return self.triggered

    def set_has_triggered(self, triggered):
        if not isinstance(triggered, bool):
            raise AnsibleError('a handlers triggered property should be a boolean, instead got %s' % type(triggered))
        self.triggered = triggered
+ diff --git a/lib/v2/playbook/playbook_include.py b/lib/v2/playbook/playbook_include.py new file mode 100644 index 0000000000..d6c11ffa74 --- /dev/null +++ b/lib/v2/playbook/playbook_include.py @@ -0,0 +1,17 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + diff --git a/lib/v2/playbook/role.py b/lib/v2/playbook/role.py new file mode 100644 index 0000000000..80251833d0 --- /dev/null +++ b/lib/v2/playbook/role.py @@ -0,0 +1,55 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
# lib/v2/playbook/role.py
from v2.playbook.base import PlaybookBase
from v2.utils import list_union

class Role(PlaybookBase):
    """A role: tasks, handlers, variables and dependencies loaded as a unit."""

    def __init__(self):
        pass

    def load(self, ds):
        """Initialize role state from the parsed data structure (stub)."""
        self.ds = ds
        self.tasks = []
        self.handlers = []
        self.blocks = []
        self.dependencies = []
        self.metadata = dict()
        self.defaults = dict()
        self.vars = dict()
        self.params = dict()

    def get_vars(self):
        # returns the merged variables for this role, including
        # recursively merging those of all child roles
        return dict()

    def get_immediate_dependencies(self):
        return self.dependencies

    def get_all_dependencies(self):
        """Return all dependencies, recursively, de-duplicated via list_union."""
        all_deps = []
        for dep in self.dependencies:
            # BUG FIX: the original discarded list_union's return value here,
            # so recursive (transitive) dependencies were never accumulated.
            all_deps = list_union(all_deps, dep.get_all_dependencies())
        all_deps = list_union(all_deps, self.dependencies)
        return all_deps

    def get_blocks(self):
        """Return the task blocks belonging to this role."""
        return self.blocks
# lib/v2/playbook/tag.py
from v2.errors import AnsibleError
from v2.utils import list_union

class Tag(object):
    """A set of tags attached to a playbook object."""

    def __init__(self, tags=None):
        # BUG FIX: the original used `tags=[]` -- a mutable default argument
        # shared by every Tag() created without arguments.
        self.tags = tags if tags is not None else []

    def push(self, tag):
        """Add a tag if it is not already present."""
        if tag not in self.tags:
            self.tags.append(tag)

    def get_tags(self):
        return self.tags

    def merge(self, tags):
        # returns a union of the tags, which can be a string,
        # a list of strings, or another Tag() class
        if isinstance(tags, basestring):
            tags = Tag([tags])
        elif isinstance(tags, list):
            tags = Tag(tags)
        elif not isinstance(tags, Tag):
            raise AnsibleError('expected a Tag() instance, instead got %s' % type(tags))
        # BUG FIX: the original called utils.list_union, but only the bare
        # name list_union is imported here, so this raised NameError.
        return list_union(self.tags, tags.get_tags())

    def matches(self, tag):
        """Return True if `tag` is one of this object's tags."""
        return tag in self.tags
# lib/v2/playbook/task.py
from v2.playbook.base import PlaybookBase

class Task(PlaybookBase):
    """A single task, optionally owned by a block and/or a role."""

    def __init__(self, block=None, role=None):
        self.ds = None
        self.block = block
        self.role = role

    def load(self, ds):
        """Remember the raw data structure; parsing is not implemented yet."""
        self.ds = ds
        self.name = ""

    def get_vars(self):
        # Task-level variables; not implemented yet in the v2 stub.
        return dict()

    def get_role(self):
        """Return the role that owns this task, if any."""
        return self.role

    def get_block(self):
        """Return the block that contains this task, if any."""
        return self.block
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + diff --git a/lib/v2/plugins/action/__init__.py b/lib/v2/plugins/action/__init__.py new file mode 100644 index 0000000000..d6c11ffa74 --- /dev/null +++ b/lib/v2/plugins/action/__init__.py @@ -0,0 +1,17 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + diff --git a/lib/v2/plugins/callback/__init__.py b/lib/v2/plugins/callback/__init__.py new file mode 100644 index 0000000000..d6c11ffa74 --- /dev/null +++ b/lib/v2/plugins/callback/__init__.py @@ -0,0 +1,17 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ diff --git a/lib/v2/plugins/connections/__init__.py b/lib/v2/plugins/connections/__init__.py new file mode 100644 index 0000000000..d6c11ffa74 --- /dev/null +++ b/lib/v2/plugins/connections/__init__.py @@ -0,0 +1,17 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + diff --git a/lib/v2/plugins/filter/__init__.py b/lib/v2/plugins/filter/__init__.py new file mode 100644 index 0000000000..d6c11ffa74 --- /dev/null +++ b/lib/v2/plugins/filter/__init__.py @@ -0,0 +1,17 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ diff --git a/lib/v2/plugins/inventory/__init__.py b/lib/v2/plugins/inventory/__init__.py new file mode 100644 index 0000000000..d6c11ffa74 --- /dev/null +++ b/lib/v2/plugins/inventory/__init__.py @@ -0,0 +1,17 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + diff --git a/lib/v2/plugins/lookup/__init__.py b/lib/v2/plugins/lookup/__init__.py new file mode 100644 index 0000000000..d6c11ffa74 --- /dev/null +++ b/lib/v2/plugins/lookup/__init__.py @@ -0,0 +1,17 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ diff --git a/lib/v2/plugins/shell/__init__.py b/lib/v2/plugins/shell/__init__.py new file mode 100644 index 0000000000..d6c11ffa74 --- /dev/null +++ b/lib/v2/plugins/shell/__init__.py @@ -0,0 +1,17 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + diff --git a/lib/v2/plugins/vars/__init__.py b/lib/v2/plugins/vars/__init__.py new file mode 100644 index 0000000000..d6c11ffa74 --- /dev/null +++ b/lib/v2/plugins/vars/__init__.py @@ -0,0 +1,17 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ diff --git a/lib/v2/runner/__init__.py b/lib/v2/runner/__init__.py new file mode 100644 index 0000000000..2e60a96faa --- /dev/null +++ b/lib/v2/runner/__init__.py @@ -0,0 +1,36 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from v2.inventory import Host +from v2.playbook import Task + +class Runner(object): + def __init__(self, host, task): + self.host = host + self.task = task + self.action = self.get_action() + + def get_action(self): + # returns the action plugin from plugins/action/ + # for the given task + return None + + def execute(self): + # runs the given task on the given host using + # the action determined by get_action() + return + diff --git a/lib/v2/utils/__init__.py b/lib/v2/utils/__init__.py new file mode 100644 index 0000000000..d6c11ffa74 --- /dev/null +++ b/lib/v2/utils/__init__.py @@ -0,0 +1,17 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + From 49c463f98b36dab0aceab1a9a4407304ff99c79f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tadej=20Jane=C5=BE?= Date: Thu, 2 Oct 2014 19:11:48 +0200 Subject: [PATCH 142/813] Added a note about escaping backreferences when using 'regex_replace' filter. Users will often be puzzled why 'regex_replace' is not working as intended when used inside YAML arguments. This note explains what they have to do to get it working. --- docsite/rst/playbooks_variables.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index c5eeff0135..bdb4e3ddef 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -355,6 +355,9 @@ To replace text in a string with regex, use the "regex_replace" filter:: # convert "foobar" to "bar" {{ 'foobar' | regex_replace('^f.*o(.*)$', '\\1') }} +.. note:: If "regex_replace" filter is used with variables inside YAML arguments (as opposed to simpler 'key=value' arguments), + then you need to escape backreferences (e.g. ``\\1``) with 4 backslashes (``\\\\``) instead of 2 (``\\``). + A few useful filters are typically added with each new Ansible release. The development documentation shows how to extend Ansible filters by writing your own as plugins, though in general, we encourage new ones to be added to core so everyone can make use of them. 
From b8e4e59d6b7db8d74374be3a990f6c3212ef5de8 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Thu, 2 Oct 2014 13:24:42 -0400 Subject: [PATCH 143/813] Signatures --- lib/v2/playbook/conditional.py | 57 +++------------------------------- 1 file changed, 4 insertions(+), 53 deletions(-) diff --git a/lib/v2/playbook/conditional.py b/lib/v2/playbook/conditional.py index f12a1eb218..6be68607a0 100644 --- a/lib/v2/playbook/conditional.py +++ b/lib/v2/playbook/conditional.py @@ -20,59 +20,10 @@ from v2.utils import template from v2.utils import list_union class Conditional(object): - def __init__(self, basedir, conditionals=[]): - self.basedir = basedir - self.conditionals = conditionals - def push(self, conditional): - if conditional not in self.conditionals: - self.conditionals.append(conditional) + def __init__(self, task): + pass - def get_conditionals(self): - # return a full slice to make sure the reference - # doesn't get mangled by other users of the result - return self.conditionals[:] - - def merge(self, conditional): - if isinstance(conditional, basestring): - conditional = Conditional(self.basedir, [conditional]) - elif isinstance(conditional, list): - conditional = Conditional(self.basedir, conditional) - elif not isinstance(conditional, Conditional): - raise AnsibleError('expected a Conditional() class, instead got a %s' % type(conditional)) - self.conditionals = list_union(self.conditionals, conditional.get_conditonals()) - - def evaluate(self, inject): - for conditional in self.conditional: - if not self._do_evaluate(conditional, inject): - return False - return True - - def _do_evaluate(self, conditional, inject): - # allow variable names - if conditional in inject and '-' not in str(inject[conditional]): - conditional = inject[conditional] - conditional = template.template(self.basedir, conditional, inject, fail_on_undefined=C.fail_on_undefined) - original = str(conditional).replace("jinja2_compare ","") - # a Jinja2 evaluation that results in 
something Python can eval! - presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional - conditional = template.template(self.basedir, presented, inject) - val = conditional.strip() - if val == presented: - # the templating failed, meaning most likely a - # variable was undefined. If we happened to be - # looking for an undefined variable, return True, - # otherwise fail - if "is undefined" in conditional: - return True - elif "is defined" in conditional: - return False - else: - raise errors.AnsibleError("error while evaluating conditional: %s" % original) - elif val == "True": - return True - elif val == "False": - return False - else: - raise errors.AnsibleError("unable to evaluate conditional: %s" % original) + def evaluate(self, context): + pass From 7ebf246ff12bfc4f9bdf1984620db9df2f3f7a45 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Thu, 2 Oct 2014 13:29:24 -0400 Subject: [PATCH 144/813] Some signature related things, prior to starting test scaffolding --- lib/v2/playbook/base.py | 8 +------- lib/v2/playbook/block.py | 10 ++-------- lib/v2/playbook/handler.py | 21 +++++++++------------ lib/v2/playbook/role.py | 25 ++++++++++++++----------- 4 files changed, 26 insertions(+), 38 deletions(-) diff --git a/lib/v2/playbook/base.py b/lib/v2/playbook/base.py index 44abe4a6ea..3b4785ee46 100644 --- a/lib/v2/playbook/base.py +++ b/lib/v2/playbook/base.py @@ -15,13 +15,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
-from v2.playbook import Tag, Conditional - -class PlaybookBase(Tag, Conditional): - ''' - Implements a common object, which supports filtering based on - both tags and when: conditional statements - ''' +class Base(object): def __init__(self): pass diff --git a/lib/v2/playbook/block.py b/lib/v2/playbook/block.py index f4d5946135..c5ab518efa 100644 --- a/lib/v2/playbook/block.py +++ b/lib/v2/playbook/block.py @@ -18,13 +18,7 @@ from v2.playbook.base import PlaybookBase class Block(PlaybookBase): + def __init__(self): - self.ds = None - self.plays = [] + pass - def load(self, ds): - self.ds = ds - self.plays = [] - - def get_plays(self): - return self.plays diff --git a/lib/v2/playbook/handler.py b/lib/v2/playbook/handler.py index 32a803dbc5..42d937ec46 100644 --- a/lib/v2/playbook/handler.py +++ b/lib/v2/playbook/handler.py @@ -20,20 +20,17 @@ from v2.inventory import Host from v2.playbook import Task class Handler(Task): + def __init__(self): - self.triggered = False - self.triggered_by = [] + pass def flag_for_host(self, host): - if not isinstance(host, Host): - raise AnsibleError('handlers expected to be triggered by a Host(), instead got %s' % type(host)) - if host.name not in self.triggered_by: - triggered_by.append(host.name) + assert instanceof(host, Host) + pass - def get_has_triggered(self): - return self.triggered + def has_triggered(self): + return self._triggered - def set_has_triggered(self, triggered): - if not isinstance(triggered, bool): - raise AnsibleError('a handlers triggered property should be a boolean, instead got %s' % type(triggered)) - self.triggered = triggered + def set_triggered(self, triggered): + assert instanceof(triggered, bool) + self._triggered = triggered diff --git a/lib/v2/playbook/role.py b/lib/v2/playbook/role.py index 80251833d0..0dd8817dbc 100644 --- a/lib/v2/playbook/role.py +++ b/lib/v2/playbook/role.py @@ -19,19 +19,20 @@ from v2.playbook.base import PlaybookBase from v2.utils import list_union class Role(PlaybookBase): 
+ def __init__(self): pass def load(self, ds): - self.ds = ds - self.tasks = [] - self.handlers = [] - self.blocks = [] - self.dependencies = [] - self.metadata = dict() - self.defaults = dict() - self.vars = dict() - self.params = dict() + self._ds = ds + self._tasks = [] + self._handlers = [] + self._blocks = [] + self._dependencies = [] + self._metadata = dict() + self._defaults = dict() + self._vars = dict() + self._params = dict() def get_vars(self): # returns the merged variables for this role, including @@ -39,13 +40,13 @@ class Role(PlaybookBase): return dict() def get_immediate_dependencies(self): - return self.dependencies + return self._dependencies def get_all_dependencies(self): # returns a list built recursively, of all deps from # all child dependencies all_deps = [] - for dep in self.dependencies: + for dep in self._dependencies: list_union(all_deps, dep.get_all_dependencies()) all_deps = list_union(all_deps, self.dependencies) return all_deps @@ -53,3 +54,5 @@ class Role(PlaybookBase): def get_blocks(self): # should return return self.blocks + + From 7fb55c7a9b98960d6bd41cb62db72b4e8a0be14d Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Thu, 2 Oct 2014 13:36:41 -0400 Subject: [PATCH 145/813] Some restructuring. 
--- lib/v2/{cache => common/config}/__init__.py | 0 lib/v2/{ => common}/errors/__init__.py | 0 lib/v2/modules/docs/fragments/__init__.py | 17 ----------------- lib/v2/modules/utils/__init__.py | 17 ----------------- .../{config/__init__.py => playbook/include.py} | 0 .../{modules/__init__.py => playbook/vars.py} | 0 .../docs/__init__.py => playbook/vars_file.py} | 0 lib/v2/runner/__init__.py | 16 +--------------- lib/v2/utils/__init__.py | 17 ----------------- 9 files changed, 1 insertion(+), 66 deletions(-) rename lib/v2/{cache => common/config}/__init__.py (100%) rename lib/v2/{ => common}/errors/__init__.py (100%) delete mode 100644 lib/v2/modules/docs/fragments/__init__.py delete mode 100644 lib/v2/modules/utils/__init__.py rename lib/v2/{config/__init__.py => playbook/include.py} (100%) rename lib/v2/{modules/__init__.py => playbook/vars.py} (100%) rename lib/v2/{modules/docs/__init__.py => playbook/vars_file.py} (100%) delete mode 100644 lib/v2/utils/__init__.py diff --git a/lib/v2/cache/__init__.py b/lib/v2/common/config/__init__.py similarity index 100% rename from lib/v2/cache/__init__.py rename to lib/v2/common/config/__init__.py diff --git a/lib/v2/errors/__init__.py b/lib/v2/common/errors/__init__.py similarity index 100% rename from lib/v2/errors/__init__.py rename to lib/v2/common/errors/__init__.py diff --git a/lib/v2/modules/docs/fragments/__init__.py b/lib/v2/modules/docs/fragments/__init__.py deleted file mode 100644 index d6c11ffa74..0000000000 --- a/lib/v2/modules/docs/fragments/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# (c) 2012-2014, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - diff --git a/lib/v2/modules/utils/__init__.py b/lib/v2/modules/utils/__init__.py deleted file mode 100644 index d6c11ffa74..0000000000 --- a/lib/v2/modules/utils/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# (c) 2012-2014, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- diff --git a/lib/v2/config/__init__.py b/lib/v2/playbook/include.py similarity index 100% rename from lib/v2/config/__init__.py rename to lib/v2/playbook/include.py diff --git a/lib/v2/modules/__init__.py b/lib/v2/playbook/vars.py similarity index 100% rename from lib/v2/modules/__init__.py rename to lib/v2/playbook/vars.py diff --git a/lib/v2/modules/docs/__init__.py b/lib/v2/playbook/vars_file.py similarity index 100% rename from lib/v2/modules/docs/__init__.py rename to lib/v2/playbook/vars_file.py diff --git a/lib/v2/runner/__init__.py b/lib/v2/runner/__init__.py index 2e60a96faa..ebebb9cfc0 100644 --- a/lib/v2/runner/__init__.py +++ b/lib/v2/runner/__init__.py @@ -19,18 +19,4 @@ from v2.inventory import Host from v2.playbook import Task class Runner(object): - def __init__(self, host, task): - self.host = host - self.task = task - self.action = self.get_action() - - def get_action(self): - # returns the action plugin from plugins/action/ - # for the given task - return None - - def execute(self): - # runs the given task on the given host using - # the action determined by get_action() - return - + pass diff --git a/lib/v2/utils/__init__.py b/lib/v2/utils/__init__.py deleted file mode 100644 index d6c11ffa74..0000000000 --- a/lib/v2/utils/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# (c) 2012-2014, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- From afe1409a6ece15610632e0170d0ee556ba755bbf Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Thu, 2 Oct 2014 13:40:12 -0400 Subject: [PATCH 146/813] Have a parallel development and test tree. --- lib/v2/{common => ansible/anisble}/config/__init__.py | 0 lib/v2/{common => ansible/anisble}/errors/__init__.py | 0 lib/v2/{ => ansible}/inventory/__init__.py | 0 lib/v2/{ => ansible}/inventory/group.py | 0 lib/v2/{ => ansible}/inventory/host.py | 0 lib/v2/{ => ansible}/inventory/loaders/__init__.py | 0 lib/v2/{ => ansible}/inventory/loaders/dir.py | 0 lib/v2/{ => ansible}/inventory/loaders/ini.py | 0 lib/v2/{ => ansible}/inventory/loaders/script.py | 0 lib/v2/{ => ansible}/inventory/pattern.py | 0 lib/v2/{ => ansible}/playbook/__init__.py | 0 lib/v2/{ => ansible}/playbook/base.py | 0 lib/v2/{ => ansible}/playbook/block.py | 0 lib/v2/{ => ansible}/playbook/conditional.py | 0 lib/v2/{ => ansible}/playbook/handler.py | 0 lib/v2/{ => ansible}/playbook/include.py | 0 lib/v2/{ => ansible}/playbook/play.py | 0 lib/v2/{ => ansible}/playbook/playbook_include.py | 0 lib/v2/{ => ansible}/playbook/role.py | 0 lib/v2/{ => ansible}/playbook/tag.py | 0 lib/v2/{ => ansible}/playbook/task.py | 0 lib/v2/{ => ansible}/playbook/task_include.py | 0 lib/v2/{ => ansible}/playbook/vars.py | 0 lib/v2/{ => ansible}/playbook/vars_file.py | 0 lib/v2/{ => ansible}/plugins/__init__.py | 0 lib/v2/{ => ansible}/plugins/action/__init__.py | 0 lib/v2/{ => ansible}/plugins/callback/__init__.py | 0 lib/v2/{ => ansible}/plugins/connections/__init__.py | 0 lib/v2/{ => ansible}/plugins/filter/__init__.py | 0 lib/v2/{ => ansible}/plugins/inventory/__init__.py | 0 lib/v2/{ => ansible}/plugins/lookup/__init__.py | 0 lib/v2/{ => ansible}/plugins/shell/__init__.py | 0 lib/v2/{ => ansible}/plugins/vars/__init__.py | 0 lib/v2/{ => ansible}/runner/__init__.py | 0 34 files changed, 0 insertions(+), 0 deletions(-) rename lib/v2/{common => ansible/anisble}/config/__init__.py (100%) rename lib/v2/{common => 
ansible/anisble}/errors/__init__.py (100%) rename lib/v2/{ => ansible}/inventory/__init__.py (100%) rename lib/v2/{ => ansible}/inventory/group.py (100%) rename lib/v2/{ => ansible}/inventory/host.py (100%) rename lib/v2/{ => ansible}/inventory/loaders/__init__.py (100%) rename lib/v2/{ => ansible}/inventory/loaders/dir.py (100%) rename lib/v2/{ => ansible}/inventory/loaders/ini.py (100%) rename lib/v2/{ => ansible}/inventory/loaders/script.py (100%) rename lib/v2/{ => ansible}/inventory/pattern.py (100%) rename lib/v2/{ => ansible}/playbook/__init__.py (100%) rename lib/v2/{ => ansible}/playbook/base.py (100%) rename lib/v2/{ => ansible}/playbook/block.py (100%) rename lib/v2/{ => ansible}/playbook/conditional.py (100%) rename lib/v2/{ => ansible}/playbook/handler.py (100%) rename lib/v2/{ => ansible}/playbook/include.py (100%) rename lib/v2/{ => ansible}/playbook/play.py (100%) rename lib/v2/{ => ansible}/playbook/playbook_include.py (100%) rename lib/v2/{ => ansible}/playbook/role.py (100%) rename lib/v2/{ => ansible}/playbook/tag.py (100%) rename lib/v2/{ => ansible}/playbook/task.py (100%) rename lib/v2/{ => ansible}/playbook/task_include.py (100%) rename lib/v2/{ => ansible}/playbook/vars.py (100%) rename lib/v2/{ => ansible}/playbook/vars_file.py (100%) rename lib/v2/{ => ansible}/plugins/__init__.py (100%) rename lib/v2/{ => ansible}/plugins/action/__init__.py (100%) rename lib/v2/{ => ansible}/plugins/callback/__init__.py (100%) rename lib/v2/{ => ansible}/plugins/connections/__init__.py (100%) rename lib/v2/{ => ansible}/plugins/filter/__init__.py (100%) rename lib/v2/{ => ansible}/plugins/inventory/__init__.py (100%) rename lib/v2/{ => ansible}/plugins/lookup/__init__.py (100%) rename lib/v2/{ => ansible}/plugins/shell/__init__.py (100%) rename lib/v2/{ => ansible}/plugins/vars/__init__.py (100%) rename lib/v2/{ => ansible}/runner/__init__.py (100%) diff --git a/lib/v2/common/config/__init__.py b/lib/v2/ansible/anisble/config/__init__.py similarity index 
100% rename from lib/v2/common/config/__init__.py rename to lib/v2/ansible/anisble/config/__init__.py diff --git a/lib/v2/common/errors/__init__.py b/lib/v2/ansible/anisble/errors/__init__.py similarity index 100% rename from lib/v2/common/errors/__init__.py rename to lib/v2/ansible/anisble/errors/__init__.py diff --git a/lib/v2/inventory/__init__.py b/lib/v2/ansible/inventory/__init__.py similarity index 100% rename from lib/v2/inventory/__init__.py rename to lib/v2/ansible/inventory/__init__.py diff --git a/lib/v2/inventory/group.py b/lib/v2/ansible/inventory/group.py similarity index 100% rename from lib/v2/inventory/group.py rename to lib/v2/ansible/inventory/group.py diff --git a/lib/v2/inventory/host.py b/lib/v2/ansible/inventory/host.py similarity index 100% rename from lib/v2/inventory/host.py rename to lib/v2/ansible/inventory/host.py diff --git a/lib/v2/inventory/loaders/__init__.py b/lib/v2/ansible/inventory/loaders/__init__.py similarity index 100% rename from lib/v2/inventory/loaders/__init__.py rename to lib/v2/ansible/inventory/loaders/__init__.py diff --git a/lib/v2/inventory/loaders/dir.py b/lib/v2/ansible/inventory/loaders/dir.py similarity index 100% rename from lib/v2/inventory/loaders/dir.py rename to lib/v2/ansible/inventory/loaders/dir.py diff --git a/lib/v2/inventory/loaders/ini.py b/lib/v2/ansible/inventory/loaders/ini.py similarity index 100% rename from lib/v2/inventory/loaders/ini.py rename to lib/v2/ansible/inventory/loaders/ini.py diff --git a/lib/v2/inventory/loaders/script.py b/lib/v2/ansible/inventory/loaders/script.py similarity index 100% rename from lib/v2/inventory/loaders/script.py rename to lib/v2/ansible/inventory/loaders/script.py diff --git a/lib/v2/inventory/pattern.py b/lib/v2/ansible/inventory/pattern.py similarity index 100% rename from lib/v2/inventory/pattern.py rename to lib/v2/ansible/inventory/pattern.py diff --git a/lib/v2/playbook/__init__.py b/lib/v2/ansible/playbook/__init__.py similarity index 100% rename from 
lib/v2/playbook/__init__.py rename to lib/v2/ansible/playbook/__init__.py diff --git a/lib/v2/playbook/base.py b/lib/v2/ansible/playbook/base.py similarity index 100% rename from lib/v2/playbook/base.py rename to lib/v2/ansible/playbook/base.py diff --git a/lib/v2/playbook/block.py b/lib/v2/ansible/playbook/block.py similarity index 100% rename from lib/v2/playbook/block.py rename to lib/v2/ansible/playbook/block.py diff --git a/lib/v2/playbook/conditional.py b/lib/v2/ansible/playbook/conditional.py similarity index 100% rename from lib/v2/playbook/conditional.py rename to lib/v2/ansible/playbook/conditional.py diff --git a/lib/v2/playbook/handler.py b/lib/v2/ansible/playbook/handler.py similarity index 100% rename from lib/v2/playbook/handler.py rename to lib/v2/ansible/playbook/handler.py diff --git a/lib/v2/playbook/include.py b/lib/v2/ansible/playbook/include.py similarity index 100% rename from lib/v2/playbook/include.py rename to lib/v2/ansible/playbook/include.py diff --git a/lib/v2/playbook/play.py b/lib/v2/ansible/playbook/play.py similarity index 100% rename from lib/v2/playbook/play.py rename to lib/v2/ansible/playbook/play.py diff --git a/lib/v2/playbook/playbook_include.py b/lib/v2/ansible/playbook/playbook_include.py similarity index 100% rename from lib/v2/playbook/playbook_include.py rename to lib/v2/ansible/playbook/playbook_include.py diff --git a/lib/v2/playbook/role.py b/lib/v2/ansible/playbook/role.py similarity index 100% rename from lib/v2/playbook/role.py rename to lib/v2/ansible/playbook/role.py diff --git a/lib/v2/playbook/tag.py b/lib/v2/ansible/playbook/tag.py similarity index 100% rename from lib/v2/playbook/tag.py rename to lib/v2/ansible/playbook/tag.py diff --git a/lib/v2/playbook/task.py b/lib/v2/ansible/playbook/task.py similarity index 100% rename from lib/v2/playbook/task.py rename to lib/v2/ansible/playbook/task.py diff --git a/lib/v2/playbook/task_include.py b/lib/v2/ansible/playbook/task_include.py similarity index 100% rename 
from lib/v2/playbook/task_include.py rename to lib/v2/ansible/playbook/task_include.py diff --git a/lib/v2/playbook/vars.py b/lib/v2/ansible/playbook/vars.py similarity index 100% rename from lib/v2/playbook/vars.py rename to lib/v2/ansible/playbook/vars.py diff --git a/lib/v2/playbook/vars_file.py b/lib/v2/ansible/playbook/vars_file.py similarity index 100% rename from lib/v2/playbook/vars_file.py rename to lib/v2/ansible/playbook/vars_file.py diff --git a/lib/v2/plugins/__init__.py b/lib/v2/ansible/plugins/__init__.py similarity index 100% rename from lib/v2/plugins/__init__.py rename to lib/v2/ansible/plugins/__init__.py diff --git a/lib/v2/plugins/action/__init__.py b/lib/v2/ansible/plugins/action/__init__.py similarity index 100% rename from lib/v2/plugins/action/__init__.py rename to lib/v2/ansible/plugins/action/__init__.py diff --git a/lib/v2/plugins/callback/__init__.py b/lib/v2/ansible/plugins/callback/__init__.py similarity index 100% rename from lib/v2/plugins/callback/__init__.py rename to lib/v2/ansible/plugins/callback/__init__.py diff --git a/lib/v2/plugins/connections/__init__.py b/lib/v2/ansible/plugins/connections/__init__.py similarity index 100% rename from lib/v2/plugins/connections/__init__.py rename to lib/v2/ansible/plugins/connections/__init__.py diff --git a/lib/v2/plugins/filter/__init__.py b/lib/v2/ansible/plugins/filter/__init__.py similarity index 100% rename from lib/v2/plugins/filter/__init__.py rename to lib/v2/ansible/plugins/filter/__init__.py diff --git a/lib/v2/plugins/inventory/__init__.py b/lib/v2/ansible/plugins/inventory/__init__.py similarity index 100% rename from lib/v2/plugins/inventory/__init__.py rename to lib/v2/ansible/plugins/inventory/__init__.py diff --git a/lib/v2/plugins/lookup/__init__.py b/lib/v2/ansible/plugins/lookup/__init__.py similarity index 100% rename from lib/v2/plugins/lookup/__init__.py rename to lib/v2/ansible/plugins/lookup/__init__.py diff --git a/lib/v2/plugins/shell/__init__.py 
b/lib/v2/ansible/plugins/shell/__init__.py similarity index 100% rename from lib/v2/plugins/shell/__init__.py rename to lib/v2/ansible/plugins/shell/__init__.py diff --git a/lib/v2/plugins/vars/__init__.py b/lib/v2/ansible/plugins/vars/__init__.py similarity index 100% rename from lib/v2/plugins/vars/__init__.py rename to lib/v2/ansible/plugins/vars/__init__.py diff --git a/lib/v2/runner/__init__.py b/lib/v2/ansible/runner/__init__.py similarity index 100% rename from lib/v2/runner/__init__.py rename to lib/v2/ansible/runner/__init__.py From 1aa338a353db7f5ce3e14464c110bb6e98562138 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Thu, 2 Oct 2014 13:41:52 -0400 Subject: [PATCH 147/813] Make target for nosetests, no need to set library path for previous tests --- Makefile | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index d1e1e02e8a..b1d001104e 100644 --- a/Makefile +++ b/Makefile @@ -91,7 +91,11 @@ NOSETESTS ?= nosetests all: clean python tests: - PYTHONPATH=./lib ANSIBLE_LIBRARY=./lib/ansible/modules $(NOSETESTS) -d -w test/units -v + PYTHONPATH=./lib $(NOSETESTS) -d -w test/units -v + +newtests: + PYTHONPATH=./lib $(NOSETESTS) -d -w lib/v2/test -v + authors: sh hacking/authors.sh From 367f70fb174e8a2e3e9473bc03ac6703b4e31560 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Thu, 2 Oct 2014 13:43:36 -0400 Subject: [PATCH 148/813] Fix target in makefile --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index b1d001104e..1ecda3e8e6 100644 --- a/Makefile +++ b/Makefile @@ -94,7 +94,7 @@ tests: PYTHONPATH=./lib $(NOSETESTS) -d -w test/units -v newtests: - PYTHONPATH=./lib $(NOSETESTS) -d -w lib/v2/test -v + PYTHONPATH=./lib/v2/ $(NOSETESTS) -d -w lib/v2/tests -v authors: From da01eef4dba4ff938f4f283821942ed18deae248 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Thu, 2 Oct 2014 13:47:25 -0400 Subject: [PATCH 149/813] Move v2 out of the package tree so it does not 
yet appear. --- Makefile | 2 +- {lib/v2 => v2}/__init__.py | 0 {lib/v2 => v2}/ansible/anisble/config/__init__.py | 0 {lib/v2 => v2}/ansible/anisble/errors/__init__.py | 0 {lib/v2 => v2}/ansible/inventory/__init__.py | 0 {lib/v2 => v2}/ansible/inventory/group.py | 0 {lib/v2 => v2}/ansible/inventory/host.py | 0 {lib/v2 => v2}/ansible/inventory/loaders/__init__.py | 0 {lib/v2 => v2}/ansible/inventory/loaders/dir.py | 0 {lib/v2 => v2}/ansible/inventory/loaders/ini.py | 0 {lib/v2 => v2}/ansible/inventory/loaders/script.py | 0 {lib/v2 => v2}/ansible/inventory/pattern.py | 0 {lib/v2 => v2}/ansible/playbook/__init__.py | 0 {lib/v2 => v2}/ansible/playbook/base.py | 0 {lib/v2 => v2}/ansible/playbook/block.py | 0 {lib/v2 => v2}/ansible/playbook/conditional.py | 0 {lib/v2 => v2}/ansible/playbook/handler.py | 0 {lib/v2 => v2}/ansible/playbook/include.py | 0 {lib/v2 => v2}/ansible/playbook/play.py | 0 {lib/v2 => v2}/ansible/playbook/playbook_include.py | 0 {lib/v2 => v2}/ansible/playbook/role.py | 0 {lib/v2 => v2}/ansible/playbook/tag.py | 0 {lib/v2 => v2}/ansible/playbook/task.py | 0 {lib/v2 => v2}/ansible/playbook/task_include.py | 0 {lib/v2 => v2}/ansible/playbook/vars.py | 0 {lib/v2 => v2}/ansible/playbook/vars_file.py | 0 {lib/v2 => v2}/ansible/plugins/__init__.py | 0 {lib/v2 => v2}/ansible/plugins/action/__init__.py | 0 {lib/v2 => v2}/ansible/plugins/callback/__init__.py | 0 {lib/v2 => v2}/ansible/plugins/connections/__init__.py | 0 {lib/v2 => v2}/ansible/plugins/filter/__init__.py | 0 {lib/v2 => v2}/ansible/plugins/inventory/__init__.py | 0 {lib/v2 => v2}/ansible/plugins/lookup/__init__.py | 0 {lib/v2 => v2}/ansible/plugins/shell/__init__.py | 0 {lib/v2 => v2}/ansible/plugins/vars/__init__.py | 0 {lib/v2 => v2}/ansible/runner/__init__.py | 0 36 files changed, 1 insertion(+), 1 deletion(-) rename {lib/v2 => v2}/__init__.py (100%) rename {lib/v2 => v2}/ansible/anisble/config/__init__.py (100%) rename {lib/v2 => v2}/ansible/anisble/errors/__init__.py (100%) rename {lib/v2 => 
v2}/ansible/inventory/__init__.py (100%) rename {lib/v2 => v2}/ansible/inventory/group.py (100%) rename {lib/v2 => v2}/ansible/inventory/host.py (100%) rename {lib/v2 => v2}/ansible/inventory/loaders/__init__.py (100%) rename {lib/v2 => v2}/ansible/inventory/loaders/dir.py (100%) rename {lib/v2 => v2}/ansible/inventory/loaders/ini.py (100%) rename {lib/v2 => v2}/ansible/inventory/loaders/script.py (100%) rename {lib/v2 => v2}/ansible/inventory/pattern.py (100%) rename {lib/v2 => v2}/ansible/playbook/__init__.py (100%) rename {lib/v2 => v2}/ansible/playbook/base.py (100%) rename {lib/v2 => v2}/ansible/playbook/block.py (100%) rename {lib/v2 => v2}/ansible/playbook/conditional.py (100%) rename {lib/v2 => v2}/ansible/playbook/handler.py (100%) rename {lib/v2 => v2}/ansible/playbook/include.py (100%) rename {lib/v2 => v2}/ansible/playbook/play.py (100%) rename {lib/v2 => v2}/ansible/playbook/playbook_include.py (100%) rename {lib/v2 => v2}/ansible/playbook/role.py (100%) rename {lib/v2 => v2}/ansible/playbook/tag.py (100%) rename {lib/v2 => v2}/ansible/playbook/task.py (100%) rename {lib/v2 => v2}/ansible/playbook/task_include.py (100%) rename {lib/v2 => v2}/ansible/playbook/vars.py (100%) rename {lib/v2 => v2}/ansible/playbook/vars_file.py (100%) rename {lib/v2 => v2}/ansible/plugins/__init__.py (100%) rename {lib/v2 => v2}/ansible/plugins/action/__init__.py (100%) rename {lib/v2 => v2}/ansible/plugins/callback/__init__.py (100%) rename {lib/v2 => v2}/ansible/plugins/connections/__init__.py (100%) rename {lib/v2 => v2}/ansible/plugins/filter/__init__.py (100%) rename {lib/v2 => v2}/ansible/plugins/inventory/__init__.py (100%) rename {lib/v2 => v2}/ansible/plugins/lookup/__init__.py (100%) rename {lib/v2 => v2}/ansible/plugins/shell/__init__.py (100%) rename {lib/v2 => v2}/ansible/plugins/vars/__init__.py (100%) rename {lib/v2 => v2}/ansible/runner/__init__.py (100%) diff --git a/Makefile b/Makefile index 1ecda3e8e6..e228c1e9f6 100644 --- a/Makefile +++ b/Makefile @@ 
-94,7 +94,7 @@ tests: PYTHONPATH=./lib $(NOSETESTS) -d -w test/units -v newtests: - PYTHONPATH=./lib/v2/ $(NOSETESTS) -d -w lib/v2/tests -v + PYTHONPATH=./v2/ $(NOSETESTS) -d -w v2/tests -v authors: diff --git a/lib/v2/__init__.py b/v2/__init__.py similarity index 100% rename from lib/v2/__init__.py rename to v2/__init__.py diff --git a/lib/v2/ansible/anisble/config/__init__.py b/v2/ansible/anisble/config/__init__.py similarity index 100% rename from lib/v2/ansible/anisble/config/__init__.py rename to v2/ansible/anisble/config/__init__.py diff --git a/lib/v2/ansible/anisble/errors/__init__.py b/v2/ansible/anisble/errors/__init__.py similarity index 100% rename from lib/v2/ansible/anisble/errors/__init__.py rename to v2/ansible/anisble/errors/__init__.py diff --git a/lib/v2/ansible/inventory/__init__.py b/v2/ansible/inventory/__init__.py similarity index 100% rename from lib/v2/ansible/inventory/__init__.py rename to v2/ansible/inventory/__init__.py diff --git a/lib/v2/ansible/inventory/group.py b/v2/ansible/inventory/group.py similarity index 100% rename from lib/v2/ansible/inventory/group.py rename to v2/ansible/inventory/group.py diff --git a/lib/v2/ansible/inventory/host.py b/v2/ansible/inventory/host.py similarity index 100% rename from lib/v2/ansible/inventory/host.py rename to v2/ansible/inventory/host.py diff --git a/lib/v2/ansible/inventory/loaders/__init__.py b/v2/ansible/inventory/loaders/__init__.py similarity index 100% rename from lib/v2/ansible/inventory/loaders/__init__.py rename to v2/ansible/inventory/loaders/__init__.py diff --git a/lib/v2/ansible/inventory/loaders/dir.py b/v2/ansible/inventory/loaders/dir.py similarity index 100% rename from lib/v2/ansible/inventory/loaders/dir.py rename to v2/ansible/inventory/loaders/dir.py diff --git a/lib/v2/ansible/inventory/loaders/ini.py b/v2/ansible/inventory/loaders/ini.py similarity index 100% rename from lib/v2/ansible/inventory/loaders/ini.py rename to v2/ansible/inventory/loaders/ini.py diff --git 
a/lib/v2/ansible/inventory/loaders/script.py b/v2/ansible/inventory/loaders/script.py similarity index 100% rename from lib/v2/ansible/inventory/loaders/script.py rename to v2/ansible/inventory/loaders/script.py diff --git a/lib/v2/ansible/inventory/pattern.py b/v2/ansible/inventory/pattern.py similarity index 100% rename from lib/v2/ansible/inventory/pattern.py rename to v2/ansible/inventory/pattern.py diff --git a/lib/v2/ansible/playbook/__init__.py b/v2/ansible/playbook/__init__.py similarity index 100% rename from lib/v2/ansible/playbook/__init__.py rename to v2/ansible/playbook/__init__.py diff --git a/lib/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py similarity index 100% rename from lib/v2/ansible/playbook/base.py rename to v2/ansible/playbook/base.py diff --git a/lib/v2/ansible/playbook/block.py b/v2/ansible/playbook/block.py similarity index 100% rename from lib/v2/ansible/playbook/block.py rename to v2/ansible/playbook/block.py diff --git a/lib/v2/ansible/playbook/conditional.py b/v2/ansible/playbook/conditional.py similarity index 100% rename from lib/v2/ansible/playbook/conditional.py rename to v2/ansible/playbook/conditional.py diff --git a/lib/v2/ansible/playbook/handler.py b/v2/ansible/playbook/handler.py similarity index 100% rename from lib/v2/ansible/playbook/handler.py rename to v2/ansible/playbook/handler.py diff --git a/lib/v2/ansible/playbook/include.py b/v2/ansible/playbook/include.py similarity index 100% rename from lib/v2/ansible/playbook/include.py rename to v2/ansible/playbook/include.py diff --git a/lib/v2/ansible/playbook/play.py b/v2/ansible/playbook/play.py similarity index 100% rename from lib/v2/ansible/playbook/play.py rename to v2/ansible/playbook/play.py diff --git a/lib/v2/ansible/playbook/playbook_include.py b/v2/ansible/playbook/playbook_include.py similarity index 100% rename from lib/v2/ansible/playbook/playbook_include.py rename to v2/ansible/playbook/playbook_include.py diff --git 
a/lib/v2/ansible/playbook/role.py b/v2/ansible/playbook/role.py similarity index 100% rename from lib/v2/ansible/playbook/role.py rename to v2/ansible/playbook/role.py diff --git a/lib/v2/ansible/playbook/tag.py b/v2/ansible/playbook/tag.py similarity index 100% rename from lib/v2/ansible/playbook/tag.py rename to v2/ansible/playbook/tag.py diff --git a/lib/v2/ansible/playbook/task.py b/v2/ansible/playbook/task.py similarity index 100% rename from lib/v2/ansible/playbook/task.py rename to v2/ansible/playbook/task.py diff --git a/lib/v2/ansible/playbook/task_include.py b/v2/ansible/playbook/task_include.py similarity index 100% rename from lib/v2/ansible/playbook/task_include.py rename to v2/ansible/playbook/task_include.py diff --git a/lib/v2/ansible/playbook/vars.py b/v2/ansible/playbook/vars.py similarity index 100% rename from lib/v2/ansible/playbook/vars.py rename to v2/ansible/playbook/vars.py diff --git a/lib/v2/ansible/playbook/vars_file.py b/v2/ansible/playbook/vars_file.py similarity index 100% rename from lib/v2/ansible/playbook/vars_file.py rename to v2/ansible/playbook/vars_file.py diff --git a/lib/v2/ansible/plugins/__init__.py b/v2/ansible/plugins/__init__.py similarity index 100% rename from lib/v2/ansible/plugins/__init__.py rename to v2/ansible/plugins/__init__.py diff --git a/lib/v2/ansible/plugins/action/__init__.py b/v2/ansible/plugins/action/__init__.py similarity index 100% rename from lib/v2/ansible/plugins/action/__init__.py rename to v2/ansible/plugins/action/__init__.py diff --git a/lib/v2/ansible/plugins/callback/__init__.py b/v2/ansible/plugins/callback/__init__.py similarity index 100% rename from lib/v2/ansible/plugins/callback/__init__.py rename to v2/ansible/plugins/callback/__init__.py diff --git a/lib/v2/ansible/plugins/connections/__init__.py b/v2/ansible/plugins/connections/__init__.py similarity index 100% rename from lib/v2/ansible/plugins/connections/__init__.py rename to v2/ansible/plugins/connections/__init__.py diff --git 
a/lib/v2/ansible/plugins/filter/__init__.py b/v2/ansible/plugins/filter/__init__.py similarity index 100% rename from lib/v2/ansible/plugins/filter/__init__.py rename to v2/ansible/plugins/filter/__init__.py diff --git a/lib/v2/ansible/plugins/inventory/__init__.py b/v2/ansible/plugins/inventory/__init__.py similarity index 100% rename from lib/v2/ansible/plugins/inventory/__init__.py rename to v2/ansible/plugins/inventory/__init__.py diff --git a/lib/v2/ansible/plugins/lookup/__init__.py b/v2/ansible/plugins/lookup/__init__.py similarity index 100% rename from lib/v2/ansible/plugins/lookup/__init__.py rename to v2/ansible/plugins/lookup/__init__.py diff --git a/lib/v2/ansible/plugins/shell/__init__.py b/v2/ansible/plugins/shell/__init__.py similarity index 100% rename from lib/v2/ansible/plugins/shell/__init__.py rename to v2/ansible/plugins/shell/__init__.py diff --git a/lib/v2/ansible/plugins/vars/__init__.py b/v2/ansible/plugins/vars/__init__.py similarity index 100% rename from lib/v2/ansible/plugins/vars/__init__.py rename to v2/ansible/plugins/vars/__init__.py diff --git a/lib/v2/ansible/runner/__init__.py b/v2/ansible/runner/__init__.py similarity index 100% rename from lib/v2/ansible/runner/__init__.py rename to v2/ansible/runner/__init__.py From 723e4bddc37e40f1278af5fcd7b6a235eaf98494 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Thu, 2 Oct 2014 13:48:22 -0400 Subject: [PATCH 150/813] Remove typo and intermediate dir. 
--- v2/ansible/{anisble => }/config/__init__.py | 0 v2/ansible/{anisble => }/errors/__init__.py | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename v2/ansible/{anisble => }/config/__init__.py (100%) rename v2/ansible/{anisble => }/errors/__init__.py (100%) diff --git a/v2/ansible/anisble/config/__init__.py b/v2/ansible/config/__init__.py similarity index 100% rename from v2/ansible/anisble/config/__init__.py rename to v2/ansible/config/__init__.py diff --git a/v2/ansible/anisble/errors/__init__.py b/v2/ansible/errors/__init__.py similarity index 100% rename from v2/ansible/anisble/errors/__init__.py rename to v2/ansible/errors/__init__.py From 32309e37cee4af51b1a98da7c750e5c97c5fab5f Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Thu, 2 Oct 2014 13:50:54 -0400 Subject: [PATCH 151/813] Plan for unified CLI with subcommands? --- v2/scripts/ansible | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 v2/scripts/ansible diff --git a/v2/scripts/ansible b/v2/scripts/ansible new file mode 100644 index 0000000000..e69de29bb2 From e98d8c2b5e457fcaeb37fadac0fe7167a354596a Mon Sep 17 00:00:00 2001 From: i5513 Date: Thu, 2 Oct 2014 20:30:44 +0200 Subject: [PATCH 152/813] Add readlink filter doc As requested at request pull #9213, I'm proposing updathing this doc. Do you have man pages where I can update this too ? 
Thanks --- docsite/rst/playbooks_variables.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index c5eeff0135..86146cdd0f 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -317,6 +317,10 @@ To get the directory from a path:: To expand a path containing a tilde (`~`) character (new in version 1.5):: {{ path | expanduser }} + +To get the real path of a link (new in version 1.8):: + + {{ path | readlink }} To work with Base64 encoded strings:: From 48ee191379b3ae655e382d3ee06244c11e2a5e14 Mon Sep 17 00:00:00 2001 From: Ahmed Kamal Date: Thu, 2 Oct 2014 23:45:46 +0300 Subject: [PATCH 153/813] Add one line example for looking up through etcd --- docsite/rst/playbooks_lookups.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docsite/rst/playbooks_lookups.rst b/docsite/rst/playbooks_lookups.rst index 3eb5ebb35c..429a1a1232 100644 --- a/docsite/rst/playbooks_lookups.rst +++ b/docsite/rst/playbooks_lookups.rst @@ -125,6 +125,8 @@ Here are some examples:: - debug: msg="{{ lookup('template', './some_template.j2') }} is a value from evaluation of this template" + - debug: msg="{{ lookup('etcd', 'foo') }} is a value from a locally running etcd" + As an alternative you can also assign lookup plugins to variables or use them elsewhere. 
This macros are evaluated each time they are used in a task (or template):: From cef2a8795f4517be65ce9f80080cff3dadac9d6c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 2 Oct 2014 20:26:09 -0400 Subject: [PATCH 154/813] Make dynamic inventory return byte str, not unicode --- lib/ansible/inventory/script.py | 4 +++- lib/ansible/utils/__init__.py | 19 +++++++++++++++++++ 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/lib/ansible/inventory/script.py b/lib/ansible/inventory/script.py index 723089db88..a69135aecb 100644 --- a/lib/ansible/inventory/script.py +++ b/lib/ansible/inventory/script.py @@ -26,6 +26,7 @@ from ansible import utils from ansible import errors import sys + class InventoryScript(object): ''' Host inventory parser for ansible using external inventory scripts. ''' @@ -53,6 +54,7 @@ class InventoryScript(object): # not passing from_remote because data from CMDB is trusted self.raw = utils.parse_json(self.data) + self.raw = utils.json_dict_unicode_to_bytes(self.raw) all = Group('all') groups = dict(all=all) @@ -141,7 +143,7 @@ class InventoryScript(object): if out.strip() == '': return dict() try: - return utils.parse_json(out) + return utils.json_dict_unicode_to_bytes(utils.parse_json(out)) except ValueError: raise errors.AnsibleError("could not parse post variable response: %s, %s" % (cmd, out)) diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index 7a7c94e9f4..195046caf0 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -1215,6 +1215,25 @@ def to_unicode(value): return value return value.decode("utf-8") +def json_dict_unicode_to_bytes(d): + ''' Recursively convert dict keys and values to byte str + + Specialized for json return because this only handles, lists, tuples, + and dict container types (the containers that the json module returns) + ''' + + if isinstance(d, unicode): + return d.encode('utf-8') + elif isinstance(d, dict): + return 
dict(map(json_dict_unicode_to_bytes, d.iteritems())) + elif isinstance(d, list): + return list(map(json_dict_unicode_to_bytes, d)) + elif isinstance(d, tuple): + return tuple(map(json_dict_unicode_to_bytes, d)) + else: + return d + + def get_diff(diff): # called by --diff usage in playbook and runner via callbacks # include names in diffs 'before' and 'after' and do diff -U 10 From a71be1affdc8b435a8598a6207561f0b85d66b52 Mon Sep 17 00:00:00 2001 From: Nitin Madhok Date: Thu, 2 Oct 2014 22:39:32 -0400 Subject: [PATCH 155/813] Removing hard coded link and replacing with reference --- docsite/rst/playbooks_roles.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_roles.rst b/docsite/rst/playbooks_roles.rst index 2742936cf8..3a2b2b7514 100644 --- a/docsite/rst/playbooks_roles.rst +++ b/docsite/rst/playbooks_roles.rst @@ -153,7 +153,7 @@ Roles .. versionadded:: 1.2 -Now that you have learned about `vars_files `_, tasks, and handlers, what is the best way to organize your playbooks? +Now that you have learned about :ref:`vars_files `, tasks, and handlers, what is the best way to organize your playbooks? The short answer is to use roles! Roles are ways of automatically loading certain vars_files, tasks, and handlers based on a known file structure. Grouping content by roles also allows easy sharing of roles with other users. 
From cae88ca12f1eec2251da504253f48846722341a9 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 3 Oct 2014 07:08:03 -0500 Subject: [PATCH 156/813] Initial work to refactor Task --- v2/ansible/playbook/__init__.py | 2 +- v2/ansible/playbook/base.py | 45 ++++++- v2/ansible/playbook/conditional.py | 11 +- v2/ansible/playbook/tag.py | 24 ++-- v2/ansible/playbook/task.py | 202 ++++++++++++++++++++++++++++- 5 files changed, 260 insertions(+), 24 deletions(-) diff --git a/v2/ansible/playbook/__init__.py b/v2/ansible/playbook/__init__.py index 0671c261ff..6b9cfa6ce6 100644 --- a/v2/ansible/playbook/__init__.py +++ b/v2/ansible/playbook/__init__.py @@ -15,7 +15,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -import v2.utils +import ansible.utils class Playbook(object): def __init__(self, filename): diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py index 3b4785ee46..ac7748caf7 100644 --- a/v2/ansible/playbook/base.py +++ b/v2/ansible/playbook/base.py @@ -15,8 +15,49 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+from errors import AnsibleError +from playbook.tag import Tag + class Base(object): - def __init__(self): - pass + def __init__(self): + self._tags = Tag() + def _ensure_int(self, attr, default=0): + value = getattr(self, attr) + if value is None: + setattr(self, attr, default) + elif not isinstance(value, int): + try: + setattr(self, attr, int(value)) + except ValueError: + raise AnsibleError("failed to set attr %s to an integer, got '%s' which is a %s" % (attr, value, type(value))) + + def _ensure_bool(self, attr, default=False): + value = getattr(self, attr) + if value is None: + setattr(self, attr, default) + elif not isinstance(value, bool): + setattr(self, attr, utils.boolean(value)) + + def _ensure_basestring(self, attr, default=""): + value = getattr(self, attr) + if value is None: + setattr(self, attr, default) + elif not isinstance(value, basestring): + setattr(self, attr, "%s" % value) + + def _ensure_list_of_strings(self, attr, default=[]): + value = getattr(self, attr) + if value is None: + setattr(self, attr, default) + elif not isinstance(value, list): + setattr(self, attr, [ str(value) ]) + else: + changed = False + for idx,val in enumerate(value): + if not isinstance(val, basestring): + value[idx] = str(val) + changed = True + if changed: + setattr(self, attr, value) diff --git a/v2/ansible/playbook/conditional.py b/v2/ansible/playbook/conditional.py index 6be68607a0..56028e29ea 100644 --- a/v2/ansible/playbook/conditional.py +++ b/v2/ansible/playbook/conditional.py @@ -15,15 +15,16 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
-import v2.config as C -from v2.utils import template -from v2.utils import list_union - class Conditional(object): def __init__(self, task): - pass + self._task = task + self._conditionals = [] def evaluate(self, context): pass + def push(self, conditionals): + if not isinstance(conditionals, list): + conditionals = [ conditionals ] + self._conditionals.extend(conditionals) diff --git a/v2/ansible/playbook/tag.py b/v2/ansible/playbook/tag.py index 239038ea88..a992f8dee0 100644 --- a/v2/ansible/playbook/tag.py +++ b/v2/ansible/playbook/tag.py @@ -15,19 +15,25 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from v2.errors import AnsibleError -from v2.utils import list_union +from errors import AnsibleError +from ansible.utils import list_union class Tag(object): def __init__(self, tags=[]): - self.tags = tags + assert isinstance(tags, list) + self._tags = tags - def push(self, tag): - if tag not in self.tags: - self.tags.append(tag) + def push(self, tags): + if not isinstance(tags, list): + tags = [ tags ] + for tag in tags: + if not isinstance(tag, basestring): + tag = str(tag) + if tag not in self._tags: + self._tags.append(tag) def get_tags(self): - return self.tags + return self._tags def merge(self, tags): # returns a union of the tags, which can be a string, @@ -38,8 +44,8 @@ class Tag(object): tags = Tag(tags) elif not isinstance(tags, Tag): raise AnsibleError('expected a Tag() instance, instead got %s' % type(tags)) - return utils.list_union(self.tags, tags.get_tags()) + return utils.list_union(self._tags, tags.get_tags()) def matches(self, tag): - return tag in self.tags + return tag in self._tags diff --git a/v2/ansible/playbook/task.py b/v2/ansible/playbook/task.py index 2979ba2146..dc01515145 100644 --- a/v2/ansible/playbook/task.py +++ b/v2/ansible/playbook/task.py @@ -15,17 +15,205 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
-from v2.playbook.base import PlaybookBase +from playbook.base import Base +from playbook.conditional import Conditional +from errors import AnsibleError +from ansible import utils + +class Task(Base): + + # the list of valid keys for tasks + VALID_KEYS = [ + 'always_run', + 'any_errors_fatal', + 'async', + 'connection', + 'delay', + 'delegate_to', + 'environment', + 'first_available_file', + 'ignore_errors', + 'include', + 'local_action', + 'meta', + 'name', + 'no_log', + 'notify', + 'poll', + 'register', + 'remote_user', + 'retries', + 'run_once', + 'su', + 'su_pass', + 'su_user', + 'sudo', + 'sudo_pass', + 'sudo_user', + 'transport', + 'until', + ] -class Task(PlaybookBase): def __init__(self, block=None, role=None): - self.ds = None - self.block = block - self.role = role + self._ds = None + self._block = block + self._role = role + self._reset() + super(Task, self).__init__() + + def __repr__(self): + if self._role: + return "%s : %s" % (self._role.get_name(), self._name) + else: + return self._name + + def _reset(self): + ''' clears internal data structures ''' + + for k in self.VALID_KEYS: + setattr(self, '_%s' % k, None) + + # attributes not set via the ds + self._action = None + self._module_name = None + self._parameters = None + self._lookup_plugin = None + self._lookup_terms = None + + # special conditionals + self._changed_when = Conditional(self) + self._failed_when = Conditional(self) + self._when = Conditional(self) + + def _load_parameters(data): + ''' sets the parameters for this task, based on the type of the data ''' + if isinstance(data, dict): + self._parameters = data + elif isinstance(data, basestring): + self._parameters = utils.parse_kv(data) + elif isinstance(data, None): + self._parameters = '' + else: + raise AnsibleError("invalid arguments specified, got '%s' (type=%s')" % (data, type(data))) def load(self, ds): - self.ds = ds - self.name = "" + ''' parses and loads the task from the given datastructure ''' + + # reset everything 
internally + self._reset() + + # 'action' and 'local_action' are mutually-exclusive options + if 'action' in ds and 'local_action' in ds: + raise AnsibleError("the 'action' and 'local_action' attributes can not be used together") + + # iterate over each key/value in the datastructure to parse out its parameters. + args = None + for k,v in ds.iteritems(): + if k in ('action', 'local_action'): + # task structure is: + # action: module_name k=v ... + # or + # local_action: module_name k=v ... + module_name, params = v.strip().split(' ', 1) + if module_name not in utils.plugins.module_finder: + raise AnsibleError("the specified module '%s' could not be found, check your module path" % module_name) + self._module_name = module_name + self._parameters = utils.parse_kv(params) + if k == 'local_action': + if 'delegate_to' in ds: + raise AnsibleError("delegate_to cannot be specified with local_action in task: %s" % ds.get('name', v)) + self._delegate_to = '127.0.0.1' + if not 'transport' in ds and not 'connection' in ds: + self._transport = 'local' + elif k in utils.plugins.module_finder: + # task structure is: + # - module_name: k=v ... 
+ if self._module_name: + raise AnsibleError("the module name (%s) was already specified, '%s' is a duplicate" % (self._module_name, k)) + elif 'action' in ds: + raise AnsibleError("multiple actions specified in task: '%s' and '%s'" % (k, ds.get('name', ds['action']))) + self._module_name = k + if isinstance(v, dict) and 'args' in ds: + raise AnsibleError("can't combine args: and a dict for %s: in task %s" % (k, ds.get('name', "%s: %s" % (k, v)))) + self._parameters = self._load_parameters(v) + elif k == 'args': + args = self._load_parameters(v) + elif k.startswith('with_'): + if isinstance(v, basestring): + param = v.strip() + if (param.startswith('{{') and param.find('}}') == len(ds[x]) - 2 and param.find('|') == -1): + utils.warning("It is unnecessary to use '{{' in loops, leave variables in loop expressions bare.") + plugin_name = k.replace("with_","") + if plugin_name in utils.plugins.lookup_loader: + self._lookup_plugin = plugin_name + self._lookup_terms = v + else: + raise errors.AnsibleError("cannot find lookup plugin named %s for usage in with_%s" % (plugin_name, plugin_name)) + elif k.startswith('when_'): + utils.deprecated("The 'when_' conditional has been removed. Switch to using the regular unified 'when' statements as described on docs.ansible.com.","1.5", removed=True) + if self._when: + raise errors.AnsibleError("multiple when_* statements specified in task %s" % (ds.get('name', ds.get('action')))) + when_name = k.replace("when_","") + self._when = "%s %s" % (when_name, v) + elif k in ('changed_when', 'failed_when', 'when'): + # these are conditional objects, so we push the new conditional value + # into the object so that it can be evaluated later + getattr(self, '_%s' % k).push(v) + elif k == 'tags': + # all taggable datastructures in Ansible (tasks, roles, etc.) 
are + # based on the Base() class, which includes the _tags attribute + # (which is a Tag() class) + tags = v + if isinstance(v, basestring): + tags = v.split(',') + self._tags.push(tags) + elif k not in self.VALID_KEYS: + raise AnsibleError("%s is not a legal parameter in an Ansible task or handler" % k) + else: + setattr(self, '_%s' % k, v) + + # if args were specified along with parameters, merge them now + # with the args taking lower precedence + if args: + self._parameters = utils.combine_vars(args, self._parameters) + + # run validation + self._validate() + + # finally, store the ds for later use/reference + self._ds = ds + + def _validate(self): + ''' + Validates internal datastructures and verifies mutually-exclusive + options are not in conflict. + ''' + + if not self._name: + # if no name: was specified, flatten the parameters back + # into a string and combine them with with module name + flat_params = " ".join(["%s=%s" % (k,v) for k,v in self._parameters.iteritems()]) + self._name = "%s %s" % (self._module_name, flat_params) + + # use builtin _ensure* methods to massage/set values on attributes + # anything not listed here will be defaulted to None by _reset() + self._ensure_int("_async", 0) + self._ensure_int("_poll", 10) + self._ensure_bool("_ignore_errors", False) + self._ensure_bool("_always_run", False) + self._ensure_list_of_strings("_notify", []) + + # handle mutually incompatible options + if (self._sudo or self._sudo_user or self._sudo_pass) and (self._su or self._su_user or self._su_pass): + raise AnsibleError('sudo params ("sudo", "sudo_user", "sudo_pass") and su params ("su", "su_user", "su_pass") cannot be used together') + + incompatibles = [ x for x in [ self._first_available_file, self._lookup_plugin ] if x is not None ] + if len(incompatibles) > 1: + raise AnsibleError("with_(plugin), and first_available_file are mutually incompatible in a single task") + + @property + def name(self): + return self.__repr__() def get_vars(self): return 
dict() From ff87ac08a745100500a15900436bda5e95b4e275 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Fri, 3 Oct 2014 10:34:51 -0400 Subject: [PATCH 157/813] An attempt at modularization prior to moving things towards BaseObject and considering Attributes. --- lib/ansible/modules/core | 2 +- v2/ansible/playbook/base.py | 40 +--- v2/ansible/playbook/task.py | 403 +++++++++++++++++++++--------------- 3 files changed, 242 insertions(+), 203 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index db5668b84c..9b35a39121 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit db5668b84c3a19498b843d0bfe34574aef40c193 +Subproject commit 9b35a391213fe87834af5ebc907109de2bc0005f diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py index ac7748caf7..a16e22f15f 100644 --- a/v2/ansible/playbook/base.py +++ b/v2/ansible/playbook/base.py @@ -21,43 +21,5 @@ from playbook.tag import Tag class Base(object): def __init__(self): - self._tags = Tag() + pass - def _ensure_int(self, attr, default=0): - value = getattr(self, attr) - if value is None: - setattr(self, attr, default) - elif not isinstance(value, int): - try: - setattr(self, attr, int(value)) - except ValueError: - raise AnsibleError("failed to set attr %s to an integer, got '%s' which is a %s" % (attr, value, type(value))) - - def _ensure_bool(self, attr, default=False): - value = getattr(self, attr) - if value is None: - setattr(self, attr, default) - elif not isinstance(value, bool): - setattr(self, attr, utils.boolean(value)) - - def _ensure_basestring(self, attr, default=""): - value = getattr(self, attr) - if value is None: - setattr(self, attr, default) - elif not isinstance(value, basestring): - setattr(self, attr, "%s" % value) - - def _ensure_list_of_strings(self, attr, default=[]): - value = getattr(self, attr) - if value is None: - setattr(self, attr, default) - elif not isinstance(value, list): - setattr(self, attr, [ 
str(value) ]) - else: - changed = False - for idx,val in enumerate(value): - if not isinstance(val, basestring): - value[idx] = str(val) - changed = True - if changed: - setattr(self, attr, value) diff --git a/v2/ansible/playbook/task.py b/v2/ansible/playbook/task.py index dc01515145..e8efa7eb0d 100644 --- a/v2/ansible/playbook/task.py +++ b/v2/ansible/playbook/task.py @@ -20,207 +20,284 @@ from playbook.conditional import Conditional from errors import AnsibleError from ansible import utils +# TODO: it would be fantastic (if possible) if a task new where in the YAML it was defined for describing +# it in error conditions + class Task(Base): - # the list of valid keys for tasks + """ + A task is a language feature that represents a call to a module, with given arguments and other parameters. + A handler is a subclass of a task. + + Usage: + + Task.load(datastructure) -> Task + Task.something(...) + """ + + # ================================================================================= + # KEYS AND SLOTS: defines what variables in are valid in the data structure and + # the object itself + VALID_KEYS = [ - 'always_run', - 'any_errors_fatal', - 'async', - 'connection', - 'delay', - 'delegate_to', - 'environment', - 'first_available_file', - 'ignore_errors', - 'include', - 'local_action', - 'meta', - 'name', - 'no_log', - 'notify', - 'poll', - 'register', - 'remote_user', - 'retries', - 'run_once', - 'su', - 'su_pass', - 'su_user', - 'sudo', - 'sudo_pass', - 'sudo_user', - 'transport', - 'until', + 'always_run', 'any_errors_fatal', 'async', 'connection', 'delay', 'delegate_to', 'environment', + 'first_available_file', 'ignore_errors', 'include', 'local_action', 'meta', 'name', 'no_log', + 'notify', 'poll', 'register', 'remote_user', 'retries', 'run_once', 'su', 'su_pass', 'su_user', + 'sudo', 'sudo_pass', 'sudo_user', 'transport', 'until' ] + __slots__ = [ + '_always_run', '_any_errors_fatal', '_async', '_connection', '_delay', '_delegate_to', '_environment', + 
'_first_available_file', '_ignore_errors', '_include', '_local_action', '_meta', '_name', '_no_log', + '_notify', '_poll', '_register', '_remote_user', '_retries', '_run_once', '_su', '_su_pass', '_su_user', + '_sudo', '_sudo_pass', '_sudo_user', '_transport', '_until' + ] + + # ================================================================================== + def __init__(self, block=None, role=None): - self._ds = None + ''' constructors a task, without the Task.load classmethod, it will be pretty blank ''' self._block = block self._role = role self._reset() super(Task, self).__init__() - def __repr__(self): - if self._role: + # TODO: move to BaseObject + def _reset(self): + ''' clear out the object ''' + + for x in __slots__: + setattr(x, None) + + # ================================================================================== + # BASIC ACCESSORS + + def get_name(self): + ''' return the name of the task ''' + if self._role: return "%s : %s" % (self._role.get_name(), self._name) else: return self._name - def _reset(self): - ''' clears internal data structures ''' + def __repr__(self): + ''' returns a human readable representation of the task ''' + return "TASK: %s" % self.get_name() - for k in self.VALID_KEYS: - setattr(self, '_%s' % k, None) + # FIXME: does a task have variables? 
+ def get_vars(self): + ''' return the variables associated with the task ''' + raise exception.NotImplementedError() - # attributes not set via the ds - self._action = None - self._module_name = None - self._parameters = None - self._lookup_plugin = None - self._lookup_terms = None + def get_role(self): + '' return the role associated with the task ''' + return self._role - # special conditionals - self._changed_when = Conditional(self) - self._failed_when = Conditional(self) - self._when = Conditional(self) + def get_block(self): + ''' return the block the task is in ''' + return self._block + + + # ================================================================================== + # LOAD: functions related to walking the datastructure and storing data def _load_parameters(data): - ''' sets the parameters for this task, based on the type of the data ''' - if isinstance(data, dict): - self._parameters = data + ''' validate/transmogrify/assign any module parameters for this task ''' + + if isinstance(data, dict): + return dict(_parameters=data) elif isinstance(data, basestring): - self._parameters = utils.parse_kv(data) + return dict(_parameters=utils.parse_kv(data)) elif isinstance(data, None): - self._parameters = '' + return dict(_parameters='') else: raise AnsibleError("invalid arguments specified, got '%s' (type=%s')" % (data, type(data))) - def load(self, ds): - ''' parses and loads the task from the given datastructure ''' + def _load_action(self, ds, k, v): + ''' validate/transmogrify/assign the module and parameters if used in 'action/local_action' format ''' - # reset everything internally - self._reset() + results = dict() + module_name, params = v.strip().split(' ', 1) + if module_name not in utils.plugins.module_finder: + raise AnsibleError("the specified module '%s' could not be found, check your module path" % module_name) + results['_module_name'] = module_name + results['_parameters'] = utils.parse_kv(params) - # 'action' and 'local_action' are 
mutually-exclusive options - if 'action' in ds and 'local_action' in ds: - raise AnsibleError("the 'action' and 'local_action' attributes can not be used together") + if k == 'local_action': + if 'delegate_to' in ds: + raise AnsibleError("delegate_to cannot be specified with local_action in task: %s" % ds.get('name', v)) + results['_delegate_to'] = '127.0.0.1' + if not 'transport' in ds and not 'connection' in ds: + results['_transport'] = 'local' + return results - # iterate over each key/value in the datastructure to parse out its parameters. - args = None + def _load_module(self, ds, k, v): + ''' validate/transmogrify/assign the module and parameters if used in 'module:' format ''' + + results = dict() + if self._module_name: + raise AnsibleError("the module name (%s) was already specified, '%s' is a duplicate" % (self._module_name, k)) + elif 'action' in ds: + raise AnsibleError("multiple actions specified in task: '%s' and '%s'" % (k, ds.get('name', ds['action']))) + results['_module_name'] = k + if isinstance(v, dict) and 'args' in ds: + raise AnsibleError("can't combine args: and a dict for %s: in task %s" % (k, ds.get('name', "%s: %s" % (k, v)))) + results['_parameters'] = self._load_parameters(v) + return results + + def _load_loop(self, ds, k, v): + ''' validate/transmogrify/assign the module any loop directives that have valid action plugins as names ''' + + results = dict() + if isinstance(v, basestring): + param = v.strip() + if (param.startswith('{{') and param.find('}}') == len(ds[x]) - 2 and param.find('|') == -1): + utils.warning("It is unnecessary to use '{{' in loops, leave variables in loop expressions bare.") + plugin_name = k.replace("with_","") + if plugin_name in utils.plugins.lookup_loader: + results['_lookup_plugin'] = plugin_name + results['_lookup_terms'] = v + else: + raise errors.AnsibleError("cannot find lookup plugin named %s for usage in with_%s" % (plugin_name, plugin_name)) + return results + + def _load_legacy_when(self, ds, k, 
v): + ''' yell about old when syntax being used still ''' + + utils.deprecated("The 'when_' conditional has been removed. Switch to using the regular unified 'when' statements as described on docs.ansible.com.","1.5", removed=True) + if self._when: + raise errors.AnsibleError("multiple when_* statements specified in task %s" % (ds.get('name', ds.get('action')))) + when_name = k.replace("when_","") + return dict(_when = "%s %s" % (when_name, v)) + + def _load_when(self, ds, k, v): + ''' validate/transmogrify/assign a conditional ''' + + conditionals = self._when.copy() + conditionals.push(v) + return dict(_when=conditionals) + + def _load_changed_when(self, ds, k, v): + ''' validate/transmogrify/assign a changed_when conditional ''' + + conditionals = self._changed_when.copy() + conditionals.push(v) + return dict(_changed_when=conditionals) + + def _load_failed_when(self, ds, k, v): + ''' validate/transmogrify/assign a failed_when conditional ''' + + conditionals = self._failed_when.copy() + conditionals.push(v) + return dict(_failed_when=conditionals) + + # FIXME: move to BaseObject + def _load_tags(self, ds, k, v): + ''' validate/transmogrify/assign any tags ''' + + new_tags = self.tags.copy() + tags = v + if isinstance(v, basestring): + tags = v.split(',') + new_tags.push(v) + return dict(_tags=v) + + def _load_invalid_key(self, ds, k, v): + ''' handle any key we do not recognize ''' + + raise AnsibleError("%s is not a legal parameter in an Ansible task or handler" % k) + + def _load_other_valid_key(self, ds, k, v): + ''' handle any other attribute we DO recognize ''' + + results = dict() + k = "_%s" % k + results[k] = v + return results + + def _loader_for_key(self, k): + ''' based on the name of a datastructure element, find the code to handle it ''' + + if k in ('action', 'local_action'): + return self._load_action + elif k in utils.plugins.module_finder: + return self._load_module + elif k.startswith('with_'): + return self._load_loop + elif k == 
'changed_when': + return self._load_changed_when + elif k == 'failed_when': + return self._load_failed_when + elif k == 'when': + return self._load_when + elif k == 'tags': + return self._load_tags + elif k not in self.VALID_KEYS: + return self._load_invalid_key + else: + return self._load_other_valid_key + + @classmethod + def load(self, ds, block=None, role=None): + ''' walk the datastructure and store/validate parameters ''' + + self = Task(block=block, role=role) + return self._load_from_datastructure(ds) + + # TODO: move to BaseObject + def _load_from_datastructure(ds) + + self._pre_validate(ds) + + # load the keys from the datastructure for k,v in ds.iteritems(): - if k in ('action', 'local_action'): - # task structure is: - # action: module_name k=v ... - # or - # local_action: module_name k=v ... - module_name, params = v.strip().split(' ', 1) - if module_name not in utils.plugins.module_finder: - raise AnsibleError("the specified module '%s' could not be found, check your module path" % module_name) - self._module_name = module_name - self._parameters = utils.parse_kv(params) - if k == 'local_action': - if 'delegate_to' in ds: - raise AnsibleError("delegate_to cannot be specified with local_action in task: %s" % ds.get('name', v)) - self._delegate_to = '127.0.0.1' - if not 'transport' in ds and not 'connection' in ds: - self._transport = 'local' - elif k in utils.plugins.module_finder: - # task structure is: - # - module_name: k=v ... 
- if self._module_name: - raise AnsibleError("the module name (%s) was already specified, '%s' is a duplicate" % (self._module_name, k)) - elif 'action' in ds: - raise AnsibleError("multiple actions specified in task: '%s' and '%s'" % (k, ds.get('name', ds['action']))) - self._module_name = k - if isinstance(v, dict) and 'args' in ds: - raise AnsibleError("can't combine args: and a dict for %s: in task %s" % (k, ds.get('name', "%s: %s" % (k, v)))) - self._parameters = self._load_parameters(v) - elif k == 'args': - args = self._load_parameters(v) - elif k.startswith('with_'): - if isinstance(v, basestring): - param = v.strip() - if (param.startswith('{{') and param.find('}}') == len(ds[x]) - 2 and param.find('|') == -1): - utils.warning("It is unnecessary to use '{{' in loops, leave variables in loop expressions bare.") - plugin_name = k.replace("with_","") - if plugin_name in utils.plugins.lookup_loader: - self._lookup_plugin = plugin_name - self._lookup_terms = v - else: - raise errors.AnsibleError("cannot find lookup plugin named %s for usage in with_%s" % (plugin_name, plugin_name)) - elif k.startswith('when_'): - utils.deprecated("The 'when_' conditional has been removed. Switch to using the regular unified 'when' statements as described on docs.ansible.com.","1.5", removed=True) - if self._when: - raise errors.AnsibleError("multiple when_* statements specified in task %s" % (ds.get('name', ds.get('action')))) - when_name = k.replace("when_","") - self._when = "%s %s" % (when_name, v) - elif k in ('changed_when', 'failed_when', 'when'): - # these are conditional objects, so we push the new conditional value - # into the object so that it can be evaluated later - getattr(self, '_%s' % k).push(v) - elif k == 'tags': - # all taggable datastructures in Ansible (tasks, roles, etc.) 
are - # based on the Base() class, which includes the _tags attribute - # (which is a Tag() class) - tags = v - if isinstance(v, basestring): - tags = v.split(',') - self._tags.push(tags) - elif k not in self.VALID_KEYS: - raise AnsibleError("%s is not a legal parameter in an Ansible task or handler" % k) - else: - setattr(self, '_%s' % k, v) + mods = self._loader_for_key(k)(k,v) + if (k,v) in mods.iteritems(): + setattr(self,k,v) - # if args were specified along with parameters, merge them now - # with the args taking lower precedence - if args: - self._parameters = utils.combine_vars(args, self._parameters) + self._post_validate() + return self + + # ================================================================================== + # PRE-VALIDATION - expected to be uncommonly used, this checks for arguments that + # are aliases of each other. Most everything else should be in the LOAD block + # or the POST-VALIDATE block. - # run validation - self._validate() + def _pre_validate(self, ds): + ''' rarely used function to see if the datastructure has items that mean the same thing ''' - # finally, store the ds for later use/reference - self._ds = ds + if 'action' in ds and 'local_action' in ds: + raise AnsibleError("the 'action' and 'local_action' attributes can not be used together") - def _validate(self): - ''' - Validates internal datastructures and verifies mutually-exclusive - options are not in conflict. - ''' + # ================================================================================= + # POST-VALIDATION: checks for internal inconsistency between fields + # validation can result in an error but also corrections + + def _post_validate(self): + ''' is the loaded datastructure sane? 
''' if not self._name: - # if no name: was specified, flatten the parameters back - # into a string and combine them with with module name - flat_params = " ".join(["%s=%s" % (k,v) for k,v in self._parameters.iteritems()]) - self._name = "%s %s" % (self._module_name, flat_params) + self._name = self._post_validate_fixed_name() - # use builtin _ensure* methods to massage/set values on attributes - # anything not listed here will be defaulted to None by _reset() - self._ensure_int("_async", 0) - self._ensure_int("_poll", 10) - self._ensure_bool("_ignore_errors", False) - self._ensure_bool("_always_run", False) - self._ensure_list_of_strings("_notify", []) + # incompatible items + self._validate_conflicting_su_and_sudo() + self._validate_conflicting_first_available_file_and_loookup() + + def _post_validate_fixed_name(self): + '' construct a name for the task if no name was specified ''' - # handle mutually incompatible options - if (self._sudo or self._sudo_user or self._sudo_pass) and (self._su or self._su_user or self._su_pass): + flat_params = " ".join(["%s=%s" % (k,v) for k,v in self._parameters.iteritems()]) + return = "%s %s" % (self._module_name, flat_params) + + def _post_validate_conflicting_su_and_sudo(self): + ''' make sure su/sudo usage doesn't conflict ''' + + conflicting = (self._sudo or self._sudo_user or self._sudo_pass) and (self._su or self._su_user or self._su_pass): + if conflicting: raise AnsibleError('sudo params ("sudo", "sudo_user", "sudo_pass") and su params ("su", "su_user", "su_pass") cannot be used together') - incompatibles = [ x for x in [ self._first_available_file, self._lookup_plugin ] if x is not None ] - if len(incompatibles) > 1: + def _post_validate_conflicting_first_available_file_and_lookup(self): + ''' first_available_file (deprecated) predates lookup plugins, and cannot be used with those kinds of loops ''' + + if self._first_available_file and self._lookup_plugin: raise AnsibleError("with_(plugin), and first_available_file are 
mutually incompatible in a single task") - @property - def name(self): - return self.__repr__() - - def get_vars(self): - return dict() - - def get_role(self): - return self.role - - def get_block(self): - return self.block - From fa81680d9bfe387a0683c7c8a1eaa4737893d0f7 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Fri, 3 Oct 2014 10:42:38 -0400 Subject: [PATCH 158/813] Fix docstring. --- v2/ansible/playbook/task.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/v2/ansible/playbook/task.py b/v2/ansible/playbook/task.py index e8efa7eb0d..9600d85b18 100644 --- a/v2/ansible/playbook/task.py +++ b/v2/ansible/playbook/task.py @@ -89,7 +89,7 @@ class Task(Base): raise exception.NotImplementedError() def get_role(self): - '' return the role associated with the task ''' + ''' return the role associated with the task ''' return self._role def get_block(self): From b4657234d296bec812eb47d1cc958b686c265e82 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Fri, 3 Oct 2014 10:46:49 -0400 Subject: [PATCH 159/813] Refresh submodule pointers. 
--- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 9b35a39121..f624689bad 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 9b35a391213fe87834af5ebc907109de2bc0005f +Subproject commit f624689bad24cb3a7b2ef43d5280b5f4fbabb5bd diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 110250d344..8a4f07eecd 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 110250d344be156387d08ea837f4bcb2c42034b4 +Subproject commit 8a4f07eecd2bb877f51b7b04b5352efa6076cce5 From 402d0c37b8c68ad17ee11475381f23d8a9db5084 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Fri, 3 Oct 2014 12:04:06 -0400 Subject: [PATCH 160/813] Submodule update --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index f624689bad..cb69744bce 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit f624689bad24cb3a7b2ef43d5280b5f4fbabb5bd +Subproject commit cb69744bcee4b4217d83b4a30006635ba69e2aa0 From 0c40c6c23c3253f7de8d83f03775b1a8aa0dc2c4 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 3 Oct 2014 13:30:47 -0500 Subject: [PATCH 161/813] Fix order of resolving dependent role variables from vars files Also adds a new class to the test_var_precedence test to check for this type of error in the future. 
Fixes #9178 --- lib/ansible/playbook/play.py | 3 ++- .../test_var_precedence_dep/defaults/main.yml | 5 +++++ .../roles/test_var_precedence_dep/tasks/main.yml | 14 ++++++++++++++ .../roles/test_var_precedence_dep/vars/main.yml | 4 ++++ .../roles/test_var_precedence_role1/meta/main.yml | 2 ++ 5 files changed, 27 insertions(+), 1 deletion(-) create mode 100644 test/integration/roles/test_var_precedence_dep/defaults/main.yml create mode 100644 test/integration/roles/test_var_precedence_dep/tasks/main.yml create mode 100644 test/integration/roles/test_var_precedence_dep/vars/main.yml create mode 100644 test/integration/roles/test_var_precedence_role1/meta/main.yml diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index ad57ebd9df..ca53bc029e 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -287,7 +287,8 @@ class Play(object): if os.path.isfile(vars): vars_data = utils.parse_yaml_from_file(vars, vault_password=self.vault_password) if vars_data: - dep_vars = utils.combine_vars(vars_data, dep_vars) + #dep_vars = utils.combine_vars(vars_data, dep_vars) + dep_vars = utils.combine_vars(dep_vars, vars_data) defaults = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'defaults'))) dep_defaults_data = {} if os.path.isfile(defaults): diff --git a/test/integration/roles/test_var_precedence_dep/defaults/main.yml b/test/integration/roles/test_var_precedence_dep/defaults/main.yml new file mode 100644 index 0000000000..dda4224c35 --- /dev/null +++ b/test/integration/roles/test_var_precedence_dep/defaults/main.yml @@ -0,0 +1,5 @@ +--- +# should be overridden by vars_files in the main play +vars_files_var: "BAD!" 
+# should be seen in role1 (no override) +defaults_file_var_role1: "defaults_file_var_role1" diff --git a/test/integration/roles/test_var_precedence_dep/tasks/main.yml b/test/integration/roles/test_var_precedence_dep/tasks/main.yml new file mode 100644 index 0000000000..2f8e17096b --- /dev/null +++ b/test/integration/roles/test_var_precedence_dep/tasks/main.yml @@ -0,0 +1,14 @@ +- debug: var=extra_var +- debug: var=param_var +- debug: var=vars_var +- debug: var=vars_files_var +- debug: var=vars_files_var_role +- debug: var=defaults_file_var_role1 +- assert: + that: + - 'extra_var == "extra_var"' + - 'param_var == "param_var_role1"' + - 'vars_var == "vars_var"' + - 'vars_files_var == "vars_files_var"' + - 'vars_files_var_role == "vars_files_var_dep"' + - 'defaults_file_var_role1 == "defaults_file_var_role1"' diff --git a/test/integration/roles/test_var_precedence_dep/vars/main.yml b/test/integration/roles/test_var_precedence_dep/vars/main.yml new file mode 100644 index 0000000000..a69efad537 --- /dev/null +++ b/test/integration/roles/test_var_precedence_dep/vars/main.yml @@ -0,0 +1,4 @@ +--- +# should override the global vars_files_var since it's local to the role +# but will be set to the value in the last role included which defines it +vars_files_var_role: "vars_files_var_dep" diff --git a/test/integration/roles/test_var_precedence_role1/meta/main.yml b/test/integration/roles/test_var_precedence_role1/meta/main.yml new file mode 100644 index 0000000000..c8b410b59c --- /dev/null +++ b/test/integration/roles/test_var_precedence_role1/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - test_var_precedence_dep From 335221d79e9b7a219ba0ed25dedb235e18e12d14 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Fri, 3 Oct 2014 14:53:28 -0400 Subject: [PATCH 162/813] This is an exceedingly rough sketch of what attributes might look like - metaclass implementations NOT complete. 
--- Makefile | 2 +- lib/ansible/modules/core | 2 +- test/v2/playbook/__init__.py | 2 + test/v2/playbook/task.py | 37 ++++++++++ v2/ansible/playbook/__init__.py | 2 - v2/ansible/playbook/base.py | 18 ++++- v2/ansible/playbook/task.py | 124 ++++++++++++++++++-------------- v2/ansible/runner/__init__.py | 4 +- 8 files changed, 128 insertions(+), 63 deletions(-) create mode 100644 test/v2/playbook/__init__.py create mode 100644 test/v2/playbook/task.py diff --git a/Makefile b/Makefile index e228c1e9f6..7702f5162d 100644 --- a/Makefile +++ b/Makefile @@ -94,7 +94,7 @@ tests: PYTHONPATH=./lib $(NOSETESTS) -d -w test/units -v newtests: - PYTHONPATH=./v2/ $(NOSETESTS) -d -w v2/tests -v + PYTHONPATH=./v2 $(NOSETESTS) -d -w test/v2 -v authors: diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index cb69744bce..f624689bad 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit cb69744bcee4b4217d83b4a30006635ba69e2aa0 +Subproject commit f624689bad24cb3a7b2ef43d5280b5f4fbabb5bd diff --git a/test/v2/playbook/__init__.py b/test/v2/playbook/__init__.py new file mode 100644 index 0000000000..ec86ee6101 --- /dev/null +++ b/test/v2/playbook/__init__.py @@ -0,0 +1,2 @@ +# TODO: header + diff --git a/test/v2/playbook/task.py b/test/v2/playbook/task.py new file mode 100644 index 0000000000..9f65a35cc5 --- /dev/null +++ b/test/v2/playbook/task.py @@ -0,0 +1,37 @@ +# TODO: header + +from ansible.playbook.task import Task +import unittest + +basic_shell_task = dict( + name = 'Test Task', + shell = 'echo hi' +) + +class TestTask(unittest.TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_can_construct_empty_task(): + t = Task() + + def test_can_construct_task_with_role(): + pass + + def test_can_construct_task_with_block(): + pass + + def test_can_construct_task_with_role_and_block(): + pass + + def test_can_load_simple_task(): + t = Task.load(basic_shell_task) + assert t.name == 
basic_shell_task['name'] + assert t.module == 'shell' + assert t.args == 'echo hi' + + diff --git a/v2/ansible/playbook/__init__.py b/v2/ansible/playbook/__init__.py index 6b9cfa6ce6..d2430dfc0c 100644 --- a/v2/ansible/playbook/__init__.py +++ b/v2/ansible/playbook/__init__.py @@ -15,8 +15,6 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -import ansible.utils - class Playbook(object): def __init__(self, filename): self.ds = v2.utils.load_yaml_from_file(filename) diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py index a16e22f15f..9c2ad358c4 100644 --- a/v2/ansible/playbook/base.py +++ b/v2/ansible/playbook/base.py @@ -15,11 +15,23 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from errors import AnsibleError -from playbook.tag import Tag +#from ansible.cmmon.errors import AnsibleError +#from playbook.tag import Tag class Base(object): - def __init__(self): + def __init__(self, attribute): pass + def add_attribute(self): + self.attributes.push(attribute) + + def load(self, data): + for attribute in self.attributes: + attribute.load(data) + + def validate(self): + for attribute in self.attributes: + attribute.validate(self) + + diff --git a/v2/ansible/playbook/task.py b/v2/ansible/playbook/task.py index 9600d85b18..4767c95106 100644 --- a/v2/ansible/playbook/task.py +++ b/v2/ansible/playbook/task.py @@ -15,10 +15,11 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
-from playbook.base import Base -from playbook.conditional import Conditional -from errors import AnsibleError -from ansible import utils +from ansible.playbook.base import Base +from ansible.playbook.attribute import Attribute, FieldAttribute +from ansible.playbook.conditional import Conditional +#from ansible.common.errors import AnsibleError +#from ansible import utils # TODO: it would be fantastic (if possible) if a task new where in the YAML it was defined for describing # it in error conditions @@ -36,81 +37,94 @@ class Task(Base): """ # ================================================================================= - # KEYS AND SLOTS: defines what variables in are valid in the data structure and - # the object itself + # ATTRIBUTES + # load_ and + # validate_ + # will be used if defined + # might be possible to define others - VALID_KEYS = [ - 'always_run', 'any_errors_fatal', 'async', 'connection', 'delay', 'delegate_to', 'environment', - 'first_available_file', 'ignore_errors', 'include', 'local_action', 'meta', 'name', 'no_log', - 'notify', 'poll', 'register', 'remote_user', 'retries', 'run_once', 'su', 'su_pass', 'su_user', - 'sudo', 'sudo_pass', 'sudo_user', 'transport', 'until' - ] + always_run = FieldAttribute(isa='bool') + any_errors_fatal = FieldAttribute(isa='bool') + async = FieldAttribute(isa='int') + connection = FieldAttribute(isa='string') + delay = FieldAttribute(isa='int') + delegate_to = FieldAttribute(isa='string') + environment = FieldAttribute(isa='dict') + first_available_file = FieldAttribute(isa='list') + ignore_errors = FieldAttribute(isa='bool') - __slots__ = [ - '_always_run', '_any_errors_fatal', '_async', '_connection', '_delay', '_delegate_to', '_environment', - '_first_available_file', '_ignore_errors', '_include', '_local_action', '_meta', '_name', '_no_log', - '_notify', '_poll', '_register', '_remote_user', '_retries', '_run_once', '_su', '_su_pass', '_su_user', - '_sudo', '_sudo_pass', '_sudo_user', '_transport', '_until' 
- ] + # FIXME: this should not be a Task + # include = FieldAttribute(isa='string') + + local_action = FieldAttribute(isa='string', alias='action', post_validate='_set_local_action') + + # FIXME: this should not be a Task + meta = FieldAttribute(isa='string') + + name = FieldAttribute(isa='string', post_validate='_set_name') + no_log = FieldAttribute(isa='bool') + notify = FieldAttribute(isa='list') + poll = FieldAttribute(isa='integer') + register = FieldAttribute(isa='string') + remote_user = FieldAttribute(isa='string') + retries = FieldAttribute(isa='integer') + run_once = FieldAttribute(isa='bool') + su = FieldAttribute(isa='bool') + su_pass = FieldAttribute(isa='string') + su_user = FieldAttribute(isa='string') + sudo = FieldAttribute(isa='bool') + sudo_user = FieldAttribute(isa='string') + sudo_pass = FieldAttribute(isa='string') + transport = FieldAttribute(isa='string') + until = FieldAttribute(isa='list') # ? + + role = Attribute() + block = Attribute() # ================================================================================== def __init__(self, block=None, role=None): ''' constructors a task, without the Task.load classmethod, it will be pretty blank ''' - self._block = block - self._role = role - self._reset() + self.block = block + self.role = role super(Task, self).__init__() - # TODO: move to BaseObject - def _reset(self): - ''' clear out the object ''' - - for x in __slots__: - setattr(x, None) - # ================================================================================== # BASIC ACCESSORS def get_name(self): ''' return the name of the task ''' if self._role: - return "%s : %s" % (self._role.get_name(), self._name) - else: - return self._name + return "%s : %s" % (self._role.get_name(), self._name) + else: + return self._name def __repr__(self): ''' returns a human readable representation of the task ''' return "TASK: %s" % self.get_name() - # FIXME: does a task have variables? 
- def get_vars(self): - ''' return the variables associated with the task ''' - raise exception.NotImplementedError() + @classmethod + def load(self, block=None, role=None, data=None): + self = Task(block=block, role=role) + self._load_field_attributes(data) # from BaseObject + self._load_plugin_attributes(data) # from here, becuase of lookupPlugins + return self - def get_role(self): - ''' return the role associated with the task ''' - return self._role - - def get_block(self): - ''' return the block the task is in ''' - return self._block + def _load_plugin_attributes(self, data): + module_names = self._module_names() + for (k,v) in data.iteritems(): + if k in module_names: + self.module = k + self.args = v # ================================================================================== - # LOAD: functions related to walking the datastructure and storing data + # BELOW THIS LINE + # info below this line is "old" and is before the attempt to build Attributes + # use as reference but plan to replace and radically simplify + # ================================================================================== - def _load_parameters(data): - ''' validate/transmogrify/assign any module parameters for this task ''' - - if isinstance(data, dict): - return dict(_parameters=data) - elif isinstance(data, basestring): - return dict(_parameters=utils.parse_kv(data)) - elif isinstance(data, None): - return dict(_parameters='') - else: - raise AnsibleError("invalid arguments specified, got '%s' (type=%s')" % (data, type(data))) +LEGACY = """ def _load_action(self, ds, k, v): ''' validate/transmogrify/assign the module and parameters if used in 'action/local_action' format ''' @@ -301,3 +315,5 @@ class Task(Base): if self._first_available_file and self._lookup_plugin: raise AnsibleError("with_(plugin), and first_available_file are mutually incompatible in a single task") +""" + diff --git a/v2/ansible/runner/__init__.py b/v2/ansible/runner/__init__.py index 
ebebb9cfc0..b8cc0a9219 100644 --- a/v2/ansible/runner/__init__.py +++ b/v2/ansible/runner/__init__.py @@ -15,8 +15,8 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from v2.inventory import Host -from v2.playbook import Task +#from v2.inventory import Host +#from v2.playbook import Task class Runner(object): pass From b9223e599567d43af2c1a7380a14787150db49ef Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Fri, 3 Oct 2014 15:01:59 -0400 Subject: [PATCH 163/813] Add attribute starter notes --- v2/ansible/__init__.py | 1 + v2/ansible/playbook/attribute.py | 52 ++++++++++++++++++++++++++++++++ 2 files changed, 53 insertions(+) create mode 100644 v2/ansible/__init__.py create mode 100644 v2/ansible/playbook/attribute.py diff --git a/v2/ansible/__init__.py b/v2/ansible/__init__.py new file mode 100644 index 0000000000..44026bdff0 --- /dev/null +++ b/v2/ansible/__init__.py @@ -0,0 +1 @@ +# TODO: header diff --git a/v2/ansible/playbook/attribute.py b/v2/ansible/playbook/attribute.py new file mode 100644 index 0000000000..ae1edea943 --- /dev/null +++ b/v2/ansible/playbook/attribute.py @@ -0,0 +1,52 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +#from ansible.common.errors import AnsibleError + +class MyMeta(type): + + def __call__(self, *args, **kwargs): + + obj = type.__call__(self, *args) + for name, value in kwargs.items(): + setattr(obj, name, value) + return obj + +class Attribute(object): + + __metaclass__ = MyMeta + + def load(self, data, base_object): + ''' the loader is called to store the attribute from a datastructure when key names match. The default is very basic ''' + self._validate(base_object, data) + setattr(base_object, self.name, data) + + def _validate(self, data, base_object): + ''' validate is called after loading an object data structure to massage any input or raise errors on any incompatibilities ''' + if self.validator: + self.validator(base_object) + + def post_validate(self, base_object): + ''' post validate is called after templating the context of a Task (usually in Runner) to validate the types of arguments ''' + if self.post_validator: + self.post_validator(base_object) + +class FieldAttribute(Attribute): + + __metaclass__ = MyMeta + pass + From 6db1b4dfd217c8968c8fed323daf8a74340fd10f Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Fri, 3 Oct 2014 15:25:21 -0400 Subject: [PATCH 164/813] WIP on refactoring changes --- .../inventory_test_data/inventory_api.py | 0 test/v2/playbook/{task.py => test_task.py} | 0 v2/ansible/playbook/attribute.py | 30 +++---------------- v2/ansible/playbook/base.py | 2 +- v2/ansible/playbook/task.py | 17 ++--------- 5 files changed, 7 insertions(+), 42 deletions(-) mode change 100644 => 100755 test/units/inventory_test_data/inventory_api.py rename test/v2/playbook/{task.py => test_task.py} (100%) diff --git a/test/units/inventory_test_data/inventory_api.py b/test/units/inventory_test_data/inventory_api.py old mode 100644 new mode 100755 diff --git a/test/v2/playbook/task.py b/test/v2/playbook/test_task.py similarity index 100% rename from test/v2/playbook/task.py rename to test/v2/playbook/test_task.py diff --git 
a/v2/ansible/playbook/attribute.py b/v2/ansible/playbook/attribute.py index ae1edea943..b064406af7 100644 --- a/v2/ansible/playbook/attribute.py +++ b/v2/ansible/playbook/attribute.py @@ -17,36 +17,14 @@ #from ansible.common.errors import AnsibleError -class MyMeta(type): - - def __call__(self, *args, **kwargs): - - obj = type.__call__(self, *args) - for name, value in kwargs.items(): - setattr(obj, name, value) - return obj - class Attribute(object): - __metaclass__ = MyMeta - - def load(self, data, base_object): - ''' the loader is called to store the attribute from a datastructure when key names match. The default is very basic ''' - self._validate(base_object, data) - setattr(base_object, self.name, data) - - def _validate(self, data, base_object): - ''' validate is called after loading an object data structure to massage any input or raise errors on any incompatibilities ''' - if self.validator: - self.validator(base_object) - - def post_validate(self, base_object): - ''' post validate is called after templating the context of a Task (usually in Runner) to validate the types of arguments ''' - if self.post_validator: - self.post_validator(base_object) + def __init__(self, isa=None, validator=None, post_validator=None): + self.isa = isa + self.validator = validator + self.post_validator = post_validator class FieldAttribute(Attribute): - __metaclass__ = MyMeta pass diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py index 9c2ad358c4..8bc9a83bd3 100644 --- a/v2/ansible/playbook/base.py +++ b/v2/ansible/playbook/base.py @@ -34,4 +34,4 @@ class Base(object): for attribute in self.attributes: attribute.validate(self) - + diff --git a/v2/ansible/playbook/task.py b/v2/ansible/playbook/task.py index 4767c95106..6c7d93f9fd 100644 --- a/v2/ansible/playbook/task.py +++ b/v2/ansible/playbook/task.py @@ -61,7 +61,8 @@ class Task(Base): # FIXME: this should not be a Task meta = FieldAttribute(isa='string') - name = FieldAttribute(isa='string', 
post_validate='_set_name') + name = FieldAttribute(isa='string', validate=self._set_name) + no_log = FieldAttribute(isa='bool') notify = FieldAttribute(isa='list') poll = FieldAttribute(isa='integer') @@ -103,20 +104,6 @@ class Task(Base): ''' returns a human readable representation of the task ''' return "TASK: %s" % self.get_name() - @classmethod - def load(self, block=None, role=None, data=None): - self = Task(block=block, role=role) - self._load_field_attributes(data) # from BaseObject - self._load_plugin_attributes(data) # from here, becuase of lookupPlugins - return self - - def _load_plugin_attributes(self, data): - module_names = self._module_names() - for (k,v) in data.iteritems(): - if k in module_names: - self.module = k - self.args = v - # ================================================================================== # BELOW THIS LINE From b02afa2268ed284a9772b566904b8dac5c2b236a Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Fri, 3 Oct 2014 15:33:36 -0400 Subject: [PATCH 165/813] Update submodule reference --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index f624689bad..cb69744bce 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit f624689bad24cb3a7b2ef43d5280b5f4fbabb5bd +Subproject commit cb69744bcee4b4217d83b4a30006635ba69e2aa0 From b54434c1b2f0fd4945d6a6c6ef67eace699a01dc Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Fri, 3 Oct 2014 16:37:32 -0400 Subject: [PATCH 166/813] WIP on Attributes. Getting closer. 
--- test/v2/playbook/test_task.py | 12 ++++--- v2/ansible/playbook/attribute.py | 1 + v2/ansible/playbook/base.py | 48 ++++++++++++++++++++++------ v2/ansible/playbook/role.py | 5 +++ v2/ansible/playbook/task.py | 54 +++++++++++--------------------- 5 files changed, 71 insertions(+), 49 deletions(-) diff --git a/test/v2/playbook/test_task.py b/test/v2/playbook/test_task.py index 9f65a35cc5..171b7128ea 100644 --- a/test/v2/playbook/test_task.py +++ b/test/v2/playbook/test_task.py @@ -16,20 +16,22 @@ class TestTask(unittest.TestCase): def tearDown(self): pass - def test_can_construct_empty_task(): + def test_can_construct_empty_task(self): t = Task() - def test_can_construct_task_with_role(): + def test_can_construct_task_with_role(self): pass - def test_can_construct_task_with_block(): + def test_can_construct_task_with_block(self): pass - def test_can_construct_task_with_role_and_block(): + def test_can_construct_task_with_role_and_block(self): pass - def test_can_load_simple_task(): + def test_can_load_simple_task(self): t = Task.load(basic_shell_task) + assert t is not None + print "T.NAME = %s" % t.name assert t.name == basic_shell_task['name'] assert t.module == 'shell' assert t.args == 'echo hi' diff --git a/v2/ansible/playbook/attribute.py b/v2/ansible/playbook/attribute.py index b064406af7..8b3dda8894 100644 --- a/v2/ansible/playbook/attribute.py +++ b/v2/ansible/playbook/attribute.py @@ -23,6 +23,7 @@ class Attribute(object): self.isa = isa self.validator = validator self.post_validator = post_validator + self.value = None class FieldAttribute(Attribute): diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py index 8bc9a83bd3..f4b4d0819e 100644 --- a/v2/ansible/playbook/base.py +++ b/v2/ansible/playbook/base.py @@ -17,21 +17,51 @@ #from ansible.cmmon.errors import AnsibleError #from playbook.tag import Tag +from ansible.playbook.attribute import Attribute, FieldAttribute class Base(object): - def __init__(self, attribute): - pass + def 
__init__(self): + self._data = dict() + self._attributes = dict() - def add_attribute(self): - self.attributes.push(attribute) + def load_data(self, ds): + ''' walk the input datastructure and assign any values ''' - def load(self, data): - for attribute in self.attributes: - attribute.load(data) + assert ds is not None + + for name in self.__class__.__dict__: + + print "DEBUG: processing attribute: %s" % name + + attribute = self.__class__.__dict__[name] + + if isinstance(attribute, FieldAttribute): + method = getattr(self, '_load_%s' % name, None) + if method: + self._attributes[name] = method(self, attribute) + else: + if name in ds: + self._attributes[name] = ds[name] + + # implement PluginAtrribute which allows "with_" and "action" aliases. + + return self + + + def attribute_value(self, name): + return self._attributes[name] def validate(self): - for attribute in self.attributes: - attribute.validate(self) + for name in self.__dict__: + attribute = self.__dict__[name] + if instanceof(attribute, FieldAttribute): + method = getattr(self, '_validate_%s' % (prefix, name), None) + if method: + method(self, attribute) + def post_validate(self, runner_context): + raise exception.NotImplementedError + + # TODO: __getattr__ that looks inside _attributes diff --git a/v2/ansible/playbook/role.py b/v2/ansible/playbook/role.py index 0dd8817dbc..dd1426ee8a 100644 --- a/v2/ansible/playbook/role.py +++ b/v2/ansible/playbook/role.py @@ -20,9 +20,14 @@ from v2.utils import list_union class Role(PlaybookBase): + # TODO: this will be overhauled to match Task.py at some point + def __init__(self): pass + def get_name(self): + return "TEMPORARY" + def load(self, ds): self._ds = ds self._tasks = [] diff --git a/v2/ansible/playbook/task.py b/v2/ansible/playbook/task.py index 6c7d93f9fd..24f2f8b432 100644 --- a/v2/ansible/playbook/task.py +++ b/v2/ansible/playbook/task.py @@ -43,6 +43,7 @@ class Task(Base): # will be used if defined # might be possible to define others + action = 
FieldAttribute(isa='string') always_run = FieldAttribute(isa='bool') any_errors_fatal = FieldAttribute(isa='bool') async = FieldAttribute(isa='int') @@ -54,14 +55,14 @@ class Task(Base): ignore_errors = FieldAttribute(isa='bool') # FIXME: this should not be a Task - # include = FieldAttribute(isa='string') + # include = FieldAttribute(isa='string') - local_action = FieldAttribute(isa='string', alias='action', post_validate='_set_local_action') + local_action = FieldAttribute(isa='string') # FIXME: this should not be a Task meta = FieldAttribute(isa='string') - name = FieldAttribute(isa='string', validate=self._set_name) + name = FieldAttribute(isa='string') no_log = FieldAttribute(isa='bool') notify = FieldAttribute(isa='list') @@ -82,29 +83,33 @@ class Task(Base): role = Attribute() block = Attribute() - # ================================================================================== - def __init__(self, block=None, role=None): ''' constructors a task, without the Task.load classmethod, it will be pretty blank ''' - self.block = block - self.role = role + self._block = block + self._role = role super(Task, self).__init__() - # ================================================================================== - # BASIC ACCESSORS - def get_name(self): ''' return the name of the task ''' - if self._role: - return "%s : %s" % (self._role.get_name(), self._name) + + # FIXME: getattr magic in baseclass so this is not required: + original = self.attribute_value('name') + role_value = self.attribute_value('role') + + if role_value: + return "%s : %s" % (role_value.get_name(), original) else: - return self._name + return original + + @staticmethod + def load(data, block=block, role=role): + t = Task(block=block, role=role) + return t.load_data(data) def __repr__(self): ''' returns a human readable representation of the task ''' return "TASK: %s" % self.get_name() - # ================================================================================== # BELOW THIS LINE # 
info below this line is "old" and is before the attempt to build Attributes @@ -237,27 +242,6 @@ LEGACY = """ else: return self._load_other_valid_key - @classmethod - def load(self, ds, block=None, role=None): - ''' walk the datastructure and store/validate parameters ''' - - self = Task(block=block, role=role) - return self._load_from_datastructure(ds) - - # TODO: move to BaseObject - def _load_from_datastructure(ds) - - self._pre_validate(ds) - - # load the keys from the datastructure - for k,v in ds.iteritems(): - mods = self._loader_for_key(k)(k,v) - if (k,v) in mods.iteritems(): - setattr(self,k,v) - - self._post_validate() - return self - # ================================================================================== # PRE-VALIDATION - expected to be uncommonly used, this checks for arguments that # are aliases of each other. Most everything else should be in the LOAD block From a175168686abedaad88051aedc2dab3d164ba792 Mon Sep 17 00:00:00 2001 From: Bruce Pennypacker Date: Fri, 3 Oct 2014 15:47:03 -0500 Subject: [PATCH 167/813] atfork import warning should be suppressed when system_warnings = False Fixes #9247 --- lib/ansible/runner/__init__.py | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 19c90ba529..0f11e8ba44 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -32,6 +32,7 @@ import pipes import jinja2 import subprocess import getpass +import warnings import ansible.constants as C import ansible.inventory @@ -49,6 +50,7 @@ from ansible.module_common import ModuleReplacer from ansible.module_utils.splitter import split_args, unquote from ansible.cache import FactCache from ansible.utils import update_hash +from ansible.utils.display_functions import * module_replacer = ModuleReplacer(strip_comments=False) @@ -59,10 +61,26 @@ except ImportError: HAS_ATFORK=True try: - from Crypto.Random import atfork + # some 
versions of pycrypto may not have this? + from Crypto.pct_warnings import PowmInsecureWarning except ImportError: - HAS_ATFORK=False + PowmInsecureWarning = RuntimeWarning +with warnings.catch_warnings(record=True) as warning_handler: + warnings.simplefilter("error", PowmInsecureWarning) + try: + from Crypto.Random import atfork + except PowmInsecureWarning: + system_warning( + "The version of gmp you have installed has a known issue regarding " + \ + "timing vulnerabilities when used with pycrypto. " + \ + "If possible, you should update it (ie. yum update gmp)." + ) + warnings.resetwarnings() + warnings.simplefilter("ignore") + HAS_ATFORK=False + except ImportError: + HAS_ATFORK=False multiprocessing_runner = None OUTPUT_LOCKFILE = tempfile.TemporaryFile() From 94db7365b911ac740902142e807ab5f65a970f94 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Fri, 3 Oct 2014 17:08:52 -0400 Subject: [PATCH 168/813] __getattr__ to hide some of the attribute magic. --- test/v2/playbook/test_task.py | 6 +-- v2/ansible/playbook/attribute.py | 9 ++-- v2/ansible/playbook/base.py | 51 ++++++++++++++-------- v2/ansible/playbook/task.py | 72 +++++++++++++++----------------- 4 files changed, 75 insertions(+), 63 deletions(-) diff --git a/test/v2/playbook/test_task.py b/test/v2/playbook/test_task.py index 171b7128ea..c6215d56ff 100644 --- a/test/v2/playbook/test_task.py +++ b/test/v2/playbook/test_task.py @@ -31,9 +31,9 @@ class TestTask(unittest.TestCase): def test_can_load_simple_task(self): t = Task.load(basic_shell_task) assert t is not None - print "T.NAME = %s" % t.name + print "NAME=%s" % t.name assert t.name == basic_shell_task['name'] - assert t.module == 'shell' - assert t.args == 'echo hi' + #assert t.module == 'shell' + #assert t.args == 'echo hi' diff --git a/v2/ansible/playbook/attribute.py b/v2/ansible/playbook/attribute.py index 8b3dda8894..e3e2dd13d9 100644 --- a/v2/ansible/playbook/attribute.py +++ b/v2/ansible/playbook/attribute.py @@ -18,14 +18,13 @@ #from 
ansible.common.errors import AnsibleError class Attribute(object): - - def __init__(self, isa=None, validator=None, post_validator=None): + def __init__(self, isa=None): self.isa = isa - self.validator = validator - self.post_validator = post_validator self.value = None + def __call__(self): + return self.value + class FieldAttribute(Attribute): - pass diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py index f4b4d0819e..522d3ac4ef 100644 --- a/v2/ansible/playbook/base.py +++ b/v2/ansible/playbook/base.py @@ -24,44 +24,61 @@ class Base(object): def __init__(self): self._data = dict() self._attributes = dict() + + for name in self.__class__.__dict__: + aname = name[1:] + if isinstance(aname, Attribute) and not isinstance(aname, FieldAttribute): + self._attributes[aname] = None def load_data(self, ds): ''' walk the input datastructure and assign any values ''' assert ds is not None - for name in self.__class__.__dict__: - - print "DEBUG: processing attribute: %s" % name - - attribute = self.__class__.__dict__[name] + for (name, attribute) in self.__class__.__dict__.iteritems(): + aname = name[1:] + # process Fields if isinstance(attribute, FieldAttribute): - method = getattr(self, '_load_%s' % name, None) + method = getattr(self, '_load_%s' % aname, None) if method: - self._attributes[name] = method(self, attribute) + self._attributes[aname] = method(self, attribute) else: - if name in ds: - self._attributes[name] = ds[name] + if aname in ds: + self._attributes[aname] = ds[aname] - # implement PluginAtrribute which allows "with_" and "action" aliases. + # TODO: implement PluginAtrribute which allows "with_" and "action" aliases. 
return self - def attribute_value(self, name): - return self._attributes[name] - def validate(self): - + # TODO: finish for name in self.__dict__: - attribute = self.__dict__[name] + aname = name[1:] + attribute = self.__dict__[aname] if instanceof(attribute, FieldAttribute): - method = getattr(self, '_validate_%s' % (prefix, name), None) + method = getattr(self, '_validate_%s' % (prefix, aname), None) if method: method(self, attribute) def post_validate(self, runner_context): + # TODO: finish raise exception.NotImplementedError - # TODO: __getattr__ that looks inside _attributes + def __getattr__(self, needle): + if needle in self._attributes: + return self._attributes[needle] + if needle in self.__dict__: + return self.__dict__[needle] + raise AttributeError + + #def __setattr__(self, needle, value): + # if needle in self._attributes: + # self._attributes[needle] = value + # if needle in self.__dict__: + # super(Base, self).__setattr__(needle, value) + # # self.__dict__[needle] = value + # raise AttributeError + + diff --git a/v2/ansible/playbook/task.py b/v2/ansible/playbook/task.py index 24f2f8b432..80d04fcb95 100644 --- a/v2/ansible/playbook/task.py +++ b/v2/ansible/playbook/task.py @@ -43,45 +43,45 @@ class Task(Base): # will be used if defined # might be possible to define others - action = FieldAttribute(isa='string') - always_run = FieldAttribute(isa='bool') - any_errors_fatal = FieldAttribute(isa='bool') - async = FieldAttribute(isa='int') - connection = FieldAttribute(isa='string') - delay = FieldAttribute(isa='int') - delegate_to = FieldAttribute(isa='string') - environment = FieldAttribute(isa='dict') - first_available_file = FieldAttribute(isa='list') - ignore_errors = FieldAttribute(isa='bool') + _action = FieldAttribute(isa='string') + _always_run = FieldAttribute(isa='bool') + _any_errors_fatal = FieldAttribute(isa='bool') + _async = FieldAttribute(isa='int') + _connection = FieldAttribute(isa='string') + _delay = FieldAttribute(isa='int') + 
_delegate_to = FieldAttribute(isa='string') + _environment = FieldAttribute(isa='dict') + _first_available_file = FieldAttribute(isa='list') + _ignore_errors = FieldAttribute(isa='bool') # FIXME: this should not be a Task # include = FieldAttribute(isa='string') - local_action = FieldAttribute(isa='string') + _local_action = FieldAttribute(isa='string') # FIXME: this should not be a Task - meta = FieldAttribute(isa='string') + _meta = FieldAttribute(isa='string') - name = FieldAttribute(isa='string') + _name = FieldAttribute(isa='string') - no_log = FieldAttribute(isa='bool') - notify = FieldAttribute(isa='list') - poll = FieldAttribute(isa='integer') - register = FieldAttribute(isa='string') - remote_user = FieldAttribute(isa='string') - retries = FieldAttribute(isa='integer') - run_once = FieldAttribute(isa='bool') - su = FieldAttribute(isa='bool') - su_pass = FieldAttribute(isa='string') - su_user = FieldAttribute(isa='string') - sudo = FieldAttribute(isa='bool') - sudo_user = FieldAttribute(isa='string') - sudo_pass = FieldAttribute(isa='string') - transport = FieldAttribute(isa='string') - until = FieldAttribute(isa='list') # ? + _no_log = FieldAttribute(isa='bool') + _notify = FieldAttribute(isa='list') + _poll = FieldAttribute(isa='integer') + _register = FieldAttribute(isa='string') + _remote_user = FieldAttribute(isa='string') + _retries = FieldAttribute(isa='integer') + _run_once = FieldAttribute(isa='bool') + _su = FieldAttribute(isa='bool') + _su_pass = FieldAttribute(isa='string') + _su_user = FieldAttribute(isa='string') + _sudo = FieldAttribute(isa='bool') + _sudo_user = FieldAttribute(isa='string') + _sudo_pass = FieldAttribute(isa='string') + _transport = FieldAttribute(isa='string') + _until = FieldAttribute(isa='list') # ? 
- role = Attribute() - block = Attribute() + _role = Attribute() + _block = Attribute() def __init__(self, block=None, role=None): ''' constructors a task, without the Task.load classmethod, it will be pretty blank ''' @@ -92,17 +92,13 @@ class Task(Base): def get_name(self): ''' return the name of the task ''' - # FIXME: getattr magic in baseclass so this is not required: - original = self.attribute_value('name') - role_value = self.attribute_value('role') - - if role_value: - return "%s : %s" % (role_value.get_name(), original) + if self.role: + return "%s : %s" % (self.role.get_name(), self.name) else: - return original + return self.name @staticmethod - def load(data, block=block, role=role): + def load(data, block=None, role=None): t = Task(block=block, role=role) return t.load_data(data) From 1d05be82f004f28e76df39d46bad1a2aa899ef89 Mon Sep 17 00:00:00 2001 From: Stephen Jahl Date: Fri, 3 Oct 2014 19:45:35 -0400 Subject: [PATCH 169/813] Documents the --step and --start-at options to ansible-playbook. Fixes #9041. --- docsite/rst/playbooks_intro.rst | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/docsite/rst/playbooks_intro.rst b/docsite/rst/playbooks_intro.rst index 70db3f7fe2..c93cf7f983 100644 --- a/docsite/rst/playbooks_intro.rst +++ b/docsite/rst/playbooks_intro.rst @@ -335,6 +335,25 @@ Let's run a playbook using a parallelism level of 10:: ansible-playbook playbook.yml -f 10 +Playbooks can also be executed interactively with ``--step``:: + + ansible-playbook playbook.yml --step + +This will cause ansible to stop on each task, and ask if it should execute that task. +Say you had a task called "configure ssh", the playbook run will stop and ask:: + + Perform task: configure ssh (y/n/c): + +Answering "y" will execute the task, answering "n" will skip the task, and answering "c" +will continue executing all the remaining tasks without asking. 
+ +If you want to start executing your playbook at a particular task, you can do so +with the ``--start-at`` option:: + + ansible-playbook playbook.yml --start-at="install packages" + +The above will start executing your playbook at a task named "install packages". + .. _ansible-pull: Ansible-Pull From 8cecb0690f1cbfcf1a0151eb4de03debcc8a7514 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Sat, 4 Oct 2014 09:48:25 -0400 Subject: [PATCH 170/813] Update base.py updated with some notes --- v2/ansible/playbook/base.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py index 522d3ac4ef..1223eafefe 100644 --- a/v2/ansible/playbook/base.py +++ b/v2/ansible/playbook/base.py @@ -19,6 +19,19 @@ #from playbook.tag import Tag from ansible.playbook.attribute import Attribute, FieldAttribute + +# general concept +# FooObject.load(datastructure) -> Foo +# FooObject._load_field # optional +# FooObject._validate_field # optional +# FooObject._post_validate_field # optional +# FooObject.evaluate(host_context) -> FooObject ? (calls post_validators, templates all members) +# question - are there some things that need to be evaluated *before* host context, i.e. globally? +# most things should be templated but want to provide as much early checking as possible +# TODO: also check for fields in datastructure that are not valid +# TODO: PluginAttribute(type) allows all the valid plugins as valid types of names +# lookupPlugins start with "with_", ModulePluginAttribute allows any key + class Base(object): def __init__(self): From e26ed64ad8bada0a880d08b56710d165136425b9 Mon Sep 17 00:00:00 2001 From: Lorin Hochstein Date: Sat, 4 Oct 2014 17:27:20 -0400 Subject: [PATCH 171/813] Documentation typo: ansible-module[s]-extras Typo: ansible-module-extras -> ansible-modules-extras in a link to this repository. 
--- docsite/rst/developing_modules.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index b50c4f415f..4e9c69f59f 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -18,7 +18,7 @@ The directory "./library", alongside your top level playbooks, is also automatic added as a search directory. Should you develop an interesting Ansible module, consider sending a pull request to the -`moudule-extras project `_. There's also a core +`moudules-extras project `_. There's also a core repo for more established and widely used modules. "Extras" modules may be promoted to core periodically, but there's no fundamental difference in the end - both ship with ansible, all in one package, regardless of how you acquire ansible. From 05644686de229e29f348f37ba22adbddd0d6dbfe Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sat, 4 Oct 2014 21:40:59 -0500 Subject: [PATCH 172/813] Fix bug in plugin path caching Fixes #9263 --- lib/ansible/utils/plugins.py | 15 ++++++--------- test/integration/non_destructive.yml | 2 ++ .../library/test_integration_module | 3 +++ .../roles/test_embedded_module/tasks/main.yml | 9 +++++++++ 4 files changed, 20 insertions(+), 9 deletions(-) create mode 100644 test/integration/roles/test_embedded_module/library/test_integration_module create mode 100644 test/integration/roles/test_embedded_module/tasks/main.yml diff --git a/lib/ansible/utils/plugins.py b/lib/ansible/utils/plugins.py index 09aaa5b3ba..faf5b5f26f 100644 --- a/lib/ansible/utils/plugins.py +++ b/lib/ansible/utils/plugins.py @@ -94,10 +94,8 @@ class PluginLoader(object): m = __import__(self.package) parts = self.package.split('.')[1:] self.package_path = os.path.join(os.path.dirname(m.__file__), *parts) - paths.extend(self._all_directories(self.package_path)) - return paths - else: - return [ self.package_path ] + 
paths.extend(self._all_directories(self.package_path)) + return paths def _get_paths(self): ''' Return a list of paths to search for plugins in ''' @@ -105,8 +103,7 @@ class PluginLoader(object): if self._paths is not None: return self._paths - ret = [] - ret += self._extra_dirs + ret = self._extra_dirs[:] for basedir in _basedirs: fullpath = os.path.realpath(os.path.join(basedir, self.subdir)) if os.path.isdir(fullpath): @@ -139,11 +136,9 @@ class PluginLoader(object): # look for any plugins installed in the package subtree ret.extend(self._get_package_paths()) - package_dirs = self._get_package_paths() - + # cache and return the result self._paths = ret - return ret @@ -156,7 +151,9 @@ class PluginLoader(object): if with_subdir: directory = os.path.join(directory, self.subdir) if directory not in self._extra_dirs: + # append the directory and invalidate the path cache self._extra_dirs.append(directory) + self._paths = None def find_plugin(self, name, suffixes=None, transport=''): ''' Find a plugin named name ''' diff --git a/test/integration/non_destructive.yml b/test/integration/non_destructive.yml index 619396acb2..b177763fbf 100644 --- a/test/integration/non_destructive.yml +++ b/test/integration/non_destructive.yml @@ -38,3 +38,5 @@ - { role: test_script, tags: test_script } - { role: test_authorized_key, tags: test_authorized_key } - { role: test_get_url, tags: test_get_url } + - { role: test_embedded_module, tags: test_embedded_module } + diff --git a/test/integration/roles/test_embedded_module/library/test_integration_module b/test/integration/roles/test_embedded_module/library/test_integration_module new file mode 100644 index 0000000000..5af29b4c01 --- /dev/null +++ b/test/integration/roles/test_embedded_module/library/test_integration_module @@ -0,0 +1,3 @@ +#!/usr/bin/env python + +print '{"changed":false, "msg":"this is the embedded module"}' diff --git a/test/integration/roles/test_embedded_module/tasks/main.yml 
b/test/integration/roles/test_embedded_module/tasks/main.yml new file mode 100644 index 0000000000..6a6d6485fc --- /dev/null +++ b/test/integration/roles/test_embedded_module/tasks/main.yml @@ -0,0 +1,9 @@ +- name: run the embedded dummy module + test_integration_module: + register: result + +- name: assert the embedded module ran + assert: + that: + - "'msg' in result" + - result.msg == "this is the embedded module" From 83457e147c765a2d0b468e007dc8f0019d4a221f Mon Sep 17 00:00:00 2001 From: CaptTofu Date: Sun, 5 Oct 2014 03:35:22 -0700 Subject: [PATCH 173/813] Small fix for issue #9275 --- docsite/rst/developing_modules.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index b50c4f415f..3d430326c9 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -18,7 +18,7 @@ The directory "./library", alongside your top level playbooks, is also automatic added as a search directory. Should you develop an interesting Ansible module, consider sending a pull request to the -`moudule-extras project `_. There's also a core +`moudules-extras project `_. There's also a core repo for more established and widely used modules. "Extras" modules may be promoted to core periodically, but there's no fundamental difference in the end - both ship with ansible, all in one package, regardless of how you acquire ansible. @@ -48,7 +48,7 @@ modules. Keep in mind, though, that some modules in ansible's source tree are so look at `service` or `yum`, and don't stare too close into things like `async_wrapper` or you'll turn to stone. Nobody ever executes async_wrapper directly. -Ok, let's get going with an example. We'll use Python. For starters, save this as a file named `time`:: +Ok, let's get going with an example. We'll use Python. 
For starters, save this as a file named `time.py`:: #!/usr/bin/python @@ -73,7 +73,7 @@ There's a useful test script in the source checkout for ansible:: Let's run the script you just wrote with that:: - ansible/hacking/test-module -m ./time + ansible/hacking/test-module -m ./time.py You should see output that looks something like this:: @@ -444,7 +444,7 @@ If you are having trouble getting your module "found" by ansible, be sure it is If you have a fork of one of the ansible module projects, do something like this:: - ANSIBLE_LIBRARY=~/ansible-module-core:~/ansible-module-extras + ANSIBLE_LIBRARY=~/ansible-modules-core:~/ansible-modules-extras And this will make the items in your fork be loaded ahead of what ships with Ansible. Just be sure to make sure you're not reporting bugs on versions from your fork! From 3534bdf953bd0ab98c521b0f5e6ea5523d3f9ea8 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 5 Oct 2014 20:53:06 -0400 Subject: [PATCH 174/813] added new 'shuffle' filter --- docsite/rst/playbooks_variables.rst | 13 +++++++++++++ lib/ansible/runner/filter_plugins/core.py | 8 ++++++-- 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index 86146cdd0f..ba85210293 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -297,6 +297,19 @@ Get a random number from 1 to 100 but in steps of 10:: {{ 100 |random(start=1, step=10) }} => 51 +Shuffle Filter +-------------- + +.. versionadded:: 1.8 + +This filter will randomize an existing list, giving a differnt order every invocation. + +To get a random list from an existing list:: + + {{ ['a','b','c']|shuffle }} => ['c','a','b'] + {{ ['a','b','c']|shuffle }} => ['b','c','a'] + + .. 
_other_useful_filters: Other Useful Filters diff --git a/lib/ansible/runner/filter_plugins/core.py b/lib/ansible/runner/filter_plugins/core.py index 61b80bce2c..71cfd267dc 100644 --- a/lib/ansible/runner/filter_plugins/core.py +++ b/lib/ansible/runner/filter_plugins/core.py @@ -28,7 +28,7 @@ import operator as py_operator from ansible import errors from ansible.utils import md5s from distutils.version import LooseVersion, StrictVersion -from random import SystemRandom +from random import SystemRandom, shuffle from jinja2.filters import environmentfilter @@ -235,6 +235,9 @@ def rand(environment, end, start=None, step=None): else: raise errors.AnsibleFilterError('random can only be used on sequences and integers') +def randomize_list(mylist): + shuffle(mylist) + return mylist class FilterModule(object): ''' Ansible core jinja2 filters ''' @@ -305,6 +308,7 @@ class FilterModule(object): # version comparison 'version_compare': version_compare, - # random numbers + # random stuff 'random': rand, + 'shuffle': randomize_list, } From 12d5b75a438fae53b5ba65bdf44dec0069400bba Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 6 Oct 2014 10:50:55 -0500 Subject: [PATCH 175/813] Template play vars after reading them Fixes #9242 --- lib/ansible/playbook/play.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index ca53bc029e..742c12b382 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -84,6 +84,11 @@ class Play(object): if self.playbook.inventory.src() is not None: self.vars['inventory_file'] = self.playbook.inventory.src() + # template the play vars with themselves and the extra vars + # from the playbook, to make sure they're correct + all_vars = utils.combine_vars(self.vars, self.playbook.extra_vars) + self.vars = template(basedir, self.vars, all_vars) + # We first load the vars files from the datastructure # so we have the default variables to pass into the roles 
self.vars_files = ds.get('vars_files', []) From f3714c88a6c748145066f335e9fb8109ed9c748d Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 6 Oct 2014 15:05:52 -0500 Subject: [PATCH 176/813] Revert "atfork import warning should be suppressed when system_warnings = False" This reverts commit a175168686abedaad88051aedc2dab3d164ba792. --- lib/ansible/runner/__init__.py | 22 ++-------------------- 1 file changed, 2 insertions(+), 20 deletions(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 0f11e8ba44..19c90ba529 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -32,7 +32,6 @@ import pipes import jinja2 import subprocess import getpass -import warnings import ansible.constants as C import ansible.inventory @@ -50,7 +49,6 @@ from ansible.module_common import ModuleReplacer from ansible.module_utils.splitter import split_args, unquote from ansible.cache import FactCache from ansible.utils import update_hash -from ansible.utils.display_functions import * module_replacer = ModuleReplacer(strip_comments=False) @@ -61,26 +59,10 @@ except ImportError: HAS_ATFORK=True try: - # some versions of pycrypto may not have this? - from Crypto.pct_warnings import PowmInsecureWarning + from Crypto.Random import atfork except ImportError: - PowmInsecureWarning = RuntimeWarning + HAS_ATFORK=False -with warnings.catch_warnings(record=True) as warning_handler: - warnings.simplefilter("error", PowmInsecureWarning) - try: - from Crypto.Random import atfork - except PowmInsecureWarning: - system_warning( - "The version of gmp you have installed has a known issue regarding " + \ - "timing vulnerabilities when used with pycrypto. " + \ - "If possible, you should update it (ie. yum update gmp)." 
- ) - warnings.resetwarnings() - warnings.simplefilter("ignore") - HAS_ATFORK=False - except ImportError: - HAS_ATFORK=False multiprocessing_runner = None OUTPUT_LOCKFILE = tempfile.TemporaryFile() From f066e361f1a5404c1767c803fa67777deabd29af Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Mon, 6 Oct 2014 15:27:41 -0400 Subject: [PATCH 177/813] WIP on data structure processing patterns. --- Makefile | 2 +- test/v2/playbook/test_task.py | 37 +++-- v2/ansible/constants.py | 190 +++++++++++++++++++++++ v2/ansible/playbook/base.py | 76 ++++----- v2/ansible/playbook/task.py | 55 ++++++- v2/ansible/plugins/__init__.py | 271 ++++++++++++++++++++++++++++++++- 6 files changed, 575 insertions(+), 56 deletions(-) create mode 100644 v2/ansible/constants.py diff --git a/Makefile b/Makefile index 7702f5162d..b659e044f8 100644 --- a/Makefile +++ b/Makefile @@ -94,7 +94,7 @@ tests: PYTHONPATH=./lib $(NOSETESTS) -d -w test/units -v newtests: - PYTHONPATH=./v2 $(NOSETESTS) -d -w test/v2 -v + PYTHONPATH=./v2:./lib $(NOSETESTS) -d -w test/v2 -v authors: diff --git a/test/v2/playbook/test_task.py b/test/v2/playbook/test_task.py index c6215d56ff..a012dff4bf 100644 --- a/test/v2/playbook/test_task.py +++ b/test/v2/playbook/test_task.py @@ -16,24 +16,39 @@ class TestTask(unittest.TestCase): def tearDown(self): pass - def test_can_construct_empty_task(self): + def test_construct_empty_task(self): t = Task() - def test_can_construct_task_with_role(self): + def test_construct_task_with_role(self): pass - def test_can_construct_task_with_block(self): + def test_construct_task_with_block(self): pass - def test_can_construct_task_with_role_and_block(self): + def test_construct_task_with_role_and_block(self): pass - def test_can_load_simple_task(self): - t = Task.load(basic_shell_task) - assert t is not None - print "NAME=%s" % t.name - assert t.name == basic_shell_task['name'] - #assert t.module == 'shell' - #assert t.args == 'echo hi' + def test_load_simple_task(self): + t = 
Task.load(basic_shell_task) + assert t is not None + assert t.name == basic_shell_task['name'] + assert t.module == 'shell' + assert t.args == 'echo hi' + def test_can_load_action_kv_form(self): + pass + + def test_can_load_action_complex_form(self): + pass + + def test_can_load_module_complex_form(self): + pass + + def test_local_action_implies_delegate(self): + pass + def test_local_action_conflicts_with_delegate(self): + pass + + def test_delegate_to_parses(self): + pass diff --git a/v2/ansible/constants.py b/v2/ansible/constants.py new file mode 100644 index 0000000000..861dd5325c --- /dev/null +++ b/v2/ansible/constants.py @@ -0,0 +1,190 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +import os +import pwd +import sys +import ConfigParser +from string import ascii_letters, digits + +# copied from utils, avoid circular reference fun :) +def mk_boolean(value): + if value is None: + return False + val = str(value) + if val.lower() in [ "true", "t", "y", "1", "yes" ]: + return True + else: + return False + +def get_config(p, section, key, env_var, default, boolean=False, integer=False, floating=False, islist=False): + ''' return a configuration variable with casting ''' + value = _get_config(p, section, key, env_var, default) + if boolean: + return mk_boolean(value) + if value and integer: + return int(value) + if value and floating: + return float(value) + if value and islist: + return [x.strip() for x in value.split(',')] + return value + +def _get_config(p, section, key, env_var, default): + ''' helper function for get_config ''' + if env_var is not None: + value = os.environ.get(env_var, None) + if value is not None: + return value + if p is not None: + try: + return p.get(section, key, raw=True) + except: + return default + return default + +def load_config_file(): + ''' Load Config File order(first found is used): ENV, CWD, HOME, /etc/ansible ''' + + p = ConfigParser.ConfigParser() + + path0 = os.getenv("ANSIBLE_CONFIG", None) + if path0 is not None: + path0 = os.path.expanduser(path0) + path1 = os.getcwd() + "/ansible.cfg" + path2 = os.path.expanduser("~/.ansible.cfg") + path3 = "/etc/ansible/ansible.cfg" + + for path in [path0, path1, path2, path3]: + if path is not None and os.path.exists(path): + try: + p.read(path) + except ConfigParser.Error as e: + print "Error reading config file: \n%s" % e + sys.exit(1) + return p + return None + +def shell_expand_path(path): + ''' shell_expand_path is needed as os.path.expanduser does not work + when path is None, which is the default for ANSIBLE_PRIVATE_KEY_FILE ''' + if path: + path = os.path.expanduser(os.path.expandvars(path)) + return path + +p = load_config_file() + +active_user = 
pwd.getpwuid(os.geteuid())[0] + +# check all of these extensions when looking for yaml files for things like +# group variables -- really anything we can load +YAML_FILENAME_EXTENSIONS = [ "", ".yml", ".yaml", ".json" ] + +# sections in config file +DEFAULTS='defaults' + +# configurable things +DEFAULT_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS, 'hostfile', 'ANSIBLE_HOSTS', '/etc/ansible/hosts')) +DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None) +DEFAULT_ROLES_PATH = shell_expand_path(get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles')) +DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '$HOME/.ansible/tmp') +DEFAULT_MODULE_NAME = get_config(p, DEFAULTS, 'module_name', None, 'command') +DEFAULT_PATTERN = get_config(p, DEFAULTS, 'pattern', None, '*') +DEFAULT_FORKS = get_config(p, DEFAULTS, 'forks', 'ANSIBLE_FORKS', 5, integer=True) +DEFAULT_MODULE_ARGS = get_config(p, DEFAULTS, 'module_args', 'ANSIBLE_MODULE_ARGS', '') +DEFAULT_MODULE_LANG = get_config(p, DEFAULTS, 'module_lang', 'ANSIBLE_MODULE_LANG', 'en_US.UTF-8') +DEFAULT_TIMEOUT = get_config(p, DEFAULTS, 'timeout', 'ANSIBLE_TIMEOUT', 10, integer=True) +DEFAULT_POLL_INTERVAL = get_config(p, DEFAULTS, 'poll_interval', 'ANSIBLE_POLL_INTERVAL', 15, integer=True) +DEFAULT_REMOTE_USER = get_config(p, DEFAULTS, 'remote_user', 'ANSIBLE_REMOTE_USER', active_user) +DEFAULT_ASK_PASS = get_config(p, DEFAULTS, 'ask_pass', 'ANSIBLE_ASK_PASS', False, boolean=True) +DEFAULT_PRIVATE_KEY_FILE = shell_expand_path(get_config(p, DEFAULTS, 'private_key_file', 'ANSIBLE_PRIVATE_KEY_FILE', None)) +DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root') +DEFAULT_ASK_SUDO_PASS = get_config(p, DEFAULTS, 'ask_sudo_pass', 'ANSIBLE_ASK_SUDO_PASS', False, boolean=True) +DEFAULT_REMOTE_PORT = get_config(p, DEFAULTS, 'remote_port', 'ANSIBLE_REMOTE_PORT', None, integer=True) +DEFAULT_ASK_VAULT_PASS = 
get_config(p, DEFAULTS, 'ask_vault_pass', 'ANSIBLE_ASK_VAULT_PASS', False, boolean=True) +DEFAULT_VAULT_PASSWORD_FILE = shell_expand_path(get_config(p, DEFAULTS, 'vault_password_file', 'ANSIBLE_VAULT_PASSWORD_FILE', None)) +DEFAULT_TRANSPORT = get_config(p, DEFAULTS, 'transport', 'ANSIBLE_TRANSPORT', 'smart') +DEFAULT_SCP_IF_SSH = get_config(p, 'ssh_connection', 'scp_if_ssh', 'ANSIBLE_SCP_IF_SSH', False, boolean=True) +DEFAULT_MANAGED_STR = get_config(p, DEFAULTS, 'ansible_managed', None, 'Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}') +DEFAULT_SYSLOG_FACILITY = get_config(p, DEFAULTS, 'syslog_facility', 'ANSIBLE_SYSLOG_FACILITY', 'LOG_USER') +DEFAULT_KEEP_REMOTE_FILES = get_config(p, DEFAULTS, 'keep_remote_files', 'ANSIBLE_KEEP_REMOTE_FILES', False, boolean=True) +DEFAULT_SUDO = get_config(p, DEFAULTS, 'sudo', 'ANSIBLE_SUDO', False, boolean=True) +DEFAULT_SUDO_EXE = get_config(p, DEFAULTS, 'sudo_exe', 'ANSIBLE_SUDO_EXE', 'sudo') +DEFAULT_SUDO_FLAGS = get_config(p, DEFAULTS, 'sudo_flags', 'ANSIBLE_SUDO_FLAGS', '-H') +DEFAULT_HASH_BEHAVIOUR = get_config(p, DEFAULTS, 'hash_behaviour', 'ANSIBLE_HASH_BEHAVIOUR', 'replace') +DEFAULT_JINJA2_EXTENSIONS = get_config(p, DEFAULTS, 'jinja2_extensions', 'ANSIBLE_JINJA2_EXTENSIONS', None) +DEFAULT_EXECUTABLE = get_config(p, DEFAULTS, 'executable', 'ANSIBLE_EXECUTABLE', '/bin/sh') +DEFAULT_SU_EXE = get_config(p, DEFAULTS, 'su_exe', 'ANSIBLE_SU_EXE', 'su') +DEFAULT_SU = get_config(p, DEFAULTS, 'su', 'ANSIBLE_SU', False, boolean=True) +DEFAULT_SU_FLAGS = get_config(p, DEFAULTS, 'su_flags', 'ANSIBLE_SU_FLAGS', '') +DEFAULT_SU_USER = get_config(p, DEFAULTS, 'su_user', 'ANSIBLE_SU_USER', 'root') +DEFAULT_ASK_SU_PASS = get_config(p, DEFAULTS, 'ask_su_pass', 'ANSIBLE_ASK_SU_PASS', False, boolean=True) +DEFAULT_GATHERING = get_config(p, DEFAULTS, 'gathering', 'ANSIBLE_GATHERING', 'implicit').lower() + +DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', 
'/usr/share/ansible_plugins/action_plugins') +DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', '/usr/share/ansible_plugins/cache_plugins') +DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS', '/usr/share/ansible_plugins/callback_plugins') +DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', 'ANSIBLE_CONNECTION_PLUGINS', '/usr/share/ansible_plugins/connection_plugins') +DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', '/usr/share/ansible_plugins/lookup_plugins') +DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', '/usr/share/ansible_plugins/vars_plugins') +DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '/usr/share/ansible_plugins/filter_plugins') +DEFAULT_LOG_PATH = shell_expand_path(get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', '')) + +CACHE_PLUGIN = get_config(p, DEFAULTS, 'fact_caching', 'ANSIBLE_CACHE_PLUGIN', 'memory') +CACHE_PLUGIN_CONNECTION = get_config(p, DEFAULTS, 'fact_caching_connection', 'ANSIBLE_CACHE_PLUGIN_CONNECTION', None) +CACHE_PLUGIN_PREFIX = get_config(p, DEFAULTS, 'fact_caching_prefix', 'ANSIBLE_CACHE_PLUGIN_PREFIX', 'ansible_facts') +CACHE_PLUGIN_TIMEOUT = get_config(p, DEFAULTS, 'fact_caching_timeout', 'ANSIBLE_CACHE_PLUGIN_TIMEOUT', 24 * 60 * 60, integer=True) + +ANSIBLE_FORCE_COLOR = get_config(p, DEFAULTS, 'force_color', 'ANSIBLE_FORCE_COLOR', None, boolean=True) +ANSIBLE_NOCOLOR = get_config(p, DEFAULTS, 'nocolor', 'ANSIBLE_NOCOLOR', None, boolean=True) +ANSIBLE_NOCOWS = get_config(p, DEFAULTS, 'nocows', 'ANSIBLE_NOCOWS', None, boolean=True) +DISPLAY_SKIPPED_HOSTS = get_config(p, DEFAULTS, 'display_skipped_hosts', 'DISPLAY_SKIPPED_HOSTS', True, boolean=True) +DEFAULT_UNDEFINED_VAR_BEHAVIOR = get_config(p, DEFAULTS, 'error_on_undefined_vars', 
'ANSIBLE_ERROR_ON_UNDEFINED_VARS', True, boolean=True) +HOST_KEY_CHECKING = get_config(p, DEFAULTS, 'host_key_checking', 'ANSIBLE_HOST_KEY_CHECKING', True, boolean=True) +SYSTEM_WARNINGS = get_config(p, DEFAULTS, 'system_warnings', 'ANSIBLE_SYSTEM_WARNINGS', True, boolean=True) +DEPRECATION_WARNINGS = get_config(p, DEFAULTS, 'deprecation_warnings', 'ANSIBLE_DEPRECATION_WARNINGS', True, boolean=True) +DEFAULT_CALLABLE_WHITELIST = get_config(p, DEFAULTS, 'callable_whitelist', 'ANSIBLE_CALLABLE_WHITELIST', [], islist=True) +COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'ANSIBLE_COMMAND_WARNINGS', False, boolean=True) +DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, boolean=True) + +# CONNECTION RELATED +ANSIBLE_SSH_ARGS = get_config(p, 'ssh_connection', 'ssh_args', 'ANSIBLE_SSH_ARGS', None) +ANSIBLE_SSH_CONTROL_PATH = get_config(p, 'ssh_connection', 'control_path', 'ANSIBLE_SSH_CONTROL_PATH', "%(directory)s/ansible-ssh-%%h-%%p-%%r") +ANSIBLE_SSH_PIPELINING = get_config(p, 'ssh_connection', 'pipelining', 'ANSIBLE_SSH_PIPELINING', False, boolean=True) +PARAMIKO_RECORD_HOST_KEYS = get_config(p, 'paramiko_connection', 'record_host_keys', 'ANSIBLE_PARAMIKO_RECORD_HOST_KEYS', True, boolean=True) +# obsolete -- will be formally removed in 1.6 +ZEROMQ_PORT = get_config(p, 'fireball_connection', 'zeromq_port', 'ANSIBLE_ZEROMQ_PORT', 5099, integer=True) +ACCELERATE_PORT = get_config(p, 'accelerate', 'accelerate_port', 'ACCELERATE_PORT', 5099, integer=True) +ACCELERATE_TIMEOUT = get_config(p, 'accelerate', 'accelerate_timeout', 'ACCELERATE_TIMEOUT', 30, integer=True) +ACCELERATE_CONNECT_TIMEOUT = get_config(p, 'accelerate', 'accelerate_connect_timeout', 'ACCELERATE_CONNECT_TIMEOUT', 1.0, floating=True) +ACCELERATE_DAEMON_TIMEOUT = get_config(p, 'accelerate', 'accelerate_daemon_timeout', 'ACCELERATE_DAEMON_TIMEOUT', 30, integer=True) +ACCELERATE_KEYS_DIR = get_config(p, 'accelerate', 
'accelerate_keys_dir', 'ACCELERATE_KEYS_DIR', '~/.fireball.keys') +ACCELERATE_KEYS_DIR_PERMS = get_config(p, 'accelerate', 'accelerate_keys_dir_perms', 'ACCELERATE_KEYS_DIR_PERMS', '700') +ACCELERATE_KEYS_FILE_PERMS = get_config(p, 'accelerate', 'accelerate_keys_file_perms', 'ACCELERATE_KEYS_FILE_PERMS', '600') +ACCELERATE_MULTI_KEY = get_config(p, 'accelerate', 'accelerate_multi_key', 'ACCELERATE_MULTI_KEY', False, boolean=True) +PARAMIKO_PTY = get_config(p, 'paramiko_connection', 'pty', 'ANSIBLE_PARAMIKO_PTY', True, boolean=True) + +# characters included in auto-generated passwords +DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_" + +# non-configurable things +DEFAULT_SUDO_PASS = None +DEFAULT_REMOTE_PASS = None +DEFAULT_SUBSET = None +DEFAULT_SU_PASS = None +VAULT_VERSION_MIN = 1.0 +VAULT_VERSION_MAX = 1.0 diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py index 1223eafefe..68dc2d6ffe 100644 --- a/v2/ansible/playbook/base.py +++ b/v2/ansible/playbook/base.py @@ -15,44 +15,39 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -#from ansible.cmmon.errors import AnsibleError -#from playbook.tag import Tag from ansible.playbook.attribute import Attribute, FieldAttribute - -# general concept -# FooObject.load(datastructure) -> Foo -# FooObject._load_field # optional -# FooObject._validate_field # optional -# FooObject._post_validate_field # optional -# FooObject.evaluate(host_context) -> FooObject ? (calls post_validators, templates all members) -# question - are there some things that need to be evaluated *before* host context, i.e. globally? 
-# most things should be templated but want to provide as much early checking as possible -# TODO: also check for fields in datastructure that are not valid -# TODO: PluginAttribute(type) allows all the valid plugins as valid types of names -# lookupPlugins start with "with_", ModulePluginAttribute allows any key - class Base(object): def __init__(self): - self._data = dict() + + # each class knows attributes set upon it, see Task.py for example self._attributes = dict() - for name in self.__class__.__dict__: aname = name[1:] if isinstance(aname, Attribute) and not isinstance(aname, FieldAttribute): self._attributes[aname] = None + def munge(self, ds): + ''' infrequently used method to do some pre-processing of legacy terms ''' + + return ds + def load_data(self, ds): ''' walk the input datastructure and assign any values ''' assert ds is not None + ds = self.munge(ds) + # walk all attributes in the class for (name, attribute) in self.__class__.__dict__.iteritems(): aname = name[1:] - # process Fields + # process Field attributes which get loaded from the YAML + if isinstance(attribute, FieldAttribute): + + # copy the value over unless a _load_field method is defined method = getattr(self, '_load_%s' % aname, None) if method: self._attributes[aname] = method(self, attribute) @@ -60,38 +55,45 @@ class Base(object): if aname in ds: self._attributes[aname] = ds[aname] - # TODO: implement PluginAtrribute which allows "with_" and "action" aliases. 
- + # return the constructed object + self.validate() return self def validate(self): - # TODO: finish - for name in self.__dict__: - aname = name[1:] - attribute = self.__dict__[aname] - if instanceof(attribute, FieldAttribute): + ''' validation that is done at parse time, not load time ''' + + # walk all fields in the object + for (name, attribute) in self.__dict__: + + # find any field attributes + if isinstance(attribute, FieldAttribute): + + if not name.startswith("_"): + raise AnsibleError("FieldAttribute %s must start with _" % name) + + aname = name[1:] + + # run validator only if present method = getattr(self, '_validate_%s' % (prefix, aname), None) if method: method(self, attribute) def post_validate(self, runner_context): - # TODO: finish + ''' + we can't tell that everything is of the right type until we have + all the variables. Run basic types (from isa) as well as + any _post_validate_ functions. + ''' + raise exception.NotImplementedError def __getattr__(self, needle): + + # return any attribute names as if they were real. 
+ # access them like obj.attrname() if needle in self._attributes: return self._attributes[needle] - if needle in self.__dict__: - return self.__dict__[needle] + raise AttributeError - #def __setattr__(self, needle, value): - # if needle in self._attributes: - # self._attributes[needle] = value - # if needle in self.__dict__: - # super(Base, self).__setattr__(needle, value) - # # self.__dict__[needle] = value - # raise AttributeError - - diff --git a/v2/ansible/playbook/task.py b/v2/ansible/playbook/task.py index 80d04fcb95..ad708f167c 100644 --- a/v2/ansible/playbook/task.py +++ b/v2/ansible/playbook/task.py @@ -17,13 +17,15 @@ from ansible.playbook.base import Base from ansible.playbook.attribute import Attribute, FieldAttribute -from ansible.playbook.conditional import Conditional -#from ansible.common.errors import AnsibleError -#from ansible import utils + +# from ansible.playbook.conditional import Conditional +# from ansible.common.errors import AnsibleError # TODO: it would be fantastic (if possible) if a task new where in the YAML it was defined for describing # it in error conditions +from ansible.plugins import module_finder, lookup_finder + class Task(Base): """ @@ -44,6 +46,7 @@ class Task(Base): # might be possible to define others _action = FieldAttribute(isa='string') + _always_run = FieldAttribute(isa='bool') _any_errors_fatal = FieldAttribute(isa='bool') _async = FieldAttribute(isa='int') @@ -55,12 +58,14 @@ class Task(Base): _ignore_errors = FieldAttribute(isa='bool') # FIXME: this should not be a Task - # include = FieldAttribute(isa='string') + # include = FieldAttribute(isa='string') + _loop = Attribute() _local_action = FieldAttribute(isa='string') # FIXME: this should not be a Task - _meta = FieldAttribute(isa='string') + _module_args = Attribute(isa='dict') + _meta = FieldAttribute(isa='string') _name = FieldAttribute(isa='string') @@ -106,6 +111,44 @@ class Task(Base): ''' returns a human readable representation of the task ''' return 
"TASK: %s" % self.get_name() + def munge(self, ds): + ''' + tasks are especially complex arguments so need pre-processing. + keep it short. + ''' + + + assert isinstance(ds, dict) + + new_ds = dict() + for (k,v) in ds.iteritems(): + + # if any attributes of the datastructure match a module name + # convert it to "module + args" + + if k in module_finder: + if _module.value is not None or 'action' in ds or 'local_action' in ds: + raise AnsibleError("duplicate action in task: %s" % k) + _module.value = k + _module_args.value = v + + # handle any loops, there can be only one kind of loop + + elif "with_%s" % k in lookup_finder: + if _loop.value is not None: + raise AnsibleError("duplicate loop in task: %s" % k) + _loop.value = k + _loop_args.value = v + + # otherwise send it through straight + + else: + # nothing we need to filter + new_ds[k] = v + + return new_ds + + # ================================================================================== # BELOW THIS LINE # info below this line is "old" and is before the attempt to build Attributes @@ -119,7 +162,7 @@ LEGACY = """ results = dict() module_name, params = v.strip().split(' ', 1) - if module_name not in utils.plugins.module_finder: + if module_name not in module_finder: raise AnsibleError("the specified module '%s' could not be found, check your module path" % module_name) results['_module_name'] = module_name results['_parameters'] = utils.parse_kv(params) diff --git a/v2/ansible/plugins/__init__.py b/v2/ansible/plugins/__init__.py index d6c11ffa74..4bb6c39312 100644 --- a/v2/ansible/plugins/__init__.py +++ b/v2/ansible/plugins/__init__.py @@ -1,4 +1,5 @@ -# (c) 2012-2014, Michael DeHaan +# (c) 2012, Daniel Hokka Zakrisson +# (c) 2012-2014, Michael DeHaan and others # # This file is part of Ansible # @@ -15,3 +16,271 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+import os +import os.path +import sys +import glob +import imp +from ansible import constants as C +from ansible import errors + +MODULE_CACHE = {} +PATH_CACHE = {} +PLUGIN_PATH_CACHE = {} +_basedirs = [] + +def push_basedir(basedir): + # avoid pushing the same absolute dir more than once + basedir = os.path.realpath(basedir) + if basedir not in _basedirs: + _basedirs.insert(0, basedir) + +class PluginLoader(object): + + ''' + PluginLoader loads plugins from the configured plugin directories. + + It searches for plugins by iterating through the combined list of + play basedirs, configured paths, and the python path. + The first match is used. + ''' + + def __init__(self, class_name, package, config, subdir, aliases={}): + + self.class_name = class_name + self.package = package + self.config = config + self.subdir = subdir + self.aliases = aliases + + if not class_name in MODULE_CACHE: + MODULE_CACHE[class_name] = {} + if not class_name in PATH_CACHE: + PATH_CACHE[class_name] = None + if not class_name in PLUGIN_PATH_CACHE: + PLUGIN_PATH_CACHE[class_name] = {} + + self._module_cache = MODULE_CACHE[class_name] + self._paths = PATH_CACHE[class_name] + self._plugin_path_cache = PLUGIN_PATH_CACHE[class_name] + + self._extra_dirs = [] + + def print_paths(self): + ''' Returns a string suitable for printing of the search path ''' + + # Uses a list to get the order right + ret = [] + for i in self._get_paths(): + if i not in ret: + ret.append(i) + return os.pathsep.join(ret) + + def _all_directories(self, dir): + results = [] + results.append(dir) + for root, subdirs, files in os.walk(dir): + if '__init__.py' in files: + for x in subdirs: + results.append(os.path.join(root,x)) + return results + + def _get_package_paths(self): + ''' Gets the path of a Python package ''' + + paths = [] + if not self.package: + return [] + if not hasattr(self, 'package_path'): + m = __import__(self.package) + parts = self.package.split('.')[1:] + self.package_path = 
os.path.join(os.path.dirname(m.__file__), *parts) + paths.extend(self._all_directories(self.package_path)) + return paths + + def _get_paths(self): + ''' Return a list of paths to search for plugins in ''' + + if self._paths is not None: + return self._paths + + ret = self._extra_dirs[:] + for basedir in _basedirs: + fullpath = os.path.realpath(os.path.join(basedir, self.subdir)) + if os.path.isdir(fullpath): + + files = glob.glob("%s/*" % fullpath) + + # allow directories to be two levels deep + files2 = glob.glob("%s/*/*" % fullpath) + + if files2 is not None: + files.extend(files2) + + for file in files: + if os.path.isdir(file) and file not in ret: + ret.append(file) + if fullpath not in ret: + ret.append(fullpath) + + # look in any configured plugin paths, allow one level deep for subcategories + if self.config is not None: + configured_paths = self.config.split(os.pathsep) + for path in configured_paths: + path = os.path.realpath(os.path.expanduser(path)) + contents = glob.glob("%s/*" % path) + for c in contents: + if os.path.isdir(c) and c not in ret: + ret.append(c) + if path not in ret: + ret.append(path) + + # look for any plugins installed in the package subtree + ret.extend(self._get_package_paths()) + + # cache and return the result + self._paths = ret + return ret + + + def add_directory(self, directory, with_subdir=False): + ''' Adds an additional directory to the search path ''' + + directory = os.path.realpath(directory) + + if directory is not None: + if with_subdir: + directory = os.path.join(directory, self.subdir) + if directory not in self._extra_dirs: + # append the directory and invalidate the path cache + self._extra_dirs.append(directory) + self._paths = None + + def find_plugin(self, name, suffixes=None, transport=''): + ''' Find a plugin named name ''' + + if not suffixes: + if self.class_name: + suffixes = ['.py'] + else: + if transport == 'winrm': + suffixes = ['.ps1', ''] + else: + suffixes = ['.py', ''] + + for suffix in suffixes: + 
full_name = '%s%s' % (name, suffix) + if full_name in self._plugin_path_cache: + return self._plugin_path_cache[full_name] + + for i in self._get_paths(): + path = os.path.join(i, full_name) + if os.path.isfile(path): + self._plugin_path_cache[full_name] = path + return path + + return None + + def has_plugin(self, name): + ''' Checks if a plugin named name exists ''' + + return self.find_plugin(name) is not None + + __contains__ = has_plugin + + def get(self, name, *args, **kwargs): + ''' instantiates a plugin of the given name using arguments ''' + + if name in self.aliases: + name = self.aliases[name] + path = self.find_plugin(name) + if path is None: + return None + if path not in self._module_cache: + self._module_cache[path] = imp.load_source('.'.join([self.package, name]), path) + return getattr(self._module_cache[path], self.class_name)(*args, **kwargs) + + def all(self, *args, **kwargs): + ''' instantiates all plugins with the same arguments ''' + + for i in self._get_paths(): + matches = glob.glob(os.path.join(i, "*.py")) + matches.sort() + for path in matches: + name, ext = os.path.splitext(os.path.basename(path)) + if name.startswith("_"): + continue + if path not in self._module_cache: + self._module_cache[path] = imp.load_source('.'.join([self.package, name]), path) + yield getattr(self._module_cache[path], self.class_name)(*args, **kwargs) + +action_loader = PluginLoader( + 'ActionModule', + 'ansible.runner.action_plugins', + C.DEFAULT_ACTION_PLUGIN_PATH, + 'action_plugins' +) + +cache_loader = PluginLoader( + 'CacheModule', + 'ansible.cache', + C.DEFAULT_CACHE_PLUGIN_PATH, + 'cache_plugins' +) + +callback_loader = PluginLoader( + 'CallbackModule', + 'ansible.callback_plugins', + C.DEFAULT_CALLBACK_PLUGIN_PATH, + 'callback_plugins' +) + +connection_loader = PluginLoader( + 'Connection', + 'ansible.runner.connection_plugins', + C.DEFAULT_CONNECTION_PLUGIN_PATH, + 'connection_plugins', + aliases={'paramiko': 'paramiko_ssh'} +) + +shell_loader = 
PluginLoader( + 'ShellModule', + 'ansible.runner.shell_plugins', + 'shell_plugins', + 'shell_plugins', +) + +module_finder = PluginLoader( + '', + 'ansible.modules', + C.DEFAULT_MODULE_PATH, + 'library' +) + +lookup_finder = PluginLoader( + 'LookupModule', + 'ansible.runner.lookup_plugins', + C.DEFAULT_LOOKUP_PLUGIN_PATH, + 'lookup_plugins' +) + +vars_finder = PluginLoader( + 'VarsModule', + 'ansible.inventory.vars_plugins', + C.DEFAULT_VARS_PLUGIN_PATH, + 'vars_plugins' +) + +filter_finder = PluginLoader( + 'FilterModule', + 'ansible.runner.filter_plugins', + C.DEFAULT_FILTER_PLUGIN_PATH, + 'filter_plugins' +) + +fragment_finder = PluginLoader( + 'ModuleDocFragment', + 'ansible.utils.module_docs_fragments', + os.path.join(os.path.dirname(__file__), 'module_docs_fragments'), + '', +) From 1556b0384fa28b6e754eccefc8a365e9a3f8f949 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Mon, 6 Oct 2014 15:31:11 -0400 Subject: [PATCH 178/813] Add submodule references since v2 development will need them, the tree obviously does not need two copies. 
--- .gitmodules | 6 ++++++ v2/ansible/modules/core | 1 + v2/ansible/modules/extras | 1 + 3 files changed, 8 insertions(+) create mode 160000 v2/ansible/modules/core create mode 160000 v2/ansible/modules/extras diff --git a/.gitmodules b/.gitmodules index 92c3072a28..f33b632fb6 100644 --- a/.gitmodules +++ b/.gitmodules @@ -4,3 +4,9 @@ [submodule "lib/ansible/modules/extras"] path = lib/ansible/modules/extras url = git://github.com/ansible/ansible-modules-extras.git +[submodule "v2/ansible/modules/core"] + path = v2/ansible/modules/core + url = https://github.com/ansible/ansible-modules-core.git +[submodule "v2/ansible/modules/extras"] + path = v2/ansible/modules/extras + url = https://github.com/ansible/ansible-modules-extras.git diff --git a/v2/ansible/modules/core b/v2/ansible/modules/core new file mode 160000 index 0000000000..cb69744bce --- /dev/null +++ b/v2/ansible/modules/core @@ -0,0 +1 @@ +Subproject commit cb69744bcee4b4217d83b4a30006635ba69e2aa0 diff --git a/v2/ansible/modules/extras b/v2/ansible/modules/extras new file mode 160000 index 0000000000..8a4f07eecd --- /dev/null +++ b/v2/ansible/modules/extras @@ -0,0 +1 @@ +Subproject commit 8a4f07eecd2bb877f51b7b04b5352efa6076cce5 From e66a0096a777af6a3866616faa7401ff1f4874e1 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Mon, 6 Oct 2014 16:29:02 -0400 Subject: [PATCH 179/813] Work in progress on task loading. 
--- test/v2/playbook/test_task.py | 2 +- v2/ansible/modules/__init__.py | 2 ++ v2/ansible/playbook/attribute.py | 3 ++- v2/ansible/playbook/base.py | 11 +++++++---- v2/ansible/playbook/task.py | 24 ++++++++++++++++-------- v2/ansible/plugins/__init__.py | 16 ++++++++-------- 6 files changed, 36 insertions(+), 22 deletions(-) create mode 100644 v2/ansible/modules/__init__.py diff --git a/test/v2/playbook/test_task.py b/test/v2/playbook/test_task.py index a012dff4bf..eef422aee8 100644 --- a/test/v2/playbook/test_task.py +++ b/test/v2/playbook/test_task.py @@ -32,7 +32,7 @@ class TestTask(unittest.TestCase): t = Task.load(basic_shell_task) assert t is not None assert t.name == basic_shell_task['name'] - assert t.module == 'shell' + assert t.action == 'shell' assert t.args == 'echo hi' def test_can_load_action_kv_form(self): diff --git a/v2/ansible/modules/__init__.py b/v2/ansible/modules/__init__.py new file mode 100644 index 0000000000..ec86ee6101 --- /dev/null +++ b/v2/ansible/modules/__init__.py @@ -0,0 +1,2 @@ +# TODO: header + diff --git a/v2/ansible/playbook/attribute.py b/v2/ansible/playbook/attribute.py index e3e2dd13d9..1a22f3dbdb 100644 --- a/v2/ansible/playbook/attribute.py +++ b/v2/ansible/playbook/attribute.py @@ -18,8 +18,9 @@ #from ansible.common.errors import AnsibleError class Attribute(object): - def __init__(self, isa=None): + def __init__(self, isa=None, private=False): self.isa = isa + self.private = private self.value = None def __call__(self): diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py index 68dc2d6ffe..825e9171ff 100644 --- a/v2/ansible/playbook/base.py +++ b/v2/ansible/playbook/base.py @@ -37,6 +37,10 @@ class Base(object): ''' walk the input datastructure and assign any values ''' assert ds is not None + + # we currently don't do anything with private attributes but may + # later decide to filter them out of 'ds' here. 
+ ds = self.munge(ds) # walk all attributes in the class @@ -54,7 +58,7 @@ class Base(object): else: if aname in ds: self._attributes[aname] = ds[aname] - + # return the constructed object self.validate() return self @@ -64,7 +68,7 @@ class Base(object): ''' validation that is done at parse time, not load time ''' # walk all fields in the object - for (name, attribute) in self.__dict__: + for (name, attribute) in self.__dict__.iteritems(): # find any field attributes if isinstance(attribute, FieldAttribute): @@ -95,5 +99,4 @@ class Base(object): if needle in self._attributes: return self._attributes[needle] - raise AttributeError - + raise AttributeError("attribute not found: %s" % needle) diff --git a/v2/ansible/playbook/task.py b/v2/ansible/playbook/task.py index ad708f167c..6ab9231ba1 100644 --- a/v2/ansible/playbook/task.py +++ b/v2/ansible/playbook/task.py @@ -45,6 +45,8 @@ class Task(Base): # will be used if defined # might be possible to define others + + _args = FieldAttribute(isa='dict') _action = FieldAttribute(isa='string') _always_run = FieldAttribute(isa='bool') @@ -60,11 +62,11 @@ class Task(Base): # FIXME: this should not be a Task # include = FieldAttribute(isa='string') - _loop = Attribute() + _loop = FieldAttribute(isa='string', private=True) + _loop_args = FieldAttribute(isa='list', private=True) _local_action = FieldAttribute(isa='string') # FIXME: this should not be a Task - _module_args = Attribute(isa='dict') _meta = FieldAttribute(isa='string') _name = FieldAttribute(isa='string') @@ -127,25 +129,31 @@ class Task(Base): # convert it to "module + args" if k in module_finder: - if _module.value is not None or 'action' in ds or 'local_action' in ds: + + + if self._action.value is not None or 'action' in ds or 'local_action' in ds: raise AnsibleError("duplicate action in task: %s" % k) - _module.value = k - _module_args.value = v + + print "SCANNED: %s" % k + new_ds['action'] = k + new_ds['args'] = v # handle any loops, there can be only one 
kind of loop elif "with_%s" % k in lookup_finder: - if _loop.value is not None: + if self._loop.value is not None: raise AnsibleError("duplicate loop in task: %s" % k) - _loop.value = k - _loop_args.value = v + new_ds['loop'] = k + new_ds['loop_args'] = v # otherwise send it through straight else: # nothing we need to filter + print "PASSING: %s => %s" % (k,v) new_ds[k] = v + print "NEW_DS=%s" % new_ds return new_ds diff --git a/v2/ansible/plugins/__init__.py b/v2/ansible/plugins/__init__.py index 4bb6c39312..faa284ce16 100644 --- a/v2/ansible/plugins/__init__.py +++ b/v2/ansible/plugins/__init__.py @@ -216,28 +216,28 @@ class PluginLoader(object): action_loader = PluginLoader( 'ActionModule', - 'ansible.runner.action_plugins', + 'ansible.plugins.action', C.DEFAULT_ACTION_PLUGIN_PATH, 'action_plugins' ) cache_loader = PluginLoader( 'CacheModule', - 'ansible.cache', + 'ansible.plugins.cache', C.DEFAULT_CACHE_PLUGIN_PATH, 'cache_plugins' ) callback_loader = PluginLoader( 'CallbackModule', - 'ansible.callback_plugins', + 'ansible.plugins.callback', C.DEFAULT_CALLBACK_PLUGIN_PATH, 'callback_plugins' ) connection_loader = PluginLoader( 'Connection', - 'ansible.runner.connection_plugins', + 'ansible.plugins.connection', C.DEFAULT_CONNECTION_PLUGIN_PATH, 'connection_plugins', aliases={'paramiko': 'paramiko_ssh'} @@ -245,7 +245,7 @@ connection_loader = PluginLoader( shell_loader = PluginLoader( 'ShellModule', - 'ansible.runner.shell_plugins', + 'ansible.plugins.shell', 'shell_plugins', 'shell_plugins', ) @@ -259,21 +259,21 @@ module_finder = PluginLoader( lookup_finder = PluginLoader( 'LookupModule', - 'ansible.runner.lookup_plugins', + 'ansible.plugins.lookup', C.DEFAULT_LOOKUP_PLUGIN_PATH, 'lookup_plugins' ) vars_finder = PluginLoader( 'VarsModule', - 'ansible.inventory.vars_plugins', + 'ansible.plugins.vars', C.DEFAULT_VARS_PLUGIN_PATH, 'vars_plugins' ) filter_finder = PluginLoader( 'FilterModule', - 'ansible.runner.filter_plugins', + 'ansible.plugins.filter', 
C.DEFAULT_FILTER_PLUGIN_PATH, 'filter_plugins' ) From d97b38ba83d69d1ec7b6168698d7968968ee69a0 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Mon, 6 Oct 2014 17:06:13 -0400 Subject: [PATCH 180/813] Attribute defaults and optional accessors. --- test/v2/playbook/test_task.py | 15 +++++++++++++++ v2/ansible/playbook/attribute.py | 5 ++++- v2/ansible/playbook/base.py | 18 +++++++++++++----- v2/ansible/playbook/task.py | 23 +++++++++++++++++------ 4 files changed, 49 insertions(+), 12 deletions(-) diff --git a/test/v2/playbook/test_task.py b/test/v2/playbook/test_task.py index eef422aee8..124fc1bc98 100644 --- a/test/v2/playbook/test_task.py +++ b/test/v2/playbook/test_task.py @@ -8,6 +8,10 @@ basic_shell_task = dict( shell = 'echo hi' ) +kv_shell_task = dict( + action = 'shell echo hi' +) + class TestTask(unittest.TestCase): def setUp(self): @@ -36,6 +40,17 @@ class TestTask(unittest.TestCase): assert t.args == 'echo hi' def test_can_load_action_kv_form(self): + t = Task.load(kv_shell_task) + assert t.action == 'shell' + assert t.args == 'echo hi' + + def test_can_auto_name(self): + assert 'name' not in kv_shell_task + t = Task.load(kv_shell_task) + print "GOT NAME=(%s)" % t.name + assert t.name == 'shell echo hi' + + def test_can_auto_name_with_role(self): pass def test_can_load_action_complex_form(self): diff --git a/v2/ansible/playbook/attribute.py b/v2/ansible/playbook/attribute.py index 1a22f3dbdb..a10da490c7 100644 --- a/v2/ansible/playbook/attribute.py +++ b/v2/ansible/playbook/attribute.py @@ -18,10 +18,13 @@ #from ansible.common.errors import AnsibleError class Attribute(object): - def __init__(self, isa=None, private=False): + + def __init__(self, isa=None, private=False, default=None): + self.isa = isa self.private = private self.value = None + self.default = default def __call__(self): return self.value diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py index 825e9171ff..6390f3432f 100644 --- a/v2/ansible/playbook/base.py +++ 
b/v2/ansible/playbook/base.py @@ -23,10 +23,11 @@ class Base(object): # each class knows attributes set upon it, see Task.py for example self._attributes = dict() - for name in self.__class__.__dict__: + + for (name, value) in self.__class__.__dict__.iteritems(): aname = name[1:] - if isinstance(aname, Attribute) and not isinstance(aname, FieldAttribute): - self._attributes[aname] = None + if isinstance(value, Attribute): + self._attributes[aname] = value.default def munge(self, ds): ''' infrequently used method to do some pre-processing of legacy terms ''' @@ -94,9 +95,16 @@ class Base(object): def __getattr__(self, needle): - # return any attribute names as if they were real. - # access them like obj.attrname() + # return any attribute names as if they were real + # optionally allowing masking by accessors + + if not needle.startswith("_"): + method = "get_%s" % needle + if method in self.__dict__: + return method(self) + if needle in self._attributes: return self._attributes[needle] raise AttributeError("attribute not found: %s" % needle) + diff --git a/v2/ansible/playbook/task.py b/v2/ansible/playbook/task.py index 6ab9231ba1..a3324bbba3 100644 --- a/v2/ansible/playbook/task.py +++ b/v2/ansible/playbook/task.py @@ -87,9 +87,6 @@ class Task(Base): _transport = FieldAttribute(isa='string') _until = FieldAttribute(isa='list') # ? 
- _role = Attribute() - _block = Attribute() - def __init__(self, block=None, role=None): ''' constructors a task, without the Task.load classmethod, it will be pretty blank ''' self._block = block @@ -99,10 +96,24 @@ class Task(Base): def get_name(self): ''' return the name of the task ''' - if self.role: - return "%s : %s" % (self.role.get_name(), self.name) - else: + if self._role and self.name: + return "%s : %s" % (self._role.name, self.name) + elif self.name: return self.name + else: + return "%s %s" % (self.action, self._merge_kv(self.args)) + + def _merge_kv(self, ds): + if ds is None: + return "" + elif isinstance(ds, basestring): + return ds + elif instance(ds, dict): + buf = "" + for (k,v) in ds.iteritems(): + buf = buf + "%s=%s " % (k,v) + buf = buf.strip() + return buf @staticmethod def load(data, block=None, role=None): From e8aa847e5b7ffaa04359130065e599fc34cb6bb0 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Mon, 6 Oct 2014 17:40:36 -0400 Subject: [PATCH 181/813] Attempt to clean up the munging functions a little. 
--- v2/ansible/playbook/task.py | 85 ++++++++++++++++++++++--------------- 1 file changed, 50 insertions(+), 35 deletions(-) diff --git a/v2/ansible/playbook/task.py b/v2/ansible/playbook/task.py index a3324bbba3..00fc0e2b42 100644 --- a/v2/ansible/playbook/task.py +++ b/v2/ansible/playbook/task.py @@ -103,17 +103,20 @@ class Task(Base): else: return "%s %s" % (self.action, self._merge_kv(self.args)) + def _parse_kv(self, str): + return ansible.utils.parse_kv(str) + def _merge_kv(self, ds): if ds is None: - return "" + return "" elif isinstance(ds, basestring): - return ds + return ds elif instance(ds, dict): - buf = "" - for (k,v) in ds.iteritems(): - buf = buf + "%s=%s " % (k,v) - buf = buf.strip() - return buf + buf = "" + for (k,v) in ds.iteritems(): + buf = buf + "%s=%s " % (k,v) + buf = buf.strip() + return buf @staticmethod def load(data, block=None, role=None): @@ -123,6 +126,39 @@ class Task(Base): def __repr__(self): ''' returns a human readable representation of the task ''' return "TASK: %s" % self.get_name() + + + def _munge_action(self, ds, new_ds, k, v): + ''' take a module name and split into action and args ''' + if self._action.value is not None or 'action' in ds or 'local_action' in ds: + raise AnsibleError("duplicate action in task: %s" % k) + new_ds['action'] = k + new_ds['args'] = v + + + def _munge_loop(self, ds, new_ds, k, v): + ''' take a lookup plugin name and store it correctly ''' + if self._loop.value is not None: + raise AnsibleError("duplicate loop in task: %s" % k) + new_ds['loop'] = k + new_ds['loop_args'] = v + + def _munge_action2(self, ds, new_ds, k, v, local=False): + ''' take an old school action/local_action and reformat it ''' + if isinstance(v, basestring): + (module, args) = self._parse_kv(v) + new_ds['action'] = module + if 'args' in ds: + raise AnsibleError("unexpected and redundant 'args'") + new_ds['args'] = args + if local: + if 'delegate_to' in ds: + raise AnsbileError("local_action and action conflict") + 
new_ds['delegate_to'] = 'localhost' + else: + raise AnsibleError("unexpected use of 'action'") + else: + raise AnsibleError("unexpected use of 'action'") def munge(self, ds): ''' @@ -130,41 +166,20 @@ class Task(Base): keep it short. ''' - assert isinstance(ds, dict) new_ds = dict() for (k,v) in ds.iteritems(): - - # if any attributes of the datastructure match a module name - # convert it to "module + args" - if k in module_finder: - - - if self._action.value is not None or 'action' in ds or 'local_action' in ds: - raise AnsibleError("duplicate action in task: %s" % k) - - print "SCANNED: %s" % k - new_ds['action'] = k - new_ds['args'] = v - - # handle any loops, there can be only one kind of loop - + self._munge_action(ds, new_ds, k, v) elif "with_%s" % k in lookup_finder: - if self._loop.value is not None: - raise AnsibleError("duplicate loop in task: %s" % k) - new_ds['loop'] = k - new_ds['loop_args'] = v - - # otherwise send it through straight - + self._munge_loop(new_ds, k, v) + elif k == 'action': + self._munge_action2(ds, new_ds, k, v) + elif k == 'local_action': + self._munge_action2(ds, new_ds, k, v, local=True) else: - # nothing we need to filter - print "PASSING: %s => %s" % (k,v) new_ds[k] = v - - print "NEW_DS=%s" % new_ds return new_ds @@ -184,7 +199,7 @@ LEGACY = """ if module_name not in module_finder: raise AnsibleError("the specified module '%s' could not be found, check your module path" % module_name) results['_module_name'] = module_name - results['_parameters'] = utils.parse_kv(params) + results['_parameters'] = self._parse_kv(params) if k == 'local_action': if 'delegate_to' in ds: From 05231dcde6cf24040ae428f05bdb6241f287881f Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Tue, 7 Oct 2014 11:52:45 -0400 Subject: [PATCH 182/813] Move over some of the parsing functions that we need. 
--- v2/ansible/playbook/task.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/v2/ansible/playbook/task.py b/v2/ansible/playbook/task.py index 00fc0e2b42..dd37fe1da2 100644 --- a/v2/ansible/playbook/task.py +++ b/v2/ansible/playbook/task.py @@ -24,6 +24,7 @@ from ansible.playbook.attribute import Attribute, FieldAttribute # TODO: it would be fantastic (if possible) if a task new where in the YAML it was defined for describing # it in error conditions +from ansible.parsing.splitter import parse_kv from ansible.plugins import module_finder, lookup_finder class Task(Base): @@ -103,9 +104,6 @@ class Task(Base): else: return "%s %s" % (self.action, self._merge_kv(self.args)) - def _parse_kv(self, str): - return ansible.utils.parse_kv(str) - def _merge_kv(self, ds): if ds is None: return "" @@ -146,7 +144,7 @@ class Task(Base): def _munge_action2(self, ds, new_ds, k, v, local=False): ''' take an old school action/local_action and reformat it ''' if isinstance(v, basestring): - (module, args) = self._parse_kv(v) + (module, args) = parse_kv(v) new_ds['action'] = module if 'args' in ds: raise AnsibleError("unexpected and redundant 'args'") @@ -199,7 +197,7 @@ LEGACY = """ if module_name not in module_finder: raise AnsibleError("the specified module '%s' could not be found, check your module path" % module_name) results['_module_name'] = module_name - results['_parameters'] = self._parse_kv(params) + results['_parameters'] = parse_kv(params) if k == 'local_action': if 'delegate_to' in ds: From 21642c0fbfecaa3db4cda1d6270bc8beeba8671c Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Tue, 7 Oct 2014 14:56:15 -0400 Subject: [PATCH 183/813] We need module args specific parsing classes. 
--- test/v2/playbook/test_task.py | 1 - v2/ansible/parsing/__init__.py | 1 + v2/ansible/parsing/modargs.py | 26 ++++ v2/ansible/parsing/splitter.py | 215 +++++++++++++++++++++++++++++++++ v2/ansible/playbook/task.py | 24 +++- 5 files changed, 260 insertions(+), 7 deletions(-) create mode 100644 v2/ansible/parsing/__init__.py create mode 100644 v2/ansible/parsing/modargs.py create mode 100644 v2/ansible/parsing/splitter.py diff --git a/test/v2/playbook/test_task.py b/test/v2/playbook/test_task.py index 124fc1bc98..8120ba36c5 100644 --- a/test/v2/playbook/test_task.py +++ b/test/v2/playbook/test_task.py @@ -47,7 +47,6 @@ class TestTask(unittest.TestCase): def test_can_auto_name(self): assert 'name' not in kv_shell_task t = Task.load(kv_shell_task) - print "GOT NAME=(%s)" % t.name assert t.name == 'shell echo hi' def test_can_auto_name_with_role(self): diff --git a/v2/ansible/parsing/__init__.py b/v2/ansible/parsing/__init__.py new file mode 100644 index 0000000000..44026bdff0 --- /dev/null +++ b/v2/ansible/parsing/__init__.py @@ -0,0 +1 @@ +# TODO: header diff --git a/v2/ansible/parsing/modargs.py b/v2/ansible/parsing/modargs.py new file mode 100644 index 0000000000..9af202c635 --- /dev/null +++ b/v2/ansible/parsing/modargs.py @@ -0,0 +1,26 @@ +# (c) 2014 Michael DeHaan, +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +class ModArgsParser(object) + + def __init__(self, thing1, thing2): + pass + + def parse(): + raise exception.NotImplementedError + + diff --git a/v2/ansible/parsing/splitter.py b/v2/ansible/parsing/splitter.py new file mode 100644 index 0000000000..430f4e299a --- /dev/null +++ b/v2/ansible/parsing/splitter.py @@ -0,0 +1,215 @@ +# (c) 2014 James Cammarata, +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +def parse_kv(args): + ''' convert a string of key/value items to a dict ''' + options = {} + if args is not None: + try: + vargs = split_args(args) + except ValueError, ve: + if 'no closing quotation' in str(ve).lower(): + raise errors.AnsibleError("error parsing argument string, try quoting the entire line.") + else: + raise + for x in vargs: + if "=" in x: + k, v = x.split("=",1) + options[k.strip()] = unquote(v.strip()) + return options + +def _get_quote_state(token, quote_char): + ''' + the goal of this block is to determine if the quoted string + is unterminated in which case it needs to be put back together + ''' + # the char before the current one, used to see if + # the current character is escaped + prev_char = None + for idx, cur_char in enumerate(token): + if idx > 0: + prev_char = token[idx-1] + if cur_char in '"\'' and prev_char != '\\': + if quote_char: + if cur_char == quote_char: + quote_char = None + else: + quote_char = cur_char + return quote_char + +def 
_count_jinja2_blocks(token, cur_depth, open_token, close_token): + ''' + this function counts the number of opening/closing blocks for a + given opening/closing type and adjusts the current depth for that + block based on the difference + ''' + num_open = token.count(open_token) + num_close = token.count(close_token) + if num_open != num_close: + cur_depth += (num_open - num_close) + if cur_depth < 0: + cur_depth = 0 + return cur_depth + +def split_args(args): + ''' + Splits args on whitespace, but intelligently reassembles + those that may have been split over a jinja2 block or quotes. + + When used in a remote module, we won't ever have to be concerned about + jinja2 blocks, however this function is/will be used in the + core portions as well before the args are templated. + + example input: a=b c="foo bar" + example output: ['a=b', 'c="foo bar"'] + + Basically this is a variation shlex that has some more intelligence for + how Ansible needs to use it. + ''' + + # the list of params parsed out of the arg string + # this is going to be the result value when we are donei + params = [] + + # here we encode the args, so we have a uniform charset to + # work with, and split on white space + args = args.strip() + try: + args = args.encode('utf-8') + do_decode = True + except UnicodeDecodeError: + do_decode = False + items = args.strip().split('\n') + + # iterate over the tokens, and reassemble any that may have been + # split on a space inside a jinja2 block. + # ex if tokens are "{{", "foo", "}}" these go together + + # These variables are used + # to keep track of the state of the parsing, since blocks and quotes + # may be nested within each other. 
+ + quote_char = None + inside_quotes = False + print_depth = 0 # used to count nested jinja2 {{ }} blocks + block_depth = 0 # used to count nested jinja2 {% %} blocks + comment_depth = 0 # used to count nested jinja2 {# #} blocks + + # now we loop over each split chunk, coalescing tokens if the white space + # split occurred within quotes or a jinja2 block of some kind + for itemidx,item in enumerate(items): + + # we split on spaces and newlines separately, so that we + # can tell which character we split on for reassembly + # inside quotation characters + tokens = item.strip().split(' ') + + line_continuation = False + for idx,token in enumerate(tokens): + + # if we hit a line continuation character, but + # we're not inside quotes, ignore it and continue + # on to the next token while setting a flag + if token == '\\' and not inside_quotes: + line_continuation = True + continue + + # store the previous quoting state for checking later + was_inside_quotes = inside_quotes + quote_char = _get_quote_state(token, quote_char) + inside_quotes = quote_char is not None + + # multiple conditions may append a token to the list of params, + # so we keep track with this flag to make sure it only happens once + # append means add to the end of the list, don't append means concatenate + # it to the end of the last token + appended = False + + # if we're inside quotes now, but weren't before, append the token + # to the end of the list, since we'll tack on more to it later + # otherwise, if we're inside any jinja2 block, inside quotes, or we were + # inside quotes (but aren't now) concat this token to the last param + if inside_quotes and not was_inside_quotes: + params.append(token) + appended = True + elif print_depth or block_depth or comment_depth or inside_quotes or was_inside_quotes: + if idx == 0 and not inside_quotes and was_inside_quotes: + params[-1] = "%s%s" % (params[-1], token) + elif len(tokens) > 1: + spacer = '' + if idx > 0: + spacer = ' ' + params[-1] = 
"%s%s%s" % (params[-1], spacer, token) + else: + params[-1] = "%s\n%s" % (params[-1], token) + appended = True + + # if the number of paired block tags is not the same, the depth has changed, so we calculate that here + # and may append the current token to the params (if we haven't previously done so) + prev_print_depth = print_depth + print_depth = _count_jinja2_blocks(token, print_depth, "{{", "}}") + if print_depth != prev_print_depth and not appended: + params.append(token) + appended = True + + prev_block_depth = block_depth + block_depth = _count_jinja2_blocks(token, block_depth, "{%", "%}") + if block_depth != prev_block_depth and not appended: + params.append(token) + appended = True + + prev_comment_depth = comment_depth + comment_depth = _count_jinja2_blocks(token, comment_depth, "{#", "#}") + if comment_depth != prev_comment_depth and not appended: + params.append(token) + appended = True + + # finally, if we're at zero depth for all blocks and not inside quotes, and have not + # yet appended anything to the list of params, we do so now + if not (print_depth or block_depth or comment_depth) and not inside_quotes and not appended and token != '': + params.append(token) + + # if this was the last token in the list, and we have more than + # one item (meaning we split on newlines), add a newline back here + # to preserve the original structure + if len(items) > 1 and itemidx != len(items) - 1 and not line_continuation: + if not params[-1].endswith('\n'): + params[-1] += '\n' + + # always clear the line continuation flag + line_continuation = False + + # If we're done and things are not at zero depth or we're still inside quotes, + # raise an error to indicate that the args were unbalanced + if print_depth or block_depth or comment_depth or inside_quotes: + raise Exception("error while splitting arguments, either an unbalanced jinja2 block or quotes") + + # finally, we decode each param back to the unicode it was in the arg string + if do_decode: + params = 
[x.decode('utf-8') for x in params] + + return params + +def is_quoted(data): + return len(data) > 0 and (data[0] == '"' and data[-1] == '"' or data[0] == "'" and data[-1] == "'") + +def unquote(data): + ''' removes first and last quotes from a string, if the string starts and ends with the same quotes ''' + if is_quoted(data): + return data[1:-1] + return data + diff --git a/v2/ansible/playbook/task.py b/v2/ansible/playbook/task.py index dd37fe1da2..856246c327 100644 --- a/v2/ansible/playbook/task.py +++ b/v2/ansible/playbook/task.py @@ -19,7 +19,7 @@ from ansible.playbook.base import Base from ansible.playbook.attribute import Attribute, FieldAttribute # from ansible.playbook.conditional import Conditional -# from ansible.common.errors import AnsibleError +from ansible.errors import AnsibleError # TODO: it would be fantastic (if possible) if a task new where in the YAML it was defined for describing # it in error conditions @@ -125,9 +125,21 @@ class Task(Base): ''' returns a human readable representation of the task ''' return "TASK: %s" % self.get_name() + def _parse_old_school_action(self, v): + ''' given a action/local_action line, return the module and args ''' + tokens = v.split() + if len(tokens) < 2: + return [v,{}] + else: + if v not in [ 'command', 'shell' ]: + joined = " ".join(tokens[1:]) + return [tokens[0], parse_kv(joined)] + else: + return [tokens[0], joined] def _munge_action(self, ds, new_ds, k, v): ''' take a module name and split into action and args ''' + if self._action.value is not None or 'action' in ds or 'local_action' in ds: raise AnsibleError("duplicate action in task: %s" % k) new_ds['action'] = k @@ -136,6 +148,7 @@ class Task(Base): def _munge_loop(self, ds, new_ds, k, v): ''' take a lookup plugin name and store it correctly ''' + if self._loop.value is not None: raise AnsibleError("duplicate loop in task: %s" % k) new_ds['loop'] = k @@ -143,9 +156,10 @@ class Task(Base): def _munge_action2(self, ds, new_ds, k, v, local=False): ''' 
take an old school action/local_action and reformat it ''' + if isinstance(v, basestring): - (module, args) = parse_kv(v) - new_ds['action'] = module + tokens = self._parse_old_school_action(v) + new_ds['action'] = tokens[0] if 'args' in ds: raise AnsibleError("unexpected and redundant 'args'") new_ds['args'] = args @@ -153,8 +167,6 @@ class Task(Base): if 'delegate_to' in ds: raise AnsbileError("local_action and action conflict") new_ds['delegate_to'] = 'localhost' - else: - raise AnsibleError("unexpected use of 'action'") else: raise AnsibleError("unexpected use of 'action'") @@ -171,7 +183,7 @@ class Task(Base): if k in module_finder: self._munge_action(ds, new_ds, k, v) elif "with_%s" % k in lookup_finder: - self._munge_loop(new_ds, k, v) + self._munge_loop(ds, new_ds, k, v) elif k == 'action': self._munge_action2(ds, new_ds, k, v) elif k == 'local_action': From ad9ab8e033ca327cadea3d046a196a2759bc4300 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Tue, 7 Oct 2014 14:59:55 -0400 Subject: [PATCH 184/813] Stub class for mod arg parsing tests. 
--- test/v2/parsing/test_mod_args.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 test/v2/parsing/test_mod_args.py diff --git a/test/v2/parsing/test_mod_args.py b/test/v2/parsing/test_mod_args.py new file mode 100644 index 0000000000..a97e2ce13a --- /dev/null +++ b/test/v2/parsing/test_mod_args.py @@ -0,0 +1,16 @@ +# TODO: header + +#from ansible.playbook.task import Task +import unittest + +class TestModArgs(unittest.TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_sample(self): + pass + From 8ab0749217aadd481c32d83ff3bb539bc00e453b Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 7 Oct 2014 15:04:42 -0500 Subject: [PATCH 185/813] Adding new yaml parsing classes --- v2/ansible/parsing/yaml/__init__.py | 7 +++++++ v2/ansible/parsing/yaml/composer.py | 28 ++++++++++++++++++++++++++ v2/ansible/parsing/yaml/constructor.py | 28 ++++++++++++++++++++++++++ v2/ansible/parsing/yaml/loader.py | 17 ++++++++++++++++ v2/ansible/parsing/yaml/objects.py | 14 +++++++++++++ 5 files changed, 94 insertions(+) create mode 100644 v2/ansible/parsing/yaml/__init__.py create mode 100644 v2/ansible/parsing/yaml/composer.py create mode 100644 v2/ansible/parsing/yaml/constructor.py create mode 100644 v2/ansible/parsing/yaml/loader.py create mode 100644 v2/ansible/parsing/yaml/objects.py diff --git a/v2/ansible/parsing/yaml/__init__.py b/v2/ansible/parsing/yaml/__init__.py new file mode 100644 index 0000000000..040a91d689 --- /dev/null +++ b/v2/ansible/parsing/yaml/__init__.py @@ -0,0 +1,7 @@ +from yaml import load +from parsing.yaml.loader import AnsibleLoader + +def safe_load(stream): + ''' implements yaml.safe_load(), except using our custom loader class ''' + return load(stream, AnsibleLoader) + diff --git a/v2/ansible/parsing/yaml/composer.py b/v2/ansible/parsing/yaml/composer.py new file mode 100644 index 0000000000..b0acc08a24 --- /dev/null +++ b/v2/ansible/parsing/yaml/composer.py @@ -0,0 +1,28 @@ +from 
yaml.composer import Composer +from yaml.nodes import MappingNode + +class AnsibleComposer(Composer): + def __init__(self): + self.__mapping_starts = [] + super(Composer, self).__init__() + def compose_node(self, parent, index): + # the line number where the previous token has ended (plus empty lines) + node = Composer.compose_node(self, parent, index) + if isinstance(node, MappingNode): + node.__datasource__ = self.name + try: + (cur_line, cur_column) = self.__mapping_starts.pop() + except: + cur_line = None + cur_column = None + node.__line__ = cur_line + node.__column__ = cur_column + return node + def compose_mapping_node(self, anchor): + # the column here will point at the position in the file immediately + # after the first key is found, which could be a space or a newline. + # We could back this up to find the beginning of the key, but this + # should be good enough to determine the error location. + self.__mapping_starts.append((self.line + 1, self.column + 1)) + return Composer.compose_mapping_node(self, anchor) + diff --git a/v2/ansible/parsing/yaml/constructor.py b/v2/ansible/parsing/yaml/constructor.py new file mode 100644 index 0000000000..fd4a35e7af --- /dev/null +++ b/v2/ansible/parsing/yaml/constructor.py @@ -0,0 +1,28 @@ +from yaml.constructor import Constructor +from parsing.yaml.objects import AnsibleMapping + +class AnsibleConstructor(Constructor): + def construct_yaml_map(self, node): + data = AnsibleMapping() + yield data + value = self.construct_mapping(node) + data.update(value) + data._line_number = value._line_number + data._column_number = value._column_number + data._data_source = value._data_source + + def construct_mapping(self, node, deep=False): + ret = AnsibleMapping(super(Constructor, self).construct_mapping(node, deep)) + ret._line_number = node.__line__ + ret._column_number = node.__column__ + ret._data_source = node.__datasource__ + return ret + +AnsibleConstructor.add_constructor( + u'tag:yaml.org,2002:map', + 
AnsibleConstructor.construct_yaml_map) + +AnsibleConstructor.add_constructor( + u'tag:yaml.org,2002:python/dict', + AnsibleConstructor.construct_yaml_map) + diff --git a/v2/ansible/parsing/yaml/loader.py b/v2/ansible/parsing/yaml/loader.py new file mode 100644 index 0000000000..d9dc4fea89 --- /dev/null +++ b/v2/ansible/parsing/yaml/loader.py @@ -0,0 +1,17 @@ +from yaml.reader import Reader +from yaml.scanner import Scanner +from yaml.parser import Parser +from yaml.resolver import Resolver + +from parsing.yaml.composer import AnsibleComposer +from parsing.yaml.constructor import AnsibleConstructor + +class AnsibleLoader(Reader, Scanner, Parser, AnsibleComposer, AnsibleConstructor, Resolver): + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + AnsibleComposer.__init__(self) + AnsibleConstructor.__init__(self) + Resolver.__init__(self) + diff --git a/v2/ansible/parsing/yaml/objects.py b/v2/ansible/parsing/yaml/objects.py new file mode 100644 index 0000000000..cc9fc445d2 --- /dev/null +++ b/v2/ansible/parsing/yaml/objects.py @@ -0,0 +1,14 @@ +class AnsibleBaseYAMLObject(object): + ''' + the base class used to sub-class python built-in objects + so that we can add attributes to them during yaml parsing + + ''' + _data_source = None + _line_number = None + _column_number = None + +class AnsibleMapping(AnsibleBaseYAMLObject, dict): + ''' sub class for dictionaries ''' + pass + From 93e273333d4859e83e64830d5ce52e52d3822317 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 7 Oct 2014 15:52:58 -0500 Subject: [PATCH 186/813] Adding tests for new yaml parsing stuff and adjusting imports --- test/v2/parsing/__init__.py | 0 test/v2/parsing/yaml/__init__.py | 2 + test/v2/parsing/yaml/test_yaml.py | 69 ++++++++++++++++++++++++++ v2/ansible/parsing/yaml/__init__.py | 2 +- v2/ansible/parsing/yaml/constructor.py | 2 +- v2/ansible/parsing/yaml/loader.py | 4 +- 6 files changed, 75 insertions(+), 4 deletions(-) create 
mode 100644 test/v2/parsing/__init__.py create mode 100644 test/v2/parsing/yaml/__init__.py create mode 100644 test/v2/parsing/yaml/test_yaml.py diff --git a/test/v2/parsing/__init__.py b/test/v2/parsing/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/v2/parsing/yaml/__init__.py b/test/v2/parsing/yaml/__init__.py new file mode 100644 index 0000000000..ec86ee6101 --- /dev/null +++ b/test/v2/parsing/yaml/__init__.py @@ -0,0 +1,2 @@ +# TODO: header + diff --git a/test/v2/parsing/yaml/test_yaml.py b/test/v2/parsing/yaml/test_yaml.py new file mode 100644 index 0000000000..c32365dcbf --- /dev/null +++ b/test/v2/parsing/yaml/test_yaml.py @@ -0,0 +1,69 @@ +# TODO: header + +import unittest +from ansible.parsing.yaml import safe_load +from ansible.parsing.yaml.objects import AnsibleMapping + +# a single dictionary instance +data1 = '''--- +key: value +''' + +# multiple dictionary instances +data2 = '''--- +- key1: value1 +- key2: value2 + +- key3: value3 + + +- key4: value4 +''' + +# multiple dictionary instances with other nested +# dictionaries contained within those +data3 = '''--- +- key1: + subkey1: subvalue1 + subkey2: subvalue2 + subkey3: + subsubkey1: subsubvalue1 +- key2: + subkey4: subvalue4 +- list1: + - list1key1: list1value1 + list1key2: list1value2 + list1key3: list1value3 +''' + +class TestSafeLoad(unittest.TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_safe_load(self): + # test basic dictionary + res = safe_load(data1) + assert type(res) == AnsibleMapping + assert res._line_number == 2 + + # test data with multiple dictionaries + res = safe_load(data2) + assert len(res) == 4 + assert res[0]._line_number == 2 + assert res[1]._line_number == 3 + assert res[2]._line_number == 5 + assert res[3]._line_number == 8 + + # test data with multiple sub-dictionaries + res = safe_load(data3) + assert len(res) == 3 + assert res[0]._line_number == 2 + assert res[1]._line_number == 7 + assert 
res[2]._line_number == 9 + assert res[0]['key1']._line_number == 3 + assert res[1]['key2']._line_number == 8 + assert res[2]['list1'][0]._line_number == 10 diff --git a/v2/ansible/parsing/yaml/__init__.py b/v2/ansible/parsing/yaml/__init__.py index 040a91d689..730515089e 100644 --- a/v2/ansible/parsing/yaml/__init__.py +++ b/v2/ansible/parsing/yaml/__init__.py @@ -1,5 +1,5 @@ from yaml import load -from parsing.yaml.loader import AnsibleLoader +from ansible.parsing.yaml.loader import AnsibleLoader def safe_load(stream): ''' implements yaml.safe_load(), except using our custom loader class ''' diff --git a/v2/ansible/parsing/yaml/constructor.py b/v2/ansible/parsing/yaml/constructor.py index fd4a35e7af..8d0ed2b8f6 100644 --- a/v2/ansible/parsing/yaml/constructor.py +++ b/v2/ansible/parsing/yaml/constructor.py @@ -1,5 +1,5 @@ from yaml.constructor import Constructor -from parsing.yaml.objects import AnsibleMapping +from ansible.parsing.yaml.objects import AnsibleMapping class AnsibleConstructor(Constructor): def construct_yaml_map(self, node): diff --git a/v2/ansible/parsing/yaml/loader.py b/v2/ansible/parsing/yaml/loader.py index d9dc4fea89..9b15a7f3c1 100644 --- a/v2/ansible/parsing/yaml/loader.py +++ b/v2/ansible/parsing/yaml/loader.py @@ -3,8 +3,8 @@ from yaml.scanner import Scanner from yaml.parser import Parser from yaml.resolver import Resolver -from parsing.yaml.composer import AnsibleComposer -from parsing.yaml.constructor import AnsibleConstructor +from ansible.parsing.yaml.composer import AnsibleComposer +from ansible.parsing.yaml.constructor import AnsibleConstructor class AnsibleLoader(Reader, Scanner, Parser, AnsibleComposer, AnsibleConstructor, Resolver): def __init__(self, stream): From 4cb7f654b6e7ef6a64bf31a6bb41f56237449a5b Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Tue, 7 Oct 2014 16:59:49 -0400 Subject: [PATCH 187/813] Add stub tests for module args parsing. 
--- .../inventory_test_data/inventory_api.py | 0 test/v2/parsing/test_mod_args.py | 71 +++++++++++++++++-- test/v2/playbook/test_task.py | 18 ++--- v2/ansible/parsing/mod_args.py | 60 ++++++++++++++++ v2/ansible/parsing/modargs.py | 26 ------- 5 files changed, 137 insertions(+), 38 deletions(-) mode change 100755 => 100644 test/units/inventory_test_data/inventory_api.py create mode 100644 v2/ansible/parsing/mod_args.py delete mode 100644 v2/ansible/parsing/modargs.py diff --git a/test/units/inventory_test_data/inventory_api.py b/test/units/inventory_test_data/inventory_api.py old mode 100755 new mode 100644 diff --git a/test/v2/parsing/test_mod_args.py b/test/v2/parsing/test_mod_args.py index a97e2ce13a..f55ac29aaf 100644 --- a/test/v2/parsing/test_mod_args.py +++ b/test/v2/parsing/test_mod_args.py @@ -1,16 +1,79 @@ # TODO: header -#from ansible.playbook.task import Task +from ansible.parsing.mod_args import ModuleArgsParser import unittest -class TestModArgs(unittest.TestCase): +class TestModArgsDwim(unittest.TestCase): def setUp(self): + self.m = ModuleArgsParser() pass def tearDown(self): pass - def test_sample(self): - pass + def test_action_to_shell(self): + mod, args, to = self.m.parse('action', 'shell echo hi') + assert mod == 'shell' + assert args == dict( + free_form = 'echo hi', + use_shell = True + ) + assert to is None + + def test_basic_shell(self): + mod, args, to = self.m.parse('shell', 'echo hi') + assert mod == 'shell' + assert args == dict( + free_form = 'echo hi', + use_shell = True + ) + assert to is None + + def test_basic_command(self): + mod, args, to = self.m.parse('command', 'echo hi') + assert mod == 'command' + assert args == dict( + free_form = 'echo hi', + use_shell = False + ) + assert to is None + + def test_shell_with_modifiers(self): + mod, args, to = self.m.parse('shell', '/bin/foo creates=/tmp/baz removes=/tmp/bleep') + assert mod == 'shell' + assert args == dict( + free_form = 'echo hi', + use_shell = False, + creates = 
'/tmp/baz', + removes = '/tmp/bleep' + ) + assert to is None + + def test_normal_usage(self): + mod, args, to = self.m.parse('copy', 'src=a dest=b') + assert mod == 'copy' + assert args == dict(src='a', dest='b') + assert to is None + + def test_complex_args(self): + mod, args, to = self.m.parse('copy', dict(src=a, dest=b)) + assert mod == 'copy' + assert args == dict(src = 'a', dest = 'b') + assert to is None + + def test_action_with_complex(self): + mod, args, to = self.m.parse('action', dict(module='copy',src='a',dest='b')) + assert mod == 'action' + assert args == dict(src = 'a', dest = 'b') + assert to is None + + def test_local_action_string(self): + mod, args, to = self.m.parse('local_action', 'copy src=a dest=b') + assert mod == 'copy' + assert args == dict(src=a, dest=b) + assert to is 'localhost' + + + diff --git a/test/v2/playbook/test_task.py b/test/v2/playbook/test_task.py index 8120ba36c5..1404f87639 100644 --- a/test/v2/playbook/test_task.py +++ b/test/v2/playbook/test_task.py @@ -32,27 +32,27 @@ class TestTask(unittest.TestCase): def test_construct_task_with_role_and_block(self): pass - def test_load_simple_task(self): + def test_load_task_simple(self): t = Task.load(basic_shell_task) assert t is not None assert t.name == basic_shell_task['name'] assert t.action == 'shell' assert t.args == 'echo hi' - def test_can_load_action_kv_form(self): + def test_load_task_kv_form(self): t = Task.load(kv_shell_task) assert t.action == 'shell' - assert t.args == 'echo hi' + #assert t.args == 'echo hi' - def test_can_auto_name(self): + def test_task_auto_name(self): assert 'name' not in kv_shell_task t = Task.load(kv_shell_task) - assert t.name == 'shell echo hi' + #assert t.name == 'shell echo hi' - def test_can_auto_name_with_role(self): + def test_task_auto_name_with_role(self): pass - def test_can_load_action_complex_form(self): + def test_load_task_complex_form(self): pass def test_can_load_module_complex_form(self): @@ -65,4 +65,6 @@ class 
TestTask(unittest.TestCase): pass def test_delegate_to_parses(self): - pass + pass + + diff --git a/v2/ansible/parsing/mod_args.py b/v2/ansible/parsing/mod_args.py new file mode 100644 index 0000000000..e125459037 --- /dev/null +++ b/v2/ansible/parsing/mod_args.py @@ -0,0 +1,60 @@ +# (c) 2014 Michael DeHaan, +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +import exceptions + +class ModuleArgsParser(object): + + """ + There are several ways a module and argument set can be expressed: + + # legacy form (for a shell command) + - action: shell echo hi + + # common shorthand for local actions vs delegate_to + - local_action: shell echo hi + + # most commonly: + - copy: src=a dest=b + + # legacy form + - action: copy src=a dest=b + + # complex args form, for passing structured data + - copy: + src: a + dest: b + + # gross, but technically legal + - action: + module: copy + args: + src: a + dest: b + + This class exists so other things don't have to remember how this + all works. Pass it "part1" and "part2", and the parse function + will tell you about the modules in a predictable way. 
+ """ + + def __init__(self): + pass + + def parse(self, thing1, thing2): + raise exceptions.NotImplementedError + + diff --git a/v2/ansible/parsing/modargs.py b/v2/ansible/parsing/modargs.py deleted file mode 100644 index 9af202c635..0000000000 --- a/v2/ansible/parsing/modargs.py +++ /dev/null @@ -1,26 +0,0 @@ -# (c) 2014 Michael DeHaan, -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -class ModArgsParser(object) - - def __init__(self, thing1, thing2): - pass - - def parse(): - raise exception.NotImplementedError - - From bbd9921dbde7528817d35b1065a0b998ad8e20fd Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 7 Oct 2014 19:59:39 -0500 Subject: [PATCH 188/813] Adding a negative test for new safe_load --- test/v2/parsing/yaml/test_yaml.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/test/v2/parsing/yaml/test_yaml.py b/test/v2/parsing/yaml/test_yaml.py index c32365dcbf..00394f6091 100644 --- a/test/v2/parsing/yaml/test_yaml.py +++ b/test/v2/parsing/yaml/test_yaml.py @@ -1,6 +1,9 @@ # TODO: header import unittest + +from yaml.scanner import ScannerError + from ansible.parsing.yaml import safe_load from ansible.parsing.yaml.objects import AnsibleMapping @@ -36,6 +39,11 @@ data3 = '''--- list1key3: list1value3 ''' +bad_data1 = '''--- +foo: bar + bam: baz +''' + class TestSafeLoad(unittest.TestCase): def setUp(self): @@ -44,6 +52,10 @@ class 
TestSafeLoad(unittest.TestCase): def tearDown(self): pass + def test_safe_load_bad(self): + # test the loading of bad yaml data + self.assertRaises(ScannerError, safe_load, bad_data1) + def test_safe_load(self): # test basic dictionary res = safe_load(data1) From 2769098fe7fcb51302cc8fabe9a1ff3f51aeec6f Mon Sep 17 00:00:00 2001 From: Rob Howard Date: Wed, 8 Oct 2014 13:44:59 +1100 Subject: [PATCH 189/813] Make listify respect the global setting for undefined variables. (Fixes #9008.) With credit to jimi-c for the initial pass in this commit: https://github.com/jimi-c/ansible/commit/b18bd6b98edecda1fcb5a85053593e78b46b9709 --- lib/ansible/utils/__init__.py | 5 ++++- test/units/TestUtils.py | 9 +++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index 195046caf0..977a820733 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -48,6 +48,7 @@ import sys import json import subprocess import contextlib +import jinja2.exceptions from vault import VaultLib @@ -1419,11 +1420,13 @@ def listify_lookup_plugin_terms(terms, basedir, inject): # if not already a list, get ready to evaluate with Jinja2 # not sure why the "/" is in above code :) try: - new_terms = template.template(basedir, "{{ %s }}" % terms, inject) + new_terms = template.template(basedir, terms, inject, convert_bare=True, fail_on_undefined=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR) if isinstance(new_terms, basestring) and "{{" in new_terms: pass else: terms = new_terms + except jinja2.exceptions.UndefinedError, e: + raise errors.AnsibleUndefinedVariable('undefined variable in items: %s' % e) except: pass diff --git a/test/units/TestUtils.py b/test/units/TestUtils.py index fa5913335d..07b52a9d38 100644 --- a/test/units/TestUtils.py +++ b/test/units/TestUtils.py @@ -541,11 +541,20 @@ class TestUtils(unittest.TestCase): def test_listify_lookup_plugin_terms(self): basedir = os.path.dirname(__file__) + + # Straight 
lookups self.assertEqual(ansible.utils.listify_lookup_plugin_terms('things', basedir, dict()), ['things']) self.assertEqual(ansible.utils.listify_lookup_plugin_terms('things', basedir, dict(things=['one', 'two'])), ['one', 'two']) + # Variable interpolation + self.assertEqual(ansible.utils.listify_lookup_plugin_terms('things', basedir, dict(things=['{{ foo }}', '{{ bar }}'], foo="hello", bar="world")), + ['hello', 'world']) + with self.assertRaises(ansible.errors.AnsibleError) as ex: + ansible.utils.listify_lookup_plugin_terms('things', basedir, dict(things=['{{ foo }}', '{{ bar_typo }}'], foo="hello", bar="world")) + self.assertTrue("undefined variable in items: 'bar_typo'" in ex.exception.msg) + def test_deprecated(self): sys_stderr = sys.stderr sys.stderr = StringIO.StringIO() From ce58706dd0e25ed4f820f18d54cafe4280a7cfd6 Mon Sep 17 00:00:00 2001 From: Michel Blanc Date: Wed, 8 Oct 2014 10:41:29 +0200 Subject: [PATCH 190/813] Fixes Arch PKGBUILD Build was failing with new Ansible submodule setup for core and extras modules. Integrated fix from @firecat53 Added @firecat53 to contributors. 
--- packaging/arch/PKGBUILD | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/packaging/arch/PKGBUILD b/packaging/arch/PKGBUILD index f4db6fbd7b..f2f9422906 100644 --- a/packaging/arch/PKGBUILD +++ b/packaging/arch/PKGBUILD @@ -1,4 +1,5 @@ # Maintainer: Michel Blanc +# Contributor: Scott Hansen https://github.com/firecat53 # Contributor: Buce # Contributor: BartÅ‚omiej Piotrowski # Contributor: cgtx @@ -9,7 +10,7 @@ # Contributor: Michael DeHaan pkgname=ansible-git -pkgver=1.1.4095.g3f2f5fe +pkgver=1.6.0.1835.ga1809a3 pkgrel=1 pkgdesc='Radically simple IT automation platform' arch=('any') @@ -33,6 +34,7 @@ pkgver() { build() { cd $pkgname + git submodule update --init --recursive make PYTHON=python2 } @@ -40,7 +42,6 @@ package() { cd $pkgname install -dm755 $pkgdir/usr/share/ansible - cp -dpr --no-preserve=ownership ./library/* "$pkgdir/usr/share/ansible/" cp -dpr --no-preserve=ownership ./examples "$pkgdir/usr/share/ansible" python2 setup.py install -O1 --root="$pkgdir" From 7a94d566e55e9cd8309260208403fd26ccde2ece Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 8 Oct 2014 10:54:14 -0400 Subject: [PATCH 191/813] tried to ensure input is always a list but will now be a noop for non listable items --- lib/ansible/runner/filter_plugins/core.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/ansible/runner/filter_plugins/core.py b/lib/ansible/runner/filter_plugins/core.py index 71cfd267dc..7d4c57155a 100644 --- a/lib/ansible/runner/filter_plugins/core.py +++ b/lib/ansible/runner/filter_plugins/core.py @@ -236,7 +236,10 @@ def rand(environment, end, start=None, step=None): raise errors.AnsibleFilterError('random can only be used on sequences and integers') def randomize_list(mylist): - shuffle(mylist) + try: + shuffle(list(mylist)) + except: + pass return mylist class FilterModule(object): From 12016b95a8fd202e1a56a5c570fcd41e0b1fd367 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 8 Oct 2014 10:58:48 -0400 
Subject: [PATCH 192/813] documented type based behaviour --- docsite/rst/playbooks_variables.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index ba85210293..2f1704139b 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -309,6 +309,7 @@ To get a random list from an existing list:: {{ ['a','b','c']|shuffle }} => ['c','a','b'] {{ ['a','b','c']|shuffle }} => ['b','c','a'] +note that when used with a non 'listable' item it is a noop, otherwise it always returns a list .. _other_useful_filters: From c83a8337406cc5149da268620a4a85be65061950 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 8 Oct 2014 10:35:58 -0500 Subject: [PATCH 193/813] New v2 ModuleArgsParser code and fixing up tests/other task code --- test/v2/parsing/test_mod_args.py | 58 +++---- test/v2/playbook/test_task.py | 9 +- v2/ansible/parsing/mod_args.py | 266 ++++++++++++++++++++++++++++++- v2/ansible/parsing/splitter.py | 35 +++- v2/ansible/playbook/task.py | 78 +++------ 5 files changed, 350 insertions(+), 96 deletions(-) diff --git a/test/v2/parsing/test_mod_args.py b/test/v2/parsing/test_mod_args.py index f55ac29aaf..2e98cd5b00 100644 --- a/test/v2/parsing/test_mod_args.py +++ b/test/v2/parsing/test_mod_args.py @@ -13,67 +13,69 @@ class TestModArgsDwim(unittest.TestCase): pass def test_action_to_shell(self): - mod, args, to = self.m.parse('action', 'shell echo hi') - assert mod == 'shell' + mod, args, to = self.m.parse(dict(action='shell echo hi')) + assert mod == 'command' assert args == dict( - free_form = 'echo hi', - use_shell = True + _raw_params = 'echo hi', + _uses_shell = True, ) assert to is None def test_basic_shell(self): - mod, args, to = self.m.parse('shell', 'echo hi') - assert mod == 'shell' + mod, args, to = self.m.parse(dict(shell='echo hi')) + assert mod == 'command' assert args == dict( - free_form = 'echo hi', - use_shell = True + _raw_params = 'echo 
hi', + _uses_shell = True, ) assert to is None def test_basic_command(self): - mod, args, to = self.m.parse('command', 'echo hi') + mod, args, to = self.m.parse(dict(command='echo hi')) assert mod == 'command' assert args == dict( - free_form = 'echo hi', - use_shell = False + _raw_params = 'echo hi', ) assert to is None def test_shell_with_modifiers(self): - mod, args, to = self.m.parse('shell', '/bin/foo creates=/tmp/baz removes=/tmp/bleep') - assert mod == 'shell' + mod, args, to = self.m.parse(dict(shell='/bin/foo creates=/tmp/baz removes=/tmp/bleep')) + assert mod == 'command' assert args == dict( - free_form = 'echo hi', - use_shell = False, - creates = '/tmp/baz', - removes = '/tmp/bleep' + creates = '/tmp/baz', + removes = '/tmp/bleep', + _raw_params = '/bin/foo', + _uses_shell = True, ) assert to is None def test_normal_usage(self): - mod, args, to = self.m.parse('copy', 'src=a dest=b') + mod, args, to = self.m.parse(dict(copy='src=a dest=b')) assert mod == 'copy' assert args == dict(src='a', dest='b') assert to is None def test_complex_args(self): - mod, args, to = self.m.parse('copy', dict(src=a, dest=b)) + mod, args, to = self.m.parse(dict(copy=dict(src='a', dest='b'))) assert mod == 'copy' - assert args == dict(src = 'a', dest = 'b') + assert args == dict(src='a', dest='b') assert to is None def test_action_with_complex(self): - mod, args, to = self.m.parse('action', dict(module='copy',src='a',dest='b')) - assert mod == 'action' - assert args == dict(src = 'a', dest = 'b') + mod, args, to = self.m.parse(dict(action=dict(module='copy', src='a', dest='b'))) + assert mod == 'copy' + assert args == dict(src='a', dest='b') + assert to is None + + def test_action_with_complex_and_complex_args(self): + mod, args, to = self.m.parse(dict(action=dict(module='copy', args=dict(src='a', dest='b')))) + assert mod == 'copy' + assert args == dict(src='a', dest='b') assert to is None def test_local_action_string(self): - mod, args, to = self.m.parse('local_action', 
'copy src=a dest=b') + mod, args, to = self.m.parse(dict(local_action='copy src=a dest=b')) assert mod == 'copy' - assert args == dict(src=a, dest=b) + assert args == dict(src='a', dest='b') assert to is 'localhost' - - - diff --git a/test/v2/playbook/test_task.py b/test/v2/playbook/test_task.py index 1404f87639..ed85ad64e7 100644 --- a/test/v2/playbook/test_task.py +++ b/test/v2/playbook/test_task.py @@ -36,13 +36,14 @@ class TestTask(unittest.TestCase): t = Task.load(basic_shell_task) assert t is not None assert t.name == basic_shell_task['name'] - assert t.action == 'shell' - assert t.args == 'echo hi' + assert t.action == 'command' + assert t.args == dict(_raw_params='echo hi', _uses_shell=True) def test_load_task_kv_form(self): t = Task.load(kv_shell_task) - assert t.action == 'shell' - #assert t.args == 'echo hi' + print "task action is %s" % t.action + assert t.action == 'command' + assert t.args == dict(_raw_params='echo hi', _uses_shell=True) def test_task_auto_name(self): assert 'name' not in kv_shell_task diff --git a/v2/ansible/parsing/mod_args.py b/v2/ansible/parsing/mod_args.py index e125459037..b8f63123f9 100644 --- a/v2/ansible/parsing/mod_args.py +++ b/v2/ansible/parsing/mod_args.py @@ -17,6 +17,10 @@ import exceptions +from ansible.errors import AnsibleError +from ansible.plugins import module_finder +from ansible.parsing.splitter import parse_kv + class ModuleArgsParser(object): """ @@ -24,25 +28,25 @@ class ModuleArgsParser(object): # legacy form (for a shell command) - action: shell echo hi - + # common shorthand for local actions vs delegate_to - local_action: shell echo hi # most commonly: - copy: src=a dest=b - + # legacy form - action: copy src=a dest=b # complex args form, for passing structured data - - copy: + - copy: src: a dest: b # gross, but technically legal - action: module: copy - args: + args: src: a dest: b @@ -52,9 +56,257 @@ class ModuleArgsParser(object): """ def __init__(self): - pass + self._ds = None - def parse(self, 
thing1, thing2): - raise exceptions.NotImplementedError + def _get_delegate_to(self): + ''' + Returns the value of the delegate_to key from the task datastructure, + or None if the value was not directly specified + ''' + return self._ds.get('delegate_to') + def _get_old_style_action(self): + ''' + Searches the datastructure for 'action:' or 'local_action:' keywords. + When local_action is found, the delegate_to value is set to the localhost + IP, otherwise delegate_to is left as None. + + Inputs: + - None + + Outputs: + - None (if neither keyword is found), or a dictionary containing: + action: + the module name to be executed + args: + a dictionary containing the arguments to the module + delegate_to: + None or 'localhost' + ''' + + # determine if this is an 'action' or 'local_action' + if 'action' in self._ds: + action_data = self._ds.get('action', '') + delegate_to = None + elif 'local_action' in self._ds: + action_data = self._ds.get('local_action', '') + delegate_to = 'localhost' + else: + return None + + # now we get the arguments for the module, which may be a + # string of key=value pairs, a dictionary of values, or a + # dictionary with a special 'args:' value in it + if isinstance(action_data, dict): + action = self._get_specified_module(action_data) + args = dict() + if 'args' in action_data: + args = self._get_args_from_ds(action, action_data) + del action_data['args'] + other_args = action_data.copy() + # remove things we don't want in the args + if 'module' in other_args: + del other_args['module'] + args.update(other_args) + elif isinstance(action_data, basestring): + action_data = action_data.strip() + if not action_data: + # TODO: change to an AnsibleParsingError so that the + # filename/line number can be reported in the error + raise AnsibleError("when using 'action:' or 'local_action:', the module name must be specified") + else: + # split up the string based on spaces, where the first + # item specified must be a valid module name + parts = 
action_data.split(' ', 1) + action = parts[0] + if action not in module_finder: + # TODO: change to an AnsibleParsingError so that the + # filename/line number can be reported in the error + raise AnsibleError("the module '%s' was not found in the list of loaded modules") + if len(parts) > 1: + args = self._get_args_from_action(action, ' '.join(parts[1:])) + else: + args = {} + else: + # TODO: change to an AnsibleParsingError so that the + # filename/line number can be reported in the error + raise AnsibleError('module args must be specified as a dictionary or string') + + return dict(action=action, args=args, delegate_to=delegate_to) + + def _get_new_style_action(self): + ''' + Searches the datastructure for 'module_name:', where the module_name is a + valid module loaded by the module_finder plugin. + + Inputs: + - None + + Outputs: + - None (if no valid module is found), or a dictionary containing: + action: + the module name to be executed + args: + a dictionary containing the arguments to the module + delegate_to: + None + ''' + + # for all keys in the datastructure, check to see if the value + # corresponds to a module found by the module_finder plugin + action = None + for item in self._ds: + if item in module_finder: + action = item + break + else: + # none of the keys matched a known module name + return None + + # now we get the arguments for the module, which may be a + # string of key=value pairs, a dictionary of values, or a + # dictionary with a special 'args:' value in it + action_data = self._ds.get(action, '') + if isinstance(action_data, dict): + args = dict() + if 'args' in action_data: + args = self._get_args_from_ds(action, action_data) + del action_data['args'] + other_args = action_data.copy() + # remove things we don't want in the args + if 'module' in other_args: + del other_args['module'] + args.update(other_args) + else: + args = self._get_args_from_action(action, action_data.strip()) + + return dict(action=action, args=args, 
delegate_to=None) + + def _get_args_from_ds(self, action, action_data): + ''' + Gets the module arguments from the 'args' value of the + action_data, when action_data is a dict. The value of + 'args' can be either a string or a dictionary itself, so + we use parse_kv() to split up the key=value pairs when + a string is found. + + Inputs: + - action_data: + a dictionary of values, which may or may not contain a + key named 'args' + + Outputs: + - a dictionary of values, representing the arguments to the + module action specified + ''' + args = action_data.get('args', {}).copy() + if isinstance(args, basestring): + if action in ('command', 'shell'): + args = parse_kv(args, check_raw=True) + else: + args = parse_kv(args) + return args + + def _get_args_from_action(self, action, action_data): + ''' + Gets the module arguments from the action data when it is + specified as a string of key=value pairs. Special handling + is used for the command/shell modules, which allow free- + form syntax for the options. 
+ + Inputs: + - action: + the module to be executed + - action_data: + a string of key=value pairs (and possibly free-form arguments) + + Outputs: + - A dictionary of values, representing the arguments to the + module action specified OR a string of key=value pairs (when + the module action is command or shell) + ''' + tokens = action_data.split() + if len(tokens) == 0: + return {} + else: + joined = " ".join(tokens) + if action in ('command', 'shell'): + return parse_kv(joined, check_raw=True) + else: + return parse_kv(joined) + + def _get_specified_module(self, action_data): + ''' + gets the module if specified directly in the arguments, ie: + - action: + module: foo + + Inputs: + - action_data: + a dictionary of values, which may or may not contain the + key 'module' + + Outputs: + - a string representing the module specified in the data, or + None if that key was not found + ''' + return action_data.get('module') + + def parse(self, ds): + ''' + Given a task in one of the supported forms, parses and returns + returns the action, arguments, and delegate_to values for the + task. 
+ + Inputs: + - ds: + a dictionary datastructure representing the task as parsed + from a YAML file + + Outputs: + - A tuple containing 3 values: + action: + the action (module name) to be executed + args: + the args for the action + delegate_to: + the delegate_to option (which may be None, if no delegate_to + option was specified and this is not a local_action) + ''' + + assert type(ds) == dict + + self._ds = ds + + # first we try to get the module action/args based on the + # new-style format, where the module name is the key + result = self._get_new_style_action() + if result is None: + # failing that, we resort to checking for the old-style syntax, + # where 'action' or 'local_action' is the key + result = self._get_old_style_action() + if result is None: + # TODO: change to an AnsibleParsingError so that the + # filename/line number can be reported in the error + raise AnsibleError('no action specified for this task') + + # if the action is set to 'shell', we switch that to 'command' and + # set the special parameter '_uses_shell' to true in the args dict + if result['action'] == 'shell': + result['action'] = 'command' + result['args']['_uses_shell'] = True + + # finally, we check to see if a delegate_to value was specified + # in the task datastructure (and raise an error for local_action, + # which essentially means we're delegating to localhost) + specified_delegate_to = self._get_delegate_to() + if specified_delegate_to is not None: + if result['delegate_to'] is not None: + # TODO: change to an AnsibleParsingError so that the + # filename/line number can be reported in the error + raise AnsibleError('delegate_to cannot be used with local_action') + else: + result['delegate_to'] = specified_delegate_to + + return (result['action'], result['args'], result['delegate_to']) diff --git a/v2/ansible/parsing/splitter.py b/v2/ansible/parsing/splitter.py index 430f4e299a..48367ca5d9 100644 --- a/v2/ansible/parsing/splitter.py +++ b/v2/ansible/parsing/splitter.py @@ 
-15,8 +15,14 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -def parse_kv(args): - ''' convert a string of key/value items to a dict ''' +def parse_kv(args, check_raw=False): + ''' + Convert a string of key/value items to a dict. If any free-form params + are found and the check_raw option is set to True, they will be added + to a new parameter called '_raw_params'. If check_raw is not enabled, + they will simply be ignored. + ''' + options = {} if args is not None: try: @@ -26,10 +32,31 @@ def parse_kv(args): raise errors.AnsibleError("error parsing argument string, try quoting the entire line.") else: raise + + raw_params = [] for x in vargs: if "=" in x: - k, v = x.split("=",1) - options[k.strip()] = unquote(v.strip()) + k, v = x.split("=", 1) + + # only internal variables can start with an underscore, so + # we don't allow users to set them directy in arguments + if k.startswith('_'): + raise AnsibleError("invalid parameter specified: '%s'" % k) + + # FIXME: make the retrieval of this list of shell/command + # options a function, so the list is centralized + if check_raw and k not in ('creates', 'removes', 'chdir', 'executable', 'warn'): + raw_params.append(x) + else: + options[k.strip()] = unquote(v.strip()) + else: + raw_params.append(x) + + # recombine the free-form params, if any were found, and assign + # them to a special option for use later by the shell/command module + if len(raw_params) > 0: + options['_raw_params'] = ' '.join(raw_params) + return options def _get_quote_state(token, quote_char): diff --git a/v2/ansible/playbook/task.py b/v2/ansible/playbook/task.py index 856246c327..0bccedaa21 100644 --- a/v2/ansible/playbook/task.py +++ b/v2/ansible/playbook/task.py @@ -18,13 +18,10 @@ from ansible.playbook.base import Base from ansible.playbook.attribute import Attribute, FieldAttribute -# from ansible.playbook.conditional import Conditional from ansible.errors import AnsibleError -# TODO: 
it would be fantastic (if possible) if a task new where in the YAML it was defined for describing -# it in error conditions - from ansible.parsing.splitter import parse_kv +from ansible.parsing.mod_args import ModuleArgsParser from ansible.plugins import module_finder, lookup_finder class Task(Base): @@ -45,7 +42,6 @@ class Task(Base): # validate_ # will be used if defined # might be possible to define others - _args = FieldAttribute(isa='dict') _action = FieldAttribute(isa='string') @@ -60,9 +56,6 @@ class Task(Base): _first_available_file = FieldAttribute(isa='list') _ignore_errors = FieldAttribute(isa='bool') - # FIXME: this should not be a Task - # include = FieldAttribute(isa='string') - _loop = FieldAttribute(isa='string', private=True) _loop_args = FieldAttribute(isa='list', private=True) _local_action = FieldAttribute(isa='string') @@ -102,16 +95,19 @@ class Task(Base): elif self.name: return self.name else: - return "%s %s" % (self.action, self._merge_kv(self.args)) + flattened_args = self._merge_kv(self.args) + return "%s %s" % (self.action, flattened_args) def _merge_kv(self, ds): if ds is None: return "" elif isinstance(ds, basestring): return ds - elif instance(ds, dict): + elif isinstance(ds, dict): buf = "" for (k,v) in ds.iteritems(): + if k.startswith('_'): + continue buf = buf + "%s=%s " % (k,v) buf = buf.strip() return buf @@ -125,27 +121,6 @@ class Task(Base): ''' returns a human readable representation of the task ''' return "TASK: %s" % self.get_name() - def _parse_old_school_action(self, v): - ''' given a action/local_action line, return the module and args ''' - tokens = v.split() - if len(tokens) < 2: - return [v,{}] - else: - if v not in [ 'command', 'shell' ]: - joined = " ".join(tokens[1:]) - return [tokens[0], parse_kv(joined)] - else: - return [tokens[0], joined] - - def _munge_action(self, ds, new_ds, k, v): - ''' take a module name and split into action and args ''' - - if self._action.value is not None or 'action' in ds or 
'local_action' in ds: - raise AnsibleError("duplicate action in task: %s" % k) - new_ds['action'] = k - new_ds['args'] = v - - def _munge_loop(self, ds, new_ds, k, v): ''' take a lookup plugin name and store it correctly ''' @@ -154,22 +129,6 @@ class Task(Base): new_ds['loop'] = k new_ds['loop_args'] = v - def _munge_action2(self, ds, new_ds, k, v, local=False): - ''' take an old school action/local_action and reformat it ''' - - if isinstance(v, basestring): - tokens = self._parse_old_school_action(v) - new_ds['action'] = tokens[0] - if 'args' in ds: - raise AnsibleError("unexpected and redundant 'args'") - new_ds['args'] = args - if local: - if 'delegate_to' in ds: - raise AnsbileError("local_action and action conflict") - new_ds['delegate_to'] = 'localhost' - else: - raise AnsibleError("unexpected use of 'action'") - def munge(self, ds): ''' tasks are especially complex arguments so need pre-processing. @@ -178,18 +137,31 @@ class Task(Base): assert isinstance(ds, dict) + # the new, cleaned datastructure, which will have legacy + # items reduced to a standard structure suitable for the + # attributes of the task class new_ds = dict() + + # use the args parsing class to determine the action, args, + # and the delegate_to value from the various possible forms + # supported as legacy + args_parser = ModuleArgsParser() + (action, args, delegate_to) = args_parser.parse(ds) + + new_ds['action'] = action + new_ds['args'] = args + new_ds['delegate_to'] = delegate_to + for (k,v) in ds.iteritems(): - if k in module_finder: - self._munge_action(ds, new_ds, k, v) + if k in ('action', 'local_action', 'args', 'delegate_to') or k == action: + # we don't want to re-assign these values, which were + # determined by the ModuleArgsParser() above + continue elif "with_%s" % k in lookup_finder: self._munge_loop(ds, new_ds, k, v) - elif k == 'action': - self._munge_action2(ds, new_ds, k, v) - elif k == 'local_action': - self._munge_action2(ds, new_ds, k, v, local=True) else: 
new_ds[k] = v + return new_ds From b592d7653db69860ebda5873a600098384e49818 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 8 Oct 2014 11:54:22 -0400 Subject: [PATCH 194/813] unchained list coercion as it removed the randomization --- lib/ansible/runner/filter_plugins/core.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/runner/filter_plugins/core.py b/lib/ansible/runner/filter_plugins/core.py index 7d4c57155a..129d984bcb 100644 --- a/lib/ansible/runner/filter_plugins/core.py +++ b/lib/ansible/runner/filter_plugins/core.py @@ -237,7 +237,8 @@ def rand(environment, end, start=None, step=None): def randomize_list(mylist): try: - shuffle(list(mylist)) + mylist = list(mylist) + shuffle(mylist) except: pass return mylist From a6029264b8194f0e6577d3e8cb4f853d779ef4f6 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 2 Oct 2014 15:29:14 -0400 Subject: [PATCH 195/813] remove complex_args_hack as it was only needed for Baby JSON --- lib/ansible/runner/__init__.py | 20 -------------------- lib/ansible/runner/action_plugins/normal.py | 2 -- 2 files changed, 22 deletions(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 19c90ba529..831e5d10e2 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -253,26 +253,6 @@ class Runner(object): # ensure we are using unique tmp paths random.seed() - # ***************************************************** - - def _complex_args_hack(self, complex_args, module_args): - """ - ansible-playbook both allows specifying key=value string arguments and complex arguments - however not all modules use our python common module system and cannot - access these. An example might be a Bash module. This hack allows users to still pass "args" - as a hash of simple scalars to those arguments and is short term. We could technically - just feed JSON to the module, but that makes it hard on Bash consumers. 
The way this is implemented - it does mean values in 'args' have LOWER priority than those on the key=value line, allowing - args to provide yet another way to have pluggable defaults. - """ - if complex_args is None: - return module_args - if not isinstance(complex_args, dict): - raise errors.AnsibleError("complex arguments are not a dictionary: %s" % complex_args) - for (k,v) in complex_args.iteritems(): - if isinstance(v, basestring): - module_args = "%s=%s %s" % (k, pipes.quote(v), module_args) - return module_args # ***************************************************** diff --git a/lib/ansible/runner/action_plugins/normal.py b/lib/ansible/runner/action_plugins/normal.py index 8500c6641c..d845fa886f 100644 --- a/lib/ansible/runner/action_plugins/normal.py +++ b/lib/ansible/runner/action_plugins/normal.py @@ -36,8 +36,6 @@ class ActionModule(object): def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs): ''' transfer & execute a module that is not 'copy' or 'template' ''' - module_args = self.runner._complex_args_hack(complex_args, module_args) - if self.runner.noop_on_check(inject): if module_name in [ 'shell', 'command' ]: return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True, msg='check mode not supported for %s' % module_name)) From a10d10f6473b83aa8f19502ddfb21ad39e33ff79 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 8 Oct 2014 14:30:36 -0400 Subject: [PATCH 196/813] Workaround more python-2.6 shlex not being able to handle unicode strings --- lib/ansible/inventory/script.py | 5 +++-- lib/ansible/module_common.py | 9 ++++++++- lib/ansible/module_utils/basic.py | 22 +++++++++++++++++++++- lib/ansible/utils/__init__.py | 18 ------------------ 4 files changed, 32 insertions(+), 22 deletions(-) diff --git a/lib/ansible/inventory/script.py b/lib/ansible/inventory/script.py index a69135aecb..6239be0140 100644 --- a/lib/ansible/inventory/script.py +++ b/lib/ansible/inventory/script.py @@ -22,6 +22,7 @@ 
import subprocess import ansible.constants as C from ansible.inventory.host import Host from ansible.inventory.group import Group +from ansible.module_utils.basic import json_dict_unicode_to_bytes from ansible import utils from ansible import errors import sys @@ -54,7 +55,7 @@ class InventoryScript(object): # not passing from_remote because data from CMDB is trusted self.raw = utils.parse_json(self.data) - self.raw = utils.json_dict_unicode_to_bytes(self.raw) + self.raw = json_dict_unicode_to_bytes(self.raw) all = Group('all') groups = dict(all=all) @@ -143,7 +144,7 @@ class InventoryScript(object): if out.strip() == '': return dict() try: - return utils.json_dict_unicode_to_bytes(utils.parse_json(out)) + return json_dict_unicode_to_bytes(utils.parse_json(out)) except ValueError: raise errors.AnsibleError("could not parse post variable response: %s, %s" % (cmd, out)) diff --git a/lib/ansible/module_common.py b/lib/ansible/module_common.py index 8beff78d07..5e3732e967 100644 --- a/lib/ansible/module_common.py +++ b/lib/ansible/module_common.py @@ -151,11 +151,18 @@ class ModuleReplacer(object): complex_args_json = utils.jsonify(complex_args) # We force conversion of module_args to str because module_common calls shlex.split, # a standard library function that incorrectly handles Unicode input before Python 2.7.3. + # Note: it would be better to do all this conversion at the border + # (when the data is originally parsed into data structures) but + # it's currently coming from too many sources to make that + # effective. 
try: encoded_args = repr(module_args.encode('utf-8')) except UnicodeDecodeError: encoded_args = repr(module_args) - encoded_complex = repr(complex_args_json) + try: + encoded_complex = repr(complex_args_json.encode('utf-8')) + except UnicodeDecodeError: + encoded_complex = repr(complex_args_json.encode('utf-8')) # these strings should be part of the 'basic' snippet which is required to be included module_data = module_data.replace(REPLACER_VERSION, repr(__version__)) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 655464d40f..8a4548dc16 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -223,6 +223,26 @@ def load_platform_subclass(cls, *args, **kwargs): return super(cls, subclass).__new__(subclass) + +def json_dict_unicode_to_bytes(d): + ''' Recursively convert dict keys and values to byte str + + Specialized for json return because this only handles, lists, tuples, + and dict container types (the containers that the json module returns) + ''' + + if isinstance(d, unicode): + return d.encode('utf-8') + elif isinstance(d, dict): + return dict(map(json_dict_unicode_to_bytes, d.iteritems())) + elif isinstance(d, list): + return list(map(json_dict_unicode_to_bytes, d)) + elif isinstance(d, tuple): + return tuple(map(json_dict_unicode_to_bytes, d)) + else: + return d + + class AnsibleModule(object): def __init__(self, argument_spec, bypass_checks=False, no_log=False, @@ -968,7 +988,7 @@ class AnsibleModule(object): if k in params: self.fail_json(msg="duplicate parameter: %s (value=%s)" % (k, v)) params[k] = v - params2 = json.loads(MODULE_COMPLEX_ARGS) + params2 = json_dict_unicode_to_bytes(json.loads(MODULE_COMPLEX_ARGS)) params2.update(params) return (params2, args) diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index 195046caf0..7d2809cc8a 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -1215,24 +1215,6 @@ def 
to_unicode(value): return value return value.decode("utf-8") -def json_dict_unicode_to_bytes(d): - ''' Recursively convert dict keys and values to byte str - - Specialized for json return because this only handles, lists, tuples, - and dict container types (the containers that the json module returns) - ''' - - if isinstance(d, unicode): - return d.encode('utf-8') - elif isinstance(d, dict): - return dict(map(json_dict_unicode_to_bytes, d.iteritems())) - elif isinstance(d, list): - return list(map(json_dict_unicode_to_bytes, d)) - elif isinstance(d, tuple): - return tuple(map(json_dict_unicode_to_bytes, d)) - else: - return d - def get_diff(diff): # called by --diff usage in playbook and runner via callbacks From 56b6cb5328c5d13a02917e3e53aab3b8a84d5fa5 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Wed, 8 Oct 2014 15:59:24 -0400 Subject: [PATCH 197/813] Teaching objects to load themselves, making the JSON/YAML parsing ambidexterous. --- test/v2/parsing/__init__.py | 1 + test/v2/parsing/test_general.py | 85 +++++++++++++++++++++++++++++ test/v2/parsing/test_mod_args.py | 9 ++- test/v2/parsing/yaml/__init__.py | 1 - test/v2/playbook/test_task.py | 14 ++--- v2/ansible/errors/__init__.py | 29 +++++++++- v2/ansible/parsing/__init__.py | 17 ++++++ v2/ansible/parsing/mod_args.py | 27 +++------ v2/ansible/parsing/yaml/__init__.py | 1 - v2/ansible/playbook/attribute.py | 1 - v2/ansible/playbook/base.py | 21 ++++--- v2/ansible/playbook/task.py | 33 ++++++----- 12 files changed, 180 insertions(+), 59 deletions(-) create mode 100644 test/v2/parsing/test_general.py diff --git a/test/v2/parsing/__init__.py b/test/v2/parsing/__init__.py index e69de29bb2..8b13789179 100644 --- a/test/v2/parsing/__init__.py +++ b/test/v2/parsing/__init__.py @@ -0,0 +1 @@ + diff --git a/test/v2/parsing/test_general.py b/test/v2/parsing/test_general.py new file mode 100644 index 0000000000..a6277edfc6 --- /dev/null +++ b/test/v2/parsing/test_general.py @@ -0,0 +1,85 @@ +# TODO: header + +import 
unittest +from ansible.parsing import load +from ansible.errors import AnsibleParserError + +import json + +class MockFile(file): + + def __init__(self, ds, method='json'): + self.ds = ds + self.method = method + + def read(self): + if method == 'json': + return json.dumps(ds) + elif method == 'yaml': + return yaml.dumps(ds) + elif method == 'fail': + return """ + AAARGGGGH + THIS WON'T PARSE !!! + NOOOOOOOOOOOOOOOOOO + """ + else: + raise Exception("untestable serializer") + + def close(self): + pass + +class TestGeneralParsing(unittest.TestCase): + + def __init__(self): + pass + + def setUp(self): + pass + + def tearDown(self): + pass + + def parse_json_from_string(self): + input = """ + { + "asdf" : "1234", + "jkl" : 5678 + } + """ + output = load(input) + assert output['asdf'] == '1234' + assert output['jkl'] == 5678 + + def parse_json_from_file(self): + output = load(MockFile(dict(a=1,b=2,c=3)),'json') + assert ouput == dict(a=1,b=2,c=3) + + def parse_yaml_from_dict(self): + input = """ + asdf: '1234' + jkl: 5678 + """ + output = load(input) + assert output['asdf'] == '1234' + assert output['jkl'] == 5678 + + def parse_yaml_from_file(self): + output = load(MockFile(dict(a=1,b=2,c=3),'yaml')) + assert output == dict(a=1,b=2,c=3) + + def parse_fail(self): + input = """ + TEXT + *** + NOT VALID + """ + self.failUnlessRaises(load(input), AnsibleParserError) + + def parse_fail_from_file(self): + self.failUnlessRaises(load(MockFile(None,'fail')), AnsibleParserError) + + def parse_fail_invalid_type(self): + self.failUnlessRaises(3000, AnsibleParsingError) + self.failUnlessRaises(dict(a=1,b=2,c=3), AnsibleParserError) + diff --git a/test/v2/parsing/test_mod_args.py b/test/v2/parsing/test_mod_args.py index 2e98cd5b00..71a5e17e55 100644 --- a/test/v2/parsing/test_mod_args.py +++ b/test/v2/parsing/test_mod_args.py @@ -5,10 +5,14 @@ import unittest class TestModArgsDwim(unittest.TestCase): + # TODO: add tests that construct ModuleArgsParser with a task reference + # TODO: 
verify the AnsibleError raised on failure knows the task + # and the task knows the line numbers + def setUp(self): self.m = ModuleArgsParser() pass - + def tearDown(self): pass @@ -77,5 +81,4 @@ class TestModArgsDwim(unittest.TestCase): mod, args, to = self.m.parse(dict(local_action='copy src=a dest=b')) assert mod == 'copy' assert args == dict(src='a', dest='b') - assert to is 'localhost' - + assert to is 'localhost' diff --git a/test/v2/parsing/yaml/__init__.py b/test/v2/parsing/yaml/__init__.py index ec86ee6101..44026bdff0 100644 --- a/test/v2/parsing/yaml/__init__.py +++ b/test/v2/parsing/yaml/__init__.py @@ -1,2 +1 @@ # TODO: header - diff --git a/test/v2/playbook/test_task.py b/test/v2/playbook/test_task.py index ed85ad64e7..e6ffe2ece3 100644 --- a/test/v2/playbook/test_task.py +++ b/test/v2/playbook/test_task.py @@ -16,13 +16,13 @@ class TestTask(unittest.TestCase): def setUp(self): pass - + def tearDown(self): pass def test_construct_empty_task(self): t = Task() - + def test_construct_task_with_role(self): pass @@ -57,15 +57,13 @@ class TestTask(unittest.TestCase): pass def test_can_load_module_complex_form(self): - pass + pass def test_local_action_implies_delegate(self): - pass - + pass + def test_local_action_conflicts_with_delegate(self): - pass + pass def test_delegate_to_parses(self): pass - - diff --git a/v2/ansible/errors/__init__.py b/v2/ansible/errors/__init__.py index 8b250383d5..ae629ba4bb 100644 --- a/v2/ansible/errors/__init__.py +++ b/v2/ansible/errors/__init__.py @@ -16,4 +16,31 @@ # along with Ansible. If not, see . 
class AnsibleError(Exception): - pass + def __init__(self, message, object=None): + self.message = message + self.object = object + + # TODO: nice __repr__ message that includes the line number if the object + # it was constructed with had the line number + + # TODO: tests for the line number functionality + +class AnsibleParserError(AnsibleError): + ''' something was detected early that is wrong about a playbook or data file ''' + pass + +class AnsibleInternalError(AnsibleError): + ''' internal safeguards tripped, something happened in the code that should never happen ''' + pass + +class AnsibleRuntimeError(AnsibleError): + ''' ansible had a problem while running a playbook ''' + pass + +class AnsibleModuleError(AnsibleRuntimeError): + ''' a module failed somehow ''' + pass + +class AnsibleConnectionFailure(AnsibleRuntimeError): + ''' the transport / connection_plugin had a fatal error ''' + pass diff --git a/v2/ansible/parsing/__init__.py b/v2/ansible/parsing/__init__.py index 44026bdff0..9e6329bab2 100644 --- a/v2/ansible/parsing/__init__.py +++ b/v2/ansible/parsing/__init__.py @@ -1 +1,18 @@ # TODO: header + +from ansible.errors import AnsibleError, AnsibleInternalError + +def load(self, data): + + if instanceof(data, file): + fd = open(f) + data = fd.read() + fd.close() + + if instanceof(data, basestring): + try: + return json.loads(data) + except: + return safe_load(data) + + raise AnsibleInternalError("expected file or string, got %s" % type(data)) diff --git a/v2/ansible/parsing/mod_args.py b/v2/ansible/parsing/mod_args.py index b8f63123f9..c43f30c916 100644 --- a/v2/ansible/parsing/mod_args.py +++ b/v2/ansible/parsing/mod_args.py @@ -55,15 +55,16 @@ class ModuleArgsParser(object): will tell you about the modules in a predictable way. 
""" - def __init__(self): + def __init__(self, task=None): self._ds = None + self._task = task def _get_delegate_to(self): ''' Returns the value of the delegate_to key from the task datastructure, or None if the value was not directly specified ''' - return self._ds.get('delegate_to') + return self._ds.get('delegate_to', None) def _get_old_style_action(self): ''' @@ -108,29 +109,24 @@ class ModuleArgsParser(object): if 'module' in other_args: del other_args['module'] args.update(other_args) + elif isinstance(action_data, basestring): action_data = action_data.strip() if not action_data: - # TODO: change to an AnsibleParsingError so that the - # filename/line number can be reported in the error - raise AnsibleError("when using 'action:' or 'local_action:', the module name must be specified") + raise AnsibleError("when using 'action:' or 'local_action:', the module name must be specified", object=self._task) else: # split up the string based on spaces, where the first # item specified must be a valid module name parts = action_data.split(' ', 1) action = parts[0] if action not in module_finder: - # TODO: change to an AnsibleParsingError so that the - # filename/line number can be reported in the error - raise AnsibleError("the module '%s' was not found in the list of loaded modules") + raise AnsibleError("the module '%s' was not found in the list of loaded modules" % action, object=self._task) if len(parts) > 1: args = self._get_args_from_action(action, ' '.join(parts[1:])) else: args = {} else: - # TODO: change to an AnsibleParsingError so that the - # filename/line number can be reported in the error - raise AnsibleError('module args must be specified as a dictionary or string') + raise AnsibleError('module args must be specified as a dictionary or string', object=self._task) return dict(action=action, args=args, delegate_to=delegate_to) @@ -277,7 +273,7 @@ class ModuleArgsParser(object): assert type(ds) == dict self._ds = ds - + # first we try to get the module 
action/args based on the # new-style format, where the module name is the key result = self._get_new_style_action() @@ -286,9 +282,7 @@ class ModuleArgsParser(object): # where 'action' or 'local_action' is the key result = self._get_old_style_action() if result is None: - # TODO: change to an AnsibleParsingError so that the - # filename/line number can be reported in the error - raise AnsibleError('no action specified for this task') + raise AnsibleError('no action specified for this task', object=self._task) # if the action is set to 'shell', we switch that to 'command' and # set the special parameter '_uses_shell' to true in the args dict @@ -302,11 +296,8 @@ class ModuleArgsParser(object): specified_delegate_to = self._get_delegate_to() if specified_delegate_to is not None: if result['delegate_to'] is not None: - # TODO: change to an AnsibleParsingError so that the - # filename/line number can be reported in the error raise AnsibleError('delegate_to cannot be used with local_action') else: result['delegate_to'] = specified_delegate_to return (result['action'], result['args'], result['delegate_to']) - diff --git a/v2/ansible/parsing/yaml/__init__.py b/v2/ansible/parsing/yaml/__init__.py index 730515089e..af492d2a73 100644 --- a/v2/ansible/parsing/yaml/__init__.py +++ b/v2/ansible/parsing/yaml/__init__.py @@ -4,4 +4,3 @@ from ansible.parsing.yaml.loader import AnsibleLoader def safe_load(stream): ''' implements yaml.safe_load(), except using our custom loader class ''' return load(stream, AnsibleLoader) - diff --git a/v2/ansible/playbook/attribute.py b/v2/ansible/playbook/attribute.py index a10da490c7..45d596fafe 100644 --- a/v2/ansible/playbook/attribute.py +++ b/v2/ansible/playbook/attribute.py @@ -31,4 +31,3 @@ class Attribute(object): class FieldAttribute(Attribute): pass - diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py index 6390f3432f..3c07dba29a 100644 --- a/v2/ansible/playbook/base.py +++ b/v2/ansible/playbook/base.py @@ -16,12 
+16,13 @@ # along with Ansible. If not, see . from ansible.playbook.attribute import Attribute, FieldAttribute +from ansible.parsing import load as ds_load class Base(object): def __init__(self): - - # each class knows attributes set upon it, see Task.py for example + + # each class knows attributes set upon it, see Task.py for example self._attributes = dict() for (name, value) in self.__class__.__dict__.iteritems(): @@ -39,6 +40,9 @@ class Base(object): assert ds is not None + if isinstance(ds, basestring) or isinstance(ds, file): + ds = ds_load(ds) + # we currently don't do anything with private attributes but may # later decide to filter them out of 'ds' here. @@ -59,14 +63,14 @@ class Base(object): else: if aname in ds: self._attributes[aname] = ds[aname] - + # return the constructed object self.validate() return self def validate(self): - ''' validation that is done at parse time, not load time ''' + ''' validation that is done at parse time, not load time ''' # walk all fields in the object for (name, attribute) in self.__dict__.iteritems(): @@ -76,9 +80,9 @@ class Base(object): if not name.startswith("_"): raise AnsibleError("FieldAttribute %s must start with _" % name) - + aname = name[1:] - + # run validator only if present method = getattr(self, '_validate_%s' % (prefix, aname), None) if method: @@ -87,9 +91,9 @@ class Base(object): def post_validate(self, runner_context): ''' we can't tell that everything is of the right type until we have - all the variables. Run basic types (from isa) as well as + all the variables. Run basic types (from isa) as well as any _post_validate_ functions. 
- ''' + ''' raise exception.NotImplementedError @@ -107,4 +111,3 @@ class Base(object): return self._attributes[needle] raise AttributeError("attribute not found: %s" % needle) - diff --git a/v2/ansible/playbook/task.py b/v2/ansible/playbook/task.py index 0bccedaa21..de75a0ec9c 100644 --- a/v2/ansible/playbook/task.py +++ b/v2/ansible/playbook/task.py @@ -27,7 +27,7 @@ from ansible.plugins import module_finder, lookup_finder class Task(Base): """ - A task is a language feature that represents a call to a module, with given arguments and other parameters. + A task is a language feature that represents a call to a module, with given arguments and other parameters. A handler is a subclass of a task. Usage: @@ -41,14 +41,14 @@ class Task(Base): # load_ and # validate_ # will be used if defined - # might be possible to define others - + # might be possible to define others + _args = FieldAttribute(isa='dict') _action = FieldAttribute(isa='string') - + _always_run = FieldAttribute(isa='bool') _any_errors_fatal = FieldAttribute(isa='bool') - _async = FieldAttribute(isa='int') + _async = FieldAttribute(isa='int') _connection = FieldAttribute(isa='string') _delay = FieldAttribute(isa='int') _delegate_to = FieldAttribute(isa='string') @@ -59,9 +59,9 @@ class Task(Base): _loop = FieldAttribute(isa='string', private=True) _loop_args = FieldAttribute(isa='list', private=True) _local_action = FieldAttribute(isa='string') - + # FIXME: this should not be a Task - _meta = FieldAttribute(isa='string') + _meta = FieldAttribute(isa='string') _name = FieldAttribute(isa='string') @@ -120,7 +120,7 @@ class Task(Base): def __repr__(self): ''' returns a human readable representation of the task ''' return "TASK: %s" % self.get_name() - + def _munge_loop(self, ds, new_ds, k, v): ''' take a lookup plugin name and store it correctly ''' @@ -128,9 +128,9 @@ class Task(Base): raise AnsibleError("duplicate loop in task: %s" % k) new_ds['loop'] = k new_ds['loop_args'] = v - + def munge(self, ds): 
- ''' + ''' tasks are especially complex arguments so need pre-processing. keep it short. ''' @@ -202,7 +202,7 @@ LEGACY = """ results['_module_name'] = k if isinstance(v, dict) and 'args' in ds: raise AnsibleError("can't combine args: and a dict for %s: in task %s" % (k, ds.get('name', "%s: %s" % (k, v)))) - results['_parameters'] = self._load_parameters(v) + results['_parameters'] = self._load_parameters(v) return results def _load_loop(self, ds, k, v): @@ -264,7 +264,7 @@ LEGACY = """ def _load_invalid_key(self, ds, k, v): ''' handle any key we do not recognize ''' - + raise AnsibleError("%s is not a legal parameter in an Ansible task or handler" % k) def _load_other_valid_key(self, ds, k, v): @@ -296,7 +296,7 @@ LEGACY = """ return self._load_invalid_key else: return self._load_other_valid_key - + # ================================================================================== # PRE-VALIDATION - expected to be uncommonly used, this checks for arguments that # are aliases of each other. Most everything else should be in the LOAD block @@ -311,7 +311,7 @@ LEGACY = """ # ================================================================================= # POST-VALIDATION: checks for internal inconsistency between fields # validation can result in an error but also corrections - + def _post_validate(self): ''' is the loaded datastructure sane? 
''' @@ -321,13 +321,13 @@ LEGACY = """ # incompatible items self._validate_conflicting_su_and_sudo() self._validate_conflicting_first_available_file_and_loookup() - + def _post_validate_fixed_name(self): '' construct a name for the task if no name was specified ''' flat_params = " ".join(["%s=%s" % (k,v) for k,v in self._parameters.iteritems()]) return = "%s %s" % (self._module_name, flat_params) - + def _post_validate_conflicting_su_and_sudo(self): ''' make sure su/sudo usage doesn't conflict ''' @@ -342,4 +342,3 @@ LEGACY = """ raise AnsibleError("with_(plugin), and first_available_file are mutually incompatible in a single task") """ - From 79f41d9c1a32d7b6b655a81ed3856f7fcf918145 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Wed, 8 Oct 2014 19:46:34 -0400 Subject: [PATCH 198/813] This makes the module args parser more functional to eliminate side effects and eliminiates the 'return None' error path to make sure we are handling more use cases. Some paths are not yet complete, including most likely handling of the 'raw' module. 
--- test/v2/parsing/test_mod_args.py | 22 +- v2/ansible/parsing/mod_args.py | 364 +++++++++++++------------------ v2/ansible/parsing/splitter.py | 3 +- 3 files changed, 165 insertions(+), 224 deletions(-) diff --git a/test/v2/parsing/test_mod_args.py b/test/v2/parsing/test_mod_args.py index 71a5e17e55..15d725956a 100644 --- a/test/v2/parsing/test_mod_args.py +++ b/test/v2/parsing/test_mod_args.py @@ -13,20 +13,17 @@ class TestModArgsDwim(unittest.TestCase): self.m = ModuleArgsParser() pass + def _debug(self, mod, args, to): + print "RETURNED module = %s" % mod + print " args = %s" % args + print " to = %s" % to + def tearDown(self): pass - def test_action_to_shell(self): - mod, args, to = self.m.parse(dict(action='shell echo hi')) - assert mod == 'command' - assert args == dict( - _raw_params = 'echo hi', - _uses_shell = True, - ) - assert to is None - def test_basic_shell(self): mod, args, to = self.m.parse(dict(shell='echo hi')) + self._debug(mod, args, to) assert mod == 'command' assert args == dict( _raw_params = 'echo hi', @@ -36,6 +33,7 @@ class TestModArgsDwim(unittest.TestCase): def test_basic_command(self): mod, args, to = self.m.parse(dict(command='echo hi')) + self._debug(mod, args, to) assert mod == 'command' assert args == dict( _raw_params = 'echo hi', @@ -44,6 +42,7 @@ class TestModArgsDwim(unittest.TestCase): def test_shell_with_modifiers(self): mod, args, to = self.m.parse(dict(shell='/bin/foo creates=/tmp/baz removes=/tmp/bleep')) + self._debug(mod, args, to) assert mod == 'command' assert args == dict( creates = '/tmp/baz', @@ -55,30 +54,35 @@ class TestModArgsDwim(unittest.TestCase): def test_normal_usage(self): mod, args, to = self.m.parse(dict(copy='src=a dest=b')) + self._debug(mod, args, to) assert mod == 'copy' assert args == dict(src='a', dest='b') assert to is None def test_complex_args(self): mod, args, to = self.m.parse(dict(copy=dict(src='a', dest='b'))) + self._debug(mod, args, to) assert mod == 'copy' assert args == dict(src='a', 
dest='b') assert to is None def test_action_with_complex(self): mod, args, to = self.m.parse(dict(action=dict(module='copy', src='a', dest='b'))) + self._debug(mod, args, to) assert mod == 'copy' assert args == dict(src='a', dest='b') assert to is None def test_action_with_complex_and_complex_args(self): mod, args, to = self.m.parse(dict(action=dict(module='copy', args=dict(src='a', dest='b')))) + self._debug(mod, args, to) assert mod == 'copy' assert args == dict(src='a', dest='b') assert to is None def test_local_action_string(self): mod, args, to = self.m.parse(dict(local_action='copy src=a dest=b')) + self._debug(mod, args, to) assert mod == 'copy' assert args == dict(src='a', dest='b') assert to is 'localhost' diff --git a/v2/ansible/parsing/mod_args.py b/v2/ansible/parsing/mod_args.py index c43f30c916..4a4a7c9666 100644 --- a/v2/ansible/parsing/mod_args.py +++ b/v2/ansible/parsing/mod_args.py @@ -50,254 +50,192 @@ class ModuleArgsParser(object): src: a dest: b - This class exists so other things don't have to remember how this - all works. Pass it "part1" and "part2", and the parse function - will tell you about the modules in a predictable way. + This class has some of the logic to canonicalize these into the form + + - module: + delegate_to: + args: + + Args may also be munged for certain shell command parameters. """ def __init__(self, task=None): - self._ds = None self._task = task - def _get_delegate_to(self): - ''' - Returns the value of the delegate_to key from the task datastructure, - or None if the value was not directly specified - ''' - return self._ds.get('delegate_to', None) - def _get_old_style_action(self): + def _split_module_string(self, str): ''' - Searches the datastructure for 'action:' or 'local_action:' keywords. - When local_action is found, the delegate_to value is set to the localhost - IP, otherwise delegate_to is left as None. 
- - Inputs: - - None - - Outputs: - - None (if neither keyword is found), or a dictionary containing: - action: - the module name to be executed - args: - a dictionary containing the arguments to the module - delegate_to: - None or 'localhost' + when module names are expressed like: + action: copy src=a dest=b + the first part of the string is the name of the module + and the rest are strings pertaining to the arguments. ''' - - # determine if this is an 'action' or 'local_action' - if 'action' in self._ds: - action_data = self._ds.get('action', '') - delegate_to = None - elif 'local_action' in self._ds: - action_data = self._ds.get('local_action', '') - delegate_to = 'localhost' + + tokens = str.split() + if len(tokens) > 1: + return (tokens[0], " ".join(tokens[1:])) else: - return None + return (tokens[0], "") - # now we get the arguments for the module, which may be a - # string of key=value pairs, a dictionary of values, or a - # dictionary with a special 'args:' value in it - if isinstance(action_data, dict): - action = self._get_specified_module(action_data) - args = dict() - if 'args' in action_data: - args = self._get_args_from_ds(action, action_data) - del action_data['args'] - other_args = action_data.copy() - # remove things we don't want in the args - if 'module' in other_args: - del other_args['module'] - args.update(other_args) - - elif isinstance(action_data, basestring): - action_data = action_data.strip() - if not action_data: - raise AnsibleError("when using 'action:' or 'local_action:', the module name must be specified", object=self._task) - else: - # split up the string based on spaces, where the first - # item specified must be a valid module name - parts = action_data.split(' ', 1) - action = parts[0] - if action not in module_finder: - raise AnsibleError("the module '%s' was not found in the list of loaded modules" % action, object=self._task) - if len(parts) > 1: - args = self._get_args_from_action(action, ' '.join(parts[1:])) - else: - 
args = {} + + def _handle_shell_weirdness(self, action, args): + ''' + given an action name and an args dictionary, return the + proper action name and args dictionary. This mostly is due + to shell/command being treated special and nothing else + ''' + + # don't handle non shell/command modules in this function + # TODO: in terms of the whole app, should 'raw' also fit here? + if action not in ['shell', 'command']: + return (action, args) + + new_args = {} + + # the shell module really is the command module with an additional + # parameter + if action == 'shell': + action = 'command' + new_args['_uses_shell'] = True + + # make sure the non-key-value params hop in the data + new_args['_raw_params'] = args['_raw_params'] + + return (action, new_args) + + def _normalize_parameters(self, thing, action=None): + ''' + arguments can be fuzzy. Deal with all the forms. + ''' + + args = dict() + + # how we normalize depends if we figured out what the module name is + # yet. If we have already figured it out, it's an 'old style' invocation. + # otherwise, it's not + + if action is not None: + args = self._normalize_old_style_args(thing) else: - raise AnsibleError('module args must be specified as a dictionary or string', object=self._task) + (action, args) = self._normalize_new_style_args(thing) - return dict(action=action, args=args, delegate_to=delegate_to) + # this can occasionally happen, simplify + if 'args' in args: + args = args['args'] - def _get_new_style_action(self): + return (action, args) + + def _normalize_old_style_args(self, thing): ''' - Searches the datastructure for 'module_name:', where the module_name is a - valid module loaded by the module_finder plugin. 
+ deals with fuzziness in old-style (action/local_action) module invocations + returns tuple of (module_name, dictionary_args) - Inputs: - - None - - Outputs: - - None (if no valid module is found), or a dictionary containing: - action: - the module name to be executed - args: - a dictionary containing the arguments to the module - delegate_to: - None + possible example inputs: + { 'local_action' : 'shell echo hi' } + { 'action' : 'shell echo hi' } + { 'local_action' : { 'module' : 'ec2', 'x' : 1, 'y': 2 }} + standardized outputs like: + ( 'command', { _raw_params: 'echo hi', _uses_shell: True } ''' - # for all keys in the datastructure, check to see if the value - # corresponds to a module found by the module_finder plugin - action = None - for item in self._ds: - if item in module_finder: - action = item - break + if isinstance(thing, dict): + # form is like: local_action: { module: 'xyz', x: 2, y: 3 } ... uncommon! + args = thing + elif isinstance(thing, basestring): + # form is like: local_action: copy src=a dest=b ... pretty common + args = parse_kv(thing) else: - # none of the keys matched a known module name - return None - - # now we get the arguments for the module, which may be a - # string of key=value pairs, a dictionary of values, or a - # dictionary with a special 'args:' value in it - action_data = self._ds.get(action, '') - if isinstance(action_data, dict): - args = dict() - if 'args' in action_data: - args = self._get_args_from_ds(action, action_data) - del action_data['args'] - other_args = action_data.copy() - # remove things we don't want in the args - if 'module' in other_args: - del other_args['module'] - args.update(other_args) - else: - args = self._get_args_from_action(action, action_data.strip()) - - return dict(action=action, args=args, delegate_to=None) - - def _get_args_from_ds(self, action, action_data): - ''' - Gets the module arguments from the 'args' value of the - action_data, when action_data is a dict. 
The value of - 'args' can be either a string or a dictionary itself, so - we use parse_kv() to split up the key=value pairs when - a string is found. - - Inputs: - - action_data: - a dictionary of values, which may or may not contain a - key named 'args' - - Outputs: - - a dictionary of values, representing the arguments to the - module action specified - ''' - args = action_data.get('args', {}).copy() - if isinstance(args, basestring): - if action in ('command', 'shell'): - args = parse_kv(args, check_raw=True) - else: - args = parse_kv(args) + raise AnsibleParsingError("unexpected parameter type in action: %s" % type(thing), obj=self._task) return args - def _get_args_from_action(self, action, action_data): + def _normalize_new_style_args(self, thing): ''' - Gets the module arguments from the action data when it is - specified as a string of key=value pairs. Special handling - is used for the command/shell modules, which allow free- - form syntax for the options. + deals with fuzziness in new style module invocations + accepting key=value pairs and dictionaries, and always returning dictionaries + returns tuple of (module_name, dictionary_args) - Inputs: - - action: - the module to be executed - - action_data: - a string of key=value pairs (and possibly free-form arguments) - - Outputs: - - A dictionary of values, representing the arguments to the - module action specified OR a string of key=value pairs (when - the module action is command or shell) + possible example inputs: + { 'shell' : 'echo hi' } + { 'ec2' : { 'region' : 'xyz' } + { 'ec2' : 'region=xyz' } + standardized outputs like: + ('ec2', { region: 'xyz'} ) ''' - tokens = action_data.split() - if len(tokens) == 0: - return {} + + action = None + args = None + + if isinstance(thing, dict): + # form is like: copy: { src: 'a', dest: 'b' } ... 
common for structured (aka "complex") args + thing = thing.copy() + if 'module' in thing: + action = thing['module'] + args = thing.copy() + del args['module'] + + elif isinstance(thing, basestring): + # form is like: copy: src=a dest=b ... common shorthand throughout ansible + (action, args) = self._split_module_string(thing) + args = parse_kv(args) + else: - joined = " ".join(tokens) - if action in ('command', 'shell'): - return parse_kv(joined, check_raw=True) - else: - return parse_kv(joined) + # need a dict or a string, so giving up + raise AnsibleParsingError("unexpected parameter type in action: %s" % type(thing), obj=self._task) - def _get_specified_module(self, action_data): - ''' - gets the module if specified directly in the arguments, ie: - - action: - module: foo - - Inputs: - - action_data: - a dictionary of values, which may or may not contain the - key 'module' - - Outputs: - - a string representing the module specified in the data, or - None if that key was not found - ''' - return action_data.get('module') + return (action, args) def parse(self, ds): ''' Given a task in one of the supported forms, parses and returns returns the action, arguments, and delegate_to values for the - task. - - Inputs: - - ds: - a dictionary datastructure representing the task as parsed - from a YAML file - - Outputs: - - A tuple containing 3 values: - action: - the action (module name) to be executed - args: - the args for the action - delegate_to: - the delegate_to option (which may be None, if no delegate_to - option was specified and this is not a local_action) + task, dealing with all sorts of levels of fuzziness. 
''' assert type(ds) == dict - self._ds = ds + thing = None - # first we try to get the module action/args based on the - # new-style format, where the module name is the key - result = self._get_new_style_action() - if result is None: - # failing that, we resort to checking for the old-style syntax, - # where 'action' or 'local_action' is the key - result = self._get_old_style_action() - if result is None: - raise AnsibleError('no action specified for this task', object=self._task) + action = None + delegate_to = None + args = dict() - # if the action is set to 'shell', we switch that to 'command' and - # set the special parameter '_uses_shell' to true in the args dict - if result['action'] == 'shell': - result['action'] = 'command' - result['args']['_uses_shell'] = True + if 'action' in ds: - # finally, we check to see if a delegate_to value was specified - # in the task datastructure (and raise an error for local_action, - # which essentially means we're delegating to localhost) - specified_delegate_to = self._get_delegate_to() - if specified_delegate_to is not None: - if result['delegate_to'] is not None: - raise AnsibleError('delegate_to cannot be used with local_action') - else: - result['delegate_to'] = specified_delegate_to + # an old school 'action' statement + thing = ds['action'] + delegate_to = None + action, args = self._normalize_parameters(thing) - return (result['action'], result['args'], result['delegate_to']) + elif 'local_action' in ds: + + # local_action is similar but also implies a delegate_to + if action is not None: + raise AnsibleError("action and local_action are mutually exclusive") + thing = ds.get('local_action', '') + delegate_to = 'localhost' + action, args = self._normalize_parameters(thing) + + else: + + # module: is the more new-style invocation + if action is not None: + raise AnsibleError("conflicting action statements") + + # walk the input dictionary to see we recognize a module name + for (item, value) in ds.iteritems(): + if 
item in module_finder: + # finding more than one module name is a problem + if action is not None: + raise AnsibleError("conflicting action statements") + action = item + thing = value + action, args = self._normalize_parameters(value, action=action) + + # if we didn't see any module in the task at all, it's not a task really + if action is None: + raise AnsibleParserError("no action detected in task", obj=self._task) + + # shell modules require special handling + (action, args) = self._handle_shell_weirdness(action, args) + + return (action, args, delegate_to) diff --git a/v2/ansible/parsing/splitter.py b/v2/ansible/parsing/splitter.py index 48367ca5d9..17946f663b 100644 --- a/v2/ansible/parsing/splitter.py +++ b/v2/ansible/parsing/splitter.py @@ -56,7 +56,7 @@ def parse_kv(args, check_raw=False): # them to a special option for use later by the shell/command module if len(raw_params) > 0: options['_raw_params'] = ' '.join(raw_params) - + return options def _get_quote_state(token, quote_char): @@ -239,4 +239,3 @@ def unquote(data): if is_quoted(data): return data[1:-1] return data - From 98ed69213ba389f4a7018644a3025ba92e02627e Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Wed, 8 Oct 2014 19:53:09 -0400 Subject: [PATCH 199/813] Error -> ParserError --- v2/ansible/parsing/mod_args.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/v2/ansible/parsing/mod_args.py b/v2/ansible/parsing/mod_args.py index 4a4a7c9666..2e5b401601 100644 --- a/v2/ansible/parsing/mod_args.py +++ b/v2/ansible/parsing/mod_args.py @@ -17,7 +17,7 @@ import exceptions -from ansible.errors import AnsibleError +from ansible.errors import AnsibleParserError from ansible.plugins import module_finder from ansible.parsing.splitter import parse_kv @@ -70,7 +70,7 @@ class ModuleArgsParser(object): the first part of the string is the name of the module and the rest are strings pertaining to the arguments. 
''' - + tokens = str.split() if len(tokens) > 1: return (tokens[0], " ".join(tokens[1:])) @@ -210,7 +210,7 @@ class ModuleArgsParser(object): # local_action is similar but also implies a delegate_to if action is not None: - raise AnsibleError("action and local_action are mutually exclusive") + raise AnsibleParserError("action and local_action are mutually exclusive", obj=self._task) thing = ds.get('local_action', '') delegate_to = 'localhost' action, args = self._normalize_parameters(thing) @@ -219,14 +219,14 @@ class ModuleArgsParser(object): # module: is the more new-style invocation if action is not None: - raise AnsibleError("conflicting action statements") + raise AnsibleParserError("conflicting action statements", obj=self._task) # walk the input dictionary to see we recognize a module name for (item, value) in ds.iteritems(): if item in module_finder: # finding more than one module name is a problem if action is not None: - raise AnsibleError("conflicting action statements") + raise AnsibleParserError("conflicting action statements", obj=self._task) action = item thing = value action, args = self._normalize_parameters(value, action=action) From 17d0022384c9f878b348fa65d6de88b76acf4894 Mon Sep 17 00:00:00 2001 From: Lorin Hochstein Date: Wed, 8 Oct 2014 20:55:49 -0400 Subject: [PATCH 200/813] Typo: mo[u]dules Fix typo: moudules -> modules --- docsite/rst/developing_modules.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index 3d430326c9..5a92ec8ddc 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -18,7 +18,7 @@ The directory "./library", alongside your top level playbooks, is also automatic added as a search directory. Should you develop an interesting Ansible module, consider sending a pull request to the -`moudules-extras project `_. There's also a core +`modules-extras project `_. 
There's also a core repo for more established and widely used modules. "Extras" modules may be promoted to core periodically, but there's no fundamental difference in the end - both ship with ansible, all in one package, regardless of how you acquire ansible. From 48a308a87cf42cd1a88a808e67772460dcb22ac6 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 9 Oct 2014 02:47:41 -0400 Subject: [PATCH 201/813] Allow both old-style and new-style role dependencies to be valid. Fixes #9173 --- lib/ansible/utils/__init__.py | 38 ++++++++++++++++----- test/units/TestUtils.py | 64 +++++++++++++++++++++++++++++++++++ 2 files changed, 94 insertions(+), 8 deletions(-) diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index 15f387b55a..952e8537d0 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -416,14 +416,36 @@ def role_spec_parse(role_spec): def role_yaml_parse(role): - if 'github.com' in role["src"] and 'http' in role["src"] and '+' not in role["src"] and not role["src"].endswith('.tar.gz'): - role["src"] = "git+" + role["src"] - if '+' in role["src"]: - (scm, src) = role["src"].split('+') - role["scm"] = scm - role["src"] = src - if 'name' not in role: - role["name"] = repo_url_to_role_name(role["src"]) + if 'role' in role: + # Old style: {role: "galaxy.role,version,name", other_vars: "here" } + role_info = role_spec_parse(role['role']) + if isinstance(role_info, dict): + # Warning: Slight change in behaviour here. name may be being + # overloaded. Previously, name was only a parameter to the role. + # Now it is both a parameter to the role and the name that + # ansible-galaxy will install under on the local system. 
+ if 'name' in role and 'name' in role_info: + del role_info['name'] + role.update(role_info) + else: + # New style: { src: 'galaxy.role,version,name', other_vars: "here" } + if 'github.com' in role["src"] and 'http' in role["src"] and '+' not in role["src"] and not role["src"].endswith('.tar.gz'): + role["src"] = "git+" + role["src"] + + if '+' in role["src"]: + (scm, src) = role["src"].split('+') + role["scm"] = scm + role["src"] = src + + if 'name' not in role: + role["name"] = repo_url_to_role_name(role["src"]) + + if 'version' not in role: + role['version'] = '' + + if 'scm' not in role: + role['scm'] = None + return role diff --git a/test/units/TestUtils.py b/test/units/TestUtils.py index 07b52a9d38..af10a1e055 100644 --- a/test/units/TestUtils.py +++ b/test/units/TestUtils.py @@ -841,3 +841,67 @@ class TestUtils(unittest.TestCase): for (spec, result) in tests: self.assertEqual(ansible.utils.role_spec_parse(spec), result) + def test_role_yaml_parse(self): + tests = ( + ( + # Old style + { + 'role': 'debops.elasticsearch', + 'name': 'elks' + }, + { + 'role': 'debops.elasticsearch', + 'name': 'elks', + 'scm': None, + 'src': 'debops.elasticsearch', + 'version': '', + } + ), + ( + { + 'role': 'debops.elasticsearch,1.0,elks', + 'my_param': 'foo' + }, + { + 'role': 'debops.elasticsearch,1.0,elks', + 'name': 'elks', + 'scm': None, + 'src': 'debops.elasticsearch', + 'version': '1.0', + 'my_param': 'foo', + } + ), + ( + { + 'role': 'debops.elasticsearch,1.0', + 'my_param': 'foo' + }, + { + 'role': 'debops.elasticsearch,1.0', + 'name': 'debops.elasticsearch', + 'scm': None, + 'src': 'debops.elasticsearch', + 'version': '1.0', + 'my_param': 'foo', + } + ), + # New style + ( + { + 'src': 'debops.elasticsearch', + 'name': 'elks', + 'my_param': 'foo' + }, + { + 'name': 'elks', + 'scm': None, + 'src': 'debops.elasticsearch', + 'version': '', + 'my_param': 'foo' + } + ), + ) + + for (role, result) in tests: + self.assertEqual(ansible.utils.role_yaml_parse(role), result) + 
From e91e30cd5e457ef702ac7de1e07698dcfb1f9336 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 9 Oct 2014 03:11:46 -0400 Subject: [PATCH 202/813] Fix unspecified role versions. Fixes #9236 --- bin/ansible-galaxy | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/ansible-galaxy b/bin/ansible-galaxy index adc93166bf..9018e6c205 100755 --- a/bin/ansible-galaxy +++ b/bin/ansible-galaxy @@ -755,7 +755,7 @@ def execute_install(args, options, parser): continue role_versions = api_fetch_role_related(api_server, 'versions', role_data['id']) - if "version" not in role: + if "version" not in role or role['version'] == '': # convert the version names to LooseVersion objects # and sort them to get the latest version. If there # are no versions in the list, we'll grab the head From 0953322b23779de9536f342be2949314fc012e12 Mon Sep 17 00:00:00 2001 From: Pete Smith Date: Thu, 9 Oct 2014 17:04:45 +0100 Subject: [PATCH 203/813] Remove no-op string declaration --- bin/ansible | 20 -------------------- 1 file changed, 20 deletions(-) diff --git a/bin/ansible b/bin/ansible index 792b259144..b82a18d3d3 100755 --- a/bin/ansible +++ b/bin/ansible @@ -90,26 +90,6 @@ class Cli(object): pattern = args[0] - """ - inventory_manager = inventory.Inventory(options.inventory) - if options.subset: - inventory_manager.subset(options.subset) - hosts = inventory_manager.list_hosts(pattern) - if len(hosts) == 0: - callbacks.display("No hosts matched", stderr=True) - sys.exit(0) - - if options.listhosts: - for host in hosts: - callbacks.display(' %s' % host) - sys.exit(0) - - if ((options.module_name == 'command' or options.module_name == 'shell') - and not options.module_args): - callbacks.display("No argument passed to %s module" % options.module_name, color='red', stderr=True) - sys.exit(1) - """ - sshpass = None sudopass = None su_pass = None From 383a44a4627294ac14e66f2d7bb1c235e78189f1 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 8 Oct 2014 14:50:42 -0500 
Subject: [PATCH 204/813] Updating submodule pointer for core --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index cb69744bce..5af8d55b03 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit cb69744bcee4b4217d83b4a30006635ba69e2aa0 +Subproject commit 5af8d55b0365a5c3278c43b5424bf5f2ddf897b8 From a50332fc8a4b6d68a3131d3a479f75b1cf76dcea Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 9 Oct 2014 12:52:09 -0500 Subject: [PATCH 205/813] Adding v2 error line support, and tests --- test/v2/errors/__init__.py | 18 +++++++++++++ test/v2/errors/test_errors.py | 33 +++++++++++++++++++++++ v2/ansible/errors/__init__.py | 43 +++++++++++++++++++++++++----- v2/ansible/parsing/yaml/objects.py | 3 +++ 4 files changed, 91 insertions(+), 6 deletions(-) create mode 100644 test/v2/errors/__init__.py create mode 100644 test/v2/errors/test_errors.py diff --git a/test/v2/errors/__init__.py b/test/v2/errors/__init__.py new file mode 100644 index 0000000000..674334b15a --- /dev/null +++ b/test/v2/errors/__init__.py @@ -0,0 +1,18 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ + diff --git a/test/v2/errors/test_errors.py b/test/v2/errors/test_errors.py new file mode 100644 index 0000000000..874195e476 --- /dev/null +++ b/test/v2/errors/test_errors.py @@ -0,0 +1,33 @@ +# TODO: header + +import unittest + +from mock import mock_open, patch + +from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject +from ansible.errors import AnsibleError + +class TestErrors(unittest.TestCase): + + def setUp(self): + self.message = 'this is the error message' + + def tearDown(self): + pass + + def test_basic_error(self): + e = AnsibleError(self.message) + assert e.message == self.message + + def test_error_with_object(self): + obj = AnsibleBaseYAMLObject() + obj._data_source = 'foo.yml' + obj._line_number = 1 + obj._column_number = 1 + + m = mock_open() + m.return_value.readlines.return_value = ['this is line 1\n', 'this is line 2\n', 'this is line 3\n'] + with patch('__builtin__.open', m): + e = AnsibleError(self.message, obj) + + assert e.message == 'this is the error message\nThe error occurred on line 1 of the file foo.yml:\nthis is line 1\n^' diff --git a/v2/ansible/errors/__init__.py b/v2/ansible/errors/__init__.py index ae629ba4bb..54406ef6c2 100644 --- a/v2/ansible/errors/__init__.py +++ b/v2/ansible/errors/__init__.py @@ -15,15 +15,46 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+import os +from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject + class AnsibleError(Exception): - def __init__(self, message, object=None): - self.message = message - self.object = object + def __init__(self, message, obj=None): + self._obj = obj + if isinstance(self._obj, AnsibleBaseYAMLObject): + extended_error = self._get_extended_error() + if extended_error: + self.message = '%s\n%s' % (message, extended_error) + else: + self.message = message - # TODO: nice __repr__ message that includes the line number if the object - # it was constructed with had the line number + def __repr__(self): + return self.message - # TODO: tests for the line number functionality + def _get_line_from_file(self, filename, line_number): + with open(filename, 'r') as f: + lines = f.readlines() + if line_number < len(lines): + return lines[line_number] + return None + + def _get_extended_error(self): + error_message = '' + + try: + (src_file, line_number, col_number) = self._obj.get_position_info() + error_message += 'The error occurred on line %d of the file %s:\n' % (line_number, src_file) + if src_file not in ('', ''): + responsible_line = self._get_line_from_file(src_file, line_number - 1) + if responsible_line: + error_message += responsible_line + error_message += (' ' * (col_number-1)) + '^' + except IOError: + error_message += '\n(could not open file to display line)' + except IndexError: + error_message += '\n(specified line no longer in file, maybe it changed?)' + + return error_message class AnsibleParserError(AnsibleError): ''' something was detected early that is wrong about a playbook or data file ''' diff --git a/v2/ansible/parsing/yaml/objects.py b/v2/ansible/parsing/yaml/objects.py index cc9fc445d2..5870ea8cbe 100644 --- a/v2/ansible/parsing/yaml/objects.py +++ b/v2/ansible/parsing/yaml/objects.py @@ -8,6 +8,9 @@ class AnsibleBaseYAMLObject(object): _line_number = None _column_number = None + def get_position_info(self): + return (self._data_source, 
self._line_number, self._column_number) + class AnsibleMapping(AnsibleBaseYAMLObject, dict): ''' sub class for dictionaries ''' pass From 2a0d18b0926e809687f1d12909b57adfaaf93ca2 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 9 Oct 2014 16:45:08 -0500 Subject: [PATCH 206/813] Insert newlines correctly when splitting literal blocks Fixes #9274 --- lib/ansible/module_utils/splitter.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/lib/ansible/module_utils/splitter.py b/lib/ansible/module_utils/splitter.py index 41b337773f..899fa8cd92 100644 --- a/lib/ansible/module_utils/splitter.py +++ b/lib/ansible/module_utils/splitter.py @@ -76,7 +76,7 @@ def split_args(args): do_decode = True except UnicodeDecodeError: do_decode = False - items = args.strip().split('\n') + items = args.split('\n') # iterate over the tokens, and reassemble any that may have been # split on a space inside a jinja2 block. @@ -138,7 +138,10 @@ def split_args(args): spacer = ' ' params[-1] = "%s%s%s" % (params[-1], spacer, token) else: - params[-1] = "%s\n%s" % (params[-1], token) + spacer = '' + if not params[-1].endswith('\n') and idx == 0: + spacer = '\n' + params[-1] = "%s%s%s" % (params[-1], spacer, token) appended = True # if the number of paired block tags is not the same, the depth has changed, so we calculate that here @@ -170,7 +173,7 @@ def split_args(args): # one item (meaning we split on newlines), add a newline back here # to preserve the original structure if len(items) > 1 and itemidx != len(items) - 1 and not line_continuation: - if not params[-1].endswith('\n'): + if not params[-1].endswith('\n') or item == '': params[-1] += '\n' # always clear the line continuation flag From 545f8166398f5a95748fcfb4de064ebb1e134824 Mon Sep 17 00:00:00 2001 From: Will Thames Date: Fri, 10 Oct 2014 10:13:46 +1000 Subject: [PATCH 207/813] Use https for git submodules It's much more likely that people can use the https protocol than the git protocol (many 
firewalls block the latter) --- .gitmodules | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.gitmodules b/.gitmodules index f33b632fb6..6ab000c816 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,9 +1,9 @@ [submodule "lib/ansible/modules/core"] path = lib/ansible/modules/core - url = git://github.com/ansible/ansible-modules-core.git + url = https://github.com/ansible/ansible-modules-core.git [submodule "lib/ansible/modules/extras"] path = lib/ansible/modules/extras - url = git://github.com/ansible/ansible-modules-extras.git + url = https://github.com/ansible/ansible-modules-extras.git [submodule "v2/ansible/modules/core"] path = v2/ansible/modules/core url = https://github.com/ansible/ansible-modules-core.git From d19fe8d95d8e30a4e6c0dab319f2f76302e78a15 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 10 Oct 2014 01:18:18 -0500 Subject: [PATCH 208/813] Fetch vars for host directly when calculating the delegated user This fixes the case in which the delegated to host may not be in the specified hosts list, in which cases facts/vars for the host were not available in the injected hostvars. This also fixes the inventory variable fetching function, so that an unknown host raises a proper error as opposed to a NoneType exception. 
Fixes #8224 --- lib/ansible/inventory/__init__.py | 5 ++++- lib/ansible/runner/__init__.py | 10 ++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index 972042c9b1..7d279b7b4d 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -437,7 +437,10 @@ class Inventory(object): def get_variables(self, hostname, update_cached=False, vault_password=None): - return self.get_host(hostname).get_variables() + host = self.get_host(hostname) + if not host: + raise Exception("host not found: %s" % hostname) + return host.get_variables() def get_host_variables(self, hostname, update_cached=False, vault_password=None): diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 831e5d10e2..41e5d6054d 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -367,6 +367,16 @@ class Runner(object): if inject['hostvars'][host].get('ansible_ssh_user'): # user for delegate host in inventory thisuser = inject['hostvars'][host].get('ansible_ssh_user') + else: + # look up the variables for the host directly from inventory + try: + host_vars = self.inventory.get_variables(host, vault_password=self.vault_pass) + if 'ansible_ssh_user' in host_vars: + thisuser = host_vars['ansible_ssh_user'] + except Exception, e: + # the hostname was not found in the inventory, so + # we just ignore this and try the next method + pass if thisuser is None and self.remote_user: # user defined by play/runner From 2b9e235ffd8f7563779979a78f125d0ab061517b Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 10 Oct 2014 10:28:07 -0400 Subject: [PATCH 209/813] Rework distribution fact checking to be a little less heinous and add support for Mandriva. 
Fixes #9282 --- lib/ansible/module_utils/facts.py | 104 ++++++++++++++++++++---------- 1 file changed, 69 insertions(+), 35 deletions(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index f9d2fdbf33..04a25ab9d6 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -85,16 +85,18 @@ class Facts(object): _I386RE = re.compile(r'i[3456]86') # For the most part, we assume that platform.dist() will tell the truth. # This is the fallback to handle unknowns or exceptions - OSDIST_DICT = { '/etc/redhat-release': 'RedHat', - '/etc/vmware-release': 'VMwareESX', - '/etc/openwrt_release': 'OpenWrt', - '/etc/system-release': 'OtherLinux', - '/etc/alpine-release': 'Alpine', - '/etc/release': 'Solaris', - '/etc/arch-release': 'Archlinux', - '/etc/SuSE-release': 'SuSE', - '/etc/gentoo-release': 'Gentoo', - '/etc/os-release': 'Debian' } + OSDIST_LIST = ( ('/etc/redhat-release', 'RedHat'), + ('/etc/vmware-release', 'VMwareESX'), + ('/etc/openwrt_release', 'OpenWrt'), + ('/etc/system-release', 'OtherLinux'), + ('/etc/alpine-release', 'Alpine'), + ('/etc/release', 'Solaris'), + ('/etc/arch-release', 'Archlinux'), + ('/etc/SuSE-release', 'SuSE'), + ('/etc/os-release', 'SuSE'), + ('/etc/gentoo-release', 'Gentoo'), + ('/etc/os-release', 'Debian'), + ('/etc/lsb-release', 'Mandriva') ) SELINUX_MODE_DICT = { 1: 'enforcing', 0: 'permissive', -1: 'disabled' } # A list of dicts. 
If there is a platform with more than one @@ -230,6 +232,8 @@ class Facts(object): FreeBSD = 'FreeBSD', HPUX = 'HP-UX' ) + # TODO: Rewrite this to use the function references in a dict pattern + # as it's much cleaner than this massive if-else if self.facts['system'] == 'AIX': self.facts['distribution'] = 'AIX' rc, out, err = module.run_command("/usr/bin/oslevel") @@ -268,54 +272,84 @@ class Facts(object): self.facts['distribution_major_version'] = dist[1].split('.')[0] or 'NA' self.facts['distribution_release'] = dist[2] or 'NA' # Try to handle the exceptions now ... - for (path, name) in Facts.OSDIST_DICT.items(): + for (path, name) in Facts.OSDIST_LIST: if os.path.exists(path) and os.path.getsize(path) > 0: - if self.facts['distribution'] == 'Fedora': - pass + if self.facts['distribution'] in ('Fedora', ): + # Once we determine the value is one of these distros + # we trust the values are always correct + break elif name == 'RedHat': data = get_file_content(path) if 'Red Hat' in data: self.facts['distribution'] = name else: self.facts['distribution'] = data.split()[0] + break elif name == 'OtherLinux': data = get_file_content(path) if 'Amazon' in data: self.facts['distribution'] = 'Amazon' self.facts['distribution_version'] = data.split()[-1] + break elif name == 'OpenWrt': data = get_file_content(path) if 'OpenWrt' in data: self.facts['distribution'] = name - version = re.search('DISTRIB_RELEASE="(.*)"', data) - if version: - self.facts['distribution_version'] = version.groups()[0] - release = re.search('DISTRIB_CODENAME="(.*)"', data) - if release: - self.facts['distribution_release'] = release.groups()[0] + version = re.search('DISTRIB_RELEASE="(.*)"', data) + if version: + self.facts['distribution_version'] = version.groups()[0] + release = re.search('DISTRIB_CODENAME="(.*)"', data) + if release: + self.facts['distribution_release'] = release.groups()[0] + break elif name == 'Alpine': data = get_file_content(path) - self.facts['distribution'] = 'Alpine' + 
self.facts['distribution'] = name self.facts['distribution_version'] = data + break elif name == 'Solaris': data = get_file_content(path).split('\n')[0] - ora_prefix = '' - if 'Oracle Solaris' in data: - data = data.replace('Oracle ','') - ora_prefix = 'Oracle ' - self.facts['distribution'] = data.split()[0] - self.facts['distribution_version'] = data.split()[1] - self.facts['distribution_release'] = ora_prefix + data + if 'Solaris' in data: + ora_prefix = '' + if 'Oracle Solaris' in data: + data = data.replace('Oracle ','') + ora_prefix = 'Oracle ' + self.facts['distribution'] = data.split()[0] + self.facts['distribution_version'] = data.split()[1] + self.facts['distribution_release'] = ora_prefix + data + break elif name == 'SuSE': - data = get_file_content(path).splitlines() - for line in data: - if '=' in line: - self.facts['distribution_release'] = line.split('=')[1].strip() + data = get_file_content(path) + if 'suse' in data.lower(): + if path == '/etc/os-release': + release = re.search("PRETTY_NAME=[^(]+ \(?([^)]+?)\)", data) + if release: + self.facts['distribution_release'] = release.groups()[0] + break + elif path == '/etc/SuSE-release': + data = data.splitlines() + release = re.search('CODENAME *= *([^\n]+)\n', data) + if release: + self.facts['distribution_release'] = release.groups()[0].strip() + break elif name == 'Debian': - data = get_file_content(path).split('\n')[0] - release = re.search("PRETTY_NAME.+ \(?([^ ]+?)\)?\"", data) - if release: - self.facts['distribution_release'] = release.groups()[0] + data = get_file_content(path) + if 'Debian' in data: + release = re.search("PRETTY_NAME=[^(]+ \(?([^)]+?)\)", data) + if release: + self.facts['distribution_release'] = release.groups()[0] + break + elif name == 'Mandriva': + data = get_file_content(path) + if 'Mandriva' in data: + version = re.search('DISTRIB_RELEASE="(.*)"', data) + if version: + self.facts['distribution_version'] = version.groups()[0] + release = 
re.search('DISTRIB_CODENAME="(.*)"', data) + if release: + self.facts['distribution_release'] = release.groups()[0] + self.facts['distribution'] = name + break else: self.facts['distribution'] = name From b4f84c5d9a8f1e33025aa81cd277c8faa06a5ed5 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 10 Oct 2014 12:11:08 -0400 Subject: [PATCH 210/813] Update the core and extras submodules --- lib/ansible/modules/extras | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 8a4f07eecd..681db4ce2c 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 8a4f07eecd2bb877f51b7b04b5352efa6076cce5 +Subproject commit 681db4ce2c534eca4fca57c5a83a5be8035c257d From 2eda9a3a47731961ae4797036cae8613a04667b8 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 10 Oct 2014 12:23:58 -0500 Subject: [PATCH 211/813] Fixing item loop when undefined variable errors occur because of missing attributes Fixes a case where the variable 'foo' may exist, but the with_items loop was used on something like 'foo.results', where 'results' was not a valid attribute of 'foo'. Prior to this patch, conditionals were not evaluated until later, meaning there was no opportunity to allow a test to skip the task or item based on it being undefined. 
--- lib/ansible/runner/__init__.py | 21 +++++++++++++--- .../roles/test_conditionals/tasks/main.yml | 25 +++++++++++++++++++ .../roles/test_conditionals/vars/main.yml | 13 ++++++++++ 3 files changed, 56 insertions(+), 3 deletions(-) create mode 100644 test/integration/roles/test_conditionals/vars/main.yml diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 41e5d6054d..75a8a0766a 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -663,9 +663,24 @@ class Runner(object): if os.path.exists(filesdir): basedir = filesdir - items_terms = self.module_vars.get('items_lookup_terms', '') - items_terms = template.template(basedir, items_terms, inject) - items = utils.plugins.lookup_loader.get(items_plugin, runner=self, basedir=basedir).run(items_terms, inject=inject) + try: + items_terms = self.module_vars.get('items_lookup_terms', '') + items_terms = template.template(basedir, items_terms, inject) + items = utils.plugins.lookup_loader.get(items_plugin, runner=self, basedir=basedir).run(items_terms, inject=inject) + except errors.AnsibleUndefinedVariable, e: + if 'has no attribute' in str(e): + # the undefined variable was an attribute of a variable that does + # exist, so try and run this through the conditional check to see + # if the user wanted to skip something on being undefined + if utils.check_conditional(self.conditional, self.basedir, inject, fail_on_undefined=True): + # the conditional check passed, so we have to fail here + raise + else: + # the conditional failed, so we skip this task + result = utils.jsonify(dict(changed=False, skipped=True)) + self.callbacks.on_skipped(host, None) + return ReturnData(host=host, result=result) + # strip out any jinja2 template syntax within # the data returned by the lookup plugin items = utils._clean_data_struct(items, from_remote=True) diff --git a/test/integration/roles/test_conditionals/tasks/main.yml b/test/integration/roles/test_conditionals/tasks/main.yml 
index f2aa0068c6..136e9501ea 100644 --- a/test/integration/roles/test_conditionals/tasks/main.yml +++ b/test/integration/roles/test_conditionals/tasks/main.yml @@ -267,3 +267,28 @@ that: - "result.changed" +- name: test a with_items loop using a variable with a missing attribute + debug: var=item + with_items: foo.results + when: foo is defined and 'results' in foo + register: result + +- name: assert the task was skipped + assert: + that: + - "'skipped' in result" + - result.skipped + +- name: test a with_items loop skipping a single item + debug: var=item + with_items: items.results + when: item != 'b' + register: result + +- debug: var=result + +- name: assert only a single item was skipped + assert: + that: + - result.results|length == 3 + - result.results[1].skipped diff --git a/test/integration/roles/test_conditionals/vars/main.yml b/test/integration/roles/test_conditionals/vars/main.yml new file mode 100644 index 0000000000..dddcfc5998 --- /dev/null +++ b/test/integration/roles/test_conditionals/vars/main.yml @@ -0,0 +1,13 @@ +--- +# foo is a dictionary that will be used to check that +# a conditional passes a with_items loop on a variable +# with a missing attribute (ie. 
foo.results) +foo: + bar: a + +items: + results: + - a + - b + - c + From fe5a7bcabfa6aa19f9349b2d5e4264c695c2af27 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 10 Oct 2014 12:45:33 -0500 Subject: [PATCH 212/813] Fixing bad variable naming choices --- test/integration/roles/test_conditionals/tasks/main.yml | 6 +++--- test/integration/roles/test_conditionals/vars/main.yml | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/test/integration/roles/test_conditionals/tasks/main.yml b/test/integration/roles/test_conditionals/tasks/main.yml index 136e9501ea..3d3c2ec9de 100644 --- a/test/integration/roles/test_conditionals/tasks/main.yml +++ b/test/integration/roles/test_conditionals/tasks/main.yml @@ -269,8 +269,8 @@ - name: test a with_items loop using a variable with a missing attribute debug: var=item - with_items: foo.results - when: foo is defined and 'results' in foo + with_items: cond_bad_attribute.results + when: cond_bad_attribute is defined and 'results' in cond_bad_attribute register: result - name: assert the task was skipped @@ -281,7 +281,7 @@ - name: test a with_items loop skipping a single item debug: var=item - with_items: items.results + with_items: cond_list_of_items.results when: item != 'b' register: result diff --git a/test/integration/roles/test_conditionals/vars/main.yml b/test/integration/roles/test_conditionals/vars/main.yml index dddcfc5998..a6ecf62f53 100644 --- a/test/integration/roles/test_conditionals/vars/main.yml +++ b/test/integration/roles/test_conditionals/vars/main.yml @@ -2,10 +2,10 @@ # foo is a dictionary that will be used to check that # a conditional passes a with_items loop on a variable # with a missing attribute (ie. 
foo.results) -foo: +cond_bad_attribute: bar: a -items: +cond_list_of_items: results: - a - b From ccd559137f199e37f4061e1946ba9667c2f869fa Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Fri, 10 Oct 2014 16:47:55 -0400 Subject: [PATCH 213/813] add missing headers, rm the subtree of ansible/inventory as we wish to overhaul it. --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- test/v2/errors/test_errors.py | 17 ++++++++- test/v2/parsing/__init__.py | 17 ++++++++- test/v2/parsing/test_general.py | 17 ++++++++- test/v2/parsing/test_mod_args.py | 17 ++++++++- test/v2/parsing/yaml/__init__.py | 17 ++++++++- test/v2/parsing/yaml/test_yaml.py | 17 ++++++++- test/v2/playbook/__init__.py | 18 ++++++++-- test/v2/playbook/test_task.py | 17 ++++++++- v2/ansible/__init__.py | 17 ++++++++- v2/ansible/inventory/__init__.py | 26 -------------- v2/ansible/inventory/group.py | 44 ------------------------ v2/ansible/inventory/host.py | 27 --------------- v2/ansible/inventory/loaders/__init__.py | 17 --------- v2/ansible/inventory/loaders/dir.py | 17 --------- v2/ansible/inventory/loaders/ini.py | 17 --------- v2/ansible/inventory/loaders/script.py | 17 --------- v2/ansible/inventory/pattern.py | 36 ------------------- v2/ansible/modules/__init__.py | 18 ++++++++-- v2/ansible/parsing/__init__.py | 17 ++++++++- v2/scripts/ansible | 16 +++++++++ 22 files changed, 194 insertions(+), 216 deletions(-) delete mode 100644 v2/ansible/inventory/__init__.py delete mode 100644 v2/ansible/inventory/group.py delete mode 100644 v2/ansible/inventory/host.py delete mode 100644 v2/ansible/inventory/loaders/__init__.py delete mode 100644 v2/ansible/inventory/loaders/dir.py delete mode 100644 v2/ansible/inventory/loaders/ini.py delete mode 100644 v2/ansible/inventory/loaders/script.py delete mode 100644 v2/ansible/inventory/pattern.py diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 5af8d55b03..cb69744bce 160000 --- a/lib/ansible/modules/core +++ 
b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 5af8d55b0365a5c3278c43b5424bf5f2ddf897b8 +Subproject commit cb69744bcee4b4217d83b4a30006635ba69e2aa0 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 681db4ce2c..8a4f07eecd 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 681db4ce2c534eca4fca57c5a83a5be8035c257d +Subproject commit 8a4f07eecd2bb877f51b7b04b5352efa6076cce5 diff --git a/test/v2/errors/test_errors.py b/test/v2/errors/test_errors.py index 874195e476..762aaceb6d 100644 --- a/test/v2/errors/test_errors.py +++ b/test/v2/errors/test_errors.py @@ -1,4 +1,19 @@ -# TODO: header +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . import unittest diff --git a/test/v2/parsing/__init__.py b/test/v2/parsing/__init__.py index 8b13789179..1f84012e01 100644 --- a/test/v2/parsing/__init__.py +++ b/test/v2/parsing/__init__.py @@ -1 +1,16 @@ - +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . diff --git a/test/v2/parsing/test_general.py b/test/v2/parsing/test_general.py index a6277edfc6..e932d3e448 100644 --- a/test/v2/parsing/test_general.py +++ b/test/v2/parsing/test_general.py @@ -1,4 +1,19 @@ -# TODO: header +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . import unittest from ansible.parsing import load diff --git a/test/v2/parsing/test_mod_args.py b/test/v2/parsing/test_mod_args.py index 15d725956a..5fa60e1aca 100644 --- a/test/v2/parsing/test_mod_args.py +++ b/test/v2/parsing/test_mod_args.py @@ -1,4 +1,19 @@ -# TODO: header +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . from ansible.parsing.mod_args import ModuleArgsParser import unittest diff --git a/test/v2/parsing/yaml/__init__.py b/test/v2/parsing/yaml/__init__.py index 44026bdff0..1f84012e01 100644 --- a/test/v2/parsing/yaml/__init__.py +++ b/test/v2/parsing/yaml/__init__.py @@ -1 +1,16 @@ -# TODO: header +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . diff --git a/test/v2/parsing/yaml/test_yaml.py b/test/v2/parsing/yaml/test_yaml.py index 00394f6091..6be75032ae 100644 --- a/test/v2/parsing/yaml/test_yaml.py +++ b/test/v2/parsing/yaml/test_yaml.py @@ -1,4 +1,19 @@ -# TODO: header +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . import unittest diff --git a/test/v2/playbook/__init__.py b/test/v2/playbook/__init__.py index ec86ee6101..1f84012e01 100644 --- a/test/v2/playbook/__init__.py +++ b/test/v2/playbook/__init__.py @@ -1,2 +1,16 @@ -# TODO: header - +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . diff --git a/test/v2/playbook/test_task.py b/test/v2/playbook/test_task.py index e6ffe2ece3..76ad0e2447 100644 --- a/test/v2/playbook/test_task.py +++ b/test/v2/playbook/test_task.py @@ -1,4 +1,19 @@ -# TODO: header +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . from ansible.playbook.task import Task import unittest diff --git a/v2/ansible/__init__.py b/v2/ansible/__init__.py index 44026bdff0..1f84012e01 100644 --- a/v2/ansible/__init__.py +++ b/v2/ansible/__init__.py @@ -1 +1,16 @@ -# TODO: header +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . diff --git a/v2/ansible/inventory/__init__.py b/v2/ansible/inventory/__init__.py deleted file mode 100644 index 29c9a8324d..0000000000 --- a/v2/ansible/inventory/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -# (c) 2012-2014, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -class Inventory(object): - def __init__(self): - pass - - def get_hosts(self): - return [] - - def get_groups(self): - return [] diff --git a/v2/ansible/inventory/group.py b/v2/ansible/inventory/group.py deleted file mode 100644 index 9a277178f4..0000000000 --- a/v2/ansible/inventory/group.py +++ /dev/null @@ -1,44 +0,0 @@ -# (c) 2012-2014, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -class Group(object): - def __init__(self, name, hosts=[]): - self.name = name - self.hosts = hosts - self.parents = [] - self.children = [] - - def get_vars(self): - return dict() - - def get_hosts(self): - return self.hosts - - def get_direct_subgroups(self): - direct_children = [] - for child in self.children: - direct_children.append(child.name) - return direct_children - - def get_all_subgroups(self): - all_children = [] - for child in self.children: - all_children.extend(child.get_all_subgroups()) - return all_children - - def get_parent_groups(self): - return self.parents diff --git a/v2/ansible/inventory/host.py b/v2/ansible/inventory/host.py deleted file mode 100644 index 390f0f8744..0000000000 --- a/v2/ansible/inventory/host.py +++ /dev/null @@ -1,27 +0,0 @@ -# (c) 2012-2014, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the 
Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -class Host(object): - def __init__(self, name): - self.name = name - self.groups = [] - - def get_vars(self): - return dict() - - def get_groups(self): - return self.groups diff --git a/v2/ansible/inventory/loaders/__init__.py b/v2/ansible/inventory/loaders/__init__.py deleted file mode 100644 index d6c11ffa74..0000000000 --- a/v2/ansible/inventory/loaders/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# (c) 2012-2014, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- diff --git a/v2/ansible/inventory/loaders/dir.py b/v2/ansible/inventory/loaders/dir.py deleted file mode 100644 index d6c11ffa74..0000000000 --- a/v2/ansible/inventory/loaders/dir.py +++ /dev/null @@ -1,17 +0,0 @@ -# (c) 2012-2014, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - diff --git a/v2/ansible/inventory/loaders/ini.py b/v2/ansible/inventory/loaders/ini.py deleted file mode 100644 index d6c11ffa74..0000000000 --- a/v2/ansible/inventory/loaders/ini.py +++ /dev/null @@ -1,17 +0,0 @@ -# (c) 2012-2014, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- diff --git a/v2/ansible/inventory/loaders/script.py b/v2/ansible/inventory/loaders/script.py deleted file mode 100644 index d6c11ffa74..0000000000 --- a/v2/ansible/inventory/loaders/script.py +++ /dev/null @@ -1,17 +0,0 @@ -# (c) 2012-2014, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - diff --git a/v2/ansible/inventory/pattern.py b/v2/ansible/inventory/pattern.py deleted file mode 100644 index dd7068bdbc..0000000000 --- a/v2/ansible/inventory/pattern.py +++ /dev/null @@ -1,36 +0,0 @@ -# (c) 2012-2014, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -from v2.inventory import Host, Group - -class HostPattern(object): - def __init__(self, pattern): - self.pattern = pattern - - def match(thing): - ''' - return a list of matches - ''' - - matches = [] - if isinstance(thing, Host): - # simple match against a single host - pass - elif isinstance(thing, Group): - # match against the list of hosts in the group - pass - return matches diff --git a/v2/ansible/modules/__init__.py b/v2/ansible/modules/__init__.py index ec86ee6101..1f84012e01 100644 --- a/v2/ansible/modules/__init__.py +++ b/v2/ansible/modules/__init__.py @@ -1,2 +1,16 @@ -# TODO: header - +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . diff --git a/v2/ansible/parsing/__init__.py b/v2/ansible/parsing/__init__.py index 9e6329bab2..232d95b834 100644 --- a/v2/ansible/parsing/__init__.py +++ b/v2/ansible/parsing/__init__.py @@ -1,4 +1,19 @@ -# TODO: header +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . from ansible.errors import AnsibleError, AnsibleInternalError diff --git a/v2/scripts/ansible b/v2/scripts/ansible index e69de29bb2..1f84012e01 100644 --- a/v2/scripts/ansible +++ b/v2/scripts/ansible @@ -0,0 +1,16 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
From 428674cb90ead171f4cd352225d8ba089bb4afcc Mon Sep 17 00:00:00 2001 From: Adam Young Date: Fri, 10 Oct 2014 21:26:17 -0400 Subject: [PATCH 214/813] Inventory Plugin to get hostgroups from FreeIPA --- plugins/inventory/freeipa.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100755 plugins/inventory/freeipa.py diff --git a/plugins/inventory/freeipa.py b/plugins/inventory/freeipa.py new file mode 100755 index 0000000000..ae4ad59fd2 --- /dev/null +++ b/plugins/inventory/freeipa.py @@ -0,0 +1,19 @@ +#!/usr/bin/python + +import json +from ipalib import api +api.bootstrap(context='cli') +api.finalize() +api.Backend.xmlclient.connect() +inventory = {} +hostvars={} +meta={} +result =api.Command.hostgroup_find()['result'] +for hostgroup in result: + inventory[hostgroup['cn'][0]] = { 'hosts': [host for host in hostgroup['member_host']]} + for host in hostgroup['member_host']: + hostvars[host] = {} +inventory['_meta'] = {'hostvars': hostvars} +inv_string = json.dumps( inventory) +print inv_string + From c752f012f7c22398b6e4eefce687950eeac066df Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sat, 11 Oct 2014 22:22:10 -0400 Subject: [PATCH 215/813] Start converting asserts to self.assert*() so we get better error messages --- test/v2/errors/test_errors.py | 4 ++-- test/v2/playbook/test_task.py | 14 +++++++------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/test/v2/errors/test_errors.py b/test/v2/errors/test_errors.py index 762aaceb6d..ca5ecc1493 100644 --- a/test/v2/errors/test_errors.py +++ b/test/v2/errors/test_errors.py @@ -32,7 +32,7 @@ class TestErrors(unittest.TestCase): def test_basic_error(self): e = AnsibleError(self.message) - assert e.message == self.message + self.assertEqual(e.message, self.message) def test_error_with_object(self): obj = AnsibleBaseYAMLObject() @@ -45,4 +45,4 @@ class TestErrors(unittest.TestCase): with patch('__builtin__.open', m): e = AnsibleError(self.message, obj) - assert e.message == 'this is 
the error message\nThe error occurred on line 1 of the file foo.yml:\nthis is line 1\n^' + self.assertEqual(e.message, 'this is the error message\nThe error occurred on line 1 of the file foo.yml:\nthis is line 1\n^') diff --git a/test/v2/playbook/test_task.py b/test/v2/playbook/test_task.py index 76ad0e2447..b6f869cd21 100644 --- a/test/v2/playbook/test_task.py +++ b/test/v2/playbook/test_task.py @@ -50,20 +50,20 @@ class TestTask(unittest.TestCase): def test_load_task_simple(self): t = Task.load(basic_shell_task) assert t is not None - assert t.name == basic_shell_task['name'] - assert t.action == 'command' - assert t.args == dict(_raw_params='echo hi', _uses_shell=True) + self.assertEqual(t.name, basic_shell_task['name']) + self.assertEqual(t.action, 'command') + self.assertEqual(t.args, dict(_raw_params='echo hi', _uses_shell=True)) def test_load_task_kv_form(self): t = Task.load(kv_shell_task) - print "task action is %s" % t.action - assert t.action == 'command' - assert t.args == dict(_raw_params='echo hi', _uses_shell=True) + print("task action is %s" % t.action) + self.assertEqual(t.action, 'command') + self.assertEqual(t.args, dict(_raw_params='echo hi', _uses_shell=True)) def test_task_auto_name(self): assert 'name' not in kv_shell_task t = Task.load(kv_shell_task) - #assert t.name == 'shell echo hi' + #self.assertEqual(t.name, 'shell echo hi') def test_task_auto_name_with_role(self): pass From 25ac4ccef3125a4fefd287a4003cbc52d788de8c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sun, 12 Oct 2014 03:09:37 -0400 Subject: [PATCH 216/813] Update other asserts that have a py2.6 unittest method --- test/v2/parsing/test_general.py | 22 ++++++------- test/v2/parsing/test_mod_args.py | 52 +++++++++++++++---------------- test/v2/parsing/yaml/test_yaml.py | 28 ++++++++--------- 3 files changed, 51 insertions(+), 51 deletions(-) diff --git a/test/v2/parsing/test_general.py b/test/v2/parsing/test_general.py index e932d3e448..8afb75acf8 100644 --- 
a/test/v2/parsing/test_general.py +++ b/test/v2/parsing/test_general.py @@ -63,12 +63,12 @@ class TestGeneralParsing(unittest.TestCase): } """ output = load(input) - assert output['asdf'] == '1234' - assert output['jkl'] == 5678 + self.assertEqual(output['asdf'], '1234') + self.assertEqual(output['jkl'], 5678) def parse_json_from_file(self): output = load(MockFile(dict(a=1,b=2,c=3)),'json') - assert ouput == dict(a=1,b=2,c=3) + self.assertEqual(ouput, dict(a=1,b=2,c=3)) def parse_yaml_from_dict(self): input = """ @@ -76,12 +76,12 @@ class TestGeneralParsing(unittest.TestCase): jkl: 5678 """ output = load(input) - assert output['asdf'] == '1234' - assert output['jkl'] == 5678 + self.assertEqual(output['asdf'], '1234') + self.assertEqual(output['jkl'], 5678) def parse_yaml_from_file(self): output = load(MockFile(dict(a=1,b=2,c=3),'yaml')) - assert output == dict(a=1,b=2,c=3) + self.assertEqual(output, dict(a=1,b=2,c=3)) def parse_fail(self): input = """ @@ -89,12 +89,12 @@ class TestGeneralParsing(unittest.TestCase): *** NOT VALID """ - self.failUnlessRaises(load(input), AnsibleParserError) + self.assertRaises(load(input), AnsibleParserError) def parse_fail_from_file(self): - self.failUnlessRaises(load(MockFile(None,'fail')), AnsibleParserError) + self.assertRaises(load(MockFile(None,'fail')), AnsibleParserError) def parse_fail_invalid_type(self): - self.failUnlessRaises(3000, AnsibleParsingError) - self.failUnlessRaises(dict(a=1,b=2,c=3), AnsibleParserError) - + self.assertRaises(3000, AnsibleParsingError) + self.assertRaises(dict(a=1,b=2,c=3), AnsibleParserError) + diff --git a/test/v2/parsing/test_mod_args.py b/test/v2/parsing/test_mod_args.py index 5fa60e1aca..e077a55d3b 100644 --- a/test/v2/parsing/test_mod_args.py +++ b/test/v2/parsing/test_mod_args.py @@ -39,65 +39,65 @@ class TestModArgsDwim(unittest.TestCase): def test_basic_shell(self): mod, args, to = self.m.parse(dict(shell='echo hi')) self._debug(mod, args, to) - assert mod == 'command' - assert args == 
dict( - _raw_params = 'echo hi', - _uses_shell = True, - ) + self.assertEqual(mod, 'command') + self.assertEqual(args, dict( + _raw_params = 'echo hi', + _uses_shell = True, + )) assert to is None def test_basic_command(self): mod, args, to = self.m.parse(dict(command='echo hi')) self._debug(mod, args, to) - assert mod == 'command' - assert args == dict( - _raw_params = 'echo hi', - ) + self.assertEqual(mod, 'command') + self.assertEqual(args, dict( + _raw_params = 'echo hi', + )) assert to is None def test_shell_with_modifiers(self): mod, args, to = self.m.parse(dict(shell='/bin/foo creates=/tmp/baz removes=/tmp/bleep')) self._debug(mod, args, to) - assert mod == 'command' - assert args == dict( - creates = '/tmp/baz', - removes = '/tmp/bleep', - _raw_params = '/bin/foo', - _uses_shell = True, - ) + self.assertEqual(mod, 'command') + self.assertEqual(args, dict( + creates = '/tmp/baz', + removes = '/tmp/bleep', + _raw_params = '/bin/foo', + _uses_shell = True, + )) assert to is None def test_normal_usage(self): mod, args, to = self.m.parse(dict(copy='src=a dest=b')) self._debug(mod, args, to) - assert mod == 'copy' - assert args == dict(src='a', dest='b') + self.assertEqual(mod, 'copy') + self.assertEqual(args, dict(src='a', dest='b')) assert to is None def test_complex_args(self): mod, args, to = self.m.parse(dict(copy=dict(src='a', dest='b'))) self._debug(mod, args, to) - assert mod == 'copy' - assert args == dict(src='a', dest='b') + self.assertEqual(mod, 'copy') + self.assertEqual(args, dict(src='a', dest='b')) assert to is None def test_action_with_complex(self): mod, args, to = self.m.parse(dict(action=dict(module='copy', src='a', dest='b'))) self._debug(mod, args, to) - assert mod == 'copy' - assert args == dict(src='a', dest='b') + self.assertEqual(mod, 'copy') + self.assertEqual(args, dict(src='a', dest='b')) assert to is None def test_action_with_complex_and_complex_args(self): mod, args, to = self.m.parse(dict(action=dict(module='copy', 
args=dict(src='a', dest='b')))) self._debug(mod, args, to) - assert mod == 'copy' - assert args == dict(src='a', dest='b') + self.assertEqual(mod, 'copy') + self.assertEqual(args, dict(src='a', dest='b')) assert to is None def test_local_action_string(self): mod, args, to = self.m.parse(dict(local_action='copy src=a dest=b')) self._debug(mod, args, to) - assert mod == 'copy' - assert args == dict(src='a', dest='b') + self.assertEqual(mod, 'copy') + self.assertEqual(args, dict(src='a', dest='b')) assert to is 'localhost' diff --git a/test/v2/parsing/yaml/test_yaml.py b/test/v2/parsing/yaml/test_yaml.py index 6be75032ae..90a4bc3a84 100644 --- a/test/v2/parsing/yaml/test_yaml.py +++ b/test/v2/parsing/yaml/test_yaml.py @@ -74,23 +74,23 @@ class TestSafeLoad(unittest.TestCase): def test_safe_load(self): # test basic dictionary res = safe_load(data1) - assert type(res) == AnsibleMapping - assert res._line_number == 2 + self.assertEqual(type(res), AnsibleMapping) + self.assertEqual(res._line_number, 2) # test data with multiple dictionaries res = safe_load(data2) - assert len(res) == 4 - assert res[0]._line_number == 2 - assert res[1]._line_number == 3 - assert res[2]._line_number == 5 - assert res[3]._line_number == 8 + self.assertEqual(len(res), 4) + self.assertEqual(res[0]._line_number, 2) + self.assertEqual(res[1]._line_number, 3) + self.assertEqual(res[2]._line_number, 5) + self.assertEqual(res[3]._line_number, 8) # test data with multiple sub-dictionaries res = safe_load(data3) - assert len(res) == 3 - assert res[0]._line_number == 2 - assert res[1]._line_number == 7 - assert res[2]._line_number == 9 - assert res[0]['key1']._line_number == 3 - assert res[1]['key2']._line_number == 8 - assert res[2]['list1'][0]._line_number == 10 + self.assertEqual(len(res), 3) + self.assertEqual(res[0]._line_number, 2) + self.assertEqual(res[1]._line_number, 7) + self.assertEqual(res[2]._line_number, 9) + self.assertEqual(res[0]['key1']._line_number, 3) + 
self.assertEqual(res[1]['key2']._line_number, 8) + self.assertEqual(res[2]['list1'][0]._line_number, 10) From 1e255a72a81a0b0f2c5affc83a3be07ab378dff4 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sun, 12 Oct 2014 10:40:48 -0400 Subject: [PATCH 217/813] Python3 fix --- test/v2/parsing/test_general.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test/v2/parsing/test_general.py b/test/v2/parsing/test_general.py index 8afb75acf8..9373968cc8 100644 --- a/test/v2/parsing/test_general.py +++ b/test/v2/parsing/test_general.py @@ -21,7 +21,9 @@ from ansible.errors import AnsibleParserError import json -class MockFile(file): +from io import FileIO + +class MockFile(FileIO): def __init__(self, ds, method='json'): self.ds = ds From 692d31d2215914260fbaf6d2be87afa637ea2ab8 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sun, 12 Oct 2014 10:49:42 -0400 Subject: [PATCH 218/813] Python3 fix --- test/v2/parsing/test_mod_args.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/v2/parsing/test_mod_args.py b/test/v2/parsing/test_mod_args.py index e077a55d3b..ddd5c6f6d4 100644 --- a/test/v2/parsing/test_mod_args.py +++ b/test/v2/parsing/test_mod_args.py @@ -29,9 +29,9 @@ class TestModArgsDwim(unittest.TestCase): pass def _debug(self, mod, args, to): - print "RETURNED module = %s" % mod - print " args = %s" % args - print " to = %s" % to + print("RETURNED module = {0}".format(mod)) + print(" args = {0}".format(args)) + print(" to = {0}".format(to)) def tearDown(self): pass From 617352a38e93ea291a80d383079ea6e5cd93798e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sun, 12 Oct 2014 18:54:01 -0400 Subject: [PATCH 219/813] Python3 test target and use coverage for all v2 tests. 
--- Makefile | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index b659e044f8..b7c84a51fc 100644 --- a/Makefile +++ b/Makefile @@ -86,16 +86,20 @@ MOCK_CFG ?= NOSETESTS ?= nosetests +NOSETESTS3 ?= nosetests-3.3 + ######################################################## all: clean python tests: - PYTHONPATH=./lib $(NOSETESTS) -d -w test/units -v + PYTHONPATH=./lib $(NOSETESTS) -d -w test/units -v # Could do: --with-coverage --cover-package=ansible newtests: - PYTHONPATH=./v2:./lib $(NOSETESTS) -d -w test/v2 -v + PYTHONPATH=./v2:./lib $(NOSETESTS) -d -w test/v2 -v --with-coverage --cover-package=ansible +newtests-py3: + PYTHONPATH=./v2:./lib $(NOSETESTS3) -d -w test/v2 -v --with-coverage --cover-package=ansible authors: sh hacking/authors.sh From 10113e6a67fdc31ebbed62db95f4e26a9b32c040 Mon Sep 17 00:00:00 2001 From: Matt Coddington Date: Mon, 13 Oct 2014 16:07:06 -0400 Subject: [PATCH 220/813] python2.4 compatibility issue with urlparse --- lib/ansible/module_utils/known_hosts.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/known_hosts.py b/lib/ansible/module_utils/known_hosts.py index 3406e2c7da..c997596fd4 100644 --- a/lib/ansible/module_utils/known_hosts.py +++ b/lib/ansible/module_utils/known_hosts.py @@ -72,12 +72,14 @@ def get_fqdn(repo_url): if 'ssh' not in parts[0] and 'git' not in parts[0]: # don't try and scan a hostname that's not ssh return None + # parts[1] will be empty on python2.4 on ssh:// or git:// urls, so + # ensure we actually have a parts[1] before continuing. if parts[1] != '': result = parts[1] if ":" in result: result = result.split(":")[0] - if "@" in result: - result = result.split("@", 1)[1] + if "@" in result: + result = result.split("@", 1)[1] return result From 2845f0c45564e304042e25175ea75444cbb8a884 Mon Sep 17 00:00:00 2001 From: Baptiste Mathus Date: Mon, 13 Oct 2014 22:10:39 +0200 Subject: [PATCH 221/813] Typo: recieve -> receive. 
--- hacking/templates/rst.j2 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hacking/templates/rst.j2 b/hacking/templates/rst.j2 index f02b32fd11..db9b184d19 100644 --- a/hacking/templates/rst.j2 +++ b/hacking/templates/rst.j2 @@ -115,7 +115,7 @@ Should you have a question rather than a bug report, inquries are welcome on the Documentation updates for this module can also be edited directly by submitting a pull request to the module source code, just look for the "DOCUMENTATION" block in the source tree. -This is a "core" ansible module, which means it will recieve slightly higher priority for all requests than those in the "extras" repos. +This is a "core" ansible module, which means it will receive slightly higher priority for all requests than those in the "extras" repos. {% else %} @@ -130,7 +130,7 @@ Should you have a question rather than a bug report, inquries are welcome on the Documentation updates for this module can also be edited directly by submitting a pull request to the module source code, just look for the "DOCUMENTATION" block in the source tree. -Note that this module is designated a "extras" module. Non-core modules are still fully usuable, but may recieve slightly lower response rates for issues and pull requests. +Note that this module is designated a "extras" module. Non-core modules are still fully usuable, but may receive slightly lower response rates for issues and pull requests. Popular "extras" modules may be promoted to core modules over time. {% endif %} From 1917906dd6c279b8b1281e5d6c99fa4a886b6dfe Mon Sep 17 00:00:00 2001 From: Marcin Praczko Date: Mon, 13 Oct 2014 22:57:03 +0200 Subject: [PATCH 222/813] Fix detecting distribution release on OpenSuSE Ansible raised exception during parsering /etc/SuSE-release file. Regular expresion should use string instead of list. 
Fix tested on OpenSuse 13.1 --- lib/ansible/module_utils/facts.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 04a25ab9d6..8ec1b4f7c7 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -328,10 +328,11 @@ class Facts(object): break elif path == '/etc/SuSE-release': data = data.splitlines() - release = re.search('CODENAME *= *([^\n]+)\n', data) - if release: - self.facts['distribution_release'] = release.groups()[0].strip() - break + for line in data: + release = re.search('CODENAME *= *([^\n]+)', line) + if release: + self.facts['distribution_release'] = release.groups()[0].strip() + break elif name == 'Debian': data = get_file_content(path) if 'Debian' in data: From 959520115cb5e806de2f4c929f04c87d683f4e7a Mon Sep 17 00:00:00 2001 From: David Kirchner Date: Tue, 14 Oct 2014 14:43:28 +0000 Subject: [PATCH 223/813] Added _meta hostvars key to gce.py plugin per the discussion in issue #9291. 
--- plugins/inventory/gce.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/plugins/inventory/gce.py b/plugins/inventory/gce.py index c8eeb43ab1..9bb8d84065 100755 --- a/plugins/inventory/gce.py +++ b/plugins/inventory/gce.py @@ -229,9 +229,14 @@ class GceInventory(object): def group_instances(self): '''Group all instances''' groups = {} + meta = {} + meta["hostvars"] = {} + for node in self.driver.list_nodes(): name = node.name + meta["hostvars"][name] = self.node_to_dict(node) + zone = node.extra['zone'].name if groups.has_key(zone): groups[zone].append(name) else: groups[zone] = [name] @@ -259,6 +264,9 @@ class GceInventory(object): stat = 'status_%s' % status.lower() if groups.has_key(stat): groups[stat].append(name) else: groups[stat] = [name] + + groups["_meta"] = meta + return groups def json_format_dict(self, data, pretty=False): From 36c75d6c5e1e8fdf1fbdf969b176a15914adcf5e Mon Sep 17 00:00:00 2001 From: "szk.kentaro" Date: Wed, 15 Oct 2014 01:16:49 +0900 Subject: [PATCH 224/813] Add --pretty option for gce inventory plugin --- plugins/inventory/gce.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/plugins/inventory/gce.py b/plugins/inventory/gce.py index c8eeb43ab1..c98e6b585b 100755 --- a/plugins/inventory/gce.py +++ b/plugins/inventory/gce.py @@ -103,11 +103,13 @@ class GceInventory(object): # Just display data for specific host if self.args.host: print self.json_format_dict(self.node_to_dict( - self.get_instance(self.args.host))) + self.get_instance(self.args.host)), + pretty=self.args.pretty) sys.exit(0) # Otherwise, assume user wants all instances grouped - print(self.json_format_dict(self.group_instances())) + print(self.json_format_dict(self.group_instances(), + pretty=self.args.pretty)) sys.exit(0) def get_gce_driver(self): @@ -187,6 +189,8 @@ class GceInventory(object): help='List instances (default: True)') parser.add_argument('--host', action='store', help='Get all information about an instance') + 
parser.add_argument('--pretty', action='store_true', default=False, + help='Pretty format (default: False)') self.args = parser.parse_args() From 39ce134c2c3843156e9e0bf560c3afd23b5799bc Mon Sep 17 00:00:00 2001 From: Emilien Kenler Date: Wed, 15 Oct 2014 11:12:35 +0900 Subject: [PATCH 225/813] Add integration test for the shell module * it ensures that it's possible to define options in the multiline block --- .../roles/test_command_shell/tasks/main.yml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/test/integration/roles/test_command_shell/tasks/main.yml b/test/integration/roles/test_command_shell/tasks/main.yml index 8a15c99957..3c273260c1 100644 --- a/test/integration/roles/test_command_shell/tasks/main.yml +++ b/test/integration/roles/test_command_shell/tasks/main.yml @@ -198,3 +198,19 @@ that: - "shell_result6.changed" - "shell_result6.stdout == '32f3cc201b69ed8afa3902b80f554ca8\nthis is a second line'" + +- name: execute a shell command using a literal multiline block with arguments in it + shell: | + executable=/bin/bash + creates={{output_dir_test | expanduser}}/afile.txt + echo "test" + register: shell_result7 + +- name: assert the multiline shell command with arguments in it run as expected + assert: + that: + - "shell_result7.changed" + - "shell_result7.stdout == 'test'" + +- name: remove the previously created file + file: path={{output_dir_test}}/afile.txt state=absent From e19f3f8a5c0ed5ad9bf54f75aa05f960dbddcf99 Mon Sep 17 00:00:00 2001 From: Igor Vuk Date: Wed, 15 Oct 2014 21:08:38 +0200 Subject: [PATCH 226/813] Typo: greatful -> grateful --- hacking/templates/rst.j2 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hacking/templates/rst.j2 b/hacking/templates/rst.j2 index db9b184d19..648a444f9f 100644 --- a/hacking/templates/rst.j2 +++ b/hacking/templates/rst.j2 @@ -109,7 +109,7 @@ This is a Core Module This source of this module is hosted on GitHub in the `ansible-modules-core `_ repo. 
-If you believe you have found a bug in this module, and are already running the latest stable or development version of Ansible, first look in the `issue tracker at github.com/ansible/ansible-modules-core `_ to see if a bug has already been filed. If not, we would be greatful if you would file one. +If you believe you have found a bug in this module, and are already running the latest stable or development version of Ansible, first look in the `issue tracker at github.com/ansible/ansible-modules-core `_ to see if a bug has already been filed. If not, we would be grateful if you would file one. Should you have a question rather than a bug report, inquries are welcome on the `ansible-project google group `_ or on Ansible's "#ansible" channel, located on irc.freenode.net. Development oriented topics should instead use the similar `ansible-devel google group `_. @@ -124,7 +124,7 @@ This is an Extras Module This source of this module is hosted on GitHub in the `ansible-modules-extras `_ repo. -If you believe you have found a bug in this module, and are already running the latest stable or development version of Ansible, first look in the `issue tracker at github.com/ansible/ansible-modules-extras `_ to see if a bug has already been filed. If not, we would be greatful if you would file one. +If you believe you have found a bug in this module, and are already running the latest stable or development version of Ansible, first look in the `issue tracker at github.com/ansible/ansible-modules-extras `_ to see if a bug has already been filed. If not, we would be grateful if you would file one. Should you have a question rather than a bug report, inquries are welcome on the `ansible-project google group ` or on Ansible's "#ansible" channel, located on irc.freenode.net. Development oriented topics should instead use the similar `ansible-devel google group `_. 
From 187da236b417e734967d97de0a1da0fa6fb20fa6 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Wed, 15 Oct 2014 12:16:41 -0700 Subject: [PATCH 227/813] Add new class stubs --- .../{runner/__init__.py => executor/HostLog.py} | 6 ------ v2/ansible/executor/HostLogManager.py | 16 ++++++++++++++++ v2/ansible/executor/PlaybookExecutor.py | 16 ++++++++++++++++ v2/ansible/executor/TaskExecutor.py | 16 ++++++++++++++++ v2/ansible/executor/TaskQueueManager.py | 16 ++++++++++++++++ v2/ansible/executor/TaskResult.py | 16 ++++++++++++++++ v2/ansible/executor/TemplateEngine.py | 16 ++++++++++++++++ v2/ansible/executor/VariableCache.py | 16 ++++++++++++++++ v2/ansible/executor/VariableManager.py | 16 ++++++++++++++++ 9 files changed, 128 insertions(+), 6 deletions(-) rename v2/ansible/{runner/__init__.py => executor/HostLog.py} (88%) create mode 100644 v2/ansible/executor/HostLogManager.py create mode 100644 v2/ansible/executor/PlaybookExecutor.py create mode 100644 v2/ansible/executor/TaskExecutor.py create mode 100644 v2/ansible/executor/TaskQueueManager.py create mode 100644 v2/ansible/executor/TaskResult.py create mode 100644 v2/ansible/executor/TemplateEngine.py create mode 100644 v2/ansible/executor/VariableCache.py create mode 100644 v2/ansible/executor/VariableManager.py diff --git a/v2/ansible/runner/__init__.py b/v2/ansible/executor/HostLog.py similarity index 88% rename from v2/ansible/runner/__init__.py rename to v2/ansible/executor/HostLog.py index b8cc0a9219..1f84012e01 100644 --- a/v2/ansible/runner/__init__.py +++ b/v2/ansible/executor/HostLog.py @@ -14,9 +14,3 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
- -#from v2.inventory import Host -#from v2.playbook import Task - -class Runner(object): - pass diff --git a/v2/ansible/executor/HostLogManager.py b/v2/ansible/executor/HostLogManager.py new file mode 100644 index 0000000000..1f84012e01 --- /dev/null +++ b/v2/ansible/executor/HostLogManager.py @@ -0,0 +1,16 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . diff --git a/v2/ansible/executor/PlaybookExecutor.py b/v2/ansible/executor/PlaybookExecutor.py new file mode 100644 index 0000000000..1f84012e01 --- /dev/null +++ b/v2/ansible/executor/PlaybookExecutor.py @@ -0,0 +1,16 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
diff --git a/v2/ansible/executor/TaskExecutor.py b/v2/ansible/executor/TaskExecutor.py new file mode 100644 index 0000000000..1f84012e01 --- /dev/null +++ b/v2/ansible/executor/TaskExecutor.py @@ -0,0 +1,16 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . diff --git a/v2/ansible/executor/TaskQueueManager.py b/v2/ansible/executor/TaskQueueManager.py new file mode 100644 index 0000000000..1f84012e01 --- /dev/null +++ b/v2/ansible/executor/TaskQueueManager.py @@ -0,0 +1,16 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
diff --git a/v2/ansible/executor/TaskResult.py b/v2/ansible/executor/TaskResult.py new file mode 100644 index 0000000000..1f84012e01 --- /dev/null +++ b/v2/ansible/executor/TaskResult.py @@ -0,0 +1,16 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . diff --git a/v2/ansible/executor/TemplateEngine.py b/v2/ansible/executor/TemplateEngine.py new file mode 100644 index 0000000000..1f84012e01 --- /dev/null +++ b/v2/ansible/executor/TemplateEngine.py @@ -0,0 +1,16 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
diff --git a/v2/ansible/executor/VariableCache.py b/v2/ansible/executor/VariableCache.py new file mode 100644 index 0000000000..1f84012e01 --- /dev/null +++ b/v2/ansible/executor/VariableCache.py @@ -0,0 +1,16 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . diff --git a/v2/ansible/executor/VariableManager.py b/v2/ansible/executor/VariableManager.py new file mode 100644 index 0000000000..1f84012e01 --- /dev/null +++ b/v2/ansible/executor/VariableManager.py @@ -0,0 +1,16 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
From 938b2108d04183cc8a5358835cbff3b92222a51f Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 15 Oct 2014 18:08:28 -0400 Subject: [PATCH 228/813] Move the v2 tests under the v2 tree --- Makefile | 4 +- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- test/v2/errors/__init__.py | 18 ------ test/v2/errors/test_errors.py | 48 -------------- test/v2/parsing/__init__.py | 16 ----- test/v2/parsing/test_general.py | 102 ----------------------------- test/v2/parsing/test_mod_args.py | 103 ------------------------------ test/v2/parsing/yaml/__init__.py | 16 ----- test/v2/parsing/yaml/test_yaml.py | 96 ---------------------------- test/v2/playbook/__init__.py | 16 ----- test/v2/playbook/test_task.py | 84 ------------------------ v2/ansible/constants.py | 13 ++-- v2/ansible/parsing/mod_args.py | 8 +-- v2/ansible/parsing/splitter.py | 2 +- v2/ansible/playbook/base.py | 10 ++- 16 files changed, 25 insertions(+), 515 deletions(-) delete mode 100644 test/v2/errors/__init__.py delete mode 100644 test/v2/errors/test_errors.py delete mode 100644 test/v2/parsing/__init__.py delete mode 100644 test/v2/parsing/test_general.py delete mode 100644 test/v2/parsing/test_mod_args.py delete mode 100644 test/v2/parsing/yaml/__init__.py delete mode 100644 test/v2/parsing/yaml/test_yaml.py delete mode 100644 test/v2/playbook/__init__.py delete mode 100644 test/v2/playbook/test_task.py diff --git a/Makefile b/Makefile index b7c84a51fc..ec2742f89d 100644 --- a/Makefile +++ b/Makefile @@ -96,10 +96,10 @@ tests: PYTHONPATH=./lib $(NOSETESTS) -d -w test/units -v # Could do: --with-coverage --cover-package=ansible newtests: - PYTHONPATH=./v2:./lib $(NOSETESTS) -d -w test/v2 -v --with-coverage --cover-package=ansible + PYTHONPATH=./v2:./lib $(NOSETESTS) -d -w v2/test -v --with-coverage --cover-package=ansible newtests-py3: - PYTHONPATH=./v2:./lib $(NOSETESTS3) -d -w test/v2 -v --with-coverage --cover-package=ansible + PYTHONPATH=./v2:./lib $(NOSETESTS3) -d -w v2/test -v 
--with-coverage --cover-package=ansible authors: sh hacking/authors.sh diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index cb69744bce..5af8d55b03 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit cb69744bcee4b4217d83b4a30006635ba69e2aa0 +Subproject commit 5af8d55b0365a5c3278c43b5424bf5f2ddf897b8 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 8a4f07eecd..681db4ce2c 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 8a4f07eecd2bb877f51b7b04b5352efa6076cce5 +Subproject commit 681db4ce2c534eca4fca57c5a83a5be8035c257d diff --git a/test/v2/errors/__init__.py b/test/v2/errors/__init__.py deleted file mode 100644 index 674334b15a..0000000000 --- a/test/v2/errors/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# (c) 2012-2014, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- - diff --git a/test/v2/errors/test_errors.py b/test/v2/errors/test_errors.py deleted file mode 100644 index ca5ecc1493..0000000000 --- a/test/v2/errors/test_errors.py +++ /dev/null @@ -1,48 +0,0 @@ -# (c) 2012-2014, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -import unittest - -from mock import mock_open, patch - -from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject -from ansible.errors import AnsibleError - -class TestErrors(unittest.TestCase): - - def setUp(self): - self.message = 'this is the error message' - - def tearDown(self): - pass - - def test_basic_error(self): - e = AnsibleError(self.message) - self.assertEqual(e.message, self.message) - - def test_error_with_object(self): - obj = AnsibleBaseYAMLObject() - obj._data_source = 'foo.yml' - obj._line_number = 1 - obj._column_number = 1 - - m = mock_open() - m.return_value.readlines.return_value = ['this is line 1\n', 'this is line 2\n', 'this is line 3\n'] - with patch('__builtin__.open', m): - e = AnsibleError(self.message, obj) - - self.assertEqual(e.message, 'this is the error message\nThe error occurred on line 1 of the file foo.yml:\nthis is line 1\n^') diff --git a/test/v2/parsing/__init__.py b/test/v2/parsing/__init__.py deleted file mode 100644 index 1f84012e01..0000000000 --- a/test/v2/parsing/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# (c) 2012-2014, Michael DeHaan -# -# This file is 
part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . diff --git a/test/v2/parsing/test_general.py b/test/v2/parsing/test_general.py deleted file mode 100644 index 9373968cc8..0000000000 --- a/test/v2/parsing/test_general.py +++ /dev/null @@ -1,102 +0,0 @@ -# (c) 2012-2014, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -import unittest -from ansible.parsing import load -from ansible.errors import AnsibleParserError - -import json - -from io import FileIO - -class MockFile(FileIO): - - def __init__(self, ds, method='json'): - self.ds = ds - self.method = method - - def read(self): - if method == 'json': - return json.dumps(ds) - elif method == 'yaml': - return yaml.dumps(ds) - elif method == 'fail': - return """ - AAARGGGGH - THIS WON'T PARSE !!! 
- NOOOOOOOOOOOOOOOOOO - """ - else: - raise Exception("untestable serializer") - - def close(self): - pass - -class TestGeneralParsing(unittest.TestCase): - - def __init__(self): - pass - - def setUp(self): - pass - - def tearDown(self): - pass - - def parse_json_from_string(self): - input = """ - { - "asdf" : "1234", - "jkl" : 5678 - } - """ - output = load(input) - self.assertEqual(output['asdf'], '1234') - self.assertEqual(output['jkl'], 5678) - - def parse_json_from_file(self): - output = load(MockFile(dict(a=1,b=2,c=3)),'json') - self.assertEqual(ouput, dict(a=1,b=2,c=3)) - - def parse_yaml_from_dict(self): - input = """ - asdf: '1234' - jkl: 5678 - """ - output = load(input) - self.assertEqual(output['asdf'], '1234') - self.assertEqual(output['jkl'], 5678) - - def parse_yaml_from_file(self): - output = load(MockFile(dict(a=1,b=2,c=3),'yaml')) - self.assertEqual(output, dict(a=1,b=2,c=3)) - - def parse_fail(self): - input = """ - TEXT - *** - NOT VALID - """ - self.assertRaises(load(input), AnsibleParserError) - - def parse_fail_from_file(self): - self.assertRaises(load(MockFile(None,'fail')), AnsibleParserError) - - def parse_fail_invalid_type(self): - self.assertRaises(3000, AnsibleParsingError) - self.assertRaises(dict(a=1,b=2,c=3), AnsibleParserError) - diff --git a/test/v2/parsing/test_mod_args.py b/test/v2/parsing/test_mod_args.py deleted file mode 100644 index ddd5c6f6d4..0000000000 --- a/test/v2/parsing/test_mod_args.py +++ /dev/null @@ -1,103 +0,0 @@ -# (c) 2012-2014, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -from ansible.parsing.mod_args import ModuleArgsParser -import unittest - -class TestModArgsDwim(unittest.TestCase): - - # TODO: add tests that construct ModuleArgsParser with a task reference - # TODO: verify the AnsibleError raised on failure knows the task - # and the task knows the line numbers - - def setUp(self): - self.m = ModuleArgsParser() - pass - - def _debug(self, mod, args, to): - print("RETURNED module = {0}".format(mod)) - print(" args = {0}".format(args)) - print(" to = {0}".format(to)) - - def tearDown(self): - pass - - def test_basic_shell(self): - mod, args, to = self.m.parse(dict(shell='echo hi')) - self._debug(mod, args, to) - self.assertEqual(mod, 'command') - self.assertEqual(args, dict( - _raw_params = 'echo hi', - _uses_shell = True, - )) - assert to is None - - def test_basic_command(self): - mod, args, to = self.m.parse(dict(command='echo hi')) - self._debug(mod, args, to) - self.assertEqual(mod, 'command') - self.assertEqual(args, dict( - _raw_params = 'echo hi', - )) - assert to is None - - def test_shell_with_modifiers(self): - mod, args, to = self.m.parse(dict(shell='/bin/foo creates=/tmp/baz removes=/tmp/bleep')) - self._debug(mod, args, to) - self.assertEqual(mod, 'command') - self.assertEqual(args, dict( - creates = '/tmp/baz', - removes = '/tmp/bleep', - _raw_params = '/bin/foo', - _uses_shell = True, - )) - assert to is None - - def test_normal_usage(self): - mod, args, to = self.m.parse(dict(copy='src=a dest=b')) - self._debug(mod, args, to) - self.assertEqual(mod, 'copy') - self.assertEqual(args, dict(src='a', dest='b')) - assert to is None - - def test_complex_args(self): - mod, args, to = self.m.parse(dict(copy=dict(src='a', dest='b'))) - self._debug(mod, args, to) - self.assertEqual(mod, 'copy') - self.assertEqual(args, dict(src='a', dest='b')) - assert to is 
None - - def test_action_with_complex(self): - mod, args, to = self.m.parse(dict(action=dict(module='copy', src='a', dest='b'))) - self._debug(mod, args, to) - self.assertEqual(mod, 'copy') - self.assertEqual(args, dict(src='a', dest='b')) - assert to is None - - def test_action_with_complex_and_complex_args(self): - mod, args, to = self.m.parse(dict(action=dict(module='copy', args=dict(src='a', dest='b')))) - self._debug(mod, args, to) - self.assertEqual(mod, 'copy') - self.assertEqual(args, dict(src='a', dest='b')) - assert to is None - - def test_local_action_string(self): - mod, args, to = self.m.parse(dict(local_action='copy src=a dest=b')) - self._debug(mod, args, to) - self.assertEqual(mod, 'copy') - self.assertEqual(args, dict(src='a', dest='b')) - assert to is 'localhost' diff --git a/test/v2/parsing/yaml/__init__.py b/test/v2/parsing/yaml/__init__.py deleted file mode 100644 index 1f84012e01..0000000000 --- a/test/v2/parsing/yaml/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# (c) 2012-2014, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
diff --git a/test/v2/parsing/yaml/test_yaml.py b/test/v2/parsing/yaml/test_yaml.py deleted file mode 100644 index 90a4bc3a84..0000000000 --- a/test/v2/parsing/yaml/test_yaml.py +++ /dev/null @@ -1,96 +0,0 @@ -# (c) 2012-2014, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -import unittest - -from yaml.scanner import ScannerError - -from ansible.parsing.yaml import safe_load -from ansible.parsing.yaml.objects import AnsibleMapping - -# a single dictionary instance -data1 = '''--- -key: value -''' - -# multiple dictionary instances -data2 = '''--- -- key1: value1 -- key2: value2 - -- key3: value3 - - -- key4: value4 -''' - -# multiple dictionary instances with other nested -# dictionaries contained within those -data3 = '''--- -- key1: - subkey1: subvalue1 - subkey2: subvalue2 - subkey3: - subsubkey1: subsubvalue1 -- key2: - subkey4: subvalue4 -- list1: - - list1key1: list1value1 - list1key2: list1value2 - list1key3: list1value3 -''' - -bad_data1 = '''--- -foo: bar - bam: baz -''' - -class TestSafeLoad(unittest.TestCase): - - def setUp(self): - pass - - def tearDown(self): - pass - - def test_safe_load_bad(self): - # test the loading of bad yaml data - self.assertRaises(ScannerError, safe_load, bad_data1) - - def test_safe_load(self): - # test basic dictionary - res = safe_load(data1) - self.assertEqual(type(res), AnsibleMapping) - self.assertEqual(res._line_number, 2) - 
- # test data with multiple dictionaries - res = safe_load(data2) - self.assertEqual(len(res), 4) - self.assertEqual(res[0]._line_number, 2) - self.assertEqual(res[1]._line_number, 3) - self.assertEqual(res[2]._line_number, 5) - self.assertEqual(res[3]._line_number, 8) - - # test data with multiple sub-dictionaries - res = safe_load(data3) - self.assertEqual(len(res), 3) - self.assertEqual(res[0]._line_number, 2) - self.assertEqual(res[1]._line_number, 7) - self.assertEqual(res[2]._line_number, 9) - self.assertEqual(res[0]['key1']._line_number, 3) - self.assertEqual(res[1]['key2']._line_number, 8) - self.assertEqual(res[2]['list1'][0]._line_number, 10) diff --git a/test/v2/playbook/__init__.py b/test/v2/playbook/__init__.py deleted file mode 100644 index 1f84012e01..0000000000 --- a/test/v2/playbook/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# (c) 2012-2014, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
diff --git a/test/v2/playbook/test_task.py b/test/v2/playbook/test_task.py deleted file mode 100644 index b6f869cd21..0000000000 --- a/test/v2/playbook/test_task.py +++ /dev/null @@ -1,84 +0,0 @@ -# (c) 2012-2014, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -from ansible.playbook.task import Task -import unittest - -basic_shell_task = dict( - name = 'Test Task', - shell = 'echo hi' -) - -kv_shell_task = dict( - action = 'shell echo hi' -) - -class TestTask(unittest.TestCase): - - def setUp(self): - pass - - def tearDown(self): - pass - - def test_construct_empty_task(self): - t = Task() - - def test_construct_task_with_role(self): - pass - - def test_construct_task_with_block(self): - pass - - def test_construct_task_with_role_and_block(self): - pass - - def test_load_task_simple(self): - t = Task.load(basic_shell_task) - assert t is not None - self.assertEqual(t.name, basic_shell_task['name']) - self.assertEqual(t.action, 'command') - self.assertEqual(t.args, dict(_raw_params='echo hi', _uses_shell=True)) - - def test_load_task_kv_form(self): - t = Task.load(kv_shell_task) - print("task action is %s" % t.action) - self.assertEqual(t.action, 'command') - self.assertEqual(t.args, dict(_raw_params='echo hi', _uses_shell=True)) - - def test_task_auto_name(self): - assert 'name' not in kv_shell_task - t = Task.load(kv_shell_task) - #self.assertEqual(t.name, 'shell echo 
hi') - - def test_task_auto_name_with_role(self): - pass - - def test_load_task_complex_form(self): - pass - - def test_can_load_module_complex_form(self): - pass - - def test_local_action_implies_delegate(self): - pass - - def test_local_action_conflicts_with_delegate(self): - pass - - def test_delegate_to_parses(self): - pass diff --git a/v2/ansible/constants.py b/v2/ansible/constants.py index 861dd5325c..d1d9b69fd3 100644 --- a/v2/ansible/constants.py +++ b/v2/ansible/constants.py @@ -18,7 +18,12 @@ import os import pwd import sys -import ConfigParser +try: + import configparser +except ImportError: + # Python 2.7 + import ConfigParser as configparser + from string import ascii_letters, digits # copied from utils, avoid circular reference fun :) @@ -60,7 +65,7 @@ def _get_config(p, section, key, env_var, default): def load_config_file(): ''' Load Config File order(first found is used): ENV, CWD, HOME, /etc/ansible ''' - p = ConfigParser.ConfigParser() + p = configparser.ConfigParser() path0 = os.getenv("ANSIBLE_CONFIG", None) if path0 is not None: @@ -73,8 +78,8 @@ def load_config_file(): if path is not None and os.path.exists(path): try: p.read(path) - except ConfigParser.Error as e: - print "Error reading config file: \n%s" % e + except configparser.Error as e: + print("Error reading config file: \n{0}".format(e)) sys.exit(1) return p return None diff --git a/v2/ansible/parsing/mod_args.py b/v2/ansible/parsing/mod_args.py index 2e5b401601..534aca8cd8 100644 --- a/v2/ansible/parsing/mod_args.py +++ b/v2/ansible/parsing/mod_args.py @@ -15,7 +15,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -import exceptions +from six import iteritems, string_types from ansible.errors import AnsibleParserError from ansible.plugins import module_finder @@ -141,7 +141,7 @@ class ModuleArgsParser(object): if isinstance(thing, dict): # form is like: local_action: { module: 'xyz', x: 2, y: 3 } ... uncommon! 
args = thing - elif isinstance(thing, basestring): + elif isinstance(thing, string_types): # form is like: local_action: copy src=a dest=b ... pretty common args = parse_kv(thing) else: @@ -173,7 +173,7 @@ class ModuleArgsParser(object): args = thing.copy() del args['module'] - elif isinstance(thing, basestring): + elif isinstance(thing, string_types): # form is like: copy: src=a dest=b ... common shorthand throughout ansible (action, args) = self._split_module_string(thing) args = parse_kv(args) @@ -222,7 +222,7 @@ class ModuleArgsParser(object): raise AnsibleParserError("conflicting action statements", obj=self._task) # walk the input dictionary to see we recognize a module name - for (item, value) in ds.iteritems(): + for (item, value) in iteritems(ds): if item in module_finder: # finding more than one module name is a problem if action is not None: diff --git a/v2/ansible/parsing/splitter.py b/v2/ansible/parsing/splitter.py index 17946f663b..d96a8c313a 100644 --- a/v2/ansible/parsing/splitter.py +++ b/v2/ansible/parsing/splitter.py @@ -27,7 +27,7 @@ def parse_kv(args, check_raw=False): if args is not None: try: vargs = split_args(args) - except ValueError, ve: + except ValueError as ve: if 'no closing quotation' in str(ve).lower(): raise errors.AnsibleError("error parsing argument string, try quoting the entire line.") else: diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py index 3c07dba29a..3d236df761 100644 --- a/v2/ansible/playbook/base.py +++ b/v2/ansible/playbook/base.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+from io import FileIO + +from six import iteritems, string_types + from ansible.playbook.attribute import Attribute, FieldAttribute from ansible.parsing import load as ds_load @@ -25,7 +29,7 @@ class Base(object): # each class knows attributes set upon it, see Task.py for example self._attributes = dict() - for (name, value) in self.__class__.__dict__.iteritems(): + for (name, value) in iteritems(self.__class__.__dict__): aname = name[1:] if isinstance(value, Attribute): self._attributes[aname] = value.default @@ -40,7 +44,7 @@ class Base(object): assert ds is not None - if isinstance(ds, basestring) or isinstance(ds, file): + if isinstance(ds, string_types) or isinstance(ds, FileIO): ds = ds_load(ds) # we currently don't do anything with private attributes but may @@ -49,7 +53,7 @@ class Base(object): ds = self.munge(ds) # walk all attributes in the class - for (name, attribute) in self.__class__.__dict__.iteritems(): + for (name, attribute) in iteritems(self.__class__.__dict__): aname = name[1:] # process Field attributes which get loaded from the YAML From 46b54443f11c3b5dc1ae68547b2d9eb8ef50f7ee Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 15 Oct 2014 18:25:54 -0400 Subject: [PATCH 229/813] Add a README for the v2 unittests --- v2/README-tests.md | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) create mode 100644 v2/README-tests.md diff --git a/v2/README-tests.md b/v2/README-tests.md new file mode 100644 index 0000000000..956160b653 --- /dev/null +++ b/v2/README-tests.md @@ -0,0 +1,33 @@ +Ansible Test System +=================== + +Folders +======= + +test +---- + +Unit tests that test small pieces of code not suited for the integration test +layer, usually very API based, and should leverage mock interfaces rather than +producing side effects. + +Playbook engine code is better suited for integration tests. 
+ +Requirements: sudo pip install paramiko PyYAML jinja2 httplib2 passlib unittest2 mock + +integration +----------- + +Integration test layer, constructed using playbooks. + +Some tests may require cloud credentials, others will not, and destructive +tests are separated from non-destructive so a subset can be run on development +machines. + +learn more +---------- + +hop into a subdirectory and see the associated README.md for more info. + + + From b62e7ae8f5abb1ae4498c295468763b7c2f01af5 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 15 Oct 2014 18:26:05 -0400 Subject: [PATCH 230/813] Remove extraneous __init__ to fix v2 unittests --- v2/__init__.py | 19 ------------------- 1 file changed, 19 deletions(-) delete mode 100644 v2/__init__.py diff --git a/v2/__init__.py b/v2/__init__.py deleted file mode 100644 index 05b82a40c7..0000000000 --- a/v2/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# (c) 2012-2014, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -__version__ = '1.8' -__author__ = 'Michael DeHaan' From cf5ce972758f93cbc06feb465cc7b6c81b10ba3a Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 15 Oct 2014 18:35:16 -0400 Subject: [PATCH 231/813] Somehow, git mv only rm'd test/v2 and didn't add v2/test --- v2/test/__init__.py | 0 v2/test/compat.py | 51 +++++++++++++ v2/test/errors/__init__.py | 18 +++++ v2/test/errors/test_errors.py | 48 +++++++++++++ v2/test/parsing/__init__.py | 16 +++++ v2/test/parsing/test_general.py | 102 ++++++++++++++++++++++++++ v2/test/parsing/test_mod_args.py | 104 +++++++++++++++++++++++++++ v2/test/parsing/test_mod_args.py.py3 | 104 +++++++++++++++++++++++++++ v2/test/parsing/yaml/__init__.py | 16 +++++ v2/test/parsing/yaml/test_yaml.py | 96 +++++++++++++++++++++++++ v2/test/playbook/__init__.py | 16 +++++ v2/test/playbook/test_task.py | 84 ++++++++++++++++++++++ v2/test/test.yml | 2 + 13 files changed, 657 insertions(+) create mode 100644 v2/test/__init__.py create mode 100644 v2/test/compat.py create mode 100644 v2/test/errors/__init__.py create mode 100644 v2/test/errors/test_errors.py create mode 100644 v2/test/parsing/__init__.py create mode 100644 v2/test/parsing/test_general.py create mode 100644 v2/test/parsing/test_mod_args.py create mode 100644 v2/test/parsing/test_mod_args.py.py3 create mode 100644 v2/test/parsing/yaml/__init__.py create mode 100644 v2/test/parsing/yaml/test_yaml.py create mode 100644 v2/test/playbook/__init__.py create mode 100644 v2/test/playbook/test_task.py create mode 100644 v2/test/test.yml diff --git a/v2/test/__init__.py b/v2/test/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/v2/test/compat.py b/v2/test/compat.py new file mode 100644 index 0000000000..6c122f2fad --- /dev/null +++ b/v2/test/compat.py @@ -0,0 +1,51 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as 
published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +import unittest + +# +# Compat for python2.6 +# + +if sys.version_info < (2, 7): + try: + # Need unittest2 on python2.6 + import unittest2 as unittest + except ImportError: + print('You need unittest2 installed on python2.x') +else: + import unittest + + +# +# Compat for python2.7 +# + +# Could use the pypi mock library on py3 as well as py2. They are the same +try: + from unittest.mock import mock_open, patch +except ImportError: + # Python2 + from mock import mock_open, patch + +try: + import __builtin__ +except ImportError: + BUILTINS = 'builtins' +else: + BUILTINS = '__builtin__' + diff --git a/v2/test/errors/__init__.py b/v2/test/errors/__init__.py new file mode 100644 index 0000000000..674334b15a --- /dev/null +++ b/v2/test/errors/__init__.py @@ -0,0 +1,18 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ + diff --git a/v2/test/errors/test_errors.py b/v2/test/errors/test_errors.py new file mode 100644 index 0000000000..ff3b562d25 --- /dev/null +++ b/v2/test/errors/test_errors.py @@ -0,0 +1,48 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from ..compat import unittest + +from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject +from ansible.errors import AnsibleError + +from .. 
compat import BUILTINS, mock_open, patch + +class TestErrors(unittest.TestCase): + + def setUp(self): + self.message = 'this is the error message' + + def tearDown(self): + pass + + def test_basic_error(self): + e = AnsibleError(self.message) + self.assertEqual(e.message, self.message) + + def test_error_with_object(self): + obj = AnsibleBaseYAMLObject() + obj._data_source = 'foo.yml' + obj._line_number = 1 + obj._column_number = 1 + + m = mock_open() + m.return_value.readlines.return_value = ['this is line 1\n', 'this is line 2\n', 'this is line 3\n'] + with patch('{0}.open'.format(BUILTINS), m): + e = AnsibleError(self.message, obj) + + self.assertEqual(e.message, 'this is the error message\nThe error occurred on line 1 of the file foo.yml:\nthis is line 1\n^') diff --git a/v2/test/parsing/__init__.py b/v2/test/parsing/__init__.py new file mode 100644 index 0000000000..1f84012e01 --- /dev/null +++ b/v2/test/parsing/__init__.py @@ -0,0 +1,16 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
diff --git a/v2/test/parsing/test_general.py b/v2/test/parsing/test_general.py new file mode 100644 index 0000000000..377aa88693 --- /dev/null +++ b/v2/test/parsing/test_general.py @@ -0,0 +1,102 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from ..compat import unittest +from ansible.parsing import load +from ansible.errors import AnsibleParserError + +import json + +from io import FileIO + +class MockFile(FileIO): + + def __init__(self, ds, method='json'): + self.ds = ds + self.method = method + + def read(self): + if method == 'json': + return json.dumps(ds) + elif method == 'yaml': + return yaml.dumps(ds) + elif method == 'fail': + return """ + AAARGGGGH + THIS WON'T PARSE !!! 
+ NOOOOOOOOOOOOOOOOOO + """ + else: + raise Exception("untestable serializer") + + def close(self): + pass + +class TestGeneralParsing(unittest.TestCase): + + def __init__(self): + pass + + def setUp(self): + pass + + def tearDown(self): + pass + + def parse_json_from_string(self): + input = """ + { + "asdf" : "1234", + "jkl" : 5678 + } + """ + output = load(input) + self.assertEqual(output['asdf'], '1234') + self.assertEqual(output['jkl'], 5678) + + def parse_json_from_file(self): + output = load(MockFile(dict(a=1,b=2,c=3)),'json') + self.assertEqual(ouput, dict(a=1,b=2,c=3)) + + def parse_yaml_from_dict(self): + input = """ + asdf: '1234' + jkl: 5678 + """ + output = load(input) + self.assertEqual(output['asdf'], '1234') + self.assertEqual(output['jkl'], 5678) + + def parse_yaml_from_file(self): + output = load(MockFile(dict(a=1,b=2,c=3),'yaml')) + self.assertEqual(output, dict(a=1,b=2,c=3)) + + def parse_fail(self): + input = """ + TEXT + *** + NOT VALID + """ + self.assertRaises(load(input), AnsibleParserError) + + def parse_fail_from_file(self): + self.assertRaises(load(MockFile(None,'fail')), AnsibleParserError) + + def parse_fail_invalid_type(self): + self.assertRaises(3000, AnsibleParsingError) + self.assertRaises(dict(a=1,b=2,c=3), AnsibleParserError) + diff --git a/v2/test/parsing/test_mod_args.py b/v2/test/parsing/test_mod_args.py new file mode 100644 index 0000000000..b52b6f9cc3 --- /dev/null +++ b/v2/test/parsing/test_mod_args.py @@ -0,0 +1,104 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from ansible.parsing.mod_args import ModuleArgsParser + +from .. compat import CompatTestCase + +class TestModArgsDwim(CompatTestCase): + + # TODO: add tests that construct ModuleArgsParser with a task reference + # TODO: verify the AnsibleError raised on failure knows the task + # and the task knows the line numbers + + def setUp(self): + self.m = ModuleArgsParser() + pass + + def _debug(self, mod, args, to): + print("RETURNED module = {0}".format(mod)) + print(" args = {0}".format(args)) + print(" to = {0}".format(to)) + + def tearDown(self): + pass + + def test_basic_shell(self): + mod, args, to = self.m.parse(dict(shell='echo hi')) + self._debug(mod, args, to) + self.assertEqual(mod, 'command') + self.assertEqual(args, dict( + _raw_params = 'echo hi', + _uses_shell = True, + )) + self.assertIsNone(to) + + def test_basic_command(self): + mod, args, to = self.m.parse(dict(command='echo hi')) + self._debug(mod, args, to) + self.assertEqual(mod, 'command') + self.assertEqual(args, dict( + _raw_params = 'echo hi', + )) + self.assertIsNone(to) + + def test_shell_with_modifiers(self): + mod, args, to = self.m.parse(dict(shell='/bin/foo creates=/tmp/baz removes=/tmp/bleep')) + self._debug(mod, args, to) + self.assertEqual(mod, 'command') + self.assertEqual(args, dict( + creates = '/tmp/baz', + removes = '/tmp/bleep', + _raw_params = '/bin/foo', + _uses_shell = True, + )) + self.assertIsNone(to) + + def test_normal_usage(self): + mod, args, to = self.m.parse(dict(copy='src=a dest=b')) + self._debug(mod, args, to) + self.assertEqual(mod, 'copy') + self.assertEqual(args, dict(src='a', dest='b')) + self.assertIsNone(to) + + def test_complex_args(self): + mod, args, to = self.m.parse(dict(copy=dict(src='a', dest='b'))) + self._debug(mod, args, to) + self.assertEqual(mod, 'copy') + self.assertEqual(args, 
dict(src='a', dest='b')) + self.assertIsNone(to) + + def test_action_with_complex(self): + mod, args, to = self.m.parse(dict(action=dict(module='copy', src='a', dest='b'))) + self._debug(mod, args, to) + self.assertEqual(mod, 'copy') + self.assertEqual(args, dict(src='a', dest='b')) + self.assertIsNone(to) + + def test_action_with_complex_and_complex_args(self): + mod, args, to = self.m.parse(dict(action=dict(module='copy', args=dict(src='a', dest='b')))) + self._debug(mod, args, to) + self.assertEqual(mod, 'copy') + self.assertEqual(args, dict(src='a', dest='b')) + self.assertIsNone(to) + + def test_local_action_string(self): + mod, args, to = self.m.parse(dict(local_action='copy src=a dest=b')) + self._debug(mod, args, to) + self.assertEqual(mod, 'copy') + self.assertEqual(args, dict(src='a', dest='b')) + self.assertIs(to, 'localhost') diff --git a/v2/test/parsing/test_mod_args.py.py3 b/v2/test/parsing/test_mod_args.py.py3 new file mode 100644 index 0000000000..b52b6f9cc3 --- /dev/null +++ b/v2/test/parsing/test_mod_args.py.py3 @@ -0,0 +1,104 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from ansible.parsing.mod_args import ModuleArgsParser + +from .. 
compat import CompatTestCase + +class TestModArgsDwim(CompatTestCase): + + # TODO: add tests that construct ModuleArgsParser with a task reference + # TODO: verify the AnsibleError raised on failure knows the task + # and the task knows the line numbers + + def setUp(self): + self.m = ModuleArgsParser() + pass + + def _debug(self, mod, args, to): + print("RETURNED module = {0}".format(mod)) + print(" args = {0}".format(args)) + print(" to = {0}".format(to)) + + def tearDown(self): + pass + + def test_basic_shell(self): + mod, args, to = self.m.parse(dict(shell='echo hi')) + self._debug(mod, args, to) + self.assertEqual(mod, 'command') + self.assertEqual(args, dict( + _raw_params = 'echo hi', + _uses_shell = True, + )) + self.assertIsNone(to) + + def test_basic_command(self): + mod, args, to = self.m.parse(dict(command='echo hi')) + self._debug(mod, args, to) + self.assertEqual(mod, 'command') + self.assertEqual(args, dict( + _raw_params = 'echo hi', + )) + self.assertIsNone(to) + + def test_shell_with_modifiers(self): + mod, args, to = self.m.parse(dict(shell='/bin/foo creates=/tmp/baz removes=/tmp/bleep')) + self._debug(mod, args, to) + self.assertEqual(mod, 'command') + self.assertEqual(args, dict( + creates = '/tmp/baz', + removes = '/tmp/bleep', + _raw_params = '/bin/foo', + _uses_shell = True, + )) + self.assertIsNone(to) + + def test_normal_usage(self): + mod, args, to = self.m.parse(dict(copy='src=a dest=b')) + self._debug(mod, args, to) + self.assertEqual(mod, 'copy') + self.assertEqual(args, dict(src='a', dest='b')) + self.assertIsNone(to) + + def test_complex_args(self): + mod, args, to = self.m.parse(dict(copy=dict(src='a', dest='b'))) + self._debug(mod, args, to) + self.assertEqual(mod, 'copy') + self.assertEqual(args, dict(src='a', dest='b')) + self.assertIsNone(to) + + def test_action_with_complex(self): + mod, args, to = self.m.parse(dict(action=dict(module='copy', src='a', dest='b'))) + self._debug(mod, args, to) + self.assertEqual(mod, 'copy') + 
self.assertEqual(args, dict(src='a', dest='b')) + self.assertIsNone(to) + + def test_action_with_complex_and_complex_args(self): + mod, args, to = self.m.parse(dict(action=dict(module='copy', args=dict(src='a', dest='b')))) + self._debug(mod, args, to) + self.assertEqual(mod, 'copy') + self.assertEqual(args, dict(src='a', dest='b')) + self.assertIsNone(to) + + def test_local_action_string(self): + mod, args, to = self.m.parse(dict(local_action='copy src=a dest=b')) + self._debug(mod, args, to) + self.assertEqual(mod, 'copy') + self.assertEqual(args, dict(src='a', dest='b')) + self.assertIs(to, 'localhost') diff --git a/v2/test/parsing/yaml/__init__.py b/v2/test/parsing/yaml/__init__.py new file mode 100644 index 0000000000..1f84012e01 --- /dev/null +++ b/v2/test/parsing/yaml/__init__.py @@ -0,0 +1,16 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
diff --git a/v2/test/parsing/yaml/test_yaml.py b/v2/test/parsing/yaml/test_yaml.py new file mode 100644 index 0000000000..b8d7771c7c --- /dev/null +++ b/v2/test/parsing/yaml/test_yaml.py @@ -0,0 +1,96 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from ..compat import unittest + +from yaml.scanner import ScannerError + +from ansible.parsing.yaml import safe_load +from ansible.parsing.yaml.objects import AnsibleMapping + +# a single dictionary instance +data1 = '''--- +key: value +''' + +# multiple dictionary instances +data2 = '''--- +- key1: value1 +- key2: value2 + +- key3: value3 + + +- key4: value4 +''' + +# multiple dictionary instances with other nested +# dictionaries contained within those +data3 = '''--- +- key1: + subkey1: subvalue1 + subkey2: subvalue2 + subkey3: + subsubkey1: subsubvalue1 +- key2: + subkey4: subvalue4 +- list1: + - list1key1: list1value1 + list1key2: list1value2 + list1key3: list1value3 +''' + +bad_data1 = '''--- +foo: bar + bam: baz +''' + +class TestSafeLoad(unittest.TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_safe_load_bad(self): + # test the loading of bad yaml data + self.assertRaises(ScannerError, safe_load, bad_data1) + + def test_safe_load(self): + # test basic dictionary + res = safe_load(data1) + self.assertEqual(type(res), AnsibleMapping) + 
self.assertEqual(res._line_number, 2) + + # test data with multiple dictionaries + res = safe_load(data2) + self.assertEqual(len(res), 4) + self.assertEqual(res[0]._line_number, 2) + self.assertEqual(res[1]._line_number, 3) + self.assertEqual(res[2]._line_number, 5) + self.assertEqual(res[3]._line_number, 8) + + # test data with multiple sub-dictionaries + res = safe_load(data3) + self.assertEqual(len(res), 3) + self.assertEqual(res[0]._line_number, 2) + self.assertEqual(res[1]._line_number, 7) + self.assertEqual(res[2]._line_number, 9) + self.assertEqual(res[0]['key1']._line_number, 3) + self.assertEqual(res[1]['key2']._line_number, 8) + self.assertEqual(res[2]['list1'][0]._line_number, 10) diff --git a/v2/test/playbook/__init__.py b/v2/test/playbook/__init__.py new file mode 100644 index 0000000000..1f84012e01 --- /dev/null +++ b/v2/test/playbook/__init__.py @@ -0,0 +1,16 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
diff --git a/v2/test/playbook/test_task.py b/v2/test/playbook/test_task.py new file mode 100644 index 0000000000..ec7ce9012e --- /dev/null +++ b/v2/test/playbook/test_task.py @@ -0,0 +1,84 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from ansible.playbook.task import Task +from ..compat import unittest + +basic_shell_task = dict( + name = 'Test Task', + shell = 'echo hi' +) + +kv_shell_task = dict( + action = 'shell echo hi' +) + +class TestTask(unittest.TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_construct_empty_task(self): + t = Task() + + def test_construct_task_with_role(self): + pass + + def test_construct_task_with_block(self): + pass + + def test_construct_task_with_role_and_block(self): + pass + + def test_load_task_simple(self): + t = Task.load(basic_shell_task) + assert t is not None + self.assertEqual(t.name, basic_shell_task['name']) + self.assertEqual(t.action, 'command') + self.assertEqual(t.args, dict(_raw_params='echo hi', _uses_shell=True)) + + def test_load_task_kv_form(self): + t = Task.load(kv_shell_task) + print("task action is %s" % t.action) + self.assertEqual(t.action, 'command') + self.assertEqual(t.args, dict(_raw_params='echo hi', _uses_shell=True)) + + def test_task_auto_name(self): + assert 'name' not in kv_shell_task + t = Task.load(kv_shell_task) + #self.assertEqual(t.name, 
'shell echo hi') + + def test_task_auto_name_with_role(self): + pass + + def test_load_task_complex_form(self): + pass + + def test_can_load_module_complex_form(self): + pass + + def test_local_action_implies_delegate(self): + pass + + def test_local_action_conflicts_with_delegate(self): + pass + + def test_delegate_to_parses(self): + pass diff --git a/v2/test/test.yml b/v2/test/test.yml new file mode 100644 index 0000000000..299b66610d --- /dev/null +++ b/v2/test/test.yml @@ -0,0 +1,2 @@ +- name: Test +filename: /usr/café/ÿ/are_doing_this_to_me From 057c1df303a294432a03ad17aef1d3750dce117b Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 15 Oct 2014 18:36:30 -0400 Subject: [PATCH 232/813] Remove temp file --- v2/test/parsing/test_mod_args.py.py3 | 104 --------------------------- 1 file changed, 104 deletions(-) delete mode 100644 v2/test/parsing/test_mod_args.py.py3 diff --git a/v2/test/parsing/test_mod_args.py.py3 b/v2/test/parsing/test_mod_args.py.py3 deleted file mode 100644 index b52b6f9cc3..0000000000 --- a/v2/test/parsing/test_mod_args.py.py3 +++ /dev/null @@ -1,104 +0,0 @@ -# (c) 2012-2014, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -from ansible.parsing.mod_args import ModuleArgsParser - -from .. 
compat import CompatTestCase - -class TestModArgsDwim(CompatTestCase): - - # TODO: add tests that construct ModuleArgsParser with a task reference - # TODO: verify the AnsibleError raised on failure knows the task - # and the task knows the line numbers - - def setUp(self): - self.m = ModuleArgsParser() - pass - - def _debug(self, mod, args, to): - print("RETURNED module = {0}".format(mod)) - print(" args = {0}".format(args)) - print(" to = {0}".format(to)) - - def tearDown(self): - pass - - def test_basic_shell(self): - mod, args, to = self.m.parse(dict(shell='echo hi')) - self._debug(mod, args, to) - self.assertEqual(mod, 'command') - self.assertEqual(args, dict( - _raw_params = 'echo hi', - _uses_shell = True, - )) - self.assertIsNone(to) - - def test_basic_command(self): - mod, args, to = self.m.parse(dict(command='echo hi')) - self._debug(mod, args, to) - self.assertEqual(mod, 'command') - self.assertEqual(args, dict( - _raw_params = 'echo hi', - )) - self.assertIsNone(to) - - def test_shell_with_modifiers(self): - mod, args, to = self.m.parse(dict(shell='/bin/foo creates=/tmp/baz removes=/tmp/bleep')) - self._debug(mod, args, to) - self.assertEqual(mod, 'command') - self.assertEqual(args, dict( - creates = '/tmp/baz', - removes = '/tmp/bleep', - _raw_params = '/bin/foo', - _uses_shell = True, - )) - self.assertIsNone(to) - - def test_normal_usage(self): - mod, args, to = self.m.parse(dict(copy='src=a dest=b')) - self._debug(mod, args, to) - self.assertEqual(mod, 'copy') - self.assertEqual(args, dict(src='a', dest='b')) - self.assertIsNone(to) - - def test_complex_args(self): - mod, args, to = self.m.parse(dict(copy=dict(src='a', dest='b'))) - self._debug(mod, args, to) - self.assertEqual(mod, 'copy') - self.assertEqual(args, dict(src='a', dest='b')) - self.assertIsNone(to) - - def test_action_with_complex(self): - mod, args, to = self.m.parse(dict(action=dict(module='copy', src='a', dest='b'))) - self._debug(mod, args, to) - self.assertEqual(mod, 'copy') - 
self.assertEqual(args, dict(src='a', dest='b')) - self.assertIsNone(to) - - def test_action_with_complex_and_complex_args(self): - mod, args, to = self.m.parse(dict(action=dict(module='copy', args=dict(src='a', dest='b')))) - self._debug(mod, args, to) - self.assertEqual(mod, 'copy') - self.assertEqual(args, dict(src='a', dest='b')) - self.assertIsNone(to) - - def test_local_action_string(self): - mod, args, to = self.m.parse(dict(local_action='copy src=a dest=b')) - self._debug(mod, args, to) - self.assertEqual(mod, 'copy') - self.assertEqual(args, dict(src='a', dest='b')) - self.assertIs(to, 'localhost') From 6ecca227b87bdc14f8f2620d370851647394f141 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 15 Oct 2014 18:37:17 -0400 Subject: [PATCH 233/813] Fix missing import --- v2/test/compat.py | 1 + 1 file changed, 1 insertion(+) diff --git a/v2/test/compat.py b/v2/test/compat.py index 6c122f2fad..9fe93b7763 100644 --- a/v2/test/compat.py +++ b/v2/test/compat.py @@ -15,6 +15,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +import sys import unittest # From a707f5acfed51090583d50f9bb35d7a3aae71b55 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 15 Oct 2014 18:40:03 -0400 Subject: [PATCH 234/813] Fix relative imports --- v2/test/errors/test_errors.py | 2 +- v2/test/parsing/test_general.py | 2 +- v2/test/parsing/test_mod_args.py | 4 ++-- v2/test/parsing/yaml/test_yaml.py | 2 +- v2/test/playbook/test_task.py | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/v2/test/errors/test_errors.py b/v2/test/errors/test_errors.py index ff3b562d25..f67af449dc 100644 --- a/v2/test/errors/test_errors.py +++ b/v2/test/errors/test_errors.py @@ -15,7 +15,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from ..compat import unittest +from .. 
compat import unittest from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject from ansible.errors import AnsibleError diff --git a/v2/test/parsing/test_general.py b/v2/test/parsing/test_general.py index 377aa88693..d003f15d2b 100644 --- a/v2/test/parsing/test_general.py +++ b/v2/test/parsing/test_general.py @@ -15,7 +15,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from ..compat import unittest +from .. compat import unittest from ansible.parsing import load from ansible.errors import AnsibleParserError diff --git a/v2/test/parsing/test_mod_args.py b/v2/test/parsing/test_mod_args.py index b52b6f9cc3..4c7d5b5f9e 100644 --- a/v2/test/parsing/test_mod_args.py +++ b/v2/test/parsing/test_mod_args.py @@ -17,9 +17,9 @@ from ansible.parsing.mod_args import ModuleArgsParser -from .. compat import CompatTestCase +from .. compat import unittest -class TestModArgsDwim(CompatTestCase): +class TestModArgsDwim(unittest.TestCase): # TODO: add tests that construct ModuleArgsParser with a task reference # TODO: verify the AnsibleError raised on failure knows the task diff --git a/v2/test/parsing/yaml/test_yaml.py b/v2/test/parsing/yaml/test_yaml.py index b8d7771c7c..61859b0d16 100644 --- a/v2/test/parsing/yaml/test_yaml.py +++ b/v2/test/parsing/yaml/test_yaml.py @@ -15,7 +15,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from ..compat import unittest +from ... compat import unittest from yaml.scanner import ScannerError diff --git a/v2/test/playbook/test_task.py b/v2/test/playbook/test_task.py index ec7ce9012e..286437b4d4 100644 --- a/v2/test/playbook/test_task.py +++ b/v2/test/playbook/test_task.py @@ -16,7 +16,7 @@ # along with Ansible. If not, see . from ansible.playbook.task import Task -from ..compat import unittest +from .. 
compat import unittest basic_shell_task = dict( name = 'Test Task', From 6ca67c61cb777699dca72ba19cdb7007be55988f Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Wed, 15 Oct 2014 15:53:43 -0700 Subject: [PATCH 235/813] Starting to stub out some classes. --- v2/ansible/executor/HostLog.py | 23 +++++++++++++++ v2/ansible/executor/HostLogManager.py | 9 ++++++ v2/ansible/executor/HostPlaybookIterator.py | 32 +++++++++++++++++++++ v2/ansible/executor/PlaybookExecutor.py | 15 ++++++++++ v2/ansible/executor/TaskExecutor.py | 12 ++++++++ v2/ansible/executor/TaskQueueManager.py | 16 +++++++++++ 6 files changed, 107 insertions(+) create mode 100644 v2/ansible/executor/HostPlaybookIterator.py diff --git a/v2/ansible/executor/HostLog.py b/v2/ansible/executor/HostLog.py index 1f84012e01..21d3feb2bf 100644 --- a/v2/ansible/executor/HostLog.py +++ b/v2/ansible/executor/HostLog.py @@ -14,3 +14,26 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . + +class HostLog(object): + + def __init__(self, host): + self.host = host + + def add_task_result(self, task_result): + pass + + def has_failures(self): + assert False + + def has_changes(self): + assert False + + def get_tasks(self, are_executed=None, are_changed=None, are_successful=None): + assert False + + def get_current_running_task(self) + # atomic decorator likely required? + assert False + + diff --git a/v2/ansible/executor/HostLogManager.py b/v2/ansible/executor/HostLogManager.py index 1f84012e01..81cf520cf1 100644 --- a/v2/ansible/executor/HostLogManager.py +++ b/v2/ansible/executor/HostLogManager.py @@ -14,3 +14,12 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+ +class HostLogManager(object): + + def __init__(self): + pass + + def get_log_for_host(self, host): + assert False + diff --git a/v2/ansible/executor/HostPlaybookIterator.py b/v2/ansible/executor/HostPlaybookIterator.py new file mode 100644 index 0000000000..5292b6c72b --- /dev/null +++ b/v2/ansible/executor/HostPlaybookIterator.py @@ -0,0 +1,32 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +class HostPlaybookIterator(object): + + def __init__(self, host, playbook): + pass + + def get_next_task(self): + assert False + + def is_blocked(self): + # depending on strategy, either + # ‘linear’ -- all prev tasks must be completed for all hosts + # ‘free’ -- this host doesn’t have any more work to do + assert False + + diff --git a/v2/ansible/executor/PlaybookExecutor.py b/v2/ansible/executor/PlaybookExecutor.py index 1f84012e01..7a7ed3d293 100644 --- a/v2/ansible/executor/PlaybookExecutor.py +++ b/v2/ansible/executor/PlaybookExecutor.py @@ -14,3 +14,18 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+ +class PlaybookExecutor(object): + + def __init__(self, list_of_plays=[]): + # self.tqm = TaskQueueManager(forks) + assert False + + def run(self): + # for play in list_of_plays: + # for block in play.blocks: + # # block must know it’s playbook class and context + # tqm.enqueue(block) + # tqm.go()... + assert False + diff --git a/v2/ansible/executor/TaskExecutor.py b/v2/ansible/executor/TaskExecutor.py index 1f84012e01..0591931372 100644 --- a/v2/ansible/executor/TaskExecutor.py +++ b/v2/ansible/executor/TaskExecutor.py @@ -14,3 +14,15 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . + +class TaskExecutor(object): + + def __init__(self, task, host): + pass + + def run(self): + # returns TaskResult + pass + + + diff --git a/v2/ansible/executor/TaskQueueManager.py b/v2/ansible/executor/TaskQueueManager.py index 1f84012e01..1016f35104 100644 --- a/v2/ansible/executor/TaskQueueManager.py +++ b/v2/ansible/executor/TaskQueueManager.py @@ -14,3 +14,19 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+ +class TaskQueueManagerHostPlaybookIterator(object): + + def __init__(self, host, playbook): + pass + + def get_next_task(self): + assert False + + def is_blocked(self): + # depending on strategy, either + # ‘linear’ -- all prev tasks must be completed for all hosts + # ‘free’ -- this host doesn’t have any more work to do + assert False + + From c9abd0fd580eb0ad5d0cb101c0e9fb2d53003de9 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 15 Oct 2014 18:56:23 -0400 Subject: [PATCH 236/813] Stub out Inventory class - we're going to draw heavily on the existing inventory for implementation --- v2/ansible/inventory/__init__.py | 75 ++++++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) create mode 100644 v2/ansible/inventory/__init__.py diff --git a/v2/ansible/inventory/__init__.py b/v2/ansible/inventory/__init__.py new file mode 100644 index 0000000000..82da01f68c --- /dev/null +++ b/v2/ansible/inventory/__init__.py @@ -0,0 +1,75 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +############################################# + +class Inventory(object): + def __init__(self, host_list=C.DEFAULT_HOST_LIST, vault_password=None): + pass + def get_hosts(self, pattern="all"): + pass + def clear_pattern_cache(self): + pass + def groups_for_host(self, host): + pass + def groups_list(self): + pass + def get_groups(self): + pass + def get_host(self, hostname): + pass + def get_group(self, groupname): + pass + def get_group_variables(self, groupname, update_cached=False, vault_password=None): + pass + def get_variables(self, hostname, update_cached=False, vault_password=None): + pass + def get_host_variables(self, hostname, update_cached=False, vault_password=None): + pass + def add_group(self, group): + pass + def list_hosts(self, pattern="all"): + pass + def list_groups(self): + pass + def get_restriction(self): + pass + def restrict_to(self, restriction): + pass + def also_restrict_to(self, restriction): + pass + def subset(self, subset_pattern): + pass + def lift_restriction(self): + pass + def lift_also_restriction(self): + pass + def is_file(self): + pass + def basedir(self): + pass + def src(self): + pass + def playbook_basedir(self): + pass + def set_playbook_basedir(self, dir): + pass + def get_host_vars(self, host, new_pb_basedir=False): + pass + def get_group_vars(self, group, new_pb_basedir=False): + pass + From ff8042c5c39f01f164f946fe5cdec058dddb07f6 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 15 Oct 2014 19:18:12 -0400 Subject: [PATCH 237/813] Add python3 compat boilerplate to executor --- v2/ansible/executor/HostLog.py | 4 ++++ v2/ansible/executor/HostLogManager.py | 6 +++++- v2/ansible/executor/HostPlaybookIterator.py | 4 ++++ v2/ansible/executor/PlaybookExecutor.py | 4 ++++ v2/ansible/executor/TaskExecutor.py | 4 ++++ v2/ansible/executor/TaskQueueManager.py | 4 ++++ v2/ansible/executor/TaskResult.py | 5 +++++ v2/ansible/executor/TemplateEngine.py | 5 +++++ v2/ansible/executor/VariableCache.py | 5 +++++ 
v2/ansible/executor/VariableManager.py | 5 +++++ 10 files changed, 45 insertions(+), 1 deletion(-) diff --git a/v2/ansible/executor/HostLog.py b/v2/ansible/executor/HostLog.py index 21d3feb2bf..9c0565b199 100644 --- a/v2/ansible/executor/HostLog.py +++ b/v2/ansible/executor/HostLog.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + class HostLog(object): def __init__(self, host): diff --git a/v2/ansible/executor/HostLogManager.py b/v2/ansible/executor/HostLogManager.py index 81cf520cf1..727d06ce59 100644 --- a/v2/ansible/executor/HostLogManager.py +++ b/v2/ansible/executor/HostLogManager.py @@ -15,7 +15,11 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -class HostLogManager(object): +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +class HostLogManager: def __init__(self): pass diff --git a/v2/ansible/executor/HostPlaybookIterator.py b/v2/ansible/executor/HostPlaybookIterator.py index 5292b6c72b..22189583f9 100644 --- a/v2/ansible/executor/HostPlaybookIterator.py +++ b/v2/ansible/executor/HostPlaybookIterator.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + class HostPlaybookIterator(object): def __init__(self, host, playbook): diff --git a/v2/ansible/executor/PlaybookExecutor.py b/v2/ansible/executor/PlaybookExecutor.py index 7a7ed3d293..0f57b4bbed 100644 --- a/v2/ansible/executor/PlaybookExecutor.py +++ b/v2/ansible/executor/PlaybookExecutor.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + class PlaybookExecutor(object): def __init__(self, list_of_plays=[]): diff --git a/v2/ansible/executor/TaskExecutor.py b/v2/ansible/executor/TaskExecutor.py index 0591931372..b3156bc262 100644 --- a/v2/ansible/executor/TaskExecutor.py +++ b/v2/ansible/executor/TaskExecutor.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + class TaskExecutor(object): def __init__(self, task, host): diff --git a/v2/ansible/executor/TaskQueueManager.py b/v2/ansible/executor/TaskQueueManager.py index 1016f35104..93066f6b40 100644 --- a/v2/ansible/executor/TaskQueueManager.py +++ b/v2/ansible/executor/TaskQueueManager.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + class TaskQueueManagerHostPlaybookIterator(object): def __init__(self, host, playbook): diff --git a/v2/ansible/executor/TaskResult.py b/v2/ansible/executor/TaskResult.py index 1f84012e01..785fc45992 100644 --- a/v2/ansible/executor/TaskResult.py +++ b/v2/ansible/executor/TaskResult.py @@ -14,3 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + diff --git a/v2/ansible/executor/TemplateEngine.py b/v2/ansible/executor/TemplateEngine.py index 1f84012e01..785fc45992 100644 --- a/v2/ansible/executor/TemplateEngine.py +++ b/v2/ansible/executor/TemplateEngine.py @@ -14,3 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + diff --git a/v2/ansible/executor/VariableCache.py b/v2/ansible/executor/VariableCache.py index 1f84012e01..785fc45992 100644 --- a/v2/ansible/executor/VariableCache.py +++ b/v2/ansible/executor/VariableCache.py @@ -14,3 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + diff --git a/v2/ansible/executor/VariableManager.py b/v2/ansible/executor/VariableManager.py index 1f84012e01..785fc45992 100644 --- a/v2/ansible/executor/VariableManager.py +++ b/v2/ansible/executor/VariableManager.py @@ -14,3 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + From 18f30b00ba1406cae64cd3e0edbcdd5f5ceb7381 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 15 Oct 2014 19:22:54 -0400 Subject: [PATCH 238/813] Add python3-compat boiler to all core files --- v2/ansible/__init__.py | 4 ++++ v2/ansible/config/__init__.py | 3 +++ v2/ansible/constants.py | 5 +++++ v2/ansible/errors/__init__.py | 4 ++++ v2/ansible/inventory/__init__.py | 7 +++++++ v2/ansible/modules/__init__.py | 4 ++++ v2/ansible/parsing/__init__.py | 4 ++++ v2/ansible/parsing/mod_args.py | 4 ++++ v2/ansible/parsing/splitter.py | 4 ++++ v2/ansible/parsing/yaml/__init__.py | 21 ++++++++++++++++++++ v2/ansible/parsing/yaml/composer.py | 21 ++++++++++++++++++++ v2/ansible/parsing/yaml/constructor.py | 21 ++++++++++++++++++++ v2/ansible/parsing/yaml/loader.py | 21 ++++++++++++++++++++ v2/ansible/parsing/yaml/objects.py | 23 +++++++++++++++++++++- v2/ansible/playbook/__init__.py | 4 ++++ v2/ansible/playbook/attribute.py | 4 ++++ v2/ansible/playbook/base.py | 4 ++++ v2/ansible/playbook/block.py | 4 ++++ v2/ansible/playbook/conditional.py | 4 ++++ v2/ansible/playbook/handler.py | 4 ++++ v2/ansible/playbook/include.py | 3 +++ v2/ansible/playbook/play.py | 3 +++ v2/ansible/playbook/playbook_include.py | 3 +++ v2/ansible/playbook/role.py | 4 ++++ v2/ansible/playbook/tag.py | 4 ++++ v2/ansible/playbook/task.py | 4 ++++ v2/ansible/playbook/task_include.py | 4 ++++ v2/ansible/playbook/vars.py | 4 ++++ v2/ansible/playbook/vars_file.py | 4 ++++ v2/ansible/plugins/__init__.py | 4 ++++ v2/ansible/plugins/action/__init__.py | 4 ++++ v2/ansible/plugins/callback/__init__.py | 4 ++++ v2/ansible/plugins/connections/__init__.py | 4 ++++ v2/ansible/plugins/filter/__init__.py | 4 ++++ v2/ansible/plugins/inventory/__init__.py | 4 ++++ v2/ansible/plugins/lookup/__init__.py | 4 ++++ v2/ansible/plugins/shell/__init__.py | 4 ++++ 
v2/ansible/plugins/vars/__init__.py | 4 ++++ 38 files changed, 238 insertions(+), 1 deletion(-) diff --git a/v2/ansible/__init__.py b/v2/ansible/__init__.py index 1f84012e01..ae8ccff595 100644 --- a/v2/ansible/__init__.py +++ b/v2/ansible/__init__.py @@ -14,3 +14,7 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type diff --git a/v2/ansible/config/__init__.py b/v2/ansible/config/__init__.py index d6c11ffa74..ae8ccff595 100644 --- a/v2/ansible/config/__init__.py +++ b/v2/ansible/config/__init__.py @@ -15,3 +15,6 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type diff --git a/v2/ansible/constants.py b/v2/ansible/constants.py index d1d9b69fd3..97d6870a3d 100644 --- a/v2/ansible/constants.py +++ b/v2/ansible/constants.py @@ -15,9 +15,14 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + import os import pwd import sys + try: import configparser except ImportError: diff --git a/v2/ansible/errors/__init__.py b/v2/ansible/errors/__init__.py index 54406ef6c2..ccc8a1a203 100644 --- a/v2/ansible/errors/__init__.py +++ b/v2/ansible/errors/__init__.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + import os from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject diff --git a/v2/ansible/inventory/__init__.py b/v2/ansible/inventory/__init__.py index 82da01f68c..7fc8112807 100644 --- a/v2/ansible/inventory/__init__.py +++ b/v2/ansible/inventory/__init__.py @@ -17,12 +17,17 @@ ############################################# +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + class Inventory(object): def __init__(self, host_list=C.DEFAULT_HOST_LIST, vault_password=None): pass def get_hosts(self, pattern="all"): pass def clear_pattern_cache(self): + # Possibly not needed? pass def groups_for_host(self, host): pass @@ -55,8 +60,10 @@ class Inventory(object): def subset(self, subset_pattern): pass def lift_restriction(self): + # HACK -- pass def lift_also_restriction(self): + # HACK -- dead host skipping pass def is_file(self): pass diff --git a/v2/ansible/modules/__init__.py b/v2/ansible/modules/__init__.py index 1f84012e01..ae8ccff595 100644 --- a/v2/ansible/modules/__init__.py +++ b/v2/ansible/modules/__init__.py @@ -14,3 +14,7 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type diff --git a/v2/ansible/parsing/__init__.py b/v2/ansible/parsing/__init__.py index 232d95b834..4641623c03 100644 --- a/v2/ansible/parsing/__init__.py +++ b/v2/ansible/parsing/__init__.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + from ansible.errors import AnsibleError, AnsibleInternalError def load(self, data): diff --git a/v2/ansible/parsing/mod_args.py b/v2/ansible/parsing/mod_args.py index 534aca8cd8..2a29c43742 100644 --- a/v2/ansible/parsing/mod_args.py +++ b/v2/ansible/parsing/mod_args.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + from six import iteritems, string_types from ansible.errors import AnsibleParserError diff --git a/v2/ansible/parsing/splitter.py b/v2/ansible/parsing/splitter.py index d96a8c313a..470ab90c3c 100644 --- a/v2/ansible/parsing/splitter.py +++ b/v2/ansible/parsing/splitter.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + def parse_kv(args, check_raw=False): ''' Convert a string of key/value items to a dict. If any free-form params diff --git a/v2/ansible/parsing/yaml/__init__.py b/v2/ansible/parsing/yaml/__init__.py index af492d2a73..6cc55bfc84 100644 --- a/v2/ansible/parsing/yaml/__init__.py +++ b/v2/ansible/parsing/yaml/__init__.py @@ -1,3 +1,24 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + from yaml import load from ansible.parsing.yaml.loader import AnsibleLoader diff --git a/v2/ansible/parsing/yaml/composer.py b/v2/ansible/parsing/yaml/composer.py index b0acc08a24..0f9c90606f 100644 --- a/v2/ansible/parsing/yaml/composer.py +++ b/v2/ansible/parsing/yaml/composer.py @@ -1,3 +1,24 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + from yaml.composer import Composer from yaml.nodes import MappingNode diff --git a/v2/ansible/parsing/yaml/constructor.py b/v2/ansible/parsing/yaml/constructor.py index 8d0ed2b8f6..1e94b808fa 100644 --- a/v2/ansible/parsing/yaml/constructor.py +++ b/v2/ansible/parsing/yaml/constructor.py @@ -1,3 +1,24 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + from yaml.constructor import Constructor from ansible.parsing.yaml.objects import AnsibleMapping diff --git a/v2/ansible/parsing/yaml/loader.py b/v2/ansible/parsing/yaml/loader.py index 9b15a7f3c1..f75e5b4b27 100644 --- a/v2/ansible/parsing/yaml/loader.py +++ b/v2/ansible/parsing/yaml/loader.py @@ -1,3 +1,24 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + from yaml.reader import Reader from yaml.scanner import Scanner from yaml.parser import Parser diff --git a/v2/ansible/parsing/yaml/objects.py b/v2/ansible/parsing/yaml/objects.py index 5870ea8cbe..6ea8f196f1 100644 --- a/v2/ansible/parsing/yaml/objects.py +++ b/v2/ansible/parsing/yaml/objects.py @@ -1,8 +1,29 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + class AnsibleBaseYAMLObject(object): ''' the base class used to sub-class python built-in objects so that we can add attributes to them during yaml parsing - + ''' _data_source = None _line_number = None diff --git a/v2/ansible/playbook/__init__.py b/v2/ansible/playbook/__init__.py index d2430dfc0c..fa7ec5faa5 100644 --- a/v2/ansible/playbook/__init__.py +++ b/v2/ansible/playbook/__init__.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + class Playbook(object): def __init__(self, filename): self.ds = v2.utils.load_yaml_from_file(filename) diff --git a/v2/ansible/playbook/attribute.py b/v2/ansible/playbook/attribute.py index 45d596fafe..d029c3e610 100644 --- a/v2/ansible/playbook/attribute.py +++ b/v2/ansible/playbook/attribute.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + #from ansible.common.errors import AnsibleError class Attribute(object): diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py index 3d236df761..6fe6bc1d2d 100644 --- a/v2/ansible/playbook/base.py +++ b/v2/ansible/playbook/base.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + from io import FileIO from six import iteritems, string_types diff --git a/v2/ansible/playbook/block.py b/v2/ansible/playbook/block.py index c5ab518efa..70af215923 100644 --- a/v2/ansible/playbook/block.py +++ b/v2/ansible/playbook/block.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + from v2.playbook.base import PlaybookBase class Block(PlaybookBase): diff --git a/v2/ansible/playbook/conditional.py b/v2/ansible/playbook/conditional.py index 56028e29ea..b3770a276e 100644 --- a/v2/ansible/playbook/conditional.py +++ b/v2/ansible/playbook/conditional.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + class Conditional(object): def __init__(self, task): diff --git a/v2/ansible/playbook/handler.py b/v2/ansible/playbook/handler.py index 42d937ec46..1f7aa957a5 100644 --- a/v2/ansible/playbook/handler.py +++ b/v2/ansible/playbook/handler.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + from v2.errors import AnsibleError from v2.inventory import Host from v2.playbook import Task diff --git a/v2/ansible/playbook/include.py b/v2/ansible/playbook/include.py index d6c11ffa74..ae8ccff595 100644 --- a/v2/ansible/playbook/include.py +++ b/v2/ansible/playbook/include.py @@ -15,3 +15,6 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type diff --git a/v2/ansible/playbook/play.py b/v2/ansible/playbook/play.py index d6c11ffa74..ae8ccff595 100644 --- a/v2/ansible/playbook/play.py +++ b/v2/ansible/playbook/play.py @@ -15,3 +15,6 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type diff --git a/v2/ansible/playbook/playbook_include.py b/v2/ansible/playbook/playbook_include.py index d6c11ffa74..ae8ccff595 100644 --- a/v2/ansible/playbook/playbook_include.py +++ b/v2/ansible/playbook/playbook_include.py @@ -15,3 +15,6 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type diff --git a/v2/ansible/playbook/role.py b/v2/ansible/playbook/role.py index dd1426ee8a..f36207874d 100644 --- a/v2/ansible/playbook/role.py +++ b/v2/ansible/playbook/role.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + from v2.playbook.base import PlaybookBase from v2.utils import list_union diff --git a/v2/ansible/playbook/tag.py b/v2/ansible/playbook/tag.py index a992f8dee0..50dae41172 100644 --- a/v2/ansible/playbook/tag.py +++ b/v2/ansible/playbook/tag.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + from errors import AnsibleError from ansible.utils import list_union diff --git a/v2/ansible/playbook/task.py b/v2/ansible/playbook/task.py index de75a0ec9c..91ca7558d6 100644 --- a/v2/ansible/playbook/task.py +++ b/v2/ansible/playbook/task.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + from ansible.playbook.base import Base from ansible.playbook.attribute import Attribute, FieldAttribute diff --git a/v2/ansible/playbook/task_include.py b/v2/ansible/playbook/task_include.py index d6c11ffa74..785fc45992 100644 --- a/v2/ansible/playbook/task_include.py +++ b/v2/ansible/playbook/task_include.py @@ -15,3 +15,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + diff --git a/v2/ansible/playbook/vars.py b/v2/ansible/playbook/vars.py index d6c11ffa74..785fc45992 100644 --- a/v2/ansible/playbook/vars.py +++ b/v2/ansible/playbook/vars.py @@ -15,3 +15,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + diff --git a/v2/ansible/playbook/vars_file.py b/v2/ansible/playbook/vars_file.py index d6c11ffa74..785fc45992 100644 --- a/v2/ansible/playbook/vars_file.py +++ b/v2/ansible/playbook/vars_file.py @@ -15,3 +15,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + diff --git a/v2/ansible/plugins/__init__.py b/v2/ansible/plugins/__init__.py index faa284ce16..445fbd539f 100644 --- a/v2/ansible/plugins/__init__.py +++ b/v2/ansible/plugins/__init__.py @@ -16,6 +16,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + import os import os.path import sys diff --git a/v2/ansible/plugins/action/__init__.py b/v2/ansible/plugins/action/__init__.py index d6c11ffa74..785fc45992 100644 --- a/v2/ansible/plugins/action/__init__.py +++ b/v2/ansible/plugins/action/__init__.py @@ -15,3 +15,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + diff --git a/v2/ansible/plugins/callback/__init__.py b/v2/ansible/plugins/callback/__init__.py index d6c11ffa74..785fc45992 100644 --- a/v2/ansible/plugins/callback/__init__.py +++ b/v2/ansible/plugins/callback/__init__.py @@ -15,3 +15,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + diff --git a/v2/ansible/plugins/connections/__init__.py b/v2/ansible/plugins/connections/__init__.py index d6c11ffa74..785fc45992 100644 --- a/v2/ansible/plugins/connections/__init__.py +++ b/v2/ansible/plugins/connections/__init__.py @@ -15,3 +15,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + diff --git a/v2/ansible/plugins/filter/__init__.py b/v2/ansible/plugins/filter/__init__.py index d6c11ffa74..785fc45992 100644 --- a/v2/ansible/plugins/filter/__init__.py +++ b/v2/ansible/plugins/filter/__init__.py @@ -15,3 +15,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + diff --git a/v2/ansible/plugins/inventory/__init__.py b/v2/ansible/plugins/inventory/__init__.py index d6c11ffa74..785fc45992 100644 --- a/v2/ansible/plugins/inventory/__init__.py +++ b/v2/ansible/plugins/inventory/__init__.py @@ -15,3 +15,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + diff --git a/v2/ansible/plugins/lookup/__init__.py b/v2/ansible/plugins/lookup/__init__.py index d6c11ffa74..785fc45992 100644 --- a/v2/ansible/plugins/lookup/__init__.py +++ b/v2/ansible/plugins/lookup/__init__.py @@ -15,3 +15,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + diff --git a/v2/ansible/plugins/shell/__init__.py b/v2/ansible/plugins/shell/__init__.py index d6c11ffa74..785fc45992 100644 --- a/v2/ansible/plugins/shell/__init__.py +++ b/v2/ansible/plugins/shell/__init__.py @@ -15,3 +15,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + diff --git a/v2/ansible/plugins/vars/__init__.py b/v2/ansible/plugins/vars/__init__.py index d6c11ffa74..785fc45992 100644 --- a/v2/ansible/plugins/vars/__init__.py +++ b/v2/ansible/plugins/vars/__init__.py @@ -15,3 +15,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + From acf4bc95a3596256c552944c99011dc8de20e2f5 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 15 Oct 2014 19:25:48 -0400 Subject: [PATCH 239/813] Give all v2 tests the python3 compat boilerplate --- v2/test/__init__.py | 5 +++++ v2/test/compat.py | 4 ++++ v2/test/errors/__init__.py | 4 ++++ v2/test/errors/test_errors.py | 4 ++++ v2/test/parsing/__init__.py | 5 +++++ v2/test/parsing/test_general.py | 4 ++++ v2/test/parsing/test_mod_args.py | 4 ++++ v2/test/parsing/yaml/__init__.py | 5 +++++ v2/test/parsing/yaml/test_yaml.py | 4 ++++ v2/test/playbook/__init__.py | 5 +++++ v2/test/playbook/test_task.py | 4 ++++ 11 files changed, 48 insertions(+) diff --git a/v2/test/__init__.py b/v2/test/__init__.py index e69de29bb2..e7489db6fb 100644 --- a/v2/test/__init__.py +++ b/v2/test/__init__.py @@ -0,0 +1,5 @@ + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + diff --git 
a/v2/test/compat.py b/v2/test/compat.py index 9fe93b7763..6930de94b0 100644 --- a/v2/test/compat.py +++ b/v2/test/compat.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + import sys import unittest diff --git a/v2/test/errors/__init__.py b/v2/test/errors/__init__.py index 674334b15a..20207b272d 100644 --- a/v2/test/errors/__init__.py +++ b/v2/test/errors/__init__.py @@ -15,4 +15,8 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + diff --git a/v2/test/errors/test_errors.py b/v2/test/errors/test_errors.py index f67af449dc..42174ae08c 100644 --- a/v2/test/errors/test_errors.py +++ b/v2/test/errors/test_errors.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + from .. compat import unittest from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject diff --git a/v2/test/parsing/__init__.py b/v2/test/parsing/__init__.py index 1f84012e01..785fc45992 100644 --- a/v2/test/parsing/__init__.py +++ b/v2/test/parsing/__init__.py @@ -14,3 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + diff --git a/v2/test/parsing/test_general.py b/v2/test/parsing/test_general.py index d003f15d2b..0a150e1a23 100644 --- a/v2/test/parsing/test_general.py +++ b/v2/test/parsing/test_general.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + from .. compat import unittest from ansible.parsing import load from ansible.errors import AnsibleParserError diff --git a/v2/test/parsing/test_mod_args.py b/v2/test/parsing/test_mod_args.py index 4c7d5b5f9e..e593522aa8 100644 --- a/v2/test/parsing/test_mod_args.py +++ b/v2/test/parsing/test_mod_args.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + from ansible.parsing.mod_args import ModuleArgsParser from .. compat import unittest diff --git a/v2/test/parsing/yaml/__init__.py b/v2/test/parsing/yaml/__init__.py index 1f84012e01..785fc45992 100644 --- a/v2/test/parsing/yaml/__init__.py +++ b/v2/test/parsing/yaml/__init__.py @@ -14,3 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + diff --git a/v2/test/parsing/yaml/test_yaml.py b/v2/test/parsing/yaml/test_yaml.py index 61859b0d16..6b1d09d741 100644 --- a/v2/test/parsing/yaml/test_yaml.py +++ b/v2/test/parsing/yaml/test_yaml.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + from ... compat import unittest from yaml.scanner import ScannerError diff --git a/v2/test/playbook/__init__.py b/v2/test/playbook/__init__.py index 1f84012e01..785fc45992 100644 --- a/v2/test/playbook/__init__.py +++ b/v2/test/playbook/__init__.py @@ -14,3 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + diff --git a/v2/test/playbook/test_task.py b/v2/test/playbook/test_task.py index 286437b4d4..487bca207b 100644 --- a/v2/test/playbook/test_task.py +++ b/v2/test/playbook/test_task.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + from ansible.playbook.task import Task from .. 
compat import unittest From eafa718be17f2a96e0565fe1b2f400ee96989c22 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 15 Oct 2014 19:37:29 -0400 Subject: [PATCH 240/813] Remove inherit from object as it's no longer needed to determine new-style classes --- v2/ansible/executor/HostLog.py | 2 +- v2/ansible/executor/HostPlaybookIterator.py | 2 +- v2/ansible/executor/PlaybookExecutor.py | 2 +- v2/ansible/executor/TaskExecutor.py | 2 +- v2/ansible/executor/TaskQueueManager.py | 2 +- v2/ansible/inventory/__init__.py | 2 +- v2/ansible/parsing/mod_args.py | 2 +- v2/ansible/parsing/yaml/objects.py | 2 +- v2/ansible/playbook/__init__.py | 2 +- v2/ansible/playbook/attribute.py | 2 +- v2/ansible/playbook/base.py | 2 +- v2/ansible/playbook/conditional.py | 2 +- v2/ansible/playbook/tag.py | 2 +- v2/ansible/plugins/__init__.py | 2 +- 14 files changed, 14 insertions(+), 14 deletions(-) diff --git a/v2/ansible/executor/HostLog.py b/v2/ansible/executor/HostLog.py index 9c0565b199..495ad79f7d 100644 --- a/v2/ansible/executor/HostLog.py +++ b/v2/ansible/executor/HostLog.py @@ -19,7 +19,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -class HostLog(object): +class HostLog: def __init__(self, host): self.host = host diff --git a/v2/ansible/executor/HostPlaybookIterator.py b/v2/ansible/executor/HostPlaybookIterator.py index 22189583f9..07fab06714 100644 --- a/v2/ansible/executor/HostPlaybookIterator.py +++ b/v2/ansible/executor/HostPlaybookIterator.py @@ -19,7 +19,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -class HostPlaybookIterator(object): +class HostPlaybookIterator: def __init__(self, host, playbook): pass diff --git a/v2/ansible/executor/PlaybookExecutor.py b/v2/ansible/executor/PlaybookExecutor.py index 0f57b4bbed..7031e51142 100644 --- a/v2/ansible/executor/PlaybookExecutor.py +++ b/v2/ansible/executor/PlaybookExecutor.py @@ -19,7 +19,7 @@ from __future__ import 
(absolute_import, division, print_function) __metaclass__ = type -class PlaybookExecutor(object): +class PlaybookExecutor: def __init__(self, list_of_plays=[]): # self.tqm = TaskQueueManager(forks) diff --git a/v2/ansible/executor/TaskExecutor.py b/v2/ansible/executor/TaskExecutor.py index b3156bc262..878c15c489 100644 --- a/v2/ansible/executor/TaskExecutor.py +++ b/v2/ansible/executor/TaskExecutor.py @@ -19,7 +19,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -class TaskExecutor(object): +class TaskExecutor: def __init__(self, task, host): pass diff --git a/v2/ansible/executor/TaskQueueManager.py b/v2/ansible/executor/TaskQueueManager.py index 93066f6b40..a79235bfd0 100644 --- a/v2/ansible/executor/TaskQueueManager.py +++ b/v2/ansible/executor/TaskQueueManager.py @@ -19,7 +19,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -class TaskQueueManagerHostPlaybookIterator(object): +class TaskQueueManagerHostPlaybookIterator: def __init__(self, host, playbook): pass diff --git a/v2/ansible/inventory/__init__.py b/v2/ansible/inventory/__init__.py index 7fc8112807..11f7e35c8b 100644 --- a/v2/ansible/inventory/__init__.py +++ b/v2/ansible/inventory/__init__.py @@ -21,7 +21,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -class Inventory(object): +class Inventory: def __init__(self, host_list=C.DEFAULT_HOST_LIST, vault_password=None): pass def get_hosts(self, pattern="all"): diff --git a/v2/ansible/parsing/mod_args.py b/v2/ansible/parsing/mod_args.py index 2a29c43742..4c452b4edf 100644 --- a/v2/ansible/parsing/mod_args.py +++ b/v2/ansible/parsing/mod_args.py @@ -25,7 +25,7 @@ from ansible.errors import AnsibleParserError from ansible.plugins import module_finder from ansible.parsing.splitter import parse_kv -class ModuleArgsParser(object): +class ModuleArgsParser: """ There are several ways a module and argument set can be expressed: diff 
--git a/v2/ansible/parsing/yaml/objects.py b/v2/ansible/parsing/yaml/objects.py index 6ea8f196f1..6fb1631b87 100644 --- a/v2/ansible/parsing/yaml/objects.py +++ b/v2/ansible/parsing/yaml/objects.py @@ -19,7 +19,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -class AnsibleBaseYAMLObject(object): +class AnsibleBaseYAMLObject: ''' the base class used to sub-class python built-in objects so that we can add attributes to them during yaml parsing diff --git a/v2/ansible/playbook/__init__.py b/v2/ansible/playbook/__init__.py index fa7ec5faa5..87b422b280 100644 --- a/v2/ansible/playbook/__init__.py +++ b/v2/ansible/playbook/__init__.py @@ -19,7 +19,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -class Playbook(object): +class Playbook: def __init__(self, filename): self.ds = v2.utils.load_yaml_from_file(filename) self.plays = [] diff --git a/v2/ansible/playbook/attribute.py b/v2/ansible/playbook/attribute.py index d029c3e610..d28ba4cc85 100644 --- a/v2/ansible/playbook/attribute.py +++ b/v2/ansible/playbook/attribute.py @@ -21,7 +21,7 @@ __metaclass__ = type #from ansible.common.errors import AnsibleError -class Attribute(object): +class Attribute: def __init__(self, isa=None, private=False, default=None): diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py index 6fe6bc1d2d..cf075b020e 100644 --- a/v2/ansible/playbook/base.py +++ b/v2/ansible/playbook/base.py @@ -26,7 +26,7 @@ from six import iteritems, string_types from ansible.playbook.attribute import Attribute, FieldAttribute from ansible.parsing import load as ds_load -class Base(object): +class Base: def __init__(self): diff --git a/v2/ansible/playbook/conditional.py b/v2/ansible/playbook/conditional.py index b3770a276e..b921d4191a 100644 --- a/v2/ansible/playbook/conditional.py +++ b/v2/ansible/playbook/conditional.py @@ -19,7 +19,7 @@ from __future__ import (absolute_import, division, print_function) 
__metaclass__ = type -class Conditional(object): +class Conditional: def __init__(self, task): self._task = task diff --git a/v2/ansible/playbook/tag.py b/v2/ansible/playbook/tag.py index 50dae41172..cb3e3f9291 100644 --- a/v2/ansible/playbook/tag.py +++ b/v2/ansible/playbook/tag.py @@ -22,7 +22,7 @@ __metaclass__ = type from errors import AnsibleError from ansible.utils import list_union -class Tag(object): +class Tag: def __init__(self, tags=[]): assert isinstance(tags, list) self._tags = tags diff --git a/v2/ansible/plugins/__init__.py b/v2/ansible/plugins/__init__.py index 445fbd539f..958ed2850f 100644 --- a/v2/ansible/plugins/__init__.py +++ b/v2/ansible/plugins/__init__.py @@ -39,7 +39,7 @@ def push_basedir(basedir): if basedir not in _basedirs: _basedirs.insert(0, basedir) -class PluginLoader(object): +class PluginLoader: ''' PluginLoader loads plugins from the configured plugin directories. From 1d04e4b3d27b0ae16619eda4136a939fe6f3299c Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 15 Oct 2014 17:59:51 -0700 Subject: [PATCH 241/813] Updating unittests for v2 stuff --- test/v2/plugins/__init__.py | 16 +++++++ test/v2/plugins/test_plugins.py | 75 +++++++++++++++++++++++++++++++++ v2/ansible/errors/__init__.py | 4 +- v2/ansible/plugins/__init__.py | 1 - v2/test/errors/test_errors.py | 39 +++++++++++++---- 5 files changed, 122 insertions(+), 13 deletions(-) create mode 100644 test/v2/plugins/__init__.py create mode 100644 test/v2/plugins/test_plugins.py diff --git a/test/v2/plugins/__init__.py b/test/v2/plugins/__init__.py new file mode 100644 index 0000000000..1f84012e01 --- /dev/null +++ b/test/v2/plugins/__init__.py @@ -0,0 +1,16 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . diff --git a/test/v2/plugins/test_plugins.py b/test/v2/plugins/test_plugins.py new file mode 100644 index 0000000000..62d8ee4dfb --- /dev/null +++ b/test/v2/plugins/test_plugins.py @@ -0,0 +1,75 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +import os +import unittest + +from mock import mock_open, patch, MagicMock + +from ansible.plugins import MODULE_CACHE, PATH_CACHE, PLUGIN_PATH_CACHE, _basedirs, push_basedir, PluginLoader + +class TestErrors(unittest.TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_push_basedir(self): + push_basedir('/root/foo/bar') + self.assertEqual(_basedirs, ['/root/foo/bar']) + + @patch.object(PluginLoader, '_get_paths') + def test_print_paths(self, mock_method): + mock_method.return_value = ['/path/one', '/path/two', '/path/three'] + pl = PluginLoader('foo', 'foo', '', 'test_plugins') + paths = pl.print_paths() + expected_paths = os.pathsep.join(['/path/one', '/path/two', '/path/three']) + self.assertEqual(paths, expected_paths) + + def test_plugins__get_package_paths_no_package(self): + pl = PluginLoader('test', '', 'test', 'test_plugin') + self.assertEqual(pl._get_package_paths(), []) + + def test_plugins__get_package_paths_with_package(self): + # the _get_package_paths() call uses __import__ to load a + # python library, and then uses the __file__ attribute of + # the result for that to get the library path, so we mock + # that here and patch the builtin to use our mocked result + m = MagicMock() + m.return_value.__file__ = '/path/to/my/test.py' + pl = PluginLoader('test', 'foo.bar.bam', 'test', 'test_plugin') + with patch('__builtin__.__import__', m): + self.assertEqual(pl._get_package_paths(), ['/path/to/my/bar/bam']) + + def test_plugins__get_paths(self): + pl = PluginLoader('test', '', 'test', 'test_plugin') + pl._paths = ['/path/one', '/path/two'] + self.assertEqual(pl._get_paths(), ['/path/one', '/path/two']) + + # NOT YET WORKING + #def fake_glob(path): + # if path == 'test/*': + # return ['test/foo', 'test/bar', 'test/bam'] + # elif path == 'test/*/*' + #m._paths = None + #mock_glob = MagicMock() + #mock_glob.return_value = [] + #with patch('glob.glob', mock_glob): + # pass + diff --git a/v2/ansible/errors/__init__.py 
b/v2/ansible/errors/__init__.py index ccc8a1a203..a0ae94111a 100644 --- a/v2/ansible/errors/__init__.py +++ b/v2/ansible/errors/__init__.py @@ -38,9 +38,7 @@ class AnsibleError(Exception): def _get_line_from_file(self, filename, line_number): with open(filename, 'r') as f: lines = f.readlines() - if line_number < len(lines): - return lines[line_number] - return None + return lines[line_number] def _get_extended_error(self): error_message = '' diff --git a/v2/ansible/plugins/__init__.py b/v2/ansible/plugins/__init__.py index 958ed2850f..5ab704b8a1 100644 --- a/v2/ansible/plugins/__init__.py +++ b/v2/ansible/plugins/__init__.py @@ -112,7 +112,6 @@ class PluginLoader: for basedir in _basedirs: fullpath = os.path.realpath(os.path.join(basedir, self.subdir)) if os.path.isdir(fullpath): - files = glob.glob("%s/*" % fullpath) # allow directories to be two levels deep diff --git a/v2/test/errors/test_errors.py b/v2/test/errors/test_errors.py index 42174ae08c..60ba7eb858 100644 --- a/v2/test/errors/test_errors.py +++ b/v2/test/errors/test_errors.py @@ -31,22 +31,43 @@ class TestErrors(unittest.TestCase): def setUp(self): self.message = 'this is the error message' + self.obj = AnsibleBaseYAMLObject() + def tearDown(self): pass def test_basic_error(self): e = AnsibleError(self.message) self.assertEqual(e.message, self.message) + self.assertEqual(e.__repr__(), self.message) - def test_error_with_object(self): - obj = AnsibleBaseYAMLObject() - obj._data_source = 'foo.yml' - obj._line_number = 1 - obj._column_number = 1 + @patch.object(AnsibleError, '_get_line_from_file') + def test_error_with_object(self, mock_method): + self.obj._data_source = 'foo.yml' + self.obj._line_number = 1 + self.obj._column_number = 1 - m = mock_open() - m.return_value.readlines.return_value = ['this is line 1\n', 'this is line 2\n', 'this is line 3\n'] - with patch('{0}.open'.format(BUILTINS), m): - e = AnsibleError(self.message, obj) + mock_method.return_value = 'this is line 1\n' + e = 
AnsibleError(self.message, self.obj) self.assertEqual(e.message, 'this is the error message\nThe error occurred on line 1 of the file foo.yml:\nthis is line 1\n^') + + def test_error_get_line_from_file(self): + m = mock_open() + m.return_value.readlines.return_value = ['this is line 1\n'] + + with patch('__builtin__.open', m): + # this line will be found in the file + self.obj._data_source = 'foo.yml' + self.obj._line_number = 1 + self.obj._column_number = 1 + e = AnsibleError(self.message, self.obj) + self.assertEqual(e.message, 'this is the error message\nThe error occurred on line 1 of the file foo.yml:\nthis is line 1\n^') + + # this line will not be found, as it is out of the index range + self.obj._data_source = 'foo.yml' + self.obj._line_number = 2 + self.obj._column_number = 1 + e = AnsibleError(self.message, self.obj) + self.assertEqual(e.message, 'this is the error message\nThe error occurred on line 2 of the file foo.yml:\n\n(specified line no longer in file, maybe it changed?)') + From 21577ff2ccbc0d84b4f19e416194d4dd1f201150 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 16 Oct 2014 08:55:44 -0700 Subject: [PATCH 242/813] Moved v2 tests for plugins to new location --- {test/v2 => v2/test}/plugins/__init__.py | 0 {test/v2 => v2/test}/plugins/test_plugins.py | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename {test/v2 => v2/test}/plugins/__init__.py (100%) rename {test/v2 => v2/test}/plugins/test_plugins.py (100%) diff --git a/test/v2/plugins/__init__.py b/v2/test/plugins/__init__.py similarity index 100% rename from test/v2/plugins/__init__.py rename to v2/test/plugins/__init__.py diff --git a/test/v2/plugins/test_plugins.py b/v2/test/plugins/test_plugins.py similarity index 100% rename from test/v2/plugins/test_plugins.py rename to v2/test/plugins/test_plugins.py From 57d2622c8cff37d3a61c3b6fc1d422c61695e2fb Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 16 Oct 2014 12:08:33 -0700 Subject: [PATCH 243/813] Adding block code 
and tests --- v2/ansible/playbook/attribute.py | 2 - v2/ansible/playbook/base.py | 10 ++-- v2/ansible/playbook/block.py | 46 +++++++++++++++++-- v2/test/playbook/test_block.py | 79 ++++++++++++++++++++++++++++++++ 4 files changed, 126 insertions(+), 11 deletions(-) create mode 100644 v2/test/playbook/test_block.py diff --git a/v2/ansible/playbook/attribute.py b/v2/ansible/playbook/attribute.py index d28ba4cc85..ecafe653f0 100644 --- a/v2/ansible/playbook/attribute.py +++ b/v2/ansible/playbook/attribute.py @@ -19,8 +19,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -#from ansible.common.errors import AnsibleError - class Attribute: def __init__(self, isa=None, private=False, default=None): diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py index cf075b020e..08f4d519e6 100644 --- a/v2/ansible/playbook/base.py +++ b/v2/ansible/playbook/base.py @@ -65,11 +65,11 @@ class Base: if isinstance(attribute, FieldAttribute): # copy the value over unless a _load_field method is defined - method = getattr(self, '_load_%s' % aname, None) - if method: - self._attributes[aname] = method(self, attribute) - else: - if aname in ds: + if aname in ds: + method = getattr(self, '_load_%s' % aname, None) + if method: + self._attributes[aname] = method(aname, ds[aname]) + else: self._attributes[aname] = ds[aname] # return the constructed object diff --git a/v2/ansible/playbook/block.py b/v2/ansible/playbook/block.py index 70af215923..46b670b168 100644 --- a/v2/ansible/playbook/block.py +++ b/v2/ansible/playbook/block.py @@ -19,10 +19,48 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from v2.playbook.base import PlaybookBase +from ansible.playbook.base import Base +from ansible.playbook.task import Task +from ansible.playbook.attribute import Attribute, FieldAttribute -class Block(PlaybookBase): +class Block(Base): - def __init__(self): - pass + _begin = FieldAttribute(isa='list') + 
_rescue = FieldAttribute(isa='list') + _end = FieldAttribute(isa='list') + _otherwise = FieldAttribute(isa='list') + + def __init__(self, role=None): + self.role = role + super(Block, self).__init__() + + def get_variables(self): + # blocks do not (currently) store any variables directly, + # so we just return an empty dict here + return dict() + + @staticmethod + def load(data, role=None): + b = Block(role=role) + return b.load_data(data) + + def _load_list_of_tasks(self, ds): + assert type(ds) == list + task_list = [] + for task in ds: + t = Task.load(task) + task_list.append(t) + return task_list + + def _load_begin(self, attr, ds): + return self._load_list_of_tasks(ds) + + def _load_rescue(self, attr, ds): + return self._load_list_of_tasks(ds) + + def _load_end(self, attr, ds): + return self._load_list_of_tasks(ds) + + def _load_otherwise(self, attr, ds): + return self._load_list_of_tasks(ds) diff --git a/v2/test/playbook/test_block.py b/v2/test/playbook/test_block.py new file mode 100644 index 0000000000..a0da7f0e6a --- /dev/null +++ b/v2/test/playbook/test_block.py @@ -0,0 +1,79 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.playbook.block import Block +from ansible.playbook.task import Task +from .. 
compat import unittest + +class TestBlock(unittest.TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_construct_empty_block(self): + b = Block() + + def test_construct_block_with_role(self): + pass + + def test_block__load_list_of_tasks(self): + task = dict(action='test') + b = Block() + self.assertEqual(b._load_list_of_tasks([]), []) + res = b._load_list_of_tasks([task]) + self.assertEqual(len(res), 1) + assert isinstance(res[0], Task) + res = b._load_list_of_tasks([task,task,task]) + self.assertEqual(len(res), 3) + + def test_load_block_simple(self): + ds = dict( + begin = [], + rescue = [], + end = [], + otherwise = [], + ) + b = Block.load(ds) + self.assertEqual(b.begin, []) + self.assertEqual(b.rescue, []) + self.assertEqual(b.end, []) + self.assertEqual(b.otherwise, []) + + def test_load_block_with_tasks(self): + ds = dict( + begin = [dict(action='begin')], + rescue = [dict(action='rescue')], + end = [dict(action='end')], + otherwise = [dict(action='otherwise')], + ) + b = Block.load(ds) + self.assertEqual(len(b.begin), 1) + assert isinstance(b.begin[0], Task) + self.assertEqual(len(b.rescue), 1) + assert isinstance(b.rescue[0], Task) + self.assertEqual(len(b.end), 1) + assert isinstance(b.end[0], Task) + self.assertEqual(len(b.otherwise), 1) + assert isinstance(b.otherwise[0], Task) + From 4755bde28d05a8dcafe979e95bf2da937d180c16 Mon Sep 17 00:00:00 2001 From: Andres Silva Date: Fri, 17 Oct 2014 12:16:35 -0400 Subject: [PATCH 244/813] adding for loop on list to to handle the return of none when the list is empty. With the previous method if the list was empty the script died. 
See http://stackoverflow.com/questions/18852324/typeerror-sequence-item-0-expected-string-nonetype-found --- plugins/inventory/ec2.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inventory/ec2.py b/plugins/inventory/ec2.py index aec6473be6..9d2dec38d3 100755 --- a/plugins/inventory/ec2.py +++ b/plugins/inventory/ec2.py @@ -622,8 +622,8 @@ class Ec2Inventory(object): for group in value: group_ids.append(group.id) group_names.append(group.name) - instance_vars["ec2_security_group_ids"] = ','.join(group_ids) - instance_vars["ec2_security_group_names"] = ','.join(group_names) + instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids]) + instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names]) else: pass # TODO Product codes if someone finds them useful From 5efc4efca7288f072028d1a13e5d0d731f0b7a8f Mon Sep 17 00:00:00 2001 From: Christian Hammerl Date: Sat, 18 Oct 2014 15:02:04 +0200 Subject: [PATCH 245/813] Fix os_family and distribution on archlinux Fixes #8732, ansible/ansible-modules-core#34 --- lib/ansible/module_utils/facts.py | 147 +++++++++++++++--------------- 1 file changed, 74 insertions(+), 73 deletions(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 8ec1b4f7c7..6bbcaced08 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -273,84 +273,85 @@ class Facts(object): self.facts['distribution_release'] = dist[2] or 'NA' # Try to handle the exceptions now ... 
for (path, name) in Facts.OSDIST_LIST: - if os.path.exists(path) and os.path.getsize(path) > 0: - if self.facts['distribution'] in ('Fedora', ): - # Once we determine the value is one of these distros - # we trust the values are always correct - break - elif name == 'RedHat': - data = get_file_content(path) - if 'Red Hat' in data: + if os.path.exists(path): + if os.path.getsize(path) > 0: + if self.facts['distribution'] in ('Fedora', ): + # Once we determine the value is one of these distros + # we trust the values are always correct + break + elif name == 'RedHat': + data = get_file_content(path) + if 'Red Hat' in data: + self.facts['distribution'] = name + else: + self.facts['distribution'] = data.split()[0] + break + elif name == 'OtherLinux': + data = get_file_content(path) + if 'Amazon' in data: + self.facts['distribution'] = 'Amazon' + self.facts['distribution_version'] = data.split()[-1] + break + elif name == 'OpenWrt': + data = get_file_content(path) + if 'OpenWrt' in data: + self.facts['distribution'] = name + version = re.search('DISTRIB_RELEASE="(.*)"', data) + if version: + self.facts['distribution_version'] = version.groups()[0] + release = re.search('DISTRIB_CODENAME="(.*)"', data) + if release: + self.facts['distribution_release'] = release.groups()[0] + break + elif name == 'Alpine': + data = get_file_content(path) self.facts['distribution'] = name - else: - self.facts['distribution'] = data.split()[0] - break - elif name == 'OtherLinux': - data = get_file_content(path) - if 'Amazon' in data: - self.facts['distribution'] = 'Amazon' - self.facts['distribution_version'] = data.split()[-1] + self.facts['distribution_version'] = data break - elif name == 'OpenWrt': - data = get_file_content(path) - if 'OpenWrt' in data: - self.facts['distribution'] = name - version = re.search('DISTRIB_RELEASE="(.*)"', data) - if version: - self.facts['distribution_version'] = version.groups()[0] - release = re.search('DISTRIB_CODENAME="(.*)"', data) - if release: - 
self.facts['distribution_release'] = release.groups()[0] - break - elif name == 'Alpine': - data = get_file_content(path) - self.facts['distribution'] = name - self.facts['distribution_version'] = data - break - elif name == 'Solaris': - data = get_file_content(path).split('\n')[0] - if 'Solaris' in data: - ora_prefix = '' - if 'Oracle Solaris' in data: - data = data.replace('Oracle ','') - ora_prefix = 'Oracle ' - self.facts['distribution'] = data.split()[0] - self.facts['distribution_version'] = data.split()[1] - self.facts['distribution_release'] = ora_prefix + data - break - elif name == 'SuSE': - data = get_file_content(path) - if 'suse' in data.lower(): - if path == '/etc/os-release': + elif name == 'Solaris': + data = get_file_content(path).split('\n')[0] + if 'Solaris' in data: + ora_prefix = '' + if 'Oracle Solaris' in data: + data = data.replace('Oracle ','') + ora_prefix = 'Oracle ' + self.facts['distribution'] = data.split()[0] + self.facts['distribution_version'] = data.split()[1] + self.facts['distribution_release'] = ora_prefix + data + break + elif name == 'SuSE': + data = get_file_content(path) + if 'suse' in data.lower(): + if path == '/etc/os-release': + release = re.search("PRETTY_NAME=[^(]+ \(?([^)]+?)\)", data) + if release: + self.facts['distribution_release'] = release.groups()[0] + break + elif path == '/etc/SuSE-release': + data = data.splitlines() + for line in data: + release = re.search('CODENAME *= *([^\n]+)', line) + if release: + self.facts['distribution_release'] = release.groups()[0].strip() + break + elif name == 'Debian': + data = get_file_content(path) + if 'Debian' in data: release = re.search("PRETTY_NAME=[^(]+ \(?([^)]+?)\)", data) if release: self.facts['distribution_release'] = release.groups()[0] - break - elif path == '/etc/SuSE-release': - data = data.splitlines() - for line in data: - release = re.search('CODENAME *= *([^\n]+)', line) - if release: - self.facts['distribution_release'] = release.groups()[0].strip() - 
break - elif name == 'Debian': - data = get_file_content(path) - if 'Debian' in data: - release = re.search("PRETTY_NAME=[^(]+ \(?([^)]+?)\)", data) - if release: - self.facts['distribution_release'] = release.groups()[0] - break - elif name == 'Mandriva': - data = get_file_content(path) - if 'Mandriva' in data: - version = re.search('DISTRIB_RELEASE="(.*)"', data) - if version: - self.facts['distribution_version'] = version.groups()[0] - release = re.search('DISTRIB_CODENAME="(.*)"', data) - if release: - self.facts['distribution_release'] = release.groups()[0] - self.facts['distribution'] = name - break + break + elif name == 'Mandriva': + data = get_file_content(path) + if 'Mandriva' in data: + version = re.search('DISTRIB_RELEASE="(.*)"', data) + if version: + self.facts['distribution_version'] = version.groups()[0] + release = re.search('DISTRIB_CODENAME="(.*)"', data) + if release: + self.facts['distribution_release'] = release.groups()[0] + self.facts['distribution'] = name + break else: self.facts['distribution'] = name From a60fd58145fcacd34bf881a232eeb920f526e948 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Sun, 19 Oct 2014 10:48:32 -0400 Subject: [PATCH 246/813] Update block.py Add note on block properties --- v2/ansible/playbook/block.py | 1 + 1 file changed, 1 insertion(+) diff --git a/v2/ansible/playbook/block.py b/v2/ansible/playbook/block.py index 46b670b168..45bab87bf6 100644 --- a/v2/ansible/playbook/block.py +++ b/v2/ansible/playbook/block.py @@ -25,6 +25,7 @@ from ansible.playbook.attribute import Attribute, FieldAttribute class Block(Base): + # TODO: FIXME: block/rescue/always should be enough _begin = FieldAttribute(isa='list') _rescue = FieldAttribute(isa='list') _end = FieldAttribute(isa='list') From ee5f5ee00df5f0ec87117c0346b74586e8167f37 Mon Sep 17 00:00:00 2001 From: Felix Geyer Date: Sun, 19 Oct 2014 22:55:50 +0200 Subject: [PATCH 247/813] Detect kvm guests through sys_vendor. 
The two detection methods currently implemented do not cover all cases. For example qemu guests invoked like this: qemu -machine pc-i440fx-1.4,accel=kvm -cpu SandyBridge return this information: product_name: Standard PC (i440FX + PIIX, 1996) cpuinfo: model name : Intel Xeon E312xx (Sandy Bridge) --- lib/ansible/module_utils/facts.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 9515bd1fff..e0a21bfcde 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -2157,6 +2157,11 @@ class LinuxVirtual(Virtual): self.facts['virtualization_role'] = 'guest' return + if sys_vendor == 'QEMU': + self.facts['virtualization_type'] = 'kvm' + self.facts['virtualization_role'] = 'guest' + return + if os.path.exists('/proc/self/status'): for line in open('/proc/self/status').readlines(): if re.match('^VxID: \d+', line): From 895da79d87705adc4c060db1e094877617de4d9a Mon Sep 17 00:00:00 2001 From: Guillaume Quintard Date: Mon, 20 Oct 2014 10:09:46 +0200 Subject: [PATCH 248/813] Detect docker as virtualization_type New docker versions use "docker" in /proc/1/cgroup, which disturbs the virtualization_type detection. So, grep for docker, in addition of "lxc". 
--- lib/ansible/module_utils/facts.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 8ec1b4f7c7..42b82301e9 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -2159,6 +2159,10 @@ class LinuxVirtual(Virtual): if os.path.exists('/proc/1/cgroup'): for line in open('/proc/1/cgroup').readlines(): + if re.search('/docker/', line): + self.facts['virtualization_type'] = 'docker' + self.facts['virtualization_role'] = 'guest' + return if re.search('/lxc/', line): self.facts['virtualization_type'] = 'lxc' self.facts['virtualization_role'] = 'guest' From 3d257bc6955237457fd39ea4e943915e54cd5439 Mon Sep 17 00:00:00 2001 From: insaneirish Date: Mon, 20 Oct 2014 10:12:51 -0400 Subject: [PATCH 249/813] Change "usuable" to "usable". --- hacking/templates/rst.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hacking/templates/rst.j2 b/hacking/templates/rst.j2 index db9b184d19..ae7a7e60a2 100644 --- a/hacking/templates/rst.j2 +++ b/hacking/templates/rst.j2 @@ -130,7 +130,7 @@ Should you have a question rather than a bug report, inquries are welcome on the Documentation updates for this module can also be edited directly by submitting a pull request to the module source code, just look for the "DOCUMENTATION" block in the source tree. -Note that this module is designated a "extras" module. Non-core modules are still fully usuable, but may receive slightly lower response rates for issues and pull requests. +Note that this module is designated a "extras" module. Non-core modules are still fully usable, but may receive slightly lower response rates for issues and pull requests. Popular "extras" modules may be promoted to core modules over time. 
{% endif %} From 5fe5e2d3f45fe1901c3c28e48282fab4eed20572 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 20 Oct 2014 11:43:06 -0400 Subject: [PATCH 250/813] Docstring for v2 inventory.subset --- v2/ansible/inventory/__init__.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/v2/ansible/inventory/__init__.py b/v2/ansible/inventory/__init__.py index 11f7e35c8b..5ad688eaf0 100644 --- a/v2/ansible/inventory/__init__.py +++ b/v2/ansible/inventory/__init__.py @@ -58,6 +58,12 @@ class Inventory: def also_restrict_to(self, restriction): pass def subset(self, subset_pattern): + """ + Limits inventory results to a subset of inventory that matches a given + pattern, such as to select a given geographic of numeric slice amongst + a previous 'hosts' selection that only select roles, or vice versa... + Corresponds to --limit parameter to ansible-playbook + """ pass def lift_restriction(self): # HACK -- From b0069a338ecf6c182623977d9411c1e5368a3664 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sun, 19 Oct 2014 00:14:30 -0500 Subject: [PATCH 251/813] Overhauls to v2 code * using inspect module instead of iteritems(self.__class__.__dict__, due to the fact that the later does not include attributes from parent classes * added tags/when attributes to Base() class for use by all subclasses * removed value/callable code from Attribute, as they are not used * started moving some limited code from utils to new places in v2 tree (vault, yaml-parsing related defs) * re-added ability of Block.load() to create implicit blocks from tasks * started overhaul of Role class and role-related code --- v2/ansible/parsing/__init__.py | 192 ++++++++- v2/ansible/parsing/vault/__init__.py | 563 +++++++++++++++++++++++++++ v2/ansible/playbook/attribute.py | 4 - v2/ansible/playbook/base.py | 64 +-- v2/ansible/playbook/block.py | 33 +- v2/ansible/playbook/role.py | 261 +++++++++++-- v2/test/parsing/test_general.py | 14 +- v2/test/playbook/test_block.py | 38 +- 
v2/test/playbook/test_role.py | 52 +++ 9 files changed, 1130 insertions(+), 91 deletions(-) create mode 100644 v2/ansible/parsing/vault/__init__.py create mode 100644 v2/test/playbook/test_role.py diff --git a/v2/ansible/parsing/__init__.py b/v2/ansible/parsing/__init__.py index 4641623c03..229be2622f 100644 --- a/v2/ansible/parsing/__init__.py +++ b/v2/ansible/parsing/__init__.py @@ -19,19 +19,205 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +import json + +from yaml import YAMLError + from ansible.errors import AnsibleError, AnsibleInternalError +from ansible.parsing.vault import VaultLib +from ansible.parsing.yaml import safe_load -def load(self, data): - if instanceof(data, file): +def process_common_errors(msg, probline, column): + replaced = probline.replace(" ","") + + if ":{{" in replaced and "}}" in replaced: + msg = msg + """ +This one looks easy to fix. YAML thought it was looking for the start of a +hash/dictionary and was confused to see a second "{". Most likely this was +meant to be an ansible template evaluation instead, so we have to give the +parser a small hint that we wanted a string instead. The solution here is to +just quote the entire value. + +For instance, if the original line was: + + app_path: {{ base_path }}/foo + +It should be written as: + + app_path: "{{ base_path }}/foo" +""" + return msg + + elif len(probline) and len(probline) > 1 and len(probline) > column and probline[column] == ":" and probline.count(':') > 1: + msg = msg + """ +This one looks easy to fix. There seems to be an extra unquoted colon in the line +and this is confusing the parser. It was only expecting to find one free +colon. The solution is just add some quotes around the colon, or quote the +entire line after the first colon. 
+ +For instance, if the original line was: + + copy: src=file.txt dest=/path/filename:with_colon.txt + +It can be written as: + + copy: src=file.txt dest='/path/filename:with_colon.txt' + +Or: + + copy: 'src=file.txt dest=/path/filename:with_colon.txt' + + +""" + return msg + else: + parts = probline.split(":") + if len(parts) > 1: + middle = parts[1].strip() + match = False + unbalanced = False + if middle.startswith("'") and not middle.endswith("'"): + match = True + elif middle.startswith('"') and not middle.endswith('"'): + match = True + if len(middle) > 0 and middle[0] in [ '"', "'" ] and middle[-1] in [ '"', "'" ] and probline.count("'") > 2 or probline.count('"') > 2: + unbalanced = True + if match: + msg = msg + """ +This one looks easy to fix. It seems that there is a value started +with a quote, and the YAML parser is expecting to see the line ended +with the same kind of quote. For instance: + + when: "ok" in result.stdout + +Could be written as: + + when: '"ok" in result.stdout' + +or equivalently: + + when: "'ok' in result.stdout" + +""" + return msg + + if unbalanced: + msg = msg + """ +We could be wrong, but this one looks like it might be an issue with +unbalanced quotes. If starting a value with a quote, make sure the +line ends with the same set of quotes. 
For instance this arbitrary +example: + + foo: "bad" "wolf" + +Could be written as: + + foo: '"bad" "wolf"' + +""" + return msg + + return msg + +def process_yaml_error(exc, data, path=None, show_content=True): + if hasattr(exc, 'problem_mark'): + mark = exc.problem_mark + if show_content: + if mark.line -1 >= 0: + before_probline = data.split("\n")[mark.line-1] + else: + before_probline = '' + probline = data.split("\n")[mark.line] + arrow = " " * mark.column + "^" + msg = """Syntax Error while loading YAML script, %s +Note: The error may actually appear before this position: line %s, column %s + +%s +%s +%s""" % (path, mark.line + 1, mark.column + 1, before_probline, probline, arrow) + + unquoted_var = None + if '{{' in probline and '}}' in probline: + if '"{{' not in probline or "'{{" not in probline: + unquoted_var = True + + if not unquoted_var: + msg = process_common_errors(msg, probline, mark.column) + else: + msg = msg + """ +We could be wrong, but this one looks like it might be an issue with +missing quotes. Always quote template expression brackets when they +start a value. For instance: + + with_items: + - {{ foo }} + +Should be written as: + + with_items: + - "{{ foo }}" + +""" + else: + # most likely displaying a file with sensitive content, + # so don't show any of the actual lines of yaml just the + # line number itself + msg = """Syntax error while loading YAML script, %s +The error appears to have been on line %s, column %s, but may actually +be before there depending on the exact syntax problem. +""" % (path, mark.line + 1, mark.column + 1) + + else: + # No problem markers means we have to throw a generic + # "stuff messed up" type message. Sry bud. + if path: + msg = "Could not parse YAML. Check over %s again." % path + else: + msg = "Could not parse YAML." 
def load_data(data):
    """
    Parse *data* as JSON, falling back to YAML.

    :param data: an open file object or a (byte) string of JSON/YAML text
    :returns: the parsed data structure
    :raises AnsibleInternalError: if *data* is neither a file nor a string

    NOTE(review): this module targets Python 2 -- ``file`` and ``basestring``
    are the Py2 builtins; ``safe_load`` comes from the module-level PyYAML
    import (not visible in this hunk).
    """
    if isinstance(data, file):
        # BUG FIX: this previously did 'fd = open(f)' on the undefined
        # name 'f' (NameError).  'data' already IS the open file object,
        # so just read from it.  Closing is left to the caller, who
        # opened it.
        data = data.read()
    if isinstance(data, basestring):
        try:
            return json.loads(data)
        except ValueError:
            # not valid JSON; assume YAML.  Narrowed from a bare 'except'
            # so genuine programming errors are not swallowed.
            return safe_load(data)
    raise AnsibleInternalError("expected file or string, got %s" % type(data))
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +# ansible-pull is a script that runs ansible in local mode +# after checking out a playbooks directory from source repo. There is an +# example playbook to bootstrap this script in the examples/ dir which +# installs ansible and sets it up to run on cron. + +import os +import shlex +import shutil +import tempfile +from io import BytesIO +from subprocess import call +from ansible import errors +from hashlib import sha256 +from hashlib import md5 +from binascii import hexlify +from binascii import unhexlify +from ansible import constants as C + +try: + from Crypto.Hash import SHA256, HMAC + HAS_HASH = True +except ImportError: + HAS_HASH = False + +# Counter import fails for 2.0.1, requires >= 2.6.1 from pip +try: + from Crypto.Util import Counter + HAS_COUNTER = True +except ImportError: + HAS_COUNTER = False + +# KDF import fails for 2.0.1, requires >= 2.6.1 from pip +try: + from Crypto.Protocol.KDF import PBKDF2 + HAS_PBKDF2 = True +except ImportError: + HAS_PBKDF2 = False + +# AES IMPORTS +try: + from Crypto.Cipher import AES as AES + HAS_AES = True +except ImportError: + HAS_AES = False + +CRYPTO_UPGRADE = "ansible-vault requires a newer version of pycrypto than the one installed on your platform. 
class VaultLib(object):
    """
    Framing layer for ansible-vault payloads.

    Wraps/unwraps the ``$ANSIBLE_VAULT;<version>;<cipher>`` header around
    ciphertext and dispatches the actual crypto to a ``Vault<cipher>`` class
    (VaultAES / VaultAES256) looked up in this module's globals().
    """

    def __init__(self, password):
        # password may be None; decrypt() checks for that explicitly
        self.password = password
        self.cipher_name = None
        # framing format version written into the header line
        self.version = '1.1'

    def is_encrypted(self, data):
        # vault data always begins with the literal HEADER marker
        if data.startswith(HEADER):
            return True
        else:
            return False

    def encrypt(self, data):
        """Encrypt *data* with the configured cipher and add the vault header."""

        if self.is_encrypted(data):
            raise errors.AnsibleError("data is already encrypted")

        if not self.cipher_name:
            # default to the current recommended cipher when unset
            self.cipher_name = "AES256"
            #raise errors.AnsibleError("the cipher must be set before encrypting data")

        # look up the implementation class (e.g. VaultAES256) by name;
        # it must both exist in this module and be whitelisted
        if 'Vault' + self.cipher_name in globals() and self.cipher_name in CIPHER_WHITELIST:
            cipher = globals()['Vault' + self.cipher_name]
            this_cipher = cipher()
        else:
            raise errors.AnsibleError("%s cipher could not be found" % self.cipher_name)

        """
        # combine sha + data
        this_sha = sha256(data).hexdigest()
        tmp_data = this_sha + "\n" + data
        """

        # encrypt sha + data
        enc_data = this_cipher.encrypt(data, self.password)

        # add header
        tmp_data = self._add_header(enc_data)
        return tmp_data

    def decrypt(self, data):
        """Strip the vault header from *data* and decrypt the remainder."""
        if self.password is None:
            raise errors.AnsibleError("A vault password must be specified to decrypt data")

        if not self.is_encrypted(data):
            raise errors.AnsibleError("data is not encrypted")

        # clean out header (also sets self.version / self.cipher_name
        # from the header fields)
        data = self._split_header(data)

        # create the cipher object
        if 'Vault' + self.cipher_name in globals() and self.cipher_name in CIPHER_WHITELIST:
            cipher = globals()['Vault' + self.cipher_name]
            this_cipher = cipher()
        else:
            raise errors.AnsibleError("%s cipher could not be found" % self.cipher_name)

        # try to unencrypt data; ciphers signal failure by returning None
        data = this_cipher.decrypt(data, self.password)
        if data is None:
            raise errors.AnsibleError("Decryption failed")

        return data

    def _add_header(self, data):
        # combine header and encrypted data in 80 char columns

        #tmpdata = hexlify(data)
        tmpdata = [data[i:i+80] for i in range(0, len(data), 80)]

        if not self.cipher_name:
            raise errors.AnsibleError("the cipher must be set before adding a header")

        # header line: $ANSIBLE_VAULT;<version>;<cipher>
        dirty_data = HEADER + ";" + str(self.version) + ";" + self.cipher_name + "\n"

        for l in tmpdata:
            dirty_data += l + '\n'

        return dirty_data


    def _split_header(self, data):
        # used by decrypt: peel off the first line, record version and
        # cipher name from it, and return the remaining ciphertext body

        tmpdata = data.split('\n')
        tmpheader = tmpdata[0].strip().split(';')

        self.version = str(tmpheader[1].strip())
        self.cipher_name = str(tmpheader[2].strip())
        clean_data = '\n'.join(tmpdata[1:])

        """
        # strip out newline, join, unhex
        clean_data = [ x.strip() for x in clean_data ]
        clean_data = unhexlify(''.join(clean_data))
        """

        return clean_data

    def __enter__(self):
        # context-manager support; nothing to acquire
        return self

    def __exit__(self, *err):
        # nothing to release
        pass
class VaultEditor(object):
    # uses helper methods for write_file(self, filename, data)
    # to write a file so that code isn't duplicated for simple
    # file I/O, ditto read_file(self, filename) and launch_editor(self, filename)
    # ... "Don't Repeat Yourself", etc.

    def __init__(self, cipher_name, password, filename):
        # instantiates a member variable for VaultLib
        self.cipher_name = cipher_name
        self.password = password
        self.filename = filename

    def create_file(self):
        """ create a new encrypted file """

        if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
            raise errors.AnsibleError(CRYPTO_UPGRADE)

        if os.path.isfile(self.filename):
            raise errors.AnsibleError("%s exists, please use 'edit' instead" % self.filename)

        # drop the user into vim on file
        # (0077: Py2 octal literal -- new files readable by owner only)
        old_umask = os.umask(0077)
        call(self._editor_shell_command(self.filename))
        tmpdata = self.read_data(self.filename)
        this_vault = VaultLib(self.password)
        this_vault.cipher_name = self.cipher_name
        enc_data = this_vault.encrypt(tmpdata)
        self.write_data(enc_data, self.filename)
        # restore the caller's umask
        os.umask(old_umask)

    def decrypt_file(self):
        """ decrypt self.filename in place, leaving plaintext on disk """

        if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
            raise errors.AnsibleError(CRYPTO_UPGRADE)

        if not os.path.isfile(self.filename):
            raise errors.AnsibleError("%s does not exist" % self.filename)

        tmpdata = self.read_data(self.filename)
        this_vault = VaultLib(self.password)
        if this_vault.is_encrypted(tmpdata):
            dec_data = this_vault.decrypt(tmpdata)
            if dec_data is None:
                raise errors.AnsibleError("Decryption failed")
            else:
                self.write_data(dec_data, self.filename)
        else:
            raise errors.AnsibleError("%s is not encrypted" % self.filename)

    def edit_file(self):
        """ decrypt to a temp file, let the user edit it, re-encrypt in place """

        if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
            raise errors.AnsibleError(CRYPTO_UPGRADE)

        # make sure the umask is set to a sane value
        old_mask = os.umask(0077)

        # decrypt to tmpfile
        tmpdata = self.read_data(self.filename)
        this_vault = VaultLib(self.password)
        dec_data = this_vault.decrypt(tmpdata)
        _, tmp_path = tempfile.mkstemp()
        self.write_data(dec_data, tmp_path)

        # drop the user into vim on the tmp file
        call(self._editor_shell_command(tmp_path))
        new_data = self.read_data(tmp_path)

        # create new vault
        new_vault = VaultLib(self.password)

        # we want the cipher to default to AES256
        #new_vault.cipher_name = this_vault.cipher_name

        # encrypt new data a write out to tmp
        enc_data = new_vault.encrypt(new_data)
        self.write_data(enc_data, tmp_path)

        # shuffle tmp file into place
        self.shuffle_files(tmp_path, self.filename)

        # and restore the old umask
        os.umask(old_mask)

    def view_file(self):
        """ decrypt to a temp file, page through it, then remove the temp file """

        if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
            raise errors.AnsibleError(CRYPTO_UPGRADE)

        # decrypt to tmpfile
        tmpdata = self.read_data(self.filename)
        this_vault = VaultLib(self.password)
        dec_data = this_vault.decrypt(tmpdata)
        _, tmp_path = tempfile.mkstemp()
        self.write_data(dec_data, tmp_path)

        # drop the user into pager on the tmp file
        call(self._pager_shell_command(tmp_path))
        os.remove(tmp_path)

    def encrypt_file(self):
        """ encrypt a currently-plaintext self.filename in place """

        if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
            raise errors.AnsibleError(CRYPTO_UPGRADE)

        if not os.path.isfile(self.filename):
            raise errors.AnsibleError("%s does not exist" % self.filename)

        tmpdata = self.read_data(self.filename)
        this_vault = VaultLib(self.password)
        this_vault.cipher_name = self.cipher_name
        if not this_vault.is_encrypted(tmpdata):
            enc_data = this_vault.encrypt(tmpdata)
            self.write_data(enc_data, self.filename)
        else:
            raise errors.AnsibleError("%s is already encrypted" % self.filename)

    def rekey_file(self, new_password):
        """ decrypt with the current password, re-encrypt with new_password """

        if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
            raise errors.AnsibleError(CRYPTO_UPGRADE)

        # decrypt
        tmpdata = self.read_data(self.filename)
        this_vault = VaultLib(self.password)
        dec_data = this_vault.decrypt(tmpdata)

        # create new vault
        new_vault = VaultLib(new_password)

        # we want to force cipher to the default
        #new_vault.cipher_name = this_vault.cipher_name

        # re-encrypt data and re-write file
        enc_data = new_vault.encrypt(dec_data)
        self.write_data(enc_data, self.filename)

    def read_data(self, filename):
        # binary-mode read of the whole file
        f = open(filename, "rb")
        tmpdata = f.read()
        f.close()
        return tmpdata

    def write_data(self, data, filename):
        # remove-then-write so we never append to stale content
        if os.path.isfile(filename):
            os.remove(filename)
        f = open(filename, "wb")
        f.write(data)
        f.close()

    def shuffle_files(self, src, dest):
        # overwrite dest with src
        if os.path.isfile(dest):
            os.remove(dest)
        shutil.move(src, dest)

    def _editor_shell_command(self, filename):
        # honor $EDITOR (may contain arguments), defaulting to vim
        EDITOR = os.environ.get('EDITOR','vim')
        editor = shlex.split(EDITOR)
        editor.append(filename)

        return editor

    def _pager_shell_command(self, filename):
        # honor $PAGER (may contain arguments), defaulting to less
        PAGER = os.environ.get('PAGER','less')
        pager = shlex.split(PAGER)
        pager.append(filename)

        return pager
class VaultAES(object):
    """
    Legacy 'AES' vault cipher: AES-CBC with an MD5-stretched key and an
    embedded SHA-256 of the plaintext for integrity checking.

    Obsoleted by VaultAES256 (encrypt-then-MAC, PBKDF2 KDF); kept only so
    old vault files can still be decrypted/upgraded.  Do not change the
    byte-level behavior here -- existing vault files depend on it.
    """

    # this version has been obsoleted by the VaultAES256 class
    # which uses encrypt-then-mac (fixing order) and also improving the KDF used
    # code remains for upgrade purposes only
    # http://stackoverflow.com/a/16761459

    def __init__(self):
        if not HAS_AES:
            raise errors.AnsibleError(CRYPTO_UPGRADE)

    def aes_derive_key_and_iv(self, password, salt, key_length, iv_length):

        """ Create a key and an initialization vector by iterated MD5
        (OpenSSL EVP_BytesToKey-style stretching; weak by modern
        standards, retained for compatibility). """

        d = d_i = ''
        while len(d) < key_length + iv_length:
            d_i = md5(d_i + password + salt).digest()
            d += d_i

        key = d[:key_length]
        iv = d[key_length:key_length+iv_length]

        return key, iv

    def encrypt(self, data, password, key_length=32):

        """ Encrypt *data* with *password*; returns hex-encoded
        'Salted__' + salt + AES-CBC ciphertext of (sha256(data) + data). """


        # combine sha + data
        this_sha = sha256(data).hexdigest()
        tmp_data = this_sha + "\n" + data

        in_file = BytesIO(tmp_data)
        in_file.seek(0)
        out_file = BytesIO()

        bs = AES.block_size

        # Get a block of random data. EL does not have Crypto.Random.new()
        # so os.urandom is used for cross platform purposes
        salt = os.urandom(bs - len('Salted__'))

        key, iv = self.aes_derive_key_and_iv(password, salt, key_length, bs)
        cipher = AES.new(key, AES.MODE_CBC, iv)
        # OpenSSL-compatible 'Salted__' prefix before the salt
        out_file.write('Salted__' + salt)
        finished = False
        while not finished:
            chunk = in_file.read(1024 * bs)
            if len(chunk) == 0 or len(chunk) % bs != 0:
                # PKCS#7-style padding: always pad, even if already aligned
                padding_length = (bs - len(chunk) % bs) or bs
                chunk += padding_length * chr(padding_length)
                finished = True
            out_file.write(cipher.encrypt(chunk))

        out_file.seek(0)
        enc_data = out_file.read()
        tmp_data = hexlify(enc_data)

        return tmp_data


    def decrypt(self, data, password, key_length=32):

        """ Decrypt hex-encoded vault-AES *data*; verifies the embedded
        sha256 and returns the plaintext, raising on mismatch. """

        # http://stackoverflow.com/a/14989032

        # the on-disk form is hex split across lines; rejoin and unhex
        data = ''.join(data.split('\n'))
        data = unhexlify(data)

        in_file = BytesIO(data)
        in_file.seek(0)
        out_file = BytesIO()

        bs = AES.block_size
        # first block is 'Salted__' + salt
        salt = in_file.read(bs)[len('Salted__'):]
        key, iv = self.aes_derive_key_and_iv(password, salt, key_length, bs)
        cipher = AES.new(key, AES.MODE_CBC, iv)
        next_chunk = ''
        finished = False

        while not finished:
            # one-chunk lookahead so padding is stripped from the last chunk only
            chunk, next_chunk = next_chunk, cipher.decrypt(in_file.read(1024 * bs))
            if len(next_chunk) == 0:
                padding_length = ord(chunk[-1])
                chunk = chunk[:-padding_length]
                finished = True
            out_file.write(chunk)

        # reset the stream pointer to the beginning
        out_file.seek(0)
        new_data = out_file.read()

        # split out sha and verify decryption
        split_data = new_data.split("\n")
        this_sha = split_data[0]
        this_data = '\n'.join(split_data[1:])
        test_sha = sha256(this_data).hexdigest()

        if this_sha != test_sha:
            raise errors.AnsibleError("Decryption failed")

        #return out_file.read()
        return this_data
class VaultAES256(object):

    """
    Vault implementation using AES-CTR with an HMAC-SHA256 authentication code.
    Keys are derived using PBKDF2.

    Wire format (before the VaultLib header is added):
    hexlify( hexlify(salt) + "\\n" + hmac_hexdigest + "\\n" + hexlify(ciphertext) )
    """

    # http://www.daemonology.net/blog/2009-06-11-cryptographic-right-answers.html

    def __init__(self):

        if not HAS_PBKDF2 or not HAS_COUNTER or not HAS_HASH:
            raise errors.AnsibleError(CRYPTO_UPGRADE)

    def gen_key_initctr(self, password, salt):
        """ Derive (cipher key, HMAC key, hex-encoded CTR IV) from
        password+salt via a single PBKDF2-HMAC-SHA256 stretch. """

        # 16 for AES 128, 32 for AES256
        keylength = 32

        # match the size used for counter.new to avoid extra work
        ivlength = 16

        hash_function = SHA256

        # make two keys and one iv
        pbkdf2_prf = lambda p, s: HMAC.new(p, s, hash_function).digest()


        # one derivation yields key1 | key2 | iv back-to-back
        derivedkey = PBKDF2(password, salt, dkLen=(2 * keylength) + ivlength,
                            count=10000, prf=pbkdf2_prf)

        key1 = derivedkey[:keylength]
        key2 = derivedkey[keylength:(keylength * 2)]
        iv = derivedkey[(keylength * 2):(keylength * 2) + ivlength]

        # note: iv is returned hex-encoded; callers do long(iv, 16)
        return key1, key2, hexlify(iv)


    def encrypt(self, data, password):
        """ Encrypt-then-MAC: AES-CTR encrypt padded *data*, HMAC-SHA256
        the ciphertext, and pack salt/digest/ciphertext into one message. """

        salt = os.urandom(32)
        key1, key2, iv = self.gen_key_initctr(password, salt)

        # PKCS#7 PAD DATA http://tools.ietf.org/html/rfc5652#section-6.3
        bs = AES.block_size
        padding_length = (bs - len(data) % bs) or bs
        data += padding_length * chr(padding_length)

        # COUNTER.new PARAMETERS
        # 1) nbits (integer) - Length of the counter, in bits.
        # 2) initial_value (integer) - initial value of the counter. "iv" from gen_key_initctr

        ctr = Counter.new(128, initial_value=long(iv, 16))

        # AES.new PARAMETERS
        # 1) AES key, must be either 16, 24, or 32 bytes long -- "key" from gen_key_initctr
        # 2) MODE_CTR, is the recommended mode
        # 3) counter=<counter object created above>

        cipher = AES.new(key1, AES.MODE_CTR, counter=ctr)

        # ENCRYPT PADDED DATA
        cryptedData = cipher.encrypt(data)

        # COMBINE SALT, DIGEST AND DATA
        hmac = HMAC.new(key2, cryptedData, SHA256)
        message = "%s\n%s\n%s" % ( hexlify(salt), hmac.hexdigest(), hexlify(cryptedData) )
        message = hexlify(message)
        return message

    def decrypt(self, data, password):
        """ Verify the HMAC over the ciphertext, then AES-CTR decrypt and
        unpad.  Returns None on MAC mismatch (caller raises). """

        # SPLIT SALT, DIGEST, AND DATA
        data = ''.join(data.split("\n"))
        data = unhexlify(data)
        salt, cryptedHmac, cryptedData = data.split("\n", 2)
        salt = unhexlify(salt)
        cryptedData = unhexlify(cryptedData)

        key1, key2, iv = self.gen_key_initctr(password, salt)

        # EXIT EARLY IF DIGEST DOESN'T MATCH
        hmacDecrypt = HMAC.new(key2, cryptedData, SHA256)
        if not self.is_equal(cryptedHmac, hmacDecrypt.hexdigest()):
            return None

        # SET THE COUNTER AND THE CIPHER
        ctr = Counter.new(128, initial_value=long(iv, 16))
        cipher = AES.new(key1, AES.MODE_CTR, counter=ctr)

        # DECRYPT PADDED DATA
        decryptedData = cipher.decrypt(cryptedData)

        # UNPAD DATA (PKCS#7: last byte encodes the pad length)
        padding_length = ord(decryptedData[-1])
        decryptedData = decryptedData[:-padding_length]

        return decryptedData

    def is_equal(self, a, b):
        """ Constant-time string comparison to avoid leaking how much of
        the MAC matched. """
        # http://codahale.com/a-lesson-in-timing-attacks/
        if len(a) != len(b):
            return False

        result = 0
        for x, y in zip(a, b):
            result |= ord(x) ^ ord(y)
        return result == 0
FieldAttribute(Attribute): pass diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py index 08f4d519e6..a992e19a5d 100644 --- a/v2/ansible/playbook/base.py +++ b/v2/ansible/playbook/base.py @@ -19,24 +19,39 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +from inspect import getmembers from io import FileIO from six import iteritems, string_types from ansible.playbook.attribute import Attribute, FieldAttribute -from ansible.parsing import load as ds_load +from ansible.parsing import load_data class Base: + _tags = FieldAttribute(isa='list') + _when = FieldAttribute(isa='list') + def __init__(self): # each class knows attributes set upon it, see Task.py for example self._attributes = dict() - for (name, value) in iteritems(self.__class__.__dict__): - aname = name[1:] + for (name, value) in self._get_base_attributes().iteritems(): + self._attributes[name] = value.default + + def _get_base_attributes(self): + ''' + Returns the list of attributes for this class (or any subclass thereof). + If the attribute name starts with an underscore, it is removed + ''' + base_attributes = dict() + for (name, value) in getmembers(self.__class__): if isinstance(value, Attribute): - self._attributes[aname] = value.default + if name.startswith('_'): + name = name[1:] + base_attributes[name] = value + return base_attributes def munge(self, ds): ''' infrequently used method to do some pre-processing of legacy terms ''' @@ -49,7 +64,7 @@ class Base: assert ds is not None if isinstance(ds, string_types) or isinstance(ds, FileIO): - ds = ds_load(ds) + ds = load_data(ds) # we currently don't do anything with private attributes but may # later decide to filter them out of 'ds' here. 
@@ -57,20 +72,15 @@ class Base: ds = self.munge(ds) # walk all attributes in the class - for (name, attribute) in iteritems(self.__class__.__dict__): - aname = name[1:] + for (name, attribute) in self._get_base_attributes().iteritems(): - # process Field attributes which get loaded from the YAML - - if isinstance(attribute, FieldAttribute): - - # copy the value over unless a _load_field method is defined - if aname in ds: - method = getattr(self, '_load_%s' % aname, None) - if method: - self._attributes[aname] = method(aname, ds[aname]) - else: - self._attributes[aname] = ds[aname] + # copy the value over unless a _load_field method is defined + if name in ds: + method = getattr(self, '_load_%s' % name, None) + if method: + self._attributes[name] = method(name, ds[name]) + else: + self._attributes[name] = ds[name] # return the constructed object self.validate() @@ -81,20 +91,12 @@ class Base: ''' validation that is done at parse time, not load time ''' # walk all fields in the object - for (name, attribute) in self.__dict__.iteritems(): + for (name, attribute) in self._get_base_attributes().iteritems(): - # find any field attributes - if isinstance(attribute, FieldAttribute): - - if not name.startswith("_"): - raise AnsibleError("FieldAttribute %s must start with _" % name) - - aname = name[1:] - - # run validator only if present - method = getattr(self, '_validate_%s' % (prefix, aname), None) - if method: - method(self, attribute) + # run validator only if present + method = getattr(self, '_validate_%s' % name, None) + if method: + method(self, attribute) def post_validate(self, runner_context): ''' diff --git a/v2/ansible/playbook/block.py b/v2/ansible/playbook/block.py index 45bab87bf6..5e4826d119 100644 --- a/v2/ansible/playbook/block.py +++ b/v2/ansible/playbook/block.py @@ -25,11 +25,13 @@ from ansible.playbook.attribute import Attribute, FieldAttribute class Block(Base): - # TODO: FIXME: block/rescue/always should be enough - _begin = 
FieldAttribute(isa='list') + _block = FieldAttribute(isa='list') _rescue = FieldAttribute(isa='list') - _end = FieldAttribute(isa='list') - _otherwise = FieldAttribute(isa='list') + _always = FieldAttribute(isa='list') + + # for future consideration? this would be functionally + # similar to the 'else' clause for exceptions + #_otherwise = FieldAttribute(isa='list') def __init__(self, role=None): self.role = role @@ -45,6 +47,20 @@ class Block(Base): b = Block(role=role) return b.load_data(data) + def munge(self, ds): + ''' + If a simple task is given, an implicit block for that single task + is created, which goes in the main portion of the block + ''' + is_block = False + for attr in ('block', 'rescue', 'always'): + if attr in ds: + is_block = True + break + if not is_block: + return dict(block=ds) + return ds + def _load_list_of_tasks(self, ds): assert type(ds) == list task_list = [] @@ -53,15 +69,16 @@ class Block(Base): task_list.append(t) return task_list - def _load_begin(self, attr, ds): + def _load_block(self, attr, ds): return self._load_list_of_tasks(ds) def _load_rescue(self, attr, ds): return self._load_list_of_tasks(ds) - def _load_end(self, attr, ds): + def _load_always(self, attr, ds): return self._load_list_of_tasks(ds) - def _load_otherwise(self, attr, ds): - return self._load_list_of_tasks(ds) + # not currently used + #def _load_otherwise(self, attr, ds): + # return self._load_list_of_tasks(ds) diff --git a/v2/ansible/playbook/role.py b/v2/ansible/playbook/role.py index f36207874d..af3855f3d9 100644 --- a/v2/ansible/playbook/role.py +++ b/v2/ansible/playbook/role.py @@ -19,31 +19,251 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from v2.playbook.base import PlaybookBase -from v2.utils import list_union +from six import iteritems, string_types -class Role(PlaybookBase): +import os - # TODO: this will be overhauled to match Task.py at some point +from ansible.playbook.attribute import FieldAttribute 
+from ansible.playbook.base import Base +from ansible.playbook.block import Block +from ansible.parsing import load_data_from_file - def __init__(self): - pass +#from ansible.utils import list_union, unfrackpath + +class Role(Base): + + _role = FieldAttribute(isa='string') + _src = FieldAttribute(isa='string') + _scm = FieldAttribute(isa='string') + _version = FieldAttribute(isa='string') + _params = FieldAttribute(isa='dict') + _metadata = FieldAttribute(isa='dict') + _task_blocks = FieldAttribute(isa='list') + _handler_blocks = FieldAttribute(isa='list') + _default_vars = FieldAttribute(isa='dict') + _role_vars = FieldAttribute(isa='dict') + + def __init__(self, vault_password=None): + self._role_path = None + self._vault_password = vault_password + super(Role, self).__init__() + + def __repr__(self): + return self.get_name() def get_name(self): - return "TEMPORARY" + return self._attributes['role'] - def load(self, ds): - self._ds = ds - self._tasks = [] - self._handlers = [] - self._blocks = [] - self._dependencies = [] - self._metadata = dict() - self._defaults = dict() - self._vars = dict() - self._params = dict() + @staticmethod + def load(data, vault_password=None): + assert isinstance(data, string_types) or isinstance(data, dict) + r = Role(vault_password=vault_password) + r.load_data(data) + return r - def get_vars(self): + #------------------------------------------------------------------------------ + # munge, and other functions used for loading the ds + + def munge(self, ds): + # Role definitions can be strings or dicts, so we fix + # things up here. 
Anything that is not a role name, tag, + # or conditional will also be added to the params sub- + # dictionary for loading later + if isinstance(ds, string_types): + new_ds = dict(role=ds) + else: + ds = self._munge_role(ds) + + params = dict() + new_ds = dict() + + for (key, value) in iteritems(ds): + if key not in [name for (name, value) in self._get_base_attributes().iteritems()]: + # this key does not match a field attribute, + # so it must be a role param + params[key] = value + else: + # this is a field attribute, so copy it over directly + new_ds[key] = value + + # finally, assign the params to a new entry in the revised ds + new_ds['params'] = params + + # set the role path, based on the role definition + self._role_path = self._get_role_path(new_ds.get('role')) + + # load the role's files, if they exist + new_ds['metadata'] = self._load_role_yaml('meta') + new_ds['task_blocks'] = self._load_role_yaml('tasks') + new_ds['handler_blocks'] = self._load_role_yaml('handlers') + new_ds['default_vars'] = self._load_role_yaml('defaults') + new_ds['role_vars'] = self._load_role_yaml('vars') + + return new_ds + + def _load_role_yaml(self, subdir): + file_path = os.path.join(self._role_path, subdir) + if os.path.exists(file_path) and os.path.isdir(file_path): + main_file = self._resolve_main(file_path) + if os.path.exists(main_file): + return load_data_from_file(main_file, self._vault_password) + return None + + def _resolve_main(self, basepath): + ''' flexibly handle variations in main filenames ''' + possible_mains = ( + os.path.join(basepath, 'main'), + os.path.join(basepath, 'main.yml'), + os.path.join(basepath, 'main.yaml'), + os.path.join(basepath, 'main.json'), + ) + + if sum([os.path.isfile(x) for x in possible_mains]) > 1: + raise errors.AnsibleError("found multiple main files at %s, only one allowed" % (basepath)) + else: + for m in possible_mains: + if os.path.isfile(m): + return m # exactly one main file + return possible_mains[0] # zero mains (we still 
need to return something) + + def _get_role_path(self, role): + ''' + the 'role', as specified in the ds (or as a bare string), can either + be a simple name or a full path. If it is a full path, we use the + basename as the role name, otherwise we take the name as-given and + append it to the default role path + ''' + + # FIXME: this should use unfrackpath once the utils code has been sorted out + role_path = os.path.normpath(role) + if os.path.exists(role_path): + return role_path + else: + for path in ('./roles', '/etc/ansible/roles'): + role_path = os.path.join(path, role) + if os.path.exists(role_path): + return role_path + # FIXME: raise something here + raise + + def _repo_url_to_role_name(self, repo_url): + # gets the role name out of a repo like + # http://git.example.com/repos/repo.git" => "repo" + + if '://' not in repo_url and '@' not in repo_url: + return repo_url + trailing_path = repo_url.split('/')[-1] + if trailing_path.endswith('.git'): + trailing_path = trailing_path[:-4] + if trailing_path.endswith('.tar.gz'): + trailing_path = trailing_path[:-7] + if ',' in trailing_path: + trailing_path = trailing_path.split(',')[0] + return trailing_path + + def _role_spec_parse(self, role_spec): + # takes a repo and a version like + # git+http://git.example.com/repos/repo.git,v1.0 + # and returns a list of properties such as: + # { + # 'scm': 'git', + # 'src': 'http://git.example.com/repos/repo.git', + # 'version': 'v1.0', + # 'name': 'repo' + # } + + default_role_versions = dict(git='master', hg='tip') + + role_spec = role_spec.strip() + role_version = '' + if role_spec == "" or role_spec.startswith("#"): + return (None, None, None, None) + + tokens = [s.strip() for s in role_spec.split(',')] + + # assume https://github.com URLs are git+https:// URLs and not + # tarballs unless they end in '.zip' + if 'github.com/' in tokens[0] and not tokens[0].startswith("git+") and not tokens[0].endswith('.tar.gz'): + tokens[0] = 'git+' + tokens[0] + + if '+' in 
tokens[0]: + (scm, role_url) = tokens[0].split('+') + else: + scm = None + role_url = tokens[0] + + if len(tokens) >= 2: + role_version = tokens[1] + + if len(tokens) == 3: + role_name = tokens[2] + else: + role_name = repo_url_to_role_name(tokens[0]) + + if scm and not role_version: + role_version = default_role_versions.get(scm, '') + + return dict(scm=scm, src=role_url, version=role_version, name=role_name) + + def _munge_role(self, ds): + if 'role' in ds: + # Old style: {role: "galaxy.role,version,name", other_vars: "here" } + role_info = self._role_spec_parse(ds['role']) + if isinstance(role_info, dict): + # Warning: Slight change in behaviour here. name may be being + # overloaded. Previously, name was only a parameter to the role. + # Now it is both a parameter to the role and the name that + # ansible-galaxy will install under on the local system. + if 'name' in ds and 'name' in role_info: + del role_info['name'] + ds.update(role_info) + else: + # New style: { src: 'galaxy.role,version,name', other_vars: "here" } + if 'github.com' in ds["src"] and 'http' in ds["src"] and '+' not in ds["src"] and not ds["src"].endswith('.tar.gz'): + ds["src"] = "git+" + ds["src"] + + if '+' in ds["src"]: + (scm, src) = ds["src"].split('+') + ds["scm"] = scm + ds["src"] = src + + if 'name' in role: + ds["role"] = ds["name"] + del ds["name"] + else: + ds["role"] = self._repo_url_to_role_name(ds["src"]) + + # set some values to a default value, if none were specified + ds.setdefault('version', '') + ds.setdefault('scm', None) + + return ds + + #------------------------------------------------------------------------------ + # attribute loading defs + + def _load_list_of_blocks(self, ds): + assert type(ds) == list + block_list = [] + for block in ds: + b = Block(block) + block_list.append(b) + return block_list + + def _load_task_blocks(self, attr, ds): + if ds is None: + return [] + return self._load_list_of_blocks(ds) + + def _load_handler_blocks(self, attr, ds): + if ds is 
None: + return [] + return self._load_list_of_blocks(ds) + + #------------------------------------------------------------------------------ + # other functions + + def get_variables(self): # returns the merged variables for this role, including # recursively merging those of all child roles return dict() @@ -60,8 +280,3 @@ class Role(PlaybookBase): all_deps = list_union(all_deps, self.dependencies) return all_deps - def get_blocks(self): - # should return - return self.blocks - - diff --git a/v2/test/parsing/test_general.py b/v2/test/parsing/test_general.py index 0a150e1a23..b86fcba289 100644 --- a/v2/test/parsing/test_general.py +++ b/v2/test/parsing/test_general.py @@ -20,7 +20,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type from .. compat import unittest -from ansible.parsing import load +from ansible.parsing import load_data from ansible.errors import AnsibleParserError import json @@ -68,12 +68,12 @@ class TestGeneralParsing(unittest.TestCase): "jkl" : 5678 } """ - output = load(input) + output = load_data(input) self.assertEqual(output['asdf'], '1234') self.assertEqual(output['jkl'], 5678) def parse_json_from_file(self): - output = load(MockFile(dict(a=1,b=2,c=3)),'json') + output = load_data(MockFile(dict(a=1,b=2,c=3)),'json') self.assertEqual(ouput, dict(a=1,b=2,c=3)) def parse_yaml_from_dict(self): @@ -81,12 +81,12 @@ class TestGeneralParsing(unittest.TestCase): asdf: '1234' jkl: 5678 """ - output = load(input) + output = load_data(input) self.assertEqual(output['asdf'], '1234') self.assertEqual(output['jkl'], 5678) def parse_yaml_from_file(self): - output = load(MockFile(dict(a=1,b=2,c=3),'yaml')) + output = load_data(MockFile(dict(a=1,b=2,c=3),'yaml')) self.assertEqual(output, dict(a=1,b=2,c=3)) def parse_fail(self): @@ -95,10 +95,10 @@ class TestGeneralParsing(unittest.TestCase): *** NOT VALID """ - self.assertRaises(load(input), AnsibleParserError) + self.assertRaises(load_data(input), AnsibleParserError) 
def parse_fail_from_file(self): - self.assertRaises(load(MockFile(None,'fail')), AnsibleParserError) + self.assertRaises(load_data(MockFile(None,'fail')), AnsibleParserError) def parse_fail_invalid_type(self): self.assertRaises(3000, AnsibleParsingError) diff --git a/v2/test/playbook/test_block.py b/v2/test/playbook/test_block.py index a0da7f0e6a..46921ae6d2 100644 --- a/v2/test/playbook/test_block.py +++ b/v2/test/playbook/test_block.py @@ -49,31 +49,39 @@ class TestBlock(unittest.TestCase): def test_load_block_simple(self): ds = dict( - begin = [], + block = [], rescue = [], - end = [], - otherwise = [], + always = [], + #otherwise = [], ) b = Block.load(ds) - self.assertEqual(b.begin, []) + self.assertEqual(b.block, []) self.assertEqual(b.rescue, []) - self.assertEqual(b.end, []) - self.assertEqual(b.otherwise, []) + self.assertEqual(b.always, []) + # not currently used + #self.assertEqual(b.otherwise, []) def test_load_block_with_tasks(self): ds = dict( - begin = [dict(action='begin')], + block = [dict(action='block')], rescue = [dict(action='rescue')], - end = [dict(action='end')], - otherwise = [dict(action='otherwise')], + always = [dict(action='always')], + #otherwise = [dict(action='otherwise')], ) b = Block.load(ds) - self.assertEqual(len(b.begin), 1) - assert isinstance(b.begin[0], Task) + self.assertEqual(len(b.block), 1) + assert isinstance(b.block[0], Task) self.assertEqual(len(b.rescue), 1) assert isinstance(b.rescue[0], Task) - self.assertEqual(len(b.end), 1) - assert isinstance(b.end[0], Task) - self.assertEqual(len(b.otherwise), 1) - assert isinstance(b.otherwise[0], Task) + self.assertEqual(len(b.always), 1) + assert isinstance(b.always[0], Task) + # not currently used + #self.assertEqual(len(b.otherwise), 1) + #assert isinstance(b.otherwise[0], Task) + + def test_load_implicit_block(self): + ds = [dict(action='foo')] + b = Block.load(ds) + self.assertEqual(len(b.block), 1) + assert isinstance(b.block[0], Task) diff --git 
a/v2/test/playbook/test_role.py b/v2/test/playbook/test_role.py new file mode 100644 index 0000000000..55e170de24 --- /dev/null +++ b/v2/test/playbook/test_role.py @@ -0,0 +1,52 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.playbook.block import Block +from ansible.playbook.role import Role +from ansible.playbook.task import Task +from .. 
compat import unittest + +class TestRole(unittest.TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_construct_empty_block(self): + r = Role() + + def test_role__load_list_of_blocks(self): + task = dict(action='test') + r = Role() + self.assertEqual(r._load_list_of_blocks([]), []) + res = r._load_list_of_blocks([task]) + self.assertEqual(len(res), 1) + assert isinstance(res[0], Block) + res = r._load_list_of_blocks([task,task,task]) + self.assertEqual(len(res), 3) + + def test_load_role_simple(self): + pass + + def test_load_role_complex(self): + pass From b61a78532c9d1e2d79f339f64fc38cb179ee8543 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 20 Oct 2014 13:10:42 -0500 Subject: [PATCH 252/813] Catch template syntax errors specifically when templating strings Fixes #9333 --- lib/ansible/utils/template.py | 2 ++ test/integration/Makefile | 1 + test/integration/roles/test_bad_parsing/tasks/main.yml | 4 +++- test/integration/test_group_by.yml | 2 +- 4 files changed, 7 insertions(+), 2 deletions(-) diff --git a/lib/ansible/utils/template.py b/lib/ansible/utils/template.py index 9521f2f2ec..5146057dac 100644 --- a/lib/ansible/utils/template.py +++ b/lib/ansible/utils/template.py @@ -340,6 +340,8 @@ def template_from_string(basedir, data, vars, fail_on_undefined=False): try: t = environment.from_string(data) + except TemplateSyntaxError, e: + raise errors.AnsibleError("template error while templating string: %s" % str(e)) except Exception, e: if 'recursion' in str(e): raise errors.AnsibleError("recursive loop detected in template string: %s" % data) diff --git a/test/integration/Makefile b/test/integration/Makefile index 4bccc8cd9f..56e35d7c8b 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -26,6 +26,7 @@ parsing: ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags common,scenario2; [ $$? 
-eq 3 ] ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags common,scenario3; [ $$? -eq 3 ] ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags common,scenario4; [ $$? -eq 3 ] + ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags common,scenario5; [ $$? -eq 3 ] ansible-playbook good_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) includes: diff --git a/test/integration/roles/test_bad_parsing/tasks/main.yml b/test/integration/roles/test_bad_parsing/tasks/main.yml index fae01f2ee9..3899821de6 100644 --- a/test/integration/roles/test_bad_parsing/tasks/main.yml +++ b/test/integration/roles/test_bad_parsing/tasks/main.yml @@ -49,5 +49,7 @@ failed_when: False tags: scenario4 - +- name: test that a missing/malformed jinja2 filter fails + debug: msg="{{output_dir|badfiltername}}" + tags: scenario5 diff --git a/test/integration/test_group_by.yml b/test/integration/test_group_by.yml index 6385c1f5ad..0f4ff41387 100644 --- a/test/integration/test_group_by.yml +++ b/test/integration/test_group_by.yml @@ -92,7 +92,7 @@ - name: set a fact to check that we ran this play set_fact: genus_LAMA=true -- hosts: '{{genus' +- hosts: 'genus' gather_facts: false tasks: - name: no hosts should match this group From 18a9282e0de42ae61fb29d959b411d3fcfa22161 Mon Sep 17 00:00:00 2001 From: Michael Warkentin Date: Mon, 20 Oct 2014 20:03:43 -0400 Subject: [PATCH 253/813] Add host to polling message The message for when an async task has finished has the hostname in it. 
This adds it to the polling message as well: ``` finished on 107.6.24.140 finished on 69.90.50.171 polling on 69.90.50.172, 6840s remaining ``` --- lib/ansible/callbacks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/callbacks.py b/lib/ansible/callbacks.py index 03b7b8a853..d6dfb3c11c 100644 --- a/lib/ansible/callbacks.py +++ b/lib/ansible/callbacks.py @@ -411,7 +411,7 @@ class CliRunnerCallbacks(DefaultRunnerCallbacks): self._async_notified[jid] = clock + 1 if self._async_notified[jid] > clock: self._async_notified[jid] = clock - display(" polling, %ss remaining" % (jid, clock), runner=self.runner) + display(" polling on %s, %ss remaining" % (jid, host, clock), runner=self.runner) super(CliRunnerCallbacks, self).on_async_poll(host, res, jid, clock) def on_async_ok(self, host, res, jid): From d69aa192063700b052d82b302a91d8251a656183 Mon Sep 17 00:00:00 2001 From: Michael Warkentin Date: Mon, 20 Oct 2014 20:08:31 -0400 Subject: [PATCH 254/813] Fix typo `Propspective` -> `Prospective` --- docsite/rst/community.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/community.rst b/docsite/rst/community.rst index 70e65cdf20..b89d6e61df 100644 --- a/docsite/rst/community.rst +++ b/docsite/rst/community.rst @@ -115,7 +115,7 @@ github about any errors you spot or sections you would like to see added. For mo on creating pull requests, please refer to the `github help guide `_. 
-For Current and Propspective Developers +For Current and Prospective Developers ======================================= I'd Like To Learn How To Develop on Ansible From feb9ed1de85e9cad6450bf61341c3a0b535124cd Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 20 Oct 2014 22:15:46 -0400 Subject: [PATCH 255/813] Fix template module incorrectly handling mode when dest is a directory Fixes #9350 --- lib/ansible/modules/core | 2 +- lib/ansible/runner/action_plugins/template.py | 5 ++- .../roles/test_template/tasks/main.yml | 39 +++++++++++++++++-- 3 files changed, 41 insertions(+), 5 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 5af8d55b03..fa6d74a970 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 5af8d55b0365a5c3278c43b5424bf5f2ddf897b8 +Subproject commit fa6d74a97054d5d5123696d1af94b31ac1a65237 diff --git a/lib/ansible/runner/action_plugins/template.py b/lib/ansible/runner/action_plugins/template.py index 11e37b4815..4f5a41df8a 100644 --- a/lib/ansible/runner/action_plugins/template.py +++ b/lib/ansible/runner/action_plugins/template.py @@ -133,9 +133,12 @@ class ActionModule(object): # when running the file module based on the template data, we do # not want the source filename (the name of the template) to be used, # since this would mess up links, so we clear the src param and tell - # the module to follow links + # the module to follow links. When doing that, we have to set + # original_basename to the template just in case the dest is + # a directory. 
new_module_args = dict( src=None, + original_basename=os.path.basename(source), follow=True, ) # be sure to inject the check mode param into the module args and diff --git a/test/integration/roles/test_template/tasks/main.yml b/test/integration/roles/test_template/tasks/main.yml index 4784dc6ac8..0c5d3d18bf 100644 --- a/test/integration/roles/test_template/tasks/main.yml +++ b/test/integration/roles/test_template/tasks/main.yml @@ -60,7 +60,40 @@ register: file_result - name: ensure file mode did not change - assert: - that: + assert: + that: - "file_result.changed != True" - + +# VERIFY dest as a directory does not break file attributes +# Note: expanduser is needed to go down the particular codepath that was broken before +- name: setup directory for test + file: state=directory dest={{output_dir | expanduser}}/template-dir mode=0755 owner=nobody group=nobody + +- name: set file mode when the destination is a directory + template: src=foo.j2 dest={{output_dir | expanduser}}/template-dir/ mode=0600 owner=root group=root + +- name: set file mode when the destination is a directory + template: src=foo.j2 dest={{output_dir | expanduser}}/template-dir/ mode=0600 owner=root group=root + register: file_result + +- name: check that the file has the correct attributes + stat: path={{output_dir | expanduser}}/template-dir/foo.j2 + register: file_attrs + +- assert: + that: + - "file_attrs.stat.gid == 0" + - "file_attrs.stat.uid == 0" + - "file_attrs.stat.pw_name == 'root'" + - "file_attrs.stat.mode == '0600'" + +- name: check that the containing directory did not change attributes + stat: path={{output_dir | expanduser}}/template-dir/ + register: dir_attrs + +- assert: + that: + - "dir_attrs.stat.gid != 0" + - "dir_attrs.stat.uid != 0" + - "dir_attrs.stat.pw_name == 'nobody'" + - "dir_attrs.stat.mode == '0755'" From 694e0420ab2743ac79ac05e4ba7d30218012d1cd Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 21 Oct 2014 00:21:45 -0400 Subject: [PATCH 256/813] Update 
the modules to pull in the files module needed for the updated integration tests --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index fa6d74a970..88b73afcbe 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit fa6d74a97054d5d5123696d1af94b31ac1a65237 +Subproject commit 88b73afcbe15a0c5ddbbb9c977e7c09199e47733 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 681db4ce2c..a0df36c6ab 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 681db4ce2c534eca4fca57c5a83a5be8035c257d +Subproject commit a0df36c6ab257281cbaae00b8a4590200802f571 From da9d87b1d479ef23260d268693a00f12d9565444 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 21 Oct 2014 00:32:08 -0400 Subject: [PATCH 257/813] Make vault file creation use a tempfile --- lib/ansible/utils/vault.py | 72 ++++++++++++++++++++------------------ 1 file changed, 38 insertions(+), 34 deletions(-) diff --git a/lib/ansible/utils/vault.py b/lib/ansible/utils/vault.py index 3b83d2989e..5ed2ad4559 100644 --- a/lib/ansible/utils/vault.py +++ b/lib/ansible/utils/vault.py @@ -181,6 +181,35 @@ class VaultEditor(object): self.password = password self.filename = filename + def _edit_file_helper(self, existing_data=None, cipher=None): + # make sure the umask is set to a sane value + old_umask = os.umask(0077) + + # Create a tempfile + _, tmp_path = tempfile.mkstemp() + + if existing_data: + self.write_data(data, tmp_path) + + # drop the user into an editor on the tmp file + call(self._editor_shell_command(tmp_path)) + tmpdata = self.read_data(tmp_path) + + # create new vault + this_vault = VaultLib(self.password) + if cipher: + this_vault.cipher_name = cipher + + # encrypt new data and write out to tmp + enc_data = this_vault.encrypt(tmpdata) + self.write_data(enc_data, 
tmp_path) + + # shuffle tmp file into place + self.shuffle_files(tmp_path, self.filename) + + # and restore umask + os.umask(old_umask) + def create_file(self): """ create a new encrypted file """ @@ -190,15 +219,8 @@ class VaultEditor(object): if os.path.isfile(self.filename): raise errors.AnsibleError("%s exists, please use 'edit' instead" % self.filename) - # drop the user into vim on file - old_umask = os.umask(0077) - call(self._editor_shell_command(self.filename)) - tmpdata = self.read_data(self.filename) - this_vault = VaultLib(self.password) - this_vault.cipher_name = self.cipher_name - enc_data = this_vault.encrypt(tmpdata) - self.write_data(enc_data, self.filename) - os.umask(old_umask) + # Let the user specify contents and save file + self._edit_file_helper(cipher=self.cipher_name) def decrypt_file(self): @@ -207,7 +229,7 @@ class VaultEditor(object): if not os.path.isfile(self.filename): raise errors.AnsibleError("%s does not exist" % self.filename) - + tmpdata = self.read_data(self.filename) this_vault = VaultLib(self.password) if this_vault.is_encrypted(tmpdata): @@ -224,35 +246,17 @@ class VaultEditor(object): if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH: raise errors.AnsibleError(CRYPTO_UPGRADE) - # make sure the umask is set to a sane value - old_mask = os.umask(0077) - # decrypt to tmpfile tmpdata = self.read_data(self.filename) this_vault = VaultLib(self.password) dec_data = this_vault.decrypt(tmpdata) - _, tmp_path = tempfile.mkstemp() - self.write_data(dec_data, tmp_path) - # drop the user into vim on the tmp file - call(self._editor_shell_command(tmp_path)) - new_data = self.read_data(tmp_path) + # let the user edit the data and save + self._edit_file_helper(existing_data=dec_data) + ###we want the cipher to default to AES256 (get rid of files + # encrypted with the AES cipher) + #self._edit_file_helper(existing_data=dec_data, cipher=this_vault.cipher_name) - # create new vault - new_vault = VaultLib(self.password) - - # 
we want the cipher to default to AES256 - #new_vault.cipher_name = this_vault.cipher_name - - # encrypt new data a write out to tmp - enc_data = new_vault.encrypt(new_data) - self.write_data(enc_data, tmp_path) - - # shuffle tmp file into place - self.shuffle_files(tmp_path, self.filename) - - # and restore the old umask - os.umask(old_mask) def view_file(self): @@ -277,7 +281,7 @@ class VaultEditor(object): if not os.path.isfile(self.filename): raise errors.AnsibleError("%s does not exist" % self.filename) - + tmpdata = self.read_data(self.filename) this_vault = VaultLib(self.password) this_vault.cipher_name = self.cipher_name From 3f27e5e0804b7b301fabab45bfd882a39a3b495c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 21 Oct 2014 01:09:11 -0400 Subject: [PATCH 258/813] Compat tree for code dealing with compatibility between python versions --- v2/ansible/compat/__init__.py | 27 +++++++++++++++++++ v2/ansible/compat/configparser.py | 30 ++++++++++++++++++++++ v2/ansible/compat/tests/__init__.py | 40 +++++++++++++++++++++++++++++ v2/ansible/compat/tests/mock.py | 38 +++++++++++++++++++++++++++ v2/ansible/compat/tests/unittest.py | 36 ++++++++++++++++++++++++++ v2/ansible/constants.py | 6 +---- 6 files changed, 172 insertions(+), 5 deletions(-) create mode 100644 v2/ansible/compat/__init__.py create mode 100644 v2/ansible/compat/configparser.py create mode 100644 v2/ansible/compat/tests/__init__.py create mode 100644 v2/ansible/compat/tests/mock.py create mode 100644 v2/ansible/compat/tests/unittest.py diff --git a/v2/ansible/compat/__init__.py b/v2/ansible/compat/__init__.py new file mode 100644 index 0000000000..ab861135c7 --- /dev/null +++ b/v2/ansible/compat/__init__.py @@ -0,0 +1,27 @@ +# (c) 2014, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the 
License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +''' +Compat library for ansible. This contains compatibility definitions for older python. +When we need to import a module differently depending on python version, do it +here. Then in the code we can simply import from compat in order to get what we want. +''' + diff --git a/v2/ansible/compat/configparser.py b/v2/ansible/compat/configparser.py new file mode 100644 index 0000000000..7cce642376 --- /dev/null +++ b/v2/ansible/compat/configparser.py @@ -0,0 +1,30 @@ +# (c) 2014, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +''' +Compat module for Python3.x's configparser +''' + +# Python 2.7 +try: + from configparser import * +except ImportError: + from ConfigParser import * diff --git a/v2/ansible/compat/tests/__init__.py b/v2/ansible/compat/tests/__init__.py new file mode 100644 index 0000000000..fc05b2549b --- /dev/null +++ b/v2/ansible/compat/tests/__init__.py @@ -0,0 +1,40 @@ +# (c) 2014, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +''' +This module contains things that are only needed for compat in the testsuites, +not in ansible itself. If you are not installing the test suite, you can +safely remove this subdirectory. 
+''' + +# +# Compat for python2.7 +# + +# One unittest needs to import builtins via __import__() so we need to have +# the string that represents it +try: + import __builtin__ +except ImportError: + BUILTINS = 'builtins' +else: + BUILTINS = '__builtin__' + diff --git a/v2/ansible/compat/tests/mock.py b/v2/ansible/compat/tests/mock.py new file mode 100644 index 0000000000..0614391c4b --- /dev/null +++ b/v2/ansible/compat/tests/mock.py @@ -0,0 +1,38 @@ +# (c) 2014, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +''' +Compat module for Python3.x's unittest.mock module +''' + +# Python 2.7 + +# Note: Could use the pypi mock library on python3.x as well as python2.x. 
It +# is the same as the python3 stdlib mock library + +try: + from unittest.mock import * +except ImportError: + # Python 2 + try: + from mock import * + except ImportError: + print('You need the mock library installed on python2.x to run tests') diff --git a/v2/ansible/compat/tests/unittest.py b/v2/ansible/compat/tests/unittest.py new file mode 100644 index 0000000000..a629849b31 --- /dev/null +++ b/v2/ansible/compat/tests/unittest.py @@ -0,0 +1,36 @@ +# (c) 2014, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +''' +Compat module for Python2.7's unittest module +''' + +import sys + +# Python 2.6 +if sys.version_info < (2, 7): + try: + # Need unittest2 on python2.6 + from unittest2 import * + except ImportError: + print('You need unittest2 installed on python2.6.x to run tests') +else: + from unittest import * diff --git a/v2/ansible/constants.py b/v2/ansible/constants.py index 97d6870a3d..e74720b8a6 100644 --- a/v2/ansible/constants.py +++ b/v2/ansible/constants.py @@ -23,11 +23,7 @@ import os import pwd import sys -try: - import configparser -except ImportError: - # Python 2.7 - import ConfigParser as configparser +from . 
compat import configparser from string import ascii_letters, digits From a9542209570af63f3203019dcf1780f88a57e0b2 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 21 Oct 2014 01:14:30 -0400 Subject: [PATCH 259/813] Migrate the v2/tests to use the ansible.compat module --- v2/test/compat.py | 56 ------------------------------- v2/test/errors/test_errors.py | 5 +-- v2/test/parsing/test_general.py | 2 +- v2/test/parsing/test_mod_args.py | 2 +- v2/test/parsing/yaml/test_yaml.py | 2 +- v2/test/playbook/test_block.py | 2 +- v2/test/playbook/test_role.py | 2 +- v2/test/playbook/test_task.py | 2 +- 8 files changed, 9 insertions(+), 64 deletions(-) delete mode 100644 v2/test/compat.py diff --git a/v2/test/compat.py b/v2/test/compat.py deleted file mode 100644 index 6930de94b0..0000000000 --- a/v2/test/compat.py +++ /dev/null @@ -1,56 +0,0 @@ -# (c) 2012-2014, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import sys -import unittest - -# -# Compat for python2.6 -# - -if sys.version_info < (2, 7): - try: - # Need unittest2 on python2.6 - import unittest2 as unittest - except ImportError: - print('You need unittest2 installed on python2.x') -else: - import unittest - - -# -# Compat for python2.7 -# - -# Could use the pypi mock library on py3 as well as py2. They are the same -try: - from unittest.mock import mock_open, patch -except ImportError: - # Python2 - from mock import mock_open, patch - -try: - import __builtin__ -except ImportError: - BUILTINS = 'builtins' -else: - BUILTINS = '__builtin__' - diff --git a/v2/test/errors/test_errors.py b/v2/test/errors/test_errors.py index 60ba7eb858..6088f3bd5e 100644 --- a/v2/test/errors/test_errors.py +++ b/v2/test/errors/test_errors.py @@ -19,12 +19,13 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from .. compat import unittest +from ansible.compat.tests import unittest from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject from ansible.errors import AnsibleError -from .. compat import BUILTINS, mock_open, patch +from ansible.compat.tests import BUILTINS +from ansible.compat.tests.mock import mock_open, patch class TestErrors(unittest.TestCase): diff --git a/v2/test/parsing/test_general.py b/v2/test/parsing/test_general.py index b86fcba289..5ccefd3b50 100644 --- a/v2/test/parsing/test_general.py +++ b/v2/test/parsing/test_general.py @@ -19,7 +19,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from .. 
compat import unittest +from ansible.compat.tests import unittest from ansible.parsing import load_data from ansible.errors import AnsibleParserError diff --git a/v2/test/parsing/test_mod_args.py b/v2/test/parsing/test_mod_args.py index e593522aa8..8268e6126b 100644 --- a/v2/test/parsing/test_mod_args.py +++ b/v2/test/parsing/test_mod_args.py @@ -21,7 +21,7 @@ __metaclass__ = type from ansible.parsing.mod_args import ModuleArgsParser -from .. compat import unittest +from ansible.compat.tests import unittest class TestModArgsDwim(unittest.TestCase): diff --git a/v2/test/parsing/yaml/test_yaml.py b/v2/test/parsing/yaml/test_yaml.py index 6b1d09d741..c468ef6d6f 100644 --- a/v2/test/parsing/yaml/test_yaml.py +++ b/v2/test/parsing/yaml/test_yaml.py @@ -19,7 +19,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from ... compat import unittest +from ansible.compat.tests import unittest from yaml.scanner import ScannerError diff --git a/v2/test/playbook/test_block.py b/v2/test/playbook/test_block.py index 46921ae6d2..ccb8f2b6d3 100644 --- a/v2/test/playbook/test_block.py +++ b/v2/test/playbook/test_block.py @@ -21,7 +21,7 @@ __metaclass__ = type from ansible.playbook.block import Block from ansible.playbook.task import Task -from .. compat import unittest +from ansible.compat.tests import unittest class TestBlock(unittest.TestCase): diff --git a/v2/test/playbook/test_role.py b/v2/test/playbook/test_role.py index 55e170de24..f2236f7fc0 100644 --- a/v2/test/playbook/test_role.py +++ b/v2/test/playbook/test_role.py @@ -22,7 +22,7 @@ __metaclass__ = type from ansible.playbook.block import Block from ansible.playbook.role import Role from ansible.playbook.task import Task -from .. 
compat import unittest +from ansible.compat.tests import unittest class TestRole(unittest.TestCase): diff --git a/v2/test/playbook/test_task.py b/v2/test/playbook/test_task.py index 487bca207b..0af53c9117 100644 --- a/v2/test/playbook/test_task.py +++ b/v2/test/playbook/test_task.py @@ -20,7 +20,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.playbook.task import Task -from .. compat import unittest +from ansible.compat.tests import unittest basic_shell_task = dict( name = 'Test Task', From e78c5f925bd2bb5d90f95ce83778a61523db785e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 21 Oct 2014 01:24:09 -0400 Subject: [PATCH 260/813] Python3 fixes --- v2/ansible/parsing/__init__.py | 2 +- v2/ansible/parsing/vault/__init__.py | 4 ++-- v2/ansible/playbook/base.py | 6 +++--- v2/test/errors/test_errors.py | 2 +- v2/test/plugins/__init__.py | 5 +++++ v2/test/plugins/test_plugins.py | 14 ++++++++++---- 6 files changed, 22 insertions(+), 11 deletions(-) diff --git a/v2/ansible/parsing/__init__.py b/v2/ansible/parsing/__init__.py index 229be2622f..39cfb2ac98 100644 --- a/v2/ansible/parsing/__init__.py +++ b/v2/ansible/parsing/__init__.py @@ -219,5 +219,5 @@ def load_data_from_file(path, vault_password=None): try: return load_data(data) - except YAMLError, exc: + except YAMLError as exc: process_yaml_error(exc, data, path, show_content) diff --git a/v2/ansible/parsing/vault/__init__.py b/v2/ansible/parsing/vault/__init__.py index 3b83d2989e..c1c5b65404 100644 --- a/v2/ansible/parsing/vault/__init__.py +++ b/v2/ansible/parsing/vault/__init__.py @@ -191,7 +191,7 @@ class VaultEditor(object): raise errors.AnsibleError("%s exists, please use 'edit' instead" % self.filename) # drop the user into vim on file - old_umask = os.umask(0077) + old_umask = os.umask(0o077) call(self._editor_shell_command(self.filename)) tmpdata = self.read_data(self.filename) this_vault = VaultLib(self.password) @@ -225,7 +225,7 @@ class 
VaultEditor(object): raise errors.AnsibleError(CRYPTO_UPGRADE) # make sure the umask is set to a sane value - old_mask = os.umask(0077) + old_mask = os.umask(0o077) # decrypt to tmpfile tmpdata = self.read_data(self.filename) diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py index a992e19a5d..e61e1f6545 100644 --- a/v2/ansible/playbook/base.py +++ b/v2/ansible/playbook/base.py @@ -37,7 +37,7 @@ class Base: # each class knows attributes set upon it, see Task.py for example self._attributes = dict() - for (name, value) in self._get_base_attributes().iteritems(): + for (name, value) in iteritems(self._get_base_attributes()): self._attributes[name] = value.default def _get_base_attributes(self): @@ -72,7 +72,7 @@ class Base: ds = self.munge(ds) # walk all attributes in the class - for (name, attribute) in self._get_base_attributes().iteritems(): + for (name, attribute) in iteritems(self._get_base_attributes()): # copy the value over unless a _load_field method is defined if name in ds: @@ -91,7 +91,7 @@ class Base: ''' validation that is done at parse time, not load time ''' # walk all fields in the object - for (name, attribute) in self._get_base_attributes().iteritems(): + for (name, attribute) in iteritems(self._get_base_attributes()): # run validator only if present method = getattr(self, '_validate_%s' % name, None) diff --git a/v2/test/errors/test_errors.py b/v2/test/errors/test_errors.py index 6088f3bd5e..5d1868a5a4 100644 --- a/v2/test/errors/test_errors.py +++ b/v2/test/errors/test_errors.py @@ -57,7 +57,7 @@ class TestErrors(unittest.TestCase): m = mock_open() m.return_value.readlines.return_value = ['this is line 1\n'] - with patch('__builtin__.open', m): + with patch('{0}.open'.format(BUILTINS), m): # this line will be found in the file self.obj._data_source = 'foo.yml' self.obj._line_number = 1 diff --git a/v2/test/plugins/__init__.py b/v2/test/plugins/__init__.py index 1f84012e01..785fc45992 100644 --- a/v2/test/plugins/__init__.py 
+++ b/v2/test/plugins/__init__.py @@ -14,3 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + diff --git a/v2/test/plugins/test_plugins.py b/v2/test/plugins/test_plugins.py index 62d8ee4dfb..e6bef809e6 100644 --- a/v2/test/plugins/test_plugins.py +++ b/v2/test/plugins/test_plugins.py @@ -15,10 +15,16 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -import os -import unittest -from mock import mock_open, patch, MagicMock +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +from ansible.compat.tests import unittest +from ansible.compat.tests import BUILTINS + +from ansible.compat.tests.mock import mock_open, patch, MagicMock from ansible.plugins import MODULE_CACHE, PATH_CACHE, PLUGIN_PATH_CACHE, _basedirs, push_basedir, PluginLoader @@ -54,7 +60,7 @@ class TestErrors(unittest.TestCase): m = MagicMock() m.return_value.__file__ = '/path/to/my/test.py' pl = PluginLoader('test', 'foo.bar.bam', 'test', 'test_plugin') - with patch('__builtin__.__import__', m): + with patch('{0}.__import__'.format(BUILTINS), m): self.assertEqual(pl._get_package_paths(), ['/path/to/my/bar/bam']) def test_plugins__get_paths(self): From 43b3eecf525511932fa2d6b8439fc0f832873d29 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 21 Oct 2014 01:31:55 -0400 Subject: [PATCH 261/813] Debian/Ubuntu doesn't have group nobody so remove the group portion of the new template tests as owner and mode will exercise the problematic code path --- test/integration/roles/test_template/tasks/main.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/test/integration/roles/test_template/tasks/main.yml b/test/integration/roles/test_template/tasks/main.yml index 
0c5d3d18bf..0305885473 100644 --- a/test/integration/roles/test_template/tasks/main.yml +++ b/test/integration/roles/test_template/tasks/main.yml @@ -67,7 +67,7 @@ # VERIFY dest as a directory does not break file attributes # Note: expanduser is needed to go down the particular codepath that was broken before - name: setup directory for test - file: state=directory dest={{output_dir | expanduser}}/template-dir mode=0755 owner=nobody group=nobody + file: state=directory dest={{output_dir | expanduser}}/template-dir mode=0755 owner=nobody group=root - name: set file mode when the destination is a directory template: src=foo.j2 dest={{output_dir | expanduser}}/template-dir/ mode=0600 owner=root group=root @@ -82,7 +82,6 @@ - assert: that: - - "file_attrs.stat.gid == 0" - "file_attrs.stat.uid == 0" - "file_attrs.stat.pw_name == 'root'" - "file_attrs.stat.mode == '0600'" @@ -93,7 +92,6 @@ - assert: that: - - "dir_attrs.stat.gid != 0" - "dir_attrs.stat.uid != 0" - "dir_attrs.stat.pw_name == 'nobody'" - "dir_attrs.stat.mode == '0755'" From e265fb85b4e30b11d7d3b5ab3373dec3ad3630c9 Mon Sep 17 00:00:00 2001 From: ronalexander Date: Tue, 21 Oct 2014 09:21:31 -0500 Subject: [PATCH 262/813] Update intro_installation.rst Corrected casing of Jinja2 in pip dependencies. 
--- docsite/rst/intro_installation.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/intro_installation.rst b/docsite/rst/intro_installation.rst index 692e9048a2..7f41851800 100644 --- a/docsite/rst/intro_installation.rst +++ b/docsite/rst/intro_installation.rst @@ -117,7 +117,7 @@ If you don't have pip installed in your version of Python, install pip:: Ansible also uses the following Python modules that need to be installed:: - $ sudo pip install paramiko PyYAML jinja2 httplib2 + $ sudo pip install paramiko PyYAML Jinja2 httplib2 Note when updating ansible, be sure to not only update the source tree, but also the "submodules" in git which point at Ansible's own modules (not the same kind of modules, alas). From 3d135f98d191ab58e9104076423ad3bbe1a9ffa5 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 21 Oct 2014 10:32:25 -0400 Subject: [PATCH 263/813] Small python3 compat in vault to keep code in sync with v2 --- lib/ansible/utils/vault.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/utils/vault.py b/lib/ansible/utils/vault.py index 5ed2ad4559..506c0852f3 100644 --- a/lib/ansible/utils/vault.py +++ b/lib/ansible/utils/vault.py @@ -183,7 +183,7 @@ class VaultEditor(object): def _edit_file_helper(self, existing_data=None, cipher=None): # make sure the umask is set to a sane value - old_umask = os.umask(0077) + old_umask = os.umask(0o077) # Create a tempfile _, tmp_path = tempfile.mkstemp() From 4c86bd3ca64e10141ab21053bd34870d71ccbff1 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 21 Oct 2014 10:33:33 -0400 Subject: [PATCH 264/813] Merge the vault tempfile fix to the v2 tree --- v2/ansible/parsing/vault/__init__.py | 72 +++++++++++++++------------- 1 file changed, 38 insertions(+), 34 deletions(-) diff --git a/v2/ansible/parsing/vault/__init__.py b/v2/ansible/parsing/vault/__init__.py index c1c5b65404..506c0852f3 100644 --- a/v2/ansible/parsing/vault/__init__.py +++ 
b/v2/ansible/parsing/vault/__init__.py @@ -181,6 +181,35 @@ class VaultEditor(object): self.password = password self.filename = filename + def _edit_file_helper(self, existing_data=None, cipher=None): + # make sure the umask is set to a sane value + old_umask = os.umask(0o077) + + # Create a tempfile + _, tmp_path = tempfile.mkstemp() + + if existing_data: + self.write_data(data, tmp_path) + + # drop the user into an editor on the tmp file + call(self._editor_shell_command(tmp_path)) + tmpdata = self.read_data(tmp_path) + + # create new vault + this_vault = VaultLib(self.password) + if cipher: + this_vault.cipher_name = cipher + + # encrypt new data and write out to tmp + enc_data = this_vault.encrypt(tmpdata) + self.write_data(enc_data, tmp_path) + + # shuffle tmp file into place + self.shuffle_files(tmp_path, self.filename) + + # and restore umask + os.umask(old_umask) + def create_file(self): """ create a new encrypted file """ @@ -190,15 +219,8 @@ class VaultEditor(object): if os.path.isfile(self.filename): raise errors.AnsibleError("%s exists, please use 'edit' instead" % self.filename) - # drop the user into vim on file - old_umask = os.umask(0o077) - call(self._editor_shell_command(self.filename)) - tmpdata = self.read_data(self.filename) - this_vault = VaultLib(self.password) - this_vault.cipher_name = self.cipher_name - enc_data = this_vault.encrypt(tmpdata) - self.write_data(enc_data, self.filename) - os.umask(old_umask) + # Let the user specify contents and save file + self._edit_file_helper(cipher=self.cipher_name) def decrypt_file(self): @@ -207,7 +229,7 @@ class VaultEditor(object): if not os.path.isfile(self.filename): raise errors.AnsibleError("%s does not exist" % self.filename) - + tmpdata = self.read_data(self.filename) this_vault = VaultLib(self.password) if this_vault.is_encrypted(tmpdata): @@ -224,35 +246,17 @@ class VaultEditor(object): if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH: raise 
errors.AnsibleError(CRYPTO_UPGRADE) - # make sure the umask is set to a sane value - old_mask = os.umask(0o077) - # decrypt to tmpfile tmpdata = self.read_data(self.filename) this_vault = VaultLib(self.password) dec_data = this_vault.decrypt(tmpdata) - _, tmp_path = tempfile.mkstemp() - self.write_data(dec_data, tmp_path) - # drop the user into vim on the tmp file - call(self._editor_shell_command(tmp_path)) - new_data = self.read_data(tmp_path) + # let the user edit the data and save + self._edit_file_helper(existing_data=dec_data) + ###we want the cipher to default to AES256 (get rid of files + # encrypted with the AES cipher) + #self._edit_file_helper(existing_data=dec_data, cipher=this_vault.cipher_name) - # create new vault - new_vault = VaultLib(self.password) - - # we want the cipher to default to AES256 - #new_vault.cipher_name = this_vault.cipher_name - - # encrypt new data a write out to tmp - enc_data = new_vault.encrypt(new_data) - self.write_data(enc_data, tmp_path) - - # shuffle tmp file into place - self.shuffle_files(tmp_path, self.filename) - - # and restore the old umask - os.umask(old_mask) def view_file(self): @@ -277,7 +281,7 @@ class VaultEditor(object): if not os.path.isfile(self.filename): raise errors.AnsibleError("%s does not exist" % self.filename) - + tmpdata = self.read_data(self.filename) this_vault = VaultLib(self.password) this_vault.cipher_name = self.cipher_name From 0ed97e4d111ac39004854121e846deac696261c7 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 21 Oct 2014 10:10:03 -0500 Subject: [PATCH 265/813] Updating v2 Role class code --- v2/ansible/parsing/yaml/objects.py | 9 +++ v2/ansible/playbook/role.py | 92 ++++++++++++++++++------------ 2 files changed, 64 insertions(+), 37 deletions(-) diff --git a/v2/ansible/parsing/yaml/objects.py b/v2/ansible/parsing/yaml/objects.py index 6fb1631b87..be687d1e14 100644 --- a/v2/ansible/parsing/yaml/objects.py +++ b/v2/ansible/parsing/yaml/objects.py @@ -32,6 +32,15 @@ class 
AnsibleBaseYAMLObject: def get_position_info(self): return (self._data_source, self._line_number, self._column_number) + def copy_position_info(obj): + ''' copies the position info from another object ''' + assert isinstance(obj, AnsibleBaseYAMLObject) + + (src, line, col) = obj.get_position_info() + self._data_source = src + self._line_number = line + self._column_number = col + class AnsibleMapping(AnsibleBaseYAMLObject, dict): ''' sub class for dictionaries ''' pass diff --git a/v2/ansible/playbook/role.py b/v2/ansible/playbook/role.py index af3855f3d9..ea4f2af67f 100644 --- a/v2/ansible/playbook/role.py +++ b/v2/ansible/playbook/role.py @@ -27,21 +27,23 @@ from ansible.playbook.attribute import FieldAttribute from ansible.playbook.base import Base from ansible.playbook.block import Block from ansible.parsing import load_data_from_file +from ansible.errors import AnsibleError -#from ansible.utils import list_union, unfrackpath +from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping class Role(Base): - _role = FieldAttribute(isa='string') + _role_name = FieldAttribute(isa='string') + _role_path = FieldAttribute(isa='string') _src = FieldAttribute(isa='string') _scm = FieldAttribute(isa='string') _version = FieldAttribute(isa='string') - _params = FieldAttribute(isa='dict') - _metadata = FieldAttribute(isa='dict') - _task_blocks = FieldAttribute(isa='list') - _handler_blocks = FieldAttribute(isa='list') - _default_vars = FieldAttribute(isa='dict') - _role_vars = FieldAttribute(isa='dict') + _params = FieldAttribute(isa='dict', default=dict()) + _metadata = FieldAttribute(isa='dict', default=dict()) + _task_blocks = FieldAttribute(isa='list', default=[]) + _handler_blocks = FieldAttribute(isa='list', default=[]) + _default_vars = FieldAttribute(isa='dict', default=dict()) + _role_vars = FieldAttribute(isa='dict', default=dict()) def __init__(self, vault_password=None): self._role_path = None @@ -52,7 +54,7 @@ class Role(Base): return 
self.get_name() def get_name(self): - return self._attributes['role'] + return self._attributes['role_name'] @staticmethod def load(data, vault_password=None): @@ -65,44 +67,52 @@ class Role(Base): # munge, and other functions used for loading the ds def munge(self, ds): - # Role definitions can be strings or dicts, so we fix - # things up here. Anything that is not a role name, tag, - # or conditional will also be added to the params sub- - # dictionary for loading later + # create the new ds as an AnsibleMapping, so we can preserve any line/column + # data from the parser, and copy that info from the old ds (if applicable) + new_ds = AnsibleMapping() + if isinstance(ds, AnsibleBaseYAMLObject): + new_ds.copy_position_info(ds) + + # Role definitions can be strings or dicts, so we fix things up here. + # Anything that is not a role name, tag, or conditional will also be + # added to the params sub-dictionary for loading later if isinstance(ds, string_types): - new_ds = dict(role=ds) + new_ds['role_name'] = ds else: + # munge the role ds here to correctly fill in the various fields which + # may be used to define the role, like: role, src, scm, etc. 
ds = self._munge_role(ds) + # now we split any random role params off from the role spec and store + # them in a dictionary of params for parsing later params = dict() - new_ds = dict() - + attr_names = [attr_name for (attr_name, attr_value) in self._get_base_attributes().iteritems()] for (key, value) in iteritems(ds): - if key not in [name for (name, value) in self._get_base_attributes().iteritems()]: - # this key does not match a field attribute, - # so it must be a role param + if key not in attr_names and key != 'role': + # this key does not match a field attribute, so it must be a role param params[key] = value else: # this is a field attribute, so copy it over directly new_ds[key] = value - - # finally, assign the params to a new entry in the revised ds new_ds['params'] = params - # set the role path, based on the role definition - self._role_path = self._get_role_path(new_ds.get('role')) + # Set the role name and path, based on the role definition + (role_name, role_path) = self._get_role_path(new_ds.get('role_name')) + new_ds['role_name'] = role_name + new_ds['role_path'] = role_path # load the role's files, if they exist - new_ds['metadata'] = self._load_role_yaml('meta') - new_ds['task_blocks'] = self._load_role_yaml('tasks') - new_ds['handler_blocks'] = self._load_role_yaml('handlers') - new_ds['default_vars'] = self._load_role_yaml('defaults') - new_ds['role_vars'] = self._load_role_yaml('vars') + new_ds['metadata'] = self._load_role_yaml(role_path, 'meta') + new_ds['task_blocks'] = self._load_role_yaml(role_path, 'tasks') + new_ds['handler_blocks'] = self._load_role_yaml(role_path, 'handlers') + new_ds['default_vars'] = self._load_role_yaml(role_path, 'defaults') + new_ds['role_vars'] = self._load_role_yaml(role_path, 'vars') + # and return the newly munged ds return new_ds - def _load_role_yaml(self, subdir): - file_path = os.path.join(self._role_path, subdir) + def _load_role_yaml(self, role_path, subdir): + file_path = os.path.join(role_path, 
subdir) if os.path.exists(file_path) and os.path.isdir(file_path): main_file = self._resolve_main(file_path) if os.path.exists(main_file): @@ -119,7 +129,7 @@ class Role(Base): ) if sum([os.path.isfile(x) for x in possible_mains]) > 1: - raise errors.AnsibleError("found multiple main files at %s, only one allowed" % (basepath)) + raise AnsibleError("found multiple main files at %s, only one allowed" % (basepath)) else: for m in possible_mains: if os.path.isfile(m): @@ -136,15 +146,23 @@ class Role(Base): # FIXME: this should use unfrackpath once the utils code has been sorted out role_path = os.path.normpath(role) + print("first role path is %s" % role_path) if os.path.exists(role_path): - return role_path + role_name = os.path.basename(role) + print('returning role path %s' % role_path) + return (role_name, role_path) else: for path in ('./roles', '/etc/ansible/roles'): role_path = os.path.join(path, role) + print("current role path is %s" % role_path) if os.path.exists(role_path): - return role_path - # FIXME: raise something here - raise + print('returning role path %s' % role_path) + return (role, role_path) + + # FIXME: make the parser smart about list/string entries + # in the yaml so the error line/file can be reported + # here + raise AnsibleError("the role '%s' was not found" % role, obj=role) def _repo_url_to_role_name(self, repo_url): # gets the role name out of a repo like @@ -198,12 +216,12 @@ class Role(Base): if len(tokens) == 3: role_name = tokens[2] else: - role_name = repo_url_to_role_name(tokens[0]) + role_name = self._repo_url_to_role_name(tokens[0]) if scm and not role_version: role_version = default_role_versions.get(scm, '') - return dict(scm=scm, src=role_url, version=role_version, name=role_name) + return dict(scm=scm, src=role_url, version=role_version, role_name=role_name) def _munge_role(self, ds): if 'role' in ds: From 0a8c91a8126680673eeb7c6ca3802c35ed9d78d1 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 21 Oct 2014 13:27:01 
-0500 Subject: [PATCH 266/813] Fixing up tests, removing some of the yaml error stuff from parsing * moved old unittests for vault over to the new codebase * reverted YAML error helpers and reverted the load() function in parsing/__init__.py, pending a rewrite of a new YAML loader class of some kind to encapsulate all of that * fixed an error in in the module args parser regarding the shell/ command argument parsing, where some additional arguments were being lost --- v2/ansible/errors/__init__.py | 3 +- v2/ansible/parsing/__init__.py | 180 +--------------------------- v2/ansible/parsing/mod_args.py | 9 +- v2/ansible/playbook/base.py | 4 +- v2/ansible/playbook/role.py | 5 +- v2/test/parsing/test_general.py | 2 +- v2/test/parsing/vault/__init__.py | 21 ++++ v2/test/parsing/vault/test_vault.py | 156 ++++++++++++++++++++++++ 8 files changed, 189 insertions(+), 191 deletions(-) create mode 100644 v2/test/parsing/vault/__init__.py create mode 100644 v2/test/parsing/vault/test_vault.py diff --git a/v2/ansible/errors/__init__.py b/v2/ansible/errors/__init__.py index a0ae94111a..67f4d0a78b 100644 --- a/v2/ansible/errors/__init__.py +++ b/v2/ansible/errors/__init__.py @@ -20,10 +20,11 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os -from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject class AnsibleError(Exception): def __init__(self, message, obj=None): + # we import this here to prevent an import loop with errors + from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject self._obj = obj if isinstance(self._obj, AnsibleBaseYAMLObject): extended_error = self._get_extended_error() diff --git a/v2/ansible/parsing/__init__.py b/v2/ansible/parsing/__init__.py index 39cfb2ac98..69b4aacd2c 100644 --- a/v2/ansible/parsing/__init__.py +++ b/v2/ansible/parsing/__init__.py @@ -28,157 +28,7 @@ from ansible.parsing.vault import VaultLib from ansible.parsing.yaml import safe_load -def process_common_errors(msg, 
probline, column): - replaced = probline.replace(" ","") - - if ":{{" in replaced and "}}" in replaced: - msg = msg + """ -This one looks easy to fix. YAML thought it was looking for the start of a -hash/dictionary and was confused to see a second "{". Most likely this was -meant to be an ansible template evaluation instead, so we have to give the -parser a small hint that we wanted a string instead. The solution here is to -just quote the entire value. - -For instance, if the original line was: - - app_path: {{ base_path }}/foo - -It should be written as: - - app_path: "{{ base_path }}/foo" -""" - return msg - - elif len(probline) and len(probline) > 1 and len(probline) > column and probline[column] == ":" and probline.count(':') > 1: - msg = msg + """ -This one looks easy to fix. There seems to be an extra unquoted colon in the line -and this is confusing the parser. It was only expecting to find one free -colon. The solution is just add some quotes around the colon, or quote the -entire line after the first colon. - -For instance, if the original line was: - - copy: src=file.txt dest=/path/filename:with_colon.txt - -It can be written as: - - copy: src=file.txt dest='/path/filename:with_colon.txt' - -Or: - - copy: 'src=file.txt dest=/path/filename:with_colon.txt' - - -""" - return msg - else: - parts = probline.split(":") - if len(parts) > 1: - middle = parts[1].strip() - match = False - unbalanced = False - if middle.startswith("'") and not middle.endswith("'"): - match = True - elif middle.startswith('"') and not middle.endswith('"'): - match = True - if len(middle) > 0 and middle[0] in [ '"', "'" ] and middle[-1] in [ '"', "'" ] and probline.count("'") > 2 or probline.count('"') > 2: - unbalanced = True - if match: - msg = msg + """ -This one looks easy to fix. It seems that there is a value started -with a quote, and the YAML parser is expecting to see the line ended -with the same kind of quote. 
For instance: - - when: "ok" in result.stdout - -Could be written as: - - when: '"ok" in result.stdout' - -or equivalently: - - when: "'ok' in result.stdout" - -""" - return msg - - if unbalanced: - msg = msg + """ -We could be wrong, but this one looks like it might be an issue with -unbalanced quotes. If starting a value with a quote, make sure the -line ends with the same set of quotes. For instance this arbitrary -example: - - foo: "bad" "wolf" - -Could be written as: - - foo: '"bad" "wolf"' - -""" - return msg - - return msg - -def process_yaml_error(exc, data, path=None, show_content=True): - if hasattr(exc, 'problem_mark'): - mark = exc.problem_mark - if show_content: - if mark.line -1 >= 0: - before_probline = data.split("\n")[mark.line-1] - else: - before_probline = '' - probline = data.split("\n")[mark.line] - arrow = " " * mark.column + "^" - msg = """Syntax Error while loading YAML script, %s -Note: The error may actually appear before this position: line %s, column %s - -%s -%s -%s""" % (path, mark.line + 1, mark.column + 1, before_probline, probline, arrow) - - unquoted_var = None - if '{{' in probline and '}}' in probline: - if '"{{' not in probline or "'{{" not in probline: - unquoted_var = True - - if not unquoted_var: - msg = process_common_errors(msg, probline, mark.column) - else: - msg = msg + """ -We could be wrong, but this one looks like it might be an issue with -missing quotes. Always quote template expression brackets when they -start a value. For instance: - - with_items: - - {{ foo }} - -Should be written as: - - with_items: - - "{{ foo }}" - -""" - else: - # most likely displaying a file with sensitive content, - # so don't show any of the actual lines of yaml just the - # line number itself - msg = """Syntax error while loading YAML script, %s -The error appears to have been on line %s, column %s, but may actually -be before there depending on the exact syntax problem. 
-""" % (path, mark.line + 1, mark.column + 1) - - else: - # No problem markers means we have to throw a generic - # "stuff messed up" type message. Sry bud. - if path: - msg = "Could not parse YAML. Check over %s again." % path - else: - msg = "Could not parse YAML." - raise errors.AnsibleYAMLValidationFailed(msg) - - -def load_data(data): +def load(data): if isinstance(data, file): fd = open(f) @@ -193,31 +43,3 @@ def load_data(data): raise AnsibleInternalError("expected file or string, got %s" % type(data)) -def load_data_from_file(path, vault_password=None): - ''' - Convert a yaml file to a data structure. - Was previously 'parse_yaml_from_file()'. - ''' - - data = None - show_content = True - - try: - data = open(path).read() - except IOError: - raise errors.AnsibleError("file could not read: %s" % path) - - vault = VaultLib(password=vault_password) - if vault.is_encrypted(data): - # if the file is encrypted and no password was specified, - # the decrypt call would throw an error, but we check first - # since the decrypt function doesn't know the file name - if vault_password is None: - raise errors.AnsibleError("A vault password must be specified to decrypt %s" % path) - data = vault.decrypt(data) - show_content = False - - try: - return load_data(data) - except YAMLError as exc: - process_yaml_error(exc, data, path, show_content) diff --git a/v2/ansible/parsing/mod_args.py b/v2/ansible/parsing/mod_args.py index 4c452b4edf..d462780051 100644 --- a/v2/ansible/parsing/mod_args.py +++ b/v2/ansible/parsing/mod_args.py @@ -94,18 +94,13 @@ class ModuleArgsParser: if action not in ['shell', 'command']: return (action, args) - new_args = {} - # the shell module really is the command module with an additional # parameter if action == 'shell': action = 'command' - new_args['_uses_shell'] = True + args['_uses_shell'] = True - # make sure the non-key-value params hop in the data - new_args['_raw_params'] = args['_raw_params'] - - return (action, new_args) + return 
(action, args) def _normalize_parameters(self, thing, action=None): ''' diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py index e61e1f6545..59c329d453 100644 --- a/v2/ansible/playbook/base.py +++ b/v2/ansible/playbook/base.py @@ -25,7 +25,7 @@ from io import FileIO from six import iteritems, string_types from ansible.playbook.attribute import Attribute, FieldAttribute -from ansible.parsing import load_data +from ansible.parsing import load class Base: @@ -64,7 +64,7 @@ class Base: assert ds is not None if isinstance(ds, string_types) or isinstance(ds, FileIO): - ds = load_data(ds) + ds = load(ds) # we currently don't do anything with private attributes but may # later decide to filter them out of 'ds' here. diff --git a/v2/ansible/playbook/role.py b/v2/ansible/playbook/role.py index ea4f2af67f..88aecab985 100644 --- a/v2/ansible/playbook/role.py +++ b/v2/ansible/playbook/role.py @@ -26,9 +26,12 @@ import os from ansible.playbook.attribute import FieldAttribute from ansible.playbook.base import Base from ansible.playbook.block import Block -from ansible.parsing import load_data_from_file from ansible.errors import AnsibleError +# FIXME: this def was cruft from the old utils code, so we'll need +# to relocate it somewhere before we can use it +#from ansible.parsing import load_data_from_file + from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping class Role(Base): diff --git a/v2/test/parsing/test_general.py b/v2/test/parsing/test_general.py index 5ccefd3b50..1d1c5ddc22 100644 --- a/v2/test/parsing/test_general.py +++ b/v2/test/parsing/test_general.py @@ -20,8 +20,8 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.compat.tests import unittest -from ansible.parsing import load_data from ansible.errors import AnsibleParserError +from ansible.parsing import load import json diff --git a/v2/test/parsing/vault/__init__.py b/v2/test/parsing/vault/__init__.py new file mode 
100644 index 0000000000..785fc45992 --- /dev/null +++ b/v2/test/parsing/vault/__init__.py @@ -0,0 +1,21 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + diff --git a/v2/test/parsing/vault/test_vault.py b/v2/test/parsing/vault/test_vault.py new file mode 100644 index 0000000000..eb4df6ed90 --- /dev/null +++ b/v2/test/parsing/vault/test_vault.py @@ -0,0 +1,156 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import getpass +import os +import shutil +import time +import tempfile +from binascii import unhexlify +from binascii import hexlify +from nose.plugins.skip import SkipTest + +from ansible.compat.tests import unittest + +from ansible import errors +from ansible.parsing.vault import VaultLib + +# Counter import fails for 2.0.1, requires >= 2.6.1 from pip +try: + from Crypto.Util import Counter + HAS_COUNTER = True +except ImportError: + HAS_COUNTER = False + +# KDF import fails for 2.0.1, requires >= 2.6.1 from pip +try: + from Crypto.Protocol.KDF import PBKDF2 + HAS_PBKDF2 = True +except ImportError: + HAS_PBKDF2 = False + +# AES IMPORTS +try: + from Crypto.Cipher import AES as AES + HAS_AES = True +except ImportError: + HAS_AES = False + +class TestVaultLib(unittest.TestCase): + + def test_methods_exist(self): + v = VaultLib('ansible') + slots = ['is_encrypted', + 'encrypt', + 'decrypt', + '_add_header', + '_split_header',] + for slot in slots: + assert hasattr(v, slot), "VaultLib is missing the %s method" % slot + + def test_is_encrypted(self): + v = VaultLib(None) + assert not v.is_encrypted("foobar"), "encryption check on plaintext failed" + data = "$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify("ansible") + assert v.is_encrypted(data), "encryption check on headered text failed" + + def test_add_header(self): + v = VaultLib('ansible') + v.cipher_name = "TEST" + sensitive_data = "ansible" + data = v._add_header(sensitive_data) + lines = data.split('\n') + assert len(lines) > 1, "failed to properly add header" + header = lines[0] + assert header.endswith(';TEST'), "header does end with cipher name" + header_parts = header.split(';') + assert len(header_parts) == 3, "header has the wrong number of parts" + assert header_parts[0] == '$ANSIBLE_VAULT', "header does not start with $ANSIBLE_VAULT" + assert header_parts[1] == v.version, "header version 
is incorrect" + assert header_parts[2] == 'TEST', "header does end with cipher name" + + def test_split_header(self): + v = VaultLib('ansible') + data = "$ANSIBLE_VAULT;9.9;TEST\nansible" + rdata = v._split_header(data) + lines = rdata.split('\n') + assert lines[0] == "ansible" + assert v.cipher_name == 'TEST', "cipher name was not set" + assert v.version == "9.9" + + def test_encrypt_decrypt_aes(self): + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: + raise SkipTest + v = VaultLib('ansible') + v.cipher_name = 'AES' + enc_data = v.encrypt("foobar") + dec_data = v.decrypt(enc_data) + assert enc_data != "foobar", "encryption failed" + assert dec_data == "foobar", "decryption failed" + + def test_encrypt_decrypt_aes256(self): + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: + raise SkipTest + v = VaultLib('ansible') + v.cipher_name = 'AES256' + enc_data = v.encrypt("foobar") + dec_data = v.decrypt(enc_data) + assert enc_data != "foobar", "encryption failed" + assert dec_data == "foobar", "decryption failed" + + def test_encrypt_encrypted(self): + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: + raise SkipTest + v = VaultLib('ansible') + v.cipher_name = 'AES' + data = "$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify("ansible") + error_hit = False + try: + enc_data = v.encrypt(data) + except errors.AnsibleError, e: + error_hit = True + assert error_hit, "No error was thrown when trying to encrypt data with a header" + + def test_decrypt_decrypted(self): + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: + raise SkipTest + v = VaultLib('ansible') + data = "ansible" + error_hit = False + try: + dec_data = v.decrypt(data) + except errors.AnsibleError, e: + error_hit = True + assert error_hit, "No error was thrown when trying to decrypt data without a header" + + def test_cipher_not_set(self): + # not setting the cipher should default to AES256 + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: + raise SkipTest + v = VaultLib('ansible') + data = "ansible" 
+ error_hit = False + try: + enc_data = v.encrypt(data) + except errors.AnsibleError, e: + error_hit = True + assert not error_hit, "An error was thrown when trying to encrypt data without the cipher set" + assert v.cipher_name == "AES256", "cipher name is not set to AES256: %s" % v.cipher_name From b3452de8a79f79b6b8f274948b8d6ecec9cd8e81 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 21 Oct 2014 16:06:40 -0400 Subject: [PATCH 267/813] Hook up the general parsing unittests and get them passing --- v2/ansible/parsing/__init__.py | 16 ++++----- v2/test/parsing/test_general.py | 60 ++++++++++++++++----------------- 2 files changed, 37 insertions(+), 39 deletions(-) diff --git a/v2/ansible/parsing/__init__.py b/v2/ansible/parsing/__init__.py index 69b4aacd2c..bc2a000f8b 100644 --- a/v2/ansible/parsing/__init__.py +++ b/v2/ansible/parsing/__init__.py @@ -23,23 +23,23 @@ import json from yaml import YAMLError -from ansible.errors import AnsibleError, AnsibleInternalError +from ansible.errors import AnsibleParserError, AnsibleInternalError from ansible.parsing.vault import VaultLib from ansible.parsing.yaml import safe_load - def load(data): - if isinstance(data, file): - fd = open(f) - data = fd.read() - fd.close() + if hasattr(data, 'read') and hasattr(data.read, '__call__'): + data = data.read() if isinstance(data, basestring): try: - return json.loads(data) + try: + return json.loads(data) + except: + return safe_load(data) except: - return safe_load(data) + raise AnsibleParserError("data was not valid yaml") raise AnsibleInternalError("expected file or string, got %s" % type(data)) diff --git a/v2/test/parsing/test_general.py b/v2/test/parsing/test_general.py index 1d1c5ddc22..b06038a588 100644 --- a/v2/test/parsing/test_general.py +++ b/v2/test/parsing/test_general.py @@ -20,10 +20,11 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.compat.tests import unittest -from ansible.errors import 
AnsibleParserError +from ansible.errors import AnsibleInternalError, AnsibleParserError from ansible.parsing import load import json +import yaml from io import FileIO @@ -34,13 +35,14 @@ class MockFile(FileIO): self.method = method def read(self): - if method == 'json': - return json.dumps(ds) - elif method == 'yaml': - return yaml.dumps(ds) - elif method == 'fail': + if self.method == 'json': + return json.dumps(self.ds) + elif self.method == 'yaml': + return yaml.dump(self.ds) + elif self.method == 'fail': return """ - AAARGGGGH + AAARGGGGH: + ***** THIS WON'T PARSE !!! NOOOOOOOOOOOOOOOOOO """ @@ -51,56 +53,52 @@ class MockFile(FileIO): pass class TestGeneralParsing(unittest.TestCase): - - def __init__(self): - pass - def setUp(self): pass def tearDown(self): pass - def parse_json_from_string(self): - input = """ + def test_parse_json_from_string(self): + data = """ { "asdf" : "1234", "jkl" : 5678 } """ - output = load_data(input) + output = load(data) self.assertEqual(output['asdf'], '1234') self.assertEqual(output['jkl'], 5678) - def parse_json_from_file(self): - output = load_data(MockFile(dict(a=1,b=2,c=3)),'json') - self.assertEqual(ouput, dict(a=1,b=2,c=3)) + def test_parse_json_from_file(self): + output = load(MockFile(dict(a=1,b=2,c=3), 'json')) + self.assertEqual(output, dict(a=1,b=2,c=3)) - def parse_yaml_from_dict(self): - input = """ + def test_parse_yaml_from_dict(self): + data = """ asdf: '1234' jkl: 5678 """ - output = load_data(input) + output = load(data) self.assertEqual(output['asdf'], '1234') self.assertEqual(output['jkl'], 5678) - def parse_yaml_from_file(self): - output = load_data(MockFile(dict(a=1,b=2,c=3),'yaml')) + def test_parse_yaml_from_file(self): + output = load(MockFile(dict(a=1,b=2,c=3),'yaml')) self.assertEqual(output, dict(a=1,b=2,c=3)) - def parse_fail(self): - input = """ - TEXT + def test_parse_fail(self): + data = """ + TEXT: *** NOT VALID """ - self.assertRaises(load_data(input), AnsibleParserError) + 
self.assertRaises(AnsibleParserError, load, data) - def parse_fail_from_file(self): - self.assertRaises(load_data(MockFile(None,'fail')), AnsibleParserError) + def test_parse_fail_from_file(self): + self.assertRaises(AnsibleParserError, load, MockFile(None,'fail')) - def parse_fail_invalid_type(self): - self.assertRaises(3000, AnsibleParsingError) - self.assertRaises(dict(a=1,b=2,c=3), AnsibleParserError) + def test_parse_fail_invalid_type(self): + self.assertRaises(AnsibleInternalError, load, 3000) + self.assertRaises(AnsibleInternalError, load, dict(a=1,b=2,c=3)) From 6c94d8569046d039ab551c8eb877b3224e7b5d2a Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 21 Oct 2014 16:41:16 -0400 Subject: [PATCH 268/813] Tell nose to include branches in its coverage reporting --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index ec2742f89d..5ac0e26d40 100644 --- a/Makefile +++ b/Makefile @@ -96,10 +96,10 @@ tests: PYTHONPATH=./lib $(NOSETESTS) -d -w test/units -v # Could do: --with-coverage --cover-package=ansible newtests: - PYTHONPATH=./v2:./lib $(NOSETESTS) -d -w v2/test -v --with-coverage --cover-package=ansible + PYTHONPATH=./v2:./lib $(NOSETESTS) -d -w v2/test -v --with-coverage --cover-package=ansible --cover-branches newtests-py3: - PYTHONPATH=./v2:./lib $(NOSETESTS3) -d -w v2/test -v --with-coverage --cover-package=ansible + PYTHONPATH=./v2:./lib $(NOSETESTS3) -d -w v2/test -v --with-coverage --cover-package=ansible --cover-branches authors: sh hacking/authors.sh From c1b290a3119565f7367960f266844c860121bc68 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 21 Oct 2014 17:04:49 -0400 Subject: [PATCH 269/813] Fix logic that prevents multiple action, local_action, and modules to be specified. 
--- v2/ansible/parsing/mod_args.py | 33 +++++++++++++++++--------------- v2/test/parsing/test_mod_args.py | 7 +++++++ 2 files changed, 25 insertions(+), 15 deletions(-) diff --git a/v2/ansible/parsing/mod_args.py b/v2/ansible/parsing/mod_args.py index d462780051..5e7c4225df 100644 --- a/v2/ansible/parsing/mod_args.py +++ b/v2/ansible/parsing/mod_args.py @@ -198,6 +198,12 @@ class ModuleArgsParser: delegate_to = None args = dict() + + # + # We can have one of action, local_action, or module specified + # + + # action if 'action' in ds: # an old school 'action' statement @@ -205,7 +211,8 @@ class ModuleArgsParser: delegate_to = None action, args = self._normalize_parameters(thing) - elif 'local_action' in ds: + # local_action + if 'local_action' in ds: # local_action is similar but also implies a delegate_to if action is not None: @@ -214,21 +221,17 @@ class ModuleArgsParser: delegate_to = 'localhost' action, args = self._normalize_parameters(thing) - else: + # module: is the more new-style invocation - # module: is the more new-style invocation - if action is not None: - raise AnsibleParserError("conflicting action statements", obj=self._task) - - # walk the input dictionary to see we recognize a module name - for (item, value) in iteritems(ds): - if item in module_finder: - # finding more than one module name is a problem - if action is not None: - raise AnsibleParserError("conflicting action statements", obj=self._task) - action = item - thing = value - action, args = self._normalize_parameters(value, action=action) + # walk the input dictionary to see we recognize a module name + for (item, value) in iteritems(ds): + if item in module_finder: + # finding more than one module name is a problem + if action is not None: + raise AnsibleParserError("conflicting action statements", obj=self._task) + action = item + thing = value + action, args = self._normalize_parameters(value, action=action) # if we didn't see any module in the task at all, it's not a task really if 
action is None: diff --git a/v2/test/parsing/test_mod_args.py b/v2/test/parsing/test_mod_args.py index 8268e6126b..0f9ee28dec 100644 --- a/v2/test/parsing/test_mod_args.py +++ b/v2/test/parsing/test_mod_args.py @@ -20,6 +20,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.parsing.mod_args import ModuleArgsParser +from ansible.errors import AnsibleParserError from ansible.compat.tests import unittest @@ -106,3 +107,9 @@ class TestModArgsDwim(unittest.TestCase): self.assertEqual(mod, 'copy') self.assertEqual(args, dict(src='a', dest='b')) self.assertIs(to, 'localhost') + + def test_multiple_actions(self): + self.assertRaises(AnsibleParserError, self.m.parse, dict(action='shell echo hi', local_action='shell echo hi')) + self.assertRaises(AnsibleParserError, self.m.parse, dict(action='shell echo hi', shell='echo hi')) + self.assertRaises(AnsibleParserError, self.m.parse, dict(local_action='shell echo hi', shell='echo hi')) + self.assertRaises(AnsibleParserError, self.m.parse, dict(ping='data=hi', shell='echo hi')) From 2ff17ebd93d7ce19f4cbce3bc515c68ee4fe00e3 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 21 Oct 2014 16:14:50 -0500 Subject: [PATCH 270/813] Adding in VaultEditor tests from old unittests --- v2/test/parsing/vault/test_vault_editor.py | 183 +++++++++++++++++++++ 1 file changed, 183 insertions(+) create mode 100644 v2/test/parsing/vault/test_vault_editor.py diff --git a/v2/test/parsing/vault/test_vault_editor.py b/v2/test/parsing/vault/test_vault_editor.py new file mode 100644 index 0000000000..3396c6f8ab --- /dev/null +++ b/v2/test/parsing/vault/test_vault_editor.py @@ -0,0 +1,183 @@ +#!/usr/bin/env python + +import getpass +import os +import shutil +import time +import tempfile +from binascii import unhexlify +from binascii import hexlify +from nose.plugins.skip import SkipTest + +from ansible.compat.tests import unittest +from ansible.compat.tests.mock import patch + +from ansible 
import errors +from ansible.parsing.vault import VaultLib +from ansible.parsing.vault import VaultEditor + +# Counter import fails for 2.0.1, requires >= 2.6.1 from pip +try: + from Crypto.Util import Counter + HAS_COUNTER = True +except ImportError: + HAS_COUNTER = False + +# KDF import fails for 2.0.1, requires >= 2.6.1 from pip +try: + from Crypto.Protocol.KDF import PBKDF2 + HAS_PBKDF2 = True +except ImportError: + HAS_PBKDF2 = False + +# AES IMPORTS +try: + from Crypto.Cipher import AES as AES + HAS_AES = True +except ImportError: + HAS_AES = False + +v10_data = """$ANSIBLE_VAULT;1.0;AES +53616c7465645f5fd0026926a2d415a28a2622116273fbc90e377225c12a347e1daf4456d36a77f9 +9ad98d59f61d06a4b66718d855f16fb7bdfe54d1ec8aeaa4d06c2dc1fa630ae1846a029877f0eeb1 +83c62ffb04c2512995e815de4b4d29ed""" + +v11_data = """$ANSIBLE_VAULT;1.1;AES256 +62303130653266653331306264616235333735323636616539316433666463323964623162386137 +3961616263373033353631316333623566303532663065310a393036623466376263393961326530 +64336561613965383835646464623865663966323464653236343638373165343863623638316664 +3631633031323837340a396530313963373030343933616133393566366137363761373930663833 +3739""" + +class TestVaultEditor(unittest.TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_methods_exist(self): + v = VaultEditor(None, None, None) + slots = ['create_file', + 'decrypt_file', + 'edit_file', + 'encrypt_file', + 'rekey_file', + 'read_data', + 'write_data', + 'shuffle_files'] + for slot in slots: + assert hasattr(v, slot), "VaultLib is missing the %s method" % slot + + @patch.object(VaultEditor, '_editor_shell_command') + def test_create_file(self, mock_editor_shell_command): + + def sc_side_effect(filename): + return ['touch', filename] + mock_editor_shell_command.side_effect = sc_side_effect + + tmp_file = tempfile.NamedTemporaryFile() + os.unlink(tmp_file.name) + + ve = VaultEditor(None, "ansible", tmp_file.name) + ve.create_file() + + 
self.assertTrue(os.path.exists(tmp_file.name)) + + def test_decrypt_1_0(self): + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: + raise SkipTest + + v10_file = tempfile.NamedTemporaryFile(delete=False) + with v10_file as f: + f.write(v10_data) + + ve = VaultEditor(None, "ansible", v10_file.name) + + # make sure the password functions for the cipher + error_hit = False + try: + ve.decrypt_file() + except errors.AnsibleError, e: + error_hit = True + + # verify decrypted content + f = open(v10_file.name, "rb") + fdata = f.read() + f.close() + + os.unlink(v10_file.name) + + assert error_hit == False, "error decrypting 1.0 file" + assert fdata.strip() == "foo", "incorrect decryption of 1.0 file: %s" % fdata.strip() + + + def test_decrypt_1_1(self): + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: + raise SkipTest + + v11_file = tempfile.NamedTemporaryFile(delete=False) + with v11_file as f: + f.write(v11_data) + + ve = VaultEditor(None, "ansible", v11_file.name) + + # make sure the password functions for the cipher + error_hit = False + try: + ve.decrypt_file() + except errors.AnsibleError, e: + error_hit = True + + # verify decrypted content + f = open(v11_file.name, "rb") + fdata = f.read() + f.close() + + os.unlink(v11_file.name) + + assert error_hit == False, "error decrypting 1.0 file" + assert fdata.strip() == "foo", "incorrect decryption of 1.0 file: %s" % fdata.strip() + + + def test_rekey_migration(self): + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: + raise SkipTest + + v10_file = tempfile.NamedTemporaryFile(delete=False) + with v10_file as f: + f.write(v10_data) + + ve = VaultEditor(None, "ansible", v10_file.name) + + # make sure the password functions for the cipher + error_hit = False + try: + ve.rekey_file('ansible2') + except errors.AnsibleError, e: + error_hit = True + + # verify decrypted content + f = open(v10_file.name, "rb") + fdata = f.read() + f.close() + + assert error_hit == False, "error rekeying 1.0 file to 1.1" + + # 
ensure filedata can be decrypted, is 1.1 and is AES256 + vl = VaultLib("ansible2") + dec_data = None + error_hit = False + try: + dec_data = vl.decrypt(fdata) + except errors.AnsibleError, e: + error_hit = True + + os.unlink(v10_file.name) + + assert vl.cipher_name == "AES256", "wrong cipher name set after rekey: %s" % vl.cipher_name + assert error_hit == False, "error decrypting migrated 1.0 file" + assert dec_data.strip() == "foo", "incorrect decryption of rekeyed/migrated file: %s" % dec_data + + From 4c47c2273a150e3909c36a9acefcd24b4c002f27 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 22 Oct 2014 10:30:04 -0500 Subject: [PATCH 271/813] Use convert_bare instead of string formatting for raw vars in debug action plugin --- lib/ansible/runner/action_plugins/debug.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/runner/action_plugins/debug.py b/lib/ansible/runner/action_plugins/debug.py index 9882095657..75613b9919 100644 --- a/lib/ansible/runner/action_plugins/debug.py +++ b/lib/ansible/runner/action_plugins/debug.py @@ -51,7 +51,7 @@ class ActionModule(object): else: result = dict(msg=args['msg']) elif 'var' in args and not utils.LOOKUP_REGEX.search(args['var']): - results = template.template(self.basedir, "{{ %s }}" % args['var'], inject) + results = template.template(self.basedir, args['var'], inject, convert_bare=True) result[args['var']] = results # force flag to make debug output module always verbose From be972225a56d0ee69f15d62bed73327b4c197a4b Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 22 Oct 2014 10:33:09 -0500 Subject: [PATCH 272/813] Removing unnecessary import from v2 parsing init --- v2/ansible/parsing/__init__.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/v2/ansible/parsing/__init__.py b/v2/ansible/parsing/__init__.py index bc2a000f8b..5f922a120f 100644 --- a/v2/ansible/parsing/__init__.py +++ b/v2/ansible/parsing/__init__.py @@ -21,8 +21,6 @@ __metaclass__ = type import json -from 
yaml import YAMLError - from ansible.errors import AnsibleParserError, AnsibleInternalError from ansible.parsing.vault import VaultLib from ansible.parsing.yaml import safe_load From b4f9631e4c02fc7737fe259ab851641dfc5cf4f0 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 22 Oct 2014 10:53:58 -0500 Subject: [PATCH 273/813] Revert "remove complex_args_hack as it was only needed for Baby JSON" This reverts commit a6029264b8194f0e6577d3e8cb4f853d779ef4f6. Fixes #9400 --- lib/ansible/runner/__init__.py | 20 ++++++++++++++++++++ lib/ansible/runner/action_plugins/normal.py | 2 ++ 2 files changed, 22 insertions(+) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 75a8a0766a..1265f79efe 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -253,6 +253,26 @@ class Runner(object): # ensure we are using unique tmp paths random.seed() + # ***************************************************** + + def _complex_args_hack(self, complex_args, module_args): + """ + ansible-playbook both allows specifying key=value string arguments and complex arguments + however not all modules use our python common module system and cannot + access these. An example might be a Bash module. This hack allows users to still pass "args" + as a hash of simple scalars to those arguments and is short term. We could technically + just feed JSON to the module, but that makes it hard on Bash consumers. The way this is implemented + it does mean values in 'args' have LOWER priority than those on the key=value line, allowing + args to provide yet another way to have pluggable defaults. 
+ """ + if complex_args is None: + return module_args + if not isinstance(complex_args, dict): + raise errors.AnsibleError("complex arguments are not a dictionary: %s" % complex_args) + for (k,v) in complex_args.iteritems(): + if isinstance(v, basestring): + module_args = "%s=%s %s" % (k, pipes.quote(v), module_args) + return module_args # ***************************************************** diff --git a/lib/ansible/runner/action_plugins/normal.py b/lib/ansible/runner/action_plugins/normal.py index d845fa886f..8500c6641c 100644 --- a/lib/ansible/runner/action_plugins/normal.py +++ b/lib/ansible/runner/action_plugins/normal.py @@ -36,6 +36,8 @@ class ActionModule(object): def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs): ''' transfer & execute a module that is not 'copy' or 'template' ''' + module_args = self.runner._complex_args_hack(complex_args, module_args) + if self.runner.noop_on_check(inject): if module_name in [ 'shell', 'command' ]: return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True, msg='check mode not supported for %s' % module_name)) From 9ee3cd14ee5ddb8be6ad63b66a26c868cd1511a9 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 22 Oct 2014 11:08:01 -0500 Subject: [PATCH 274/813] Fix typo in vault edit helper code Fixes #9399 --- lib/ansible/utils/vault.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/utils/vault.py b/lib/ansible/utils/vault.py index 506c0852f3..50b686c1e0 100644 --- a/lib/ansible/utils/vault.py +++ b/lib/ansible/utils/vault.py @@ -189,7 +189,7 @@ class VaultEditor(object): _, tmp_path = tempfile.mkstemp() if existing_data: - self.write_data(data, tmp_path) + self.write_data(existing_data, tmp_path) # drop the user into an editor on the tmp file call(self._editor_shell_command(tmp_path)) From 3c88f99d13e2e1dbe3ff008f35b33aabc464843f Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Wed, 22 Oct 2014 14:19:17 -0400 Subject: [PATCH 275/813] Add 
AnsibleFest London link. --- docsite/_themes/srtd/layout.html | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docsite/_themes/srtd/layout.html b/docsite/_themes/srtd/layout.html index 866efb72fc..1073cad40e 100644 --- a/docsite/_themes/srtd/layout.html +++ b/docsite/_themes/srtd/layout.html @@ -179,8 +179,8 @@
- - + +
 

 
From 9c10a11aea8d0fb7fe2f3c48a83e2a2a76b8d241 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Wed, 22 Oct 2014 15:40:41 -0400 Subject: [PATCH 276/813] Fix the synopsis in ansible-pull man page. --- docs/man/man1/ansible-pull.1.asciidoc.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/man/man1/ansible-pull.1.asciidoc.in b/docs/man/man1/ansible-pull.1.asciidoc.in index 39a8de0b0e..d75fc63794 100644 --- a/docs/man/man1/ansible-pull.1.asciidoc.in +++ b/docs/man/man1/ansible-pull.1.asciidoc.in @@ -12,7 +12,7 @@ ansible-pull - set up a remote copy of ansible on each managed node SYNOPSIS -------- -ansible -d DEST -U URL [options] [ ] +ansible-pull -d DEST -U URL [options] [ ] DESCRIPTION From 76a43d4b388e67b5c372af0d6316ac79daa4757e Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 22 Oct 2014 14:40:41 -0500 Subject: [PATCH 277/813] Updating submodule pointer for core modules --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 88b73afcbe..b8e82f8504 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 88b73afcbe15a0c5ddbbb9c977e7c09199e47733 +Subproject commit b8e82f85040b4a502d305783734920e0f664562c From 56c268ade4013144ae1ee02df2976ad0545a3000 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 22 Oct 2014 16:36:54 -0400 Subject: [PATCH 278/813] New test that newlines in systemd service scripts don't cause problems --- test/integration/roles/test_service/files/ansible.systemd | 2 +- test/integration/roles/test_service/tasks/systemd_setup.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/roles/test_service/files/ansible.systemd b/test/integration/roles/test_service/files/ansible.systemd index 4517433fd2..77c4911c23 100644 --- a/test/integration/roles/test_service/files/ansible.systemd +++ 
b/test/integration/roles/test_service/files/ansible.systemd @@ -2,6 +2,6 @@ Description=Ansible Test Service [Service] -ExecStart=/usr/sbin/ansible_test_service +ExecStart=/usr/sbin/ansible_test_service "Test\nthat newlines in scripts\nwork" ExecReload=/bin/true Type=forking diff --git a/test/integration/roles/test_service/tasks/systemd_setup.yml b/test/integration/roles/test_service/tasks/systemd_setup.yml index e2c1ecfc1c..6d42933213 100644 --- a/test/integration/roles/test_service/tasks/systemd_setup.yml +++ b/test/integration/roles/test_service/tasks/systemd_setup.yml @@ -12,7 +12,7 @@ - "install_systemd_result.dest == '/usr/lib/systemd/system/ansible_test.service'" - "install_systemd_result.state == 'file'" - "install_systemd_result.mode == '0644'" - - "install_systemd_result.md5sum == 'f634df77d9160ab05bad4ed49d82a0d0'" + - "install_systemd_result.md5sum == '6be64a1e44e9e72a467e70a0b562444f'" - "install_broken_systemd_result.dest == '/usr/lib/systemd/system/ansible_test_broken.service'" - "install_broken_systemd_result.state == 'link'" From 34129a223d2d4f58128cf561f657ad3ef17beeda Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 22 Oct 2014 16:46:50 -0400 Subject: [PATCH 279/813] update core submodules for systemd service fix --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index b8e82f8504..7f611468a8 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit b8e82f85040b4a502d305783734920e0f664562c +Subproject commit 7f611468a8279d785af26852ca7dccc95bc73a41 From c15b47fb7bbcca965089afc15c2dacf2f8120758 Mon Sep 17 00:00:00 2001 From: Maykel Moya Date: Tue, 7 Jan 2014 08:36:42 +1100 Subject: [PATCH 280/813] Configure retry file usage and location Adds new settings for managing retry files: * retry_files_enabled, defaults to True * retry_files_save_path, defaults to ~/.ansible-retry This change was adapted from PR 
#5515. --- bin/ansible-playbook | 2 +- lib/ansible/constants.py | 3 +++ lib/ansible/playbook/__init__.py | 19 ++++++++++++++----- 3 files changed, 18 insertions(+), 6 deletions(-) diff --git a/bin/ansible-playbook b/bin/ansible-playbook index 96e87de3eb..7793b914c4 100755 --- a/bin/ansible-playbook +++ b/bin/ansible-playbook @@ -276,7 +276,7 @@ def main(args): retries = failed_hosts + unreachable_hosts - if len(retries) > 0: + if C.RETRY_FILES_ENABLED and len(retries) > 0: filename = pb.generate_retry_inventory(retries) if filename: display(" to retry, use: --limit @%s\n" % filename) diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 861dd5325c..a255ed77d8 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -161,6 +161,9 @@ DEFAULT_CALLABLE_WHITELIST = get_config(p, DEFAULTS, 'callable_whitelist', ' COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'ANSIBLE_COMMAND_WARNINGS', False, boolean=True) DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, boolean=True) +RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True) +RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/.ansible-retry') + # CONNECTION RELATED ANSIBLE_SSH_ARGS = get_config(p, 'ssh_connection', 'ssh_args', 'ANSIBLE_SSH_ARGS', None) ANSIBLE_SSH_CONTROL_PATH = get_config(p, 'ssh_connection', 'control_path', 'ANSIBLE_SSH_CONTROL_PATH', "%(directory)s/ansible-ssh-%%h-%%p-%%r") diff --git a/lib/ansible/playbook/__init__.py b/lib/ansible/playbook/__init__.py index 58e2bafe18..a6f67e196d 100644 --- a/lib/ansible/playbook/__init__.py +++ b/lib/ansible/playbook/__init__.py @@ -636,19 +636,28 @@ class PlayBook(object): buf = StringIO.StringIO() for x in replay_hosts: buf.write("%s\n" % x) - basedir = self.inventory.basedir() + basedir = 
C.shell_expand_path(C.RETRY_FILES_SAVE_PATH) filename = "%s.retry" % os.path.basename(self.filename) filename = filename.replace(".yml","") - filename = os.path.join(os.path.expandvars('$HOME/'), filename) + filename = os.path.join(basedir, filename) try: + if not os.path.exists(basedir): + os.makedirs(basedir) + fd = open(filename, 'w') fd.write(buf.getvalue()) fd.close() - return filename except: - pass - return None + ansible.callbacks.display( + "\nERROR: could not create retry file. Check the value of \n" + + "the configuration variable 'retry_files_save_path' or set \n" + + "'retry_files_enabled' to False to avoid this message.\n", + color='red' + ) + return None + + return filename # ***************************************************** From c86851be2c309f7ff3bff7758b346b52975c380b Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 22 Oct 2014 18:45:02 -0400 Subject: [PATCH 281/813] Quote ControlPath ssh parameter Fixes #9316 --- lib/ansible/runner/connection_plugins/ssh.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/runner/connection_plugins/ssh.py b/lib/ansible/runner/connection_plugins/ssh.py index e8e431f401..c2fd9666eb 100644 --- a/lib/ansible/runner/connection_plugins/ssh.py +++ b/lib/ansible/runner/connection_plugins/ssh.py @@ -65,7 +65,7 @@ class Connection(object): else: self.common_args += ["-o", "ControlMaster=auto", "-o", "ControlPersist=60s", - "-o", "ControlPath=%s" % (C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=self.cp_dir))] + "-o", "ControlPath=\"%s\"" % (C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=self.cp_dir))] cp_in_use = False cp_path_set = False @@ -76,7 +76,7 @@ class Connection(object): cp_path_set = True if cp_in_use and not cp_path_set: - self.common_args += ["-o", "ControlPath=%s" % (C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=self.cp_dir))] + self.common_args += ["-o", "ControlPath=\"%s\"" % (C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=self.cp_dir))] if not C.HOST_KEY_CHECKING: 
self.common_args += ["-o", "StrictHostKeyChecking=no"] From c16c527923cfe3f67e981a8de330cbd248dd3226 Mon Sep 17 00:00:00 2001 From: Juri Glass Date: Thu, 23 Oct 2014 16:42:10 +0200 Subject: [PATCH 282/813] Update playbooks_variables.rst There is some kind of duplicated content with http://docs.ansible.com/faq.html#how-do-i-loop-over-a-list-of-hosts-in-a-group-inside-of-a-template and this gotcha isn't listed here. --- docsite/rst/playbooks_variables.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index 86146cdd0f..b552352932 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -842,6 +842,7 @@ A frequently used idiom is walking a group to find all IP addresses in that grou {% endfor %} An example of this could include pointing a frontend proxy server to all of the app servers, setting up the correct firewall rules between servers, etc. +You need to make sure that the facts of those hosts have been populated before though, for example by running a play against them. Additionally, *inventory_hostname* is the name of the hostname as configured in Ansible's inventory host file. This can be useful for when you don't want to rely on the discovered hostname `ansible_hostname` or for other mysterious From 2ef2f1247252b703acd3f806ad8132041badefe4 Mon Sep 17 00:00:00 2001 From: Daniel Roberts Date: Thu, 23 Oct 2014 16:39:09 -0400 Subject: [PATCH 283/813] Update developing_modules.rst --- docsite/rst/developing_modules.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index 5a92ec8ddc..608ac7185b 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -402,7 +402,7 @@ support formatting with some special macros. 
These formatting functions are ``U()``, ``M()``, ``I()``, and ``C()`` for URL, module, italic, and constant-width respectively. It is suggested to use ``C()`` for file and option names, and ``I()`` when referencing -parameters; module names should be specifies as ``M(module)``. +parameters; module names should be specified as ``M(module)``. Examples (which typically contain colons, quotes, etc.) are difficult to format with YAML, so these must be From 7cb489eca3bb167ac9e22b310075e944b8254a27 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 23 Oct 2014 15:39:27 -0500 Subject: [PATCH 284/813] Adding a data parsing class for v2 --- v2/ansible/errors/__init__.py | 101 ++++++++++++++++--- v2/ansible/parsing/__init__.py | 22 ----- v2/ansible/parsing/yaml/__init__.py | 115 +++++++++++++++++++++- v2/ansible/parsing/yaml/objects.py | 5 + v2/ansible/parsing/yaml/strings.py | 118 +++++++++++++++++++++++ v2/ansible/playbook/base.py | 9 +- v2/ansible/playbook/role.py | 11 +-- v2/ansible/playbook/task.py | 5 +- v2/test/errors/test_errors.py | 14 +-- v2/test/parsing/test_general.py | 104 -------------------- v2/test/parsing/yaml/test_data_loader.py | 64 ++++++++++++ v2/test/parsing/yaml/test_yaml.py | 100 ------------------- 12 files changed, 406 insertions(+), 262 deletions(-) create mode 100644 v2/ansible/parsing/yaml/strings.py delete mode 100644 v2/test/parsing/test_general.py create mode 100644 v2/test/parsing/yaml/test_data_loader.py delete mode 100644 v2/test/parsing/yaml/test_yaml.py diff --git a/v2/ansible/errors/__init__.py b/v2/ansible/errors/__init__.py index 67f4d0a78b..e0c21d195b 100644 --- a/v2/ansible/errors/__init__.py +++ b/v2/ansible/errors/__init__.py @@ -21,11 +21,30 @@ __metaclass__ = type import os +from ansible.parsing.yaml.strings import * + class AnsibleError(Exception): - def __init__(self, message, obj=None): - # we import this here to prevent an import loop with errors + ''' + This is the base class for all errors raised from Ansible code, + 
and can be instantiated with two optional parameters beyond the + error message to control whether detailed information is displayed + when the error occurred while parsing a data file of some kind. + + Usage: + + raise AnsibleError('some message here', obj=obj, show_content=True) + + Where "obj" is some subclass of ansible.parsing.yaml.objects.AnsibleBaseYAMLObject, + which should be returned by the DataLoader() class. + ''' + + def __init__(self, message, obj=None, show_content=True): + # we import this here to prevent an import loop problem, + # since the objects code also imports ansible.errors from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject - self._obj = obj + + self._obj = obj + self._show_content = show_content if isinstance(self._obj, AnsibleBaseYAMLObject): extended_error = self._get_extended_error() if extended_error: @@ -36,22 +55,80 @@ class AnsibleError(Exception): def __repr__(self): return self.message - def _get_line_from_file(self, filename, line_number): - with open(filename, 'r') as f: + def _get_error_lines_from_file(self, file_name, line_number): + ''' + Returns the line in the file which coresponds to the reported error + location, as well as the line preceeding it (if the error did not + occur on the first line), to provide context to the error. + ''' + + target_line = '' + prev_line = '' + + with open(file_name, 'r') as f: lines = f.readlines() - return lines[line_number] + + target_line = lines[line_number] + if line_number > 0: + prev_line = lines[line_number - 1] + + return (target_line, prev_line) def _get_extended_error(self): + ''' + Given an object reporting the location of the exception in a file, return + detailed information regarding it including: + + * the line which caused the error as well as the one preceeding it + * causes and suggested remedies for common syntax errors + + If this error was created with show_content=False, the reporting of content + is suppressed, as the file contents may be sensitive (ie. 
vault data). + ''' + error_message = '' try: (src_file, line_number, col_number) = self._obj.get_position_info() - error_message += 'The error occurred on line %d of the file %s:\n' % (line_number, src_file) - if src_file not in ('', ''): - responsible_line = self._get_line_from_file(src_file, line_number - 1) - if responsible_line: - error_message += responsible_line - error_message += (' ' * (col_number-1)) + '^' + error_message += YAML_POSITION_DETAILS % (src_file, line_number, col_number) + if src_file not in ('', '') and self._show_content: + (target_line, prev_line) = self._get_error_lines_from_file(src_file, line_number - 1) + if target_line: + stripped_line = target_line.replace(" ","") + arrow_line = (" " * (col_number-1)) + "^" + error_message += "%s\n%s\n%s\n" % (prev_line.rstrip(), target_line.rstrip(), arrow_line) + + # common error/remediation checking here: + # check for unquoted vars starting lines + if ('{{' in target_line and '}}' in target_line) and ('"{{' not in target_line or "'{{" not in target_line): + error_message += YAML_COMMON_UNQUOTED_VARIABLE_ERROR + # check for common dictionary mistakes + elif ":{{" in stripped_line and "}}" in stripped_line: + error_message += YAML_COMMON_DICT_ERROR + # check for common unquoted colon mistakes + elif len(target_line) and len(target_line) > 1 and len(target_line) > col_number and target_line[col_number] == ":" and target_line.count(':') > 1: + error_message += YAML_COMMON_UNQUOTED_COLON_ERROR + # otherwise, check for some common quoting mistakes + else: + parts = target_line.split(":") + if len(parts) > 1: + middle = parts[1].strip() + match = False + unbalanced = False + + if middle.startswith("'") and not middle.endswith("'"): + match = True + elif middle.startswith('"') and not middle.endswith('"'): + match = True + + if len(middle) > 0 and middle[0] in [ '"', "'" ] and middle[-1] in [ '"', "'" ] and target_line.count("'") > 2 or target_line.count('"') > 2: + unbalanced = True + + if match: + 
error_message += YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR + if unbalanced: + error_message += YAML_COMMON_UNBALANCED_QUOTES_ERROR + except IOError: error_message += '\n(could not open file to display line)' except IndexError: diff --git a/v2/ansible/parsing/__init__.py b/v2/ansible/parsing/__init__.py index 5f922a120f..785fc45992 100644 --- a/v2/ansible/parsing/__init__.py +++ b/v2/ansible/parsing/__init__.py @@ -19,25 +19,3 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -import json - -from ansible.errors import AnsibleParserError, AnsibleInternalError -from ansible.parsing.vault import VaultLib -from ansible.parsing.yaml import safe_load - -def load(data): - - if hasattr(data, 'read') and hasattr(data.read, '__call__'): - data = data.read() - - if isinstance(data, basestring): - try: - try: - return json.loads(data) - except: - return safe_load(data) - except: - raise AnsibleParserError("data was not valid yaml") - - raise AnsibleInternalError("expected file or string, got %s" % type(data)) - diff --git a/v2/ansible/parsing/yaml/__init__.py b/v2/ansible/parsing/yaml/__init__.py index 6cc55bfc84..6d121d991e 100644 --- a/v2/ansible/parsing/yaml/__init__.py +++ b/v2/ansible/parsing/yaml/__init__.py @@ -19,9 +19,114 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from yaml import load -from ansible.parsing.yaml.loader import AnsibleLoader +import json +import os + +from yaml import load, YAMLError + +from ansible.errors import AnsibleParserError + +from ansible.parsing.vault import VaultLib +from ansible.parsing.yaml.loader import AnsibleLoader +from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject +from ansible.parsing.yaml.strings import YAML_SYNTAX_ERROR + +class DataLoader(): + + ''' + The DataLoader class is used to load and parse YAML or JSON content, + either from a given file name or from a string that was previously + read in through other means. 
A Vault password can be specified, and + any vault-encrypted files will be decrypted. + + Data read from files will also be cached, so the file will never be + read from disk more than once. + + Usage: + + dl = DataLoader() + (or) + dl = DataLoader(vault_password='foo') + + ds = dl.load('...') + ds = dl.load_from_file('/path/to/file') + ''' + + _FILE_CACHE = dict() + + def __init__(self, vault_password=None): + self._vault = VaultLib(password=vault_password) + + def load(self, data, file_name='', show_content=True): + ''' + Creates a python datastructure from the given data, which can be either + a JSON or YAML string. + ''' + + try: + # we first try to load this data as JSON + return json.loads(data) + except: + try: + # if loading JSON failed for any reason, we go ahead + # and try to parse it as YAML instead + return self._safe_load(data) + except YAMLError, yaml_exc: + self._handle_error(yaml_exc, file_name, show_content) + + def load_from_file(self, file_name): + ''' Loads data from a file, which can contain either JSON or YAML. ''' + + # if the file has already been read in and cached, we'll + # return those results to avoid more file/vault operations + if file_name in self._FILE_CACHE: + return self._FILE_CACHE + + # read the file contents and load the data structure from them + (file_data, show_content) = self._get_file_contents(file_name) + parsed_data = self.load(data=file_data, file_name=file_name, show_content=show_content) + + # cache the file contents for next time + self._FILE_CACHE[file_name] = parsed_data + + return parsed_data + + def _safe_load(self, stream): + ''' Implements yaml.safe_load(), except using our custom loader class. ''' + return load(stream, AnsibleLoader) + + def _get_file_contents(self, file_name): + ''' + Reads the file contents from the given file name, and will decrypt them + if they are found to be vault-encrypted. 
+ ''' + if not os.path.exists(file_name) or not os.path.isfile(file_name): + raise AnsibleParserError("the file_name '%s' does not exist, or is not readable" % file_name) + + show_content = True + try: + with open(file_name, 'r') as f: + data = f.read() + if self._vault.is_encrypted(data): + data = self._vault.decrypt(data) + show_content = False + return (data, show_content) + except (IOError, OSError) as e: + raise AnsibleParserError("an error occured while trying to read the file '%s': %s" % (file_name, str(e))) + + def _handle_error(self, yaml_exc, file_name, show_content): + ''' + Optionally constructs an object (AnsibleBaseYAMLObject) to encapsulate the + file name/position where a YAML exception occured, and raises an AnsibleParserError + to display the syntax exception information. + ''' + + # if the YAML exception contains a problem mark, use it to construct + # an object the error class can use to display the faulty line + err_obj = None + if hasattr(yaml_exc, 'problem_mark'): + err_obj = AnsibleBaseYAMLObject() + err_obj.set_position_info(file_name, yaml_exc.problem_mark.line + 1, yaml_exc.problem_mark.column + 1) + + raise AnsibleParserError(YAML_SYNTAX_ERROR, obj=err_obj, show_content=show_content) -def safe_load(stream): - ''' implements yaml.safe_load(), except using our custom loader class ''' - return load(stream, AnsibleLoader) diff --git a/v2/ansible/parsing/yaml/objects.py b/v2/ansible/parsing/yaml/objects.py index be687d1e14..ba89accd73 100644 --- a/v2/ansible/parsing/yaml/objects.py +++ b/v2/ansible/parsing/yaml/objects.py @@ -32,6 +32,11 @@ class AnsibleBaseYAMLObject: def get_position_info(self): return (self._data_source, self._line_number, self._column_number) + def set_position_info(self, src, line, col): + self._data_source = src + self._line_number = line + self._column_number = col + def copy_position_info(obj): ''' copies the position info from another object ''' assert isinstance(obj, AnsibleBaseYAMLObject) diff --git 
a/v2/ansible/parsing/yaml/strings.py b/v2/ansible/parsing/yaml/strings.py new file mode 100644 index 0000000000..b7e304194f --- /dev/null +++ b/v2/ansible/parsing/yaml/strings.py @@ -0,0 +1,118 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +__all__ = [ + 'YAML_SYNTAX_ERROR', + 'YAML_POSITION_DETAILS', + 'YAML_COMMON_DICT_ERROR', + 'YAML_COMMON_UNQUOTED_VARIABLE_ERROR', + 'YAML_COMMON_UNQUOTED_COLON_ERROR', + 'YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR', + 'YAML_COMMON_UNBALANCED_QUOTES_ERROR', +] + +YAML_SYNTAX_ERROR = """\ +Syntax Error while loading YAML. +""" + +YAML_POSITION_DETAILS = """\ +The error appears to have been in '%s': line %s, column %s, +but may actually be before there depending on the exact syntax problem. +""" + +YAML_COMMON_DICT_ERROR = """\ +This one looks easy to fix. YAML thought it was looking for the start of a +hash/dictionary and was confused to see a second "{". Most likely this was +meant to be an ansible template evaluation instead, so we have to give the +parser a small hint that we wanted a string instead. The solution here is to +just quote the entire value. 
+ +For instance, if the original line was: + + app_path: {{ base_path }}/foo + +It should be written as: + + app_path: "{{ base_path }}/foo" +""" + +YAML_COMMON_UNQUOTED_VARIABLE_ERROR = """\ +We could be wrong, but this one looks like it might be an issue with +missing quotes. Always quote template expression brackets when they +start a value. For instance: + + with_items: + - {{ foo }} + +Should be written as: + + with_items: + - "{{ foo }}" +""" + +YAML_COMMON_UNQUOTED_COLON_ERROR = """\ +This one looks easy to fix. There seems to be an extra unquoted colon in the line +and this is confusing the parser. It was only expecting to find one free +colon. The solution is just add some quotes around the colon, or quote the +entire line after the first colon. + +For instance, if the original line was: + + copy: src=file.txt dest=/path/filename:with_colon.txt + +It can be written as: + + copy: src=file.txt dest='/path/filename:with_colon.txt' + +Or: + + copy: 'src=file.txt dest=/path/filename:with_colon.txt' +""" + +YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR = """\ +This one looks easy to fix. It seems that there is a value started +with a quote, and the YAML parser is expecting to see the line ended +with the same kind of quote. For instance: + + when: "ok" in result.stdout + +Could be written as: + + when: '"ok" in result.stdout' + +Or equivalently: + + when: "'ok' in result.stdout" +""" + +YAML_COMMON_UNBALANCED_QUOTES_ERROR = """\ +We could be wrong, but this one looks like it might be an issue with +unbalanced quotes. If starting a value with a quote, make sure the +line ends with the same set of quotes. 
For instance this arbitrary +example: + + foo: "bad" "wolf" + +Could be written as: + + foo: '"bad" "wolf"' +""" + diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py index 59c329d453..577a5dae22 100644 --- a/v2/ansible/playbook/base.py +++ b/v2/ansible/playbook/base.py @@ -25,14 +25,17 @@ from io import FileIO from six import iteritems, string_types from ansible.playbook.attribute import Attribute, FieldAttribute -from ansible.parsing import load +from ansible.parsing.yaml import DataLoader class Base: _tags = FieldAttribute(isa='list') _when = FieldAttribute(isa='list') - def __init__(self): + def __init__(self, loader=DataLoader): + + # the data loader class is used to parse data from strings and files + self._loader = loader # each class knows attributes set upon it, see Task.py for example self._attributes = dict() @@ -64,7 +67,7 @@ class Base: assert ds is not None if isinstance(ds, string_types) or isinstance(ds, FileIO): - ds = load(ds) + ds = self._loader.load(ds) # we currently don't do anything with private attributes but may # later decide to filter them out of 'ds' here. 
diff --git a/v2/ansible/playbook/role.py b/v2/ansible/playbook/role.py index 88aecab985..b68ce51583 100644 --- a/v2/ansible/playbook/role.py +++ b/v2/ansible/playbook/role.py @@ -23,14 +23,11 @@ from six import iteritems, string_types import os +from ansible.errors import AnsibleError +from ansible.parsing.yaml import DataLoader from ansible.playbook.attribute import FieldAttribute from ansible.playbook.base import Base from ansible.playbook.block import Block -from ansible.errors import AnsibleError - -# FIXME: this def was cruft from the old utils code, so we'll need -# to relocate it somewhere before we can use it -#from ansible.parsing import load_data_from_file from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping @@ -48,10 +45,10 @@ class Role(Base): _default_vars = FieldAttribute(isa='dict', default=dict()) _role_vars = FieldAttribute(isa='dict', default=dict()) - def __init__(self, vault_password=None): + def __init__(self, vault_password=None, loader=DataLoader): self._role_path = None self._vault_password = vault_password - super(Role, self).__init__() + super(Role, self).__init__(loader=loader) def __repr__(self): return self.get_name() diff --git a/v2/ansible/playbook/task.py b/v2/ansible/playbook/task.py index 91ca7558d6..aa79d49410 100644 --- a/v2/ansible/playbook/task.py +++ b/v2/ansible/playbook/task.py @@ -26,6 +26,7 @@ from ansible.errors import AnsibleError from ansible.parsing.splitter import parse_kv from ansible.parsing.mod_args import ModuleArgsParser +from ansible.parsing.yaml import DataLoader from ansible.plugins import module_finder, lookup_finder class Task(Base): @@ -85,11 +86,11 @@ class Task(Base): _transport = FieldAttribute(isa='string') _until = FieldAttribute(isa='list') # ? 
- def __init__(self, block=None, role=None): + def __init__(self, block=None, role=None, loader=DataLoader): ''' constructors a task, without the Task.load classmethod, it will be pretty blank ''' self._block = block self._role = role - super(Task, self).__init__() + super(Task, self).__init__(loader) def get_name(self): ''' return the name of the task ''' diff --git a/v2/test/errors/test_errors.py b/v2/test/errors/test_errors.py index 5d1868a5a4..5b24dc4345 100644 --- a/v2/test/errors/test_errors.py +++ b/v2/test/errors/test_errors.py @@ -30,7 +30,7 @@ from ansible.compat.tests.mock import mock_open, patch class TestErrors(unittest.TestCase): def setUp(self): - self.message = 'this is the error message' + self.message = 'This is the error message' self.obj = AnsibleBaseYAMLObject() @@ -42,18 +42,18 @@ class TestErrors(unittest.TestCase): self.assertEqual(e.message, self.message) self.assertEqual(e.__repr__(), self.message) - @patch.object(AnsibleError, '_get_line_from_file') + @patch.object(AnsibleError, '_get_error_lines_from_file') def test_error_with_object(self, mock_method): self.obj._data_source = 'foo.yml' self.obj._line_number = 1 self.obj._column_number = 1 - mock_method.return_value = 'this is line 1\n' + mock_method.return_value = ('this is line 1\n', '') e = AnsibleError(self.message, self.obj) - self.assertEqual(e.message, 'this is the error message\nThe error occurred on line 1 of the file foo.yml:\nthis is line 1\n^') + self.assertEqual(e.message, "This is the error message\nThe error appears to have been in 'foo.yml': line 1, column 1,\nbut may actually be before there depending on the exact syntax problem.\n\nthis is line 1\n^\n") - def test_error_get_line_from_file(self): + def test_get_error_lines_from_file(self): m = mock_open() m.return_value.readlines.return_value = ['this is line 1\n'] @@ -63,12 +63,12 @@ class TestErrors(unittest.TestCase): self.obj._line_number = 1 self.obj._column_number = 1 e = AnsibleError(self.message, self.obj) - 
self.assertEqual(e.message, 'this is the error message\nThe error occurred on line 1 of the file foo.yml:\nthis is line 1\n^') + self.assertEqual(e.message, "This is the error message\nThe error appears to have been in 'foo.yml': line 1, column 1,\nbut may actually be before there depending on the exact syntax problem.\n\nthis is line 1\n^\n") # this line will not be found, as it is out of the index range self.obj._data_source = 'foo.yml' self.obj._line_number = 2 self.obj._column_number = 1 e = AnsibleError(self.message, self.obj) - self.assertEqual(e.message, 'this is the error message\nThe error occurred on line 2 of the file foo.yml:\n\n(specified line no longer in file, maybe it changed?)') + self.assertEqual(e.message, "This is the error message\nThe error appears to have been in 'foo.yml': line 2, column 1,\nbut may actually be before there depending on the exact syntax problem.\n\n(specified line no longer in file, maybe it changed?)") diff --git a/v2/test/parsing/test_general.py b/v2/test/parsing/test_general.py deleted file mode 100644 index b06038a588..0000000000 --- a/v2/test/parsing/test_general.py +++ /dev/null @@ -1,104 +0,0 @@ -# (c) 2012-2014, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -from ansible.compat.tests import unittest -from ansible.errors import AnsibleInternalError, AnsibleParserError -from ansible.parsing import load - -import json -import yaml - -from io import FileIO - -class MockFile(FileIO): - - def __init__(self, ds, method='json'): - self.ds = ds - self.method = method - - def read(self): - if self.method == 'json': - return json.dumps(self.ds) - elif self.method == 'yaml': - return yaml.dump(self.ds) - elif self.method == 'fail': - return """ - AAARGGGGH: - ***** - THIS WON'T PARSE !!! - NOOOOOOOOOOOOOOOOOO - """ - else: - raise Exception("untestable serializer") - - def close(self): - pass - -class TestGeneralParsing(unittest.TestCase): - def setUp(self): - pass - - def tearDown(self): - pass - - def test_parse_json_from_string(self): - data = """ - { - "asdf" : "1234", - "jkl" : 5678 - } - """ - output = load(data) - self.assertEqual(output['asdf'], '1234') - self.assertEqual(output['jkl'], 5678) - - def test_parse_json_from_file(self): - output = load(MockFile(dict(a=1,b=2,c=3), 'json')) - self.assertEqual(output, dict(a=1,b=2,c=3)) - - def test_parse_yaml_from_dict(self): - data = """ - asdf: '1234' - jkl: 5678 - """ - output = load(data) - self.assertEqual(output['asdf'], '1234') - self.assertEqual(output['jkl'], 5678) - - def test_parse_yaml_from_file(self): - output = load(MockFile(dict(a=1,b=2,c=3),'yaml')) - self.assertEqual(output, dict(a=1,b=2,c=3)) - - def test_parse_fail(self): - data = """ - TEXT: - *** - NOT VALID - """ - self.assertRaises(AnsibleParserError, load, data) - - def test_parse_fail_from_file(self): - self.assertRaises(AnsibleParserError, load, MockFile(None,'fail')) - - def test_parse_fail_invalid_type(self): - self.assertRaises(AnsibleInternalError, load, 3000) - self.assertRaises(AnsibleInternalError, load, dict(a=1,b=2,c=3)) - diff --git 
a/v2/test/parsing/yaml/test_data_loader.py b/v2/test/parsing/yaml/test_data_loader.py new file mode 100644 index 0000000000..166a60ee5e --- /dev/null +++ b/v2/test/parsing/yaml/test_data_loader.py @@ -0,0 +1,64 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from yaml.scanner import ScannerError + +from ansible.compat.tests import unittest +from ansible.compat.tests.mock import patch +from ansible.errors import AnsibleParserError + +from ansible.parsing.yaml import DataLoader +from ansible.parsing.yaml.objects import AnsibleMapping + +class TestDataLoader(unittest.TestCase): + + def setUp(self): + # FIXME: need to add tests that utilize vault_password + self._loader = DataLoader() + + def tearDown(self): + pass + + @patch.object(DataLoader, '_get_file_contents') + def test_parse_json_from_file(self, mock_def): + mock_def.return_value = ("""{"a": 1, "b": 2, "c": 3}""", True) + output = self._loader.load_from_file('dummy_json.txt') + self.assertEqual(output, dict(a=1,b=2,c=3)) + + @patch.object(DataLoader, '_get_file_contents') + def test_parse_yaml_from_file(self, mock_def): + mock_def.return_value = (""" + a: 1 + b: 2 + c: 3 + """, True) + output = self._loader.load_from_file('dummy_yaml.txt') + self.assertEqual(output, 
dict(a=1,b=2,c=3)) + + @patch.object(DataLoader, '_get_file_contents') + def test_parse_fail_from_file(self, mock_def): + mock_def.return_value = (""" + TEXT: + *** + NOT VALID + """, True) + self.assertRaises(AnsibleParserError, self._loader.load_from_file, 'dummy_yaml_bad.txt') + diff --git a/v2/test/parsing/yaml/test_yaml.py b/v2/test/parsing/yaml/test_yaml.py deleted file mode 100644 index c468ef6d6f..0000000000 --- a/v2/test/parsing/yaml/test_yaml.py +++ /dev/null @@ -1,100 +0,0 @@ -# (c) 2012-2014, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -from ansible.compat.tests import unittest - -from yaml.scanner import ScannerError - -from ansible.parsing.yaml import safe_load -from ansible.parsing.yaml.objects import AnsibleMapping - -# a single dictionary instance -data1 = '''--- -key: value -''' - -# multiple dictionary instances -data2 = '''--- -- key1: value1 -- key2: value2 - -- key3: value3 - - -- key4: value4 -''' - -# multiple dictionary instances with other nested -# dictionaries contained within those -data3 = '''--- -- key1: - subkey1: subvalue1 - subkey2: subvalue2 - subkey3: - subsubkey1: subsubvalue1 -- key2: - subkey4: subvalue4 -- list1: - - list1key1: list1value1 - list1key2: list1value2 - list1key3: list1value3 -''' - -bad_data1 = '''--- -foo: bar - bam: baz -''' - -class TestSafeLoad(unittest.TestCase): - - def setUp(self): - pass - - def tearDown(self): - pass - - def test_safe_load_bad(self): - # test the loading of bad yaml data - self.assertRaises(ScannerError, safe_load, bad_data1) - - def test_safe_load(self): - # test basic dictionary - res = safe_load(data1) - self.assertEqual(type(res), AnsibleMapping) - self.assertEqual(res._line_number, 2) - - # test data with multiple dictionaries - res = safe_load(data2) - self.assertEqual(len(res), 4) - self.assertEqual(res[0]._line_number, 2) - self.assertEqual(res[1]._line_number, 3) - self.assertEqual(res[2]._line_number, 5) - self.assertEqual(res[3]._line_number, 8) - - # test data with multiple sub-dictionaries - res = safe_load(data3) - self.assertEqual(len(res), 3) - self.assertEqual(res[0]._line_number, 2) - self.assertEqual(res[1]._line_number, 7) - self.assertEqual(res[2]._line_number, 9) - self.assertEqual(res[0]['key1']._line_number, 3) - self.assertEqual(res[1]['key2']._line_number, 8) - self.assertEqual(res[2]['list1'][0]._line_number, 10) From 5ceb07c65af0539d05af27bfd03f7b6a33265aea Mon Sep 17 00:00:00 
2001 From: Will Thames Date: Sat, 25 Oct 2014 18:17:57 +1000 Subject: [PATCH 285/813] Add Frankfurt AWS region --- lib/ansible/module_utils/ec2.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ansible/module_utils/ec2.py b/lib/ansible/module_utils/ec2.py index b4558ef0a4..3d3040068f 100644 --- a/lib/ansible/module_utils/ec2.py +++ b/lib/ansible/module_utils/ec2.py @@ -37,6 +37,7 @@ AWS_REGIONS = [ 'ap-southeast-1', 'ap-southeast-2', 'eu-west-1', + 'eu-central-1', 'sa-east-1', 'us-east-1', 'us-west-1', From 44afa7faccdf212b4329f83619358f999bbd9be7 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sat, 25 Oct 2014 23:42:35 -0500 Subject: [PATCH 286/813] Updating Role class for new DataLoader stuff --- v2/ansible/playbook/base.py | 2 +- v2/ansible/playbook/role.py | 13 ++-- v2/test/playbook/test_role.py | 108 ++++++++++++++++++++++++++++++++-- 3 files changed, 110 insertions(+), 13 deletions(-) diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py index 577a5dae22..ce0e2a199c 100644 --- a/v2/ansible/playbook/base.py +++ b/v2/ansible/playbook/base.py @@ -35,7 +35,7 @@ class Base: def __init__(self, loader=DataLoader): # the data loader class is used to parse data from strings and files - self._loader = loader + self._loader = loader() # each class knows attributes set upon it, see Task.py for example self._attributes = dict() diff --git a/v2/ansible/playbook/role.py b/v2/ansible/playbook/role.py index b68ce51583..38465783f5 100644 --- a/v2/ansible/playbook/role.py +++ b/v2/ansible/playbook/role.py @@ -38,16 +38,15 @@ class Role(Base): _src = FieldAttribute(isa='string') _scm = FieldAttribute(isa='string') _version = FieldAttribute(isa='string') - _params = FieldAttribute(isa='dict', default=dict()) - _metadata = FieldAttribute(isa='dict', default=dict()) _task_blocks = FieldAttribute(isa='list', default=[]) _handler_blocks = FieldAttribute(isa='list', default=[]) + _params = FieldAttribute(isa='dict', default=dict()) + _metadata = 
FieldAttribute(isa='dict', default=dict()) _default_vars = FieldAttribute(isa='dict', default=dict()) _role_vars = FieldAttribute(isa='dict', default=dict()) - def __init__(self, vault_password=None, loader=DataLoader): + def __init__(self, loader=DataLoader): self._role_path = None - self._vault_password = vault_password super(Role, self).__init__(loader=loader) def __repr__(self): @@ -57,9 +56,9 @@ class Role(Base): return self._attributes['role_name'] @staticmethod - def load(data, vault_password=None): + def load(data): assert isinstance(data, string_types) or isinstance(data, dict) - r = Role(vault_password=vault_password) + r = Role() r.load_data(data) return r @@ -116,7 +115,7 @@ class Role(Base): if os.path.exists(file_path) and os.path.isdir(file_path): main_file = self._resolve_main(file_path) if os.path.exists(main_file): - return load_data_from_file(main_file, self._vault_password) + return self._loader.load_from_file(main_file) return None def _resolve_main(self, basepath): diff --git a/v2/test/playbook/test_role.py b/v2/test/playbook/test_role.py index f2236f7fc0..b24a1b1936 100644 --- a/v2/test/playbook/test_role.py +++ b/v2/test/playbook/test_role.py @@ -19,10 +19,14 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +from ansible.compat.tests import unittest +from ansible.compat.tests.mock import patch, MagicMock + from ansible.playbook.block import Block from ansible.playbook.role import Role from ansible.playbook.task import Task -from ansible.compat.tests import unittest + +from ansible.parsing.yaml import DataLoader class TestRole(unittest.TestCase): @@ -35,6 +39,15 @@ class TestRole(unittest.TestCase): def test_construct_empty_block(self): r = Role() + @patch.object(DataLoader, 'load_from_file') + def test__load_role_yaml(self, _load_from_file): + _load_from_file.return_value = dict(foo='bar') + r = Role() + with patch('os.path.exists', return_value=True): + with patch('os.path.isdir', 
return_value=True): + res = r._load_role_yaml('/fake/path', 'some_subdir') + self.assertEqual(res, dict(foo='bar')) + def test_role__load_list_of_blocks(self): task = dict(action='test') r = Role() @@ -45,8 +58,93 @@ class TestRole(unittest.TestCase): res = r._load_list_of_blocks([task,task,task]) self.assertEqual(len(res), 3) - def test_load_role_simple(self): - pass + @patch.object(Role, '_get_role_path') + @patch.object(Role, '_load_role_yaml') + def test_load_role_with_tasks(self, _load_role_yaml, _get_role_path): + + _get_role_path.return_value = ('foo', '/etc/ansible/roles/foo') + + def fake_load_role_yaml(role_path, subdir): + if role_path == '/etc/ansible/roles/foo': + if subdir == 'tasks': + return [dict(shell='echo "hello world"')] + return None + + _load_role_yaml.side_effect = fake_load_role_yaml + + r = Role.load('foo') + self.assertEqual(len(r.task_blocks), 1) + assert isinstance(r.task_blocks[0], Block) + + @patch.object(Role, '_get_role_path') + @patch.object(Role, '_load_role_yaml') + def test_load_role_with_handlers(self, _load_role_yaml, _get_role_path): + + _get_role_path.return_value = ('foo', '/etc/ansible/roles/foo') + + def fake_load_role_yaml(role_path, subdir): + if role_path == '/etc/ansible/roles/foo': + if subdir == 'handlers': + return [dict(name='test handler', shell='echo "hello world"')] + return None + + _load_role_yaml.side_effect = fake_load_role_yaml + + r = Role.load('foo') + self.assertEqual(len(r.handler_blocks), 1) + assert isinstance(r.handler_blocks[0], Block) + + @patch.object(Role, '_get_role_path') + @patch.object(Role, '_load_role_yaml') + def test_load_role_with_vars(self, _load_role_yaml, _get_role_path): + + _get_role_path.return_value = ('foo', '/etc/ansible/roles/foo') + + def fake_load_role_yaml(role_path, subdir): + if role_path == '/etc/ansible/roles/foo': + if subdir == 'defaults': + return dict(foo='bar') + elif subdir == 'vars': + return dict(foo='bam') + return None + + _load_role_yaml.side_effect = 
fake_load_role_yaml + + r = Role.load('foo') + self.assertEqual(r.default_vars, dict(foo='bar')) + self.assertEqual(r.role_vars, dict(foo='bam')) + + @patch.object(Role, '_get_role_path') + @patch.object(Role, '_load_role_yaml') + def test_load_role_with_metadata(self, _load_role_yaml, _get_role_path): + + _get_role_path.return_value = ('foo', '/etc/ansible/roles/foo') + + def fake_load_role_yaml(role_path, subdir): + if role_path == '/etc/ansible/roles/foo': + if subdir == 'meta': + return dict(dependencies=[], allow_duplicates=False) + return None + + _load_role_yaml.side_effect = fake_load_role_yaml + + r = Role.load('foo') + self.assertEqual(r.metadata, dict(dependencies=[], allow_duplicates=False)) + + @patch.object(Role, '_get_role_path') + @patch.object(Role, '_load_role_yaml') + def test_load_role_complex(self, _load_role_yaml, _get_role_path): + + _get_role_path.return_value = ('foo', '/etc/ansible/roles/foo') + + def fake_load_role_yaml(role_path, subdir): + if role_path == '/etc/ansible/roles/foo': + if subdir == 'tasks': + return [dict(shell='echo "hello world"')] + return None + + _load_role_yaml.side_effect = fake_load_role_yaml + + r = Role.load(dict(role='foo')) + - def test_load_role_complex(self): - pass From 39bbe36fc78d3eff1effb8e7116c4ab05315dfc5 Mon Sep 17 00:00:00 2001 From: Ton Kersten Date: Sun, 26 Oct 2014 15:46:08 +0100 Subject: [PATCH 287/813] Ignore compiled Python for documentation. When running `ansible-doc --list` a lot of errors are generated when `ansible-doc` tries to find documentation strings in `.pyc` files. 
--- bin/ansible-doc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/ansible-doc b/bin/ansible-doc index d949c8e537..d5143f33a1 100755 --- a/bin/ansible-doc +++ b/bin/ansible-doc @@ -33,7 +33,7 @@ import traceback MODULEDIR = C.DEFAULT_MODULE_PATH -BLACKLIST_EXTS = ('.swp', '.bak', '~', '.rpm') +BLACKLIST_EXTS = ('.pyc', '.swp', '.bak', '~', '.rpm') _ITALIC = re.compile(r"I\(([^)]+)\)") _BOLD = re.compile(r"B\(([^)]+)\)") From 66f700eb0d23ade0c5656dca09e62380f02ae366 Mon Sep 17 00:00:00 2001 From: bobobox Date: Sun, 26 Oct 2014 14:44:04 -0500 Subject: [PATCH 288/813] Update playbooks_intro.rst Small punctuation/grammar fix. --- docsite/rst/playbooks_intro.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_intro.rst b/docsite/rst/playbooks_intro.rst index 70db3f7fe2..4bc3bccf2d 100644 --- a/docsite/rst/playbooks_intro.rst +++ b/docsite/rst/playbooks_intro.rst @@ -196,7 +196,7 @@ it is recommended that you use the more conventional "module: options" format. This recommended format is used throughout the documentation, but you may encounter the older format in some playbooks. -Here is what a basic task looks like, as with most modules, +Here is what a basic task looks like. 
As with most modules, the service module takes key=value arguments:: tasks: From 7ea84d74994f73361c889b3bb71d558a01234275 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 27 Oct 2014 13:25:32 -0500 Subject: [PATCH 289/813] More v2 roles class work * added ability to set parents (will be used when the deps are loaded) * added role caching, so roles are not reloaded needlessly (and for use in detecting when roles have already been run) * reworked the way metadata was stored - now individual attribute fields instead of a dictionary blob --- v2/ansible/playbook/role.py | 92 ++++++++++++++++++++++++++++++++--- v2/test/playbook/test_role.py | 23 +++++++-- 2 files changed, 105 insertions(+), 10 deletions(-) diff --git a/v2/ansible/playbook/role.py b/v2/ansible/playbook/role.py index 38465783f5..2d492ee506 100644 --- a/v2/ansible/playbook/role.py +++ b/v2/ansible/playbook/role.py @@ -23,7 +23,9 @@ from six import iteritems, string_types import os -from ansible.errors import AnsibleError +from hashlib import md5 + +from ansible.errors import AnsibleError, AnsibleParserError from ansible.parsing.yaml import DataLoader from ansible.playbook.attribute import FieldAttribute from ansible.playbook.base import Base @@ -31,6 +33,20 @@ from ansible.playbook.block import Block from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping +__all__ = ['Role'] + +# The role cache is used to prevent re-loading roles, which +# may already exist. 
Keys into this cache are the MD5 hash +# of the role definition (for dictionary definitions, this +# will be based on the repr() of the dictionary object) +_ROLE_CACHE = dict() + +_VALID_METADATA_KEYS = [ + 'dependencies', + 'allow_duplicates', + 'galaxy_info', +] + class Role(Base): _role_name = FieldAttribute(isa='string') @@ -41,12 +57,19 @@ class Role(Base): _task_blocks = FieldAttribute(isa='list', default=[]) _handler_blocks = FieldAttribute(isa='list', default=[]) _params = FieldAttribute(isa='dict', default=dict()) - _metadata = FieldAttribute(isa='dict', default=dict()) _default_vars = FieldAttribute(isa='dict', default=dict()) _role_vars = FieldAttribute(isa='dict', default=dict()) + # Attributes based on values in metadata. These MUST line up + # with the values stored in _VALID_METADATA_KEYS + _dependencies = FieldAttribute(isa='list', default=[]) + _allow_duplicates = FieldAttribute(isa='bool', default=False) + _galaxy_info = FieldAttribute(isa='dict', default=dict()) + def __init__(self, loader=DataLoader): self._role_path = None + self._parents = [] + super(Role, self).__init__(loader=loader) def __repr__(self): @@ -56,10 +79,30 @@ class Role(Base): return self._attributes['role_name'] @staticmethod - def load(data): + def load(data, parent_role=None): assert isinstance(data, string_types) or isinstance(data, dict) - r = Role() - r.load_data(data) + + # Check to see if this role has been loaded already, based on the + # role definition, partially to save loading time and also to make + # sure that roles are run a single time unless specifically allowed + # to run more than once + + # FIXME: the tags and conditionals, if specified in the role def, + # should not figure into the resulting hash + cache_key = md5(repr(data)) + if cache_key in _ROLE_CACHE: + r = _ROLE_CACHE[cache_key] + else: + # load the role + r = Role() + r.load_data(data) + # and cache it for next time + _ROLE_CACHE[cache_key] = r + + # now add the parent to the (new) role + if 
parent_role: + r.add_parent(parent_role) + return r #------------------------------------------------------------------------------ @@ -101,12 +144,16 @@ class Role(Base): new_ds['role_path'] = role_path # load the role's files, if they exist - new_ds['metadata'] = self._load_role_yaml(role_path, 'meta') new_ds['task_blocks'] = self._load_role_yaml(role_path, 'tasks') new_ds['handler_blocks'] = self._load_role_yaml(role_path, 'handlers') new_ds['default_vars'] = self._load_role_yaml(role_path, 'defaults') new_ds['role_vars'] = self._load_role_yaml(role_path, 'vars') + # we treat metadata slightly differently: we instead pull out the + # valid metadata keys and munge them directly into new_ds + metadata_ds = self._munge_metadata(role_name, role_path) + new_ds.update(metadata_ds) + # and return the newly munged ds return new_ds @@ -256,6 +303,32 @@ class Role(Base): return ds + def _munge_metadata(self, role_name, role_path): + ''' + loads the metadata main.yml (if it exists) and creates a clean + datastructure we can merge into the newly munged ds + ''' + + meta_ds = dict() + + metadata = self._load_role_yaml(role_path, 'meta') + if metadata: + if not isinstance(metadata, dict): + raise AnsibleParserError("The metadata for role '%s' should be a dictionary, instead it is a %s" % (role_name, type(metadata)), obj=metadata) + + for key in metadata: + if key in _VALID_METADATA_KEYS: + if isinstance(metadata[key], dict): + meta_ds[key] = metadata[key].copy() + elif isinstance(metadata[key], list): + meta_ds[key] = metadata[key][:] + else: + meta_ds[key] = metadata[key] + else: + raise AnsibleParserError("%s is not a valid metadata key for role '%s'" % (key, role_name), obj=metadata) + + return meta_ds + #------------------------------------------------------------------------------ # attribute loading defs @@ -280,6 +353,13 @@ class Role(Base): #------------------------------------------------------------------------------ # other functions + def add_parent(self, 
parent_role): + ''' adds a role to the list of this roles parents ''' + assert isinstance(role, Role) + + if parent_role not in self._parents: + self._parents.append(parent_role) + def get_variables(self): # returns the merged variables for this role, including # recursively merging those of all child roles diff --git a/v2/test/playbook/test_role.py b/v2/test/playbook/test_role.py index b24a1b1936..d3138fa576 100644 --- a/v2/test/playbook/test_role.py +++ b/v2/test/playbook/test_role.py @@ -22,6 +22,7 @@ __metaclass__ = type from ansible.compat.tests import unittest from ansible.compat.tests.mock import patch, MagicMock +from ansible.errors import AnsibleParserError from ansible.playbook.block import Block from ansible.playbook.role import Role from ansible.playbook.task import Task @@ -118,18 +119,32 @@ class TestRole(unittest.TestCase): @patch.object(Role, '_load_role_yaml') def test_load_role_with_metadata(self, _load_role_yaml, _get_role_path): - _get_role_path.return_value = ('foo', '/etc/ansible/roles/foo') - def fake_load_role_yaml(role_path, subdir): if role_path == '/etc/ansible/roles/foo': if subdir == 'meta': - return dict(dependencies=[], allow_duplicates=False) + return dict(dependencies=['bar'], allow_duplicates=True, galaxy_info=dict(a='1', b='2', c='3')) + elif role_path == '/etc/ansible/roles/bad1': + if subdir == 'meta': + return 1 + elif role_path == '/etc/ansible/roles/bad2': + if subdir == 'meta': + return dict(foo='bar') return None _load_role_yaml.side_effect = fake_load_role_yaml + _get_role_path.return_value = ('foo', '/etc/ansible/roles/foo') + r = Role.load('foo') - self.assertEqual(r.metadata, dict(dependencies=[], allow_duplicates=False)) + self.assertEqual(r.dependencies, ['bar']) + self.assertEqual(r.allow_duplicates, True) + self.assertEqual(r.galaxy_info, dict(a='1', b='2', c='3')) + + _get_role_path.return_value = ('bad1', '/etc/ansible/roles/bad1') + self.assertRaises(AnsibleParserError, Role.load, 'bad1') + + 
_get_role_path.return_value = ('bad2', '/etc/ansible/roles/bad2') + self.assertRaises(AnsibleParserError, Role.load, 'bad2') @patch.object(Role, '_get_role_path') @patch.object(Role, '_load_role_yaml') From b3b356480da93d9266a9a846c364b2a74f4d0085 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 27 Oct 2014 15:52:56 -0700 Subject: [PATCH 290/813] added the ability to keep aliased and deprecated modules prefixed with '_', they will be loaded after non prefixed modules are checked they can be full modules or symlinks to existing ones (alias) also updated ansible doc to ignore these, will eventually add selective display --- bin/ansible-doc | 4 +++- lib/ansible/utils/plugins.py | 3 +++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/bin/ansible-doc b/bin/ansible-doc index d5143f33a1..8a7faadb24 100755 --- a/bin/ansible-doc +++ b/bin/ansible-doc @@ -225,11 +225,13 @@ def main(): # list all modules paths = utils.plugins.module_finder._get_paths() module_list = [] + deprecated_list = [] + module_aliases = {} for path in paths: # os.system("ls -C %s" % (path)) if os.path.isdir(path): for module in os.listdir(path): - if any(module.endswith(x) for x in BLACKLIST_EXTS): + if module.startswith('_') or any(module.endswith(x) for x in BLACKLIST_EXTS): continue module_list.append(module) diff --git a/lib/ansible/utils/plugins.py b/lib/ansible/utils/plugins.py index faf5b5f26f..0d050fd13d 100644 --- a/lib/ansible/utils/plugins.py +++ b/lib/ansible/utils/plugins.py @@ -178,6 +178,9 @@ class PluginLoader(object): self._plugin_path_cache[full_name] = path return path + if not name.startswith('_'): + return self.find_plugin('_' + name, suffixes, transport) + return None def has_plugin(self, name): From 58defa5cce4ac1095e35a21b167621c27649a34f Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 28 Oct 2014 00:15:02 -0500 Subject: [PATCH 291/813] Adding role deps to v2 Role class and fixing some bugs --- v2/ansible/parsing/yaml/__init__.py | 2 +- 
v2/ansible/parsing/yaml/objects.py | 2 +- v2/ansible/parsing/yaml/strings.py | 4 ---- v2/ansible/playbook/role.py | 29 +++++++++++++++++++---------- v2/test/playbook/test_role.py | 24 ++++++++++++++++++------ 5 files changed, 39 insertions(+), 22 deletions(-) diff --git a/v2/ansible/parsing/yaml/__init__.py b/v2/ansible/parsing/yaml/__init__.py index 6d121d991e..0acb77f8fd 100644 --- a/v2/ansible/parsing/yaml/__init__.py +++ b/v2/ansible/parsing/yaml/__init__.py @@ -80,7 +80,7 @@ class DataLoader(): # if the file has already been read in and cached, we'll # return those results to avoid more file/vault operations if file_name in self._FILE_CACHE: - return self._FILE_CACHE + return self._FILE_CACHE[file_name] # read the file contents and load the data structure from them (file_data, show_content) = self._get_file_contents(file_name) diff --git a/v2/ansible/parsing/yaml/objects.py b/v2/ansible/parsing/yaml/objects.py index ba89accd73..6a7482fe49 100644 --- a/v2/ansible/parsing/yaml/objects.py +++ b/v2/ansible/parsing/yaml/objects.py @@ -37,7 +37,7 @@ class AnsibleBaseYAMLObject: self._line_number = line self._column_number = col - def copy_position_info(obj): + def copy_position_info(self, obj): ''' copies the position info from another object ''' assert isinstance(obj, AnsibleBaseYAMLObject) diff --git a/v2/ansible/parsing/yaml/strings.py b/v2/ansible/parsing/yaml/strings.py index b7e304194f..a778904e63 100644 --- a/v2/ansible/parsing/yaml/strings.py +++ b/v2/ansible/parsing/yaml/strings.py @@ -15,10 +15,6 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
-# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - __all__ = [ 'YAML_SYNTAX_ERROR', 'YAML_POSITION_DETAILS', diff --git a/v2/ansible/playbook/role.py b/v2/ansible/playbook/role.py index 2d492ee506..38a8ac195d 100644 --- a/v2/ansible/playbook/role.py +++ b/v2/ansible/playbook/role.py @@ -93,11 +93,14 @@ class Role(Base): if cache_key in _ROLE_CACHE: r = _ROLE_CACHE[cache_key] else: - # load the role - r = Role() - r.load_data(data) - # and cache it for next time - _ROLE_CACHE[cache_key] = r + try: + # load the role + r = Role() + r.load_data(data) + # and cache it for next time + _ROLE_CACHE[cache_key] = r + except RuntimeError: + raise AnsibleError("A recursive loop was detected while loading your roles", obj=data) # now add the parent to the (new) role if parent_role: @@ -192,17 +195,13 @@ class Role(Base): # FIXME: this should use unfrackpath once the utils code has been sorted out role_path = os.path.normpath(role) - print("first role path is %s" % role_path) if os.path.exists(role_path): role_name = os.path.basename(role) - print('returning role path %s' % role_path) return (role_name, role_path) else: for path in ('./roles', '/etc/ansible/roles'): role_path = os.path.join(path, role) - print("current role path is %s" % role_path) if os.path.exists(role_path): - print('returning role path %s' % role_path) return (role, role_path) # FIXME: make the parser smart about list/string entries @@ -350,12 +349,22 @@ class Role(Base): return [] return self._load_list_of_blocks(ds) + def _load_dependencies(self, attr, ds): + assert type(ds) in (list, type(None)) + + deps = [] + if ds: + for role_def in ds: + r = Role.load(role_def, parent_role=self) + deps.append(r) + return deps + #------------------------------------------------------------------------------ # other functions def add_parent(self, parent_role): ''' adds a role to the list of this roles parents ''' - assert isinstance(role, Role) + 
assert isinstance(parent_role, Role) if parent_role not in self._parents: self._parents.append(parent_role) diff --git a/v2/test/playbook/test_role.py b/v2/test/playbook/test_role.py index d3138fa576..094c5c3f49 100644 --- a/v2/test/playbook/test_role.py +++ b/v2/test/playbook/test_role.py @@ -119,10 +119,23 @@ class TestRole(unittest.TestCase): @patch.object(Role, '_load_role_yaml') def test_load_role_with_metadata(self, _load_role_yaml, _get_role_path): + def fake_get_role_path(role): + if role == 'foo': + return ('foo', '/etc/ansible/roles/foo') + elif role == 'bar': + return ('bar', '/etc/ansible/roles/bar') + elif role == 'bad1': + return ('bad1', '/etc/ansible/roles/bad1') + elif role == 'bad2': + return ('bad2', '/etc/ansible/roles/bad2') + def fake_load_role_yaml(role_path, subdir): if role_path == '/etc/ansible/roles/foo': if subdir == 'meta': return dict(dependencies=['bar'], allow_duplicates=True, galaxy_info=dict(a='1', b='2', c='3')) + elif role_path == '/etc/ansible/roles/bar': + if subdir == 'meta': + return dict() elif role_path == '/etc/ansible/roles/bad1': if subdir == 'meta': return 1 @@ -131,19 +144,18 @@ class TestRole(unittest.TestCase): return dict(foo='bar') return None + _get_role_path.side_effect = fake_get_role_path _load_role_yaml.side_effect = fake_load_role_yaml - _get_role_path.return_value = ('foo', '/etc/ansible/roles/foo') - r = Role.load('foo') - self.assertEqual(r.dependencies, ['bar']) + self.assertEqual(len(r.dependencies), 1) + self.assertEqual(type(r.dependencies[0]), Role) + self.assertEqual(len(r.dependencies[0]._parents), 1) + self.assertEqual(r.dependencies[0]._parents[0], r) self.assertEqual(r.allow_duplicates, True) self.assertEqual(r.galaxy_info, dict(a='1', b='2', c='3')) - _get_role_path.return_value = ('bad1', '/etc/ansible/roles/bad1') self.assertRaises(AnsibleParserError, Role.load, 'bad1') - - _get_role_path.return_value = ('bad2', '/etc/ansible/roles/bad2') self.assertRaises(AnsibleParserError, Role.load, 
'bad2') @patch.object(Role, '_get_role_path') From 9974ba01e3922ac2b9ff27e8b09166e7d916ae46 Mon Sep 17 00:00:00 2001 From: Juri Glass Date: Tue, 28 Oct 2014 11:18:32 +0100 Subject: [PATCH 292/813] added fact caching hint --- docsite/rst/playbooks_variables.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index b552352932..340744f419 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -842,7 +842,7 @@ A frequently used idiom is walking a group to find all IP addresses in that grou {% endfor %} An example of this could include pointing a frontend proxy server to all of the app servers, setting up the correct firewall rules between servers, etc. -You need to make sure that the facts of those hosts have been populated before though, for example by running a play against them. +You need to make sure that the facts of those hosts have been populated before though, for example by running a play against them if the facts have not been cached recently (fact caching was added in Ansible 1.8). Additionally, *inventory_hostname* is the name of the hostname as configured in Ansible's inventory host file. This can be useful for when you don't want to rely on the discovered hostname `ansible_hostname` or for other mysterious From f38f1c5663d6a5253e30a1e2d84a1596c004f7cd Mon Sep 17 00:00:00 2001 From: bobobox Date: Tue, 28 Oct 2014 10:15:05 -0500 Subject: [PATCH 293/813] vars_files haven't been discussed yet I think this is a chapter ordering thing... vars_files have not actually be explained yet (aside from a mention of them in the note right above, which also might need reconsidering?) I think they do get introduced in the next section 'Variables'. 
--- docsite/rst/playbooks_roles.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_roles.rst b/docsite/rst/playbooks_roles.rst index 3a2b2b7514..0d847b3278 100644 --- a/docsite/rst/playbooks_roles.rst +++ b/docsite/rst/playbooks_roles.rst @@ -153,7 +153,7 @@ Roles .. versionadded:: 1.2 -Now that you have learned about :ref:`vars_files `, tasks, and handlers, what is the best way to organize your playbooks? +Now that you have learned about tasks and handlers, what is the best way to organize your playbooks? The short answer is to use roles! Roles are ways of automatically loading certain vars_files, tasks, and handlers based on a known file structure. Grouping content by roles also allows easy sharing of roles with other users. From 5ab4467708d95777abbac8b9e74f99965da2f4aa Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 28 Oct 2014 08:36:31 -0700 Subject: [PATCH 294/813] module formatter skips modules with leading underscore to avoid documenting them. 
Soon will be patched to recognize them as either deprecated or an alias --- hacking/module_formatter.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py index f7d8570e93..53c2616533 100755 --- a/hacking/module_formatter.py +++ b/hacking/module_formatter.py @@ -128,6 +128,9 @@ def list_modules(module_dir): files2 = glob.glob("%s/*" % d) for f in files2: + if os.path.basename(f).startswith("_"): # skip deprecated/aliases for now + continue + if not f.endswith(".py") or f.endswith('__init__.py'): # windows powershell modules have documentation stubs in python docstring # format (they are not executed) so skip the ps1 format files From 617b6323e23128c938a28c01f55ab254ffdd183d Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 28 Oct 2014 08:43:18 -0700 Subject: [PATCH 295/813] added info about new deprecated/alias plugin loading --- docsite/rst/developing_modules.rst | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index 608ac7185b..355f402835 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -465,6 +465,23 @@ a github pull request to the `extras Date: Tue, 28 Oct 2014 15:15:11 -0400 Subject: [PATCH 296/813] Add a changelog entry for how the git module has changed. --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c443a5552c..4faa8f2ed3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -58,6 +58,7 @@ Some other notable changes: * ec2_ami_search: support for SSD and IOPS provisioned EBS images * can set ansible_sudo_exe as an inventory variable which allows specifying a different sudo (or equivalent) command +* git module: Submodule handling has changed. Previously if you used the ``recursive`` parameter to handle submodules, ansible would track the submodule upstream's head revision. 
This has been changed to checkout the version of the submodule specified in the superproject's git repository. This is inline with what git submodule update does. If you want the old behaviour use the new module parameter track_submodules=yes And various other bug fixes and improvements ... From b502cb8f5d828187efdc01ee278148492b47e67a Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 28 Oct 2014 15:21:08 -0400 Subject: [PATCH 297/813] Pull in git submodule tracking --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 7f611468a8..9fe5c2af2d 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 7f611468a8279d785af26852ca7dccc95bc73a41 +Subproject commit 9fe5c2af2dcfb125398475e4ed0b740e71d70709 From cb53b0f94af23599562f8375a191f45ab297e7f2 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 28 Oct 2014 15:55:24 -0400 Subject: [PATCH 298/813] Add branch info for our submodules --- .gitmodules | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.gitmodules b/.gitmodules index 6ab000c816..3f14953ec8 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,12 +1,16 @@ [submodule "lib/ansible/modules/core"] path = lib/ansible/modules/core url = https://github.com/ansible/ansible-modules-core.git + branch = devel [submodule "lib/ansible/modules/extras"] path = lib/ansible/modules/extras url = https://github.com/ansible/ansible-modules-extras.git + branch = devel [submodule "v2/ansible/modules/core"] path = v2/ansible/modules/core url = https://github.com/ansible/ansible-modules-core.git + branch = devel [submodule "v2/ansible/modules/extras"] path = v2/ansible/modules/extras url = https://github.com/ansible/ansible-modules-extras.git + branch = devel From 82e8d677d9a77c49f1a31c46056fb542a2fb507d Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 28 Oct 2014 17:58:25 -0400 Subject: [PATCH 299/813] Test the git 
changes for the git module's recursive flag --- lib/ansible/modules/core | 2 +- .../integration/roles/test_git/tasks/main.yml | 143 ++++++++++++++++++ 2 files changed, 144 insertions(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 9fe5c2af2d..63e81cfc2e 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 9fe5c2af2dcfb125398475e4ed0b740e71d70709 +Subproject commit 63e81cfc2e0c3c07245342cd41a0ba147eac55be diff --git a/test/integration/roles/test_git/tasks/main.yml b/test/integration/roles/test_git/tasks/main.yml index 93774afb46..09e42cbcd8 100644 --- a/test/integration/roles/test_git/tasks/main.yml +++ b/test/integration/roles/test_git/tasks/main.yml @@ -22,6 +22,11 @@ repo_format1: 'https://github.com/jimi-c/test_role' repo_format2: 'git@github.com:jimi-c/test_role.git' repo_format3: 'ssh://git@github.com/jimi-c/test_role.git' + repo_submodules: 'https://github.com/abadger/test_submodules.git' + repo_submodules_newer: 'https://github.com/abadger/test_submodules_newer.git' + repo_submodule1: 'https://github.com/abadger/test_submodules_subm1.git' + repo_submodule1_newer: 'https://github.com/abadger/test_submodules_subm1_newer.git' + repo_submodule2: 'https://github.com/abadger/test_submodules_subm2.git' known_host_files: - "{{ lookup('env','HOME') }}/.ssh/known_hosts" - '/etc/ssh/ssh_known_hosts' @@ -147,3 +152,141 @@ - assert: that: - 'git_result.changed' + +# +# Submodule tests +# + +# Repository A with submodules defined (repo_submodules) +# .gitmodules file points to Repository I +# Repository B forked from A that has newer commits (repo_submodules_newer) +# .gitmodules file points to Repository II instead of I +# .gitmodules file also points to Repository III +# Repository I for submodule1 (repo_submodule1) +# Has 1 file checked in +# Repository II forked from I that has newer commits (repo_submodule1_newer) +# Has 2 files checked in +# Repository III for a second submodule 
(repo_submodule2) +# Has 1 file checked in + +- name: clear checkout_dir + file: state=absent path={{ checkout_dir }} + +- name: Test that clone without recursive does not retrieve submodules + git: + repo: '{{ repo_submodules }}' + dest: '{{ checkout_dir }}' + recursive: no + +- command: 'ls -1a {{ checkout_dir }}/submodule1' + register: submodule1 + +- assert: + that: '{{ submodule1.stdout_lines|length }} == 2' + +- name: clear checkout_dir + file: state=absent path={{ checkout_dir }} + + + +- name: Test that clone with recursive retrieves submodules + git: + repo: '{{ repo_submodules }}' + dest: '{{ checkout_dir }}' + recursive: yes + +- command: 'ls -1a {{ checkout_dir }}/submodule1' + register: submodule1 + +- assert: + that: '{{ submodule1.stdout_lines|length }} == 4' + +- name: Copy the checkout so we can run several different tests on it + command: 'cp -pr {{ checkout_dir }} {{ checkout_dir }}.bak' + + + +- name: Check that modules will be updated if main repo is not + command: git config --replace-all remote.origin.url {{ repo_submodule1_newer }} + args: + chdir: "{{ checkout_dir }}/submodule1" + +- git: + repo: '{{ repo_submodules }}' + dest: '{{ checkout_dir }}' + update: yes + recursive: yes + track_submodules: yes + +- command: 'ls -1a {{ checkout_dir }}/submodule1' + register: submodule1 + +- debug: var=submodule1 +- assert: + that: '{{ submodule1.stdout_lines|length }} == 5' + ignore_errors: true + + + +- name: Restore checkout to prior state + file: state=absent path={{ checkout_dir }} +- command: 'cp -pr {{ checkout_dir }}.bak {{ checkout_dir }}' + +- name: Test that update without recursive does not change submodules + command: 'git config --replace-all remote.origin.url {{ repo_submodules_newer }}' + args: + chdir: '{{ checkout_dir }}' + +- git: + repo: '{{ repo_submodules_newer }}' + dest: '{{ checkout_dir }}' + recursive: no + update: yes + track_submodules: yes + +- command: 'ls -1a {{ checkout_dir }}/submodule1' + register: submodule1 + +- 
stat: + path: '{{ checkout_dir }}/submodule2' + register: submodule2 + +- command: 'ls -1a {{ checkout_dir }}/submodule2' + register: submodule2 + +- assert: + that: '{{ submodule1.stdout_lines|length }} == 4' +- assert: + that: '{{ submodule2.stdout_lines|length }} == 2' + + + +- name: Restore checkout to prior state + file: state=absent path={{ checkout_dir }} +- command: 'cp -pr {{ checkout_dir }}.bak {{ checkout_dir }}' + +- name: Test that update with recursive updated existing submodules + command: 'git config --replace-all remote.origin.url {{ repo_submodules_newer }}' + args: + chdir: '{{ checkout_dir }}' + +- git: + repo: '{{ repo_submodules_newer }}' + dest: '{{ checkout_dir }}' + update: yes + recursive: yes + track_submodules: yes + +- command: 'ls -1a {{ checkout_dir }}/submodule1' + register: submodule1 + +- assert: + that: '{{ submodule1.stdout_lines|length }} == 5' + + +- name: Test that update with recursive found new submodules + command: 'ls -1a {{ checkout_dir }}/submodule2' + register: submodule2 + +- assert: + that: '{{ submodule2.stdout_lines|length }} == 4' From 050d17295d2104695f4737f81684daf9ce32cf2d Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 28 Oct 2014 19:09:32 -0400 Subject: [PATCH 300/813] Remove test case that wasn't reported as something git should do --- .../integration/roles/test_git/tasks/main.yml | 27 ------------------- 1 file changed, 27 deletions(-) diff --git a/test/integration/roles/test_git/tasks/main.yml b/test/integration/roles/test_git/tasks/main.yml index 09e42cbcd8..14623a2ce9 100644 --- a/test/integration/roles/test_git/tasks/main.yml +++ b/test/integration/roles/test_git/tasks/main.yml @@ -188,7 +188,6 @@ file: state=absent path={{ checkout_dir }} - - name: Test that clone with recursive retrieves submodules git: repo: '{{ repo_submodules }}' @@ -206,32 +205,6 @@ -- name: Check that modules will be updated if main repo is not - command: git config --replace-all remote.origin.url {{ 
repo_submodule1_newer }} - args: - chdir: "{{ checkout_dir }}/submodule1" - -- git: - repo: '{{ repo_submodules }}' - dest: '{{ checkout_dir }}' - update: yes - recursive: yes - track_submodules: yes - -- command: 'ls -1a {{ checkout_dir }}/submodule1' - register: submodule1 - -- debug: var=submodule1 -- assert: - that: '{{ submodule1.stdout_lines|length }} == 5' - ignore_errors: true - - - -- name: Restore checkout to prior state - file: state=absent path={{ checkout_dir }} -- command: 'cp -pr {{ checkout_dir }}.bak {{ checkout_dir }}' - - name: Test that update without recursive does not change submodules command: 'git config --replace-all remote.origin.url {{ repo_submodules_newer }}' args: From 67ff4428d52a50b74812e446ac81a124562fd118 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 28 Oct 2014 20:27:14 -0400 Subject: [PATCH 301/813] Fix up all python3 issues that do not have to do with text/bytes --- test/integration/Makefile | 3 +++ v2/ansible/parsing/vault/__init__.py | 4 ++-- v2/ansible/parsing/yaml/__init__.py | 2 +- v2/test/parsing/vault/test_vault.py | 6 +++--- v2/test/parsing/vault/test_vault_editor.py | 12 ++++++------ 5 files changed, 15 insertions(+), 12 deletions(-) diff --git a/test/integration/Makefile b/test/integration/Makefile index 56e35d7c8b..6568c53017 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -38,6 +38,9 @@ unicode: non_destructive: ansible-playbook non_destructive.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) +mine: + ansible-playbook mine.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) + destructive: ansible-playbook destructive.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) diff --git a/v2/ansible/parsing/vault/__init__.py b/v2/ansible/parsing/vault/__init__.py index 506c0852f3..32a2727d33 100644 --- a/v2/ansible/parsing/vault/__init__.py +++ b/v2/ansible/parsing/vault/__init__.py @@ -507,7 +507,7 @@ class 
VaultAES256(object): # 1) nbits (integer) - Length of the counter, in bits. # 2) initial_value (integer) - initial value of the counter. "iv" from gen_key_initctr - ctr = Counter.new(128, initial_value=long(iv, 16)) + ctr = Counter.new(128, initial_value=int(iv, 16)) # AES.new PARAMETERS # 1) AES key, must be either 16, 24, or 32 bytes long -- "key" from gen_key_initctr @@ -542,7 +542,7 @@ class VaultAES256(object): return None # SET THE COUNTER AND THE CIPHER - ctr = Counter.new(128, initial_value=long(iv, 16)) + ctr = Counter.new(128, initial_value=int(iv, 16)) cipher = AES.new(key1, AES.MODE_CTR, counter=ctr) # DECRYPT PADDED DATA diff --git a/v2/ansible/parsing/yaml/__init__.py b/v2/ansible/parsing/yaml/__init__.py index 0acb77f8fd..c382282398 100644 --- a/v2/ansible/parsing/yaml/__init__.py +++ b/v2/ansible/parsing/yaml/__init__.py @@ -71,7 +71,7 @@ class DataLoader(): # if loading JSON failed for any reason, we go ahead # and try to parse it as YAML instead return self._safe_load(data) - except YAMLError, yaml_exc: + except YAMLError as yaml_exc: self._handle_error(yaml_exc, file_name, show_content) def load_from_file(self, file_name): diff --git a/v2/test/parsing/vault/test_vault.py b/v2/test/parsing/vault/test_vault.py index eb4df6ed90..d24573c729 100644 --- a/v2/test/parsing/vault/test_vault.py +++ b/v2/test/parsing/vault/test_vault.py @@ -125,7 +125,7 @@ class TestVaultLib(unittest.TestCase): error_hit = False try: enc_data = v.encrypt(data) - except errors.AnsibleError, e: + except errors.AnsibleError as e: error_hit = True assert error_hit, "No error was thrown when trying to encrypt data with a header" @@ -137,7 +137,7 @@ class TestVaultLib(unittest.TestCase): error_hit = False try: dec_data = v.decrypt(data) - except errors.AnsibleError, e: + except errors.AnsibleError as e: error_hit = True assert error_hit, "No error was thrown when trying to decrypt data without a header" @@ -150,7 +150,7 @@ class TestVaultLib(unittest.TestCase): error_hit = False 
try: enc_data = v.encrypt(data) - except errors.AnsibleError, e: + except errors.AnsibleError as e: error_hit = True assert not error_hit, "An error was thrown when trying to encrypt data without the cipher set" assert v.cipher_name == "AES256", "cipher name is not set to AES256: %s" % v.cipher_name diff --git a/v2/test/parsing/vault/test_vault_editor.py b/v2/test/parsing/vault/test_vault_editor.py index 3396c6f8ab..8948fd7209 100644 --- a/v2/test/parsing/vault/test_vault_editor.py +++ b/v2/test/parsing/vault/test_vault_editor.py @@ -97,9 +97,9 @@ class TestVaultEditor(unittest.TestCase): # make sure the password functions for the cipher error_hit = False - try: + try: ve.decrypt_file() - except errors.AnsibleError, e: + except errors.AnsibleError as e: error_hit = True # verify decrypted content @@ -125,9 +125,9 @@ class TestVaultEditor(unittest.TestCase): # make sure the password functions for the cipher error_hit = False - try: + try: ve.decrypt_file() - except errors.AnsibleError, e: + except errors.AnsibleError as e: error_hit = True # verify decrypted content @@ -155,7 +155,7 @@ class TestVaultEditor(unittest.TestCase): error_hit = False try: ve.rekey_file('ansible2') - except errors.AnsibleError, e: + except errors.AnsibleError as e: error_hit = True # verify decrypted content @@ -171,7 +171,7 @@ class TestVaultEditor(unittest.TestCase): error_hit = False try: dec_data = vl.decrypt(fdata) - except errors.AnsibleError, e: + except errors.AnsibleError as e: error_hit = True os.unlink(v10_file.name) From e99dc5a57cd639841775acbbe84ee27026d67f43 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 28 Oct 2014 20:40:39 -0400 Subject: [PATCH 302/813] Add copyright and py3 compat header --- v2/test/parsing/vault/test_vault_editor.py | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/v2/test/parsing/vault/test_vault_editor.py b/v2/test/parsing/vault/test_vault_editor.py index 8948fd7209..c788df54ae 100644 --- 
a/v2/test/parsing/vault/test_vault_editor.py +++ b/v2/test/parsing/vault/test_vault_editor.py @@ -1,3 +1,24 @@ +# (c) 2014, James Tanner +# (c) 2014, James Cammarata, +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type #!/usr/bin/env python import getpass From e7ee9ddc0a655575e240a570e240f8957687c883 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 28 Oct 2014 20:41:51 -0400 Subject: [PATCH 303/813] Add the python3 compat header --- v2/scripts/ansible | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/v2/scripts/ansible b/v2/scripts/ansible index 1f84012e01..ae8ccff595 100644 --- a/v2/scripts/ansible +++ b/v2/scripts/ansible @@ -14,3 +14,7 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type From 70555052271bfd8569c3be17a229f8c16dc405fb Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 28 Oct 2014 20:44:21 -0400 Subject: [PATCH 304/813] Add python3 compat header --- v2/ansible/parsing/vault/__init__.py | 4 ++++ v2/ansible/parsing/yaml/strings.py | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/v2/ansible/parsing/vault/__init__.py b/v2/ansible/parsing/vault/__init__.py index 32a2727d33..44f50f7d21 100644 --- a/v2/ansible/parsing/vault/__init__.py +++ b/v2/ansible/parsing/vault/__init__.py @@ -18,6 +18,10 @@ # example playbook to bootstrap this script in the examples/ dir which # installs ansible and sets it up to run on cron. +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + import os import shlex import shutil diff --git a/v2/ansible/parsing/yaml/strings.py b/v2/ansible/parsing/yaml/strings.py index a778904e63..b7e304194f 100644 --- a/v2/ansible/parsing/yaml/strings.py +++ b/v2/ansible/parsing/yaml/strings.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + __all__ = [ 'YAML_SYNTAX_ERROR', 'YAML_POSITION_DETAILS', From 54ba31aeefd818e1a7c4b88e1b998e2aeac33ee0 Mon Sep 17 00:00:00 2001 From: Praveen Kumar Date: Wed, 29 Oct 2014 10:29:46 +0530 Subject: [PATCH 305/813] As per yum module documents it support 'present, absent and latest' Current intro doc is pointing older way of using yum module. 
--- docsite/rst/intro_adhoc.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docsite/rst/intro_adhoc.rst b/docsite/rst/intro_adhoc.rst index 787a7e76ee..2646945be4 100644 --- a/docsite/rst/intro_adhoc.rst +++ b/docsite/rst/intro_adhoc.rst @@ -154,11 +154,11 @@ with yum. Ensure a package is installed, but don't update it:: - $ ansible webservers -m yum -a "name=acme state=installed" + $ ansible webservers -m yum -a "name=acme state=present" Ensure a package is installed to a specific version:: - $ ansible webservers -m yum -a "name=acme-1.5 state=installed" + $ ansible webservers -m yum -a "name=acme-1.5 state=present" Ensure a package is at the latest version:: @@ -166,7 +166,7 @@ Ensure a package is at the latest version:: Ensure a package is not installed:: - $ ansible webservers -m yum -a "name=acme state=removed" + $ ansible webservers -m yum -a "name=acme state=absent" Ansible has modules for managing packages under many platforms. If your package manager does not have a module available for it, you can install From 2f869a6309108f4d1178c388b7c2c0b3754a3617 Mon Sep 17 00:00:00 2001 From: Jure Triglav Date: Wed, 29 Oct 2014 14:16:01 +0100 Subject: [PATCH 306/813] Add the default Homebrew path for OpenSSL certs on OS X --- lib/ansible/module_utils/urls.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py index 1280745cc9..c2d87c27bc 100644 --- a/lib/ansible/module_utils/urls.py +++ b/lib/ansible/module_utils/urls.py @@ -219,6 +219,8 @@ class SSLValidationHandler(urllib2.BaseHandler): # Write the dummy ca cert if we are running on Mac OS X if platform == 'Darwin': os.write(tmp_fd, DUMMY_CA_CERT) + # Default Homebrew path for OpenSSL certs + paths_checked.append('/usr/local/etc/openssl') # for all of the paths, find any .crt or .pem files # and compile them into single temp file for use From cc12c9b2265ace64f599447f60ca61e08a7c1726 Mon Sep 17 00:00:00 2001 From: Brian 
Coca Date: Wed, 29 Oct 2014 10:32:17 -0400 Subject: [PATCH 307/813] fixed typo --- docsite/rst/developing_modules.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index 355f402835..4a331626db 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -466,7 +466,7 @@ Included modules will ship with ansible, and also have a change to be promoted t gives them slightly higher development priority (though they'll work in exactly the same way). -Deprecating and makingm module aliases +Deprecating and making module aliases `````````````````````````````````````` Starting in 1.8 you can deprecate modules by renaming them with a preceeding _, i.e. old_cloud.py to From a2498bdadd290154f7f6c325a0c31d361e1e8721 Mon Sep 17 00:00:00 2001 From: Eric Johnson Date: Wed, 29 Oct 2014 19:57:46 +0000 Subject: [PATCH 308/813] Google: allow for different libcloud provider to support upcoming DNS module --- lib/ansible/module_utils/gce.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/gce.py b/lib/ansible/module_utils/gce.py index 68aa66c41a..37a4bf1dea 100644 --- a/lib/ansible/module_utils/gce.py +++ b/lib/ansible/module_utils/gce.py @@ -32,7 +32,7 @@ import pprint USER_AGENT_PRODUCT="Ansible-gce" USER_AGENT_VERSION="v1" -def gce_connect(module): +def gce_connect(module, provider=None): """Return a Google Cloud Engine connection.""" service_account_email = module.params.get('service_account_email', None) pem_file = module.params.get('pem_file', None) @@ -71,8 +71,14 @@ def gce_connect(module): 'secrets file.') return None + # Allow for passing in libcloud Google DNS (e.g, Provider.GOOGLE) + if provider is None: + provider = Provider.GCE + try: - gce = get_driver(Provider.GCE)(service_account_email, pem_file, datacenter=module.params.get('zone'), project=project_id) + gce = 
get_driver(provider)(service_account_email, pem_file, + datacenter=module.params.get('zone', None), + project=project_id) gce.connection.user_agent_append("%s/%s" % ( USER_AGENT_PRODUCT, USER_AGENT_VERSION)) except (RuntimeError, ValueError), e: From 1b70ef6cbaa23fa2399204689a489d39be7a76fb Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 29 Oct 2014 22:33:31 -0400 Subject: [PATCH 309/813] Several changes to ansible-doc added display of deprecated to ansible-doc now it does better job of using tty columns fixed indication truncation of desc with trailing ... removed extension from module list, also fixed matching exlusion blacklist --- bin/ansible-doc | 45 ++++++++++++++++++++++++++++++++++----------- 1 file changed, 34 insertions(+), 11 deletions(-) diff --git a/bin/ansible-doc b/bin/ansible-doc index 8a7faadb24..3c4f84964a 100755 --- a/bin/ansible-doc +++ b/bin/ansible-doc @@ -164,7 +164,11 @@ def get_snippet_text(doc): return "\n".join(text) def get_module_list_text(module_list): + columns = max(60, int(os.popen('stty size', 'r').read().split()[1])) + displace = max(len(x) for x in module_list) + linelimit = columns - displace - 5 text = [] + deprecated = [] for module in sorted(set(module_list)): if module in module_docs.BLACKLIST_MODULES: @@ -181,13 +185,22 @@ def get_module_list_text(module_list): try: doc, plainexamples = module_docs.get_docstring(filename) - desc = tty_ify(doc.get('short_description', '?')) - if len(desc) > 55: - desc = desc + '...' - text.append("%-20s %-60.60s" % (module, desc)) + desc = tty_ify(doc.get('short_description', '?')).strip() + if len(desc) > linelimit: + desc = desc[:linelimit] + '...' 
+ + if module.startswith('_'): # Handle replecated + module = module[1:] + deprecated.append("%-*s %-*.*s" % (displace, module, linelimit, len(desc), desc)) + else: + text.append("%-*s %-*.*s" % (displace, module, linelimit, len(desc), desc)) except: traceback.print_exc() sys.stderr.write("ERROR: module %s has a documentation error formatting or is missing documentation\n" % module) + + if len(deprecated) > 0: + text.append("\nDEPRECATED:") + text.extend(deprecated) return "\n".join(text) def main(): @@ -208,6 +221,11 @@ def main(): default=False, dest='list_dir', help='List available modules') + p.add_option("-c", "--list-columns", + action="store_true", + default=False, + dest='list_columns', + help='List modules in columns') p.add_option("-s", "--snippet", action="store_true", default=False, @@ -221,20 +239,25 @@ def main(): for i in options.module_path.split(os.pathsep): utils.plugins.module_finder.add_directory(i) - if options.list_dir: - # list all modules + if options.list_dir or options.list_deprecated: + # list modules paths = utils.plugins.module_finder._get_paths() module_list = [] - deprecated_list = [] - module_aliases = {} for path in paths: - # os.system("ls -C %s" % (path)) if os.path.isdir(path): for module in os.listdir(path): - if module.startswith('_') or any(module.endswith(x) for x in BLACKLIST_EXTS): + if any(module.endswith(x) for x in BLACKLIST_EXTS): continue - module_list.append(module) + elif module.startswith('__'): + continue + elif module.startswith('_'): + fullpath = '/'.join([path,module]) + if os.path.islink(fullpath): # avoids aliases + continue + module = os.path.splitext(module)[0] # removes the extension + module_list.append(module) + pager(get_module_list_text(module_list)) sys.exit() From e41bcc41d335996d7ff73eb84d8376f19372c297 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 30 Oct 2014 08:56:41 -0400 Subject: [PATCH 310/813] removed 'column display' options as there is no code handling this function --- bin/ansible-doc 
| 5 ----- 1 file changed, 5 deletions(-) diff --git a/bin/ansible-doc b/bin/ansible-doc index 3c4f84964a..d399e4668e 100755 --- a/bin/ansible-doc +++ b/bin/ansible-doc @@ -221,11 +221,6 @@ def main(): default=False, dest='list_dir', help='List available modules') - p.add_option("-c", "--list-columns", - action="store_true", - default=False, - dest='list_columns', - help='List modules in columns') p.add_option("-s", "--snippet", action="store_true", default=False, From 27d741102c4009b89938fe32d8ec50b44b3c8a03 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 30 Oct 2014 11:26:43 -0400 Subject: [PATCH 311/813] Created Deprecated module category that only appears when there is something to show --- hacking/module_formatter.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py index 53c2616533..345c84ca04 100755 --- a/hacking/module_formatter.py +++ b/hacking/module_formatter.py @@ -121,28 +121,33 @@ def write_data(text, options, outputname, module): def list_modules(module_dir): ''' returns a hash of categories, each category being a hash of module names to file paths ''' - categories = dict(all=dict()) + categories = dict(all=dict(),deprecated=dict()) files = glob.glob("%s/*/*" % module_dir) for d in files: if os.path.isdir(d): files2 = glob.glob("%s/*" % d) for f in files2: - if os.path.basename(f).startswith("_"): # skip deprecated/aliases for now - continue + module = os.path.splitext(os.path.basename(f))[0] + category = os.path.dirname(f).split("/")[-1] if not f.endswith(".py") or f.endswith('__init__.py'): # windows powershell modules have documentation stubs in python docstring # format (they are not executed) so skip the ps1 format files continue + elif module.startswith("_"): # Handle deprecated modules + if not os.path.islink(f): # ignores aliases + categories['deprecated'][module] = f + continue + elif module in categories['deprecated']: # Removes dupes + 
categories['deprecated'].pop(module, None) - tokens = f.split("/") - module = tokens[-1].replace(".py","") - category = tokens[-2] if not category in categories: categories[category] = {} categories[category][module] = f categories['all'][module] = f + if not len(categories['deprecated']) > 0: + categories.pop('deprecated', None) return categories ##################################################################################### From 8b5b97d0667186b6adb4e8ba76c62dc9fa01b85f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 30 Oct 2014 13:29:54 -0400 Subject: [PATCH 312/813] now docs handle deprecated modules but still ignore aliases --- hacking/module_formatter.py | 12 ++++++++++-- hacking/templates/rst.j2 | 7 +++++++ 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py index 345c84ca04..61de1ea136 100755 --- a/hacking/module_formatter.py +++ b/hacking/module_formatter.py @@ -202,17 +202,23 @@ def process_module(module, options, env, template, outputname, module_map): fname = module_map[module] + basename = os.path.basename(fname) + deprecated = False # ignore files with extensions - if not os.path.basename(fname).endswith(".py"): + if not basename.endswith(".py"): return + elif basename.startswith("_"): + if os.path.islink(fname): # alias + return + deprecated = True # use ansible core library to parse out doc metadata YAML and plaintext examples doc, examples = ansible.utils.module_docs.get_docstring(fname, verbose=options.verbose) # crash if module is missing documentation and not explicitly hidden from docs index if doc is None and module not in ansible.utils.module_docs.BLACKLIST_MODULES: - sys.stderr.write("*** ERROR: CORE MODULE MISSING DOCUMENTATION: %s, %s ***\n" % (fname, module)) + sys.stderr.write("*** ERROR: MODULE MISSING DOCUMENTATION: %s, %s ***\n" % (fname, module)) sys.exit(1) if doc is None: @@ -254,6 +260,8 @@ def process_module(module, options, env, template, outputname, 
module_map): doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d') doc['ansible_version'] = options.ansible_version doc['plainexamples'] = examples #plain text + if deprecated and 'deprecated' not in doc: + doc['deprecated'] = "This module is deprecated, as such it's use is discouraged." # here is where we build the table of contents... diff --git a/hacking/templates/rst.j2 b/hacking/templates/rst.j2 index fbedae566a..8d6dc1c89b 100644 --- a/hacking/templates/rst.j2 +++ b/hacking/templates/rst.j2 @@ -21,6 +21,13 @@ # --------------------------------------------#} +{% if deprecated is defined -%} +DEPRECATED +---------- + +@{ deprecated }@ +{% endif %} + Synopsis -------- From 0fb0548d0b04cf2a1d9b6755697b7dca45d2dbf8 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 30 Oct 2014 13:40:34 -0400 Subject: [PATCH 313/813] removed no unused var that was not cleaned up properlly --- bin/ansible-doc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/ansible-doc b/bin/ansible-doc index d399e4668e..e4c7d19522 100755 --- a/bin/ansible-doc +++ b/bin/ansible-doc @@ -234,7 +234,7 @@ def main(): for i in options.module_path.split(os.pathsep): utils.plugins.module_finder.add_directory(i) - if options.list_dir or options.list_deprecated: + if options.list_dir: # list modules paths = utils.plugins.module_finder._get_paths() module_list = [] From bd203a44be06ce7fc9b20180ab9ea339e579d54d Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 28 Oct 2014 10:24:33 -0500 Subject: [PATCH 314/813] Adding more unittests for the v2 role class --- v2/ansible/playbook/role.py | 33 ++++++++++++++++++---------- v2/test/playbook/test_role.py | 41 ++++++++++++++++++++++++++++++----- 2 files changed, 58 insertions(+), 16 deletions(-) diff --git a/v2/ansible/playbook/role.py b/v2/ansible/playbook/role.py index 38a8ac195d..b4b7eed012 100644 --- a/v2/ansible/playbook/role.py +++ b/v2/ansible/playbook/role.py @@ -41,6 +41,7 @@ __all__ = ['Role'] # will be based on the 
repr() of the dictionary object) _ROLE_CACHE = dict() +# The valid metadata keys for meta/main.yml files _VALID_METADATA_KEYS = [ 'dependencies', 'allow_duplicates', @@ -369,20 +370,30 @@ class Role(Base): if parent_role not in self._parents: self._parents.append(parent_role) - def get_variables(self): - # returns the merged variables for this role, including - # recursively merging those of all child roles - return dict() + def get_parents(self): + return self._parents - def get_immediate_dependencies(self): - return self._dependencies + # FIXME: not yet used + #def get_variables(self): + # # returns the merged variables for this role, including + # # recursively merging those of all child roles + # return dict() + + def get_direct_dependencies(self): + return self._attributes['dependencies'][:] def get_all_dependencies(self): # returns a list built recursively, of all deps from # all child dependencies - all_deps = [] - for dep in self._dependencies: - list_union(all_deps, dep.get_all_dependencies()) - all_deps = list_union(all_deps, self.dependencies) - return all_deps + + child_deps = [] + direct_deps = self.get_direct_dependencies() + + for dep in direct_deps: + dep_deps = dep.get_all_dependencies() + for dep_dep in dep_deps: + if dep_dep not in child_deps: + child_deps.append(dep_dep) + + return direct_deps + child_deps diff --git a/v2/test/playbook/test_role.py b/v2/test/playbook/test_role.py index 094c5c3f49..2c1ca6c959 100644 --- a/v2/test/playbook/test_role.py +++ b/v2/test/playbook/test_role.py @@ -22,7 +22,7 @@ __metaclass__ = type from ansible.compat.tests import unittest from ansible.compat.tests.mock import patch, MagicMock -from ansible.errors import AnsibleParserError +from ansible.errors import AnsibleError, AnsibleParserError from ansible.playbook.block import Block from ansible.playbook.role import Role from ansible.playbook.task import Task @@ -124,16 +124,30 @@ class TestRole(unittest.TestCase): return ('foo', '/etc/ansible/roles/foo') elif 
role == 'bar': return ('bar', '/etc/ansible/roles/bar') + elif role == 'baz': + return ('baz', '/etc/ansible/roles/baz') + elif role == 'bam': + return ('bam', '/etc/ansible/roles/bam') elif role == 'bad1': return ('bad1', '/etc/ansible/roles/bad1') elif role == 'bad2': return ('bad2', '/etc/ansible/roles/bad2') + elif role == 'recursive1': + return ('recursive1', '/etc/ansible/roles/recursive1') + elif role == 'recursive2': + return ('recursive2', '/etc/ansible/roles/recursive2') def fake_load_role_yaml(role_path, subdir): if role_path == '/etc/ansible/roles/foo': if subdir == 'meta': return dict(dependencies=['bar'], allow_duplicates=True, galaxy_info=dict(a='1', b='2', c='3')) elif role_path == '/etc/ansible/roles/bar': + if subdir == 'meta': + return dict(dependencies=['baz']) + elif role_path == '/etc/ansible/roles/baz': + if subdir == 'meta': + return dict(dependencies=['bam']) + elif role_path == '/etc/ansible/roles/bam': if subdir == 'meta': return dict() elif role_path == '/etc/ansible/roles/bad1': @@ -142,21 +156,36 @@ class TestRole(unittest.TestCase): elif role_path == '/etc/ansible/roles/bad2': if subdir == 'meta': return dict(foo='bar') + elif role_path == '/etc/ansible/roles/recursive1': + if subdir == 'meta': + return dict(dependencies=['recursive2']) + elif role_path == '/etc/ansible/roles/recursive2': + if subdir == 'meta': + return dict(dependencies=['recursive1']) return None _get_role_path.side_effect = fake_get_role_path _load_role_yaml.side_effect = fake_load_role_yaml r = Role.load('foo') - self.assertEqual(len(r.dependencies), 1) - self.assertEqual(type(r.dependencies[0]), Role) - self.assertEqual(len(r.dependencies[0]._parents), 1) - self.assertEqual(r.dependencies[0]._parents[0], r) + role_deps = r.get_direct_dependencies() + + self.assertEqual(len(role_deps), 1) + self.assertEqual(type(role_deps[0]), Role) + self.assertEqual(len(role_deps[0].get_parents()), 1) + self.assertEqual(role_deps[0].get_parents()[0], r) 
self.assertEqual(r.allow_duplicates, True) self.assertEqual(r.galaxy_info, dict(a='1', b='2', c='3')) + all_deps = r.get_all_dependencies() + self.assertEqual(len(all_deps), 3) + self.assertEqual(all_deps[0].role_name, 'bar') + self.assertEqual(all_deps[1].role_name, 'baz') + self.assertEqual(all_deps[2].role_name, 'bam') + self.assertRaises(AnsibleParserError, Role.load, 'bad1') self.assertRaises(AnsibleParserError, Role.load, 'bad2') + self.assertRaises(AnsibleError, Role.load, 'recursive1') @patch.object(Role, '_get_role_path') @patch.object(Role, '_load_role_yaml') @@ -174,4 +203,6 @@ class TestRole(unittest.TestCase): r = Role.load(dict(role='foo')) + # FIXME: add tests for the more complex url-type + # constructions and tags/when statements From 3b0e64127dceb467b04005b3c2abc2b272a03548 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 28 Oct 2014 14:35:29 -0500 Subject: [PATCH 315/813] Refactoring role spec stuff into a dedicated parsing class Also reworking tests to cut down on the number of patches required by sub-classing the DataLoader() class and reworking the base object's structure a bit to allow its use --- v2/ansible/parsing/yaml/__init__.py | 11 +- v2/ansible/playbook/base.py | 20 +- v2/ansible/playbook/block.py | 6 +- v2/ansible/playbook/role.py | 399 ------------------------ v2/ansible/playbook/role/__init__.py | 205 ++++++++++++ v2/ansible/playbook/role/definition.py | 153 +++++++++ v2/ansible/playbook/role/include.py | 52 +++ v2/ansible/playbook/role/metadata.py | 91 ++++++ v2/ansible/playbook/role/requirement.py | 166 ++++++++++ v2/ansible/playbook/task.py | 10 +- v2/test/mock/__init__.py | 20 ++ v2/test/mock/loader.py | 80 +++++ v2/test/playbook/test_role.py | 235 ++++++-------- 13 files changed, 897 insertions(+), 551 deletions(-) delete mode 100644 v2/ansible/playbook/role.py create mode 100644 v2/ansible/playbook/role/__init__.py create mode 100644 v2/ansible/playbook/role/definition.py create mode 100644 
v2/ansible/playbook/role/include.py create mode 100644 v2/ansible/playbook/role/metadata.py create mode 100644 v2/ansible/playbook/role/requirement.py create mode 100644 v2/test/mock/__init__.py create mode 100644 v2/test/mock/loader.py diff --git a/v2/ansible/parsing/yaml/__init__.py b/v2/ansible/parsing/yaml/__init__.py index c382282398..969fd2a3b5 100644 --- a/v2/ansible/parsing/yaml/__init__.py +++ b/v2/ansible/parsing/yaml/__init__.py @@ -91,6 +91,15 @@ class DataLoader(): return parsed_data + def path_exists(self, path): + return os.path.exists(path) + + def is_directory(self, path): + return os.path.isdir(path) + + def is_file(self, path): + return os.path.isfile(path) + def _safe_load(self, stream): ''' Implements yaml.safe_load(), except using our custom loader class. ''' return load(stream, AnsibleLoader) @@ -100,7 +109,7 @@ class DataLoader(): Reads the file contents from the given file name, and will decrypt them if they are found to be vault-encrypted. ''' - if not os.path.exists(file_name) or not os.path.isfile(file_name): + if not self.path_exists(file_name) or not self.is_file(file_name): raise AnsibleParserError("the file_name '%s' does not exist, or is not readable" % file_name) show_content = True diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py index ce0e2a199c..e2b96c8cc2 100644 --- a/v2/ansible/playbook/base.py +++ b/v2/ansible/playbook/base.py @@ -29,13 +29,11 @@ from ansible.parsing.yaml import DataLoader class Base: - _tags = FieldAttribute(isa='list') - _when = FieldAttribute(isa='list') + def __init__(self): - def __init__(self, loader=DataLoader): - - # the data loader class is used to parse data from strings and files - self._loader = loader() + # initialize the data loader, this will be provided later + # when the object is actually loaded + self._loader = None # each class knows attributes set upon it, see Task.py for example self._attributes = dict() @@ -61,11 +59,17 @@ class Base: return ds - def 
load_data(self, ds): + def load_data(self, ds, loader=None): ''' walk the input datastructure and assign any values ''' assert ds is not None + # the data loader class is used to parse data from strings and files + if loader is not None: + self._loader = loader + else: + self._loader = DataLoader() + if isinstance(ds, string_types) or isinstance(ds, FileIO): ds = self._loader.load(ds) @@ -89,6 +93,8 @@ class Base: self.validate() return self + def get_loader(self): + return self._loader def validate(self): ''' validation that is done at parse time, not load time ''' diff --git a/v2/ansible/playbook/block.py b/v2/ansible/playbook/block.py index 5e4826d119..5f21cdaf60 100644 --- a/v2/ansible/playbook/block.py +++ b/v2/ansible/playbook/block.py @@ -28,6 +28,8 @@ class Block(Base): _block = FieldAttribute(isa='list') _rescue = FieldAttribute(isa='list') _always = FieldAttribute(isa='list') + _tags = FieldAttribute(isa='list', default=[]) + _when = FieldAttribute(isa='list', default=[]) # for future consideration? this would be functionally # similar to the 'else' clause for exceptions @@ -43,9 +45,9 @@ class Block(Base): return dict() @staticmethod - def load(data, role=None): + def load(data, role=None, loader=None): b = Block(role=role) - return b.load_data(data) + return b.load_data(data, loader=loader) def munge(self, ds): ''' diff --git a/v2/ansible/playbook/role.py b/v2/ansible/playbook/role.py deleted file mode 100644 index b4b7eed012..0000000000 --- a/v2/ansible/playbook/role.py +++ /dev/null @@ -1,399 +0,0 @@ -# (c) 2012-2014, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -from six import iteritems, string_types - -import os - -from hashlib import md5 - -from ansible.errors import AnsibleError, AnsibleParserError -from ansible.parsing.yaml import DataLoader -from ansible.playbook.attribute import FieldAttribute -from ansible.playbook.base import Base -from ansible.playbook.block import Block - -from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping - -__all__ = ['Role'] - -# The role cache is used to prevent re-loading roles, which -# may already exist. Keys into this cache are the MD5 hash -# of the role definition (for dictionary definitions, this -# will be based on the repr() of the dictionary object) -_ROLE_CACHE = dict() - -# The valid metadata keys for meta/main.yml files -_VALID_METADATA_KEYS = [ - 'dependencies', - 'allow_duplicates', - 'galaxy_info', -] - -class Role(Base): - - _role_name = FieldAttribute(isa='string') - _role_path = FieldAttribute(isa='string') - _src = FieldAttribute(isa='string') - _scm = FieldAttribute(isa='string') - _version = FieldAttribute(isa='string') - _task_blocks = FieldAttribute(isa='list', default=[]) - _handler_blocks = FieldAttribute(isa='list', default=[]) - _params = FieldAttribute(isa='dict', default=dict()) - _default_vars = FieldAttribute(isa='dict', default=dict()) - _role_vars = FieldAttribute(isa='dict', default=dict()) - - # Attributes based on values in metadata. 
These MUST line up - # with the values stored in _VALID_METADATA_KEYS - _dependencies = FieldAttribute(isa='list', default=[]) - _allow_duplicates = FieldAttribute(isa='bool', default=False) - _galaxy_info = FieldAttribute(isa='dict', default=dict()) - - def __init__(self, loader=DataLoader): - self._role_path = None - self._parents = [] - - super(Role, self).__init__(loader=loader) - - def __repr__(self): - return self.get_name() - - def get_name(self): - return self._attributes['role_name'] - - @staticmethod - def load(data, parent_role=None): - assert isinstance(data, string_types) or isinstance(data, dict) - - # Check to see if this role has been loaded already, based on the - # role definition, partially to save loading time and also to make - # sure that roles are run a single time unless specifically allowed - # to run more than once - - # FIXME: the tags and conditionals, if specified in the role def, - # should not figure into the resulting hash - cache_key = md5(repr(data)) - if cache_key in _ROLE_CACHE: - r = _ROLE_CACHE[cache_key] - else: - try: - # load the role - r = Role() - r.load_data(data) - # and cache it for next time - _ROLE_CACHE[cache_key] = r - except RuntimeError: - raise AnsibleError("A recursive loop was detected while loading your roles", obj=data) - - # now add the parent to the (new) role - if parent_role: - r.add_parent(parent_role) - - return r - - #------------------------------------------------------------------------------ - # munge, and other functions used for loading the ds - - def munge(self, ds): - # create the new ds as an AnsibleMapping, so we can preserve any line/column - # data from the parser, and copy that info from the old ds (if applicable) - new_ds = AnsibleMapping() - if isinstance(ds, AnsibleBaseYAMLObject): - new_ds.copy_position_info(ds) - - # Role definitions can be strings or dicts, so we fix things up here. 
- # Anything that is not a role name, tag, or conditional will also be - # added to the params sub-dictionary for loading later - if isinstance(ds, string_types): - new_ds['role_name'] = ds - else: - # munge the role ds here to correctly fill in the various fields which - # may be used to define the role, like: role, src, scm, etc. - ds = self._munge_role(ds) - - # now we split any random role params off from the role spec and store - # them in a dictionary of params for parsing later - params = dict() - attr_names = [attr_name for (attr_name, attr_value) in self._get_base_attributes().iteritems()] - for (key, value) in iteritems(ds): - if key not in attr_names and key != 'role': - # this key does not match a field attribute, so it must be a role param - params[key] = value - else: - # this is a field attribute, so copy it over directly - new_ds[key] = value - new_ds['params'] = params - - # Set the role name and path, based on the role definition - (role_name, role_path) = self._get_role_path(new_ds.get('role_name')) - new_ds['role_name'] = role_name - new_ds['role_path'] = role_path - - # load the role's files, if they exist - new_ds['task_blocks'] = self._load_role_yaml(role_path, 'tasks') - new_ds['handler_blocks'] = self._load_role_yaml(role_path, 'handlers') - new_ds['default_vars'] = self._load_role_yaml(role_path, 'defaults') - new_ds['role_vars'] = self._load_role_yaml(role_path, 'vars') - - # we treat metadata slightly differently: we instead pull out the - # valid metadata keys and munge them directly into new_ds - metadata_ds = self._munge_metadata(role_name, role_path) - new_ds.update(metadata_ds) - - # and return the newly munged ds - return new_ds - - def _load_role_yaml(self, role_path, subdir): - file_path = os.path.join(role_path, subdir) - if os.path.exists(file_path) and os.path.isdir(file_path): - main_file = self._resolve_main(file_path) - if os.path.exists(main_file): - return self._loader.load_from_file(main_file) - return None - - def 
_resolve_main(self, basepath): - ''' flexibly handle variations in main filenames ''' - possible_mains = ( - os.path.join(basepath, 'main'), - os.path.join(basepath, 'main.yml'), - os.path.join(basepath, 'main.yaml'), - os.path.join(basepath, 'main.json'), - ) - - if sum([os.path.isfile(x) for x in possible_mains]) > 1: - raise AnsibleError("found multiple main files at %s, only one allowed" % (basepath)) - else: - for m in possible_mains: - if os.path.isfile(m): - return m # exactly one main file - return possible_mains[0] # zero mains (we still need to return something) - - def _get_role_path(self, role): - ''' - the 'role', as specified in the ds (or as a bare string), can either - be a simple name or a full path. If it is a full path, we use the - basename as the role name, otherwise we take the name as-given and - append it to the default role path - ''' - - # FIXME: this should use unfrackpath once the utils code has been sorted out - role_path = os.path.normpath(role) - if os.path.exists(role_path): - role_name = os.path.basename(role) - return (role_name, role_path) - else: - for path in ('./roles', '/etc/ansible/roles'): - role_path = os.path.join(path, role) - if os.path.exists(role_path): - return (role, role_path) - - # FIXME: make the parser smart about list/string entries - # in the yaml so the error line/file can be reported - # here - raise AnsibleError("the role '%s' was not found" % role, obj=role) - - def _repo_url_to_role_name(self, repo_url): - # gets the role name out of a repo like - # http://git.example.com/repos/repo.git" => "repo" - - if '://' not in repo_url and '@' not in repo_url: - return repo_url - trailing_path = repo_url.split('/')[-1] - if trailing_path.endswith('.git'): - trailing_path = trailing_path[:-4] - if trailing_path.endswith('.tar.gz'): - trailing_path = trailing_path[:-7] - if ',' in trailing_path: - trailing_path = trailing_path.split(',')[0] - return trailing_path - - def _role_spec_parse(self, role_spec): - # takes a 
repo and a version like - # git+http://git.example.com/repos/repo.git,v1.0 - # and returns a list of properties such as: - # { - # 'scm': 'git', - # 'src': 'http://git.example.com/repos/repo.git', - # 'version': 'v1.0', - # 'name': 'repo' - # } - - default_role_versions = dict(git='master', hg='tip') - - role_spec = role_spec.strip() - role_version = '' - if role_spec == "" or role_spec.startswith("#"): - return (None, None, None, None) - - tokens = [s.strip() for s in role_spec.split(',')] - - # assume https://github.com URLs are git+https:// URLs and not - # tarballs unless they end in '.zip' - if 'github.com/' in tokens[0] and not tokens[0].startswith("git+") and not tokens[0].endswith('.tar.gz'): - tokens[0] = 'git+' + tokens[0] - - if '+' in tokens[0]: - (scm, role_url) = tokens[0].split('+') - else: - scm = None - role_url = tokens[0] - - if len(tokens) >= 2: - role_version = tokens[1] - - if len(tokens) == 3: - role_name = tokens[2] - else: - role_name = self._repo_url_to_role_name(tokens[0]) - - if scm and not role_version: - role_version = default_role_versions.get(scm, '') - - return dict(scm=scm, src=role_url, version=role_version, role_name=role_name) - - def _munge_role(self, ds): - if 'role' in ds: - # Old style: {role: "galaxy.role,version,name", other_vars: "here" } - role_info = self._role_spec_parse(ds['role']) - if isinstance(role_info, dict): - # Warning: Slight change in behaviour here. name may be being - # overloaded. Previously, name was only a parameter to the role. - # Now it is both a parameter to the role and the name that - # ansible-galaxy will install under on the local system. 
- if 'name' in ds and 'name' in role_info: - del role_info['name'] - ds.update(role_info) - else: - # New style: { src: 'galaxy.role,version,name', other_vars: "here" } - if 'github.com' in ds["src"] and 'http' in ds["src"] and '+' not in ds["src"] and not ds["src"].endswith('.tar.gz'): - ds["src"] = "git+" + ds["src"] - - if '+' in ds["src"]: - (scm, src) = ds["src"].split('+') - ds["scm"] = scm - ds["src"] = src - - if 'name' in role: - ds["role"] = ds["name"] - del ds["name"] - else: - ds["role"] = self._repo_url_to_role_name(ds["src"]) - - # set some values to a default value, if none were specified - ds.setdefault('version', '') - ds.setdefault('scm', None) - - return ds - - def _munge_metadata(self, role_name, role_path): - ''' - loads the metadata main.yml (if it exists) and creates a clean - datastructure we can merge into the newly munged ds - ''' - - meta_ds = dict() - - metadata = self._load_role_yaml(role_path, 'meta') - if metadata: - if not isinstance(metadata, dict): - raise AnsibleParserError("The metadata for role '%s' should be a dictionary, instead it is a %s" % (role_name, type(metadata)), obj=metadata) - - for key in metadata: - if key in _VALID_METADATA_KEYS: - if isinstance(metadata[key], dict): - meta_ds[key] = metadata[key].copy() - elif isinstance(metadata[key], list): - meta_ds[key] = metadata[key][:] - else: - meta_ds[key] = metadata[key] - else: - raise AnsibleParserError("%s is not a valid metadata key for role '%s'" % (key, role_name), obj=metadata) - - return meta_ds - - #------------------------------------------------------------------------------ - # attribute loading defs - - def _load_list_of_blocks(self, ds): - assert type(ds) == list - block_list = [] - for block in ds: - b = Block(block) - block_list.append(b) - return block_list - - def _load_task_blocks(self, attr, ds): - if ds is None: - return [] - return self._load_list_of_blocks(ds) - - def _load_handler_blocks(self, attr, ds): - if ds is None: - return [] - return 
self._load_list_of_blocks(ds) - - def _load_dependencies(self, attr, ds): - assert type(ds) in (list, type(None)) - - deps = [] - if ds: - for role_def in ds: - r = Role.load(role_def, parent_role=self) - deps.append(r) - return deps - - #------------------------------------------------------------------------------ - # other functions - - def add_parent(self, parent_role): - ''' adds a role to the list of this roles parents ''' - assert isinstance(parent_role, Role) - - if parent_role not in self._parents: - self._parents.append(parent_role) - - def get_parents(self): - return self._parents - - # FIXME: not yet used - #def get_variables(self): - # # returns the merged variables for this role, including - # # recursively merging those of all child roles - # return dict() - - def get_direct_dependencies(self): - return self._attributes['dependencies'][:] - - def get_all_dependencies(self): - # returns a list built recursively, of all deps from - # all child dependencies - - child_deps = [] - direct_deps = self.get_direct_dependencies() - - for dep in direct_deps: - dep_deps = dep.get_all_dependencies() - for dep_dep in dep_deps: - if dep_dep not in child_deps: - child_deps.append(dep_dep) - - return direct_deps + child_deps - diff --git a/v2/ansible/playbook/role/__init__.py b/v2/ansible/playbook/role/__init__.py new file mode 100644 index 0000000000..ed7355f921 --- /dev/null +++ b/v2/ansible/playbook/role/__init__.py @@ -0,0 +1,205 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from six import iteritems, string_types + +import os + +from hashlib import md5 +from types import NoneType + +from ansible.errors import AnsibleError, AnsibleParserError +from ansible.parsing.yaml import DataLoader +from ansible.playbook.attribute import FieldAttribute +from ansible.playbook.base import Base +from ansible.playbook.block import Block +from ansible.playbook.role.include import RoleInclude +from ansible.playbook.role.metadata import RoleMetadata + + +__all__ = ['Role', 'ROLE_CACHE'] + + +# The role cache is used to prevent re-loading roles, which +# may already exist. Keys into this cache are the MD5 hash +# of the role definition (for dictionary definitions, this +# will be based on the repr() of the dictionary object) +ROLE_CACHE = dict() + + +class Role: + + def __init__(self): + self._role_name = None + self._role_path = None + self._role_params = dict() + self._loader = None + + self._metadata = None + self._parents = [] + self._dependencies = [] + self._task_blocks = [] + self._handler_blocks = [] + self._default_vars = dict() + self._role_vars = dict() + + def __repr__(self): + return self.get_name() + + def get_name(self): + return self._role_name + + @staticmethod + def load(role_include, parent_role=None): + # FIXME: add back in the role caching support + try: + r = Role() + r._load_role_data(role_include, parent_role=parent_role) + except RuntimeError: + # FIXME: needs a better way to access the ds in the role include + raise AnsibleError("A recursion loop was detected with the roles specified. 
Make sure child roles do not have dependencies on parent roles", obj=role_include._ds) + return r + + def _load_role_data(self, role_include, parent_role=None): + self._role_name = role_include.role + self._role_path = role_include.get_role_path() + self._role_params = role_include.get_role_params() + self._loader = role_include.get_loader() + + if parent_role: + self.add_parent(parent_role) + + # load the role's files, if they exist + metadata = self._load_role_yaml('meta') + if metadata: + self._metadata = RoleMetadata.load(metadata, owner=self, loader=self._loader) + self._dependencies = self._load_dependencies() + + task_data = self._load_role_yaml('tasks') + if task_data: + self._task_blocks = self._load_list_of_blocks(task_data) + + handler_data = self._load_role_yaml('handlers') + if handler_data: + self._handler_blocks = self._load_list_of_blocks(handler_data) + + # vars and default vars are regular dictionaries + self._role_vars = self._load_role_yaml('vars') + if not isinstance(self._role_vars, (dict, NoneType)): + raise AnsibleParserError("The vars/main.yml file for role '%s' must contain a dictionary of variables" % self._role_name, obj=ds) + + self._default_vars = self._load_role_yaml('defaults') + if not isinstance(self._default_vars, (dict, NoneType)): + raise AnsibleParserError("The default/main.yml file for role '%s' must contain a dictionary of variables" % self._role_name, obj=ds) + + def _load_role_yaml(self, subdir): + file_path = os.path.join(self._role_path, subdir) + if self._loader.path_exists(file_path) and self._loader.is_directory(file_path): + main_file = self._resolve_main(file_path) + if self._loader.path_exists(main_file): + return self._loader.load_from_file(main_file) + return None + + def _resolve_main(self, basepath): + ''' flexibly handle variations in main filenames ''' + possible_mains = ( + os.path.join(basepath, 'main.yml'), + os.path.join(basepath, 'main.yaml'), + os.path.join(basepath, 'main.json'), + 
os.path.join(basepath, 'main'), + ) + + if sum([self._loader.is_file(x) for x in possible_mains]) > 1: + raise AnsibleError("found multiple main files at %s, only one allowed" % (basepath)) + else: + for m in possible_mains: + if self._loader.is_file(m): + return m # exactly one main file + return possible_mains[0] # zero mains (we still need to return something) + + def _load_list_of_blocks(self, ds): + ''' + Given a list of mixed task/block data (parsed from YAML), + return a list of Block() objects, where implicit blocks + are created for each bare Task. + ''' + + assert type(ds) in (list, NoneType) + + block_list = [] + if ds: + for block in ds: + b = Block(block) + block_list.append(b) + + return block_list + + def _load_dependencies(self): + ''' + Recursively loads role dependencies from the metadata list of + dependencies, if it exists + ''' + + deps = [] + if self._metadata: + for role_include in self._metadata.dependencies: + r = Role.load(role_include, parent_role=self) + deps.append(r) + + return deps + + #------------------------------------------------------------------------------ + # other functions + + def add_parent(self, parent_role): + ''' adds a role to the list of this roles parents ''' + assert isinstance(parent_role, Role) + + if parent_role not in self._parents: + self._parents.append(parent_role) + + def get_parents(self): + return self._parents + + # FIXME: not yet used + #def get_variables(self): + # # returns the merged variables for this role, including + # # recursively merging those of all child roles + # return dict() + + def get_direct_dependencies(self): + return self._dependencies[:] + + def get_all_dependencies(self): + # returns a list built recursively, of all deps from + # all child dependencies + + child_deps = [] + direct_deps = self.get_direct_dependencies() + + for dep in direct_deps: + dep_deps = dep.get_all_dependencies() + for dep_dep in dep_deps: + if dep_dep not in child_deps: + child_deps.append(dep_dep) + + return 
direct_deps + child_deps + diff --git a/v2/ansible/playbook/role/definition.py b/v2/ansible/playbook/role/definition.py new file mode 100644 index 0000000000..08d62afbe4 --- /dev/null +++ b/v2/ansible/playbook/role/definition.py @@ -0,0 +1,153 @@ +# (c) 2014 Michael DeHaan, +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from six import iteritems, string_types + +import os + +from ansible.errors import AnsibleError +from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping +from ansible.playbook.attribute import Attribute, FieldAttribute +from ansible.playbook.base import Base + + +__all__ = ['RoleDefinition'] + + +class RoleDefinition(Base): + + _role = FieldAttribute(isa='string') + + def __init__(self): + self._role_path = None + self._role_params = dict() + super(RoleDefinition, self).__init__() + + def __repr__(self): + return 'ROLEDEF: ' + self._attributes.get('role', '') + + @staticmethod + def load(data, loader=None): + raise AnsibleError("not implemented") + + def munge(self, ds): + + assert isinstance(ds, dict) or isinstance(ds, string_types) + + # we create a new data structure here, using the same + # object used internally by the YAML parsing code so we + # can preserve file:line:column information if it exists + new_ds = 
AnsibleMapping() + if isinstance(ds, AnsibleBaseYAMLObject): + new_ds.copy_position_info(ds) + + # first we pull the role name out of the data structure, + # and then use that to determine the role path (which may + # result in a new role name, if it was a file path) + role_name = self._load_role_name(ds) + (role_name, role_path) = self._load_role_path(role_name) + + # next, we split the role params out from the valid role + # attributes and update the new datastructure with that + # result and the role name + if isinstance(ds, dict): + (new_role_def, role_params) = self._split_role_params(ds) + new_ds.update(new_role_def) + self._role_params = role_params + + # set the role name in the new ds + new_ds['role'] = role_name + + # we store the role path internally + self._role_path = role_path + + # save the original ds for use later + self._ds = ds + + # and return the cleaned-up data structure + return new_ds + + def _load_role_name(self, ds): + ''' + Returns the role name (either the role: or name: field) from + the role definition, or (when the role definition is a simple + string), just that string + ''' + + if isinstance(ds, string_types): + return ds + + role_name = ds.get('role', ds.get('name')) + if not role_name: + raise AnsibleError('role definitions must contain a role name', obj=ds) + + return role_name + + def _load_role_path(self, role_name): + ''' + the 'role', as specified in the ds (or as a bare string), can either + be a simple name or a full path. 
If it is a full path, we use the + basename as the role name, otherwise we take the name as-given and + append it to the default role path + ''' + + # FIXME: this should use unfrackpath once the utils code has been sorted out + role_path = os.path.normpath(role_name) + if self._loader.path_exists(role_path): + role_name = os.path.basename(role_name) + return (role_name, role_path) + else: + # FIXME: this should search in the configured roles path + for path in ('./roles', '/etc/ansible/roles'): + role_path = os.path.join(path, role_name) + if self._loader.path_exists(role_path): + return (role_name, role_path) + + # FIXME: make the parser smart about list/string entries + # in the yaml so the error line/file can be reported + # here + raise AnsibleError("the role '%s' was not found" % role_name) + + def _split_role_params(self, ds): + ''' + Splits any random role params off from the role spec and store + them in a dictionary of params for parsing later + ''' + + role_def = dict() + role_params = dict() + for (key, value) in iteritems(ds): + # use the list of FieldAttribute values to determine what is and is not + # an extra parameter for this role (or sub-class of this role) + if key not in [attr_name for (attr_name, attr_value) in self._get_base_attributes().iteritems()]: + # this key does not match a field attribute, so it must be a role param + role_params[key] = value + else: + # this is a field attribute, so copy it over directly + role_def[key] = value + + return (role_def, role_params) + + def get_role_params(self): + return self._role_params.copy() + + def get_role_path(self): + return self._role_path diff --git a/v2/ansible/playbook/role/include.py b/v2/ansible/playbook/role/include.py new file mode 100644 index 0000000000..d36b0a9397 --- /dev/null +++ b/v2/ansible/playbook/role/include.py @@ -0,0 +1,52 @@ +# (c) 2014 Michael DeHaan, +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the 
terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from six import iteritems, string_types + +import os + +from ansible.errors import AnsibleError, AnsibleParserError +from ansible.playbook.attribute import Attribute, FieldAttribute +from ansible.playbook.role.definition import RoleDefinition + + +__all__ = ['RoleInclude'] + + +class RoleInclude(RoleDefinition): + + """ + FIXME: docstring + """ + + _tags = FieldAttribute(isa='list', default=[]) + _when = FieldAttribute(isa='list', default=[]) + + def __init__(self): + super(RoleInclude, self).__init__() + + @staticmethod + def load(data, parent_role=None, loader=None): + assert isinstance(data, string_types) or isinstance(data, dict) + + ri = RoleInclude() + return ri.load_data(data, loader=loader) + diff --git a/v2/ansible/playbook/role/metadata.py b/v2/ansible/playbook/role/metadata.py new file mode 100644 index 0000000000..9e732d6eea --- /dev/null +++ b/v2/ansible/playbook/role/metadata.py @@ -0,0 +1,91 @@ +# (c) 2014 Michael DeHaan, +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from six import iteritems, string_types + +from ansible.errors import AnsibleError, AnsibleParserError +from ansible.playbook.attribute import Attribute, FieldAttribute +from ansible.playbook.base import Base +from ansible.playbook.role.include import RoleInclude + + +__all__ = ['RoleMetadata'] + + +class RoleMetadata(Base): + ''' + This class wraps the parsing and validation of the optional metadata + within each Role (meta/main.yml). + ''' + + _allow_duplicates = FieldAttribute(isa='bool', default=False) + _dependencies = FieldAttribute(isa='list', default=[]) + _galaxy_info = FieldAttribute(isa='GalaxyInfo') + + def __init__(self): + self._owner = None + super(RoleMetadata, self).__init__() + + @staticmethod + def load(data, owner, loader=None): + ''' + Returns a new RoleMetadata object based on the datastructure passed in. 
+ ''' + + if not isinstance(data, dict): + raise AnsibleParserError("the 'meta/main.yml' for role %s is not a dictionary" % owner.get_name()) + + m = RoleMetadata().load_data(data, loader=loader) + return m + + def munge(self, ds): + # make sure there are no keys in the datastructure which + # do not map to attributes for this object + valid_attrs = [name for (name, attribute) in iteritems(self._get_base_attributes())] + for name in ds: + if name not in valid_attrs: + print("'%s' is not a valid attribute" % name) + raise AnsibleParserError("'%s' is not a valid attribute" % name, obj=ds) + return ds + + def _load_dependencies(self, attr, ds): + ''' + This is a helper loading function for the dependencis list, + which returns a list of RoleInclude objects + ''' + + assert isinstance(ds, list) + + deps = [] + for role_def in ds: + i = RoleInclude.load(role_def, loader=self._loader) + deps.append(i) + + return deps + + def _load_galaxy_info(self, attr, ds): + ''' + This is a helper loading function for the galaxy info entry + in the metadata, which returns a GalaxyInfo object rather than + a simple dictionary. + ''' + + return ds diff --git a/v2/ansible/playbook/role/requirement.py b/v2/ansible/playbook/role/requirement.py new file mode 100644 index 0000000000..d321f6e17d --- /dev/null +++ b/v2/ansible/playbook/role/requirement.py @@ -0,0 +1,166 @@ +# (c) 2014 Michael DeHaan, +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from six import iteritems, string_types + +import os + +from ansible.errors import AnsibleError, AnsibleParserError +from ansible.playbook.role.definition import RoleDefinition + +__all__ = ['RoleRequirement'] + + +class RoleRequirement(RoleDefinition): + + """ + FIXME: document various ways role specs can be specified + """ + + def __init__(self): + pass + + def _get_valid_spec_keys(self): + return ( + 'name', + 'role', + 'scm', + 'src', + 'version', + ) + + def parse(self, ds): + ''' + FIXME: docstring + ''' + + assert type(ds) == dict or isinstance(ds, string_types) + + role_name = '' + role_params = dict() + new_ds = dict() + + if isinstance(ds, string_types): + role_name = ds + else: + ds = self._munge_role_spec(ds) + (new_ds, role_params) = self._split_role_params(ds) + + # pull the role name out of the ds + role_name = new_ds.get('role_name') + del ds['role_name'] + + return (new_ds, role_name, role_params) + + def _munge_role_spec(self, ds): + if 'role' in ds: + # Old style: {role: "galaxy.role,version,name", other_vars: "here" } + role_info = self._role_spec_parse(ds['role']) + if isinstance(role_info, dict): + # Warning: Slight change in behaviour here. name may be being + # overloaded. Previously, name was only a parameter to the role. + # Now it is both a parameter to the role and the name that + # ansible-galaxy will install under on the local system. 
+ if 'name' in ds and 'name' in role_info: + del role_info['name'] + ds.update(role_info) + else: + # New style: { src: 'galaxy.role,version,name', other_vars: "here" } + if 'github.com' in ds["src"] and 'http' in ds["src"] and '+' not in ds["src"] and not ds["src"].endswith('.tar.gz'): + ds["src"] = "git+" + ds["src"] + + if '+' in ds["src"]: + (scm, src) = ds["src"].split('+') + ds["scm"] = scm + ds["src"] = src + + if 'name' in role: + ds["role"] = ds["name"] + del ds["name"] + else: + ds["role"] = self._repo_url_to_role_name(ds["src"]) + + # set some values to a default value, if none were specified + ds.setdefault('version', '') + ds.setdefault('scm', None) + + return ds + + def _repo_url_to_role_name(self, repo_url): + # gets the role name out of a repo like + # http://git.example.com/repos/repo.git" => "repo" + + if '://' not in repo_url and '@' not in repo_url: + return repo_url + trailing_path = repo_url.split('/')[-1] + if trailing_path.endswith('.git'): + trailing_path = trailing_path[:-4] + if trailing_path.endswith('.tar.gz'): + trailing_path = trailing_path[:-7] + if ',' in trailing_path: + trailing_path = trailing_path.split(',')[0] + return trailing_path + + def _role_spec_parse(self, role_spec): + # takes a repo and a version like + # git+http://git.example.com/repos/repo.git,v1.0 + # and returns a list of properties such as: + # { + # 'scm': 'git', + # 'src': 'http://git.example.com/repos/repo.git', + # 'version': 'v1.0', + # 'name': 'repo' + # } + + default_role_versions = dict(git='master', hg='tip') + + role_spec = role_spec.strip() + role_version = '' + if role_spec == "" or role_spec.startswith("#"): + return (None, None, None, None) + + tokens = [s.strip() for s in role_spec.split(',')] + + # assume https://github.com URLs are git+https:// URLs and not + # tarballs unless they end in '.zip' + if 'github.com/' in tokens[0] and not tokens[0].startswith("git+") and not tokens[0].endswith('.tar.gz'): + tokens[0] = 'git+' + tokens[0] + + if '+' 
in tokens[0]: + (scm, role_url) = tokens[0].split('+') + else: + scm = None + role_url = tokens[0] + + if len(tokens) >= 2: + role_version = tokens[1] + + if len(tokens) == 3: + role_name = tokens[2] + else: + role_name = self._repo_url_to_role_name(tokens[0]) + + if scm and not role_version: + role_version = default_role_versions.get(scm, '') + + return dict(scm=scm, src=role_url, version=role_version, role_name=role_name) + + diff --git a/v2/ansible/playbook/task.py b/v2/ansible/playbook/task.py index aa79d49410..422668148b 100644 --- a/v2/ansible/playbook/task.py +++ b/v2/ansible/playbook/task.py @@ -83,14 +83,16 @@ class Task(Base): _sudo = FieldAttribute(isa='bool') _sudo_user = FieldAttribute(isa='string') _sudo_pass = FieldAttribute(isa='string') + _tags = FieldAttribute(isa='list', default=[]) _transport = FieldAttribute(isa='string') _until = FieldAttribute(isa='list') # ? + _when = FieldAttribute(isa='list', default=[]) - def __init__(self, block=None, role=None, loader=DataLoader): + def __init__(self, block=None, role=None): ''' constructors a task, without the Task.load classmethod, it will be pretty blank ''' self._block = block self._role = role - super(Task, self).__init__(loader) + super(Task, self).__init__() def get_name(self): ''' return the name of the task ''' @@ -118,9 +120,9 @@ class Task(Base): return buf @staticmethod - def load(data, block=None, role=None): + def load(data, block=None, role=None, loader=None): t = Task(block=block, role=role) - return t.load_data(data) + return t.load_data(data, loader=loader) def __repr__(self): ''' returns a human readable representation of the task ''' diff --git a/v2/test/mock/__init__.py b/v2/test/mock/__init__.py new file mode 100644 index 0000000000..ae8ccff595 --- /dev/null +++ b/v2/test/mock/__init__.py @@ -0,0 +1,20 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General 
Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type diff --git a/v2/test/mock/loader.py b/v2/test/mock/loader.py new file mode 100644 index 0000000000..89dbfeea62 --- /dev/null +++ b/v2/test/mock/loader.py @@ -0,0 +1,80 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from ansible.parsing.yaml import DataLoader + +class DictDataLoader(DataLoader): + + def __init__(self, file_mapping=dict()): + assert type(file_mapping) == dict + + self._file_mapping = file_mapping + self._build_known_directories() + + super(DictDataLoader, self).__init__() + + def load_from_file(self, path): + if path in self._file_mapping: + return self.load(self._file_mapping[path], path) + return None + + def path_exists(self, path): + return path in self._file_mapping or path in self._known_directories + + def is_file(self, path): + return path in self._file_mapping + + def is_directory(self, path): + return path in self._known_directories + + def _add_known_directory(self, directory): + if directory not in self._known_directories: + self._known_directories.append(directory) + + def _build_known_directories(self): + self._known_directories = [] + for path in self._file_mapping: + dirname = os.path.dirname(path) + while dirname not in ('/', ''): + self._add_known_directory(dirname) + dirname = os.path.dirname(dirname) + + def push(self, path, content): + rebuild_dirs = False + if path not in self._file_mapping: + rebuild_dirs = True + + self._file_mapping[path] = content + + if rebuild_dirs: + self._build_known_directories() + + def pop(self, path): + if path in self._file_mapping: + del self._file_mapping[path] + self._build_known_directories() + + def clear(self): + self._file_mapping = dict() + self._known_directories = [] + diff --git a/v2/test/playbook/test_role.py b/v2/test/playbook/test_role.py index 2c1ca6c959..d0f3708898 100644 --- a/v2/test/playbook/test_role.py +++ b/v2/test/playbook/test_role.py @@ -25,9 +25,10 @@ from ansible.compat.tests.mock import patch, MagicMock from ansible.errors import AnsibleError, AnsibleParserError from ansible.playbook.block import Block from ansible.playbook.role import Role 
+from ansible.playbook.role.include import RoleInclude from ansible.playbook.task import Task -from ansible.parsing.yaml import DataLoader +from test.mock.loader import DictDataLoader class TestRole(unittest.TestCase): @@ -37,172 +38,130 @@ class TestRole(unittest.TestCase): def tearDown(self): pass - def test_construct_empty_block(self): - r = Role() + def test_load_role_with_tasks(self): - @patch.object(DataLoader, 'load_from_file') - def test__load_role_yaml(self, _load_from_file): - _load_from_file.return_value = dict(foo='bar') - r = Role() - with patch('os.path.exists', return_value=True): - with patch('os.path.isdir', return_value=True): - res = r._load_role_yaml('/fake/path', 'some_subdir') - self.assertEqual(res, dict(foo='bar')) + fake_loader = DictDataLoader({ + "/etc/ansible/roles/foo/tasks/main.yml": """ + - shell: echo 'hello world' + """, + }) - def test_role__load_list_of_blocks(self): - task = dict(action='test') - r = Role() - self.assertEqual(r._load_list_of_blocks([]), []) - res = r._load_list_of_blocks([task]) - self.assertEqual(len(res), 1) - assert isinstance(res[0], Block) - res = r._load_list_of_blocks([task,task,task]) - self.assertEqual(len(res), 3) + i = RoleInclude.load('foo', loader=fake_loader) + r = Role.load(i) - @patch.object(Role, '_get_role_path') - @patch.object(Role, '_load_role_yaml') - def test_load_role_with_tasks(self, _load_role_yaml, _get_role_path): + self.assertEqual(str(r), 'foo') + self.assertEqual(len(r._task_blocks), 1) + assert isinstance(r._task_blocks[0], Block) - _get_role_path.return_value = ('foo', '/etc/ansible/roles/foo') + def test_load_role_with_handlers(self): - def fake_load_role_yaml(role_path, subdir): - if role_path == '/etc/ansible/roles/foo': - if subdir == 'tasks': - return [dict(shell='echo "hello world"')] - return None + fake_loader = DictDataLoader({ + "/etc/ansible/roles/foo/handlers/main.yml": """ + - name: test handler + shell: echo 'hello world' + """, + }) - _load_role_yaml.side_effect = 
fake_load_role_yaml + i = RoleInclude.load('foo', loader=fake_loader) + r = Role.load(i) - r = Role.load('foo') - self.assertEqual(len(r.task_blocks), 1) - assert isinstance(r.task_blocks[0], Block) + self.assertEqual(len(r._handler_blocks), 1) + assert isinstance(r._handler_blocks[0], Block) - @patch.object(Role, '_get_role_path') - @patch.object(Role, '_load_role_yaml') - def test_load_role_with_handlers(self, _load_role_yaml, _get_role_path): + def test_load_role_with_vars(self): - _get_role_path.return_value = ('foo', '/etc/ansible/roles/foo') + fake_loader = DictDataLoader({ + "/etc/ansible/roles/foo/defaults/main.yml": """ + foo: bar + """, + "/etc/ansible/roles/foo/vars/main.yml": """ + foo: bam + """, + }) - def fake_load_role_yaml(role_path, subdir): - if role_path == '/etc/ansible/roles/foo': - if subdir == 'handlers': - return [dict(name='test handler', shell='echo "hello world"')] - return None + i = RoleInclude.load('foo', loader=fake_loader) + r = Role.load(i) - _load_role_yaml.side_effect = fake_load_role_yaml + self.assertEqual(r._default_vars, dict(foo='bar')) + self.assertEqual(r._role_vars, dict(foo='bam')) - r = Role.load('foo') - self.assertEqual(len(r.handler_blocks), 1) - assert isinstance(r.handler_blocks[0], Block) + def test_load_role_with_metadata(self): - @patch.object(Role, '_get_role_path') - @patch.object(Role, '_load_role_yaml') - def test_load_role_with_vars(self, _load_role_yaml, _get_role_path): + fake_loader = DictDataLoader({ + '/etc/ansible/roles/foo/meta/main.yml': """ + allow_duplicates: true + dependencies: + - bar + galaxy_info: + a: 1 + b: 2 + c: 3 + """, + '/etc/ansible/roles/bar/meta/main.yml': """ + dependencies: + - baz + """, + '/etc/ansible/roles/baz/meta/main.yml': """ + dependencies: + - bam + """, + '/etc/ansible/roles/bam/meta/main.yml': """ + dependencies: [] + """, + '/etc/ansible/roles/bad1/meta/main.yml': """ + 1 + """, + '/etc/ansible/roles/bad2/meta/main.yml': """ + foo: bar + """, + 
'/etc/ansible/roles/recursive1/meta/main.yml': """ + dependencies: ['recursive2'] + """, + '/etc/ansible/roles/recursive2/meta/main.yml': """ + dependencies: ['recursive1'] + """, + }) - _get_role_path.return_value = ('foo', '/etc/ansible/roles/foo') + i = RoleInclude.load('foo', loader=fake_loader) + r = Role.load(i) - def fake_load_role_yaml(role_path, subdir): - if role_path == '/etc/ansible/roles/foo': - if subdir == 'defaults': - return dict(foo='bar') - elif subdir == 'vars': - return dict(foo='bam') - return None - - _load_role_yaml.side_effect = fake_load_role_yaml - - r = Role.load('foo') - self.assertEqual(r.default_vars, dict(foo='bar')) - self.assertEqual(r.role_vars, dict(foo='bam')) - - @patch.object(Role, '_get_role_path') - @patch.object(Role, '_load_role_yaml') - def test_load_role_with_metadata(self, _load_role_yaml, _get_role_path): - - def fake_get_role_path(role): - if role == 'foo': - return ('foo', '/etc/ansible/roles/foo') - elif role == 'bar': - return ('bar', '/etc/ansible/roles/bar') - elif role == 'baz': - return ('baz', '/etc/ansible/roles/baz') - elif role == 'bam': - return ('bam', '/etc/ansible/roles/bam') - elif role == 'bad1': - return ('bad1', '/etc/ansible/roles/bad1') - elif role == 'bad2': - return ('bad2', '/etc/ansible/roles/bad2') - elif role == 'recursive1': - return ('recursive1', '/etc/ansible/roles/recursive1') - elif role == 'recursive2': - return ('recursive2', '/etc/ansible/roles/recursive2') - - def fake_load_role_yaml(role_path, subdir): - if role_path == '/etc/ansible/roles/foo': - if subdir == 'meta': - return dict(dependencies=['bar'], allow_duplicates=True, galaxy_info=dict(a='1', b='2', c='3')) - elif role_path == '/etc/ansible/roles/bar': - if subdir == 'meta': - return dict(dependencies=['baz']) - elif role_path == '/etc/ansible/roles/baz': - if subdir == 'meta': - return dict(dependencies=['bam']) - elif role_path == '/etc/ansible/roles/bam': - if subdir == 'meta': - return dict() - elif role_path == 
'/etc/ansible/roles/bad1': - if subdir == 'meta': - return 1 - elif role_path == '/etc/ansible/roles/bad2': - if subdir == 'meta': - return dict(foo='bar') - elif role_path == '/etc/ansible/roles/recursive1': - if subdir == 'meta': - return dict(dependencies=['recursive2']) - elif role_path == '/etc/ansible/roles/recursive2': - if subdir == 'meta': - return dict(dependencies=['recursive1']) - return None - - _get_role_path.side_effect = fake_get_role_path - _load_role_yaml.side_effect = fake_load_role_yaml - - r = Role.load('foo') role_deps = r.get_direct_dependencies() self.assertEqual(len(role_deps), 1) self.assertEqual(type(role_deps[0]), Role) self.assertEqual(len(role_deps[0].get_parents()), 1) self.assertEqual(role_deps[0].get_parents()[0], r) - self.assertEqual(r.allow_duplicates, True) - self.assertEqual(r.galaxy_info, dict(a='1', b='2', c='3')) + self.assertEqual(r._metadata.allow_duplicates, True) + self.assertEqual(r._metadata.galaxy_info, dict(a=1, b=2, c=3)) all_deps = r.get_all_dependencies() self.assertEqual(len(all_deps), 3) - self.assertEqual(all_deps[0].role_name, 'bar') - self.assertEqual(all_deps[1].role_name, 'baz') - self.assertEqual(all_deps[2].role_name, 'bam') + self.assertEqual(all_deps[0].get_name(), 'bar') + self.assertEqual(all_deps[1].get_name(), 'baz') + self.assertEqual(all_deps[2].get_name(), 'bam') - self.assertRaises(AnsibleParserError, Role.load, 'bad1') - self.assertRaises(AnsibleParserError, Role.load, 'bad2') - self.assertRaises(AnsibleError, Role.load, 'recursive1') + i = RoleInclude.load('bad1', loader=fake_loader) + self.assertRaises(AnsibleParserError, Role.load, i) - @patch.object(Role, '_get_role_path') - @patch.object(Role, '_load_role_yaml') - def test_load_role_complex(self, _load_role_yaml, _get_role_path): + i = RoleInclude.load('bad2', loader=fake_loader) + self.assertRaises(AnsibleParserError, Role.load, i) - _get_role_path.return_value = ('foo', '/etc/ansible/roles/foo') + i = RoleInclude.load('recursive1', 
loader=fake_loader) + self.assertRaises(AnsibleError, Role.load, i) - def fake_load_role_yaml(role_path, subdir): - if role_path == '/etc/ansible/roles/foo': - if subdir == 'tasks': - return [dict(shell='echo "hello world"')] - return None + def test_load_role_complex(self): - _load_role_yaml.side_effect = fake_load_role_yaml + # FIXME: add tests for the more complex uses of + # params and tags/when statements - r = Role.load(dict(role='foo')) + fake_loader = DictDataLoader({ + "/etc/ansible/roles/foo/tasks/main.yml": """ + - shell: echo 'hello world' + """, + }) - # FIXME: add tests for the more complex url-type - # constructions and tags/when statements + i = RoleInclude.load(dict(role='foo'), loader=fake_loader) + r = Role.load(i) + + self.assertEqual(r.get_name(), "foo") From 94a732fb1a122e6018713b0b7cc3f359e62a88c1 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 30 Oct 2014 13:50:34 -0400 Subject: [PATCH 316/813] fixed typo in comments --- bin/ansible-doc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/ansible-doc b/bin/ansible-doc index e4c7d19522..5a708a421c 100755 --- a/bin/ansible-doc +++ b/bin/ansible-doc @@ -189,7 +189,7 @@ def get_module_list_text(module_list): if len(desc) > linelimit: desc = desc[:linelimit] + '...' - if module.startswith('_'): # Handle replecated + if module.startswith('_'): # Handle deprecated module = module[1:] deprecated.append("%-*s %-*.*s" % (displace, module, linelimit, len(desc), desc)) else: From 0317e7b91029ce90079802f125155c1944cf279c Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 30 Oct 2014 13:53:05 -0400 Subject: [PATCH 317/813] avoid modifying module var by just passing the substring to the append --- bin/ansible-doc | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/bin/ansible-doc b/bin/ansible-doc index 5a708a421c..aed7d4d23c 100755 --- a/bin/ansible-doc +++ b/bin/ansible-doc @@ -190,8 +190,7 @@ def get_module_list_text(module_list): desc = desc[:linelimit] + '...' 
if module.startswith('_'): # Handle deprecated - module = module[1:] - deprecated.append("%-*s %-*.*s" % (displace, module, linelimit, len(desc), desc)) + deprecated.append("%-*s %-*.*s" % (displace, module[1:], linelimit, len(desc), desc)) else: text.append("%-*s %-*.*s" % (displace, module, linelimit, len(desc), desc)) except: From 7cd5b13e34270dd5be79269a0b88c8c408c18663 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 30 Oct 2014 16:04:34 -0500 Subject: [PATCH 318/813] Make sure v2 playbook classes validate attributes Also removing cruft code from earlier iteration on task.py and fixing a bug where 'shell' was not removed from the task ds after munge() cleaned things up --- v2/ansible/playbook/base.py | 26 +++- v2/ansible/playbook/role/metadata.py | 12 +- v2/ansible/playbook/task.py | 180 +-------------------------- 3 files changed, 23 insertions(+), 195 deletions(-) diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py index e2b96c8cc2..c163240363 100644 --- a/v2/ansible/playbook/base.py +++ b/v2/ansible/playbook/base.py @@ -24,6 +24,7 @@ from io import FileIO from six import iteritems, string_types +from ansible.errors import AnsibleParserError from ansible.playbook.attribute import Attribute, FieldAttribute from ansible.parsing.yaml import DataLoader @@ -73,14 +74,18 @@ class Base: if isinstance(ds, string_types) or isinstance(ds, FileIO): ds = self._loader.load(ds) - # we currently don't do anything with private attributes but may - # later decide to filter them out of 'ds' here. - + # call the munge() function to massage the data into something + # we can more easily parse, and then call the validation function + # on it to ensure there are no incorrect key values ds = self.munge(ds) + self._validate_attributes(ds) + + # Walk all attributes in the class. + # + # FIXME: we currently don't do anything with private attributes but + # may later decide to filter them out of 'ds' here. 
- # walk all attributes in the class for (name, attribute) in iteritems(self._get_base_attributes()): - # copy the value over unless a _load_field method is defined if name in ds: method = getattr(self, '_load_%s' % name, None) @@ -96,6 +101,17 @@ class Base: def get_loader(self): return self._loader + def _validate_attributes(self, ds): + ''' + Ensures that there are no keys in the datastructure which do + not map to attributes for this object. + ''' + + valid_attrs = [name for (name, attribute) in iteritems(self._get_base_attributes())] + for key in ds: + if key not in valid_attrs: + raise AnsibleParserError("'%s' is not a valid attribute for a %s" % (key, self.__class__), obj=ds) + def validate(self): ''' validation that is done at parse time, not load time ''' diff --git a/v2/ansible/playbook/role/metadata.py b/v2/ansible/playbook/role/metadata.py index 9e732d6eea..485e3da59f 100644 --- a/v2/ansible/playbook/role/metadata.py +++ b/v2/ansible/playbook/role/metadata.py @@ -21,7 +21,7 @@ __metaclass__ = type from six import iteritems, string_types -from ansible.errors import AnsibleError, AnsibleParserError +from ansible.errors import AnsibleParserError from ansible.playbook.attribute import Attribute, FieldAttribute from ansible.playbook.base import Base from ansible.playbook.role.include import RoleInclude @@ -56,16 +56,6 @@ class RoleMetadata(Base): m = RoleMetadata().load_data(data, loader=loader) return m - def munge(self, ds): - # make sure there are no keys in the datastructure which - # do not map to attributes for this object - valid_attrs = [name for (name, attribute) in iteritems(self._get_base_attributes())] - for name in ds: - if name not in valid_attrs: - print("'%s' is not a valid attribute" % name) - raise AnsibleParserError("'%s' is not a valid attribute" % name, obj=ds) - return ds - def _load_dependencies(self, attr, ds): ''' This is a helper loading function for the dependencis list, diff --git a/v2/ansible/playbook/task.py 
b/v2/ansible/playbook/task.py index 422668148b..97f7b06eb6 100644 --- a/v2/ansible/playbook/task.py +++ b/v2/ansible/playbook/task.py @@ -160,7 +160,7 @@ class Task(Base): new_ds['delegate_to'] = delegate_to for (k,v) in ds.iteritems(): - if k in ('action', 'local_action', 'args', 'delegate_to') or k == action: + if k in ('action', 'local_action', 'args', 'delegate_to') or k == action or k == 'shell': # we don't want to re-assign these values, which were # determined by the ModuleArgsParser() above continue @@ -171,181 +171,3 @@ class Task(Base): return new_ds - - # ================================================================================== - # BELOW THIS LINE - # info below this line is "old" and is before the attempt to build Attributes - # use as reference but plan to replace and radically simplify - # ================================================================================== - -LEGACY = """ - - def _load_action(self, ds, k, v): - ''' validate/transmogrify/assign the module and parameters if used in 'action/local_action' format ''' - - results = dict() - module_name, params = v.strip().split(' ', 1) - if module_name not in module_finder: - raise AnsibleError("the specified module '%s' could not be found, check your module path" % module_name) - results['_module_name'] = module_name - results['_parameters'] = parse_kv(params) - - if k == 'local_action': - if 'delegate_to' in ds: - raise AnsibleError("delegate_to cannot be specified with local_action in task: %s" % ds.get('name', v)) - results['_delegate_to'] = '127.0.0.1' - if not 'transport' in ds and not 'connection' in ds: - results['_transport'] = 'local' - return results - - def _load_module(self, ds, k, v): - ''' validate/transmogrify/assign the module and parameters if used in 'module:' format ''' - - results = dict() - if self._module_name: - raise AnsibleError("the module name (%s) was already specified, '%s' is a duplicate" % (self._module_name, k)) - elif 'action' in ds: - raise 
AnsibleError("multiple actions specified in task: '%s' and '%s'" % (k, ds.get('name', ds['action']))) - results['_module_name'] = k - if isinstance(v, dict) and 'args' in ds: - raise AnsibleError("can't combine args: and a dict for %s: in task %s" % (k, ds.get('name', "%s: %s" % (k, v)))) - results['_parameters'] = self._load_parameters(v) - return results - - def _load_loop(self, ds, k, v): - ''' validate/transmogrify/assign the module any loop directives that have valid action plugins as names ''' - - results = dict() - if isinstance(v, basestring): - param = v.strip() - if (param.startswith('{{') and param.find('}}') == len(ds[x]) - 2 and param.find('|') == -1): - utils.warning("It is unnecessary to use '{{' in loops, leave variables in loop expressions bare.") - plugin_name = k.replace("with_","") - if plugin_name in utils.plugins.lookup_loader: - results['_lookup_plugin'] = plugin_name - results['_lookup_terms'] = v - else: - raise errors.AnsibleError("cannot find lookup plugin named %s for usage in with_%s" % (plugin_name, plugin_name)) - return results - - def _load_legacy_when(self, ds, k, v): - ''' yell about old when syntax being used still ''' - - utils.deprecated("The 'when_' conditional has been removed. 
Switch to using the regular unified 'when' statements as described on docs.ansible.com.","1.5", removed=True) - if self._when: - raise errors.AnsibleError("multiple when_* statements specified in task %s" % (ds.get('name', ds.get('action')))) - when_name = k.replace("when_","") - return dict(_when = "%s %s" % (when_name, v)) - - def _load_when(self, ds, k, v): - ''' validate/transmogrify/assign a conditional ''' - - conditionals = self._when.copy() - conditionals.push(v) - return dict(_when=conditionals) - - def _load_changed_when(self, ds, k, v): - ''' validate/transmogrify/assign a changed_when conditional ''' - - conditionals = self._changed_when.copy() - conditionals.push(v) - return dict(_changed_when=conditionals) - - def _load_failed_when(self, ds, k, v): - ''' validate/transmogrify/assign a failed_when conditional ''' - - conditionals = self._failed_when.copy() - conditionals.push(v) - return dict(_failed_when=conditionals) - - # FIXME: move to BaseObject - def _load_tags(self, ds, k, v): - ''' validate/transmogrify/assign any tags ''' - - new_tags = self.tags.copy() - tags = v - if isinstance(v, basestring): - tags = v.split(',') - new_tags.push(v) - return dict(_tags=v) - - def _load_invalid_key(self, ds, k, v): - ''' handle any key we do not recognize ''' - - raise AnsibleError("%s is not a legal parameter in an Ansible task or handler" % k) - - def _load_other_valid_key(self, ds, k, v): - ''' handle any other attribute we DO recognize ''' - - results = dict() - k = "_%s" % k - results[k] = v - return results - - def _loader_for_key(self, k): - ''' based on the name of a datastructure element, find the code to handle it ''' - - if k in ('action', 'local_action'): - return self._load_action - elif k in utils.plugins.module_finder: - return self._load_module - elif k.startswith('with_'): - return self._load_loop - elif k == 'changed_when': - return self._load_changed_when - elif k == 'failed_when': - return self._load_failed_when - elif k == 'when': - 
return self._load_when - elif k == 'tags': - return self._load_tags - elif k not in self.VALID_KEYS: - return self._load_invalid_key - else: - return self._load_other_valid_key - - # ================================================================================== - # PRE-VALIDATION - expected to be uncommonly used, this checks for arguments that - # are aliases of each other. Most everything else should be in the LOAD block - # or the POST-VALIDATE block. - - def _pre_validate(self, ds): - ''' rarely used function to see if the datastructure has items that mean the same thing ''' - - if 'action' in ds and 'local_action' in ds: - raise AnsibleError("the 'action' and 'local_action' attributes can not be used together") - - # ================================================================================= - # POST-VALIDATION: checks for internal inconsistency between fields - # validation can result in an error but also corrections - - def _post_validate(self): - ''' is the loaded datastructure sane? 
''' - - if not self._name: - self._name = self._post_validate_fixed_name() - - # incompatible items - self._validate_conflicting_su_and_sudo() - self._validate_conflicting_first_available_file_and_loookup() - - def _post_validate_fixed_name(self): - '' construct a name for the task if no name was specified ''' - - flat_params = " ".join(["%s=%s" % (k,v) for k,v in self._parameters.iteritems()]) - return = "%s %s" % (self._module_name, flat_params) - - def _post_validate_conflicting_su_and_sudo(self): - ''' make sure su/sudo usage doesn't conflict ''' - - conflicting = (self._sudo or self._sudo_user or self._sudo_pass) and (self._su or self._su_user or self._su_pass): - if conflicting: - raise AnsibleError('sudo params ("sudo", "sudo_user", "sudo_pass") and su params ("su", "su_user", "su_pass") cannot be used together') - - def _post_validate_conflicting_first_available_file_and_lookup(self): - ''' first_available_file (deprecated) predates lookup plugins, and cannot be used with those kinds of loops ''' - - if self._first_available_file and self._lookup_plugin: - raise AnsibleError("with_(plugin), and first_available_file are mutually incompatible in a single task") - -""" From 86de59235f0f18f397bab1637167fdb278803931 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 31 Oct 2014 14:18:18 -0400 Subject: [PATCH 319/813] bypass core/extras text when module is deprecated --- hacking/templates/rst.j2 | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/hacking/templates/rst.j2 b/hacking/templates/rst.j2 index 8d6dc1c89b..1d55a0452b 100644 --- a/hacking/templates/rst.j2 +++ b/hacking/templates/rst.j2 @@ -109,7 +109,8 @@ Examples {% endif %} -{% if core %} +{% if not deprecated %} + {% if core %} This is a Core Module --------------------- @@ -124,7 +125,7 @@ Documentation updates for this module can also be edited directly by submitting This is a "core" ansible module, which means it will receive slightly higher priority for all requests than those 
in the "extras" repos. -{% else %} + {% else %} This is an Extras Module ------------------------ @@ -140,6 +141,7 @@ Documentation updates for this module can also be edited directly by submitting Note that this module is designated a "extras" module. Non-core modules are still fully usable, but may receive slightly lower response rates for issues and pull requests. Popular "extras" modules may be promoted to core modules over time. + {% endif %} {% endif %} For help in developing on modules, should you be so inclined, please read :doc:`community`, :doc:`developing_test_pr` and :doc:`developing_modules`. From 44f0279d0a2440172a97353ec47dc17eebbee98a Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 31 Oct 2014 14:20:26 -0400 Subject: [PATCH 320/813] Now adds flags for non core and deprecated modules in listing --- hacking/module_formatter.py | 79 +++++++++++++++++++++++-------------- 1 file changed, 50 insertions(+), 29 deletions(-) diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py index 61de1ea136..6392c83ac6 100755 --- a/hacking/module_formatter.py +++ b/hacking/module_formatter.py @@ -59,6 +59,8 @@ _MODULE = re.compile(r"M\(([^)]+)\)") _URL = re.compile(r"U\(([^)]+)\)") _CONST = re.compile(r"C\(([^)]+)\)") +DEPRECATED = " (D)" +NOTCORE = " (E)" ##################################################################################### def rst_ify(text): @@ -121,7 +123,7 @@ def write_data(text, options, outputname, module): def list_modules(module_dir): ''' returns a hash of categories, each category being a hash of module names to file paths ''' - categories = dict(all=dict(),deprecated=dict()) + categories = dict(all=dict()) files = glob.glob("%s/*/*" % module_dir) for d in files: if os.path.isdir(d): @@ -135,19 +137,14 @@ def list_modules(module_dir): # windows powershell modules have documentation stubs in python docstring # format (they are not executed) so skip the ps1 format files continue - elif module.startswith("_"): # Handle 
deprecated modules - if not os.path.islink(f): # ignores aliases - categories['deprecated'][module] = f + elif module.startswith("_") and os.path.islink(f): # ignores aliases continue - elif module in categories['deprecated']: # Removes dupes - categories['deprecated'].pop(module, None) if not category in categories: categories[category] = {} categories[category][module] = f categories['all'][module] = f - if not len(categories['deprecated']) > 0: - categories.pop('deprecated', None) + return categories ##################################################################################### @@ -198,9 +195,6 @@ def jinja2_environment(template_dir, typ): def process_module(module, options, env, template, outputname, module_map): - print "rendering: %s" % module - - fname = module_map[module] basename = os.path.basename(fname) deprecated = False @@ -208,21 +202,28 @@ def process_module(module, options, env, template, outputname, module_map): # ignore files with extensions if not basename.endswith(".py"): return - elif basename.startswith("_"): - if os.path.islink(fname): # alias - return + elif module.startswith("_"): + if os.path.islink(fname): + return # ignore, its an alias deprecated = True + module = module.replace("_","",1) + + print "rendering: %s" % module # use ansible core library to parse out doc metadata YAML and plaintext examples doc, examples = ansible.utils.module_docs.get_docstring(fname, verbose=options.verbose) # crash if module is missing documentation and not explicitly hidden from docs index - if doc is None and module not in ansible.utils.module_docs.BLACKLIST_MODULES: - sys.stderr.write("*** ERROR: MODULE MISSING DOCUMENTATION: %s, %s ***\n" % (fname, module)) - sys.exit(1) - if doc is None: - return "SKIPPED" + if module in ansible.utils.module_docs.BLACKLIST_MODULES: + return "SKIPPED" + else: + sys.stderr.write("*** ERROR: MODULE MISSING DOCUMENTATION: %s, %s ***\n" % (fname, module)) + sys.exit(1) + + if deprecated and 'deprecated' not in doc: 
+ sys.stderr.write("*** ERROR: DEPRECATED MODULE MISSING 'deprecated' DOCUMENTATION: %s, %s ***\n" % (fname, module)) + sys.exit(1) if "/core/" in fname: doc['core'] = True @@ -252,21 +253,21 @@ def process_module(module, options, env, template, outputname, module_map): for (k,v) in doc['options'].iteritems(): all_keys.append(k) - all_keys = sorted(all_keys) - doc['option_keys'] = all_keys + all_keys = sorted(all_keys) + + doc['option_keys'] = all_keys doc['filename'] = fname doc['docuri'] = doc['module'].replace('_', '-') doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d') doc['ansible_version'] = options.ansible_version doc['plainexamples'] = examples #plain text - if deprecated and 'deprecated' not in doc: - doc['deprecated'] = "This module is deprecated, as such it's use is discouraged." # here is where we build the table of contents... text = template.render(doc) write_data(text, options, outputname, module) + return doc['short_description'] ##################################################################################### @@ -283,7 +284,19 @@ def process_category(category, categories, options, env, template, outputname): category = category.replace("_"," ") category = category.title() - modules = module_map.keys() + modules = [] + deprecated = [] + core = [] + for module in module_map.keys(): + + if module.startswith("_"): + module = module.replace("_","",1) + deprecated.append(module) + elif '/core/' in module_map[module]: + core.append(module) + + modules.append(module) + modules.sort() category_header = "%s Modules" % (category.title()) @@ -293,16 +306,24 @@ def process_category(category, categories, options, env, template, outputname): %s %s -.. toctree:: - :maxdepth: 1 +.. 
toctree:: :maxdepth: 1 """ % (category_header, underscores)) for module in modules: - result = process_module(module, options, env, template, outputname, module_map) - if result != "SKIPPED": - category_file.write(" %s_module\n" % module) + modstring = module + modname = module + if module in deprecated: + modstring = modstring + DEPRECATED + modname = "_" + module + elif module not in core: + modstring = modstring + NOTCORE + + result = process_module(modname, options, env, template, outputname, module_map) + + if result != "SKIPPED": + category_file.write(" %s - %s <%s_module>\n" % (modstring, result, module)) category_file.close() From 023f5fd7e0c959fe09d26c49c534e966f3e82fb5 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 31 Oct 2014 15:06:00 -0400 Subject: [PATCH 321/813] Added note explaning the module tagging --- hacking/module_formatter.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py index 6392c83ac6..51bea3e135 100755 --- a/hacking/module_formatter.py +++ b/hacking/module_formatter.py @@ -325,6 +325,11 @@ def process_category(category, categories, options, env, template, outputname): if result != "SKIPPED": category_file.write(" %s - %s <%s_module>\n" % (modstring, result, module)) + category_file.write("""\n\n +.. 
note:: + - %s: Denotes that this module is not part of core, it can be found in the extras repo + - %s: This marks a module as deprecated, kept for backwards compatibility but use is discouraged +""" % (DEPRECATED, NOTCORE)) category_file.close() # TODO: end a new category file From f6d9aa7a8ffcd97bb4cdd22871735a694ea7024a Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 31 Oct 2014 16:05:22 -0400 Subject: [PATCH 322/813] corrected text/flag --- hacking/module_formatter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py index 51bea3e135..1218b85e71 100755 --- a/hacking/module_formatter.py +++ b/hacking/module_formatter.py @@ -327,8 +327,8 @@ def process_category(category, categories, options, env, template, outputname): category_file.write("""\n\n .. note:: - - %s: Denotes that this module is not part of core, it can be found in the extras repo - %s: This marks a module as deprecated, kept for backwards compatibility but use is discouraged + - %s: Denotes that this module is not part of core, it can be found in the extras or some other external repo """ % (DEPRECATED, NOTCORE)) category_file.close() From 2397926b948ec827bef4debb108b7806a7a039f1 Mon Sep 17 00:00:00 2001 From: Will Thames Date: Sat, 1 Nov 2014 12:36:31 +1000 Subject: [PATCH 323/813] Handle case where boto needs an upgrade to recognise a new region Raise an exception if boto does not yet know about a region. 
--- lib/ansible/module_utils/ec2.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/ec2.py b/lib/ansible/module_utils/ec2.py index 3d3040068f..5db6555365 100644 --- a/lib/ansible/module_utils/ec2.py +++ b/lib/ansible/module_utils/ec2.py @@ -164,6 +164,11 @@ def boto_fix_security_token_in_profile(conn, profile_name): def connect_to_aws(aws_module, region, **params): conn = aws_module.connect_to_region(region, **params) + if not conn: + if region not in [aws_module_region.name for aws_module_region in aws_module.regions()]: + raise StandardError("Region %s does not seem to be available for aws module %s. If the region definitely exists, you may need to upgrade boto" % (region, aws_module.__name__)) + else: + raise StandardError("Unknown problem connecting to region %s for aws module %s." % (region, aws_module.__name__)) if params.get('profile_name'): conn = boto_fix_security_token_in_profile(conn, params['profile_name']) return conn @@ -179,13 +184,13 @@ def ec2_connect(module): if region: try: ec2 = connect_to_aws(boto.ec2, region, **boto_params) - except boto.exception.NoAuthHandlerFound, e: + except (boto.exception.NoAuthHandlerFound, StandardError), e: module.fail_json(msg=str(e)) # Otherwise, no region so we fallback to the old connection method elif ec2_url: try: ec2 = boto.connect_ec2_endpoint(ec2_url, **boto_params) - except boto.exception.NoAuthHandlerFound, e: + except (boto.exception.NoAuthHandlerFound, StandardError), e: module.fail_json(msg=str(e)) else: module.fail_json(msg="Either region or ec2_url must be specified") From 7a5e7db2df04c6c673b9d715b052503e49cdb6cf Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 1 Nov 2014 01:17:42 -0400 Subject: [PATCH 324/813] ansible doc now finds modules recursively more intelligent about ignoring files that are clearly not modules --- bin/ansible-doc | 46 ++++++++++++++++++++++++++++------------------ 1 file changed, 28 insertions(+), 18 deletions(-) diff 
--git a/bin/ansible-doc b/bin/ansible-doc index aed7d4d23c..0ba84b9a30 100755 --- a/bin/ansible-doc +++ b/bin/ansible-doc @@ -34,6 +34,7 @@ import traceback MODULEDIR = C.DEFAULT_MODULE_PATH BLACKLIST_EXTS = ('.pyc', '.swp', '.bak', '~', '.rpm') +IGNORE_FILES = [ "COPYING", "CONTRIBUTING", "LICENSE", "README" ] _ITALIC = re.compile(r"I\(([^)]+)\)") _BOLD = re.compile(r"B\(([^)]+)\)") @@ -94,7 +95,7 @@ def get_man_text(doc): desc = " ".join(doc['description']) text.append("%s\n" % textwrap.fill(tty_ify(desc), initial_indent=" ", subsequent_indent=" ")) - + if 'option_keys' in doc and len(doc['option_keys']) > 0: text.append("Options (= is mandatory):\n") @@ -202,6 +203,28 @@ def get_module_list_text(module_list): text.extend(deprecated) return "\n".join(text) +def find_modules(path, module_list): + + if os.path.isdir(path): + for module in os.listdir(path): + if module.startswith('.'): + continue + elif os.path.isdir(module): + find_modules(module, module_list) + elif any(module.endswith(x) for x in BLACKLIST_EXTS): + continue + elif module.startswith('__'): + continue + elif module in IGNORE_FILES: + continue + elif module.startswith('_'): + fullpath = '/'.join([path,module]) + if os.path.islink(fullpath): # avoids aliases + continue + + module = os.path.splitext(module)[0] # removes the extension + module_list.append(module) + def main(): p = optparse.OptionParser( @@ -238,26 +261,14 @@ def main(): paths = utils.plugins.module_finder._get_paths() module_list = [] for path in paths: - if os.path.isdir(path): - for module in os.listdir(path): - if any(module.endswith(x) for x in BLACKLIST_EXTS): - continue - elif module.startswith('__'): - continue - elif module.startswith('_'): - fullpath = '/'.join([path,module]) - if os.path.islink(fullpath): # avoids aliases - continue + find_modules(path, module_list) - module = os.path.splitext(module)[0] # removes the extension - module_list.append(module) - pager(get_module_list_text(module_list)) sys.exit() if len(args) == 
0: p.print_help() - + def print_paths(finder): ''' Returns a string suitable for printing of the search path ''' @@ -267,14 +278,13 @@ def main(): if i not in ret: ret.append(i) return os.pathsep.join(ret) - + text = '' for module in args: filename = utils.plugins.module_finder.find_plugin(module) if filename is None: - sys.stderr.write("module %s not found in %s\n" % (module, - print_paths(utils.plugins.module_finder))) + sys.stderr.write("module %s not found in %s\n" % (module, print_paths(utils.plugins.module_finder))) continue if any(filename.endswith(x) for x in BLACKLIST_EXTS): From 3e992b58244ad5fb79485be16b22a503a4fbd1e2 Mon Sep 17 00:00:00 2001 From: akinsley Date: Sat, 1 Nov 2014 00:51:52 -0700 Subject: [PATCH 325/813] Setting keepalive option before opening session Setting the keepalive option before opening up the paramiko session in order to avoid the slim chance that the connection is stalled in the short time between opening the session and setting up the keepalive. I described the issue I am solving at https://groups.google.com/forum/#!topic/ansible-project/rkwvz3vyvLk --- .../runner/connection_plugins/paramiko_ssh.py | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/lib/ansible/runner/connection_plugins/paramiko_ssh.py b/lib/ansible/runner/connection_plugins/paramiko_ssh.py index 59932ebb7d..4bb06e01c3 100644 --- a/lib/ansible/runner/connection_plugins/paramiko_ssh.py +++ b/lib/ansible/runner/connection_plugins/paramiko_ssh.py @@ -40,10 +40,10 @@ from ansible.callbacks import vvv from ansible import errors from ansible import utils from ansible import constants as C - + AUTHENTICITY_MSG=""" -paramiko: The authenticity of host '%s' can't be established. -The %s key fingerprint is %s. +paramiko: The authenticity of host '%s' can't be established. +The %s key fingerprint is %s. Are you sure you want to continue connecting (yes/no)? 
""" @@ -67,7 +67,7 @@ class MyAddPolicy(object): local L{HostKeys} object, and saving it. This is used by L{SSHClient}. """ - def __init__(self, runner): + def __init__(self, runner): self.runner = runner def missing_host_key(self, client, hostname, key): @@ -81,7 +81,7 @@ class MyAddPolicy(object): sys.stdin = self.runner._new_stdin fingerprint = hexlify(key.get_fingerprint()) ktype = key.get_name() - + # clear out any premature input on sys.stdin tcflush(sys.stdin, TCIFLUSH) @@ -103,7 +103,7 @@ class MyAddPolicy(object): # host keys are actually saved in close() function below # in order to control ordering. - + # keep connection objects on a per host basis to avoid repeated attempts to reconnect @@ -145,7 +145,7 @@ class Connection(object): vvv("ESTABLISH CONNECTION FOR USER: %s on PORT %s TO %s" % (self.user, self.port, self.host), host=self.host) ssh = paramiko.SSHClient() - + self.keyfile = os.path.expanduser("~/.ssh/known_hosts") if C.HOST_KEY_CHECKING: @@ -194,8 +194,8 @@ class Connection(object): try: - chan = self.ssh.get_transport().open_session() self.ssh.get_transport().set_keepalive(5) + chan = self.ssh.get_transport().open_session() except Exception, e: @@ -318,7 +318,7 @@ class Connection(object): def _any_keys_added(self): - added_any = False + added_any = False for hostname, keys in self.ssh._host_keys.iteritems(): for keytype, key in keys.iteritems(): added_this_time = getattr(key, '_added_by_ansible_this_time', False) @@ -327,9 +327,9 @@ class Connection(object): return False def _save_ssh_host_keys(self, filename): - ''' - not using the paramiko save_ssh_host_keys function as we want to add new SSH keys at the bottom so folks - don't complain about it :) + ''' + not using the paramiko save_ssh_host_keys function as we want to add new SSH keys at the bottom so folks + don't complain about it :) ''' if not self._any_keys_added(): @@ -372,7 +372,7 @@ class Connection(object): if C.HOST_KEY_CHECKING and C.PARAMIKO_RECORD_HOST_KEYS and 
self._any_keys_added(): # add any new SSH host keys -- warning -- this could be slow - lockfile = self.keyfile.replace("known_hosts",".known_hosts.lock") + lockfile = self.keyfile.replace("known_hosts",".known_hosts.lock") dirname = os.path.dirname(self.keyfile) if not os.path.exists(dirname): os.makedirs(dirname) @@ -414,4 +414,4 @@ class Connection(object): fcntl.lockf(KEY_LOCK, fcntl.LOCK_UN) self.ssh.close() - + From 684cdd0298dafe51f4091d27b2bcc6acee96fc9f Mon Sep 17 00:00:00 2001 From: Lorin Hochstein Date: Sat, 1 Nov 2014 19:31:04 -0400 Subject: [PATCH 326/813] Docs: accelerate mode -> accelerated mode The docs sometimes referred to "accelerated mode" as "accelerate mode". This patch changes it to "accelerated mode" everywhere. --- docsite/rst/intro_configuration.rst | 6 +++--- docsite/rst/playbooks_acceleration.rst | 14 +++++++------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst index cf4b9b6122..a9f50f804f 100644 --- a/docsite/rst/intro_configuration.rst +++ b/docsite/rst/intro_configuration.rst @@ -662,8 +662,8 @@ recommended if you can enable it, eliminating the need for :doc:`playbooks_accel .. _accelerate_settings: -Accelerate Mode Settings ------------------------- +Accelerated Mode Settings +------------------------- Under the [accelerate] header, the following settings are tunable for :doc:`playbooks_acceleration`. Acceleration is a useful performance feature to use if you cannot enable :ref:`pipelining` in your environment, but is probably @@ -676,7 +676,7 @@ accelerate_port .. 
versionadded:: 1.3 -This is the port to use for accelerate mode:: +This is the port to use for accelerated mode:: accelerate_port = 5099 diff --git a/docsite/rst/playbooks_acceleration.rst b/docsite/rst/playbooks_acceleration.rst index b7f08828a8..40b77246db 100644 --- a/docsite/rst/playbooks_acceleration.rst +++ b/docsite/rst/playbooks_acceleration.rst @@ -6,24 +6,24 @@ Accelerated Mode You Might Not Need This! ```````````````````````` -Are you running Ansible 1.5 or later? If so, you may not need accelerate mode due to a new feature called "SSH pipelining" and should read the :ref:`pipelining` section of the documentation. +Are you running Ansible 1.5 or later? If so, you may not need accelerated mode due to a new feature called "SSH pipelining" and should read the :ref:`pipelining` section of the documentation. -For users on 1.5 and later, accelerate mode only makes sense if you (A) are managing from an Enterprise Linux 6 or earlier host +For users on 1.5 and later, accelerated mode only makes sense if you (A) are managing from an Enterprise Linux 6 or earlier host and still are on paramiko, or (B) can't enable TTYs with sudo as described in the pipelining docs. If you can use pipelining, Ansible will reduce the amount of files transferred over the wire, -making everything much more efficient, and performance will be on par with accelerate mode in nearly all cases, possibly excluding very large file transfer. Because less moving parts are involved, pipelining is better than accelerate mode for nearly all use cases. +making everything much more efficient, and performance will be on par with accelerated mode in nearly all cases, possibly excluding very large file transfer. Because less moving parts are involved, pipelining is better than accelerated mode for nearly all use cases. -Accelerate mode remains around in support of EL6 +Accelerated mode remains around in support of EL6 control machines and other constrained environments. 
-Accelerate Mode Details -``````````````````````` +Accelerated Mode Details +```````````````````````` While OpenSSH using the ControlPersist feature is quite fast and scalable, there is a certain small amount of overhead involved in using SSH connections. While many people will not encounter a need, if you are running on a platform that doesn't have ControlPersist support (such as an EL6 control machine), you'll probably be even more interested in tuning options. -Accelerate mode is there to help connections work faster, but still uses SSH for initial secure key exchange. There is no +Accelerated mode is there to help connections work faster, but still uses SSH for initial secure key exchange. There is no additional public key infrastructure to manage, and this does not require things like NTP or even DNS. Accelerated mode can be anywhere from 2-6x faster than SSH with ControlPersist enabled, and 10x faster than paramiko. From 80b1365d53fe480776c2b84d61cacbc54a5fb3dc Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 1 Nov 2014 23:19:25 -0400 Subject: [PATCH 327/813] now correctly processes modules when in subdirs of cloud --- hacking/module_formatter.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py index 1218b85e71..fe0da35ed8 100755 --- a/hacking/module_formatter.py +++ b/hacking/module_formatter.py @@ -127,11 +127,12 @@ def list_modules(module_dir): files = glob.glob("%s/*/*" % module_dir) for d in files: if os.path.isdir(d): - files2 = glob.glob("%s/*" % d) + files2 = glob.glob("%s/*" % d) + glob.glob("%s/*/*" % d) for f in files2: - module = os.path.splitext(os.path.basename(f))[0] - category = os.path.dirname(f).split("/")[-1] + category = "cloud" + if os.path.dirname(f).split("/")[-2] != "cloud": + category = os.path.dirname(f).split("/")[-1] if not f.endswith(".py") or f.endswith('__init__.py'): # windows powershell modules have documentation stubs in python docstring 
From 7bd2c945a76a1ca921b53f10a4bd4afbee5feeab Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 3 Nov 2014 08:15:26 -0500 Subject: [PATCH 328/813] now doc generation does not ignore subdirs of cloud --- hacking/module_formatter.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py index fe0da35ed8..ee7ee45327 100755 --- a/hacking/module_formatter.py +++ b/hacking/module_formatter.py @@ -129,9 +129,11 @@ def list_modules(module_dir): if os.path.isdir(d): files2 = glob.glob("%s/*" % d) + glob.glob("%s/*/*" % d) for f in files2: + module = os.path.splitext(os.path.basename(f))[0] - category = "cloud" - if os.path.dirname(f).split("/")[-2] != "cloud": + if os.path.dirname(f).split("/")[-2] == "cloud": + category = "cloud" + else: category = os.path.dirname(f).split("/")[-1] if not f.endswith(".py") or f.endswith('__init__.py'): From 11822f0d57908da3bd11066fc57d14ccdb920ff5 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sat, 1 Nov 2014 14:34:14 -0500 Subject: [PATCH 329/813] Adding VariableManager class for v2 --- v2/ansible/plugins/cache/__init__.py | 59 ++++++++ v2/ansible/plugins/cache/base.py | 41 ++++++ v2/ansible/plugins/cache/memcached.py | 191 ++++++++++++++++++++++++++ v2/ansible/plugins/cache/memory.py | 44 ++++++ v2/ansible/plugins/cache/redis.py | 102 ++++++++++++++ v2/ansible/vars/__init__.py | 182 ++++++++++++++++++++++++ v2/test/vars/__init__.py | 21 +++ v2/test/vars/test_variable_manager.py | 131 ++++++++++++++++++ 8 files changed, 771 insertions(+) create mode 100644 v2/ansible/plugins/cache/__init__.py create mode 100644 v2/ansible/plugins/cache/base.py create mode 100644 v2/ansible/plugins/cache/memcached.py create mode 100644 v2/ansible/plugins/cache/memory.py create mode 100644 v2/ansible/plugins/cache/redis.py create mode 100644 v2/ansible/vars/__init__.py create mode 100644 v2/test/vars/__init__.py create mode 100644 v2/test/vars/test_variable_manager.py diff 
--git a/v2/ansible/plugins/cache/__init__.py b/v2/ansible/plugins/cache/__init__.py new file mode 100644 index 0000000000..deed7f3ecd --- /dev/null +++ b/v2/ansible/plugins/cache/__init__.py @@ -0,0 +1,59 @@ +# (c) 2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from collections import MutableMapping + +from ansible import constants as C +from ansible.plugins import cache_loader + +class FactCache(MutableMapping): + + def __init__(self, *args, **kwargs): + self._plugin = cache_loader.get(C.CACHE_PLUGIN) + if self._plugin is None: + return + + def __getitem__(self, key): + if key not in self: + raise KeyError + return self._plugin.get(key) + + def __setitem__(self, key, value): + self._plugin.set(key, value) + + def __delitem__(self, key): + self._plugin.delete(key) + + def __contains__(self, key): + return self._plugin.contains(key) + + def __iter__(self): + return iter(self._plugin.keys()) + + def __len__(self): + return len(self._plugin.keys()) + + def copy(self): + """ Return a primitive copy of the keys and values from the cache. """ + return dict([(k, v) for (k, v) in self.iteritems()]) + + def keys(self): + return self._plugin.keys() + + def flush(self): + """ Flush the fact cache of all keys. 
""" + self._plugin.flush() diff --git a/v2/ansible/plugins/cache/base.py b/v2/ansible/plugins/cache/base.py new file mode 100644 index 0000000000..b6254cdfd4 --- /dev/null +++ b/v2/ansible/plugins/cache/base.py @@ -0,0 +1,41 @@ +# (c) 2014, Brian Coca, Josh Drake, et al +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +import exceptions + +class BaseCacheModule(object): + + def get(self, key): + raise exceptions.NotImplementedError + + def set(self, key, value): + raise exceptions.NotImplementedError + + def keys(self): + raise exceptions.NotImplementedError + + def contains(self, key): + raise exceptions.NotImplementedError + + def delete(self, key): + raise exceptions.NotImplementedError + + def flush(self): + raise exceptions.NotImplementedError + + def copy(self): + raise exceptions.NotImplementedError diff --git a/v2/ansible/plugins/cache/memcached.py b/v2/ansible/plugins/cache/memcached.py new file mode 100644 index 0000000000..deaf07fe2e --- /dev/null +++ b/v2/ansible/plugins/cache/memcached.py @@ -0,0 +1,191 @@ +# (c) 2014, Brian Coca, Josh Drake, et al +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +import collections +import os +import sys +import time +import threading +from itertools import chain + +from ansible import constants as C +from ansible.plugins.cache.base import BaseCacheModule + +try: + import memcache +except ImportError: + print 'python-memcached is required for the memcached fact cache' + sys.exit(1) + + +class ProxyClientPool(object): + """ + Memcached connection pooling for thread/fork safety. Inspired by py-redis + connection pool. + + Available connections are maintained in a deque and released in a FIFO manner. + """ + + def __init__(self, *args, **kwargs): + self.max_connections = kwargs.pop('max_connections', 1024) + self.connection_args = args + self.connection_kwargs = kwargs + self.reset() + + def reset(self): + self.pid = os.getpid() + self._num_connections = 0 + self._available_connections = collections.deque(maxlen=self.max_connections) + self._locked_connections = set() + self._lock = threading.Lock() + + def _check_safe(self): + if self.pid != os.getpid(): + with self._lock: + if self.pid == os.getpid(): + # bail out - another thread already acquired the lock + return + self.disconnect_all() + self.reset() + + def get_connection(self): + self._check_safe() + try: + connection = self._available_connections.popleft() + except IndexError: + connection = self.create_connection() + self._locked_connections.add(connection) + return connection + + def create_connection(self): + if self._num_connections >= self.max_connections: + raise RuntimeError("Too many memcached connections") + self._num_connections += 1 + return memcache.Client(*self.connection_args, **self.connection_kwargs) + + 
def release_connection(self, connection): + self._check_safe() + self._locked_connections.remove(connection) + self._available_connections.append(connection) + + def disconnect_all(self): + for conn in chain(self._available_connections, self._locked_connections): + conn.disconnect_all() + + def __getattr__(self, name): + def wrapped(*args, **kwargs): + return self._proxy_client(name, *args, **kwargs) + return wrapped + + def _proxy_client(self, name, *args, **kwargs): + conn = self.get_connection() + + try: + return getattr(conn, name)(*args, **kwargs) + finally: + self.release_connection(conn) + + +class CacheModuleKeys(collections.MutableSet): + """ + A set subclass that keeps track of insertion time and persists + the set in memcached. + """ + PREFIX = 'ansible_cache_keys' + + def __init__(self, cache, *args, **kwargs): + self._cache = cache + self._keyset = dict(*args, **kwargs) + + def __contains__(self, key): + return key in self._keyset + + def __iter__(self): + return iter(self._keyset) + + def __len__(self): + return len(self._keyset) + + def add(self, key): + self._keyset[key] = time.time() + self._cache.set(self.PREFIX, self._keyset) + + def discard(self, key): + del self._keyset[key] + self._cache.set(self.PREFIX, self._keyset) + + def remove_by_timerange(self, s_min, s_max): + for k in self._keyset.keys(): + t = self._keyset[k] + if s_min < t < s_max: + del self._keyset[k] + self._cache.set(self.PREFIX, self._keyset) + + +class CacheModule(BaseCacheModule): + + def __init__(self, *args, **kwargs): + if C.CACHE_PLUGIN_CONNECTION: + connection = C.CACHE_PLUGIN_CONNECTION.split(',') + else: + connection = ['127.0.0.1:11211'] + + self._timeout = C.CACHE_PLUGIN_TIMEOUT + self._prefix = C.CACHE_PLUGIN_PREFIX + self._cache = ProxyClientPool(connection, debug=0) + self._keys = CacheModuleKeys(self._cache, self._cache.get(CacheModuleKeys.PREFIX) or []) + + def _make_key(self, key): + return "{0}{1}".format(self._prefix, key) + + def _expire_keys(self): + if 
self._timeout > 0: + expiry_age = time.time() - self._timeout + self._keys.remove_by_timerange(0, expiry_age) + + def get(self, key): + value = self._cache.get(self._make_key(key)) + # guard against the key not being removed from the keyset; + # this could happen in cases where the timeout value is changed + # between invocations + if value is None: + self.delete(key) + raise KeyError + return value + + def set(self, key, value): + self._cache.set(self._make_key(key), value, time=self._timeout, min_compress_len=1) + self._keys.add(key) + + def keys(self): + self._expire_keys() + return list(iter(self._keys)) + + def contains(self, key): + self._expire_keys() + return key in self._keys + + def delete(self, key): + self._cache.delete(self._make_key(key)) + self._keys.discard(key) + + def flush(self): + for key in self.keys(): + self.delete(key) + + def copy(self): + return self._keys.copy() diff --git a/v2/ansible/plugins/cache/memory.py b/v2/ansible/plugins/cache/memory.py new file mode 100644 index 0000000000..007719a647 --- /dev/null +++ b/v2/ansible/plugins/cache/memory.py @@ -0,0 +1,44 @@ +# (c) 2014, Brian Coca, Josh Drake, et al +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +from ansible.plugins.cache.base import BaseCacheModule + +class CacheModule(BaseCacheModule): + + def __init__(self, *args, **kwargs): + self._cache = {} + + def get(self, key): + return self._cache.get(key) + + def set(self, key, value): + self._cache[key] = value + + def keys(self): + return self._cache.keys() + + def contains(self, key): + return key in self._cache + + def delete(self, key): + del self._cache[key] + + def flush(self): + self._cache = {} + + def copy(self): + return self._cache.copy() diff --git a/v2/ansible/plugins/cache/redis.py b/v2/ansible/plugins/cache/redis.py new file mode 100644 index 0000000000..7f126de64b --- /dev/null +++ b/v2/ansible/plugins/cache/redis.py @@ -0,0 +1,102 @@ +# (c) 2014, Brian Coca, Josh Drake, et al +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from __future__ import absolute_import +import collections +# FIXME: can we store these as something else before we ship it? +import sys +import time +import json + +from ansible import constants as C +from ansible.plugins.cache.base import BaseCacheModule + +try: + from redis import StrictRedis +except ImportError: + print "The 'redis' python module is required, 'pip install redis'" + sys.exit(1) + +class CacheModule(BaseCacheModule): + """ + A caching module backed by redis. + + Keys are maintained in a zset with their score being the timestamp + when they are inserted. 
This allows for the usage of 'zremrangebyscore' + to expire keys. This mechanism is used or a pattern matched 'scan' for + performance. + """ + def __init__(self, *args, **kwargs): + if C.CACHE_PLUGIN_CONNECTION: + connection = C.CACHE_PLUGIN_CONNECTION.split(':') + else: + connection = [] + + self._timeout = float(C.CACHE_PLUGIN_TIMEOUT) + self._prefix = C.CACHE_PLUGIN_PREFIX + self._cache = StrictRedis(*connection) + self._keys_set = 'ansible_cache_keys' + + def _make_key(self, key): + return self._prefix + key + + def get(self, key): + value = self._cache.get(self._make_key(key)) + # guard against the key not being removed from the zset; + # this could happen in cases where the timeout value is changed + # between invocations + if value is None: + self.delete(key) + raise KeyError + return json.loads(value) + + def set(self, key, value): + value2 = json.dumps(value) + if self._timeout > 0: # a timeout of 0 is handled as meaning 'never expire' + self._cache.setex(self._make_key(key), int(self._timeout), value2) + else: + self._cache.set(self._make_key(key), value2) + + self._cache.zadd(self._keys_set, time.time(), key) + + def _expire_keys(self): + if self._timeout > 0: + expiry_age = time.time() - self._timeout + self._cache.zremrangebyscore(self._keys_set, 0, expiry_age) + + def keys(self): + self._expire_keys() + return self._cache.zrange(self._keys_set, 0, -1) + + def contains(self, key): + self._expire_keys() + return (self._cache.zrank(self._keys_set, key) >= 0) + + def delete(self, key): + self._cache.delete(self._make_key(key)) + self._cache.zrem(self._keys_set, key) + + def flush(self): + for key in self.keys(): + self.delete(key) + + def copy(self): + # FIXME: there is probably a better way to do this in redis + ret = dict() + for key in self.keys(): + ret[key] = self.get(key) + return ret diff --git a/v2/ansible/vars/__init__.py b/v2/ansible/vars/__init__.py new file mode 100644 index 0000000000..af81b12b2e --- /dev/null +++ 
b/v2/ansible/vars/__init__.py @@ -0,0 +1,182 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from collections import defaultdict + +from ansible.parsing.yaml import DataLoader +from ansible.plugins.cache import FactCache + +class VariableManager: + + def __init__(self, inventory_path=None, loader=None): + + self._fact_cache = FactCache() + self._vars_cache = defaultdict(dict) + self._extra_vars = defaultdict(dict) + self._host_vars_files = defaultdict(dict) + self._group_vars_files = defaultdict(dict) + + if not loader: + self._loader = DataLoader() + else: + self._loader = loader + + @property + def extra_vars(self): + ''' ensures a clean copy of the extra_vars are made ''' + return self._extra_vars.copy() + + def set_extra_vars(self, value): + ''' ensures a clean copy of the extra_vars are used to set the value ''' + assert isinstance(value, dict) + self._extra_vars = value.copy() + + def _merge_dicts(self, a, b): + ''' + Recursively merges dict b into a, so that keys + from b take precedence over keys from a. + ''' + + result = dict() + + # FIXME: do we need this from utils, or should it just + # be merged into this definition? 
+ #_validate_both_dicts(a, b) + + for dicts in a, b: + # next, iterate over b keys and values + for k, v in dicts.iteritems(): + # if there's already such key in a + # and that key contains dict + if k in result and isinstance(result[k], dict): + # merge those dicts recursively + result[k] = self._merge_dicts(a[k], v) + else: + # otherwise, just copy a value from b to a + result[k] = v + + return result + + def get_vars(self, play=None, host=None, task=None): + ''' + Returns the variables, with optional "context" given via the parameters + for the play, host, and task (which could possibly result in different + sets of variables being returned due to the additional context). + + The order of precedence is: + - play->roles->get_default_vars (if there is a play context) + - group_vars_files[host] (if there is a host context) + - host_vars_files[host] (if there is a host context) + - host->get_vars (if there is a host context) + - fact_cache[host] (if there is a host context) + - vars_cache[host] (if there is a host context) + - play vars (if there is a play context) + - play vars_files (if there's no host context, ignore + file names that cannot be templated) + - task->get_vars (if there is a task context) + - extra vars + ''' + + vars = defaultdict(dict) + + if play: + # first we compile any vars specified in defaults/main.yml + # for all roles within the specified play + for role in play.get_roles(): + vars = self._merge_dicts(vars, role.get_default_vars()) + + if host: + # next, if a host is specified, we load any vars from group_vars + # files and then any vars from host_vars files which may apply to + # this host or the groups it belongs to + for group in host.get_groups(): + if group in self._group_vars_files: + vars = self._merge_dicts(vars, self._group_vars_files[group]) + + host_name = host.get_name() + if host_name in self._host_vars_files: + vars = self._merge_dicts(vars, self._host_vars_files[host_name]) + + # then we merge in vars specified for this host 
+ vars = self._merge_dicts(vars, host.get_vars()) + + # next comes the facts cache and the vars cache, respectively + vars = self._merge_dicts(vars, self._fact_cache.get(host.get_name(), dict())) + vars = self._merge_dicts(vars, self._vars_cache.get(host.get_name(), dict())) + + if play: + vars = self._merge_dicts(vars, play.get_vars()) + for vars_file in play.get_vars_files(): + # Try templating the vars_file. If an unknown var error is raised, + # ignore it - unless a host is specified + # TODO ... + + data = self._loader.load_from_file(vars_file) + vars = self._merge_dicts(vars, data) + + if task: + vars = self._merge_dicts(vars, task.get_vars()) + + vars = self._merge_dicts(vars, self._extra_vars) + + return vars + + def _get_inventory_basename(self, path): + ''' + Returns the basename minus the extension of the given path, so the + bare filename can be matched against host/group names later + ''' + + (name, ext) = os.path.splitext(os.path.basename(path)) + return name + + def _load_inventory_file(self, path): + ''' + helper function, which loads the file and gets the + basename of the file without the extension + ''' + + data = self._loader.load_from_file(path) + name = self._get_inventory_basename(path) + return (name, data) + + def add_host_vars_file(self, path): + ''' + Loads and caches a host_vars file in the _host_vars_files dict, + where the key to that dictionary is the basename of the file, minus + the extension, for matching against a given inventory host name + ''' + + (name, data) = self._load_inventory_file(path) + self._host_vars_files[name] = data + + def add_group_vars_file(self, path): + ''' + Loads and caches a group_vars file in the _group_vars_files dict, + where the key to that dictionary is the basename of the file, minus + the extension, for matching against a given inventory group name + ''' + + (name, data) = self._load_inventory_file(path) + self._group_vars_files[name] = data + diff --git a/v2/test/vars/__init__.py 
b/v2/test/vars/__init__.py new file mode 100644 index 0000000000..785fc45992 --- /dev/null +++ b/v2/test/vars/__init__.py @@ -0,0 +1,21 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + diff --git a/v2/test/vars/test_variable_manager.py b/v2/test/vars/test_variable_manager.py new file mode 100644 index 0000000000..63a80a7a1c --- /dev/null +++ b/v2/test/vars/test_variable_manager.py @@ -0,0 +1,131 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.compat.tests import unittest +from ansible.compat.tests.mock import patch, MagicMock + +from ansible.vars import VariableManager + +from test.mock.loader import DictDataLoader + +class TestVariableManager(unittest.TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_basic_manager(self): + v = VariableManager() + self.assertEqual(v.get_vars(), dict()) + + self.assertEqual( + v._merge_dicts( + dict(a=1), + dict(b=2) + ), dict(a=1, b=2) + ) + self.assertEqual( + v._merge_dicts( + dict(a=1, c=dict(foo='bar')), + dict(b=2, c=dict(baz='bam')) + ), dict(a=1, b=2, c=dict(foo='bar', baz='bam')) + ) + + + def test_manager_extra_vars(self): + extra_vars = dict(a=1, b=2, c=3) + v = VariableManager() + v.set_extra_vars(extra_vars) + + self.assertEqual(v.get_vars(), extra_vars) + self.assertIsNot(v.extra_vars, extra_vars) + + def test_manager_host_vars_file(self): + fake_loader = DictDataLoader({ + "host_vars/hostname1.yml": """ + foo: bar + """ + }) + + v = VariableManager(loader=fake_loader) + v.add_host_vars_file("host_vars/hostname1.yml") + self.assertIn("hostname1", v._host_vars_files) + self.assertEqual(v._host_vars_files["hostname1"], dict(foo="bar")) + + mock_host = MagicMock() + mock_host.get_name.return_value = "hostname1" + mock_host.get_vars.return_value = dict() + mock_host.get_groups.return_value = () + + self.assertEqual(v.get_vars(host=mock_host), dict(foo="bar")) + + def test_manager_group_vars_file(self): + fake_loader = DictDataLoader({ + "group_vars/somegroup.yml": """ + foo: bar + """ + }) + + v = VariableManager(loader=fake_loader) + v.add_group_vars_file("group_vars/somegroup.yml") + self.assertIn("somegroup", v._group_vars_files) + self.assertEqual(v._group_vars_files["somegroup"], dict(foo="bar")) + + mock_host = MagicMock() + mock_host.get_name.return_value = "hostname1" + 
mock_host.get_vars.return_value = dict() + mock_host.get_groups.return_value = ["somegroup"] + + self.assertEqual(v.get_vars(host=mock_host), dict(foo="bar")) + + def test_manager_play_vars(self): + mock_play = MagicMock() + mock_play.get_vars.return_value = dict(foo="bar") + mock_play.get_roles.return_value = [] + mock_play.get_vars_files.return_value = [] + + v = VariableManager() + self.assertEqual(v.get_vars(play=mock_play), dict(foo="bar")) + + def test_manager_play_vars_files(self): + fake_loader = DictDataLoader({ + "/path/to/somefile.yml": """ + foo: bar + """ + }) + + mock_play = MagicMock() + mock_play.get_vars.return_value = dict() + mock_play.get_roles.return_value = [] + mock_play.get_vars_files.return_value = ['/path/to/somefile.yml'] + + v = VariableManager(loader=fake_loader) + self.assertEqual(v.get_vars(play=mock_play), dict(foo="bar")) + + def test_manager_task_vars(self): + mock_task = MagicMock() + mock_task.get_vars.return_value = dict(foo="bar") + + v = VariableManager() + self.assertEqual(v.get_vars(task=mock_task), dict(foo="bar")) + From 9ae0fb5bdfc3531b02ad0436a46dba887972d7e1 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 3 Nov 2014 14:32:15 -0600 Subject: [PATCH 330/813] Make OSX fallback to paramiko more selective Only fallback to paramiko now when the ssh password has been set, either through inventory or via a prompt. Fixes #9470 --- lib/ansible/runner/__init__.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 1265f79efe..4ef6f0ceab 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -220,7 +220,10 @@ class Runner(object): # would prevent us from using ssh, and fallback to paramiko. 
# 'smart' is the default since 1.2.1/1.3 self.transport = "ssh" - if sys.platform.startswith('darwin'): + if sys.platform.startswith('darwin') and self.remote_pass: + # due to a current bug in sshpass on OSX, which can trigger + # a kernel panic even for non-privileged users, we revert to + # paramiko on that OS when a SSH password is specified self.transport = "paramiko" else: # see if SSH can support ControlPersist if not use paramiko From 650048f7dd06d6704255c0ae6abd7d22ac88dc07 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 3 Nov 2014 22:02:13 -0500 Subject: [PATCH 331/813] now displays subcategories correctly --- hacking/module_formatter.py | 101 +++++++++++++++++++++++++----------- hacking/templates/rst.j2 | 4 ++ 2 files changed, 76 insertions(+), 29 deletions(-) diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py index ee7ee45327..f182550aff 100755 --- a/hacking/module_formatter.py +++ b/hacking/module_formatter.py @@ -120,33 +120,52 @@ def write_data(text, options, outputname, module): ##################################################################################### -def list_modules(module_dir): +def list_modules(module_dir, depth=0): ''' returns a hash of categories, each category being a hash of module names to file paths ''' - categories = dict(all=dict()) - files = glob.glob("%s/*/*" % module_dir) - for d in files: - if os.path.isdir(d): - files2 = glob.glob("%s/*" % d) + glob.glob("%s/*/*" % d) - for f in files2: + categories = dict(all=dict(),_aliases=dict()) + if depth <= 3: # limit # of subdirs - module = os.path.splitext(os.path.basename(f))[0] - if os.path.dirname(f).split("/")[-2] == "cloud": - category = "cloud" + files = glob.glob("%s/*" % module_dir) + for d in files: + + category = os.path.splitext(os.path.basename(d))[0] + if os.path.isdir(d): + + res = list_modules(d, depth + 1) + for key in res.keys(): + if key in categories: + categories[key].update(res[key]) + res.pop(key, None) + + if depth < 2: + 
categories.update(res) else: - category = os.path.dirname(f).split("/")[-1] - - if not f.endswith(".py") or f.endswith('__init__.py'): + category = module_dir.split("/")[-1] + if not category in categories: + categories[category] = res + else: + categories[category].update(res) + else: + module = category + category = os.path.basename(module_dir) + if not d.endswith(".py") or d.endswith('__init__.py'): # windows powershell modules have documentation stubs in python docstring # format (they are not executed) so skip the ps1 format files continue - elif module.startswith("_") and os.path.islink(f): # ignores aliases + elif module.startswith("_") and os.path.islink(d): + source = os.path.splitext(os.path.basename(os.path.realpath(d)))[0] + module = module.replace("_","",1) + if not d in categories['_aliases']: + categories['_aliases'][source] = [module] + else: + categories['_aliases'][source].update(module) continue if not category in categories: categories[category] = {} - categories[category][module] = f - categories['all'][module] = f + categories[category][module] = d + categories['all'][module] = d return categories @@ -196,9 +215,12 @@ def jinja2_environment(template_dir, typ): ##################################################################################### -def process_module(module, options, env, template, outputname, module_map): +def process_module(module, options, env, template, outputname, module_map, aliases): fname = module_map[module] + if isinstance(fname, dict): + return "SKIPPED" + basename = os.path.basename(fname) deprecated = False @@ -233,6 +255,8 @@ def process_module(module, options, env, template, outputname, module_map): else: doc['core'] = False + if module in aliases: + doc['aliases'] = aliases[module] all_keys = [] @@ -274,10 +298,28 @@ def process_module(module, options, env, template, outputname, module_map): ##################################################################################### +def print_modules(module, 
category_file, deprecated, core, options, env, template, outputname, module_map, aliases): + modstring = module + modname = module + if module in deprecated: + modstring = modstring + DEPRECATED + modname = "_" + module + elif module not in core: + modstring = modstring + NOTCORE + + result = process_module(modname, options, env, template, outputname, module_map, aliases) + + if result != "SKIPPED": + category_file.write(" %s - %s <%s_module>\n" % (modstring, result, module)) + def process_category(category, categories, options, env, template, outputname): module_map = categories[category] + aliases = {} + if '_aliases' in categories: + aliases = categories['_aliases'] + category_file_path = os.path.join(options.output_dir, "list_of_%s_modules.rst" % category) category_file = open(category_file_path, "w") print "*** recording category %s in %s ***" % (category, category_file_path) @@ -312,21 +354,20 @@ def process_category(category, categories, options, env, template, outputname): .. toctree:: :maxdepth: 1 """ % (category_header, underscores)) - + sections = [] for module in modules: + if module in module_map and isinstance(module_map[module], dict): + sections.append(module) + continue + else: + print_modules(module, category_file, deprecated, core, options, env, template, outputname, module_map, aliases) - modstring = module - modname = module - if module in deprecated: - modstring = modstring + DEPRECATED - modname = "_" + module - elif module not in core: - modstring = modstring + NOTCORE + for section in sections: + category_file.write("%s/\n%s\n\n" % (section,'-' * len(section))) + category_file.write(".. 
toctree:: :maxdepth: 1\n\n") - result = process_module(modname, options, env, template, outputname, module_map) - - if result != "SKIPPED": - category_file.write(" %s - %s <%s_module>\n" % (modstring, result, module)) + for module in module_map[section]: + print_modules(module, category_file, deprecated, core, options, env, template, outputname, module_map[section], aliases) category_file.write("""\n\n .. note:: @@ -377,6 +418,8 @@ def main(): category_list_file.write(" :maxdepth: 1\n\n") for category in category_names: + if category.startswith("_"): + continue category_list_file.write(" list_of_%s_modules\n" % category) process_category(category, categories, options, env, template, outputname) diff --git a/hacking/templates/rst.j2 b/hacking/templates/rst.j2 index 1d55a0452b..232d97a731 100644 --- a/hacking/templates/rst.j2 +++ b/hacking/templates/rst.j2 @@ -21,6 +21,10 @@ # --------------------------------------------#} +{% if aliases is defined -%} +Aliases: @{ ','.join(aliases) }@ +{% endif %} + {% if deprecated is defined -%} DEPRECATED ---------- From 5f1ad79cd30ae0069ce4dcb449763e15677a24b1 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 3 Nov 2014 23:14:22 -0500 Subject: [PATCH 332/813] now correctly flags and sorts subcategory modules --- hacking/module_formatter.py | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py index f182550aff..03b8827d48 100755 --- a/hacking/module_formatter.py +++ b/hacking/module_formatter.py @@ -334,11 +334,19 @@ def process_category(category, categories, options, env, template, outputname): core = [] for module in module_map.keys(): - if module.startswith("_"): - module = module.replace("_","",1) - deprecated.append(module) - elif '/core/' in module_map[module]: - core.append(module) + if isinstance(module_map[module], dict): + for mod in module_map[module].keys(): + if mod.startswith("_"): + mod = mod.replace("_","",1) + 
deprecated.append(mod) + elif '/core/' in module_map[module][mod]: + core.append(mod) + else: + if module.startswith("_"): + module = module.replace("_","",1) + deprecated.append(module) + elif '/core/' in module_map[module]: + core.append(module) modules.append(module) @@ -362,11 +370,15 @@ def process_category(category, categories, options, env, template, outputname): else: print_modules(module, category_file, deprecated, core, options, env, template, outputname, module_map, aliases) + sections.sort() for section in sections: - category_file.write("%s/\n%s\n\n" % (section,'-' * len(section))) + category_file.write("%s\n%s\n\n" % (section,'-' * len(section))) category_file.write(".. toctree:: :maxdepth: 1\n\n") - for module in module_map[section]: + section_modules = module_map[section].keys() + section_modules.sort() + #for module in module_map[section]: + for module in section_modules: print_modules(module, category_file, deprecated, core, options, env, template, outputname, module_map[section], aliases) category_file.write("""\n\n From 12393a4b47f05fbf384ab7bb3bd7afa2fcf0b930 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 4 Nov 2014 08:44:39 -0500 Subject: [PATCH 333/813] subcategories are now Title case and _ gets changed to a space --- hacking/module_formatter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py index 03b8827d48..73729da4d6 100755 --- a/hacking/module_formatter.py +++ b/hacking/module_formatter.py @@ -372,7 +372,7 @@ def process_category(category, categories, options, env, template, outputname): sections.sort() for section in sections: - category_file.write("%s\n%s\n\n" % (section,'-' * len(section))) + category_file.write("%s\n%s\n\n" % (section.replace("_"," ").title(),'-' * len(section))) category_file.write(".. 
toctree:: :maxdepth: 1\n\n") section_modules = module_map[section].keys() From c1fc0ca4fd1b3414b2745b303e8afdd5325f198a Mon Sep 17 00:00:00 2001 From: cipress Date: Tue, 4 Nov 2014 17:38:08 +0100 Subject: [PATCH 334/813] Found an issue on a different system architecture. It doesn't work on x86 systems, so starting at line 63 we check whether the architecture is x86 or x64. --- examples/scripts/upgrade_to_ps3.ps1 | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/examples/scripts/upgrade_to_ps3.ps1 b/examples/scripts/upgrade_to_ps3.ps1 index 693088b75f..c9f55267e4 100644 --- a/examples/scripts/upgrade_to_ps3.ps1 +++ b/examples/scripts/upgrade_to_ps3.ps1 @@ -62,13 +62,24 @@ if ([Environment]::OSVersion.Version.Major -gt 6) $osminor = [environment]::OSVersion.Version.Minor +$architecture = $ENV:PROCESSOR_ARCHITECTURE + +if ($architecture -eq "AMD64") +{ + $architecture = "x64" +} +else +{ + $architecture = "x86" +} + if ($osminor -eq 1) { - $DownloadUrl = "http://download.microsoft.com/download/E/7/6/E76850B8-DA6E-4FF5-8CCE-A24FC513FD16/Windows6.1-KB2506143-x64.msu" + $DownloadUrl = "http://download.microsoft.com/download/E/7/6/E76850B8-DA6E-4FF5-8CCE-A24FC513FD16/Windows6.1-KB2506143-" + $architecture + ".msu" } elseif ($osminor -eq 0) { - $DownloadUrl = "http://download.microsoft.com/download/E/7/6/E76850B8-DA6E-4FF5-8CCE-A24FC513FD16/Windows6.0-KB2506146-x64.msu" + $DownloadUrl = "http://download.microsoft.com/download/E/7/6/E76850B8-DA6E-4FF5-8CCE-A24FC513FD16/Windows6.0-KB2506146-" + $architecture + ".msu" } else { From 0ed9746db393bb169dceb3ead5912305b7d8e2af Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 4 Nov 2014 15:16:11 -0600 Subject: [PATCH 335/813] Adding Play class for v2 --- v2/ansible/parsing/mod_args.py | 2 +- v2/ansible/playbook/block.py | 23 ++--- v2/ansible/playbook/helpers.py | 76 +++++++++++++++ v2/ansible/playbook/play.py | 137 +++++++++++++++++++++++++++ v2/ansible/playbook/role/__init__.py | 23 +---- 
v2/ansible/playbook/role/metadata.py | 13 +-- v2/test/playbook/test_block.py | 10 -- v2/test/playbook/test_play.py | 120 +++++++++++++++++++++++ 8 files changed, 349 insertions(+), 55 deletions(-) create mode 100644 v2/ansible/playbook/helpers.py create mode 100644 v2/test/playbook/test_play.py diff --git a/v2/ansible/parsing/mod_args.py b/v2/ansible/parsing/mod_args.py index 5e7c4225df..7f4f42bddd 100644 --- a/v2/ansible/parsing/mod_args.py +++ b/v2/ansible/parsing/mod_args.py @@ -190,7 +190,7 @@ class ModuleArgsParser: task, dealing with all sorts of levels of fuzziness. ''' - assert type(ds) == dict + assert isinstance(ds, dict) thing = None diff --git a/v2/ansible/playbook/block.py b/v2/ansible/playbook/block.py index 5f21cdaf60..cc5ccacc40 100644 --- a/v2/ansible/playbook/block.py +++ b/v2/ansible/playbook/block.py @@ -19,9 +19,9 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from ansible.playbook.base import Base -from ansible.playbook.task import Task from ansible.playbook.attribute import Attribute, FieldAttribute +from ansible.playbook.base import Base +from ansible.playbook.helpers import load_list_of_tasks class Block(Base): @@ -60,25 +60,20 @@ class Block(Base): is_block = True break if not is_block: - return dict(block=ds) + if isinstance(ds, list): + return dict(block=ds) + else: + return dict(block=[ds]) return ds - def _load_list_of_tasks(self, ds): - assert type(ds) == list - task_list = [] - for task in ds: - t = Task.load(task) - task_list.append(t) - return task_list - def _load_block(self, attr, ds): - return self._load_list_of_tasks(ds) + return load_list_of_tasks(ds) def _load_rescue(self, attr, ds): - return self._load_list_of_tasks(ds) + return load_list_of_tasks(ds) def _load_always(self, attr, ds): - return self._load_list_of_tasks(ds) + return load_list_of_tasks(ds) # not currently used #def _load_otherwise(self, attr, ds): diff --git a/v2/ansible/playbook/helpers.py 
b/v2/ansible/playbook/helpers.py new file mode 100644 index 0000000000..6985ad7808 --- /dev/null +++ b/v2/ansible/playbook/helpers.py @@ -0,0 +1,76 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from types import NoneType + + +def load_list_of_blocks(ds, role=None, loader=None): + ''' + Given a list of mixed task/block data (parsed from YAML), + return a list of Block() objects, where implicit blocks + are created for each bare Task. + ''' + + # we import here to prevent a circular dependency with imports + from ansible.playbook.block import Block + + assert type(ds) in (list, NoneType) + + block_list = [] + if ds: + for block in ds: + b = Block.load(block, role=role, loader=loader) + block_list.append(b) + + return block_list + +def load_list_of_tasks(ds, block=None, role=None, loader=None): + ''' + Given a list of task datastructures (parsed from YAML), + return a list of Task() objects. 
+ ''' + + # we import here to prevent a circular dependency with imports + from ansible.playbook.task import Task + + assert type(ds) == list + + task_list = [] + for task in ds: + t = Task.load(task, block=block, role=role, loader=loader) + task_list.append(t) + + return task_list + +def load_list_of_roles(ds, loader=None): + ''' + Loads and returns a list of RoleInclude objects from the datastructure + list of role definitions + ''' + + # we import here to prevent a circular dependency with imports + from ansible.playbook.role.include import RoleInclude + + assert isinstance(ds, list) + + roles = [] + for role_def in ds: + i = RoleInclude.load(role_def, loader=loader) + roles.append(i) + + return roles + diff --git a/v2/ansible/playbook/play.py b/v2/ansible/playbook/play.py index ae8ccff595..3c8a4bcb87 100644 --- a/v2/ansible/playbook/play.py +++ b/v2/ansible/playbook/play.py @@ -18,3 +18,140 @@ # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type + +from ansible.errors import AnsibleError, AnsibleParserError + +from ansible.parsing.yaml import DataLoader + +from ansible.playbook.attribute import Attribute, FieldAttribute +from ansible.playbook.base import Base +from ansible.playbook.helpers import load_list_of_blocks, load_list_of_roles + + +__all__ = ['Play'] + + +class Play(Base): + + """ + A play is a language feature that represents a list of roles and/or + task/handler blocks to execute on a given set of hosts. + + Usage: + + Play.load(datastructure) -> Play + Play.something(...) 
+ """ + + # ================================================================================= + # Connection-Related Attributes + _accelerate = FieldAttribute(isa='bool', default=False) + _accelerate_ipv6 = FieldAttribute(isa='bool', default=False) + _accelerate_port = FieldAttribute(isa='int', default=5099) + _connection = FieldAttribute(isa='string', default='smart') + _gather_facts = FieldAttribute(isa='string', default='smart') + _hosts = FieldAttribute(isa='list', default=[]) + _name = FieldAttribute(isa='string', default='') + _port = FieldAttribute(isa='int', default=22) + _remote_user = FieldAttribute(isa='string', default='root') + _su = FieldAttribute(isa='bool', default=False) + _su_user = FieldAttribute(isa='string', default='root') + _sudo = FieldAttribute(isa='bool', default=False) + _sudo_user = FieldAttribute(isa='string', default='root') + _tags = FieldAttribute(isa='list', default=[]) + + # Variable Attributes + _vars = FieldAttribute(isa='dict', default=dict()) + _vars_files = FieldAttribute(isa='list', default=[]) + _vars_prompt = FieldAttribute(isa='dict', default=dict()) + _vault_password = FieldAttribute(isa='string') + + # Block (Task) Lists Attributes + _handlers = FieldAttribute(isa='list', default=[]) + _pre_tasks = FieldAttribute(isa='list', default=[]) + _post_tasks = FieldAttribute(isa='list', default=[]) + _tasks = FieldAttribute(isa='list', default=[]) + + # Role Attributes + _roles = FieldAttribute(isa='list', default=[]) + + # Flag/Setting Attributes + _any_errors_fatal = FieldAttribute(isa='bool', default=False) + _max_fail_percentage = FieldAttribute(isa='string', default='0') + _no_log = FieldAttribute(isa='bool', default=False) + _serial = FieldAttribute(isa='int', default=0) + + # ================================================================================= + + def __init__(self): + super(Play, self).__init__() + + def __repr__(self): + return self.get_name() + + def get_name(self): + ''' return the name of the Play ''' + 
return "PLAY: %s" % self._attributes.get('name') + + @staticmethod + def load(data, loader=None): + p = Play() + return p.load_data(data, loader=loader) + + def munge(self, ds): + ''' + Adjusts play datastructure to cleanup old/legacy items + ''' + + assert isinstance(ds, dict) + + # The use of 'user' in the Play datastructure was deprecated to + # line up with the same change for Tasks, due to the fact that + # 'user' conflicted with the user module. + if 'user' in ds: + # this should never happen, but error out with a helpful message + # to the user if it does... + if 'remote_user' in ds: + raise AnsibleParserError("both 'user' and 'remote_user' are set for %s. The use of 'user' is deprecated, and should be removed" % self.get_name(), obj=ds) + + ds['remote_user'] = ds['user'] + del ds['user'] + + return ds + + def _load_tasks(self, attr, ds): + ''' + Loads a list of blocks from a list which may be mixed tasks/blocks. + Bare tasks outside of a block are given an implicit block. + ''' + return load_list_of_blocks(ds) + + def _load_pre_tasks(self, attr, ds): + ''' + Loads a list of blocks from a list which may be mixed tasks/blocks. + Bare tasks outside of a block are given an implicit block. + ''' + return load_list_of_blocks(ds) + + def _load_post_tasks(self, attr, ds): + ''' + Loads a list of blocks from a list which may be mixed tasks/blocks. + Bare tasks outside of a block are given an implicit block. + ''' + return load_list_of_blocks(ds) + + def _load_handlers(self, attr, ds): + ''' + Loads a list of blocks from a list which may be mixed handlers/blocks. + Bare handlers outside of a block are given an implicit block. 
+ ''' + return load_list_of_blocks(ds) + + def _load_roles(self, attr, ds): + ''' + Loads and returns a list of RoleInclude objects from the datastructure + list of role definitions + ''' + return load_list_of_roles(ds, loader=self._loader) + + # FIXME: post_validation needs to ensure that su/sudo are not both set diff --git a/v2/ansible/playbook/role/__init__.py b/v2/ansible/playbook/role/__init__.py index ed7355f921..4950e944d3 100644 --- a/v2/ansible/playbook/role/__init__.py +++ b/v2/ansible/playbook/role/__init__.py @@ -30,7 +30,7 @@ from ansible.errors import AnsibleError, AnsibleParserError from ansible.parsing.yaml import DataLoader from ansible.playbook.attribute import FieldAttribute from ansible.playbook.base import Base -from ansible.playbook.block import Block +from ansible.playbook.helpers import load_list_of_blocks from ansible.playbook.role.include import RoleInclude from ansible.playbook.role.metadata import RoleMetadata @@ -95,11 +95,11 @@ class Role: task_data = self._load_role_yaml('tasks') if task_data: - self._task_blocks = self._load_list_of_blocks(task_data) + self._task_blocks = load_list_of_blocks(task_data) handler_data = self._load_role_yaml('handlers') if handler_data: - self._handler_blocks = self._load_list_of_blocks(handler_data) + self._handler_blocks = load_list_of_blocks(handler_data) # vars and default vars are regular dictionaries self._role_vars = self._load_role_yaml('vars') @@ -135,23 +135,6 @@ class Role: return m # exactly one main file return possible_mains[0] # zero mains (we still need to return something) - def _load_list_of_blocks(self, ds): - ''' - Given a list of mixed task/block data (parsed from YAML), - return a list of Block() objects, where implicit blocks - are created for each bare Task. 
- ''' - - assert type(ds) in (list, NoneType) - - block_list = [] - if ds: - for block in ds: - b = Block(block) - block_list.append(b) - - return block_list - def _load_dependencies(self): ''' Recursively loads role dependencies from the metadata list of diff --git a/v2/ansible/playbook/role/metadata.py b/v2/ansible/playbook/role/metadata.py index 485e3da59f..19b0f01f62 100644 --- a/v2/ansible/playbook/role/metadata.py +++ b/v2/ansible/playbook/role/metadata.py @@ -24,6 +24,7 @@ from six import iteritems, string_types from ansible.errors import AnsibleParserError from ansible.playbook.attribute import Attribute, FieldAttribute from ansible.playbook.base import Base +from ansible.playbook.helpers import load_list_of_roles from ansible.playbook.role.include import RoleInclude @@ -58,18 +59,10 @@ class RoleMetadata(Base): def _load_dependencies(self, attr, ds): ''' - This is a helper loading function for the dependencis list, + This is a helper loading function for the dependencies list, which returns a list of RoleInclude objects ''' - - assert isinstance(ds, list) - - deps = [] - for role_def in ds: - i = RoleInclude.load(role_def, loader=self._loader) - deps.append(i) - - return deps + return load_list_of_roles(ds, loader=self._loader) def _load_galaxy_info(self, attr, ds): ''' diff --git a/v2/test/playbook/test_block.py b/v2/test/playbook/test_block.py index ccb8f2b6d3..348681527b 100644 --- a/v2/test/playbook/test_block.py +++ b/v2/test/playbook/test_block.py @@ -37,16 +37,6 @@ class TestBlock(unittest.TestCase): def test_construct_block_with_role(self): pass - def test_block__load_list_of_tasks(self): - task = dict(action='test') - b = Block() - self.assertEqual(b._load_list_of_tasks([]), []) - res = b._load_list_of_tasks([task]) - self.assertEqual(len(res), 1) - assert isinstance(res[0], Task) - res = b._load_list_of_tasks([task,task,task]) - self.assertEqual(len(res), 3) - def test_load_block_simple(self): ds = dict( block = [], diff --git 
a/v2/test/playbook/test_play.py b/v2/test/playbook/test_play.py new file mode 100644 index 0000000000..14732a1f9f --- /dev/null +++ b/v2/test/playbook/test_play.py @@ -0,0 +1,120 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.compat.tests import unittest +from ansible.compat.tests.mock import patch, MagicMock + +from ansible.errors import AnsibleError, AnsibleParserError +from ansible.playbook.play import Play +from ansible.playbook.role import Role +from ansible.playbook.task import Task + +from test.mock.loader import DictDataLoader + +class TestPlay(unittest.TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_empty_play(self): + p = Play.load(dict()) + self.assertEqual(str(p), "PLAY: ") + + def test_basic_play(self): + p = Play.load(dict( + name="test play", + hosts=['foo'], + gather_facts=False, + connection='local', + remote_user="root", + sudo=True, + sudo_user="testing", + )) + + def test_play_with_user_conflict(self): + p = Play.load(dict( + name="test play", + hosts=['foo'], + user="testing", + gather_facts=False, + )) + self.assertEqual(p.remote_user, "testing") + + def test_play_with_user_conflict(self): + play_data = dict( + name="test play", + hosts=['foo'], + 
user="testing", + remote_user="testing", + ) + self.assertRaises(AnsibleParserError, Play.load, play_data) + + def test_play_with_tasks(self): + p = Play.load(dict( + name="test play", + hosts=['foo'], + gather_facts=False, + tasks=[dict(action='shell echo "hello world"')], + )) + + def test_play_with_handlers(self): + p = Play.load(dict( + name="test play", + hosts=['foo'], + gather_facts=False, + handlers=[dict(action='shell echo "hello world"')], + )) + + def test_play_with_pre_tasks(self): + p = Play.load(dict( + name="test play", + hosts=['foo'], + gather_facts=False, + pre_tasks=[dict(action='shell echo "hello world"')], + )) + + def test_play_with_post_tasks(self): + p = Play.load(dict( + name="test play", + hosts=['foo'], + gather_facts=False, + post_tasks=[dict(action='shell echo "hello world"')], + )) + + def test_play_with_roles(self): + fake_loader = DictDataLoader({ + '/etc/ansible/roles/foo/tasks.yml': """ + - name: role task + shell: echo "hello world" + """, + }) + + p = Play.load(dict( + name="test play", + hosts=['foo'], + gather_facts=False, + roles=['foo'], + ), loader=fake_loader) + + From 055d460d9777df4337279dddbee507813e3e5171 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Tue, 4 Nov 2014 16:48:44 -0500 Subject: [PATCH 336/813] Add ebook link to docsite. --- docsite/_themes/srtd/layout.html | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/docsite/_themes/srtd/layout.html b/docsite/_themes/srtd/layout.html index 1073cad40e..460b259794 100644 --- a/docsite/_themes/srtd/layout.html +++ b/docsite/_themes/srtd/layout.html @@ -177,15 +177,17 @@
- -
- - - -
 
-
 
-
- + +
+ + + + + + +
 
+
 
+
{% include "breadcrumbs.html" %}
From c551fe8b502a058ed23a2820844879c07a5b5ccc Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Tue, 4 Nov 2014 17:38:02 -0500 Subject: [PATCH 337/813] Clarify module list footer. --- hacking/module_formatter.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py index 73729da4d6..d868156ef2 100755 --- a/hacking/module_formatter.py +++ b/hacking/module_formatter.py @@ -383,8 +383,9 @@ def process_category(category, categories, options, env, template, outputname): category_file.write("""\n\n .. note:: - - %s: This marks a module as deprecated, kept for backwards compatibility but use is discouraged - - %s: Denotes that this module is not part of core, it can be found in the extras or some other external repo + - %s: This marks a module as deprecated, which means a module is kept for backwards compatibility but usage is discouraged. The module documentation details page may explain more about this rationale. + - %s: This marks a module as 'extras', which means it ships with ansible but may be a newer module and possibly (but not neccessarily) less activity maintained than 'core' modules. + - Tickets filed on modules are filed to different repos than those on the main open source project. Core module tickets should be filed at `ansible/ansible-modules-core on GitHub `_, extras tickets to `ansible/ansible-modules-extras on GitHub `_ """ % (DEPRECATED, NOTCORE)) category_file.close() From d3da2edfe32e7adba124f2a9c9ce4d109c6d7305 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Tue, 4 Nov 2014 17:56:27 -0500 Subject: [PATCH 338/813] Update submodule pointers for new docs org. 
--- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 63e81cfc2e..cec519f70e 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 63e81cfc2e0c3c07245342cd41a0ba147eac55be +Subproject commit cec519f70e96f801c3a5243e96f69fe343cba0dc diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index a0df36c6ab..7e6fc7023d 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit a0df36c6ab257281cbaae00b8a4590200802f571 +Subproject commit 7e6fc7023d956d4c33d8596662e01f2678d35f58 From 2ba5c3c66bb68659df79c430e7d5e2cf1f89aad1 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 4 Nov 2014 18:14:30 -0500 Subject: [PATCH 339/813] added blank line before section to avoid sphinx warnings --- hacking/module_formatter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py index d868156ef2..73c3045479 100755 --- a/hacking/module_formatter.py +++ b/hacking/module_formatter.py @@ -372,7 +372,7 @@ def process_category(category, categories, options, env, template, outputname): sections.sort() for section in sections: - category_file.write("%s\n%s\n\n" % (section.replace("_"," ").title(),'-' * len(section))) + category_file.write("\n%s\n%s\n\n" % (section.replace("_"," ").title(),'-' * len(section))) category_file.write(".. 
toctree:: :maxdepth: 1\n\n") section_modules = module_map[section].keys() From 339d1ccc8b7f89f5580e755b1f8bcc703951fba5 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 4 Nov 2014 20:54:55 -0500 Subject: [PATCH 340/813] fixed issue with subclasses across the repos clobbering each other, they now merge --- hacking/module_formatter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py index 73c3045479..04f098fc98 100755 --- a/hacking/module_formatter.py +++ b/hacking/module_formatter.py @@ -135,7 +135,7 @@ def list_modules(module_dir, depth=0): res = list_modules(d, depth + 1) for key in res.keys(): if key in categories: - categories[key].update(res[key]) + categories[key] = ansible.utils.merge_hash(categories[key], res[key]) res.pop(key, None) if depth < 2: From 96a97e94145a26a969694436fce088f2d4548620 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 5 Nov 2014 11:22:25 -0500 Subject: [PATCH 341/813] updated ref to core with updated cloud sublcases with __init__.py --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index cec519f70e..488ac4cbdb 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit cec519f70e96f801c3a5243e96f69fe343cba0dc +Subproject commit 488ac4cbdb769d5b9598e7d4c39582a6eda72bc1 From 7ac52bb601ff5d712a306cf7a115fb8fd87a6547 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 5 Nov 2014 15:54:25 -0500 Subject: [PATCH 342/813] updated to latest core module, another init/packing issue, should be last --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 488ac4cbdb..b0a4a6dbe2 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 488ac4cbdb769d5b9598e7d4c39582a6eda72bc1 +Subproject commit 
b0a4a6dbe275735bb0910ea34486237065b54f59 From cbad867f24fd56c9a98c4cf85cc4447ccfa74066 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Wed, 5 Nov 2014 16:15:42 -0500 Subject: [PATCH 343/813] Submodule update for docs reorg of module subcategories. --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index b0a4a6dbe2..2970b339eb 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit b0a4a6dbe275735bb0910ea34486237065b54f59 +Subproject commit 2970b339eb8ea6031e6153cabe45459bc2bd5754 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 7e6fc7023d..ad181b7aa9 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 7e6fc7023d956d4c33d8596662e01f2678d35f58 +Subproject commit ad181b7aa949848e3085065e09195cb28c34fdf7 From d1cc49fc558b37887c467e0ac16fdec13e7c4005 Mon Sep 17 00:00:00 2001 From: Alois Mahdal Date: Thu, 6 Nov 2014 02:38:21 +0100 Subject: [PATCH 344/813] Fix note about video length Perhaps due to update mentioned at /resources page, the video is 34 minutes long. --- docsite/rst/quickstart.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/quickstart.rst b/docsite/rst/quickstart.rst index 3d2eaca94f..161748d9f0 100644 --- a/docsite/rst/quickstart.rst +++ b/docsite/rst/quickstart.rst @@ -3,7 +3,7 @@ Quickstart Video We've recorded a short video that shows how to get started with Ansible that you may like to use alongside the documentation. -The `quickstart video `_ is about 20 minutes long and will show you some of the basics about your +The `quickstart video `_ is about 30 minutes long and will show you some of the basics about your first steps with Ansible. Enjoy, and be sure to visit the rest of the documentation to learn more. 
From 229d49fe36a03d077cc9276e19d4acb9b5965e97 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 5 Nov 2014 08:00:00 -0600 Subject: [PATCH 345/813] Adding new playbook objects for v2 * Playbook * TaskInclude --- v2/ansible/errors/__init__.py | 7 +- v2/ansible/parsing/mod_args.py | 29 ++- v2/ansible/parsing/yaml/__init__.py | 35 ++- v2/ansible/parsing/yaml/constructor.py | 15 +- v2/ansible/parsing/yaml/loader.py | 4 +- v2/ansible/parsing/yaml/objects.py | 4 +- v2/ansible/parsing/yaml/strings.py | 4 +- v2/ansible/playbook/__init__.py | 62 +++++- v2/ansible/playbook/base.py | 2 +- v2/ansible/playbook/block.py | 8 +- v2/ansible/playbook/helpers.py | 17 +- v2/ansible/playbook/play.py | 8 +- v2/ansible/playbook/role/__init__.py | 4 +- v2/ansible/playbook/task.py | 32 +-- v2/ansible/playbook/task_include.py | 125 +++++++++++ v2/ansible/plugins/lookup/csvfile.py | 82 +++++++ v2/ansible/plugins/lookup/dict.py | 39 ++++ v2/ansible/plugins/lookup/dnstxt.py | 68 ++++++ v2/ansible/plugins/lookup/env.py | 41 ++++ v2/ansible/plugins/lookup/etcd.py | 78 +++++++ v2/ansible/plugins/lookup/file.py | 59 +++++ v2/ansible/plugins/lookup/fileglob.py | 39 ++++ v2/ansible/plugins/lookup/first_found.py | 194 +++++++++++++++++ v2/ansible/plugins/lookup/flattened.py | 78 +++++++ v2/ansible/plugins/lookup/indexed_items.py | 44 ++++ .../plugins/lookup/inventory_hostnames.py | 48 +++++ v2/ansible/plugins/lookup/items.py | 44 ++++ v2/ansible/plugins/lookup/lines.py | 38 ++++ v2/ansible/plugins/lookup/nested.py | 73 +++++++ v2/ansible/plugins/lookup/password.py | 129 +++++++++++ v2/ansible/plugins/lookup/pipe.py | 52 +++++ v2/ansible/plugins/lookup/random_choice.py | 41 ++++ v2/ansible/plugins/lookup/redis_kv.py | 72 +++++++ v2/ansible/plugins/lookup/sequence.py | 204 ++++++++++++++++++ v2/ansible/plugins/lookup/subelements.py | 67 ++++++ v2/ansible/plugins/lookup/template.py | 33 +++ v2/ansible/plugins/lookup/together.py | 64 ++++++ v2/test/errors/test_errors.py | 6 +- 
v2/test/parsing/test_mod_args.py | 41 ++-- v2/test/playbook/test_playbook.py | 65 ++++++ v2/test/playbook/test_task_include.py | 63 ++++++ v2/test/plugins/test_plugins.py | 4 - 42 files changed, 2041 insertions(+), 81 deletions(-) create mode 100644 v2/ansible/plugins/lookup/csvfile.py create mode 100644 v2/ansible/plugins/lookup/dict.py create mode 100644 v2/ansible/plugins/lookup/dnstxt.py create mode 100644 v2/ansible/plugins/lookup/env.py create mode 100644 v2/ansible/plugins/lookup/etcd.py create mode 100644 v2/ansible/plugins/lookup/file.py create mode 100644 v2/ansible/plugins/lookup/fileglob.py create mode 100644 v2/ansible/plugins/lookup/first_found.py create mode 100644 v2/ansible/plugins/lookup/flattened.py create mode 100644 v2/ansible/plugins/lookup/indexed_items.py create mode 100644 v2/ansible/plugins/lookup/inventory_hostnames.py create mode 100644 v2/ansible/plugins/lookup/items.py create mode 100644 v2/ansible/plugins/lookup/lines.py create mode 100644 v2/ansible/plugins/lookup/nested.py create mode 100644 v2/ansible/plugins/lookup/password.py create mode 100644 v2/ansible/plugins/lookup/pipe.py create mode 100644 v2/ansible/plugins/lookup/random_choice.py create mode 100644 v2/ansible/plugins/lookup/redis_kv.py create mode 100644 v2/ansible/plugins/lookup/sequence.py create mode 100644 v2/ansible/plugins/lookup/subelements.py create mode 100644 v2/ansible/plugins/lookup/template.py create mode 100644 v2/ansible/plugins/lookup/together.py create mode 100644 v2/test/playbook/test_playbook.py create mode 100644 v2/test/playbook/test_task_include.py diff --git a/v2/ansible/errors/__init__.py b/v2/ansible/errors/__init__.py index e0c21d195b..d4d93d0e4f 100644 --- a/v2/ansible/errors/__init__.py +++ b/v2/ansible/errors/__init__.py @@ -48,10 +48,13 @@ class AnsibleError(Exception): if isinstance(self._obj, AnsibleBaseYAMLObject): extended_error = self._get_extended_error() if extended_error: - self.message = '%s\n%s' % (message, extended_error) + 
self.message = '%s\n\n%s' % (message, extended_error) else: self.message = message + def __str__(self): + return self.message + def __repr__(self): return self.message @@ -129,7 +132,7 @@ class AnsibleError(Exception): if unbalanced: error_message += YAML_COMMON_UNBALANCED_QUOTES_ERROR - except IOError: + except (IOError, TypeError): error_message += '\n(could not open file to display line)' except IndexError: error_message += '\n(specified line no longer in file, maybe it changed?)' diff --git a/v2/ansible/parsing/mod_args.py b/v2/ansible/parsing/mod_args.py index 7f4f42bddd..0bb1c3fa2b 100644 --- a/v2/ansible/parsing/mod_args.py +++ b/v2/ansible/parsing/mod_args.py @@ -63,8 +63,9 @@ class ModuleArgsParser: Args may also be munged for certain shell command parameters. """ - def __init__(self, task=None): - self._task = task + def __init__(self, task_ds=dict()): + assert isinstance(task_ds, dict) + self._task_ds = task_ds def _split_module_string(self, str): @@ -144,7 +145,7 @@ class ModuleArgsParser: # form is like: local_action: copy src=a dest=b ... pretty common args = parse_kv(thing) else: - raise AnsibleParsingError("unexpected parameter type in action: %s" % type(thing), obj=self._task) + raise AnsibleParsingError("unexpected parameter type in action: %s" % type(thing), obj=self._task_ds) return args def _normalize_new_style_args(self, thing): @@ -179,19 +180,17 @@ class ModuleArgsParser: else: # need a dict or a string, so giving up - raise AnsibleParsingError("unexpected parameter type in action: %s" % type(thing), obj=self._task) + raise AnsibleParsingError("unexpected parameter type in action: %s" % type(thing), obj=self._task_ds) return (action, args) - def parse(self, ds): + def parse(self): ''' Given a task in one of the supported forms, parses and returns returns the action, arguments, and delegate_to values for the task, dealing with all sorts of levels of fuzziness. 
''' - assert isinstance(ds, dict) - thing = None action = None @@ -204,38 +203,38 @@ class ModuleArgsParser: # # action - if 'action' in ds: + if 'action' in self._task_ds: # an old school 'action' statement - thing = ds['action'] + thing = self._task_ds['action'] delegate_to = None action, args = self._normalize_parameters(thing) # local_action - if 'local_action' in ds: + if 'local_action' in self._task_ds: # local_action is similar but also implies a delegate_to if action is not None: - raise AnsibleParserError("action and local_action are mutually exclusive", obj=self._task) - thing = ds.get('local_action', '') + raise AnsibleParserError("action and local_action are mutually exclusive", obj=self._task_ds) + thing = self._task_ds.get('local_action', '') delegate_to = 'localhost' action, args = self._normalize_parameters(thing) # module: is the more new-style invocation # walk the input dictionary to see we recognize a module name - for (item, value) in iteritems(ds): + for (item, value) in iteritems(self._task_ds): if item in module_finder: # finding more than one module name is a problem if action is not None: - raise AnsibleParserError("conflicting action statements", obj=self._task) + raise AnsibleParserError("conflicting action statements", obj=self._task_ds) action = item thing = value action, args = self._normalize_parameters(value, action=action) # if we didn't see any module in the task at all, it's not a task really if action is None: - raise AnsibleParserError("no action detected in task", obj=self._task) + raise AnsibleParserError("no action detected in task", obj=self._task_ds) # shell modules require special handling (action, args) = self._handle_shell_weirdness(action, args) diff --git a/v2/ansible/parsing/yaml/__init__.py b/v2/ansible/parsing/yaml/__init__.py index 969fd2a3b5..4273abee53 100644 --- a/v2/ansible/parsing/yaml/__init__.py +++ b/v2/ansible/parsing/yaml/__init__.py @@ -27,6 +27,7 @@ from yaml import load, YAMLError from ansible.errors 
import AnsibleParserError from ansible.parsing.vault import VaultLib +from ansible.parsing.splitter import unquote from ansible.parsing.yaml.loader import AnsibleLoader from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject from ansible.parsing.yaml.strings import YAML_SYNTAX_ERROR @@ -55,6 +56,7 @@ class DataLoader(): _FILE_CACHE = dict() def __init__(self, vault_password=None): + self._basedir = '.' self._vault = VaultLib(password=vault_password) def load(self, data, file_name='', show_content=True): @@ -70,13 +72,15 @@ class DataLoader(): try: # if loading JSON failed for any reason, we go ahead # and try to parse it as YAML instead - return self._safe_load(data) + return self._safe_load(data, file_name=file_name) except YAMLError as yaml_exc: self._handle_error(yaml_exc, file_name, show_content) def load_from_file(self, file_name): ''' Loads data from a file, which can contain either JSON or YAML. ''' + file_name = self.path_dwim(file_name) + # if the file has already been read in and cached, we'll # return those results to avoid more file/vault operations if file_name in self._FILE_CACHE: @@ -100,9 +104,14 @@ class DataLoader(): def is_file(self, path): return os.path.isfile(path) - def _safe_load(self, stream): + def _safe_load(self, stream, file_name=None): ''' Implements yaml.safe_load(), except using our custom loader class. ''' - return load(stream, AnsibleLoader) + + loader = AnsibleLoader(stream, file_name) + try: + return loader.get_single_data() + finally: + loader.dispose() def _get_file_contents(self, file_name): ''' @@ -139,3 +148,23 @@ class DataLoader(): raise AnsibleParserError(YAML_SYNTAX_ERROR, obj=err_obj, show_content=show_content) + def set_basedir(self, basedir): + ''' sets the base directory, used to find files when a relative path is given ''' + + if basedir is not None: + self._basedir = basedir + + def path_dwim(self, given): + ''' + make relative paths work like folks expect. 
+ ''' + + given = unquote(given) + + if given.startswith("/"): + return os.path.abspath(given) + elif given.startswith("~"): + return os.path.abspath(os.path.expanduser(given)) + else: + return os.path.abspath(os.path.join(self._basedir, given)) + diff --git a/v2/ansible/parsing/yaml/constructor.py b/v2/ansible/parsing/yaml/constructor.py index 1e94b808fa..730ba85418 100644 --- a/v2/ansible/parsing/yaml/constructor.py +++ b/v2/ansible/parsing/yaml/constructor.py @@ -23,6 +23,10 @@ from yaml.constructor import Constructor from ansible.parsing.yaml.objects import AnsibleMapping class AnsibleConstructor(Constructor): + def __init__(self, file_name=None): + self._ansible_file_name = file_name + super(AnsibleConstructor, self).__init__() + def construct_yaml_map(self, node): data = AnsibleMapping() yield data @@ -36,7 +40,16 @@ class AnsibleConstructor(Constructor): ret = AnsibleMapping(super(Constructor, self).construct_mapping(node, deep)) ret._line_number = node.__line__ ret._column_number = node.__column__ - ret._data_source = node.__datasource__ + + # in some cases, we may have pre-read the data and then + # passed it to the load() call for YAML, in which case we + # want to override the default datasource (which would be + # '') to the actual filename we read in + if self._ansible_file_name: + ret._data_source = self._ansible_file_name + else: + ret._data_source = node.__datasource__ + return ret AnsibleConstructor.add_constructor( diff --git a/v2/ansible/parsing/yaml/loader.py b/v2/ansible/parsing/yaml/loader.py index f75e5b4b27..0d13007819 100644 --- a/v2/ansible/parsing/yaml/loader.py +++ b/v2/ansible/parsing/yaml/loader.py @@ -28,11 +28,11 @@ from ansible.parsing.yaml.composer import AnsibleComposer from ansible.parsing.yaml.constructor import AnsibleConstructor class AnsibleLoader(Reader, Scanner, Parser, AnsibleComposer, AnsibleConstructor, Resolver): - def __init__(self, stream): + def __init__(self, stream, file_name=None): Reader.__init__(self, stream) 
Scanner.__init__(self) Parser.__init__(self) AnsibleComposer.__init__(self) - AnsibleConstructor.__init__(self) + AnsibleConstructor.__init__(self, file_name=file_name) Resolver.__init__(self) diff --git a/v2/ansible/parsing/yaml/objects.py b/v2/ansible/parsing/yaml/objects.py index 6a7482fe49..6eff9966f9 100644 --- a/v2/ansible/parsing/yaml/objects.py +++ b/v2/ansible/parsing/yaml/objects.py @@ -26,8 +26,8 @@ class AnsibleBaseYAMLObject: ''' _data_source = None - _line_number = None - _column_number = None + _line_number = 0 + _column_number = 0 def get_position_info(self): return (self._data_source, self._line_number, self._column_number) diff --git a/v2/ansible/parsing/yaml/strings.py b/v2/ansible/parsing/yaml/strings.py index b7e304194f..dcd6ffd79f 100644 --- a/v2/ansible/parsing/yaml/strings.py +++ b/v2/ansible/parsing/yaml/strings.py @@ -34,8 +34,8 @@ Syntax Error while loading YAML. """ YAML_POSITION_DETAILS = """\ -The error appears to have been in '%s': line %s, column %s, -but may actually be before there depending on the exact syntax problem. +The error appears to have been in '%s': line %s, column %s, but may +be elsewhere in the file depending on the exact syntax problem. 
""" YAML_COMMON_DICT_ERROR = """\ diff --git a/v2/ansible/playbook/__init__.py b/v2/ansible/playbook/__init__.py index 87b422b280..f8f42b1163 100644 --- a/v2/ansible/playbook/__init__.py +++ b/v2/ansible/playbook/__init__.py @@ -19,14 +19,60 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +import os + +from ansible.errors import AnsibleError, AnsibleParserError +from ansible.parsing.yaml import DataLoader +from ansible.playbook.attribute import Attribute, FieldAttribute +from ansible.playbook.play import Play +from ansible.plugins import push_basedir + + +__all__ = ['Playbook'] + + class Playbook: - def __init__(self, filename): - self.ds = v2.utils.load_yaml_from_file(filename) - self.plays = [] - def load(self): - # loads a list of plays from the parsed ds - self.plays = [] + def __init__(self, loader=None): + # Entries in the datastructure of a playbook may + # be either a play or an include statement + self._entries = [] + self._basedir = '.' + + if loader: + self._loader = loader + else: + self._loader = DataLoader() + + @staticmethod + def load(file_name, loader=None): + pb = Playbook(loader=loader) + pb._load_playbook_data(file_name) + return pb + + def _load_playbook_data(self, file_name): + + # add the base directory of the file to the data loader, + # so that it knows where to find relatively pathed files + basedir = os.path.dirname(file_name) + self._loader.set_basedir(basedir) + + ds = self._loader.load_from_file(file_name) + if not isinstance(ds, list): + raise AnsibleParserError("playbooks must be a list of plays", obj=ds) + + # Parse the playbook entries. 
For plays, we simply parse them + # using the Play() object, and includes are parsed using the + # PlaybookInclude() object + for entry in ds: + if not isinstance(entry, dict): + raise AnsibleParserError("playbook entries must be either a valid play or an include statement", obj=entry) + + if 'include' in entry: + entry_obj = PlaybookInclude.load(entry, loader=self._loader) + else: + entry_obj = Play.load(entry, loader=self._loader) + + self._entries.append(entry_obj) + - def get_plays(self): - return self.plays diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py index c163240363..c7748095a5 100644 --- a/v2/ansible/playbook/base.py +++ b/v2/ansible/playbook/base.py @@ -110,7 +110,7 @@ class Base: valid_attrs = [name for (name, attribute) in iteritems(self._get_base_attributes())] for key in ds: if key not in valid_attrs: - raise AnsibleParserError("'%s' is not a valid attribute for a %s" % (key, self.__class__), obj=ds) + raise AnsibleParserError("'%s' is not a valid attribute for a %s" % (key, self.__class__.__name__), obj=ds) def validate(self): ''' validation that is done at parse time, not load time ''' diff --git a/v2/ansible/playbook/block.py b/v2/ansible/playbook/block.py index cc5ccacc40..a082e97e5e 100644 --- a/v2/ansible/playbook/block.py +++ b/v2/ansible/playbook/block.py @@ -67,15 +67,15 @@ class Block(Base): return ds def _load_block(self, attr, ds): - return load_list_of_tasks(ds) + return load_list_of_tasks(ds, block=self, loader=self._loader) def _load_rescue(self, attr, ds): - return load_list_of_tasks(ds) + return load_list_of_tasks(ds, block=self, loader=self._loader) def _load_always(self, attr, ds): - return load_list_of_tasks(ds) + return load_list_of_tasks(ds, block=self, loader=self._loader) # not currently used #def _load_otherwise(self, attr, ds): - # return self._load_list_of_tasks(ds) + # return self._load_list_of_tasks(ds, block=self, loader=self._loader) diff --git a/v2/ansible/playbook/helpers.py 
b/v2/ansible/playbook/helpers.py index 6985ad7808..f692f4baf6 100644 --- a/v2/ansible/playbook/helpers.py +++ b/v2/ansible/playbook/helpers.py @@ -17,6 +17,7 @@ from types import NoneType +from ansible.errors import AnsibleParserError def load_list_of_blocks(ds, role=None, loader=None): ''' @@ -38,24 +39,34 @@ def load_list_of_blocks(ds, role=None, loader=None): return block_list -def load_list_of_tasks(ds, block=None, role=None, loader=None): + +def load_list_of_tasks(ds, block=None, role=None, task_include=None, loader=None): ''' Given a list of task datastructures (parsed from YAML), - return a list of Task() objects. + return a list of Task() or TaskInclude() objects. ''' # we import here to prevent a circular dependency with imports from ansible.playbook.task import Task + from ansible.playbook.task_include import TaskInclude assert type(ds) == list task_list = [] for task in ds: - t = Task.load(task, block=block, role=role, loader=loader) + if not isinstance(task, dict): + raise AnsibleParserError("task/handler entries must be dictionaries (got a %s)" % type(task), obj=ds) + + if 'include' in task: + t = TaskInclude.load(task, block=block, role=role, task_include=task_include, loader=loader) + else: + t = Task.load(task, block=block, role=role, task_include=task_include, loader=loader) + task_list.append(t) return task_list + def load_list_of_roles(ds, loader=None): ''' Loads and returns a list of RoleInclude objects from the datastructure diff --git a/v2/ansible/playbook/play.py b/v2/ansible/playbook/play.py index 3c8a4bcb87..07ee4707b4 100644 --- a/v2/ansible/playbook/play.py +++ b/v2/ansible/playbook/play.py @@ -124,28 +124,28 @@ class Play(Base): Loads a list of blocks from a list which may be mixed tasks/blocks. Bare tasks outside of a block are given an implicit block. 
''' - return load_list_of_blocks(ds) + return load_list_of_blocks(ds, loader=self._loader) def _load_pre_tasks(self, attr, ds): ''' Loads a list of blocks from a list which may be mixed tasks/blocks. Bare tasks outside of a block are given an implicit block. ''' - return load_list_of_blocks(ds) + return load_list_of_blocks(ds, loader=self._loader) def _load_post_tasks(self, attr, ds): ''' Loads a list of blocks from a list which may be mixed tasks/blocks. Bare tasks outside of a block are given an implicit block. ''' - return load_list_of_blocks(ds) + return load_list_of_blocks(ds, loader=self._loader) def _load_handlers(self, attr, ds): ''' Loads a list of blocks from a list which may be mixed handlers/blocks. Bare handlers outside of a block are given an implicit block. ''' - return load_list_of_blocks(ds) + return load_list_of_blocks(ds, loader=self._loader) def _load_roles(self, attr, ds): ''' diff --git a/v2/ansible/playbook/role/__init__.py b/v2/ansible/playbook/role/__init__.py index 4950e944d3..8f37970d59 100644 --- a/v2/ansible/playbook/role/__init__.py +++ b/v2/ansible/playbook/role/__init__.py @@ -95,11 +95,11 @@ class Role: task_data = self._load_role_yaml('tasks') if task_data: - self._task_blocks = load_list_of_blocks(task_data) + self._task_blocks = load_list_of_blocks(task_data, role=self, loader=self._loader) handler_data = self._load_role_yaml('handlers') if handler_data: - self._handler_blocks = load_list_of_blocks(handler_data) + self._handler_blocks = load_list_of_blocks(handler_data, role=self, loader=self._loader) # vars and default vars are regular dictionaries self._role_vars = self._load_role_yaml('vars') diff --git a/v2/ansible/playbook/task.py b/v2/ansible/playbook/task.py index 97f7b06eb6..95571819af 100644 --- a/v2/ansible/playbook/task.py +++ b/v2/ansible/playbook/task.py @@ -27,6 +27,7 @@ from ansible.errors import AnsibleError from ansible.parsing.splitter import parse_kv from ansible.parsing.mod_args import ModuleArgsParser from 
ansible.parsing.yaml import DataLoader +from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping from ansible.plugins import module_finder, lookup_finder class Task(Base): @@ -54,6 +55,7 @@ class Task(Base): _always_run = FieldAttribute(isa='bool') _any_errors_fatal = FieldAttribute(isa='bool') _async = FieldAttribute(isa='int') + _changed_when = FieldAttribute(isa='string') _connection = FieldAttribute(isa='string') _delay = FieldAttribute(isa='int') _delegate_to = FieldAttribute(isa='string') @@ -88,10 +90,13 @@ class Task(Base): _until = FieldAttribute(isa='list') # ? _when = FieldAttribute(isa='list', default=[]) - def __init__(self, block=None, role=None): + def __init__(self, block=None, role=None, task_include=None): ''' constructors a task, without the Task.load classmethod, it will be pretty blank ''' - self._block = block - self._role = role + + self._block = block + self._role = role + self._task_include = task_include + super(Task, self).__init__() def get_name(self): @@ -120,8 +125,8 @@ class Task(Base): return buf @staticmethod - def load(data, block=None, role=None, loader=None): - t = Task(block=block, role=role) + def load(data, block=None, role=None, task_include=None, loader=None): + t = Task(block=block, role=role, task_include=task_include) return t.load_data(data, loader=loader) def __repr__(self): @@ -131,9 +136,10 @@ class Task(Base): def _munge_loop(self, ds, new_ds, k, v): ''' take a lookup plugin name and store it correctly ''' - if self._loop.value is not None: - raise AnsibleError("duplicate loop in task: %s" % k) - new_ds['loop'] = k + loop_name = k.replace("with_", "") + if new_ds.get('loop') is not None: + raise AnsibleError("duplicate loop in task: %s" % loop_name) + new_ds['loop'] = loop_name new_ds['loop_args'] = v def munge(self, ds): @@ -147,13 +153,15 @@ class Task(Base): # the new, cleaned datastructure, which will have legacy # items reduced to a standard structure suitable for the # attributes of the 
task class - new_ds = dict() + new_ds = AnsibleMapping() + if isinstance(ds, AnsibleBaseYAMLObject): + new_ds.copy_position_info(ds) # use the args parsing class to determine the action, args, # and the delegate_to value from the various possible forms # supported as legacy - args_parser = ModuleArgsParser() - (action, args, delegate_to) = args_parser.parse(ds) + args_parser = ModuleArgsParser(task_ds=ds) + (action, args, delegate_to) = args_parser.parse() new_ds['action'] = action new_ds['args'] = args @@ -164,7 +172,7 @@ class Task(Base): # we don't want to re-assign these values, which were # determined by the ModuleArgsParser() above continue - elif "with_%s" % k in lookup_finder: + elif k.replace("with_", "") in lookup_finder: self._munge_loop(ds, new_ds, k, v) else: new_ds[k] = v diff --git a/v2/ansible/playbook/task_include.py b/v2/ansible/playbook/task_include.py index 785fc45992..798ce020d1 100644 --- a/v2/ansible/playbook/task_include.py +++ b/v2/ansible/playbook/task_include.py @@ -19,3 +19,128 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +from ansible.errors import AnsibleParserError +from ansible.parsing.splitter import split_args, parse_kv +from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping +from ansible.playbook.attribute import Attribute, FieldAttribute +from ansible.playbook.base import Base +from ansible.playbook.helpers import load_list_of_tasks +from ansible.plugins import lookup_finder + + +__all__ = ['TaskInclude'] + + +class TaskInclude(Base): + + ''' + A class used to wrap the use of `include: /some/other/file.yml` + within a task list, which may return a list of Task objects and/or + more TaskInclude objects. 
+ ''' + + # the description field is used mainly internally to + # show a nice reprsentation of this class, rather than + # simply using __class__.__name__ + + __desc__ = "task include statement" + + + #----------------------------------------------------------------- + # Attributes + + _include = FieldAttribute(isa='string') + _loop = FieldAttribute(isa='string', private=True) + _loop_args = FieldAttribute(isa='list', private=True) + _tags = FieldAttribute(isa='list', default=[]) + _vars = FieldAttribute(isa='dict', default=dict()) + _when = FieldAttribute(isa='list', default=[]) + + def __init__(self, block=None, role=None, task_include=None): + self._tasks = [] + self._block = block + self._role = role + self._task_include = task_include + + super(TaskInclude, self).__init__() + + @staticmethod + def load(data, block=None, role=None, task_include=None, loader=None): + ti = TaskInclude(block=block, role=role, task_include=None) + return ti.load_data(data, loader=loader) + + def munge(self, ds): + ''' + Regorganizes the data for a TaskInclude datastructure to line + up with what we expect the proper attributes to be + ''' + + assert isinstance(ds, dict) + + # the new, cleaned datastructure, which will have legacy + # items reduced to a standard structure + new_ds = AnsibleMapping() + if isinstance(ds, AnsibleBaseYAMLObject): + new_ds.copy_position_info(ds) + + for (k,v) in ds.iteritems(): + if k == 'include': + self._munge_include(ds, new_ds, k, v) + elif k.replace("with_", "") in lookup_finder: + self._munge_loop(ds, new_ds, k, v) + else: + # some basic error checking, to make sure vars are properly + # formatted and do not conflict with k=v parameters + # FIXME: we could merge these instead, but controlling the order + # in which they're encountered could be difficult + if k == 'vars': + if 'vars' in new_ds: + raise AnsibleParserError("include parameters cannot be mixed with 'vars' entries for include statements", obj=ds) + elif not isinstance(v, dict): + raise 
AnsibleParserError("vars for include statements must be specified as a dictionary", obj=ds) + new_ds[k] = v + + return new_ds + + def _munge_include(self, ds, new_ds, k, v): + ''' + Splits the include line up into filename and parameters + ''' + + # The include line must include at least one item, which is the filename + # to include. Anything after that should be regarded as a parameter to the include + items = split_args(v) + if len(items) == 0: + raise AnsibleParserError("include statements must specify the file name to include", obj=ds) + else: + # FIXME/TODO: validate that items[0] is a file, which also + # exists and is readable + new_ds['include'] = items[0] + if len(items) > 1: + # rejoin the parameter portion of the arguments and + # then use parse_kv() to get a dict of params back + params = parse_kv(" ".join(items[1:])) + if 'vars' in new_ds: + # FIXME: see fixme above regarding merging vars + raise AnsibleParserError("include parameters cannot be mixed with 'vars' entries for include statements", obj=ds) + new_ds['vars'] = params + + def _munge_loop(self, ds, new_ds, k, v): + ''' take a lookup plugin name and store it correctly ''' + + loop_name = k.replace("with_", "") + if new_ds.get('loop') is not None: + raise AnsibleError("duplicate loop in task: %s" % loop_name) + new_ds['loop'] = loop_name + new_ds['loop_args'] = v + + + def _load_include(self, attr, ds): + ''' loads the file name specified in the ds and returns a list of tasks ''' + + data = self._loader.load_from_file(ds) + if not isinstance(data, list): + raise AnsibleParsingError("included task files must contain a list of tasks", obj=ds) + + self._tasks = load_list_of_tasks(data, task_include=self, loader=self._loader) + return ds diff --git a/v2/ansible/plugins/lookup/csvfile.py b/v2/ansible/plugins/lookup/csvfile.py new file mode 100644 index 0000000000..ce5a2b77d2 --- /dev/null +++ b/v2/ansible/plugins/lookup/csvfile.py @@ -0,0 +1,82 @@ +# (c) 2013, Jan-Piet Mens +# +# This file is part 
of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from ansible import utils, errors +import os +import codecs +import csv + +class LookupModule(object): + + def __init__(self, basedir=None, **kwargs): + self.basedir = basedir + + def read_csv(self, filename, key, delimiter, dflt=None, col=1): + + try: + f = codecs.open(filename, 'r', encoding='utf-8') + creader = csv.reader(f, delimiter=delimiter) + + for row in creader: + if row[0] == key: + return row[int(col)] + except Exception, e: + raise errors.AnsibleError("csvfile: %s" % str(e)) + + return dflt + + def run(self, terms, inject=None, **kwargs): + + terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject) + + if isinstance(terms, basestring): + terms = [ terms ] + + ret = [] + for term in terms: + params = term.split() + key = params[0] + + paramvals = { + 'file' : 'ansible.csv', + 'default' : None, + 'delimiter' : "TAB", + 'col' : "1", # column to return + } + + # parameters specified? 
+ try: + for param in params[1:]: + name, value = param.split('=') + assert(name in paramvals) + paramvals[name] = value + except (ValueError, AssertionError), e: + raise errors.AnsibleError(e) + + if paramvals['delimiter'] == 'TAB': + paramvals['delimiter'] = "\t" + + path = utils.path_dwim(self.basedir, paramvals['file']) + + var = self.read_csv(path, key, paramvals['delimiter'], paramvals['default'], paramvals['col']) + if var is not None: + if type(var) is list: + for v in var: + ret.append(v) + else: + ret.append(var) + return ret diff --git a/v2/ansible/plugins/lookup/dict.py b/v2/ansible/plugins/lookup/dict.py new file mode 100644 index 0000000000..cda1546598 --- /dev/null +++ b/v2/ansible/plugins/lookup/dict.py @@ -0,0 +1,39 @@ +# (c) 2014, Kent R. Spillner +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +from ansible.utils import safe_eval +import ansible.utils as utils +import ansible.errors as errors + +def flatten_hash_to_list(terms): + ret = [] + for key in terms: + ret.append({'key': key, 'value': terms[key]}) + return ret + +class LookupModule(object): + + def __init__(self, basedir=None, **kwargs): + self.basedir = basedir + + def run(self, terms, inject=None, **kwargs): + terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject) + + if not isinstance(terms, dict): + raise errors.AnsibleError("with_dict expects a dict") + + return flatten_hash_to_list(terms) diff --git a/v2/ansible/plugins/lookup/dnstxt.py b/v2/ansible/plugins/lookup/dnstxt.py new file mode 100644 index 0000000000..4fa47bf4ee --- /dev/null +++ b/v2/ansible/plugins/lookup/dnstxt.py @@ -0,0 +1,68 @@ +# (c) 2012, Jan-Piet Mens +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +from ansible import utils, errors +import os +HAVE_DNS=False +try: + import dns.resolver + from dns.exception import DNSException + HAVE_DNS=True +except ImportError: + pass + +# ============================================================== +# DNSTXT: DNS TXT records +# +# key=domainname +# TODO: configurable resolver IPs +# -------------------------------------------------------------- + +class LookupModule(object): + + def __init__(self, basedir=None, **kwargs): + self.basedir = basedir + + if HAVE_DNS == False: + raise errors.AnsibleError("Can't LOOKUP(dnstxt): module dns.resolver is not installed") + + def run(self, terms, inject=None, **kwargs): + + terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject) + + if isinstance(terms, basestring): + terms = [ terms ] + + ret = [] + for term in terms: + domain = term.split()[0] + string = [] + try: + answers = dns.resolver.query(domain, 'TXT') + for rdata in answers: + s = rdata.to_text() + string.append(s[1:-1]) # Strip outside quotes on TXT rdata + + except dns.resolver.NXDOMAIN: + string = 'NXDOMAIN' + except dns.resolver.Timeout: + string = '' + except dns.exception.DNSException, e: + raise errors.AnsibleError("dns.resolver unhandled exception", e) + + ret.append(''.join(string)) + return ret diff --git a/v2/ansible/plugins/lookup/env.py b/v2/ansible/plugins/lookup/env.py new file mode 100644 index 0000000000..d4f85356ed --- /dev/null +++ b/v2/ansible/plugins/lookup/env.py @@ -0,0 +1,41 @@ +# (c) 2012, Jan-Piet Mens +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from ansible import utils, errors +from ansible.utils import template +import os + +class LookupModule(object): + + def __init__(self, basedir=None, **kwargs): + self.basedir = basedir + + def run(self, terms, inject=None, **kwargs): + + try: + terms = template.template(self.basedir, terms, inject) + except Exception, e: + pass + + if isinstance(terms, basestring): + terms = [ terms ] + + ret = [] + for term in terms: + var = term.split()[0] + ret.append(os.getenv(var, '')) + return ret diff --git a/v2/ansible/plugins/lookup/etcd.py b/v2/ansible/plugins/lookup/etcd.py new file mode 100644 index 0000000000..a758a2fb0b --- /dev/null +++ b/v2/ansible/plugins/lookup/etcd.py @@ -0,0 +1,78 @@ +# (c) 2013, Jan-Piet Mens +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +from ansible import utils +import os +import urllib2 +try: + import json +except ImportError: + import simplejson as json + +# this can be made configurable, but should not use ansible.cfg +ANSIBLE_ETCD_URL = 'http://127.0.0.1:4001' +if os.getenv('ANSIBLE_ETCD_URL') is not None: + ANSIBLE_ETCD_URL = os.environ['ANSIBLE_ETCD_URL'] + +class etcd(): + def __init__(self, url=ANSIBLE_ETCD_URL): + self.url = url + self.baseurl = '%s/v1/keys' % (self.url) + + def get(self, key): + url = "%s/%s" % (self.baseurl, key) + + data = None + value = "" + try: + r = urllib2.urlopen(url) + data = r.read() + except: + return value + + try: + # {"action":"get","key":"/name","value":"Jane Jolie","index":5} + item = json.loads(data) + if 'value' in item: + value = item['value'] + if 'errorCode' in item: + value = "ENOENT" + except: + raise + pass + + return value + +class LookupModule(object): + + def __init__(self, basedir=None, **kwargs): + self.basedir = basedir + self.etcd = etcd() + + def run(self, terms, inject=None, **kwargs): + + terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject) + + if isinstance(terms, basestring): + terms = [ terms ] + + ret = [] + for term in terms: + key = term.split()[0] + value = self.etcd.get(key) + ret.append(value) + return ret diff --git a/v2/ansible/plugins/lookup/file.py b/v2/ansible/plugins/lookup/file.py new file mode 100644 index 0000000000..70bae6653a --- /dev/null +++ b/v2/ansible/plugins/lookup/file.py @@ -0,0 +1,59 @@ +# (c) 2012, Daniel Hokka Zakrisson +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from ansible import utils, errors +import os +import codecs + +class LookupModule(object): + + def __init__(self, basedir=None, **kwargs): + self.basedir = basedir + + def run(self, terms, inject=None, **kwargs): + + terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject) + ret = [] + + # this can happen if the variable contains a string, strictly not desired for lookup + # plugins, but users may try it, so make it work. + if not isinstance(terms, list): + terms = [ terms ] + + for term in terms: + basedir_path = utils.path_dwim(self.basedir, term) + relative_path = None + playbook_path = None + + # Special handling of the file lookup, used primarily when the + # lookup is done from a role. If the file isn't found in the + # basedir of the current file, use dwim_relative to look in the + # role/files/ directory, and finally the playbook directory + # itself (which will be relative to the current working dir) + if '_original_file' in inject: + relative_path = utils.path_dwim_relative(inject['_original_file'], 'files', term, self.basedir, check=False) + if 'playbook_dir' in inject: + playbook_path = os.path.join(inject['playbook_dir'], term) + + for path in (basedir_path, relative_path, playbook_path): + if path and os.path.exists(path): + ret.append(codecs.open(path, encoding="utf8").read().rstrip()) + break + else: + raise errors.AnsibleError("could not locate file in lookup: %s" % term) + + return ret diff --git a/v2/ansible/plugins/lookup/fileglob.py b/v2/ansible/plugins/lookup/fileglob.py new file mode 100644 index 0000000000..7d3cbb92be --- /dev/null +++ b/v2/ansible/plugins/lookup/fileglob.py @@ -0,0 +1,39 @@ +# (c) 2012, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General 
Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +import os +import glob +from ansible import utils + +class LookupModule(object): + + def __init__(self, basedir=None, **kwargs): + self.basedir = basedir + + def run(self, terms, inject=None, **kwargs): + + terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject) + + ret = [] + + for term in terms: + + dwimmed = utils.path_dwim(self.basedir, term) + globbed = glob.glob(dwimmed) + ret.extend(g for g in globbed if os.path.isfile(g)) + + return ret diff --git a/v2/ansible/plugins/lookup/first_found.py b/v2/ansible/plugins/lookup/first_found.py new file mode 100644 index 0000000000..a48b56a3c2 --- /dev/null +++ b/v2/ansible/plugins/lookup/first_found.py @@ -0,0 +1,194 @@ +# (c) 2013, seth vidal red hat, inc +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ + +# take a list of files and (optionally) a list of paths +# return the first existing file found in the paths +# [file1, file2, file3], [path1, path2, path3] +# search order is: +# path1/file1 +# path1/file2 +# path1/file3 +# path2/file1 +# path2/file2 +# path2/file3 +# path3/file1 +# path3/file2 +# path3/file3 + +# first file found with os.path.exists() is returned +# no file matches raises ansibleerror +# EXAMPLES +# - name: copy first existing file found to /some/file +# action: copy src=$item dest=/some/file +# with_first_found: +# - files: foo ${inventory_hostname} bar +# paths: /tmp/production /tmp/staging + +# that will look for files in this order: +# /tmp/production/foo +# ${inventory_hostname} +# bar +# /tmp/staging/foo +# ${inventory_hostname} +# bar + +# - name: copy first existing file found to /some/file +# action: copy src=$item dest=/some/file +# with_first_found: +# - files: /some/place/foo ${inventory_hostname} /some/place/else + +# that will look for files in this order: +# /some/place/foo +# $relative_path/${inventory_hostname} +# /some/place/else + +# example - including tasks: +# tasks: +# - include: $item +# with_first_found: +# - files: generic +# paths: tasks/staging tasks/production +# this will include the tasks in the file generic where it is found first (staging or production) + +# example simple file lists +#tasks: +#- name: first found file +# action: copy src=$item dest=/etc/file.cfg +# with_first_found: +# - files: foo.${inventory_hostname} foo + + +# example skipping if no matched files +# First_found also offers the ability to control whether or not failing +# to find a file returns an error or not +# +#- name: first found file - or skip +# action: copy src=$item dest=/etc/file.cfg +# with_first_found: +# - files: foo.${inventory_hostname} +# skip: true + +# example a role with default configuration and configuration per host +# you can set multiple terms with their own files and paths to look through. 
+# consider a role that sets some configuration per host falling back on a default config. +# +#- name: some configuration template +# template: src={{ item }} dest=/etc/file.cfg mode=0444 owner=root group=root +# with_first_found: +# - files: +# - ${inventory_hostname}/etc/file.cfg +# paths: +# - ../../../templates.overwrites +# - ../../../templates +# - files: +# - etc/file.cfg +# paths: +# - templates + +# the above will return an empty list if the files cannot be found at all +# if skip is unspecified or if it is set to false then it will return a list +# error which can be caught by ignore_errors: true for that action. + +# finally - if you want you can use it, in place to replace first_available_file: +# you simply cannot use the - files, path or skip options. simply replace +# first_available_file with with_first_found and leave the file listing in place +# +# +# - name: with_first_found like first_available_file +# action: copy src=$item dest=/tmp/faftest +# with_first_found: +# - ../files/foo +# - ../files/bar +# - ../files/baz +# ignore_errors: true + + +from ansible import utils, errors +import os + +class LookupModule(object): + + def __init__(self, basedir=None, **kwargs): + self.basedir = basedir + + def run(self, terms, inject=None, **kwargs): + + terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject) + + result = None + anydict = False + skip = False + + for term in terms: + if isinstance(term, dict): + anydict = True + + total_search = [] + if anydict: + for term in terms: + if isinstance(term, dict): + files = term.get('files', []) + paths = term.get('paths', []) + skip = utils.boolean(term.get('skip', False)) + + filelist = files + if isinstance(files, basestring): + files = files.replace(',', ' ') + files = files.replace(';', ' ') + filelist = files.split(' ') + + pathlist = paths + if paths: + if isinstance(paths, basestring): + paths = paths.replace(',', ' ') + paths = paths.replace(':', ' ') + paths = paths.replace(';', ' ')
+ pathlist = paths.split(' ') + + if not pathlist: + total_search = filelist + else: + for path in pathlist: + for fn in filelist: + f = os.path.join(path, fn) + total_search.append(f) + else: + total_search.append(term) + else: + total_search = terms + + for fn in total_search: + if inject and '_original_file' in inject: + # check the templates and vars directories too, + # if they exist + for roledir in ('templates', 'vars'): + path = utils.path_dwim(os.path.join(self.basedir, '..', roledir), fn) + if os.path.exists(path): + return [path] + # if none of the above were found, just check the + # current filename against the basedir (this will already + # have ../files from runner, if it's a role task + path = utils.path_dwim(self.basedir, fn) + if os.path.exists(path): + return [path] + else: + if skip: + return [] + else: + return [None] + diff --git a/v2/ansible/plugins/lookup/flattened.py b/v2/ansible/plugins/lookup/flattened.py new file mode 100644 index 0000000000..831b2e9130 --- /dev/null +++ b/v2/ansible/plugins/lookup/flattened.py @@ -0,0 +1,78 @@ +# (c) 2013, Serge van Ginderachter +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +import ansible.utils as utils +import ansible.errors as errors + + +def check_list_of_one_list(term): + # make sure term is not a list of one (list of one..) 
item + # return the final non list item if so + + if isinstance(term,list) and len(term) == 1: + term = term[0] + if isinstance(term,list): + term = check_list_of_one_list(term) + + return term + + + +class LookupModule(object): + + def __init__(self, basedir=None, **kwargs): + self.basedir = basedir + + + def flatten(self, terms, inject): + + ret = [] + for term in terms: + term = check_list_of_one_list(term) + + if term == 'None' or term == 'null': + # ignore undefined items + break + + if isinstance(term, basestring): + # convert a variable to a list + term2 = utils.listify_lookup_plugin_terms(term, self.basedir, inject) + # but avoid converting a plain string to a list of one string + if term2 != [ term ]: + term = term2 + + if isinstance(term, list): + # if it's a list, check recursively for items that are a list + term = self.flatten(term, inject) + ret.extend(term) + else: + ret.append(term) + + return ret + + + def run(self, terms, inject=None, **kwargs): + + # see if the string represents a list and convert to list if so + terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject) + + if not isinstance(terms, list): + raise errors.AnsibleError("with_flattened expects a list") + + ret = self.flatten(terms, inject) + return ret + diff --git a/v2/ansible/plugins/lookup/indexed_items.py b/v2/ansible/plugins/lookup/indexed_items.py new file mode 100644 index 0000000000..c1db1fdee2 --- /dev/null +++ b/v2/ansible/plugins/lookup/indexed_items.py @@ -0,0 +1,44 @@ +# (c) 2012, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from ansible.utils import safe_eval +import ansible.utils as utils +import ansible.errors as errors + +def flatten(terms): + ret = [] + for term in terms: + if isinstance(term, list): + ret.extend(term) + else: + ret.append(term) + return ret + +class LookupModule(object): + + def __init__(self, basedir=None, **kwargs): + self.basedir = basedir + + def run(self, terms, inject=None, **kwargs): + terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject) + + if not isinstance(terms, list): + raise errors.AnsibleError("with_indexed_items expects a list") + + items = flatten(terms) + return zip(range(len(items)), items) + diff --git a/v2/ansible/plugins/lookup/inventory_hostnames.py b/v2/ansible/plugins/lookup/inventory_hostnames.py new file mode 100644 index 0000000000..98523e1398 --- /dev/null +++ b/v2/ansible/plugins/lookup/inventory_hostnames.py @@ -0,0 +1,48 @@ +# (c) 2012, Michael DeHaan +# (c) 2013, Steven Dossett +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +from ansible.utils import safe_eval +import ansible.utils as utils +import ansible.errors as errors +import ansible.inventory as inventory + +def flatten(terms): + ret = [] + for term in terms: + if isinstance(term, list): + ret.extend(term) + else: + ret.append(term) + return ret + +class LookupModule(object): + + def __init__(self, basedir=None, **kwargs): + self.basedir = basedir + if 'runner' in kwargs: + self.host_list = kwargs['runner'].inventory.host_list + else: + raise errors.AnsibleError("inventory_hostnames must be used as a loop. Example: \"with_inventory_hostnames: \'all\'\"") + + def run(self, terms, inject=None, **kwargs): + terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject) + + if not isinstance(terms, list): + raise errors.AnsibleError("with_inventory_hostnames expects a list") + return flatten(inventory.Inventory(self.host_list).list_hosts(terms)) + diff --git a/v2/ansible/plugins/lookup/items.py b/v2/ansible/plugins/lookup/items.py new file mode 100644 index 0000000000..85e77d5380 --- /dev/null +++ b/v2/ansible/plugins/lookup/items.py @@ -0,0 +1,44 @@ +# (c) 2012, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +from ansible.utils import safe_eval +import ansible.utils as utils +import ansible.errors as errors + +def flatten(terms): + ret = [] + for term in terms: + if isinstance(term, list): + ret.extend(term) + else: + ret.append(term) + return ret + +class LookupModule(object): + + def __init__(self, basedir=None, **kwargs): + self.basedir = basedir + + def run(self, terms, inject=None, **kwargs): + terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject) + + if not isinstance(terms, list) and not isinstance(terms,set): + raise errors.AnsibleError("with_items expects a list or a set") + + return flatten(terms) + + diff --git a/v2/ansible/plugins/lookup/lines.py b/v2/ansible/plugins/lookup/lines.py new file mode 100644 index 0000000000..5d4b70a857 --- /dev/null +++ b/v2/ansible/plugins/lookup/lines.py @@ -0,0 +1,38 @@ +# (c) 2012, Daniel Hokka Zakrisson +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +import subprocess +from ansible import utils, errors + +class LookupModule(object): + + def __init__(self, basedir=None, **kwargs): + self.basedir = basedir + + def run(self, terms, inject=None, **kwargs): + + terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject) + + ret = [] + for term in terms: + p = subprocess.Popen(term, cwd=self.basedir, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE) + (stdout, stderr) = p.communicate() + if p.returncode == 0: + ret.extend(stdout.splitlines()) + else: + raise errors.AnsibleError("lookup_plugin.lines(%s) returned %d" % (term, p.returncode)) + return ret diff --git a/v2/ansible/plugins/lookup/nested.py b/v2/ansible/plugins/lookup/nested.py new file mode 100644 index 0000000000..29c4a7d21c --- /dev/null +++ b/v2/ansible/plugins/lookup/nested.py @@ -0,0 +1,73 @@ +# (c) 2012, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +import ansible.utils as utils +from ansible.utils import safe_eval +import ansible.errors as errors + +def flatten(terms): + ret = [] + for term in terms: + if isinstance(term, list): + ret.extend(term) + elif isinstance(term, tuple): + ret.extend(term) + else: + ret.append(term) + return ret + +def combine(a,b): + results = [] + for x in a: + for y in b: + results.append(flatten([x,y])) + return results + +class LookupModule(object): + + def __init__(self, basedir=None, **kwargs): + self.basedir = basedir + + def __lookup_injects(self, terms, inject): + results = [] + for x in terms: + intermediate = utils.listify_lookup_plugin_terms(x, self.basedir, inject) + results.append(intermediate) + return results + + def run(self, terms, inject=None, **kwargs): + + # this code is common with 'items.py' consider moving to utils if we need it again + + terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject) + terms = self.__lookup_injects(terms, inject) + + my_list = terms[:] + my_list.reverse() + result = [] + if len(my_list) == 0: + raise errors.AnsibleError("with_nested requires at least one element in the nested list") + result = my_list.pop() + while len(my_list) > 0: + result2 = combine(result, my_list.pop()) + result = result2 + new_result = [] + for x in result: + new_result.append(flatten(x)) + return new_result + + diff --git a/v2/ansible/plugins/lookup/password.py b/v2/ansible/plugins/lookup/password.py new file mode 100644 index 0000000000..a066887e2c --- /dev/null +++ b/v2/ansible/plugins/lookup/password.py @@ -0,0 +1,129 @@ +# (c) 2012, Daniel Hokka Zakrisson +# (c) 2013, Javier Candeira +# (c) 2013, Maykel Moya +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from ansible import utils, errors +import os +import errno +from string import ascii_letters, digits +import string +import random + + +class LookupModule(object): + + LENGTH = 20 + + def __init__(self, length=None, encrypt=None, basedir=None, **kwargs): + self.basedir = basedir + + def random_salt(self): + salt_chars = ascii_letters + digits + './' + return utils.random_password(length=8, chars=salt_chars) + + def run(self, terms, inject=None, **kwargs): + + terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject) + + ret = [] + + for term in terms: + # you can't have escaped spaces in your pathname + params = term.split() + relpath = params[0] + + paramvals = { + 'length': LookupModule.LENGTH, + 'encrypt': None, + 'chars': ['ascii_letters','digits',".,:-_"], + } + + # get non-default parameters if specified + try: + for param in params[1:]: + name, value = param.split('=') + assert(name in paramvals) + if name == 'length': + paramvals[name] = int(value) + elif name == 'chars': + use_chars=[] + if ",," in value: + use_chars.append(',') + use_chars.extend(value.replace(',,',',').split(',')) + paramvals['chars'] = use_chars + else: + paramvals[name] = value + except (ValueError, AssertionError), e: + raise errors.AnsibleError(e) + + length = paramvals['length'] + encrypt = paramvals['encrypt'] + use_chars = paramvals['chars'] + + # get password or create it if file doesn't exist + path = utils.path_dwim(self.basedir, relpath) + if not os.path.exists(path): + pathdir = os.path.dirname(path) + if not os.path.isdir(pathdir): + try: + os.makedirs(pathdir, mode=0700) + except OSError, e: + raise
errors.AnsibleError("cannot create the path for the password lookup: %s (error was %s)" % (pathdir, str(e))) + + chars = "".join([getattr(string,c,c) for c in use_chars]).replace('"','').replace("'",'') + password = ''.join(random.choice(chars) for _ in range(length)) + + if encrypt is not None: + salt = self.random_salt() + content = '%s salt=%s' % (password, salt) + else: + content = password + with open(path, 'w') as f: + os.chmod(path, 0600) + f.write(content + '\n') + else: + content = open(path).read().rstrip() + sep = content.find(' ') + + if sep >= 0: + password = content[:sep] + salt = content[sep+1:].split('=')[1] + else: + password = content + salt = None + + # crypt requested, add salt if missing + if (encrypt is not None and not salt): + salt = self.random_salt() + content = '%s salt=%s' % (password, salt) + with open(path, 'w') as f: + os.chmod(path, 0600) + f.write(content + '\n') + # crypt not requested, remove salt if present + elif (encrypt is None and salt): + with open(path, 'w') as f: + os.chmod(path, 0600) + f.write(password + '\n') + + if encrypt: + password = utils.do_encrypt(password, encrypt, salt=salt) + + ret.append(password) + + return ret + diff --git a/v2/ansible/plugins/lookup/pipe.py b/v2/ansible/plugins/lookup/pipe.py new file mode 100644 index 0000000000..0cd9e1cda5 --- /dev/null +++ b/v2/ansible/plugins/lookup/pipe.py @@ -0,0 +1,52 @@ +# (c) 2012, Daniel Hokka Zakrisson +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +import subprocess +from ansible import utils, errors + +class LookupModule(object): + + def __init__(self, basedir=None, **kwargs): + self.basedir = basedir + + def run(self, terms, inject=None, **kwargs): + + terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject) + + if isinstance(terms, basestring): + terms = [ terms ] + + ret = [] + for term in terms: + ''' + http://docs.python.org/2/library/subprocess.html#popen-constructor + + The shell argument (which defaults to False) specifies whether to use the + shell as the program to execute. If shell is True, it is recommended to pass + args as a string rather than as a sequence + + https://github.com/ansible/ansible/issues/6550 + ''' + term = str(term) + + p = subprocess.Popen(term, cwd=self.basedir, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE) + (stdout, stderr) = p.communicate() + if p.returncode == 0: + ret.append(stdout.decode("utf-8").rstrip()) + else: + raise errors.AnsibleError("lookup_plugin.pipe(%s) returned %d" % (term, p.returncode)) + return ret diff --git a/v2/ansible/plugins/lookup/random_choice.py b/v2/ansible/plugins/lookup/random_choice.py new file mode 100644 index 0000000000..9b32c2f119 --- /dev/null +++ b/v2/ansible/plugins/lookup/random_choice.py @@ -0,0 +1,41 @@ +# (c) 2013, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +import random +from ansible import utils + +# useful for introducing chaos ... or just somewhat reasonably fair selection +# amongst available mirrors +# +# tasks: +# - debug: msg=$item +# with_random_choice: +# - one +# - two +# - three + +class LookupModule(object): + + def __init__(self, basedir=None, **kwargs): + self.basedir = basedir + + def run(self, terms, inject=None, **kwargs): + + terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject) + + return [ random.choice(terms) ] + diff --git a/v2/ansible/plugins/lookup/redis_kv.py b/v2/ansible/plugins/lookup/redis_kv.py new file mode 100644 index 0000000000..22c5c3754f --- /dev/null +++ b/v2/ansible/plugins/lookup/redis_kv.py @@ -0,0 +1,72 @@ +# (c) 2012, Jan-Piet Mens +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from ansible import utils, errors +import os +HAVE_REDIS=False +try: + import redis # https://github.com/andymccurdy/redis-py/ + HAVE_REDIS=True +except ImportError: + pass +import re + +# ============================================================== +# REDISGET: Obtain value from a GET on a Redis key. 
Terms +# expected: 0 = URL, 1 = Key +# URL may be empty, in which case redis://localhost:6379 assumed +# -------------------------------------------------------------- + +class LookupModule(object): + + def __init__(self, basedir=None, **kwargs): + self.basedir = basedir + + if HAVE_REDIS == False: + raise errors.AnsibleError("Can't LOOKUP(redis_kv): module redis is not installed") + + def run(self, terms, inject=None, **kwargs): + + terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject) + + ret = [] + for term in terms: + (url,key) = term.split(',') + if url == "": + url = 'redis://localhost:6379' + + # urlsplit on Python 2.6.1 is broken. Hmm. Probably also the reason + # Redis' from_url() doesn't work here. + + p = '(?P[^:]+)://?(?P[^:/ ]+).?(?P[0-9]*).*' + + try: + m = re.search(p, url) + host = m.group('host') + port = int(m.group('port')) + except AttributeError: + raise errors.AnsibleError("Bad URI in redis lookup") + + try: + conn = redis.Redis(host=host, port=port) + res = conn.get(key) + if res is None: + res = "" + ret.append(res) + except: + ret.append("") # connection failed or key not found + return ret diff --git a/v2/ansible/plugins/lookup/sequence.py b/v2/ansible/plugins/lookup/sequence.py new file mode 100644 index 0000000000..b162b3069e --- /dev/null +++ b/v2/ansible/plugins/lookup/sequence.py @@ -0,0 +1,204 @@ +# (c) 2013, Jayson Vantuyl +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from ansible.errors import AnsibleError +import ansible.utils as utils +from re import compile as re_compile, IGNORECASE + +# shortcut format +NUM = "(0?x?[0-9a-f]+)" +SHORTCUT = re_compile( + "^(" + # Group 0 + NUM + # Group 1: Start + "-)?" + + NUM + # Group 2: End + "(/" + # Group 3 + NUM + # Group 4: Stride + ")?" + + "(:(.+))?$", # Group 5, Group 6: Format String + IGNORECASE +) + + +class LookupModule(object): + """ + sequence lookup module + + Used to generate some sequence of items. Takes arguments in two forms. + + The simple / shortcut form is: + + [start-]end[/stride][:format] + + As indicated by the brackets: start, stride, and format string are all + optional. The format string is in the style of printf. This can be used + to pad with zeros, format in hexadecimal, etc. All of the numerical values + can be specified in octal (i.e. 0664) or hexadecimal (i.e. 0x3f8). + Negative numbers are not supported. + + Some examples: + + 5 -> ["1","2","3","4","5"] + 5-8 -> ["5", "6", "7", "8"] + 2-10/2 -> ["2", "4", "6", "8", "10"] + 4:host%02d -> ["host01","host02","host03","host04"] + + The standard Ansible key-value form is accepted as well. For example: + + start=5 end=11 stride=2 format=0x%02x -> ["0x05","0x07","0x09","0x0b"] + + This format takes an alternate form of "end" called "count", which counts + some number from the starting value. For example: + + count=5 -> ["1", "2", "3", "4", "5"] + start=0x0f00 count=4 format=%04x -> ["0f00", "0f01", "0f02", "0f03"] + start=0 count=5 stride=2 -> ["0", "2", "4", "6", "8"] + start=1 count=5 stride=2 -> ["1", "3", "5", "7", "9"] + + The count option is mostly useful for avoiding off-by-one errors and errors + calculating the number of entries in a sequence when a stride is specified.
+ """ + + def __init__(self, basedir, **kwargs): + """absorb any keyword args""" + self.basedir = basedir + + def reset(self): + """set sensible defaults""" + self.start = 1 + self.count = None + self.end = None + self.stride = 1 + self.format = "%d" + + def parse_kv_args(self, args): + """parse key-value style arguments""" + for arg in ["start", "end", "count", "stride"]: + try: + arg_raw = args.pop(arg, None) + if arg_raw is None: + continue + arg_cooked = int(arg_raw, 0) + setattr(self, arg, arg_cooked) + except ValueError: + raise AnsibleError( + "can't parse arg %s=%r as integer" + % (arg, arg_raw) + ) + if 'format' in args: + self.format = args.pop("format") + if args: + raise AnsibleError( + "unrecognized arguments to with_sequence: %r" + % args.keys() + ) + + def parse_simple_args(self, term): + """parse the shortcut forms, return True/False""" + match = SHORTCUT.match(term) + if not match: + return False + + _, start, end, _, stride, _, format = match.groups() + + if start is not None: + try: + start = int(start, 0) + except ValueError: + raise AnsibleError("can't parse start=%s as integer" % start) + if end is not None: + try: + end = int(end, 0) + except ValueError: + raise AnsibleError("can't parse end=%s as integer" % end) + if stride is not None: + try: + stride = int(stride, 0) + except ValueError: + raise AnsibleError("can't parse stride=%s as integer" % stride) + + if start is not None: + self.start = start + if end is not None: + self.end = end + if stride is not None: + self.stride = stride + if format is not None: + self.format = format + + def sanity_check(self): + if self.count is None and self.end is None: + raise AnsibleError( + "must specify count or end in with_sequence" + ) + elif self.count is not None and self.end is not None: + raise AnsibleError( + "can't specify both count and end in with_sequence" + ) + elif self.count is not None: + # convert count to end + self.end = self.start + self.count * self.stride - 1 + del self.count + if 
self.end < self.start: + raise AnsibleError("can't count backwards") + if self.format.count('%') != 1: + raise AnsibleError("bad formatting string: %s" % self.format) + + def generate_sequence(self): + numbers = xrange(self.start, self.end + 1, self.stride) + + for i in numbers: + try: + formatted = self.format % i + yield formatted + except (ValueError, TypeError): + raise AnsibleError( + "problem formatting %r with %r" % self.format + ) + + def run(self, terms, inject=None, **kwargs): + results = [] + + terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject) + + if isinstance(terms, basestring): + terms = [ terms ] + + for term in terms: + try: + self.reset() # clear out things for this iteration + + try: + if not self.parse_simple_args(term): + self.parse_kv_args(utils.parse_kv(term)) + except Exception: + raise AnsibleError( + "unknown error parsing with_sequence arguments: %r" + % term + ) + + self.sanity_check() + + results.extend(self.generate_sequence()) + except AnsibleError: + raise + except Exception: + raise AnsibleError( + "unknown error generating sequence" + ) + + return results diff --git a/v2/ansible/plugins/lookup/subelements.py b/v2/ansible/plugins/lookup/subelements.py new file mode 100644 index 0000000000..f33aae717d --- /dev/null +++ b/v2/ansible/plugins/lookup/subelements.py @@ -0,0 +1,67 @@ +# (c) 2013, Serge van Ginderachter +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +import ansible.utils as utils +import ansible.errors as errors + + +class LookupModule(object): + + def __init__(self, basedir=None, **kwargs): + self.basedir = basedir + + + def run(self, terms, inject=None, **kwargs): + terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject) + terms[0] = utils.listify_lookup_plugin_terms(terms[0], self.basedir, inject) + + if not isinstance(terms, list) or not len(terms) == 2: + raise errors.AnsibleError( + "subelements lookup expects a list of two items, first a dict or a list, and second a string") + terms[0] = utils.listify_lookup_plugin_terms(terms[0], self.basedir, inject) + if not isinstance(terms[0], (list, dict)) or not isinstance(terms[1], basestring): + raise errors.AnsibleError( + "subelements lookup expects a list of two items, first a dict or a list, and second a string") + + if isinstance(terms[0], dict): # convert to list: + if terms[0].get('skipped',False) != False: + # the registered result was completely skipped + return [] + elementlist = [] + for key in terms[0].iterkeys(): + elementlist.append(terms[0][key]) + else: + elementlist = terms[0] + subelement = terms[1] + + ret = [] + for item0 in elementlist: + if not isinstance(item0, dict): + raise errors.AnsibleError("subelements lookup expects a dictionary, got '%s'" %item0) + if item0.get('skipped',False) != False: + # this particular item is to be skipped + continue + if not subelement in item0: + raise errors.AnsibleError("could not find '%s' key in iterated item '%s'" % (subelement, item0)) + if not isinstance(item0[subelement], list): + raise errors.AnsibleError("the key %s should point to a list, got '%s'" % (subelement, item0[subelement])) + sublist = item0.pop(subelement, []) + for item1 in sublist: + ret.append((item0, item1)) + + return ret + diff --git a/v2/ansible/plugins/lookup/template.py 
b/v2/ansible/plugins/lookup/template.py new file mode 100644 index 0000000000..e009b6b76b --- /dev/null +++ b/v2/ansible/plugins/lookup/template.py @@ -0,0 +1,33 @@ +# (c) 2012, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from ansible.utils import template +import ansible.utils as utils + +class LookupModule(object): + + def __init__(self, basedir=None, **kwargs): + self.basedir = basedir + + def run(self, terms, inject=None, **kwargs): + + terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject) + + ret = [] + for term in terms: + ret.append(template.template_from_file(self.basedir, term, inject)) + return ret diff --git a/v2/ansible/plugins/lookup/together.py b/v2/ansible/plugins/lookup/together.py new file mode 100644 index 0000000000..07332c9fb9 --- /dev/null +++ b/v2/ansible/plugins/lookup/together.py @@ -0,0 +1,64 @@ +# (c) 2013, Bradley Young +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +import ansible.utils as utils +from ansible.utils import safe_eval +import ansible.errors as errors +from itertools import izip_longest + +def flatten(terms): + ret = [] + for term in terms: + if isinstance(term, list): + ret.extend(term) + elif isinstance(term, tuple): + ret.extend(term) + else: + ret.append(term) + return ret + +class LookupModule(object): + """ + Transpose a list of arrays: + [1, 2, 3], [4, 5, 6] -> [1, 4], [2, 5], [3, 6] + Replace any empty spots in 2nd array with None: + [1, 2], [3] -> [1, 3], [2, None] + """ + + def __init__(self, basedir=None, **kwargs): + self.basedir = basedir + + def __lookup_injects(self, terms, inject): + results = [] + for x in terms: + intermediate = utils.listify_lookup_plugin_terms(x, self.basedir, inject) + results.append(intermediate) + return results + + def run(self, terms, inject=None, **kwargs): + + # this code is common with 'items.py' consider moving to utils if we need it again + + terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject) + terms = self.__lookup_injects(terms, inject) + + my_list = terms[:] + if len(my_list) == 0: + raise errors.AnsibleError("with_together requires at least one element in each list") + return [flatten(x) for x in izip_longest(*my_list, fillvalue=None)] + + diff --git a/v2/test/errors/test_errors.py b/v2/test/errors/test_errors.py index 5b24dc4345..30ff411128 100644 --- a/v2/test/errors/test_errors.py +++ b/v2/test/errors/test_errors.py @@ -51,7 +51,7 @@ class TestErrors(unittest.TestCase): mock_method.return_value = ('this is line 1\n', '') e = AnsibleError(self.message, self.obj) - self.assertEqual(e.message, "This is the error message\nThe error appears to have been in 'foo.yml': line 1, column 1,\nbut may actually be before there depending on the exact syntax problem.\n\nthis is line 1\n^\n") 
+ self.assertEqual(e.message, "This is the error message\n\nThe error appears to have been in 'foo.yml': line 1, column 1, but may\nbe elsewhere in the file depending on the exact syntax problem.\n\nthis is line 1\n^\n") def test_get_error_lines_from_file(self): m = mock_open() @@ -63,12 +63,12 @@ class TestErrors(unittest.TestCase): self.obj._line_number = 1 self.obj._column_number = 1 e = AnsibleError(self.message, self.obj) - self.assertEqual(e.message, "This is the error message\nThe error appears to have been in 'foo.yml': line 1, column 1,\nbut may actually be before there depending on the exact syntax problem.\n\nthis is line 1\n^\n") + self.assertEqual(e.message, "This is the error message\n\nThe error appears to have been in 'foo.yml': line 1, column 1, but may\nbe elsewhere in the file depending on the exact syntax problem.\n\nthis is line 1\n^\n") # this line will not be found, as it is out of the index range self.obj._data_source = 'foo.yml' self.obj._line_number = 2 self.obj._column_number = 1 e = AnsibleError(self.message, self.obj) - self.assertEqual(e.message, "This is the error message\nThe error appears to have been in 'foo.yml': line 2, column 1,\nbut may actually be before there depending on the exact syntax problem.\n\n(specified line no longer in file, maybe it changed?)") + self.assertEqual(e.message, "This is the error message\n\nThe error appears to have been in 'foo.yml': line 2, column 1, but may\nbe elsewhere in the file depending on the exact syntax problem.\n\n(specified line no longer in file, maybe it changed?)") diff --git a/v2/test/parsing/test_mod_args.py b/v2/test/parsing/test_mod_args.py index 0f9ee28dec..187edfa03c 100644 --- a/v2/test/parsing/test_mod_args.py +++ b/v2/test/parsing/test_mod_args.py @@ -31,7 +31,6 @@ class TestModArgsDwim(unittest.TestCase): # and the task knows the line numbers def setUp(self): - self.m = ModuleArgsParser() pass def _debug(self, mod, args, to): @@ -43,7 +42,8 @@ class 
TestModArgsDwim(unittest.TestCase): pass def test_basic_shell(self): - mod, args, to = self.m.parse(dict(shell='echo hi')) + m = ModuleArgsParser(dict(shell='echo hi')) + mod, args, to = m.parse() self._debug(mod, args, to) self.assertEqual(mod, 'command') self.assertEqual(args, dict( @@ -53,7 +53,8 @@ class TestModArgsDwim(unittest.TestCase): self.assertIsNone(to) def test_basic_command(self): - mod, args, to = self.m.parse(dict(command='echo hi')) + m = ModuleArgsParser(dict(command='echo hi')) + mod, args, to = m.parse() self._debug(mod, args, to) self.assertEqual(mod, 'command') self.assertEqual(args, dict( @@ -62,7 +63,8 @@ class TestModArgsDwim(unittest.TestCase): self.assertIsNone(to) def test_shell_with_modifiers(self): - mod, args, to = self.m.parse(dict(shell='/bin/foo creates=/tmp/baz removes=/tmp/bleep')) + m = ModuleArgsParser(dict(shell='/bin/foo creates=/tmp/baz removes=/tmp/bleep')) + mod, args, to = m.parse() self._debug(mod, args, to) self.assertEqual(mod, 'command') self.assertEqual(args, dict( @@ -74,42 +76,55 @@ class TestModArgsDwim(unittest.TestCase): self.assertIsNone(to) def test_normal_usage(self): - mod, args, to = self.m.parse(dict(copy='src=a dest=b')) + m = ModuleArgsParser(dict(copy='src=a dest=b')) + mod, args, to = m.parse() self._debug(mod, args, to) self.assertEqual(mod, 'copy') self.assertEqual(args, dict(src='a', dest='b')) self.assertIsNone(to) def test_complex_args(self): - mod, args, to = self.m.parse(dict(copy=dict(src='a', dest='b'))) + m = ModuleArgsParser(dict(copy=dict(src='a', dest='b'))) + mod, args, to = m.parse() self._debug(mod, args, to) self.assertEqual(mod, 'copy') self.assertEqual(args, dict(src='a', dest='b')) self.assertIsNone(to) def test_action_with_complex(self): - mod, args, to = self.m.parse(dict(action=dict(module='copy', src='a', dest='b'))) + m = ModuleArgsParser(dict(action=dict(module='copy', src='a', dest='b'))) + mod, args, to = m.parse() self._debug(mod, args, to) self.assertEqual(mod, 'copy') 
self.assertEqual(args, dict(src='a', dest='b')) self.assertIsNone(to) def test_action_with_complex_and_complex_args(self): - mod, args, to = self.m.parse(dict(action=dict(module='copy', args=dict(src='a', dest='b')))) + m = ModuleArgsParser(dict(action=dict(module='copy', args=dict(src='a', dest='b')))) + mod, args, to = m.parse() self._debug(mod, args, to) self.assertEqual(mod, 'copy') self.assertEqual(args, dict(src='a', dest='b')) self.assertIsNone(to) def test_local_action_string(self): - mod, args, to = self.m.parse(dict(local_action='copy src=a dest=b')) + m = ModuleArgsParser(dict(local_action='copy src=a dest=b')) + mod, args, to = m.parse() self._debug(mod, args, to) self.assertEqual(mod, 'copy') self.assertEqual(args, dict(src='a', dest='b')) self.assertIs(to, 'localhost') def test_multiple_actions(self): - self.assertRaises(AnsibleParserError, self.m.parse, dict(action='shell echo hi', local_action='shell echo hi')) - self.assertRaises(AnsibleParserError, self.m.parse, dict(action='shell echo hi', shell='echo hi')) - self.assertRaises(AnsibleParserError, self.m.parse, dict(local_action='shell echo hi', shell='echo hi')) - self.assertRaises(AnsibleParserError, self.m.parse, dict(ping='data=hi', shell='echo hi')) + m = ModuleArgsParser(dict(action='shell echo hi', local_action='shell echo hi')) + self.assertRaises(AnsibleParserError, m.parse) + + m = ModuleArgsParser(dict(action='shell echo hi', shell='echo hi')) + self.assertRaises(AnsibleParserError, m.parse) + + m = ModuleArgsParser(dict(local_action='shell echo hi', shell='echo hi')) + self.assertRaises(AnsibleParserError, m.parse) + + m = ModuleArgsParser(dict(ping='data=hi', shell='echo hi')) + self.assertRaises(AnsibleParserError, m.parse) + diff --git a/v2/test/playbook/test_playbook.py b/v2/test/playbook/test_playbook.py new file mode 100644 index 0000000000..640057820e --- /dev/null +++ b/v2/test/playbook/test_playbook.py @@ -0,0 +1,65 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part 
of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.compat.tests import unittest +from ansible.compat.tests.mock import patch, MagicMock + +from ansible.errors import AnsibleError, AnsibleParserError +from ansible.playbook import Playbook + +from test.mock.loader import DictDataLoader + +class TestPlaybook(unittest.TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_empty_playbook(self): + p = Playbook() + + def test_basic_playbook(self): + fake_loader = DictDataLoader({ + "test_file.yml":""" + - hosts: all + """, + }) + p = Playbook.load("test_file.yml", loader=fake_loader) + + def test_bad_playbook_files(self): + fake_loader = DictDataLoader({ + # represents a playbook which is not a list of plays + "bad_list.yml": """ + foo: bar + + """, + # represents a playbook where a play entry is mis-formatted + "bad_entry.yml": """ + - + - "This should be a mapping..." 
+ + """, + }) + self.assertRaises(AnsibleParserError, Playbook.load, "bad_list.yml", fake_loader) + self.assertRaises(AnsibleParserError, Playbook.load, "bad_entry.yml", fake_loader) + diff --git a/v2/test/playbook/test_task_include.py b/v2/test/playbook/test_task_include.py new file mode 100644 index 0000000000..42a63b7204 --- /dev/null +++ b/v2/test/playbook/test_task_include.py @@ -0,0 +1,63 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.compat.tests import unittest +from ansible.errors import AnsibleParserError +from ansible.parsing.yaml.objects import AnsibleMapping +from ansible.playbook.task_include import TaskInclude + +from test.mock.loader import DictDataLoader + +class TestTaskInclude(unittest.TestCase): + + def setUp(self): + self._fake_loader = DictDataLoader({ + "foo.yml": """ + - shell: echo "hello world" + """ + }) + + pass + + def tearDown(self): + pass + + def test_empty_task_include(self): + ti = TaskInclude() + + def test_basic_task_include(self): + ti = TaskInclude.load(AnsibleMapping(include='foo.yml'), loader=self._fake_loader) + + def test_task_include_with_loop(self): + ti = TaskInclude.load(AnsibleMapping(include='foo.yml', with_items=['a', 'b', 'c']), loader=self._fake_loader) + + def test_task_include_with_conditional(self): + ti = TaskInclude.load(AnsibleMapping(include='foo.yml', when="1 == 1"), loader=self._fake_loader) + + def test_task_include_with_tags(self): + ti = TaskInclude.load(AnsibleMapping(include='foo.yml', tags="foo"), loader=self._fake_loader) + ti = TaskInclude.load(AnsibleMapping(include='foo.yml', tags=["foo", "bar"]), loader=self._fake_loader) + + def test_task_include_errors(self): + self.assertRaises(AnsibleParserError, TaskInclude.load, AnsibleMapping(include=''), loader=self._fake_loader) + self.assertRaises(AnsibleParserError, TaskInclude.load, AnsibleMapping(include='foo.yml', vars="1"), loader=self._fake_loader) + self.assertRaises(AnsibleParserError, TaskInclude.load, AnsibleMapping(include='foo.yml a=1', vars=dict(b=2)), loader=self._fake_loader) + diff --git a/v2/test/plugins/test_plugins.py b/v2/test/plugins/test_plugins.py index e6bef809e6..0d0fe400d0 100644 --- a/v2/test/plugins/test_plugins.py +++ b/v2/test/plugins/test_plugins.py @@ -36,10 +36,6 @@ class TestErrors(unittest.TestCase): def 
tearDown(self): pass - def test_push_basedir(self): - push_basedir('/root/foo/bar') - self.assertEqual(_basedirs, ['/root/foo/bar']) - @patch.object(PluginLoader, '_get_paths') def test_print_paths(self, mock_method): mock_method.return_value = ['/path/one', '/path/two', '/path/three'] From 9112f650c085169bef9ecb17ddaf287a601079cd Mon Sep 17 00:00:00 2001 From: Alexander Ershov Date: Thu, 6 Nov 2014 13:50:36 +0300 Subject: [PATCH 346/813] Fix path to the integration tests "tests/integration" -> "test/integration" --- docsite/rst/community.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/community.rst b/docsite/rst/community.rst index b89d6e61df..d16070239e 100644 --- a/docsite/rst/community.rst +++ b/docsite/rst/community.rst @@ -146,7 +146,7 @@ to modify a pull request later. When submitting patches, be sure to run the unit tests first “make tests†and always use “git rebase†vs “git merge†(aliasing git pull to git pull --rebase is a great idea) to -avoid merge commits in your submissions. There are also integration tests that can be run in the "tests/integration" directory. +avoid merge commits in your submissions. There are also integration tests that can be run in the "test/integration" directory. In order to keep the history clean and better audit incoming code, we will require resubmission of pull requests that contain merge commits. Use "git pull --rebase" vs "git pull" and "git rebase" vs "git merge". Also be sure to use topic branches to keep your additions on different branches, such that they won't pick up stray commits later. 
From 5a11315dc73158dd988ea7c04724107e8fa6f134 Mon Sep 17 00:00:00 2001 From: Niku Toivola Date: Thu, 6 Nov 2014 15:03:03 +0200 Subject: [PATCH 347/813] add documentation for the delegate_to magic variable --- docsite/rst/playbooks_variables.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index 340744f419..9c90a9afe2 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -851,6 +851,8 @@ period, without the rest of the domain. *play_hosts* is available as a list of hostnames that are in scope for the current play. This may be useful for filling out templates with multiple hostnames or for injecting the list into the rules for a load balancer. +*delegate_to* is the inventory hostname of the host that the current task has been delegated to using 'delegate_to'. + Don't worry about any of this unless you think you need it. You'll know when you do. Also available, *inventory_dir* is the pathname of the directory holding Ansible's inventory host file, *inventory_file* is the pathname and the filename pointing to the Ansible's inventory host file. 
From 8e2a8c92ac7c4134f822ec77de08a1106da36fc6 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 6 Nov 2014 08:20:01 -0500 Subject: [PATCH 348/813] changed examples to not have a non working variable that gets confused with directives as per #9264 --- docsite/rst/playbooks_roles.rst | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docsite/rst/playbooks_roles.rst b/docsite/rst/playbooks_roles.rst index 0d847b3278..3ffabe835d 100644 --- a/docsite/rst/playbooks_roles.rst +++ b/docsite/rst/playbooks_roles.rst @@ -61,19 +61,19 @@ For instance, if deploying multiple wordpress instances, I could contain all of my wordpress tasks in a single wordpress.yml file, and use it like so:: tasks: - - include: wordpress.yml user=timmy - - include: wordpress.yml user=alice - - include: wordpress.yml user=bob + - include: wordpress.yml wp_user=timmy + - include: wordpress.yml wp_user=alice + - include: wordpress.yml wp_user=bob If you are running Ansible 1.4 and later, include syntax is streamlined to match roles, and also allows passing list and dictionary parameters:: tasks: - - { include: wordpress.yml, user: timmy, ssh_keys: [ 'keys/one.txt', 'keys/two.txt' ] } + - { include: wordpress.yml, wp_user: timmy, ssh_keys: [ 'keys/one.txt', 'keys/two.txt' ] } Using either syntax, variables passed in can then be used in the included files. We'll cover them in :doc:`playbooks_variables`. You can reference them like this:: - {{ user }} + {{ wp_user }} (In addition to the explicitly passed-in parameters, all variables from the vars section are also available for use here as well.) 
@@ -85,7 +85,7 @@ which also supports structured variables:: - include: wordpress.yml vars: - remote_user: timmy + wp_user: timmy some_list_variable: - alpha - beta From a2ba0c03e4aeee99eb5864a43a02468497e64841 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 6 Nov 2014 13:38:11 -0500 Subject: [PATCH 349/813] added error handling on private key stat in case it is saved in agent but not in filesystem --- lib/ansible/runner/connection.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/lib/ansible/runner/connection.py b/lib/ansible/runner/connection.py index 429b3f190d..e0bada9ead 100644 --- a/lib/ansible/runner/connection.py +++ b/lib/ansible/runner/connection.py @@ -36,7 +36,14 @@ class Connector(object): raise AnsibleError("unsupported connection type: %s" % transport) if private_key_file: # If private key is readable by user other than owner, flag an error - st = os.stat(private_key_file) + try: + st = os.stat(private_key_file) + except IOError, e: + if e.errno == errno.ENOENT: # file is missing, might be agent + st = { 'st_mode': False } + else: + raise(e) + if st.st_mode & (stat.S_IRGRP | stat.S_IROTH): raise AnsibleError("private_key_file (%s) is group-readable or world-readable and thus insecure - " "you will probably get an SSH failure" From 22ef830814837d21a5f4f9eefca74f2ce14d7fa4 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 6 Nov 2014 14:56:44 -0500 Subject: [PATCH 350/813] fixed ssh fix, bad test case but it should work now --- lib/ansible/runner/connection.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/ansible/runner/connection.py b/lib/ansible/runner/connection.py index e0bada9ead..bb50bf5531 100644 --- a/lib/ansible/runner/connection.py +++ b/lib/ansible/runner/connection.py @@ -20,6 +20,7 @@ import os import stat +import errno from ansible import utils from ansible.errors import AnsibleError @@ -36,15 +37,14 @@ class Connector(object): raise AnsibleError("unsupported connection 
type: %s" % transport) if private_key_file: # If private key is readable by user other than owner, flag an error + st = None try: st = os.stat(private_key_file) - except IOError, e: - if e.errno == errno.ENOENT: # file is missing, might be agent - st = { 'st_mode': False } - else: + except (IOError, OSError), e: + if e.errno != errno.ENOENT: # file is missing, might be agent raise(e) - if st.st_mode & (stat.S_IRGRP | stat.S_IROTH): + if st is not None and st.st_mode & (stat.S_IRGRP | stat.S_IROTH): raise AnsibleError("private_key_file (%s) is group-readable or world-readable and thus insecure - " "you will probably get an SSH failure" % (private_key_file,)) From 047dffdd01db04a12ef5eb43a6ac78bd3d30cfce Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Thu, 6 Nov 2014 16:41:34 -0600 Subject: [PATCH 351/813] Fix rax_find_loadbalancer issues * Loadbalancer IDs are not UUIDs * Ensure found list exists before using it --- lib/ansible/module_utils/rax.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/rax.py b/lib/ansible/module_utils/rax.py index a8f33208ca..75363b1aac 100644 --- a/lib/ansible/module_utils/rax.py +++ b/lib/ansible/module_utils/rax.py @@ -173,9 +173,9 @@ def rax_find_server(module, rax_module, server): def rax_find_loadbalancer(module, rax_module, loadbalancer): clb = rax_module.cloud_loadbalancers try: - UUID(loadbalancer) found = clb.get(loadbalancer) except: + found = [] for lb in clb.list(): if loadbalancer == lb.name: found.append(lb) From f1267c0b053e5975dc08c151530c802015902242 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 6 Nov 2014 21:28:04 -0800 Subject: [PATCH 352/813] Move from md5 to sha1 to work on fips-140 enabled systems --- CHANGELOG.md | 9 ++- docsite/rst/developing_modules.rst | 2 +- docsite/rst/playbooks_prompts.rst | 2 +- docsite/rst/playbooks_variables.rst | 4 +- lib/ansible/module_utils/basic.py | 11 ++- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 
lib/ansible/runner/__init__.py | 25 ++++--- lib/ansible/runner/action_plugins/assemble.py | 6 +- lib/ansible/runner/action_plugins/copy.py | 32 ++++----- lib/ansible/runner/action_plugins/fetch.py | 62 ++++++++++++----- lib/ansible/runner/action_plugins/template.py | 6 +- .../runner/action_plugins/unarchive.py | 4 +- lib/ansible/runner/filter_plugins/core.py | 7 +- lib/ansible/runner/shell_plugins/sh.py | 16 ++--- lib/ansible/utils/__init__.py | 33 +++++++-- lib/ansible/utils/vault.py | 2 + .../roles/test_assemble/tasks/main.yml | 22 ++++-- .../roles/test_command_shell/tasks/main.yml | 4 +- .../roles/test_copy/tasks/main.yml | 17 +++-- .../roles/test_lineinfile/tasks/main.yml | 68 +++++++++---------- .../roles/test_service/tasks/main.yml | 2 +- .../test_service/tasks/systemd_setup.yml | 2 +- .../roles/test_service/tasks/sysv_setup.yml | 2 +- .../test_service/tasks/upstart_setup.yml | 4 +- .../roles/test_stat/tasks/main.yml | 2 + .../roles/test_template/tasks/main.yml | 1 + test/units/TestModuleUtilsBasic.py | 10 +-- test/units/TestUtils.py | 10 +++ v2/ansible/parsing/vault/__init__.py | 2 + v2/ansible/playbook/role/__init__.py | 6 +- 31 files changed, 238 insertions(+), 139 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4faa8f2ed3..0902313569 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -58,7 +58,14 @@ Some other notable changes: * ec2_ami_search: support for SSD and IOPS provisioned EBS images * can set ansible_sudo_exe as an inventory variable which allows specifying a different sudo (or equivalent) command -* git module: Submodule handling has changed. Previously if you used the ``recursive`` parameter to handle submodules, ansible would track the submodule upstream's head revision. This has been changed to checkout the version of the submodule specified in the superproject's git repository. This is inline with what git submodule update does. 
If you want the old behaviour use the new module parameter track_submodules=yes +* git module: Submodule handling has changed. Previously if you used the + ``recursive`` parameter to handle submodules, ansible would track the + submodule upstream's head revision. This has been changed to checkout the + version of the submodule specified in the superproject's git repository. + This is inline with what git submodule update does. If you want the old + behaviour use the new module parameter track_submodules=yes +* Checksumming of transferred files has been made more portable and now uses + the sha1 algorithm instead of md5 to be compatible with FIPS-140. And various other bug fixes and improvements ... diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index 4a331626db..aff5fab556 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -262,7 +262,7 @@ And failures are just as simple (where 'msg' is a required parameter to explain module.fail_json(msg="Something fatal happened") -There are also other useful functions in the module class, such as module.md5(path). See +There are also other useful functions in the module class, such as module.sha1(path). See lib/ansible/module_common.py in the source checkout for implementation details. 
Again, modules developed this way are best tested with the hacking/test-module script in the git diff --git a/docsite/rst/playbooks_prompts.rst b/docsite/rst/playbooks_prompts.rst index c20e59e079..29fc218fe8 100644 --- a/docsite/rst/playbooks_prompts.rst +++ b/docsite/rst/playbooks_prompts.rst @@ -55,7 +55,7 @@ entered value so you can use it, for instance, with the user module to define a - name: "my_password2" prompt: "Enter password2" private: yes - encrypt: "md5_crypt" + encrypt: "sha512_crypt" confirm: yes salt_size: 7 diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index 9c90a9afe2..f9e3dda4e2 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -327,9 +327,9 @@ To work with Base64 encoded strings:: {{ encoded | b64decode }} {{ decoded | b64encode }} -To take an md5sum of a filename:: +To take a sha1sum of a filename:: - {{ filename | md5 }} + {{ filename | sha1 }} To cast values as certain types, such as when you input a string as "True" from a vars_prompt and the system doesn't know it is a boolean value:: diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 8a4548dc16..b8cfea2014 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -87,8 +87,13 @@ except ImportError: HAVE_HASHLIB=False try: - from hashlib import md5 as _md5 + from hashlib import sha1 as _sha1 HAVE_HASHLIB=True +except ImportError: + from sha import sha as _sha1 + +try: + from hashlib import md5 as _md5 except ImportError: from md5 import md5 as _md5 @@ -1236,6 +1241,10 @@ class AnsibleModule(object): ''' Return MD5 hex digest of local file using digest_from_file(). ''' return self.digest_from_file(filename, _md5()) + def sha1(self, filename): + ''' Return SHA1 hex digest of local file using digest_from_file(). 
''' + return self.digest_from_file(filename, _sha1()) + def sha256(self, filename): ''' Return SHA-256 hex digest of local file using digest_from_file(). ''' if not HAVE_HASHLIB: diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 2970b339eb..6317d3a988 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 2970b339eb8ea6031e6153cabe45459bc2bd5754 +Subproject commit 6317d3a988f7269340cb7a0d105d2c671ca1cd1e diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index ad181b7aa9..5a514ccdda 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit ad181b7aa949848e3085065e09195cb28c34fdf7 +Subproject commit 5a514ccddae85ccc5802eea8751401600e45c32f diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 4ef6f0ceab..7641200544 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -53,9 +53,9 @@ from ansible.utils import update_hash module_replacer = ModuleReplacer(strip_comments=False) try: - from hashlib import md5 as _md5 + from hashlib import sha1 except ImportError: - from md5 import md5 as _md5 + from sha import sha as sha1 HAS_ATFORK=True try: @@ -209,7 +209,7 @@ class Runner(object): self.su_user_var = su_user self.su_user = None self.su_pass = su_pass - self.omit_token = '__omit_place_holder__%s' % _md5(os.urandom(64)).hexdigest() + self.omit_token = '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest() self.vault_pass = vault_pass self.no_log = no_log self.run_once = run_once @@ -1159,26 +1159,29 @@ class Runner(object): # ***************************************************** - def _remote_md5(self, conn, tmp, path): - ''' takes a remote md5sum without requiring python, and returns 1 if no file ''' - cmd = conn.shell.md5(path) + def _remote_checksum(self, conn, tmp, path): + ''' takes a remote checksum and returns 1 if no file ''' + inject = self.get_inject_vars(conn.host) + 
hostvars = HostVars(inject['combined_cache'], self.inventory, vault_password=self.vault_pass) + python_interp = hostvars[conn.host].get('ansible_python_interpreter', 'python') + cmd = conn.shell.checksum(path, python_interp) data = self._low_level_exec_command(conn, cmd, tmp, sudoable=True) data2 = utils.last_non_blank_line(data['stdout']) try: if data2 == '': # this may happen if the connection to the remote server - # failed, so just return "INVALIDMD5SUM" to avoid errors - return "INVALIDMD5SUM" + # failed, so just return "INVALIDCHECKSUM" to avoid errors + return "INVALIDCHECKSUM" else: return data2.split()[0] except IndexError: - sys.stderr.write("warning: md5sum command failed unusually, please report this to the list so it can be fixed\n") - sys.stderr.write("command: %s\n" % md5s) + sys.stderr.write("warning: Calculating checksum failed unusually, please report this to the list so it can be fixed\n") + sys.stderr.write("command: %s\n" % cmd) sys.stderr.write("----\n") sys.stderr.write("output: %s\n" % data) sys.stderr.write("----\n") # this will signal that it changed and allow things to keep going - return "INVALIDMD5SUM" + return "INVALIDCHECKSUM" # ***************************************************** diff --git a/lib/ansible/runner/action_plugins/assemble.py b/lib/ansible/runner/action_plugins/assemble.py index c6f7165d82..9f5d450c2f 100644 --- a/lib/ansible/runner/action_plugins/assemble.py +++ b/lib/ansible/runner/action_plugins/assemble.py @@ -108,10 +108,10 @@ class ActionModule(object): # Does all work assembling the file path = self._assemble_from_fragments(src, delimiter, _re) - pathmd5 = utils.md5s(path) - remote_md5 = self.runner._remote_md5(conn, tmp, dest) + path_checksum = utils.checksum_s(path) + remote_checksum = self.runner._remote_checksum(conn, tmp, dest) - if pathmd5 != remote_md5: + if path_checksum != remote_checksum: resultant = file(path).read() if self.runner.diff: dest_result = self.runner._execute_module(conn, tmp, 'slurp', 
"path=%s" % dest, inject=inject, persist_files=True) diff --git a/lib/ansible/runner/action_plugins/copy.py b/lib/ansible/runner/action_plugins/copy.py index 27b17b9969..2b3d387173 100644 --- a/lib/ansible/runner/action_plugins/copy.py +++ b/lib/ansible/runner/action_plugins/copy.py @@ -158,11 +158,11 @@ class ActionModule(object): tmp_path = self.runner._make_tmp_path(conn) for source_full, source_rel in source_files: - # Generate the MD5 hash of the local file. - local_md5 = utils.md5(source_full) + # Generate a hash of the local file. + local_checksum = utils.checksum(source_full) - # If local_md5 is not defined we can't find the file so we should fail out. - if local_md5 is None: + # If local_checksum is not defined we can't find the file so we should fail out. + if local_checksum is None: result = dict(failed=True, msg="could not find src=%s" % source_full) return ReturnData(conn=conn, result=result) @@ -174,27 +174,27 @@ class ActionModule(object): else: dest_file = conn.shell.join_path(dest) - # Attempt to get the remote MD5 Hash. - remote_md5 = self.runner._remote_md5(conn, tmp_path, dest_file) + # Attempt to get the remote checksum + remote_checksum = self.runner._remote_checksum(conn, tmp_path, dest_file) - if remote_md5 == '3': - # The remote_md5 was executed on a directory. + if remote_checksum == '3': + # The remote_checksum was executed on a directory. if content is not None: # If source was defined as content remove the temporary file and fail out. self._remove_tempfile_if_content_defined(content, content_tempfile) result = dict(failed=True, msg="can not use content with a dir as dest") return ReturnData(conn=conn, result=result) else: - # Append the relative source location to the destination and retry remote_md5. 
+ # Append the relative source location to the destination and retry remote_checksum dest_file = conn.shell.join_path(dest, source_rel) - remote_md5 = self.runner._remote_md5(conn, tmp_path, dest_file) + remote_checksum = self.runner._remote_checksum(conn, tmp_path, dest_file) - if remote_md5 != '1' and not force: + if remote_checksum != '1' and not force: # remote_file does not exist so continue to next iteration. continue - if local_md5 != remote_md5: - # The MD5 hashes don't match and we will change or error out. + if local_checksum != remote_checksum: + # The checksums don't match and we will change or error out. changed = True # Create a tmp_path if missing only if this is not recursive. @@ -254,7 +254,7 @@ class ActionModule(object): module_executed = True else: - # no need to transfer the file, already correct md5, but still need to call + # no need to transfer the file, already correct hash, but still need to call # the file module in case we want to change attributes self._remove_tempfile_if_content_defined(content, content_tempfile) @@ -283,8 +283,8 @@ class ActionModule(object): module_executed = True module_result = module_return.result - if not module_result.get('md5sum'): - module_result['md5sum'] = local_md5 + if not module_result.get('checksum'): + module_result['checksum'] = local_checksum if module_result.get('failed') == True: return module_return if module_result.get('changed') == True: diff --git a/lib/ansible/runner/action_plugins/fetch.py b/lib/ansible/runner/action_plugins/fetch.py index 80e8a89936..825023a0bc 100644 --- a/lib/ansible/runner/action_plugins/fetch.py +++ b/lib/ansible/runner/action_plugins/fetch.py @@ -50,26 +50,40 @@ class ActionModule(object): flat = utils.boolean(flat) fail_on_missing = options.get('fail_on_missing', False) fail_on_missing = utils.boolean(fail_on_missing) - validate_md5 = options.get('validate_md5', True) - validate_md5 = utils.boolean(validate_md5) + validate_checksum = options.get('validate_checksum', 
None) + if validate_checksum is not None: + validate_checksum = utils.boolean(validate_checksum) + # Alias for validate_checksum (old way of specifying it) + validate_md5 = options.get('validate_md5', None) + if validate_md5 is not None: + validate_md5 = utils.boolean(validate_md5) + if validate_md5 is None and validate_checksum is None: + # Default + validate_checksum = True + elif validate_checksum is None: + validate_checksum = validate_md5 + elif validate_md5 is not None and validate_checksum is not None: + results = dict(failed=True, msg="validate_checksum and validate_md5 cannot both be specified") + return ReturnData(conn, result=results) + if source is None or dest is None: results = dict(failed=True, msg="src and dest are required") return ReturnData(conn=conn, result=results) source = conn.shell.join_path(source) - # calculate md5 sum for the remote file - remote_md5 = self.runner._remote_md5(conn, tmp, source) + # calculate checksum for the remote file + remote_checksum = self.runner._remote_checksum(conn, tmp, source) # use slurp if sudo and permissions are lacking remote_data = None - if remote_md5 in ('1', '2') or self.runner.sudo: + if remote_checksum in ('1', '2') or self.runner.sudo: slurpres = self.runner._execute_module(conn, tmp, 'slurp', 'src=%s' % source, inject=inject) if slurpres.is_successful(): if slurpres.result['encoding'] == 'base64': remote_data = base64.b64decode(slurpres.result['content']) if remote_data is not None: - remote_md5 = utils.md5s(remote_data) + remote_checksum = utils.checksum_s(remote_data) # the source path may have been expanded on the # target system, so we compare it here and use the # expanded version if it's different @@ -101,23 +115,23 @@ class ActionModule(object): # these don't fail because you may want to transfer a log file that possibly MAY exist # but keep going to fetch other log files - if remote_md5 == '0': + if remote_checksum == '0': result = dict(msg="unable to calculate the md5 sum of the remote 
file", file=source, changed=False) return ReturnData(conn=conn, result=result) - if remote_md5 == '1': + if remote_checksum == '1': if fail_on_missing: result = dict(failed=True, msg="the remote file does not exist", file=source) else: result = dict(msg="the remote file does not exist, not transferring, ignored", file=source, changed=False) return ReturnData(conn=conn, result=result) - if remote_md5 == '2': + if remote_checksum == '2': result = dict(msg="no read permission on remote file, not transferring, ignored", file=source, changed=False) return ReturnData(conn=conn, result=result) - # calculate md5 sum for the local file - local_md5 = utils.md5(dest) + # calculate checksum for the local file + local_checksum = utils.checksum(dest) - if remote_md5 != local_md5: + if remote_checksum != local_checksum: # create the containing directories, if needed if not os.path.isdir(os.path.dirname(dest)): os.makedirs(os.path.dirname(dest)) @@ -129,13 +143,27 @@ class ActionModule(object): f = open(dest, 'w') f.write(remote_data) f.close() - new_md5 = utils.md5(dest) - if validate_md5 and new_md5 != remote_md5: - result = dict(failed=True, md5sum=new_md5, msg="md5 mismatch", file=source, dest=dest, remote_md5sum=remote_md5) + new_checksum = utils.secure_hash(dest) + # For backwards compatibility. 
We'll return None on FIPS enabled + # systems + try: + new_md5 = utils.md5(dest) + except ValueError: + new_md5 = None + + if validate_checksum and new_checksum != remote_checksum: + result = dict(failed=True, md5sum=new_md5, msg="checksum mismatch", file=source, dest=dest, remote_md5sum=None, checksum=new_checksum, remote_checksum=remote_checksum) return ReturnData(conn=conn, result=result) - result = dict(changed=True, md5sum=new_md5, dest=dest, remote_md5sum=remote_md5) + result = dict(changed=True, md5sum=new_md5, dest=dest, remote_md5sum=None, checksum=new_checksum, remote_checksum=remote_checksum) return ReturnData(conn=conn, result=result) else: - result = dict(changed=False, md5sum=local_md5, file=source, dest=dest) + # For backwards compatibility. We'll return None on FIPS enabled + # systems + try: + local_md5 = utils.md5(dest) + except ValueError: + local_md5 = None + + result = dict(changed=False, md5sum=local_md5, file=source, dest=dest, checksum=local_checksum) return ReturnData(conn=conn, result=result) diff --git a/lib/ansible/runner/action_plugins/template.py b/lib/ansible/runner/action_plugins/template.py index 4f5a41df8a..2fe07c3039 100644 --- a/lib/ansible/runner/action_plugins/template.py +++ b/lib/ansible/runner/action_plugins/template.py @@ -87,10 +87,10 @@ class ActionModule(object): result = dict(failed=True, msg=type(e).__name__ + ": " + str(e)) return ReturnData(conn=conn, comm_ok=False, result=result) - local_md5 = utils.md5s(resultant) - remote_md5 = self.runner._remote_md5(conn, tmp, dest) + local_checksum = utils.checksum_s(resultant) + remote_checksum = self.runner._remote_checksum(conn, tmp, dest) - if local_md5 != remote_md5: + if local_checksum != remote_checksum: # template is different from the remote value diff --git a/lib/ansible/runner/action_plugins/unarchive.py b/lib/ansible/runner/action_plugins/unarchive.py index a569403cac..1f831e4207 100644 --- a/lib/ansible/runner/action_plugins/unarchive.py +++ 
b/lib/ansible/runner/action_plugins/unarchive.py @@ -62,8 +62,8 @@ class ActionModule(object): else: source = utils.path_dwim(self.runner.basedir, source) - remote_md5 = self.runner._remote_md5(conn, tmp, dest) - if remote_md5 != '3': + remote_checksum = self.runner._remote_checksum(conn, tmp, dest) + if remote_checksum != '3': result = dict(failed=True, msg="dest '%s' must be an existing dir" % dest) return ReturnData(conn=conn, result=result) diff --git a/lib/ansible/runner/filter_plugins/core.py b/lib/ansible/runner/filter_plugins/core.py index 61b80bce2c..e2a13f8c4e 100644 --- a/lib/ansible/runner/filter_plugins/core.py +++ b/lib/ansible/runner/filter_plugins/core.py @@ -26,7 +26,7 @@ import re import collections import operator as py_operator from ansible import errors -from ansible.utils import md5s +from ansible.utils import md5s, checksum_s from distutils.version import LooseVersion, StrictVersion from random import SystemRandom from jinja2.filters import environmentfilter @@ -281,8 +281,13 @@ class FilterModule(object): # quote string for shell usage 'quote': quote, + # hash filters # md5 hex digest of string 'md5': md5s, + # sha1 hex digeset of string + 'sha1': checksum_s, + # checksum of string as used by ansible for checksuming files + 'checksum': checksum_s, # file glob 'fileglob': fileglob, diff --git a/lib/ansible/runner/shell_plugins/sh.py b/lib/ansible/runner/shell_plugins/sh.py index 1ee225830b..134c857f17 100644 --- a/lib/ansible/runner/shell_plugins/sh.py +++ b/lib/ansible/runner/shell_plugins/sh.py @@ -59,23 +59,17 @@ class ShellModule(object): cmd += ' && echo %s' % basetmp return cmd - def md5(self, path): + def checksum(self, path, python_interp): path = pipes.quote(path) # The following test needs to be SH-compliant. BASH-isms will # not work if /bin/sh points to a non-BASH shell. 
test = "rc=0; [ -r \"%s\" ] || rc=2; [ -f \"%s\" ] || rc=1; [ -d \"%s\" ] && echo 3 && exit 0" % ((path,) * 3) - md5s = [ - "(/usr/bin/md5sum %s 2>/dev/null)" % path, # Linux - "(/sbin/md5sum -q %s 2>/dev/null)" % path, # ? - "(/usr/bin/digest -a md5 %s 2>/dev/null)" % path, # Solaris 10+ - "(/sbin/md5 -q %s 2>/dev/null)" % path, # Freebsd - "(/usr/bin/md5 -n %s 2>/dev/null)" % path, # Netbsd - "(/bin/md5 -q %s 2>/dev/null)" % path, # Openbsd - "(/usr/bin/csum -h MD5 %s 2>/dev/null)" % path, # AIX - "(/bin/csum -h MD5 %s 2>/dev/null)" % path # AIX also + csums = [ + "(%s -c 'import hashlib; print(hashlib.sha1(open(\"%s\", \"rb\").read()).hexdigest())' 2>/dev/null)" % (python_interp, path), # Python > 2.4 (including python3) + "(%s -c 'import sha; print(sha.sha(open(\"%s\", \"rb\").read()).hexdigest())' 2>/dev/null)" % (python_interp, path), # Python == 2.4 ] - cmd = " || ".join(md5s) + cmd = " || ".join(csums) cmd = "%s; %s || (echo \"${rc} %s\")" % (test, cmd, path) return cmd diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index 952e8537d0..e82ae8d374 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -68,6 +68,14 @@ try: except ImportError: import simplejson as json +# Note, sha1 is the only hash algorithm compatible with python2.4 and with +# FIPS-140 mode (as of 11-2014) +try: + from hashlib import sha1 as sha1 +except ImportError: + from sha import sha as sha1 + +# Backwards compat only try: from hashlib import md5 as _md5 except ImportError: @@ -821,22 +829,22 @@ def merge_hash(a, b): return result -def md5s(data): - ''' Return MD5 hex digest of data. ''' +def secure_hash_s(data, hash_func=sha1): + ''' Return a secure hash hex digest of data. 
''' - digest = _md5() + digest = hash_func() try: digest.update(data) except UnicodeEncodeError: digest.update(data.encode('utf-8')) return digest.hexdigest() -def md5(filename): - ''' Return MD5 hex digest of local file, None if file is not present or a directory. ''' +def secure_hash(filename, hash_func=sha1): + ''' Return a secure hash hex digest of local file, None if file is not present or a directory. ''' if not os.path.exists(filename) or os.path.isdir(filename): return None - digest = _md5() + digest = hash_func() blocksize = 64 * 1024 try: infile = open(filename, 'rb') @@ -849,6 +857,19 @@ def md5(filename): raise errors.AnsibleError("error while accessing the file %s, error was: %s" % (filename, e)) return digest.hexdigest() +# The checksum algorithm must match with the algorithm in ShellModule.checksum() method +checksum = secure_hash +checksum_s = secure_hash_s + +# Backwards compat. Some modules include md5s in their return values +# Continue to support that for now. As of ansible-1.8, all of those modules +# should also return "checksum" (sha1 for now) +def md5s(data): + return secure_hash_s(data, _md5) + +def md5(filename): + return secure_hash(filename, _md5) + def default(value, function): ''' syntactic sugar around lazy evaluation of defaults ''' if value is None: diff --git a/lib/ansible/utils/vault.py b/lib/ansible/utils/vault.py index 50b686c1e0..ad2dfab0b7 100644 --- a/lib/ansible/utils/vault.py +++ b/lib/ansible/utils/vault.py @@ -26,6 +26,8 @@ from io import BytesIO from subprocess import call from ansible import errors from hashlib import sha256 +# Note: Only used for loading obsolete VaultAES files. 
All files are written +# using the newer VaultAES256 which does not require md5 from hashlib import md5 from binascii import hexlify from binascii import unhexlify diff --git a/test/integration/roles/test_assemble/tasks/main.yml b/test/integration/roles/test_assemble/tasks/main.yml index f06cee6ace..d0c1f15e56 100644 --- a/test/integration/roles/test_assemble/tasks/main.yml +++ b/test/integration/roles/test_assemble/tasks/main.yml @@ -37,7 +37,19 @@ assert: that: - "result.state == 'file'" - - "result.md5sum == '96905702a2ece40de6bf3a94b5062513'" + - "result.changed == True" + - "result.checksum == '048a1bd1951aa5ccc427eeb4ca19aee45e9c68b3'" + +- name: test assemble with all fragments + assemble: src="{{output_dir}}/src" dest="{{output_dir}}/assembled1" + register: result + +- name: assert that the same assemble made no changes + assert: + that: + - "result.state == 'file'" + - "result.changed == False" + - "result.checksum == '048a1bd1951aa5ccc427eeb4ca19aee45e9c68b3'" - name: test assemble with fragments matching a regex assemble: src="{{output_dir}}/src" dest="{{output_dir}}/assembled2" regexp="^fragment[1-3]$" @@ -47,7 +59,7 @@ assert: that: - "result.state == 'file'" - - "result.md5sum == 'eb9e3486a9cd6943b5242e573b9b9349'" + - "result.checksum == 'edfe2d7487ef8f5ebc0f1c4dc57ba7b70a7b8e2b'" - name: test assemble with a delimiter assemble: src="{{output_dir}}/src" dest="{{output_dir}}/assembled3" delimiter="#--- delimiter ---#" @@ -57,7 +69,7 @@ assert: that: - "result.state == 'file'" - - "result.md5sum == '4773eac67aba3f0be745876331c8a450'" + - "result.checksum == '505359f48c65b3904127cf62b912991d4da7ed6d'" - name: test assemble with remote_src=False assemble: src="./" dest="{{output_dir}}/assembled4" remote_src=no @@ -67,7 +79,7 @@ assert: that: - "result.state == 'file'" - - "result.md5sum == '96905702a2ece40de6bf3a94b5062513'" + - "result.checksum == '048a1bd1951aa5ccc427eeb4ca19aee45e9c68b3'" - name: test assemble with remote_src=False and a delimiter 
assemble: src="./" dest="{{output_dir}}/assembled5" remote_src=no delimiter="#--- delimiter ---#" @@ -77,5 +89,5 @@ assert: that: - "result.state == 'file'" - - "result.md5sum == '4773eac67aba3f0be745876331c8a450'" + - "result.checksum == '505359f48c65b3904127cf62b912991d4da7ed6d'" diff --git a/test/integration/roles/test_command_shell/tasks/main.yml b/test/integration/roles/test_command_shell/tasks/main.yml index 3c273260c1..b331452b7c 100644 --- a/test/integration/roles/test_command_shell/tasks/main.yml +++ b/test/integration/roles/test_command_shell/tasks/main.yml @@ -185,7 +185,7 @@ "multiline echo" \ "with a new line in quotes" \ - | md5sum \ + | sha1sum \ | tr -s ' ' \ | cut -f1 -d ' ' echo "this is a second line" @@ -197,7 +197,7 @@ assert: that: - "shell_result6.changed" - - "shell_result6.stdout == '32f3cc201b69ed8afa3902b80f554ca8\nthis is a second line'" + - "shell_result6.stdout == '5575bb6b71c9558db0b6fbbf2f19909eeb4e3b98\nthis is a second line'" - name: execute a shell command using a literal multiline block with arguments in it shell: | diff --git a/test/integration/roles/test_copy/tasks/main.yml b/test/integration/roles/test_copy/tasks/main.yml index 47ed516657..fa09d37eb4 100644 --- a/test/integration/roles/test_copy/tasks/main.yml +++ b/test/integration/roles/test_copy/tasks/main.yml @@ -40,6 +40,7 @@ - "'group' in copy_result" - "'gid' in copy_result" - "'md5sum' in copy_result" + - "'checksum' in copy_result" - "'owner' in copy_result" - "'size' in copy_result" - "'src' in copy_result" @@ -51,10 +52,11 @@ that: - "copy_result.changed == true" -- name: verify that the file md5sum is correct - assert: - that: +- name: verify that the file checksums are correct + assert: + that: - "copy_result.md5sum == 'c47397529fe81ab62ba3f85e9f4c71f2'" + - "copy_result.checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6'" - name: check the stat results of the file stat: path={{output_file}} @@ -71,6 +73,7 @@ - "stat_results.stat.isreg == true" - 
"stat_results.stat.issock == false" - "stat_results.stat.md5 == 'c47397529fe81ab62ba3f85e9f4c71f2'" + - "stat_results.stat.checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6'" - name: overwrite the file via same means copy: src=foo.txt dest={{output_file}} @@ -180,7 +183,7 @@ that: - "copy_result6.changed" - "copy_result6.dest == '{{output_dir|expanduser}}/multiline.txt'" - - "copy_result6.md5sum == '1627d51e7e607c92cf1a502bf0c6cce3'" + - "copy_result6.checksum == '9cd0697c6a9ff6689f0afb9136fa62e0b3fee903'" # test overwriting a file as an unprivileged user (pull request #8624) # this can't be relative to {{output_dir}} as ~root usually has mode 700 @@ -202,7 +205,7 @@ that: - "copy_result7.changed" - "copy_result7.dest == '/tmp/worldwritable/file.txt'" - - "copy_result7.md5sum == '73feffa4b7f6bb68e44cf984c85f6e88'" + - "copy_result7.checksum == 'bbe960a25ea311d21d40669e93df2003ba9b90a2'" - name: clean up file: dest=/tmp/worldwritable state=absent @@ -230,10 +233,10 @@ - stat_link_result.stat.islnk - name: get the md5 of the link target - shell: md5sum {{output_dir}}/follow_test | cut -f1 -sd ' ' + shell: sha1sum {{output_dir}}/follow_test | cut -f1 -sd ' ' register: target_file_result - name: assert that the link target was updated assert: that: - - replace_follow_result.md5sum == target_file_result.stdout + - replace_follow_result.checksum == target_file_result.stdout diff --git a/test/integration/roles/test_lineinfile/tasks/main.yml b/test/integration/roles/test_lineinfile/tasks/main.yml index 8d58cbba6f..3f8a8dc5ba 100644 --- a/test/integration/roles/test_lineinfile/tasks/main.yml +++ b/test/integration/roles/test_lineinfile/tasks/main.yml @@ -24,7 +24,7 @@ assert: that: - "result.changed == true" - - "result.md5sum == '6be7fb7fa7fb758c80a6dc0722979c40'" + - "result.checksum == '5feac65e442c91f557fc90069ce6efc4d346ab51'" - "result.state == 'file'" - name: insert a line at the beginning of the file, and back it up @@ -42,19 +42,19 @@ stat: 
path={{result.backup}} register: result -- name: assert the backup file matches the previous md5 +- name: assert the backup file matches the previous hash assert: that: - - "result.stat.md5 == '6be7fb7fa7fb758c80a6dc0722979c40'" + - "result.stat.checksum == '5feac65e442c91f557fc90069ce6efc4d346ab51'" - name: stat the test after the insert at the head stat: path={{output_dir}}/test.txt register: result -- name: assert test md5 matches after the insert at the head +- name: assert test hash is what we expect for the file with the insert at the head assert: that: - - "result.stat.md5 == '07c16434644a2a3cc1807c685917443a'" + - "result.stat.checksum == '7eade4042b23b800958fe807b5bfc29f8541ec09'" - name: insert a line at the end of the file lineinfile: dest={{output_dir}}/test.txt state=present line="New line at the end" insertafter="EOF" @@ -70,10 +70,10 @@ stat: path={{output_dir}}/test.txt register: result -- name: assert test md5 matches after the insert at the end +- name: assert test checksum matches after the insert at the end assert: that: - - "result.stat.md5 == 'da4c2150e5782fcede1840280ab87eff'" + - "result.stat.checksum == 'fb57af7dc10a1006061b000f1f04c38e4bef50a9'" - name: insert a line after the first line lineinfile: dest={{output_dir}}/test.txt state=present line="New line after line 1" insertafter="^This is line 1$" @@ -89,10 +89,10 @@ stat: path={{output_dir}}/test.txt register: result -- name: assert test md5 matches after the insert after the first line +- name: assert test checksum matches after the insert after the first line assert: that: - - "result.stat.md5 == '196722c8faaa28b960bee66fa4cce58c'" + - "result.stat.checksum == '5348da605b1bc93dbadf3a16474cdf22ef975bec'" - name: insert a line before the last line lineinfile: dest={{output_dir}}/test.txt state=present line="New line after line 5" insertbefore="^This is line 5$" @@ -108,10 +108,10 @@ stat: path={{output_dir}}/test.txt register: result -- name: assert test md5 matches after the insert 
before the last line +- name: assert test checksum matches after the insert before the last line assert: that: - - "result.stat.md5 == 'd5955ee042139dfef16dbe3a7334475f'" + - "result.stat.checksum == 'e1cae425403507feea4b55bb30a74decfdd4a23e'" - name: replace a line with backrefs lineinfile: dest={{output_dir}}/test.txt state=present line="This is line 3" backrefs=yes regexp="^(REF) .* \\1$" @@ -127,16 +127,16 @@ stat: path={{output_dir}}/test.txt register: result -- name: assert test md5 matches after backref line was replaced +- name: assert test checksum matches after backref line was replaced assert: that: - - "result.stat.md5 == '0f585270054e17be242743dd31c6f593'" + - "result.stat.checksum == '2ccdf45d20298f9eaece73b713648e5489a52444'" - name: remove the middle line lineinfile: dest={{output_dir}}/test.txt state=absent regexp="^This is line 3$" register: result -- name: assert that the line was inserted at the head of the file +- name: assert that the line was removed assert: that: - "result.changed == true" @@ -146,10 +146,10 @@ stat: path={{output_dir}}/test.txt register: result -- name: assert test md5 matches after the middle line was removed +- name: assert test checksum matches after the middle line was removed assert: that: - - "result.stat.md5 == '661603660051991b79429c2dc68d9a67'" + - "result.stat.checksum == 'a6ba6865547c19d4c203c38a35e728d6d1942c75'" - name: run a validation script that succeeds lineinfile: dest={{output_dir}}/test.txt state=absent regexp="^This is line 5$" validate="true %s" @@ -165,10 +165,10 @@ stat: path={{output_dir}}/test.txt register: result -- name: assert test md5 matches after the validation succeeded +- name: assert test checksum matches after the validation succeeded assert: that: - - "result.stat.md5 == '9af984939bd859f7794661e501b4f1a4'" + - "result.stat.checksum == '76955a4516a00a38aad8427afc9ee3e361024ba5'" - name: run a validation script that fails lineinfile: dest={{output_dir}}/test.txt state=absent regexp="^This 
is line 1$" validate="/bin/false %s" @@ -184,10 +184,10 @@ stat: path={{output_dir}}/test.txt register: result -- name: assert test md5 matches the previous after the validation failed +- name: assert test checksum matches the previous after the validation failed assert: that: - - "result.stat.md5 == '9af984939bd859f7794661e501b4f1a4'" + - "result.stat.checksum == '76955a4516a00a38aad8427afc9ee3e361024ba5'" - name: use create=yes lineinfile: dest={{output_dir}}/new_test.txt create=yes insertbefore=BOF state=present line="This is a new file" @@ -204,10 +204,10 @@ register: result ignore_errors: yes -- name: assert the newly created test md5 matches +- name: assert the newly created test checksum matches assert: that: - - "result.stat.md5 == 'fef1d487711facfd7aa2c87d788c19d9'" + - "result.stat.checksum == '038f10f9e31202451b093163e81e06fbac0c6f3a'" # Test EOF in cases where file has no newline at EOF - name: testnoeof deploy the file for lineinfile @@ -238,10 +238,10 @@ stat: path={{output_dir}}/testnoeof.txt register: result -- name: testnoeof assert test md5 matches after the insert at the end +- name: testnoeof assert test checksum matches after the insert at the end assert: that: - - "result.stat.md5 == 'f75c9d51f45afd7295000e63ce655220'" + - "result.stat.checksum == 'f9af7008e3cb67575ce653d094c79cabebf6e523'" # Test EOF with empty file to make sure no unneccessary newline is added - name: testempty deploy the testempty file for lineinfile @@ -262,18 +262,18 @@ stat: path={{output_dir}}/testempty.txt register: result -- name: testempty assert test md5 matches after the insert at the end +- name: testempty assert test checksum matches after the insert at the end assert: that: - - "result.stat.md5 == '357dcbee8dfb4436f63bab00a235c45a'" + - "result.stat.checksum == 'f440dc65ea9cec3fd496c1479ddf937e1b949412'" - stat: path={{output_dir}}/test.txt register: result -- name: assert test md5 matches after insert the multiple lines +- name: assert test checksum matches 
after inserting multiple lines assert: that: - - "result.stat.md5 == 'c2510d5bc8fdef8e752b8f8e74c784c2'" + - "result.stat.checksum == 'bf5b711f8f0509355aaeb9d0d61e3e82337c1365'" - name: replace a line with backrefs included in the line lineinfile: dest={{output_dir}}/test.txt state=present line="New \\1 created with the backref" backrefs=yes regexp="^This is (line 4)$" @@ -289,10 +289,10 @@ stat: path={{output_dir}}/test.txt register: result -- name: assert test md5 matches after backref line was replaced +- name: assert test checksum matches after backref line was replaced assert: that: - - "result.stat.md5 == '65f955c2a9722fd43d07103d7756ff9b'" + - "result.stat.checksum == '04b7a54d0fb233a4e26c9e625325bb4874841b3c'" ################################################################### # issue 8535 @@ -332,10 +332,10 @@ stat: path={{output_dir}}/test_quoting.txt register: result -- name: assert test md5 matches after backref line was replaced +- name: assert test checksum matches after backref line was replaced assert: that: - - "result.stat.md5 == '29f349baf1b9c6703beeb346fe8dc669'" + - "result.stat.checksum == '7dc3cb033c3971e73af0eaed6623d4e71e5743f1'" - name: insert a line into the quoted file with a single quote lineinfile: dest={{output_dir}}/test_quoting.txt line="import g'" @@ -350,9 +350,9 @@ stat: path={{output_dir}}/test_quoting.txt register: result -- name: assert test md5 matches after backref line was replaced +- name: assert test checksum matches after backref line was replaced assert: that: - - "result.stat.md5 == 'fbe9c4ba2490f70eb1974ce31ec4a39f'" + - "result.stat.checksum == '73b271c2cc1cef5663713bc0f00444b4bf9f4543'" ################################################################### diff --git a/test/integration/roles/test_service/tasks/main.yml b/test/integration/roles/test_service/tasks/main.yml index ab4335a8a5..6f941eeb5c 100644 --- a/test/integration/roles/test_service/tasks/main.yml +++ b/test/integration/roles/test_service/tasks/main.yml 
@@ -6,7 +6,7 @@ assert: that: - "install_result.dest == '/usr/sbin/ansible_test_service'" - - "install_result.md5sum == '9ad49eaf390b30b1206b793ec71200ed'" + - "install_result.checksum == 'baaa79448a976922c080f1971321d203c6df0961'" - "install_result.state == 'file'" - "install_result.mode == '0755'" diff --git a/test/integration/roles/test_service/tasks/systemd_setup.yml b/test/integration/roles/test_service/tasks/systemd_setup.yml index 6d42933213..4a3a81a4a6 100644 --- a/test/integration/roles/test_service/tasks/systemd_setup.yml +++ b/test/integration/roles/test_service/tasks/systemd_setup.yml @@ -12,7 +12,7 @@ - "install_systemd_result.dest == '/usr/lib/systemd/system/ansible_test.service'" - "install_systemd_result.state == 'file'" - "install_systemd_result.mode == '0644'" - - "install_systemd_result.md5sum == '6be64a1e44e9e72a467e70a0b562444f'" + - "install_systemd_result.checksum == 'ca4b413fdf3cb2002f51893b9e42d2e449ec5afb'" - "install_broken_systemd_result.dest == '/usr/lib/systemd/system/ansible_test_broken.service'" - "install_broken_systemd_result.state == 'link'" diff --git a/test/integration/roles/test_service/tasks/sysv_setup.yml b/test/integration/roles/test_service/tasks/sysv_setup.yml index 83a1d6a8c4..1bc9dbc371 100644 --- a/test/integration/roles/test_service/tasks/sysv_setup.yml +++ b/test/integration/roles/test_service/tasks/sysv_setup.yml @@ -8,5 +8,5 @@ - "install_sysv_result.dest == '/etc/init.d/ansible_test'" - "install_sysv_result.state == 'file'" - "install_sysv_result.mode == '0755'" - - "install_sysv_result.md5sum == 'ebf6a9064ca8628187f3a6caf8e2a279'" + - "install_sysv_result.md5sum == '174fa255735064b420600e4c8637ea0eff28d0c1'" diff --git a/test/integration/roles/test_service/tasks/upstart_setup.yml b/test/integration/roles/test_service/tasks/upstart_setup.yml index 118d2da50e..e9607bb030 100644 --- a/test/integration/roles/test_service/tasks/upstart_setup.yml +++ b/test/integration/roles/test_service/tasks/upstart_setup.yml @@ -12,8 
+12,8 @@ - "install_upstart_result.dest == '/etc/init/ansible_test.conf'" - "install_upstart_result.state == 'file'" - "install_upstart_result.mode == '0644'" - - "install_upstart_result.md5sum == 'ab3900ea4de8423add764c12aeb90c01'" + - "install_upstart_result.checksum == '5c314837b6c4dd6c68d1809653a2974e9078e02a'" - "install_upstart_broken_result.dest == '/etc/init/ansible_broken_test.conf'" - "install_upstart_broken_result.state == 'file'" - "install_upstart_broken_result.mode == '0644'" - - "install_upstart_broken_result.md5sum == '015e183d10c311276c3e269cbeb309b7'" + - "install_upstart_broken_result.checksum == 'e66497894f2b2bf71e1380a196cc26089cc24a10'" diff --git a/test/integration/roles/test_stat/tasks/main.yml b/test/integration/roles/test_stat/tasks/main.yml index f27721a697..b0b16d7f9e 100644 --- a/test/integration/roles/test_stat/tasks/main.yml +++ b/test/integration/roles/test_stat/tasks/main.yml @@ -46,6 +46,8 @@ - "'isuid' in stat_result.stat" - "'md5' in stat_result.stat" - "stat_result.stat.md5 == '5eb63bbbe01eeed093cb22bb8f5acdc3'" + - "'checksum' in stat_result.stat" + - "stat_result.stat.checksum == '2aae6c35c94fcfb415dbe95f408b9ce91ee846ed'" - "'mode' in stat_result.stat" # why is this 420? 
- "'mtime' in stat_result.stat" - "'nlink' in stat_result.stat" diff --git a/test/integration/roles/test_template/tasks/main.yml b/test/integration/roles/test_template/tasks/main.yml index 0305885473..d7d812f3ba 100644 --- a/test/integration/roles/test_template/tasks/main.yml +++ b/test/integration/roles/test_template/tasks/main.yml @@ -27,6 +27,7 @@ - "'group' in template_result" - "'gid' in template_result" - "'md5sum' in template_result" + - "'checksum' in template_result" - "'owner' in template_result" - "'size' in template_result" - "'src' in template_result" diff --git a/test/units/TestModuleUtilsBasic.py b/test/units/TestModuleUtilsBasic.py index ceba17be4f..f5962a9478 100644 --- a/test/units/TestModuleUtilsBasic.py +++ b/test/units/TestModuleUtilsBasic.py @@ -7,7 +7,7 @@ from nose.tools import timed from ansible import errors from ansible.module_common import ModuleReplacer -from ansible.utils import md5 as utils_md5 +from ansible.utils import checksum as utils_checksum TEST_MODULE_DATA = """ from ansible.module_utils.basic import * @@ -113,8 +113,8 @@ class TestModuleUtilsBasic(unittest.TestCase): (rc, out, err) = self.module.run_command('echo "foo bar" > %s' % tmp_path, use_unsafe_shell=True) self.assertEqual(rc, 0) self.assertTrue(os.path.exists(tmp_path)) - md5sum = utils_md5(tmp_path) - self.assertEqual(md5sum, '5ceaa7ed396ccb8e959c02753cb4bd18') + checksum = utils_checksum(tmp_path) + self.assertEqual(checksum, 'd53a205a336e07cf9eac45471b3870f9489288ec') except: raise finally: @@ -127,8 +127,8 @@ class TestModuleUtilsBasic(unittest.TestCase): (rc, out, err) = self.module.run_command('echo "foo bar" >> %s' % tmp_path, use_unsafe_shell=True) self.assertEqual(rc, 0) self.assertTrue(os.path.exists(tmp_path)) - md5sum = utils_md5(tmp_path) - self.assertEqual(md5sum, '5ceaa7ed396ccb8e959c02753cb4bd18') + checksum = utils_checksum(tmp_path) + self.assertEqual(checksum, 'd53a205a336e07cf9eac45471b3870f9489288ec') except: raise finally: diff --git 
a/test/units/TestUtils.py b/test/units/TestUtils.py index af10a1e055..178eaae50c 100644 --- a/test/units/TestUtils.py +++ b/test/units/TestUtils.py @@ -366,6 +366,16 @@ class TestUtils(unittest.TestCase): self.assertEqual(ansible.utils.md5(os.path.join(os.path.dirname(__file__), 'ansible.cf')), None) + def test_checksum_s(self): + self.assertEqual(ansible.utils.checksum_s('ansible'), 'bef45157a43c9e5f469d188810814a4a8ab9f2ed') + # Need a test that causes UnicodeEncodeError See 4221 + + def test_checksum(self): + self.assertEqual(ansible.utils.checksum(os.path.join(os.path.dirname(__file__), 'ansible.cfg')), + '658b67c8ac7595adde7048425ff1f9aba270721a') + self.assertEqual(ansible.utils.md5(os.path.join(os.path.dirname(__file__), 'ansible.cf')), + None) + def test_default(self): self.assertEqual(ansible.utils.default(None, lambda: {}), {}) self.assertEqual(ansible.utils.default(dict(foo='bar'), lambda: {}), dict(foo='bar')) diff --git a/v2/ansible/parsing/vault/__init__.py b/v2/ansible/parsing/vault/__init__.py index 44f50f7d21..92c99fdad5 100644 --- a/v2/ansible/parsing/vault/__init__.py +++ b/v2/ansible/parsing/vault/__init__.py @@ -30,6 +30,8 @@ from io import BytesIO from subprocess import call from ansible import errors from hashlib import sha256 +# Note: Only used for loading obsolete VaultAES files. 
All files are written +# using the newer VaultAES256 which does not require md5 from hashlib import md5 from binascii import hexlify from binascii import unhexlify diff --git a/v2/ansible/playbook/role/__init__.py b/v2/ansible/playbook/role/__init__.py index 8f37970d59..67485f0f9c 100644 --- a/v2/ansible/playbook/role/__init__.py +++ b/v2/ansible/playbook/role/__init__.py @@ -23,7 +23,7 @@ from six import iteritems, string_types import os -from hashlib import md5 +from hashlib import sha1 from types import NoneType from ansible.errors import AnsibleError, AnsibleParserError @@ -39,7 +39,7 @@ __all__ = ['Role', 'ROLE_CACHE'] # The role cache is used to prevent re-loading roles, which -# may already exist. Keys into this cache are the MD5 hash +# may already exist. Keys into this cache are the SHA1 hash # of the role definition (for dictionary definitions, this # will be based on the repr() of the dictionary object) ROLE_CACHE = dict() @@ -60,7 +60,7 @@ class Role: self._handler_blocks = [] self._default_vars = dict() self._role_vars = dict() - + def __repr__(self): return self.get_name() From 507a1ef0934b9a03c9c76c4faa84b89731f22748 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 6 Nov 2014 23:17:17 -0800 Subject: [PATCH 353/813] Fix integration test to check for checksum, not md5sum --- test/integration/roles/test_service/tasks/sysv_setup.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/roles/test_service/tasks/sysv_setup.yml b/test/integration/roles/test_service/tasks/sysv_setup.yml index 1bc9dbc371..796a2fe9a7 100644 --- a/test/integration/roles/test_service/tasks/sysv_setup.yml +++ b/test/integration/roles/test_service/tasks/sysv_setup.yml @@ -8,5 +8,5 @@ - "install_sysv_result.dest == '/etc/init.d/ansible_test'" - "install_sysv_result.state == 'file'" - "install_sysv_result.mode == '0755'" - - "install_sysv_result.md5sum == '174fa255735064b420600e4c8637ea0eff28d0c1'" + - "install_sysv_result.checksum == 
'174fa255735064b420600e4c8637ea0eff28d0c1'" From 24bebd85b4f281c7bb4b9da22fc0600065724e4d Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 6 Nov 2014 13:14:38 -0600 Subject: [PATCH 354/813] Adding v2 task/block iterator and some reorganizing --- v2/ansible/executor/HostPlaybookIterator.py | 36 ------- .../executor/{TaskResult.py => __init__.py} | 0 .../executor/{HostLog.py => host_log.py} | 0 ...{HostLogManager.py => host_log_manager.py} | 0 ...aybookExecutor.py => playbook_executor.py} | 0 v2/ansible/executor/playbook_iterator.py | 97 +++++++++++++++++++ .../{TaskExecutor.py => task_executor.py} | 0 ...kQueueManager.py => task_queue_manager.py} | 0 .../{TemplateEngine.py => task_result.py} | 0 .../{VariableCache.py => template_engine.py} | 0 v2/ansible/parsing/yaml/__init__.py | 4 + v2/ansible/playbook/__init__.py | 6 +- v2/ansible/playbook/block.py | 22 ++++- v2/ansible/playbook/helpers.py | 31 +++++- v2/ansible/playbook/play.py | 41 +++++++- v2/ansible/playbook/role/__init__.py | 32 +++++- v2/ansible/playbook/role/definition.py | 1 + v2/ansible/playbook/task.py | 9 ++ v2/ansible/playbook/task_include.py | 25 ++++- .../executor/__init__.py} | 0 v2/test/executor/test_playbook_iterator.py | 83 ++++++++++++++++ v2/test/playbook/test_block.py | 6 ++ v2/test/playbook/test_play.py | 12 +++ v2/test/playbook/test_playbook.py | 1 + v2/test/playbook/test_task_include.py | 1 + 25 files changed, 358 insertions(+), 49 deletions(-) delete mode 100644 v2/ansible/executor/HostPlaybookIterator.py rename v2/ansible/executor/{TaskResult.py => __init__.py} (100%) rename v2/ansible/executor/{HostLog.py => host_log.py} (100%) rename v2/ansible/executor/{HostLogManager.py => host_log_manager.py} (100%) rename v2/ansible/executor/{PlaybookExecutor.py => playbook_executor.py} (100%) create mode 100644 v2/ansible/executor/playbook_iterator.py rename v2/ansible/executor/{TaskExecutor.py => task_executor.py} (100%) rename v2/ansible/executor/{TaskQueueManager.py => 
task_queue_manager.py} (100%) rename v2/ansible/executor/{TemplateEngine.py => task_result.py} (100%) rename v2/ansible/executor/{VariableCache.py => template_engine.py} (100%) rename v2/{ansible/executor/VariableManager.py => test/executor/__init__.py} (100%) create mode 100644 v2/test/executor/test_playbook_iterator.py diff --git a/v2/ansible/executor/HostPlaybookIterator.py b/v2/ansible/executor/HostPlaybookIterator.py deleted file mode 100644 index 07fab06714..0000000000 --- a/v2/ansible/executor/HostPlaybookIterator.py +++ /dev/null @@ -1,36 +0,0 @@ -# (c) 2012-2014, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -class HostPlaybookIterator: - - def __init__(self, host, playbook): - pass - - def get_next_task(self): - assert False - - def is_blocked(self): - # depending on strategy, either - # ‘linear’ -- all prev tasks must be completed for all hosts - # ‘free’ -- this host doesn’t have any more work to do - assert False - - diff --git a/v2/ansible/executor/TaskResult.py b/v2/ansible/executor/__init__.py similarity index 100% rename from v2/ansible/executor/TaskResult.py rename to v2/ansible/executor/__init__.py diff --git a/v2/ansible/executor/HostLog.py b/v2/ansible/executor/host_log.py similarity index 100% rename from v2/ansible/executor/HostLog.py rename to v2/ansible/executor/host_log.py diff --git a/v2/ansible/executor/HostLogManager.py b/v2/ansible/executor/host_log_manager.py similarity index 100% rename from v2/ansible/executor/HostLogManager.py rename to v2/ansible/executor/host_log_manager.py diff --git a/v2/ansible/executor/PlaybookExecutor.py b/v2/ansible/executor/playbook_executor.py similarity index 100% rename from v2/ansible/executor/PlaybookExecutor.py rename to v2/ansible/executor/playbook_executor.py diff --git a/v2/ansible/executor/playbook_iterator.py b/v2/ansible/executor/playbook_iterator.py new file mode 100644 index 0000000000..0d4f09b1e4 --- /dev/null +++ b/v2/ansible/executor/playbook_iterator.py @@ -0,0 +1,97 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +class PlaybookState: + + ''' + A helper class, which keeps track of the task iteration + state for a given playbook. This is used in the PlaybookIterator + class on a per-host basis. + ''' + def __init__(self, parent_iterator): + self._parent_iterator = parent_iterator + self._cur_play = 0 + self._task_list = None + self._cur_task_pos = 0 + + def next(self): + ''' + Determines and returns the next available task from the playbook, + advancing through the list of plays as it goes. + ''' + + while True: + # when we hit the end of the playbook entries list, we return + # None to indicate we're there + if self._cur_play > len(self._parent_iterator._playbook._entries) - 1: + return None + + # initialize the task list by calling the .compile() method + # on the play, which will call compile() for all child objects + if self._task_list is None: + self._task_list = self._parent_iterator._playbook._entries[self._cur_play].compile() + + # if we've hit the end of this plays task list, move on to the next + # and reset the position values for the next iteration + if self._cur_task_pos > len(self._task_list) - 1: + self._cur_play += 1 + self._task_list = None + self._cur_task_pos = 0 + continue + else: + # FIXME: do tag/conditional evaluation here and advance + # the task position if it should be skipped without + # returning a task + task = self._task_list[self._cur_task_pos] + self._cur_task_pos += 1 + + # Skip the task if it is the member of a role which has already + # been run, unless the role allows multiple executions + if task._role: + # FIXME: this should all be done via member functions + # instead of direct access to internal variables + if task._role.has_run() and not 
task._role._metadata._allow_duplicates: + continue + + return task + +class PlaybookIterator: + + ''' + The main iterator class, which keeps the state of the playbook + on a per-host basis using the above PlaybookState class. + ''' + + def __init__(self, inventory, log_manager, playbook): + self._playbook = playbook + self._log_manager = log_manager + self._host_entries = dict() + + # build the per-host dictionary of playbook states + for host in inventory.get_hosts(): + self._host_entries[host.get_name()] = PlaybookState(parent_iterator=self) + + def get_next_task_for_host(self, host): + ''' fetch the next task for the given host ''' + if host.get_name() not in self._host_entries: + raise AnsibleError("invalid host specified for playbook iteration") + + return self._host_entries[host.get_name()].next() diff --git a/v2/ansible/executor/TaskExecutor.py b/v2/ansible/executor/task_executor.py similarity index 100% rename from v2/ansible/executor/TaskExecutor.py rename to v2/ansible/executor/task_executor.py diff --git a/v2/ansible/executor/TaskQueueManager.py b/v2/ansible/executor/task_queue_manager.py similarity index 100% rename from v2/ansible/executor/TaskQueueManager.py rename to v2/ansible/executor/task_queue_manager.py diff --git a/v2/ansible/executor/TemplateEngine.py b/v2/ansible/executor/task_result.py similarity index 100% rename from v2/ansible/executor/TemplateEngine.py rename to v2/ansible/executor/task_result.py diff --git a/v2/ansible/executor/VariableCache.py b/v2/ansible/executor/template_engine.py similarity index 100% rename from v2/ansible/executor/VariableCache.py rename to v2/ansible/executor/template_engine.py diff --git a/v2/ansible/parsing/yaml/__init__.py b/v2/ansible/parsing/yaml/__init__.py index 4273abee53..a6c63feaa7 100644 --- a/v2/ansible/parsing/yaml/__init__.py +++ b/v2/ansible/parsing/yaml/__init__.py @@ -148,6 +148,10 @@ class DataLoader(): raise AnsibleParserError(YAML_SYNTAX_ERROR, obj=err_obj, show_content=show_content) + def 
get_basedir(self): + ''' returns the current basedir ''' + return self._basedir + def set_basedir(self, basedir): ''' sets the base directory, used to find files when a relative path is given ''' diff --git a/v2/ansible/playbook/__init__.py b/v2/ansible/playbook/__init__.py index f8f42b1163..2d594c4802 100644 --- a/v2/ansible/playbook/__init__.py +++ b/v2/ansible/playbook/__init__.py @@ -57,6 +57,9 @@ class Playbook: basedir = os.path.dirname(file_name) self._loader.set_basedir(basedir) + # also add the basedir to the list of module directories + push_basedir(basedir) + ds = self._loader.load_from_file(file_name) if not isinstance(ds, list): raise AnsibleParserError("playbooks must be a list of plays", obj=ds) @@ -75,4 +78,5 @@ class Playbook: self._entries.append(entry_obj) - + def get_entries(self): + return self._entries[:] diff --git a/v2/ansible/playbook/block.py b/v2/ansible/playbook/block.py index a082e97e5e..0fc19113f0 100644 --- a/v2/ansible/playbook/block.py +++ b/v2/ansible/playbook/block.py @@ -22,6 +22,7 @@ __metaclass__ = type from ansible.playbook.attribute import Attribute, FieldAttribute from ansible.playbook.base import Base from ansible.playbook.helpers import load_list_of_tasks +from ansible.playbook.task_include import TaskInclude class Block(Base): @@ -35,8 +36,10 @@ class Block(Base): # similar to the 'else' clause for exceptions #_otherwise = FieldAttribute(isa='list') - def __init__(self, role=None): - self.role = role + def __init__(self, parent_block=None, role=None, task_include=None): + self._parent_block = parent_block + self._role = role + self._task_include = task_include super(Block, self).__init__() def get_variables(self): @@ -45,8 +48,8 @@ class Block(Base): return dict() @staticmethod - def load(data, role=None, loader=None): - b = Block(role=role) + def load(data, parent_block=None, role=None, task_include=None, loader=None): + b = Block(parent_block=parent_block, role=role, task_include=task_include) return b.load_data(data, 
loader=loader) def munge(self, ds): @@ -79,3 +82,14 @@ class Block(Base): #def _load_otherwise(self, attr, ds): # return self._load_list_of_tasks(ds, block=self, loader=self._loader) + def compile(self): + ''' + Returns the task list for this object + ''' + + task_list = [] + for task in self.block: + # FIXME: evaulate task tags/conditionals here + task_list.extend(task.compile()) + + return task_list diff --git a/v2/ansible/playbook/helpers.py b/v2/ansible/playbook/helpers.py index f692f4baf6..1d79721dce 100644 --- a/v2/ansible/playbook/helpers.py +++ b/v2/ansible/playbook/helpers.py @@ -15,11 +15,16 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . + +import os + from types import NoneType from ansible.errors import AnsibleParserError +from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject -def load_list_of_blocks(ds, role=None, loader=None): + +def load_list_of_blocks(ds, parent_block=None, role=None, task_include=None, loader=None): ''' Given a list of mixed task/block data (parsed from YAML), return a list of Block() objects, where implicit blocks @@ -34,7 +39,7 @@ def load_list_of_blocks(ds, role=None, loader=None): block_list = [] if ds: for block in ds: - b = Block.load(block, role=role, loader=loader) + b = Block.load(block, parent_block=parent_block, role=role, task_include=task_include, loader=loader) block_list.append(b) return block_list @@ -58,7 +63,17 @@ def load_list_of_tasks(ds, block=None, role=None, task_include=None, loader=None raise AnsibleParserError("task/handler entries must be dictionaries (got a %s)" % type(task), obj=ds) if 'include' in task: + cur_basedir = None + if isinstance(task, AnsibleBaseYAMLObject) and loader: + pos_info = task.get_position_info() + new_basedir = os.path.dirname(pos_info[0]) + cur_basedir = loader.get_basedir() + loader.set_basedir(new_basedir) + t = TaskInclude.load(task, block=block, role=role, task_include=task_include, loader=loader) + + if 
cur_basedir and loader: + loader.set_basedir(cur_basedir) else: t = Task.load(task, block=block, role=role, task_include=task_include, loader=loader) @@ -85,3 +100,15 @@ def load_list_of_roles(ds, loader=None): return roles +def compile_block_list(block_list): + ''' + Given a list of blocks, compile them into a flat list of tasks + ''' + + task_list = [] + + for block in block_list: + task_list.extend(block.compile()) + + return task_list + diff --git a/v2/ansible/playbook/play.py b/v2/ansible/playbook/play.py index 07ee4707b4..c3d11e6cb2 100644 --- a/v2/ansible/playbook/play.py +++ b/v2/ansible/playbook/play.py @@ -25,7 +25,8 @@ from ansible.parsing.yaml import DataLoader from ansible.playbook.attribute import Attribute, FieldAttribute from ansible.playbook.base import Base -from ansible.playbook.helpers import load_list_of_blocks, load_list_of_roles +from ansible.playbook.helpers import load_list_of_blocks, load_list_of_roles, compile_block_list +from ansible.playbook.role import Role __all__ = ['Play'] @@ -155,3 +156,41 @@ class Play(Base): return load_list_of_roles(ds, loader=self._loader) # FIXME: post_validation needs to ensure that su/sudo are not both set + + def _compile_roles(self): + ''' + Handles the role compilation step, returning a flat list of tasks + with the lowest level dependencies first. For example, if a role R + has a dependency D1, which also has a dependency D2, the tasks from + D2 are merged first, followed by D1, and lastly by the tasks from + the parent role R last. This is done for all roles in the Play. + ''' + + task_list = [] + + if len(self.roles) > 0: + for ri in self.roles: + # The internal list of roles are actualy RoleInclude objects, + # so we load the role from that now + role = Role.load(ri) + + # FIXME: evauluate conditional of roles here? 
+ task_list.extend(role.compile()) + + return task_list + + def compile(self): + ''' + Compiles and returns the task list for this play, compiled from the + roles (which are themselves compiled recursively) and/or the list of + tasks specified in the play. + ''' + + task_list = [] + + task_list.extend(compile_block_list(self.pre_tasks)) + task_list.extend(self._compile_roles()) + task_list.extend(compile_block_list(self.tasks)) + task_list.extend(compile_block_list(self.post_tasks)) + + return task_list diff --git a/v2/ansible/playbook/role/__init__.py b/v2/ansible/playbook/role/__init__.py index 67485f0f9c..ab8a779fde 100644 --- a/v2/ansible/playbook/role/__init__.py +++ b/v2/ansible/playbook/role/__init__.py @@ -30,7 +30,7 @@ from ansible.errors import AnsibleError, AnsibleParserError from ansible.parsing.yaml import DataLoader from ansible.playbook.attribute import FieldAttribute from ansible.playbook.base import Base -from ansible.playbook.helpers import load_list_of_blocks +from ansible.playbook.helpers import load_list_of_blocks, compile_block_list from ansible.playbook.role.include import RoleInclude from ansible.playbook.role.metadata import RoleMetadata @@ -87,6 +87,10 @@ class Role: if parent_role: self.add_parent(parent_role) + # save the current base directory for the loader and set it to the current role path + cur_basedir = self._loader.get_basedir() + self._loader.set_basedir(self._role_path) + # load the role's files, if they exist metadata = self._load_role_yaml('meta') if metadata: @@ -110,6 +114,9 @@ class Role: if not isinstance(self._default_vars, (dict, NoneType)): raise AnsibleParserError("The default/main.yml file for role '%s' must contain a dictionary of variables" % self._role_name, obj=ds) + # and finally restore the previous base directory + self._loader.set_basedir(cur_basedir) + def _load_role_yaml(self, subdir): file_path = os.path.join(self._role_path, subdir) if self._loader.path_exists(file_path) and 
self._loader.is_directory(file_path): @@ -186,3 +193,26 @@ class Role: return direct_deps + child_deps + def get_task_blocks(self): + return self._task_blocks[:] + + def get_handler_blocks(self): + return self._handler_blocks[:] + + def compile(self): + ''' + Returns the task list for this role, which is created by first + recursively compiling the tasks for all direct dependencies, and + then adding on the tasks for this role. + ''' + + task_list = [] + + deps = self.get_direct_dependencies() + for dep in deps: + task_list.extend(dep.compile()) + + task_list.extend(compile_block_list(self._task_blocks)) + + return task_list + diff --git a/v2/ansible/playbook/role/definition.py b/v2/ansible/playbook/role/definition.py index 08d62afbe4..34b0248820 100644 --- a/v2/ansible/playbook/role/definition.py +++ b/v2/ansible/playbook/role/definition.py @@ -124,6 +124,7 @@ class RoleDefinition(Base): # FIXME: make the parser smart about list/string entries # in the yaml so the error line/file can be reported # here + raise AnsibleError("the role '%s' was not found" % role_name) def _split_role_params(self, ds): diff --git a/v2/ansible/playbook/task.py b/v2/ansible/playbook/task.py index 95571819af..c4c22025ed 100644 --- a/v2/ansible/playbook/task.py +++ b/v2/ansible/playbook/task.py @@ -60,6 +60,7 @@ class Task(Base): _delay = FieldAttribute(isa='int') _delegate_to = FieldAttribute(isa='string') _environment = FieldAttribute(isa='dict') + _failed_when = FieldAttribute(isa='string') _first_available_file = FieldAttribute(isa='list') _ignore_errors = FieldAttribute(isa='bool') @@ -179,3 +180,11 @@ class Task(Base): return new_ds + def compile(self): + ''' + For tasks, this is just a dummy method returning an array + with 'self' in it, so we don't have to care about task types + further up the chain. 
+ ''' + + return [self] diff --git a/v2/ansible/playbook/task_include.py b/v2/ansible/playbook/task_include.py index 798ce020d1..dbbc388f68 100644 --- a/v2/ansible/playbook/task_include.py +++ b/v2/ansible/playbook/task_include.py @@ -24,7 +24,7 @@ from ansible.parsing.splitter import split_args, parse_kv from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping from ansible.playbook.attribute import Attribute, FieldAttribute from ansible.playbook.base import Base -from ansible.playbook.helpers import load_list_of_tasks +from ansible.playbook.helpers import load_list_of_blocks, compile_block_list from ansible.plugins import lookup_finder @@ -57,11 +57,12 @@ class TaskInclude(Base): _when = FieldAttribute(isa='list', default=[]) def __init__(self, block=None, role=None, task_include=None): - self._tasks = [] self._block = block self._role = role self._task_include = task_include + self._task_blocks = [] + super(TaskInclude, self).__init__() @staticmethod @@ -136,11 +137,27 @@ class TaskInclude(Base): def _load_include(self, attr, ds): - ''' loads the file name specified in the ds and returns a list of tasks ''' + ''' loads the file name specified in the ds and returns a list of blocks ''' data = self._loader.load_from_file(ds) if not isinstance(data, list): raise AnsibleParsingError("included task files must contain a list of tasks", obj=ds) - self._tasks = load_list_of_tasks(data, task_include=self, loader=self._loader) + self._task_blocks = load_list_of_blocks( + data, + parent_block=self._block, + task_include=self, + role=self._role, + loader=self._loader + ) return ds + + def compile(self): + ''' + Returns the task list for the included tasks. 
+ ''' + + task_list = [] + task_list.extend(compile_block_list(self._task_blocks)) + return task_list + diff --git a/v2/ansible/executor/VariableManager.py b/v2/test/executor/__init__.py similarity index 100% rename from v2/ansible/executor/VariableManager.py rename to v2/test/executor/__init__.py diff --git a/v2/test/executor/test_playbook_iterator.py b/v2/test/executor/test_playbook_iterator.py new file mode 100644 index 0000000000..96db014fd6 --- /dev/null +++ b/v2/test/executor/test_playbook_iterator.py @@ -0,0 +1,83 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.compat.tests import unittest +from ansible.compat.tests.mock import patch, MagicMock + +from ansible.errors import AnsibleError, AnsibleParserError +from ansible.executor.playbook_iterator import PlaybookIterator +from ansible.playbook import Playbook + +from test.mock.loader import DictDataLoader + +class TestPlaybookIterator(unittest.TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_playbook_iterator(self): + fake_loader = DictDataLoader({ + "test_play.yml": """ + - hosts: all + roles: + - test_role + pre_tasks: + - debug: msg="this is a pre_task" + tasks: + - debug: msg="this is a regular task" + post_tasks: + - debug: msg="this is a post_task" + """, + '/etc/ansible/roles/test_role/tasks/main.yml': """ + - debug: msg="this is a role task" + """, + }) + + p = Playbook.load('test_play.yml', loader=fake_loader) + + hosts = [] + for i in range(0, 10): + host = MagicMock() + host.get_name.return_value = 'host%02d' % i + hosts.append(host) + + inventory = MagicMock() + inventory.get_hosts.return_value = hosts + + itr = PlaybookIterator(inventory, None, p) + task = itr.get_next_task_for_host(hosts[0]) + print(task) + self.assertIsNotNone(task) + task = itr.get_next_task_for_host(hosts[0]) + print(task) + self.assertIsNotNone(task) + task = itr.get_next_task_for_host(hosts[0]) + print(task) + self.assertIsNotNone(task) + task = itr.get_next_task_for_host(hosts[0]) + print(task) + self.assertIsNotNone(task) + task = itr.get_next_task_for_host(hosts[0]) + print(task) + self.assertIsNone(task) diff --git a/v2/test/playbook/test_block.py b/v2/test/playbook/test_block.py index 348681527b..9c1d06cbcb 100644 --- a/v2/test/playbook/test_block.py +++ b/v2/test/playbook/test_block.py @@ -75,3 +75,9 @@ class TestBlock(unittest.TestCase): self.assertEqual(len(b.block), 1) assert isinstance(b.block[0], Task) + 
def test_block_compile(self): + ds = [dict(action='foo')] + b = Block.load(ds) + tasks = b.compile() + self.assertEqual(len(tasks), 1) + self.assertIsInstance(tasks[0], Task) diff --git a/v2/test/playbook/test_play.py b/v2/test/playbook/test_play.py index 14732a1f9f..22486f4129 100644 --- a/v2/test/playbook/test_play.py +++ b/v2/test/playbook/test_play.py @@ -117,4 +117,16 @@ class TestPlay(unittest.TestCase): roles=['foo'], ), loader=fake_loader) + tasks = p.compile() + def test_play_compile(self): + p = Play.load(dict( + name="test play", + hosts=['foo'], + gather_facts=False, + tasks=[dict(action='shell echo "hello world"')], + )) + + tasks = p.compile() + self.assertEqual(len(tasks), 1) + self.assertIsInstance(tasks[0], Task) diff --git a/v2/test/playbook/test_playbook.py b/v2/test/playbook/test_playbook.py index 640057820e..f3ba6785f3 100644 --- a/v2/test/playbook/test_playbook.py +++ b/v2/test/playbook/test_playbook.py @@ -45,6 +45,7 @@ class TestPlaybook(unittest.TestCase): """, }) p = Playbook.load("test_file.yml", loader=fake_loader) + entries = p.get_entries() def test_bad_playbook_files(self): fake_loader = DictDataLoader({ diff --git a/v2/test/playbook/test_task_include.py b/v2/test/playbook/test_task_include.py index 42a63b7204..55f7461f05 100644 --- a/v2/test/playbook/test_task_include.py +++ b/v2/test/playbook/test_task_include.py @@ -45,6 +45,7 @@ class TestTaskInclude(unittest.TestCase): def test_basic_task_include(self): ti = TaskInclude.load(AnsibleMapping(include='foo.yml'), loader=self._fake_loader) + tasks = ti.compile() def test_task_include_with_loop(self): ti = TaskInclude.load(AnsibleMapping(include='foo.yml', with_items=['a', 'b', 'c']), loader=self._fake_loader) From b63ca685df9a3bd19a48051f0f9d9c59ce8cdb54 Mon Sep 17 00:00:00 2001 From: Ding Deng Date: Sat, 8 Nov 2014 23:30:26 +0800 Subject: [PATCH 355/813] Support new AWS regions: cn-north-1, eu-central-1. 
--- lib/ansible/module_utils/ec2.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/ansible/module_utils/ec2.py b/lib/ansible/module_utils/ec2.py index b4558ef0a4..417e1b9521 100644 --- a/lib/ansible/module_utils/ec2.py +++ b/lib/ansible/module_utils/ec2.py @@ -36,6 +36,8 @@ AWS_REGIONS = [ 'ap-northeast-1', 'ap-southeast-1', 'ap-southeast-2', + 'cn-north-1', + 'eu-central-1', 'eu-west-1', 'sa-east-1', 'us-east-1', From 90614283c40c9e2edb1995a24156abe2540c777b Mon Sep 17 00:00:00 2001 From: Sebastian Gumprich Date: Sun, 9 Nov 2014 18:03:59 +0000 Subject: [PATCH 356/813] Fixed 404-link for Michael DeHaan profile page. --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index ab08cf027f..96a3c20d46 100644 --- a/README.md +++ b/README.md @@ -49,7 +49,7 @@ Branch Info Authors ======= -Ansible was created by [Michael DeHaan](github.com/mpdehaan) (michael@ansible.com) and has contributions from over +Ansible was created by [Michael DeHaan](https://github.com/mpdehaan) (michael@ansible.com) and has contributions from over 800 users (and growing). Thanks everyone! Ansible is sponsored by [Ansible, Inc](http://ansible.com) From 8e32dda2e2efee3b391394383cb24b2e7a19f267 Mon Sep 17 00:00:00 2001 From: Sergei Antipov Date: Mon, 10 Nov 2014 15:41:44 +0600 Subject: [PATCH 357/813] Delete import json. Several syntax changes. Import json called twice. In first it will call an exception, if json module not installed. --- plugins/inventory/zabbix.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/plugins/inventory/zabbix.py b/plugins/inventory/zabbix.py index 68cc5cc57b..2bc1e2e1cc 100755 --- a/plugins/inventory/zabbix.py +++ b/plugins/inventory/zabbix.py @@ -20,7 +20,7 @@ ###################################################################### """ -Zabbix Server external inventory script. +Zabbix Server external inventory script. 
======================================== Returns hosts and hostgroups from Zabbix Server. @@ -31,7 +31,6 @@ Tested with Zabbix Server 2.0.6. """ import os, sys -import json import argparse import ConfigParser @@ -55,7 +54,7 @@ class ZabbixInventory(object): if config.has_option('zabbix', 'server'): self.zabbix_server = config.get('zabbix', 'server') - # login + # login if config.has_option('zabbix', 'username'): self.zabbix_username = config.get('zabbix', 'username') if config.has_option('zabbix', 'password'): @@ -84,7 +83,7 @@ class ZabbixInventory(object): for host in hostsData: hostname = host['name'] - data[self.defaultgroup]['hosts'].append(hostname) + data[self.defaultgroup]['hosts'].append(hostname) for group in host['groups']: groupname = group['name'] From 4c2d06d2feec0648498426e798a6dcd61cce1fa1 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Mon, 10 Nov 2014 10:52:23 -0600 Subject: [PATCH 358/813] Support nested modules with ANSIBLE_LIBRARY env var --- lib/ansible/utils/plugins.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/utils/plugins.py b/lib/ansible/utils/plugins.py index 0d050fd13d..1955ade237 100644 --- a/lib/ansible/utils/plugins.py +++ b/lib/ansible/utils/plugins.py @@ -127,7 +127,7 @@ class PluginLoader(object): configured_paths = self.config.split(os.pathsep) for path in configured_paths: path = os.path.realpath(os.path.expanduser(path)) - contents = glob.glob("%s/*" % path) + contents = glob.glob("%s/*" % path) + glob.glob("%s/*/*" % path) for c in contents: if os.path.isdir(c) and c not in ret: ret.append(c) From d32e1adb1bc6861c67a3d141ce9f86f724b7667f Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 10 Nov 2014 09:10:19 -0800 Subject: [PATCH 359/813] Mention change to fetch module's output --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0902313569..8beea7f154 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -66,6 +66,9 @@ Some other notable 
changes: behaviour use the new module parameter track_submodules=yes * Checksumming of transferred files has been made more portable and now uses the sha1 algorithm instead of md5 to be compatible with FIPS-140. + - As a small side effect, the fetch module no longer returns a useful value + in remote_md5. If you need a replacement, switch to using remote_checksum + which returns the sha1sum of the remote file. And various other bug fixes and improvements ... From 30c50020a1b6add9a461c94960dabfa4d73c08fd Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 10 Nov 2014 09:15:46 -0800 Subject: [PATCH 360/813] Better way to get the python_interpreter inventory variable --- lib/ansible/runner/__init__.py | 6 ++---- lib/ansible/runner/action_plugins/assemble.py | 2 +- lib/ansible/runner/action_plugins/copy.py | 4 ++-- lib/ansible/runner/action_plugins/fetch.py | 4 ++-- lib/ansible/runner/action_plugins/template.py | 2 +- lib/ansible/runner/action_plugins/unarchive.py | 2 +- 6 files changed, 9 insertions(+), 11 deletions(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 7641200544..6351e2aab8 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -1159,11 +1159,9 @@ class Runner(object): # ***************************************************** - def _remote_checksum(self, conn, tmp, path): + def _remote_checksum(self, conn, tmp, path, inject): ''' takes a remote checksum and returns 1 if no file ''' - inject = self.get_inject_vars(conn.host) - hostvars = HostVars(inject['combined_cache'], self.inventory, vault_password=self.vault_pass) - python_interp = hostvars[conn.host].get('ansible_python_interpreter', 'python') + python_interp = inject['hostvars'][inject['inventory_hostname']].get('ansible_python_interpreter', 'python') cmd = conn.shell.checksum(path, python_interp) data = self._low_level_exec_command(conn, cmd, tmp, sudoable=True) data2 = utils.last_non_blank_line(data['stdout']) diff --git 
a/lib/ansible/runner/action_plugins/assemble.py b/lib/ansible/runner/action_plugins/assemble.py index 9f5d450c2f..b0a45c4970 100644 --- a/lib/ansible/runner/action_plugins/assemble.py +++ b/lib/ansible/runner/action_plugins/assemble.py @@ -109,7 +109,7 @@ class ActionModule(object): path = self._assemble_from_fragments(src, delimiter, _re) path_checksum = utils.checksum_s(path) - remote_checksum = self.runner._remote_checksum(conn, tmp, dest) + remote_checksum = self.runner._remote_checksum(conn, tmp, dest, inject) if path_checksum != remote_checksum: resultant = file(path).read() diff --git a/lib/ansible/runner/action_plugins/copy.py b/lib/ansible/runner/action_plugins/copy.py index 2b3d387173..55524bca38 100644 --- a/lib/ansible/runner/action_plugins/copy.py +++ b/lib/ansible/runner/action_plugins/copy.py @@ -175,7 +175,7 @@ class ActionModule(object): dest_file = conn.shell.join_path(dest) # Attempt to get the remote checksum - remote_checksum = self.runner._remote_checksum(conn, tmp_path, dest_file) + remote_checksum = self.runner._remote_checksum(conn, tmp_path, dest_file, inject) if remote_checksum == '3': # The remote_checksum was executed on a directory. @@ -187,7 +187,7 @@ class ActionModule(object): else: # Append the relative source location to the destination and retry remote_checksum dest_file = conn.shell.join_path(dest, source_rel) - remote_checksum = self.runner._remote_checksum(conn, tmp_path, dest_file) + remote_checksum = self.runner._remote_checksum(conn, tmp_path, dest_file, inject) if remote_checksum != '1' and not force: # remote_file does not exist so continue to next iteration. 
diff --git a/lib/ansible/runner/action_plugins/fetch.py b/lib/ansible/runner/action_plugins/fetch.py index 825023a0bc..030058498a 100644 --- a/lib/ansible/runner/action_plugins/fetch.py +++ b/lib/ansible/runner/action_plugins/fetch.py @@ -73,7 +73,7 @@ class ActionModule(object): source = conn.shell.join_path(source) # calculate checksum for the remote file - remote_checksum = self.runner._remote_checksum(conn, tmp, source) + remote_checksum = self.runner._remote_checksum(conn, tmp, source, inject) # use slurp if sudo and permissions are lacking remote_data = None @@ -116,7 +116,7 @@ class ActionModule(object): # these don't fail because you may want to transfer a log file that possibly MAY exist # but keep going to fetch other log files if remote_checksum == '0': - result = dict(msg="unable to calculate the md5 sum of the remote file", file=source, changed=False) + result = dict(msg="unable to calculate the checksum of the remote file", file=source, changed=False) return ReturnData(conn=conn, result=result) if remote_checksum == '1': if fail_on_missing: diff --git a/lib/ansible/runner/action_plugins/template.py b/lib/ansible/runner/action_plugins/template.py index 2fe07c3039..75fd7ff5a6 100644 --- a/lib/ansible/runner/action_plugins/template.py +++ b/lib/ansible/runner/action_plugins/template.py @@ -88,7 +88,7 @@ class ActionModule(object): return ReturnData(conn=conn, comm_ok=False, result=result) local_checksum = utils.checksum_s(resultant) - remote_checksum = self.runner._remote_checksum(conn, tmp, dest) + remote_checksum = self.runner._remote_checksum(conn, tmp, dest, inject) if local_checksum != remote_checksum: diff --git a/lib/ansible/runner/action_plugins/unarchive.py b/lib/ansible/runner/action_plugins/unarchive.py index 1f831e4207..f570a29d5c 100644 --- a/lib/ansible/runner/action_plugins/unarchive.py +++ b/lib/ansible/runner/action_plugins/unarchive.py @@ -62,7 +62,7 @@ class ActionModule(object): else: source = utils.path_dwim(self.runner.basedir, 
source) - remote_checksum = self.runner._remote_checksum(conn, tmp, dest) + remote_checksum = self.runner._remote_checksum(conn, tmp, dest, inject) if remote_checksum != '3': result = dict(failed=True, msg="dest '%s' must be an existing dir" % dest) return ReturnData(conn=conn, result=result) From 2bd927fd818c3cd645d5d21f4550a47b4ecb1dd2 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Wed, 22 Oct 2014 14:40:20 -0500 Subject: [PATCH 361/813] Support RackConnect v3 by allowing a network to be specified for use in determining ansible_ssh_host --- plugins/inventory/rax.py | 149 ++++++++++++++++++++++++--------------- 1 file changed, 94 insertions(+), 55 deletions(-) mode change 100755 => 100644 plugins/inventory/rax.py diff --git a/plugins/inventory/rax.py b/plugins/inventory/rax.py old mode 100755 new mode 100644 index 457c20962a..87b7f9cafc --- a/plugins/inventory/rax.py +++ b/plugins/inventory/rax.py @@ -1,8 +1,10 @@ #!/usr/bin/env python -# (c) 2013, Jesse Keating +# (c) 2013, Jesse Keating , +# Matt Martz # -# This file is part of Ansible, +# This file is part of Ansible. # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -17,16 +19,20 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
-DOCUMENTATION = ''' ---- -inventory: rax -short_description: Rackspace Public Cloud external inventory script -description: - - Generates inventory that Ansible can understand by making API request to +""" +Rackspace Cloud Inventory + +Authors: + Jesse Keating , + Matt Martz + + +Description: + Generates inventory that Ansible can understand by making API request to Rackspace Public Cloud API - - | - When run against a specific host, this script returns the following - variables: + + When run against a specific host, this script returns variables similar to: rax_os-ext-sts_task_state rax_addresses rax_links @@ -50,63 +56,67 @@ description: rax_tenant_id rax_loaded - where some item can have nested structure. - - credentials are set in a credentials file -version_added: None -options: - creds_file: - description: - - File to find the Rackspace Public Cloud credentials in - required: true - default: null - region: - description: - - An optional value to narrow inventory scope, i.e. DFW, ORD, IAD, LON - required: false - default: null -authors: - - Jesse Keating - - Paul Durivage - - Matt Martz -notes: - - RAX_CREDS_FILE is an optional environment variable that points to a +Notes: + RAX_CREDS_FILE is an optional environment variable that points to a pyrax-compatible credentials file. - - If RAX_CREDS_FILE is not supplied, rax.py will look for a credentials file - at ~/.rackspace_cloud_credentials. - - See https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating - - RAX_REGION is an optional environment variable to narrow inventory search - scope - - RAX_REGION, if used, needs a value like ORD, DFW, SYD (a Rackspace - datacenter) and optionally accepts a comma-separated list - - RAX_ENV is an environment variable that will use an environment as + + If RAX_CREDS_FILE is not supplied, rax.py will look for a credentials file + at ~/.rackspace_cloud_credentials. 
It uses the Rackspace Python SDK, and + therefore requires a file formatted per the SDK's specifications. See + https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md + #authenticating + + RAX_REGION is an optional environment variable to narrow inventory search + scope. RAX_REGION, if used, needs a value like ORD, DFW, SYD (a Rackspace + datacenter) and optionally accepts a comma-separated list. + + RAX_ENV is an environment variable that will use an environment as configured in ~/.pyrax.cfg, see - https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration - - RAX_META_PREFIX is an environment variable that changes the prefix used + https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md + + RAX_META_PREFIX is an environment variable that changes the prefix used for meta key/value groups. For compatibility with ec2.py set to RAX_META_PREFIX=tag -requirements: [ "pyrax" ] -examples: - - description: List server instances - code: RAX_CREDS_FILE=~/.raxpub rax.py --list - - description: List servers in ORD datacenter only - code: RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD rax.py --list - - description: List servers in ORD and DFW datacenters - code: RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD,DFW rax.py --list - - description: Get server details for server named "server.example.com" - code: RAX_CREDS_FILE=~/.raxpub rax.py --host server.example.com -''' + + RAX_ACCESS_NETWORK is an environment variable that will tell the inventory + script to use a specific server network to determine the ansible_ssh_host + value. If no address is found, ansible_ssh_host will not be set. + + RAX_ACCESS_IP_VERSION is an environment variable related to + RAX_ACCESS_NETWORK that will attempt to determine the ansible_ssh_host + value for either IPv4 or IPv6. If no address is found, ansible_ssh_host + will not be set. Acceptable values are: 4 or 6. Values other than 4 or 6 + will be ignored, and 4 will be used. 
+ +Examples: + List server instances + $ RAX_CREDS_FILE=~/.raxpub rax.py --list + + List servers in ORD datacenter only + $ RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD rax.py --list + + List servers in ORD and DFW datacenters + $ RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD,DFW rax.py --list + + Get server details for server named "server.example.com" + $ RAX_CREDS_FILE=~/.raxpub rax.py --host server.example.com + + Use the instance private IP to connect (instead of public IP) + $ RAX_CREDS_FILE=~/.raxpub RAX_PRIVATE_IP=yes rax.py --list +""" import os import re import sys import argparse +import warnings import collections from types import NoneType try: import json -except: +except ImportError: import simplejson as json try: @@ -126,7 +136,7 @@ def to_dict(obj): instance = {} for key in dir(obj): value = getattr(obj, key) - if (isinstance(value, NON_CALLABLES) and not key.startswith('_')): + if isinstance(value, NON_CALLABLES) and not key.startswith('_'): key = rax_slugify(key) instance[key] = value @@ -154,10 +164,25 @@ def _list(regions): hostvars = collections.defaultdict(dict) images = {} + network = os.getenv('RAX_ACCESS_NETWORK', 'public') + try: + ip_version = int(os.getenv('RAX_ACCESS_IP_VERSION', 4)) + except: + ip_version = 4 + else: + if ip_version not in [4, 6]: + ip_version = 4 + # Go through all the regions looking for servers for region in regions: # Connect to the region cs = pyrax.connect_to_cloudservers(region=region) + if isinstance(cs, NoneType): + warnings.warn( + 'Connecting to Rackspace region "%s" has caused Pyrax to ' + 'return a NoneType. Is this a valid region?' 
% region, + RuntimeWarning) + continue for server in cs.servers.list(): # Create a group on region groups[region].append(server.name) @@ -198,7 +223,21 @@ def _list(regions): groups['image-%s' % server.image['id']].append(server.name) # And finally, add an IP address - hostvars[server.name]['ansible_ssh_host'] = server.accessIPv4 + ansible_ssh_host = None + # use accessIPv[46] instead of looping address for 'public' + if network == 'public': + if ip_version == 6 and server.accessIPv6: + ansible_ssh_host = server.accessIPv6 + elif server.accessIPv4: + ansible_ssh_host = server.accessIPv4 + else: + addresses = server.addresses.get(network, []) + for address in addresses: + if address.get('version') == ip_version: + ansible_ssh_host = address.get('addr') + break + if ansible_ssh_host: + hostvars[server.name]['ansible_ssh_host'] = ansible_ssh_host if hostvars: groups['_meta'] = {'hostvars': hostvars} From 1e92aadb5a00cfb2a7e066a73248aa83397b51df Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Mon, 3 Nov 2014 10:34:01 -0600 Subject: [PATCH 362/813] Add support for reading from a config file --- plugins/inventory/rax.ini | 55 ++++++++++++++++ plugins/inventory/rax.py | 133 +++++++++++++++++++++++++++----------- 2 files changed, 151 insertions(+), 37 deletions(-) create mode 100644 plugins/inventory/rax.ini diff --git a/plugins/inventory/rax.ini b/plugins/inventory/rax.ini new file mode 100644 index 0000000000..5215d0d291 --- /dev/null +++ b/plugins/inventory/rax.ini @@ -0,0 +1,55 @@ +# Ansible Rackspace external inventory script settings +# + +[rax] + +# Environment Variable: RAX_CREDS_FILE +# +# An optional configuration that points to a pyrax-compatible credentials +# file. +# +# If not supplied, rax.py will look for a credentials file +# at ~/.rackspace_cloud_credentials. It uses the Rackspace Python SDK, +# and therefore requires a file formatted per the SDK's specifications. 
+# +# https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md +# creds_file = ~/.rackspace_cloud_credentials + +# Environment Variable: RAX_REGION +# +# An optional environment variable to narrow inventory search +# scope. If used, needs a value like ORD, DFW, SYD (a Rackspace +# datacenter) and optionally accepts a comma-separated list. +# regions = IAD,ORD,DFW + +# Environment Variable: RAX_ENV +# +# A configuration that will use an environment as configured in +# ~/.pyrax.cfg, see +# https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md +# env = prod + +# Environment Variable: RAX_META_PREFIX +# Default: meta +# +# A configuration that changes the prefix used for meta key/value groups. +# For compatibility with ec2.py set to "tag" +# meta_prefix = meta + +# Environment Variable: RAX_ACCESS_NETWORK +# Default: public +# +# A configuration that will tell the inventory script to use a specific +# server network to determine the ansible_ssh_host value. If no address +# is found, ansible_ssh_host will not be set. +# access_network = public + +# Environment Variable: RAX_ACCESS_IP_VERSION +# Default: 4 +# +# A configuration related to "access_network" that will attempt to +# determine the ansible_ssh_host value for either IPv4 or IPv6. If no +# address is found, ansible_ssh_host will not be set. +# Acceptable values are: 4 or 6. Values other than 4 or 6 +# will be ignored, and 4 will be used. +# access_ip_version = 4 diff --git a/plugins/inventory/rax.py b/plugins/inventory/rax.py index 87b7f9cafc..778f903216 100644 --- a/plugins/inventory/rax.py +++ b/plugins/inventory/rax.py @@ -56,37 +56,75 @@ Description: rax_tenant_id rax_loaded -Notes: - RAX_CREDS_FILE is an optional environment variable that points to a - pyrax-compatible credentials file. +Configuration: + rax.py can be configured using a rax.ini file or via environment + variables. The rax.ini file should live in the same directory along side + this script. 
- If RAX_CREDS_FILE is not supplied, rax.py will look for a credentials file - at ~/.rackspace_cloud_credentials. It uses the Rackspace Python SDK, and - therefore requires a file formatted per the SDK's specifications. See - https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md - #authenticating + The section header for configuration values related to this + inventory plugin is [rax] - RAX_REGION is an optional environment variable to narrow inventory search - scope. RAX_REGION, if used, needs a value like ORD, DFW, SYD (a Rackspace - datacenter) and optionally accepts a comma-separated list. + [rax] + creds_file = ~/.rackspace_cloud_credentials + regions = IAD,ORD,DFW + env = prod + meta_prefix = meta + access_network = public + access_ip_version = 4 - RAX_ENV is an environment variable that will use an environment as - configured in ~/.pyrax.cfg, see - https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md + Each of these configurations also has a corresponding environment variable. + An environment variable will override a configuration file value. - RAX_META_PREFIX is an environment variable that changes the prefix used - for meta key/value groups. For compatibility with ec2.py set to - RAX_META_PREFIX=tag + creds_file: + Environment Variable: RAX_CREDS_FILE - RAX_ACCESS_NETWORK is an environment variable that will tell the inventory - script to use a specific server network to determine the ansible_ssh_host - value. If no address is found, ansible_ssh_host will not be set. + An optional configuration that points to a pyrax-compatible credentials + file. - RAX_ACCESS_IP_VERSION is an environment variable related to - RAX_ACCESS_NETWORK that will attempt to determine the ansible_ssh_host - value for either IPv4 or IPv6. If no address is found, ansible_ssh_host - will not be set. Acceptable values are: 4 or 6. Values other than 4 or 6 - will be ignored, and 4 will be used. 
+ If not supplied, rax.py will look for a credentials file + at ~/.rackspace_cloud_credentials. It uses the Rackspace Python SDK, + and therefore requires a file formatted per the SDK's specifications. + + https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md + + regions: + Environment Variable: RAX_REGION + + An optional environment variable to narrow inventory search + scope. If used, needs a value like ORD, DFW, SYD (a Rackspace + datacenter) and optionally accepts a comma-separated list. + + environment: + Environment Variable: RAX_ENV + + A configuration that will use an environment as configured in + ~/.pyrax.cfg, see + https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md + + meta_prefix: + Environment Variable: RAX_META_PREFIX + Default: meta + + A configuration that changes the prefix used for meta key/value groups. + For compatibility with ec2.py set to "tag" + + access_network: + Environment Variable: RAX_ACCESS_NETWORK + Default: public + + A configuration that will tell the inventory script to use a specific + server network to determine the ansible_ssh_host value. If no address + is found, ansible_ssh_host will not be set. + + access_ip_version: + Environment Variable: RAX_ACCESS_IP_VERSION + Default: 4 + + A configuration related to "access_network" that will attempt to + determine the ansible_ssh_host value for either IPv4 or IPv6. If no + address is found, ansible_ssh_host will not be set. + Acceptable values are: 4 or 6. Values other than 4 or 6 + will be ignored, and 4 will be used. 
Examples: List server instances @@ -102,7 +140,7 @@ Examples: $ RAX_CREDS_FILE=~/.raxpub rax.py --host server.example.com Use the instance private IP to connect (instead of public IP) - $ RAX_CREDS_FILE=~/.raxpub RAX_PRIVATE_IP=yes rax.py --list + $ RAX_CREDS_FILE=~/.raxpub RAX_ACCESS_NETWORK=private rax.py --list """ import os @@ -111,8 +149,9 @@ import sys import argparse import warnings import collections +import ConfigParser -from types import NoneType +from ansible.constants import get_config, mk_boolean try: import json @@ -125,7 +164,20 @@ except ImportError: print('pyrax is required for this module') sys.exit(1) -NON_CALLABLES = (basestring, bool, dict, int, list, NoneType) +NON_CALLABLES = (basestring, bool, dict, int, list, type(None)) + + +def load_config_file(): + p = ConfigParser.ConfigParser() + config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), + 'rax.ini') + try: + p.read(config_file) + except ConfigParser.Error: + return None + else: + return p +p = load_config_file() def rax_slugify(value): @@ -163,10 +215,13 @@ def _list(regions): groups = collections.defaultdict(list) hostvars = collections.defaultdict(dict) images = {} + prefix = get_config(p, 'rax', 'meta_prefix', 'RAX_META_PREFIX', 'meta') - network = os.getenv('RAX_ACCESS_NETWORK', 'public') + network = get_config(p, 'rax', 'access_network', 'RAX_ACCESS_NETWORK', + 'public') try: - ip_version = int(os.getenv('RAX_ACCESS_IP_VERSION', 4)) + ip_version = get_config(p, 'rax', 'access_ip_version', + 'RAX_ACCESS_IP_VERSION', 4, integer=True) except: ip_version = 4 else: @@ -177,7 +232,7 @@ def _list(regions): for region in regions: # Connect to the region cs = pyrax.connect_to_cloudservers(region=region) - if isinstance(cs, NoneType): + if cs is None: warnings.warn( 'Connecting to Rackspace region "%s" has caused Pyrax to ' 'return a NoneType. Is this a valid region?' 
% region, @@ -257,16 +312,18 @@ def parse_args(): def setup(): default_creds_file = os.path.expanduser('~/.rackspace_cloud_credentials') - env = os.getenv('RAX_ENV', None) + env = get_config(p, 'rax', 'environment', 'RAX_ENV', None) if env: pyrax.set_environment(env) keyring_username = pyrax.get_setting('keyring_username') # Attempt to grab credentials from environment first - try: - creds_file = os.path.expanduser(os.environ['RAX_CREDS_FILE']) - except KeyError, e: + creds_file = get_config(p, 'rax', 'creds_file', + 'RAX_CREDS_FILE', None) + if creds_file is not None: + creds_file = os.path.expanduser(creds_file) + else: # But if that fails, use the default location of # ~/.rackspace_cloud_credentials if os.path.isfile(default_creds_file): @@ -274,7 +331,7 @@ def setup(): elif not keyring_username: sys.stderr.write('No value in environment variable %s and/or no ' 'credentials file at %s\n' - % (e.message, default_creds_file)) + % ('RAX_CREDS_FILE', default_creds_file)) sys.exit(1) identity_type = pyrax.get_setting('identity_type') @@ -295,7 +352,9 @@ def setup(): if region: regions.append(region) else: - for region in os.getenv('RAX_REGION', 'all').split(','): + region_list = get_config(p, 'rax', 'regions', 'RAX_REGION', 'all', + islist=True) + for region in region_list: region = region.strip().upper() if region == 'ALL': regions = pyrax.regions From b9b3c0ded6bc87420c8891ed28fb175f66d273f9 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Mon, 3 Nov 2014 10:34:59 -0600 Subject: [PATCH 363/813] Support boot from volume discovery --- plugins/inventory/rax.py | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/plugins/inventory/rax.py b/plugins/inventory/rax.py index 778f903216..ef45148c5b 100644 --- a/plugins/inventory/rax.py +++ b/plugins/inventory/rax.py @@ -160,6 +160,7 @@ except ImportError: try: import pyrax + from pyrax.utils import slugify except ImportError: print('pyrax is required for this module') sys.exit(1) @@ 
-215,6 +216,8 @@ def _list(regions): groups = collections.defaultdict(list) hostvars = collections.defaultdict(dict) images = {} + cbs_attachments = collections.defaultdict(dict) + prefix = get_config(p, 'rax', 'meta_prefix', 'RAX_META_PREFIX', 'meta') network = get_config(p, 'rax', 'access_network', 'RAX_ACCESS_NETWORK', @@ -258,11 +261,33 @@ def _list(regions): hostvars[server.name]['rax_region'] = region for key, value in server.metadata.iteritems(): - prefix = os.getenv('RAX_META_PREFIX', 'meta') groups['%s_%s_%s' % (prefix, key, value)].append(server.name) groups['instance-%s' % server.id].append(server.name) groups['flavor-%s' % server.flavor['id']].append(server.name) + + # Handle boot from volume + if not server.image: + if not cbs_attachments[region]: + cbs = pyrax.connect_to_cloud_blockstorage(region) + for vol in cbs.list(): + if mk_boolean(vol.bootable): + for attachment in vol.attachments: + metadata = vol.volume_image_metadata + server_id = attachment['server_id'] + cbs_attachments[region][server_id] = { + 'id': metadata['image_id'], + 'name': slugify(metadata['image_name']) + } + image = cbs_attachments[region].get(server.id) + if image: + server.image = {'id': image['id']} + hostvars[server.name]['rax_image'] = server.image + hostvars[server.name]['rax_boot_source'] = 'volume' + images[image['id']] = image['name'] + else: + hostvars[server.name]['rax_boot_source'] = 'local' + try: imagegroup = 'image-%s' % images[server.image['id']] groups[imagegroup].append(server.name) From 2f03e0c90619394e962f986ad2cc2f9a779b215f Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Mon, 10 Nov 2014 11:49:22 -0600 Subject: [PATCH 364/813] Support fallbacks for access network and access ip version --- plugins/inventory/rax.ini | 6 +++-- plugins/inventory/rax.py | 53 ++++++++++++++++++++++++--------------- 2 files changed, 37 insertions(+), 22 deletions(-) diff --git a/plugins/inventory/rax.ini b/plugins/inventory/rax.ini index 5215d0d291..5a269e16a3 100644 --- 
a/plugins/inventory/rax.ini +++ b/plugins/inventory/rax.ini @@ -41,7 +41,8 @@ # # A configuration that will tell the inventory script to use a specific # server network to determine the ansible_ssh_host value. If no address -# is found, ansible_ssh_host will not be set. +# is found, ansible_ssh_host will not be set. Accepts a comma-separated +# list of network names, the first found wins. # access_network = public # Environment Variable: RAX_ACCESS_IP_VERSION @@ -51,5 +52,6 @@ # determine the ansible_ssh_host value for either IPv4 or IPv6. If no # address is found, ansible_ssh_host will not be set. # Acceptable values are: 4 or 6. Values other than 4 or 6 -# will be ignored, and 4 will be used. +# will be ignored, and 4 will be used. Accepts a comma separated list, +# the first found wins. # access_ip_version = 4 diff --git a/plugins/inventory/rax.py b/plugins/inventory/rax.py index ef45148c5b..10b72d322b 100644 --- a/plugins/inventory/rax.py +++ b/plugins/inventory/rax.py @@ -114,7 +114,8 @@ Configuration: A configuration that will tell the inventory script to use a specific server network to determine the ansible_ssh_host value. If no address - is found, ansible_ssh_host will not be set. + is found, ansible_ssh_host will not be set. Accepts a comma-separated + list of network names, the first found wins. access_ip_version: Environment Variable: RAX_ACCESS_IP_VERSION @@ -124,7 +125,8 @@ Configuration: determine the ansible_ssh_host value for either IPv4 or IPv6. If no address is found, ansible_ssh_host will not be set. Acceptable values are: 4 or 6. Values other than 4 or 6 - will be ignored, and 4 will be used. + will be ignored, and 4 will be used. Accepts a comma-separated list, + the first found wins. 
Examples: List server instances @@ -220,16 +222,18 @@ def _list(regions): prefix = get_config(p, 'rax', 'meta_prefix', 'RAX_META_PREFIX', 'meta') - network = get_config(p, 'rax', 'access_network', 'RAX_ACCESS_NETWORK', - 'public') + networks = get_config(p, 'rax', 'access_network', 'RAX_ACCESS_NETWORK', + 'public', islist=True) try: - ip_version = get_config(p, 'rax', 'access_ip_version', - 'RAX_ACCESS_IP_VERSION', 4, integer=True) + ip_versions = map(int, get_config(p, 'rax', 'access_ip_version', + 'RAX_ACCESS_IP_VERSION', 4, + islist=True)) except: - ip_version = 4 + ip_versions = [4] else: - if ip_version not in [4, 6]: - ip_version = 4 + ip_versions = [v for v in ip_versions if v in [4, 6]] + if not ip_versions: + ip_versions = [4] # Go through all the regions looking for servers for region in regions: @@ -305,17 +309,26 @@ def _list(regions): # And finally, add an IP address ansible_ssh_host = None # use accessIPv[46] instead of looping address for 'public' - if network == 'public': - if ip_version == 6 and server.accessIPv6: - ansible_ssh_host = server.accessIPv6 - elif server.accessIPv4: - ansible_ssh_host = server.accessIPv4 - else: - addresses = server.addresses.get(network, []) - for address in addresses: - if address.get('version') == ip_version: - ansible_ssh_host = address.get('addr') - break + for network_name in networks: + if ansible_ssh_host: + break + if network_name == 'public': + for version_name in ip_versions: + if ansible_ssh_host: + break + if version_name == 6 and server.accessIPv6: + ansible_ssh_host = server.accessIPv6 + elif server.accessIPv4: + ansible_ssh_host = server.accessIPv4 + if not ansible_ssh_host: + addresses = server.addresses.get(network_name, []) + for address in addresses: + for version_name in ip_versions: + if ansible_ssh_host: + break + if address.get('version') == version_name: + ansible_ssh_host = address.get('addr') + break if ansible_ssh_host: hostvars[server.name]['ansible_ssh_host'] = ansible_ssh_host From 
cfe9f1c32f4f76eee5f385508b40ad4608916ba0 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 10 Nov 2014 10:39:34 -0800 Subject: [PATCH 365/813] Expand tilde in path names --- lib/ansible/runner/shell_plugins/sh.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/ansible/runner/shell_plugins/sh.py b/lib/ansible/runner/shell_plugins/sh.py index 134c857f17..52e3f68f33 100644 --- a/lib/ansible/runner/shell_plugins/sh.py +++ b/lib/ansible/runner/shell_plugins/sh.py @@ -37,10 +37,12 @@ class ShellModule(object): return path.endswith('/') def chmod(self, mode, path): + path = os.path.expanduser(path) path = pipes.quote(path) return 'chmod %s %s' % (mode, path) def remove(self, path, recurse=False): + path = os.path.expanduser(path) path = pipes.quote(path) if recurse: return "rm -rf %s >/dev/null 2>&1" % path @@ -60,6 +62,7 @@ class ShellModule(object): return cmd def checksum(self, path, python_interp): + path = os.path.expanduser(path) path = pipes.quote(path) # The following test needs to be SH-compliant. BASH-isms will # not work if /bin/sh points to a non-BASH shell. 
From 3ccb0b82437e6ec8f30367adb11472a772e46a69 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 10 Nov 2014 11:25:45 -0800 Subject: [PATCH 366/813] Revert expanding the tilde in shell plugin until we determine why it's forcing fetch down an alternate path --- lib/ansible/runner/shell_plugins/sh.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/ansible/runner/shell_plugins/sh.py b/lib/ansible/runner/shell_plugins/sh.py index 52e3f68f33..713e41b3f6 100644 --- a/lib/ansible/runner/shell_plugins/sh.py +++ b/lib/ansible/runner/shell_plugins/sh.py @@ -37,12 +37,12 @@ class ShellModule(object): return path.endswith('/') def chmod(self, mode, path): - path = os.path.expanduser(path) + #path = os.path.expanduser(path) path = pipes.quote(path) return 'chmod %s %s' % (mode, path) def remove(self, path, recurse=False): - path = os.path.expanduser(path) + #path = os.path.expanduser(path) path = pipes.quote(path) if recurse: return "rm -rf %s >/dev/null 2>&1" % path @@ -62,7 +62,7 @@ class ShellModule(object): return cmd def checksum(self, path, python_interp): - path = os.path.expanduser(path) + #path = os.path.expanduser(path) path = pipes.quote(path) # The following test needs to be SH-compliant. BASH-isms will # not work if /bin/sh points to a non-BASH shell. From 6a85f3ebc7ca7288a1955079987cecf5e924cfba Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 10 Nov 2014 12:00:49 -0800 Subject: [PATCH 367/813] Add comments/docstrings not to use md5 unless forced to by forces outside our control.
--- lib/ansible/module_utils/basic.py | 11 ++++++++++- lib/ansible/utils/__init__.py | 5 +++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index b8cfea2014..b8118ed558 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1238,7 +1238,16 @@ class AnsibleModule(object): return digest.hexdigest() def md5(self, filename): - ''' Return MD5 hex digest of local file using digest_from_file(). ''' + ''' Return MD5 hex digest of local file using digest_from_file(). + + Do not use this function unless you have no other choice for: + 1) Optional backwards compatibility + 2) Compatibility with a third party protocol + + This function will not work on systems complying with FIPS-140-2. + + Most uses of this function can use the module.sha1 function instead. + ''' return self.digest_from_file(filename, _md5()) def sha1(self, filename): diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index e82ae8d374..770e9be6a8 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -864,6 +864,11 @@ checksum_s = secure_hash_s # Backwards compat. Some modules include md5s in their return values # Continue to support that for now. As of ansible-1.8, all of those modules # should also return "checksum" (sha1 for now) +# Do not use md5 unless it is needed for: +# 1) Optional backwards compatibility +# 2) Compliance with a third party protocol +# +# MD5 will not work on systems which are FIPS-140-2 compliant.
def md5s(data): return secure_hash_s(data, _md5) From a33dccfa61ecdb8cf1fba0720e66c016059ea12b Mon Sep 17 00:00:00 2001 From: mmicael1 Date: Mon, 10 Nov 2014 22:56:38 +0100 Subject: [PATCH 368/813] Add tags options Add -t OR --tags options to pass to ansible-playbook --- bin/ansible-pull | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/bin/ansible-pull b/bin/ansible-pull index 4f4da24d83..2d91324315 100755 --- a/bin/ansible-pull +++ b/bin/ansible-pull @@ -135,6 +135,8 @@ def main(args): help="vault password file") parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', action='store_true', help='ask for sudo password') + parser.add_option('-t', '--tags', dest='tags', default=False, + help='only run plays and tasks tagged with these values') options, args = parser.parse_args(args) hostname = socket.getfqdn() @@ -214,6 +216,8 @@ def main(args): cmd += ' -e "%s"' % ev if options.ask_sudo_pass: cmd += ' -K' + if options.tags: + cmd += ' -t "%s"' % options.tags os.chdir(options.dest) # RUN THE PLAYBOOK COMMAND From 2f7348fddf3add69eb620d5e1ca6cdf1ec55a534 Mon Sep 17 00:00:00 2001 From: Chris Church Date: Tue, 11 Nov 2014 02:20:28 -0500 Subject: [PATCH 369/813] Update firewall rules, error handling, other comment/whitespace cleanup. 
--- .../scripts/ConfigureRemotingForAnsible.ps1 | 245 ++++++++++-------- 1 file changed, 134 insertions(+), 111 deletions(-) diff --git a/examples/scripts/ConfigureRemotingForAnsible.ps1 b/examples/scripts/ConfigureRemotingForAnsible.ps1 index 1cb3375725..39601d2a76 100644 --- a/examples/scripts/ConfigureRemotingForAnsible.ps1 +++ b/examples/scripts/ConfigureRemotingForAnsible.ps1 @@ -1,11 +1,18 @@ -# Script to set a windows computer up for remoting -# The script checks the current WinRM/Remoting configuration and makes the necessary changes -# set $VerbosePreference="Continue" before running the script in order to see the output of the script +# Configure a Windows host for remote management with Ansible +# ----------------------------------------------------------- +# +# This script checks the current WinRM/PSRemoting configuration and makes the +# necessary changes to allow Ansible to connect, authenticate and execute +# PowerShell commands. +# +# Set $VerbosePreference = "Continue" before running the script in order to +# see the output messages. 
# # Written by Trond Hindenes +# Updated by Chris Church # # Version 1.0 - July 6th, 2014 - +# Version 1.1 - November 11th, 2014 Param ( [string]$SubjectName = $env:COMPUTERNAME, @@ -14,7 +21,6 @@ Param ( ) -#region function defs Function New-LegacySelfSignedCert { Param ( @@ -22,10 +28,10 @@ Function New-LegacySelfSignedCert [int]$ValidDays = 365 ) - $name = new-object -com "X509Enrollment.CX500DistinguishedName.1" + $name = New-Object -COM "X509Enrollment.CX500DistinguishedName.1" $name.Encode("CN=$SubjectName", 0) - $key = new-object -com "X509Enrollment.CX509PrivateKey.1" + $key = New-Object -COM "X509Enrollment.CX509PrivateKey.1" $key.ProviderName = "Microsoft RSA SChannel Cryptographic Provider" $key.KeySpec = 1 $key.Length = 1024 @@ -33,149 +39,166 @@ Function New-LegacySelfSignedCert $key.MachineContext = 1 $key.Create() - $serverauthoid = new-object -com "X509Enrollment.CObjectId.1" + $serverauthoid = New-Object -COM "X509Enrollment.CObjectId.1" $serverauthoid.InitializeFromValue("1.3.6.1.5.5.7.3.1") - $ekuoids = new-object -com "X509Enrollment.CObjectIds.1" - $ekuoids.add($serverauthoid) - $ekuext = new-object -com "X509Enrollment.CX509ExtensionEnhancedKeyUsage.1" + $ekuoids = New-Object -COM "X509Enrollment.CObjectIds.1" + $ekuoids.Add($serverauthoid) + $ekuext = New-Object -COM "X509Enrollment.CX509ExtensionEnhancedKeyUsage.1" $ekuext.InitializeEncode($ekuoids) - $cert = new-object -com "X509Enrollment.CX509CertificateRequestCertificate.1" + $cert = New-Object -COM "X509Enrollment.CX509CertificateRequestCertificate.1" $cert.InitializeFromPrivateKey(2, $key, "") $cert.Subject = $name $cert.Issuer = $cert.Subject - $cert.NotBefore = (get-date).addDays(-1) + $cert.NotBefore = (Get-Date).AddDays(-1) $cert.NotAfter = $cert.NotBefore.AddDays($ValidDays) $cert.X509Extensions.Add($ekuext) $cert.Encode() - $enrollment = new-object -com "X509Enrollment.CX509Enrollment.1" + $enrollment = New-Object -COM "X509Enrollment.CX509Enrollment.1" 
$enrollment.InitializeFromRequest($cert) $certdata = $enrollment.CreateRequest(0) $enrollment.InstallResponse(2, $certdata, 0, "") - #return the thumprint of the last installed cert - ls "Cert:\LocalMachine\my"| Sort-Object notbefore -Descending | select -First 1 | select -expand Thumbprint + # Return the thumbprint of the last installed cert. + Get-ChildItem "Cert:\LocalMachine\my"| Sort-Object NotBefore -Descending | Select -First 1 | Select -Expand Thumbprint } -#endregion -#Start script +# Setup error handling. +Trap +{ + $_ + Exit 1 +} $ErrorActionPreference = "Stop" -#Detect PowerShell version -if ($PSVersionTable.PSVersion.Major -lt 3) + +# Detect PowerShell version. +If ($PSVersionTable.PSVersion.Major -lt 3) { - Write-Error "PowerShell/Windows Management Framework needs to be updated to 3 or higher. Stopping script" + Throw "PowerShell version 3 or higher is required." } -#Detect OS - $Win32_OS = Get-WmiObject Win32_OperatingSystem - switch ($Win32_OS.Version) - { - "6.2.9200" {$OSVersion = "Windows Server 2012"} - "6.1.7601" {$OSVersion = "Windows Server 2008R2"} - } - - - #Set up remoting - Write-verbose "Verifying WS-MAN" - if (!(get-service "WinRM")) - { - Write-Error "I couldnt find the winRM service on this computer. Stopping" - } - Elseif ((get-service "WinRM").Status -ne "Running") - { - Write-Verbose "Starting WinRM" +# Find and start the WinRM service. +Write-Verbose "Verifying WinRM service." +If (!(Get-Service "WinRM")) +{ + Throw "Unable to find the WinRM service." +} +ElseIf ((Get-Service "WinRM").Status -ne "Running") +{ + Write-Verbose "Starting WinRM service." Start-Service -Name "WinRM" -ErrorAction Stop - } +} - #At this point, winrm should be running - #Check that we have a ps session config - if (!(Get-PSSessionConfiguration -verbose:$false) -or (!(get-childitem WSMan:\localhost\Listener))) - { - Write-Verbose "PS remoting is not enabled. Activating" - try + +# WinRM should be running; check that we have a PS session config. 
+If (!(Get-PSSessionConfiguration -Verbose:$false) -or (!(Get-ChildItem WSMan:\localhost\Listener))) +{ + Write-Verbose "Enabling PS Remoting." + Try { Enable-PSRemoting -Force -ErrorAction SilentlyContinue - } - catch{} - } - Else - { - Write-Verbose "PS remoting is already active and running" - } - - #At this point, test a remoting connection to localhost, which should work - $result = invoke-command -ComputerName localhost -ScriptBlock {$env:computername} -ErrorVariable localremotingerror -ErrorAction SilentlyContinue - - $options = New-PSSessionOption -SkipCACheck -SkipCNCheck -SkipRevocationCheck - $resultssl = New-PSSession -UseSSL -ComputerName "localhost" -SessionOption $options -ErrorVariable localremotingsslerror -ErrorAction SilentlyContinue - - - if (!$result -and $resultssl) - { - Write-Verbose "HTTP-based sessions not enabled, HTTPS based sessions enabled" - } - ElseIf (!$result -and !$resultssl) - { - Write-error "Could not establish session on either HTTP or HTTPS. Breaking" - } - - - #at this point, make sure there is a SSL-based listener - $listeners = dir WSMan:\localhost\Listener - - if (!($listeners | where {$_.Keys -like "TRANSPORT=HTTPS"})) - { - #HTTPS-based endpoint does not exist. - if (($CreateSelfSignedCert) -and ($OSVersion -notmatch "2012")) - { - $thumprint = New-LegacySelfSignedCert -SubjectName $env:COMPUTERNAME } - if (($CreateSelfSignedCert) -and ($OSVersion -match "2012")) + Catch + { + } +} +Else +{ + Write-Verbose "PS Remoting is already enabled." +} + + +# Test a remoting connection to localhost, which should work. 
+$httpResult = Invoke-Command -ComputerName "localhost" -ScriptBlock {$env:COMPUTERNAME} -ErrorVariable httpError -ErrorAction SilentlyContinue +$httpsOptions = New-PSSessionOption -SkipCACheck -SkipCNCheck -SkipRevocationCheck + +$httpsResult = New-PSSession -UseSSL -ComputerName "localhost" -SessionOption $httpsOptions -ErrorVariable httpsError -ErrorAction SilentlyContinue + +If ($httpResult -and $httpsResult) +{ + Write-Verbose "HTTP and HTTPS sessions are enabled." +} +ElseIf ($httpsResult -and !$httpResult) +{ + Write-Verbose "HTTP sessions are disabled, HTTPS sessions are enabled." +} +ElseIf ($httpResult -and !$httpsResult) +{ + Write-Verbose "HTTPS sessions are disabled, HTTP sessions are enabled." +} +Else +{ + Throw "Unable to establish an HTTP or HTTPS remoting session." +} + + +# Make sure there is an SSL listener. +$listeners = Get-ChildItem WSMan:\localhost\Listener +If (!($listeners | Where {$_.Keys -like "TRANSPORT=HTTPS"})) +{ + # HTTPS-based endpoint does not exist. + If (Get-Command "New-SelfSignedCertificate" -ErrorAction SilentlyContinue) { $cert = New-SelfSignedCertificate -DnsName $env:COMPUTERNAME -CertStoreLocation "Cert:\LocalMachine\My" - $thumprint = $cert.Thumbprint + $thumbprint = $cert.Thumbprint } - - - + Else + { + $thumbprint = New-LegacySelfSignedCert -SubjectName $env:COMPUTERNAME + } + # Create the hashtables of settings to be used.
$valueset = @{} - $valueset.add('Hostname',$env:COMPUTERNAME) - $valueset.add('CertificateThumbprint',$thumprint) + $valueset.Add('Hostname', $env:COMPUTERNAME) + $valueset.Add('CertificateThumbprint', $thumbprint) $selectorset = @{} - $selectorset.add('Transport','HTTPS') - $selectorset.add('Address','*') + $selectorset.Add('Transport', 'HTTPS') + $selectorset.Add('Address', '*') - Write-Verbose "Enabling SSL-based remoting" - New-WSManInstance -ResourceURI 'winrm/config/Listener' -SelectorSet $selectorset -ValueSet $valueset - } - Else - { - Write-Verbose "SSL-based remoting already active" - } + Write-Verbose "Enabling SSL listener." + New-WSManInstance -ResourceURI 'winrm/config/Listener' -SelectorSet $selectorset -ValueSet $valueset +} +Else +{ + Write-Verbose "SSL listener is already active." +} - #Check for basic authentication - $basicauthsetting = Get-ChildItem WSMan:\localhost\Service\Auth | where {$_.Name -eq "Basic"} - - if (($basicauthsetting.Value) -eq $false) - { - Write-Verbose "Enabling basic auth" +# Check for basic authentication. +$basicAuthSetting = Get-ChildItem WSMan:\localhost\Service\Auth | Where {$_.Name -eq "Basic"} +If (($basicAuthSetting.Value) -eq $false) +{ + Write-Verbose "Enabling basic auth support." Set-Item -Path "WSMan:\localhost\Service\Auth\Basic" -Value $true - } - Else - { - Write-verbose "basic auth already enabled" - } - -#FIrewall -netsh advfirewall firewall add rule Profile=public name="Allow WinRM HTTPS" dir=in localport=5986 protocol=TCP action=allow +} +Else +{ + Write-Verbose "Basic auth is already enabled." +} +# Configure firewall to allow WinRM HTTPS connections. +$fwtest1 = netsh advfirewall firewall show rule name="Allow WinRM HTTPS" +$fwtest2 = netsh advfirewall firewall show rule name="Allow WinRM HTTPS" profile=any +If ($fwtest1.count -lt 5) +{ + Write-Verbose "Adding firewall rule to allow WinRM HTTPS." 
+ netsh advfirewall firewall add rule profile=any name="Allow WinRM HTTPS" dir=in localport=5986 protocol=TCP action=allow +} +ElseIf (($fwtest1.count -ge 5) -and ($fwtest2.count -lt 5)) +{ + Write-Verbose "Updating firewall rule to allow WinRM HTTPS for any profile." + netsh advfirewall firewall set rule name="Allow WinRM HTTPS" new profile=any +} +Else +{ + Write-Verbose "Firewall rule already exists to allow WinRM HTTPS." +} - Write-Verbose "PS Remoting successfully setup for Ansible" + +Write-Verbose "PS Remoting has been successfully configured for Ansible." From bc4272d2a26e47418c7d588208482d05a34a34cd Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 11 Nov 2014 12:28:19 -0800 Subject: [PATCH 370/813] Expand tilde remotely in action plugins --- lib/ansible/runner/__init__.py | 21 +++++++++++++++++++ lib/ansible/runner/action_plugins/assemble.py | 1 + lib/ansible/runner/action_plugins/copy.py | 3 +++ lib/ansible/runner/action_plugins/fetch.py | 1 + lib/ansible/runner/action_plugins/template.py | 2 ++ .../runner/action_plugins/unarchive.py | 2 +- lib/ansible/runner/shell_plugins/sh.py | 7 ++++--- 7 files changed, 33 insertions(+), 4 deletions(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 6351e2aab8..8f271f0500 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -1159,6 +1159,27 @@ class Runner(object): # ***************************************************** + def _remote_expand_user(self, conn, path, tmp): + ''' takes a remote path and performs tilde expansion on the remote host ''' + if not path.startswith('~'): + return path + split_path = path.split(os.path.sep, 1) + cmd = conn.shell.expand_user(split_path[0]) + data = self._low_level_exec_command(conn, cmd, tmp, sudoable=False, su=False) + initial_fragment = utils.last_non_blank_line(data['stdout']) + + if not initial_fragment: + # Something went wrong trying to expand the path remotely. 
Return + # the original string + return path + + if len(split_path) > 1: + return os.path.join(initial_fragment, *split_path[1:]) + else: + return initial_fragment + + # ***************************************************** + def _remote_checksum(self, conn, tmp, path, inject): ''' takes a remote checksum and returns 1 if no file ''' python_interp = inject['hostvars'][inject['inventory_hostname']].get('ansible_python_interpreter', 'python') diff --git a/lib/ansible/runner/action_plugins/assemble.py b/lib/ansible/runner/action_plugins/assemble.py index b0a45c4970..287e934865 100644 --- a/lib/ansible/runner/action_plugins/assemble.py +++ b/lib/ansible/runner/action_plugins/assemble.py @@ -109,6 +109,7 @@ class ActionModule(object): path = self._assemble_from_fragments(src, delimiter, _re) path_checksum = utils.checksum_s(path) + dest = self.runner._remote_expand_user(conn, dest, tmp) remote_checksum = self.runner._remote_checksum(conn, tmp, dest, inject) if path_checksum != remote_checksum: diff --git a/lib/ansible/runner/action_plugins/copy.py b/lib/ansible/runner/action_plugins/copy.py index 55524bca38..b180448988 100644 --- a/lib/ansible/runner/action_plugins/copy.py +++ b/lib/ansible/runner/action_plugins/copy.py @@ -157,6 +157,9 @@ class ActionModule(object): if "-tmp-" not in tmp_path: tmp_path = self.runner._make_tmp_path(conn) + # expand any user home dir specifier + dest = self.runner._remote_expand_user(conn, dest, tmp_path) + for source_full, source_rel in source_files: # Generate a hash of the local file. 
local_checksum = utils.checksum(source_full) diff --git a/lib/ansible/runner/action_plugins/fetch.py b/lib/ansible/runner/action_plugins/fetch.py index 030058498a..20574e6433 100644 --- a/lib/ansible/runner/action_plugins/fetch.py +++ b/lib/ansible/runner/action_plugins/fetch.py @@ -71,6 +71,7 @@ class ActionModule(object): return ReturnData(conn=conn, result=results) source = conn.shell.join_path(source) + source = self.runner._remote_expand_user(conn, source, tmp) # calculate checksum for the remote file remote_checksum = self.runner._remote_checksum(conn, tmp, source, inject) diff --git a/lib/ansible/runner/action_plugins/template.py b/lib/ansible/runner/action_plugins/template.py index 75fd7ff5a6..fd38c61063 100644 --- a/lib/ansible/runner/action_plugins/template.py +++ b/lib/ansible/runner/action_plugins/template.py @@ -75,6 +75,8 @@ class ActionModule(object): else: source = utils.path_dwim(self.runner.basedir, source) + # Expand any user home dir specification + dest = self.runner._remote_expand_user(conn, dest, tmp) if dest.endswith("/"): # CCTODO: Fix path for Windows hosts. base = os.path.basename(source) diff --git a/lib/ansible/runner/action_plugins/unarchive.py b/lib/ansible/runner/action_plugins/unarchive.py index f570a29d5c..2a1c8d1cd6 100644 --- a/lib/ansible/runner/action_plugins/unarchive.py +++ b/lib/ansible/runner/action_plugins/unarchive.py @@ -54,7 +54,7 @@ class ActionModule(object): result = dict(failed=True, msg="src (or content) and dest are required") return ReturnData(conn=conn, result=result) - dest = os.path.expanduser(dest) # CCTODO: Fix path for Windows hosts. + dest = self.runner._remote_expand_user(conn, dest, tmp) # CCTODO: Fix path for Windows hosts. 
source = template.template(self.runner.basedir, os.path.expanduser(source), inject) if copy: if '_original_file' in inject: diff --git a/lib/ansible/runner/shell_plugins/sh.py b/lib/ansible/runner/shell_plugins/sh.py index 713e41b3f6..38698e7b4e 100644 --- a/lib/ansible/runner/shell_plugins/sh.py +++ b/lib/ansible/runner/shell_plugins/sh.py @@ -37,12 +37,10 @@ class ShellModule(object): return path.endswith('/') def chmod(self, mode, path): - #path = os.path.expanduser(path) path = pipes.quote(path) return 'chmod %s %s' % (mode, path) def remove(self, path, recurse=False): - #path = os.path.expanduser(path) path = pipes.quote(path) if recurse: return "rm -rf %s >/dev/null 2>&1" % path @@ -61,8 +59,11 @@ class ShellModule(object): cmd += ' && echo %s' % basetmp return cmd + def expand_user(self, user_path): + # Quote the user portion but leave the tilde to be expanded + return 'echo ~%s' % pipes.quote(user_path[1:]) + def checksum(self, path, python_interp): - #path = os.path.expanduser(path) path = pipes.quote(path) # The following test needs to be SH-compliant. BASH-isms will # not work if /bin/sh points to a non-BASH shell. From 565e5bbdfc42afe29c4f53b303352d7e8406dcba Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 11 Nov 2014 13:54:03 -0800 Subject: [PATCH 371/813] Fix up the new expand_user method. quoting anywhere in the user_home_path interferes with shell expansion so we have to check it for validity ourselves. --- lib/ansible/runner/shell_plugins/sh.py | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/lib/ansible/runner/shell_plugins/sh.py b/lib/ansible/runner/shell_plugins/sh.py index 38698e7b4e..0cf89278d6 100644 --- a/lib/ansible/runner/shell_plugins/sh.py +++ b/lib/ansible/runner/shell_plugins/sh.py @@ -16,9 +16,12 @@ # along with Ansible. If not, see . 
import os +import re import pipes import ansible.constants as C +_USER_HOME_PATH_RE = re.compile(r'^~[_.A-Za-z0-9][-_.A-Za-z0-9]*$') + class ShellModule(object): def env_prefix(self, **kwargs): @@ -59,9 +62,21 @@ class ShellModule(object): cmd += ' && echo %s' % basetmp return cmd - def expand_user(self, user_path): - # Quote the user portion but leave the tilde to be expanded - return 'echo ~%s' % pipes.quote(user_path[1:]) + def expand_user(self, user_home_path): + ''' Return a command to expand tildes in a path + + It can be either "~" or "~username". We use the POSIX definition of + a username: + http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap03.html#tag_03_426 + http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap03.html#tag_03_276 + ''' + + # Check that the user_path to expand is safe + if user_home_path != '~': + if not _USER_HOME_PATH_RE.match(user_home_path): + # pipes.quote will make the shell return the string verbatim + user_home_path = pipes.quote(user_home_path) + return 'echo %s' % user_home_path def checksum(self, path, python_interp): path = pipes.quote(path) From d4d23b1b1f6b65edb13032104da11dee1b7d639f Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 11 Nov 2014 20:05:27 -0800 Subject: [PATCH 372/813] Add error when checksumming will fail because python is not present on the remote. Comments for how the remote checksumming works. Make the checksumming marginally more robust. 
--- lib/ansible/runner/action_plugins/fetch.py | 29 ++++++++++++---------- lib/ansible/runner/shell_plugins/sh.py | 19 ++++++++++++-- 2 files changed, 33 insertions(+), 15 deletions(-) diff --git a/lib/ansible/runner/action_plugins/fetch.py b/lib/ansible/runner/action_plugins/fetch.py index 20574e6433..2fb6631536 100644 --- a/lib/ansible/runner/action_plugins/fetch.py +++ b/lib/ansible/runner/action_plugins/fetch.py @@ -114,19 +114,22 @@ class ActionModule(object): dest = dest.replace("//","/") - # these don't fail because you may want to transfer a log file that possibly MAY exist - # but keep going to fetch other log files - if remote_checksum == '0': - result = dict(msg="unable to calculate the checksum of the remote file", file=source, changed=False) - return ReturnData(conn=conn, result=result) - if remote_checksum == '1': - if fail_on_missing: - result = dict(failed=True, msg="the remote file does not exist", file=source) - else: - result = dict(msg="the remote file does not exist, not transferring, ignored", file=source, changed=False) - return ReturnData(conn=conn, result=result) - if remote_checksum == '2': - result = dict(msg="no read permission on remote file, not transferring, ignored", file=source, changed=False) + if remote_checksum in ('0', '1', '2', '3', '4'): + # these don't fail because you may want to transfer a log file that possibly MAY exist + # but keep going to fetch other log files + if remote_checksum == '0': + result = dict(msg="unable to calculate the checksum of the remote file", file=source, changed=False) + elif remote_checksum == '1': + if fail_on_missing: + result = dict(failed=True, msg="the remote file does not exist", file=source) + else: + result = dict(msg="the remote file does not exist, not transferring, ignored", file=source, changed=False) + elif remote_checksum == '2': + result = dict(msg="no read permission on remote file, not transferring, ignored", file=source, changed=False) + elif remote_checksum == '3': + result = 
dict(msg="remote file is a directory, fetch cannot work on directories", file=source, changed=False) + elif remote_checksum == '4': + result = dict(msg="python isn't present on the remote system. Unable to fetch file", file=source, changed=False) return ReturnData(conn=conn, result=result) # calculate checksum for the local file diff --git a/lib/ansible/runner/shell_plugins/sh.py b/lib/ansible/runner/shell_plugins/sh.py index 0cf89278d6..95d48e9e7d 100644 --- a/lib/ansible/runner/shell_plugins/sh.py +++ b/lib/ansible/runner/shell_plugins/sh.py @@ -82,14 +82,29 @@ class ShellModule(object): path = pipes.quote(path) # The following test needs to be SH-compliant. BASH-isms will # not work if /bin/sh points to a non-BASH shell. - test = "rc=0; [ -r \"%s\" ] || rc=2; [ -f \"%s\" ] || rc=1; [ -d \"%s\" ] && echo 3 && exit 0" % ((path,) * 3) + # + # In the following test, each condition is a check and logical + # comparison (|| or &&) that sets the rc value. Every check is run so + # the last check in the series to fail will be the rc that is + # returned. + # + # If a check fails we error before invoking the hash functions because + # hash functions may successfully take the hash of a directory on BSDs + # (UFS filesystem?) which is not what the rest of the ansible code + # expects + # + # If all of the available hashing methods fail we fail with an rc of + # 0. This logic is added to the end of the cmd at the bottom of this + # function. 
+ + test = "rc=flag; [ -r \"%(p)s\" ] || rc=2; [ -f \"%(p)s\" ] || rc=1; [ -d \"%(p)s\" ] && rc=3; %(i)s -V 2>/dev/null || rc=4; [ x\"$rc\" != \"xflag\" ] && echo \"${rc} %(p)s\" && exit 0" % dict(p=path, i=python_interp) csums = [ "(%s -c 'import hashlib; print(hashlib.sha1(open(\"%s\", \"rb\").read()).hexdigest())' 2>/dev/null)" % (python_interp, path), # Python > 2.4 (including python3) "(%s -c 'import sha; print(sha.sha(open(\"%s\", \"rb\").read()).hexdigest())' 2>/dev/null)" % (python_interp, path), # Python == 2.4 ] cmd = " || ".join(csums) - cmd = "%s; %s || (echo \"${rc} %s\")" % (test, cmd, path) + cmd = "%s; %s || (echo \"0 %s\")" % (test, cmd, path) return cmd def build_module_command(self, env_string, shebang, cmd, rm_tmp=None): From 9a7eb577187bf77999b1ffc9b89259176266421f Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 11 Nov 2014 20:23:03 -0800 Subject: [PATCH 373/813] Some changes to FIPS compat since SLES implements it differently --- lib/ansible/module_utils/basic.py | 8 +++++++- lib/ansible/utils/__init__.py | 10 +++++++++- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index b8118ed558..4b23ccfa91 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -95,7 +95,11 @@ except ImportError: try: from hashlib import md5 as _md5 except ImportError: - from md5 import md5 as _md5 + try: + from md5 import md5 as _md5 + except ImportError: + # MD5 unavailable. Possibly FIPS mode + _md5 = None try: from hashlib import sha256 as _sha256 @@ -1248,6 +1252,8 @@ class AnsibleModule(object): Most uses of this function can use the module.sha1 function instead. ''' + if not _md5: + raise ValueError('MD5 not available. 
Possibly running in FIPS mode') return self.digest_from_file(filename, _md5()) def sha1(self, filename): diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index 770e9be6a8..06ca8144cc 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -79,7 +79,11 @@ except ImportError: try: from hashlib import md5 as _md5 except ImportError: - from md5 import md5 as _md5 + try: + from md5 import md5 as _md5 + except ImportError: + # Assume we're running in FIPS mode here + _md5 = None PASSLIB_AVAILABLE = False try: @@ -870,9 +874,13 @@ checksum_s = secure_hash_s # # MD5 will not work on systems which are FIPS-140-2 compliant. def md5s(data): + if not _md5: + raise ValueError('MD5 not available. Possibly running in FIPS mode') return secure_hash_s(data, _md5) def md5(filename): + if not _md5: + raise ValueError('MD5 not available. Possibly running in FIPS mode') return secure_hash(filename, _md5) def default(value, function): From ffc2e5a62784239da201a26f6f5ac8159cee499d Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 11 Nov 2014 21:04:07 -0800 Subject: [PATCH 374/813] Do not fail on failure of md5 checksumming tests for non_destructive tests --- test/integration/roles/test_copy/tasks/main.yml | 14 ++++++++++++-- test/integration/roles/test_stat/tasks/main.yml | 7 +++++-- 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/test/integration/roles/test_copy/tasks/main.yml b/test/integration/roles/test_copy/tasks/main.yml index fa09d37eb4..2b671c122d 100644 --- a/test/integration/roles/test_copy/tasks/main.yml +++ b/test/integration/roles/test_copy/tasks/main.yml @@ -55,9 +55,14 @@ - name: verify that the file checksums are correct assert: that: - - "copy_result.md5sum == 'c47397529fe81ab62ba3f85e9f4c71f2'" - "copy_result.checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6'" +- name: verify that the legacy md5sum is correct + assert: + that: + - "copy_result.md5sum == 'c47397529fe81ab62ba3f85e9f4c71f2'" + 
ignore_errors: True + - name: check the stat results of the file stat: path={{output_file}} register: stat_results @@ -72,9 +77,14 @@ - "stat_results.stat.isfifo == false" - "stat_results.stat.isreg == true" - "stat_results.stat.issock == false" - - "stat_results.stat.md5 == 'c47397529fe81ab62ba3f85e9f4c71f2'" - "stat_results.stat.checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6'" +- name: verify that the legacy md5sum is correct + assert: + that: + - "stat_results.stat.md5 == 'c47397529fe81ab62ba3f85e9f4c71f2'" + ignore_errors: True + - name: overwrite the file via same means copy: src=foo.txt dest={{output_file}} register: copy_result2 diff --git a/test/integration/roles/test_stat/tasks/main.yml b/test/integration/roles/test_stat/tasks/main.yml index b0b16d7f9e..f34f77a936 100644 --- a/test/integration/roles/test_stat/tasks/main.yml +++ b/test/integration/roles/test_stat/tasks/main.yml @@ -45,7 +45,6 @@ - "'issock' in stat_result.stat" - "'isuid' in stat_result.stat" - "'md5' in stat_result.stat" - - "stat_result.stat.md5 == '5eb63bbbe01eeed093cb22bb8f5acdc3'" - "'checksum' in stat_result.stat" - "stat_result.stat.checksum == '2aae6c35c94fcfb415dbe95f408b9ce91ee846ed'" - "'mode' in stat_result.stat" # why is this 420? 
@@ -63,4 +62,8 @@ - "'xgrp' in stat_result.stat" - "'xoth' in stat_result.stat" - "'xusr' in stat_result.stat" - + +- assert: + that: + - "stat_result.stat.md5 == '5eb63bbbe01eeed093cb22bb8f5acdc3'" + ignore_errors: True From 5bc81f9ae7a0b13dab8a741081261660f1080727 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 10 Nov 2014 23:41:50 -0600 Subject: [PATCH 375/813] Add ability to detect prompts in stdout from run_command --- lib/ansible/module_utils/basic.py | 30 +++++++++++++++++++++++------- 1 file changed, 23 insertions(+), 7 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 4b23ccfa91..cee6510f34 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1370,7 +1370,7 @@ class AnsibleModule(object): # rename might not preserve context self.set_context_if_different(dest, context, False) - def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None, use_unsafe_shell=False): + def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None, use_unsafe_shell=False, prompt_regex=None): ''' Execute a command, returns rc, stdout, and stderr. args is the command to run @@ -1378,12 +1378,17 @@ class AnsibleModule(object): If args is a string and use_unsafe_shell=False it will split args to a list and run with shell=False If args is a string and use_unsafe_shell=True it run with shell=True. Other arguments: - - check_rc (boolean) Whether to call fail_json in case of - non zero RC. Default is False. - - close_fds (boolean) See documentation for subprocess.Popen(). - Default is True. - - executable (string) See documentation for subprocess.Popen(). - Default is None. + - check_rc (boolean) Whether to call fail_json in case of + non zero RC. Default is False. + - close_fds (boolean) See documentation for subprocess.Popen(). 
+ Default is True. + - executable (string) See documentation for subprocess.Popen(). + Default is None. + - prompt_regex (string) A regex string (not a compiled regex) which + can be used to detect prompts in the stdout + which would otherwise cause the execution + to hang (especially if no input data is + specified) ''' shell = False @@ -1399,6 +1404,13 @@ class AnsibleModule(object): msg = "Argument 'args' to run_command must be list or string" self.fail_json(rc=257, cmd=args, msg=msg) + prompt_re = None + if prompt_regex: + try: + prompt_re = re.compile(prompt_regex, re.MULTILINE) + except re.error: + self.fail_json(msg="invalid prompt regular expression given to run_command") + # expand things like $HOME and ~ if not shell: args = [ os.path.expandvars(os.path.expanduser(x)) for x in args ] @@ -1492,6 +1504,10 @@ class AnsibleModule(object): stderr += dat if dat == '': rpipes.remove(cmd.stderr) + # if we're checking for prompts, do it now + if prompt_re: + if prompt_re.search(stdout) and not data: + return (257, stdout, "A prompt was encountered while running a command, but no input data was specified") # only break out if no pipes are left to read or # the pipes are completely read and # the process is terminated From b828b2578462610bd3d29974f19e4f3235fb46ce Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 12 Nov 2014 09:28:27 -0800 Subject: [PATCH 376/813] Add a fips fact and use it for integration tests --- lib/ansible/module_utils/facts.py | 8 ++++++++ test/integration/roles/test_copy/tasks/main.yml | 6 +++--- test/integration/roles/test_stat/tasks/main.yml | 2 +- 3 files changed, 12 insertions(+), 4 deletions(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 09332e00be..5ceeb405d5 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -125,6 +125,7 @@ class Facts(object): self.get_cmdline() self.get_public_ssh_host_keys() self.get_selinux_facts() + self.get_fips_facts() 
self.get_pkg_mgr_facts() self.get_lsb_facts() self.get_date_time_facts() @@ -486,6 +487,13 @@ class Facts(object): self.facts['selinux']['type'] = 'unknown' + def get_fips_facts(self): + self.facts['fips'] = False + data = get_file_content('/proc/sys/crypto/fips_enabled') + if data and data == '1': + self.facts['fips'] = True + + def get_date_time_facts(self): self.facts['date_time'] = {} diff --git a/test/integration/roles/test_copy/tasks/main.yml b/test/integration/roles/test_copy/tasks/main.yml index 2b671c122d..7da4d6ad32 100644 --- a/test/integration/roles/test_copy/tasks/main.yml +++ b/test/integration/roles/test_copy/tasks/main.yml @@ -61,7 +61,7 @@ assert: that: - "copy_result.md5sum == 'c47397529fe81ab62ba3f85e9f4c71f2'" - ignore_errors: True + when: ansible_fips != True - name: check the stat results of the file stat: path={{output_file}} @@ -83,7 +83,7 @@ assert: that: - "stat_results.stat.md5 == 'c47397529fe81ab62ba3f85e9f4c71f2'" - ignore_errors: True + when: ansible_fips != True - name: overwrite the file via same means copy: src=foo.txt dest={{output_file}} @@ -242,7 +242,7 @@ that: - stat_link_result.stat.islnk -- name: get the md5 of the link target +- name: get the checksum of the link target shell: sha1sum {{output_dir}}/follow_test | cut -f1 -sd ' ' register: target_file_result diff --git a/test/integration/roles/test_stat/tasks/main.yml b/test/integration/roles/test_stat/tasks/main.yml index f34f77a936..0019fda2ae 100644 --- a/test/integration/roles/test_stat/tasks/main.yml +++ b/test/integration/roles/test_stat/tasks/main.yml @@ -66,4 +66,4 @@ - assert: that: - "stat_result.stat.md5 == '5eb63bbbe01eeed093cb22bb8f5acdc3'" - ignore_errors: True + when: ansible_fips != True From f109b818bf57a3fbf99dc03bf7b8adc262c0d4e8 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 12 Nov 2014 11:45:39 -0800 Subject: [PATCH 377/813] Disable pip test on FIPS enabled systems because pip unconditionally uses md5 --- test/integration/destructive.yml | 4 
+++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test/integration/destructive.yml b/test/integration/destructive.yml index d341c4916b..bb76bf3026 100644 --- a/test/integration/destructive.yml +++ b/test/integration/destructive.yml @@ -2,7 +2,9 @@ gather_facts: True roles: - { role: test_service, tags: test_service } - - { role: test_pip, tags: test_pip } + # Current pip unconditionally uses md5. We can re-enable if pip switches + # to a different hash or allows us to not check md5 + - { role: test_pip, tags: test_pip, when: ansible_fips != True } - { role: test_gem, tags: test_gem } - { role: test_yum, tags: test_yum } - { role: test_apt, tags: test_apt } From 531eaddb3d1a8fd20e25eb72622fb438d29acaff Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 12 Nov 2014 11:46:17 -0800 Subject: [PATCH 378/813] Remove second invocation of mysql_user integration test Seems to have been mistakenly added when mysql_variable tests were added. --- test/integration/destructive.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/test/integration/destructive.yml b/test/integration/destructive.yml index bb76bf3026..07e86e36f2 100644 --- a/test/integration/destructive.yml +++ b/test/integration/destructive.yml @@ -11,5 +11,4 @@ - { role: test_apt_repository, tags: test_apt_repository } - { role: test_mysql_db, tags: test_mysql_db} - { role: test_mysql_user, tags: test_mysql_user} - - { role: test_mysql_user, tags: test_mysql_user} - { role: test_mysql_variables, tags: test_mysql_variables} From e05b22e0d1b60e873f9c69c4b123378f243f7f76 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 12 Nov 2014 11:47:11 -0800 Subject: [PATCH 379/813] Skip some md5 related unit tests when running in fips mode --- test/units/TestUtils.py | 27 +++++++++++++++++++++------ test/units/TestVault.py | 13 ++++++++++++- 2 files changed, 33 insertions(+), 7 deletions(-) diff --git a/test/units/TestUtils.py b/test/units/TestUtils.py index 178eaae50c..3929ed0788 100644 --- 
a/test/units/TestUtils.py +++ b/test/units/TestUtils.py @@ -28,9 +28,18 @@ sys.setdefaultencoding("utf8") class TestUtils(unittest.TestCase): + def _is_fips(self): + try: + data = open('/proc/sys/crypto/fips_enabled').read().strip() + except: + return False + if data != '1': + return False + return True + def test_before_comment(self): ''' see if we can detect the part of a string before a comment. Used by INI parser in inventory ''' - + input = "before # comment" expected = "before " actual = ansible.utils.before_comment(input) @@ -357,10 +366,14 @@ class TestUtils(unittest.TestCase): dict(foo=dict(bar='qux'))) def test_md5s(self): + if self._is_fips(): + raise SkipTest('MD5 unavailable on FIPs enabled systems') self.assertEqual(ansible.utils.md5s('ansible'), '640c8a5376aa12fa15cf02130ce239a6') # Need a test that causes UnicodeEncodeError See 4221 def test_md5(self): + if self._is_fips(): + raise SkipTest('MD5 unavailable on FIPs enabled systems') self.assertEqual(ansible.utils.md5(os.path.join(os.path.dirname(__file__), 'ansible.cfg')), 'fb7b5b90ea63f04bde33e804b6fad42c') self.assertEqual(ansible.utils.md5(os.path.join(os.path.dirname(__file__), 'ansible.cf')), @@ -373,7 +386,7 @@ class TestUtils(unittest.TestCase): def test_checksum(self): self.assertEqual(ansible.utils.checksum(os.path.join(os.path.dirname(__file__), 'ansible.cfg')), '658b67c8ac7595adde7048425ff1f9aba270721a') - self.assertEqual(ansible.utils.md5(os.path.join(os.path.dirname(__file__), 'ansible.cf')), + self.assertEqual(ansible.utils.checksum(os.path.join(os.path.dirname(__file__), 'ansible.cf')), None) def test_default(self): @@ -443,10 +456,6 @@ class TestUtils(unittest.TestCase): hash = ansible.utils.do_encrypt('ansible', 'sha256_crypt') self.assertTrue(passlib.hash.sha256_crypt.verify('ansible', hash)) - hash = ansible.utils.do_encrypt('ansible', 'md5_crypt', salt_size=4) - self.assertTrue(passlib.hash.md5_crypt.verify('ansible', hash)) - - try: ansible.utils.do_encrypt('ansible', 
'ansible') except ansible.errors.AnsibleError: @@ -454,6 +463,12 @@ class TestUtils(unittest.TestCase): else: raise AssertionError('Incorrect exception, expected AnsibleError') + def test_do_encrypt_md5(self): + if self._is_fips: + raise SkipTest('MD5 unavailable on FIPS systems') + hash = ansible.utils.do_encrypt('ansible', 'md5_crypt', salt_size=4) + self.assertTrue(passlib.hash.md5_crypt.verify('ansible', hash)) + def test_last_non_blank_line(self): self.assertEqual(ansible.utils.last_non_blank_line('a\n\nb\n\nc'), 'c') self.assertEqual(ansible.utils.last_non_blank_line(''), '') diff --git a/test/units/TestVault.py b/test/units/TestVault.py index 415d5c14aa..b720d72e84 100644 --- a/test/units/TestVault.py +++ b/test/units/TestVault.py @@ -36,6 +36,15 @@ except ImportError: class TestVaultLib(TestCase): + def _is_fips(self): + try: + data = open('/proc/sys/crypto/fips_enabled').read().strip() + except: + return False + if data != '1': + return False + return True + def test_methods_exist(self): v = VaultLib('ansible') slots = ['is_encrypted', @@ -77,6 +86,8 @@ class TestVaultLib(TestCase): assert v.version == "9.9" def test_encrypt_decrypt_aes(self): + if self._is_fips(): + raise SkipTest('MD5 not available on FIPS enabled systems') if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: raise SkipTest v = VaultLib('ansible') @@ -84,7 +95,7 @@ class TestVaultLib(TestCase): enc_data = v.encrypt("foobar") dec_data = v.decrypt(enc_data) assert enc_data != "foobar", "encryption failed" - assert dec_data == "foobar", "decryption failed" + assert dec_data == "foobar", "decryption failed" def test_encrypt_decrypt_aes256(self): if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: From 44eb19d5535d0fccb6e5817138a4167db71eac37 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 12 Nov 2014 12:05:25 -0800 Subject: [PATCH 380/813] Make VaultEditor Tests compatible with FIPS mode Migrate one test to vault-1.1. 
Skip the two other vault 1.0 tests if running on a FIPS enabled system --- test/units/TestVaultEditor.py | 29 ++++++++++++++----- ...oo-ansible-1.0-ansible-newline-ansible.yml | 4 --- ...oo-ansible-1.1-ansible-newline-ansible.yml | 6 ++++ 3 files changed, 27 insertions(+), 12 deletions(-) delete mode 100644 test/units/vault_test_data/foo-ansible-1.0-ansible-newline-ansible.yml create mode 100644 test/units/vault_test_data/foo-ansible-1.1-ansible-newline-ansible.yml diff --git a/test/units/TestVaultEditor.py b/test/units/TestVaultEditor.py index cf7515370a..cfa5bc13e6 100644 --- a/test/units/TestVaultEditor.py +++ b/test/units/TestVaultEditor.py @@ -37,6 +37,15 @@ except ImportError: class TestVaultEditor(TestCase): + def _is_fips(self): + try: + data = open('/proc/sys/crypto/fips_enabled').read().strip() + except: + return False + if data != '1': + return False + return True + def test_methods_exist(self): v = VaultEditor(None, None, None) slots = ['create_file', @@ -51,6 +60,8 @@ class TestVaultEditor(TestCase): assert hasattr(v, slot), "VaultLib is missing the %s method" % slot def test_decrypt_1_0(self): + if self._is_fips(): + raise SkipTest('Vault-1.0 will not function on FIPS enabled systems') if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: raise SkipTest dirpath = tempfile.mkdtemp() @@ -75,18 +86,18 @@ class TestVaultEditor(TestCase): assert error_hit == False, "error decrypting 1.0 file" assert fdata.strip() == "foo", "incorrect decryption of 1.0 file: %s" % fdata.strip() - def test_decrypt_1_0_newline(self): + def test_decrypt_1_1_newline(self): if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: raise SkipTest dirpath = tempfile.mkdtemp() - filename = os.path.join(dirpath, "foo-ansible-1.0-ansible-newline-ansible.yml") + filename = os.path.join(dirpath, "foo-ansible-1.1-ansible-newline-ansible.yml") shutil.rmtree(dirpath) shutil.copytree("vault_test_data", dirpath) ve = VaultEditor(None, "ansible\nansible\n", filename) # make sure the password 
functions for the cipher error_hit = False - try: + try: ve.decrypt_file() except errors.AnsibleError, e: error_hit = True @@ -97,8 +108,8 @@ class TestVaultEditor(TestCase): f.close() shutil.rmtree(dirpath) - assert error_hit == False, "error decrypting 1.0 file with newline in password" - #assert fdata.strip() == "foo", "incorrect decryption of 1.0 file: %s" % fdata.strip() + assert error_hit == False, "error decrypting 1.1 file with newline in password" + #assert fdata.strip() == "foo", "incorrect decryption of 1.1 file: %s" % fdata.strip() def test_decrypt_1_1(self): @@ -112,7 +123,7 @@ class TestVaultEditor(TestCase): # make sure the password functions for the cipher error_hit = False - try: + try: ve.decrypt_file() except errors.AnsibleError, e: error_hit = True @@ -123,11 +134,13 @@ class TestVaultEditor(TestCase): f.close() shutil.rmtree(dirpath) - assert error_hit == False, "error decrypting 1.0 file" - assert fdata.strip() == "foo", "incorrect decryption of 1.0 file: %s" % fdata.strip() + assert error_hit == False, "error decrypting 1.1 file" + assert fdata.strip() == "foo", "incorrect decryption of 1.1 file: %s" % fdata.strip() def test_rekey_migration(self): + if self._is_fips(): + raise SkipTest('Vault-1.0 will not function on FIPS enabled systems') if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: raise SkipTest dirpath = tempfile.mkdtemp() diff --git a/test/units/vault_test_data/foo-ansible-1.0-ansible-newline-ansible.yml b/test/units/vault_test_data/foo-ansible-1.0-ansible-newline-ansible.yml deleted file mode 100644 index dd4e6e746b..0000000000 --- a/test/units/vault_test_data/foo-ansible-1.0-ansible-newline-ansible.yml +++ /dev/null @@ -1,4 +0,0 @@ -$ANSIBLE_VAULT;1.0;AES -53616c7465645f5ff0442ae8b08e2ff316d0d6512013185df7aded44f3c0eeef1b7544d078be1fe7 -ed88d0fedcb11928df45558f4b7f80fce627fbb08c5288885ab053f4129175779a8f24f5c1113731 -7d22cee14284670953c140612edf62f92485123fc4f15099ffe776e906e08145 diff --git 
a/test/units/vault_test_data/foo-ansible-1.1-ansible-newline-ansible.yml b/test/units/vault_test_data/foo-ansible-1.1-ansible-newline-ansible.yml new file mode 100644 index 0000000000..6e025a1c40 --- /dev/null +++ b/test/units/vault_test_data/foo-ansible-1.1-ansible-newline-ansible.yml @@ -0,0 +1,6 @@ +$ANSIBLE_VAULT;1.1;AES256 +61333063333663376535373431643063613232393438623732643966613962363563383132363631 +3235363730623635323039623439343561313566313361630a313632643338613636303637623765 +64356531643630303636323064336439393335313836366235336464633635376339663830333232 +6338353337663139320a646632386131646431656165656338633535386535623236393265373634 +37656134633661333935346434363237613435323865356234323264663838643931 From 40caa11082e08c0ef840ca33f01e1543363ab510 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 12 Nov 2014 16:23:49 -0500 Subject: [PATCH 381/813] implemented info action for galaxy --- bin/ansible-galaxy | 72 ++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 63 insertions(+), 9 deletions(-) diff --git a/bin/ansible-galaxy b/bin/ansible-galaxy index 9018e6c205..9a73023b83 100755 --- a/bin/ansible-galaxy +++ b/bin/ansible-galaxy @@ -135,6 +135,7 @@ An optional section for the role authors to include contact information, or a we #------------------------------------------------------------------------------------- VALID_ACTIONS = ("init", "info", "install", "list", "remove") +SKIP_INFO_KEYS = ("platforms","readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" ) def get_action(args): """ @@ -237,6 +238,7 @@ def exit_without_ignore(options, rc=1): print '- you can use --ignore-errors to skip failed roles.' 
sys.exit(rc) + #------------------------------------------------------------------------------------- # Galaxy API functions #------------------------------------------------------------------------------------- @@ -257,7 +259,7 @@ def api_get_config(api_server): except: return None -def api_lookup_role_by_name(api_server, role_name): +def api_lookup_role_by_name(api_server, role_name, notify=True): """ Uses the Galaxy API to do a lookup on the role owner/name. """ @@ -268,7 +270,8 @@ def api_lookup_role_by_name(api_server, role_name): parts = role_name.split(".") user_name = ".".join(parts[0:-1]) role_name = parts[-1] - print "- downloading role '%s', owned by %s" % (role_name, user_name) + if notify: + print "- downloading role '%s', owned by %s" % (role_name, user_name) except: parser.print_help() print "- invalid role name (%s). Specify role as format: username.rolename" % role_name @@ -640,7 +643,7 @@ def execute_init(args, options, parser): categories = [] if not offline: categories = api_get_list(api_server, "categories") or [] - + # group the list of platforms from the api based # on their names, with the release field being # appended to a list of versions @@ -676,7 +679,57 @@ def execute_info(args, options, parser): from the galaxy API. 
""" - pass + if len(args) == 0: + # the user needs to specify a role + parser.print_help() + print "- you must specify a user/role name" + sys.exit(1) + + api_server = get_opt(options, "api_server", "galaxy.ansible.com") + api_config = api_get_config(api_server) + roles_path = get_opt(options, "roles_path") + + for role in args: + + role_info = {} + + install_info = get_galaxy_install_info(role, options) + if install_info: + if 'version' in install_info: + install_info['intalled_version'] = install_info['version'] + install_info.pop('version', None) + role_info.update(install_info) + + remote_data = api_lookup_role_by_name(api_server, role, False) + if remote_data: + role_info.update(remote_data) + + metadata = get_role_metadata(role, options) + if metadata: + role_info.update(metadata) + + role_spec = ansible.utils.role_spec_parse(role) + if role_spec: + role_info.update(role_spec) + + if role_info: + print "- %s:" % (role) + import pprint + for k in sorted(role_info.keys()): + + if k in SKIP_INFO_KEYS: + continue + + if isinstance(role_info[k], dict): + print "\t%s: " % (k) + for key in sorted(role_info[k].keys()): + if key in SKIP_INFO_KEYS: + continue + print "\t\t%s: %s" % (key, role_info[k][key]) + else: + print "\t%s: %s" % (k, role_info[k]) + else: + print "- the role %s was not found" % role def execute_install(args, options, parser): """ @@ -687,23 +740,24 @@ def execute_install(args, options, parser): """ role_file = get_opt(options, "role_file", None) - api_server = get_opt(options, "api_server", "galaxy.ansible.com") - no_deps = get_opt(options, "no_deps", False) - roles_path = get_opt(options, "roles_path") - if len(args) == 0 and not role_file: + if len(args) == 0 and role_file is None: # the user needs to specify one of either --role-file # or specify a single user/role name parser.print_help() print "- you must specify a user/role name or a roles file" sys.exit() - elif len(args) == 1 and role_file: + elif len(args) == 1 and not role_file is None: 
# using a role file is mutually exclusive of specifying # the role name on the command line parser.print_help() print "- please specify a user/role name, or a roles file, but not both" sys.exit(1) + api_server = get_opt(options, "api_server", "galaxy.ansible.com") + no_deps = get_opt(options, "no_deps", False) + roles_path = get_opt(options, "roles_path") + roles_done = [] if role_file: f = open(role_file, 'r') From 0f2f022bb0760b079bb3cc832b29e986e1bb970b Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 12 Nov 2014 17:55:58 -0500 Subject: [PATCH 382/813] removed YAML as documented local facts format as it is not supported and would require extra modules on target. --- docsite/rst/playbooks_variables.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index f9e3dda4e2..738148106b 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -671,7 +671,7 @@ For instance, what if you want users to be able to control some aspect about how .. note:: Perhaps "local facts" is a bit of a misnomer, it means "locally supplied user values" as opposed to "centrally supplied user values", or what facts are -- "locally dynamically determined values". If a remotely managed system has an "/etc/ansible/facts.d" directory, any files in this directory -ending in ".fact", can be YAML, JSON, INI, or executable files returning JSON, and these can supply local facts in Ansible. +ending in ".fact", can be JSON, INI, or executable files returning JSON, and these can supply local facts in Ansible. For instance assume a /etc/ansible/facts.d/preferences.fact:: @@ -689,7 +689,7 @@ And you will see the following fact added:: "ansible_local": { "preferences": { "general": { - "asdf" : "1", + "asdf" : "1", "bar" : "2" } } @@ -707,7 +707,7 @@ can allow that fact to be used during that particular play. 
Otherwise, it will Here is an example of what that might look like:: - hosts: webservers - tasks: + tasks: - name: create directory for ansible custom facts file: state=directory recurse=yes path=/etc/ansible/facts.d - name: install custom impi fact From 697582fdde7c4b1a72e6c673548d95ba82a6d1f7 Mon Sep 17 00:00:00 2001 From: Jon Hadfield Date: Thu, 13 Nov 2014 20:20:08 +0000 Subject: [PATCH 383/813] Fix issue whereby file will be transferred before checking the creates argument. --- .../runner/action_plugins/unarchive.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/lib/ansible/runner/action_plugins/unarchive.py b/lib/ansible/runner/action_plugins/unarchive.py index 2a1c8d1cd6..87bae2674c 100644 --- a/lib/ansible/runner/action_plugins/unarchive.py +++ b/lib/ansible/runner/action_plugins/unarchive.py @@ -49,11 +49,30 @@ class ActionModule(object): source = options.get('src', None) dest = options.get('dest', None) copy = utils.boolean(options.get('copy', 'yes')) + creates = options.get('creates', None) if source is None or dest is None: result = dict(failed=True, msg="src (or content) and dest are required") return ReturnData(conn=conn, result=result) + if creates: + # do not run the command if the line contains creates=filename + # and the filename already exists. This allows idempotence + # of command executions. + module_args_tmp = "path=%s" % creates + module_return = self.runner._execute_module(conn, tmp, 'stat', module_args_tmp, inject=inject, + complex_args=complex_args, persist_files=True) + stat = module_return.result.get('stat', None) + if stat and stat.get('exists', False): + return ReturnData( + conn=conn, + comm_ok=True, + result=dict( + skipped=True, + msg=("skipped, since %s exists" % creates) + ) + ) + dest = self.runner._remote_expand_user(conn, dest, tmp) # CCTODO: Fix path for Windows hosts. 
source = template.template(self.runner.basedir, os.path.expanduser(source), inject) if copy: From 504995bda220cb271d080ae569c9186798dabc71 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 13 Nov 2014 18:32:27 -0500 Subject: [PATCH 384/813] allow fact objects to be instantiated w/o triggering all fact collection this opens the ability to do specific facts at much lower cost. --- lib/ansible/module_utils/facts.py | 31 +++++++++++++++++-------------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 5ceeb405d5..0ad70f61a9 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -46,7 +46,7 @@ except ImportError: import simplejson as json # -------------------------------------------------------------- -# timeout function to make sure some fact gathering +# timeout function to make sure some fact gathering # steps do not exceed a time limit class TimeoutError(Exception): @@ -118,20 +118,23 @@ class Facts(object): { 'path' : '/usr/bin/pkg', 'name' : 'pkg' }, ] - def __init__(self): + def __init__(self, load_on_init=True): + self.facts = {} - self.get_platform_facts() - self.get_distribution_facts() - self.get_cmdline() - self.get_public_ssh_host_keys() - self.get_selinux_facts() - self.get_fips_facts() - self.get_pkg_mgr_facts() - self.get_lsb_facts() - self.get_date_time_facts() - self.get_user_facts() - self.get_local_facts() - self.get_env_facts() + + if load_on_init: + self.get_platform_facts() + self.get_distribution_facts() + self.get_cmdline() + self.get_public_ssh_host_keys() + self.get_selinux_facts() + self.get_fips_facts() + self.get_pkg_mgr_facts() + self.get_lsb_facts() + self.get_date_time_facts() + self.get_user_facts() + self.get_local_facts() + self.get_env_facts() def populate(self): return self.facts From 11e79d9627dfdef641252f54505a717afaec9b2c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 14 Nov 2014 07:52:51 
-0800 Subject: [PATCH 385/813] Update core modules to pull in latest apt fixes --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 6317d3a988..6be2fbb1c5 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 6317d3a988f7269340cb7a0d105d2c671ca1cd1e +Subproject commit 6be2fbb1c577d34b0dbb51c7338da0b79286658f From 6030be3835758797c2520898bc208936a6386098 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 14 Nov 2014 07:55:10 -0800 Subject: [PATCH 386/813] Add unittest for discrete functions in the apt module --- test/units/module_tests/TestApt.py | 42 ++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 test/units/module_tests/TestApt.py diff --git a/test/units/module_tests/TestApt.py b/test/units/module_tests/TestApt.py new file mode 100644 index 0000000000..e7f2dafc95 --- /dev/null +++ b/test/units/module_tests/TestApt.py @@ -0,0 +1,42 @@ +import collections +import mock +import os +import unittest + +from ansible.modules.core.packaging.os.apt import ( + expand_pkgspec_from_fnmatches, +) + + +class AptExpandPkgspecTestCase(unittest.TestCase): + + def setUp(self): + FakePackage = collections.namedtuple("Package", ("name",)) + self.fake_cache = [ FakePackage("apt"), + FakePackage("apt-utils"), + FakePackage("not-selected"), + ] + + def test_trivial(self): + foo = ["apt"] + self.assertEqual( + expand_pkgspec_from_fnmatches(None, foo, self.fake_cache), foo) + + def test_version_wildcard(self): + foo = ["apt=1.0*"] + self.assertEqual( + expand_pkgspec_from_fnmatches(None, foo, self.fake_cache), foo) + + def test_pkgname_wildcard_version_wildcard(self): + foo = ["apt*=1.0*"] + m_mock = mock.Mock() + self.assertEqual( + expand_pkgspec_from_fnmatches(m_mock, foo, self.fake_cache), + ['apt', 'apt-utils']) + + def test_pkgname_expands(self): + foo = ["apt*"] + m_mock = mock.Mock() + 
self.assertEqual( + expand_pkgspec_from_fnmatches(m_mock, foo, self.fake_cache), + ["apt", "apt-utils"]) From dd60036fb9778dde702b73e2542b8e05af9ad1ff Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 14 Nov 2014 11:13:36 -0500 Subject: [PATCH 387/813] added changed status to return when new early create check is triggered, added skipped check to test on create existing --- lib/ansible/runner/action_plugins/unarchive.py | 1 + test/integration/roles/test_unarchive/tasks/main.yml | 1 + 2 files changed, 2 insertions(+) diff --git a/lib/ansible/runner/action_plugins/unarchive.py b/lib/ansible/runner/action_plugins/unarchive.py index 87bae2674c..b528a25a39 100644 --- a/lib/ansible/runner/action_plugins/unarchive.py +++ b/lib/ansible/runner/action_plugins/unarchive.py @@ -69,6 +69,7 @@ class ActionModule(object): comm_ok=True, result=dict( skipped=True, + changed=False, msg=("skipped, since %s exists" % creates) ) ) diff --git a/test/integration/roles/test_unarchive/tasks/main.yml b/test/integration/roles/test_unarchive/tasks/main.yml index 073ccf9145..7caa68e65c 100644 --- a/test/integration/roles/test_unarchive/tasks/main.yml +++ b/test/integration/roles/test_unarchive/tasks/main.yml @@ -87,6 +87,7 @@ assert: that: - "unarchive02c.changed == false" + - "unarchive02c.skipped == true" - name: remove our tar.gz unarchive destination file: path={{output_dir}}/test-unarchive-tar-gz state=absent From a1d990a67385f9e3da6193ea251cef13511265bd Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 14 Nov 2014 10:25:18 -0600 Subject: [PATCH 388/813] Minor tweaks to v2 playbook iterator to support executor testing --- v2/ansible/executor/playbook_iterator.py | 58 ++++++++++++++++++------ 1 file changed, 43 insertions(+), 15 deletions(-) diff --git a/v2/ansible/executor/playbook_iterator.py b/v2/ansible/executor/playbook_iterator.py index 0d4f09b1e4..88bec5a331 100644 --- a/v2/ansible/executor/playbook_iterator.py +++ b/v2/ansible/executor/playbook_iterator.py @@ -31,37 
+31,49 @@ class PlaybookState: self._cur_play = 0 self._task_list = None self._cur_task_pos = 0 + self._done = False - def next(self): + def next(self, peek=False): ''' Determines and returns the next available task from the playbook, advancing through the list of plays as it goes. ''' + task = None + + # we save these locally so that we can peek at the next task + # without updating the internal state of the iterator + cur_play = self._cur_play + task_list = self._task_list + cur_task_pos = self._cur_task_pos + while True: - # when we hit the end of the playbook entries list, we return - # None to indicate we're there - if self._cur_play > len(self._parent_iterator._playbook._entries) - 1: + # when we hit the end of the playbook entries list, we set a flag + # and return None to indicate we're there + # FIXME: accessing the entries and parent iterator playbook members + # should be done through accessor functions + if self._done or cur_play > len(self._parent_iterator._playbook._entries) - 1: + self._done = True return None # initialize the task list by calling the .compile() method # on the play, which will call compile() for all child objects - if self._task_list is None: - self._task_list = self._parent_iterator._playbook._entries[self._cur_play].compile() + if task_list is None: + task_list = self._parent_iterator._playbook._entries[cur_play].compile() # if we've hit the end of this plays task list, move on to the next # and reset the position values for the next iteration - if self._cur_task_pos > len(self._task_list) - 1: - self._cur_play += 1 - self._task_list = None - self._cur_task_pos = 0 + if cur_task_pos > len(task_list) - 1: + cur_play += 1 + task_list = None + cur_task_pos = 0 continue else: # FIXME: do tag/conditional evaluation here and advance # the task position if it should be skipped without # returning a task - task = self._task_list[self._cur_task_pos] - self._cur_task_pos += 1 + task = task_list[cur_task_pos] + cur_task_pos += 1 # Skip the 
task if it is the member of a role which has already # been run, unless the role allows multiple executions @@ -71,7 +83,16 @@ class PlaybookState: if task._role.has_run() and not task._role._metadata._allow_duplicates: continue - return task + # Break out of the while loop now that we have our task + break + + # If we're not just peeking at the next task, save the internal state + if not peek: + self._cur_play = cur_play + self._task_list = task_list + self._cur_task_pos = cur_task_pos + + return task class PlaybookIterator: @@ -84,14 +105,21 @@ class PlaybookIterator: self._playbook = playbook self._log_manager = log_manager self._host_entries = dict() + self._first_host = None # build the per-host dictionary of playbook states for host in inventory.get_hosts(): + if self._first_host is None: + self._first_host = host self._host_entries[host.get_name()] = PlaybookState(parent_iterator=self) - def get_next_task_for_host(self, host): + def get_next_task(self, peek=False): + ''' returns the next task for host[0] ''' + return self._host_entries[self._first_host.get_name()].next(peek=peek) + + def get_next_task_for_host(self, host, peek=False): ''' fetch the next task for the given host ''' if host.get_name() not in self._host_entries: raise AnsibleError("invalid host specified for playbook iteration") - return self._host_entries[host.get_name()].next() + return self._host_entries[host.get_name()].next(peek=peek) From b95475c1f98fb414e8a5a9c7bc3ab26aaf4cac22 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 14 Nov 2014 08:47:41 -0800 Subject: [PATCH 389/813] Unittest the get_split_image_tag function in the docker module --- lib/ansible/modules/core | 2 +- test/units/module_tests/TestDocker.py | 20 ++++++++++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) create mode 100644 test/units/module_tests/TestDocker.py diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 6be2fbb1c5..c6522620c5 160000 --- a/lib/ansible/modules/core +++ 
b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 6be2fbb1c577d34b0dbb51c7338da0b79286658f +Subproject commit c6522620c562d24031ad32187de83c3768df3c77 diff --git a/test/units/module_tests/TestDocker.py b/test/units/module_tests/TestDocker.py new file mode 100644 index 0000000000..f381620346 --- /dev/null +++ b/test/units/module_tests/TestDocker.py @@ -0,0 +1,20 @@ +import collections +import mock +import os +import unittest + +from ansible.modules.core.cloud.docker.docker import get_split_image_tag + +class DockerSplitImageTagTestCase(unittest.TestCase): + + def test_trivial(self): + self.assertEqual(get_split_image_tag('test'), ('test', 'latest')) + + def test_with_org_name(self): + self.assertEqual(get_split_image_tag('ansible/centos7-ansible'), ('ansible/centos7-ansible', 'latest')) + + def test_with_tag(self): + self.assertEqual(get_split_image_tag('test:devel'), ('test', 'devel')) + + def test_with_tag_and_org_name(self): + self.assertEqual(get_split_image_tag('ansible/centos7-ansible:devel'), ('ansible/centos7-ansible', 'devel')) From d14dcdc2225ea2d73781da19744a581e2c4b008d Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 14 Nov 2014 08:59:36 -0800 Subject: [PATCH 390/813] Docker test does not require the mock module --- test/units/module_tests/TestDocker.py | 1 - 1 file changed, 1 deletion(-) diff --git a/test/units/module_tests/TestDocker.py b/test/units/module_tests/TestDocker.py index f381620346..b8c8cf1e23 100644 --- a/test/units/module_tests/TestDocker.py +++ b/test/units/module_tests/TestDocker.py @@ -1,5 +1,4 @@ import collections -import mock import os import unittest From dd9dc637d1dd9aea5bd2aad8ec77eed5af9416a7 Mon Sep 17 00:00:00 2001 From: Mario de Frutos Date: Fri, 5 Sep 2014 08:40:27 +0200 Subject: [PATCH 391/813] Test install with wildcard --- test/integration/roles/test_apt/tasks/apt.yml | 23 +++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/test/integration/roles/test_apt/tasks/apt.yml 
b/test/integration/roles/test_apt/tasks/apt.yml index d1cdeeb1a2..32b3806b85 100644 --- a/test/integration/roles/test_apt/tasks/apt.yml +++ b/test/integration/roles/test_apt/tasks/apt.yml @@ -77,4 +77,27 @@ that: - "not apt_result.changed" +# UNINSTALL AGAIN +- name: uninstall hello with apt + apt: pkg=hello state=absent purge=yes + register: apt_result + +# INSTALL WITH VERSION WILDCARD +- name: install hello with apt + apt: name=hello=2.7* state=present + register: apt_result + +- name: check hello with wildcard with dpkg + shell: dpkg --get-selections | fgrep hello + failed_when: False + register: dpkg_result + +- debug: var=apt_result +- debug: var=dpkg_result + +- name: verify installation of hello + assert: + that: + - "apt_result.changed" + - "dpkg_result.rc == 0" From c3c6a07e6524f8f489659d021f95f1a2be7aa663 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 14 Nov 2014 10:02:21 -0800 Subject: [PATCH 392/813] Update apt test for newer version of test package and pull in an apt module fix --- lib/ansible/modules/core | 2 +- test/integration/roles/test_apt/tasks/apt.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index c6522620c5..fb4854ebcb 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit c6522620c562d24031ad32187de83c3768df3c77 +Subproject commit fb4854ebcbc35b3038530de91a472ef7d0b7b710 diff --git a/test/integration/roles/test_apt/tasks/apt.yml b/test/integration/roles/test_apt/tasks/apt.yml index 32b3806b85..5457c2ef78 100644 --- a/test/integration/roles/test_apt/tasks/apt.yml +++ b/test/integration/roles/test_apt/tasks/apt.yml @@ -84,7 +84,7 @@ # INSTALL WITH VERSION WILDCARD - name: install hello with apt - apt: name=hello=2.7* state=present + apt: name=hello=2.* state=present register: apt_result - name: check hello with wildcard with dpkg From 8d37c6f091579313dc3e44dbe16adc69c2c23056 Mon Sep 17 00:00:00 2001 From: Brian 
Coca Date: Fri, 14 Nov 2014 16:20:52 -0500 Subject: [PATCH 393/813] minor clarifications of variable precedence --- docsite/rst/playbooks_variables.rst | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index 738148106b..34d5a77be4 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -955,9 +955,10 @@ a use for it. If multiple variables of the same name are defined in different places, they win in a certain order, which is:: - * -e variables always win - * then comes "most everything else" - * then comes variables defined in inventory + * extra vars (-e in the command line) always win + * then comes connection variables defined in inventory (ansible_ssh_user, etc) + * then comes "most everything else" (command line switches, vars in play, included vars, etc) + * then comes the rest of the variables defined in inventory * then comes facts discovered about a system * then "role defaults", which are the most "defaulty" and lose in priority to everything. From 9fa163c565aee3e99c65d7084289f8edb4d8ceb6 Mon Sep 17 00:00:00 2001 From: John Batty Date: Sat, 15 Nov 2014 21:40:42 +0000 Subject: [PATCH 394/813] Fix documentation link to issue template On the Ansible Community page (http://docs.ansible.com/community.html) in the "I'd like to report a bug" section, the link to the "issue template" is broken - it links to URL https://raw2.github.com/ansible/ansible/devel/ISSUE_TEMPLATE.md, which results in a GitHub 404 (Page not found) error page . Fix points link to a URL that works. --- docsite/rst/community.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/community.rst b/docsite/rst/community.rst index d16070239e..4d2de28ce1 100644 --- a/docsite/rst/community.rst +++ b/docsite/rst/community.rst @@ -68,7 +68,7 @@ to see if the issue has already been reported. 
MODULE related bugs however should go to `ansible-modules-core `_ or `ansible-modules-extras `_ based on the classification of the module. This is listed on the bottom of the docs page for any module. -When filing a bug, please use the `issue template `_ to provide all relevant information, regardless of what repo you are filing a ticket against. +When filing a bug, please use the `issue template `_ to provide all relevant information, regardless of what repo you are filing a ticket against. Knowing your ansible version and the exact commands you are running, and what you expect, saves time and helps us help everyone with their issues more quickly. From e0110903c7dcbb21fdc2975244e04c5a07ad13d0 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 15 Nov 2014 17:01:25 -0500 Subject: [PATCH 395/813] put start and step docs into their own topic, moved from playbook_intro which is meant to be kept simple --- docsite/rst/playbooks_special_topics.rst | 1 + docsite/rst/playbooks_startnstep.rst | 38 ++++++++++++++++++++++++ 2 files changed, 39 insertions(+) create mode 100644 docsite/rst/playbooks_startnstep.rst diff --git a/docsite/rst/playbooks_special_topics.rst b/docsite/rst/playbooks_special_topics.rst index 078b27f253..c57f5796c9 100644 --- a/docsite/rst/playbooks_special_topics.rst +++ b/docsite/rst/playbooks_special_topics.rst @@ -17,3 +17,4 @@ and adopt these only if they seem relevant or useful to your environment. playbooks_prompts playbooks_tags playbooks_vault + playbooks_startnstep diff --git a/docsite/rst/playbooks_startnstep.rst b/docsite/rst/playbooks_startnstep.rst new file mode 100644 index 0000000000..1238a710d8 --- /dev/null +++ b/docsite/rst/playbooks_startnstep.rst @@ -0,0 +1,38 @@ +Start and Step +====================== +.. versionadded:: 1.8 + +.. contents:: Topics + +This shows a few special ways to run playbooks, very useful for testing and debugging. + + +Start-at-task +````````````` +.. 
versionadded:: 1.2 + +If you want to start executing your playbook at a particular task, you can do so +with the ``--start-at`` option:: + + ansible-playbook playbook.yml --start-at="install packages" + +The above will start executing your playbook at a task named "install packages". + + +Step +```` +.. versionadded:: 1.1 + + +Playbooks can also be executed interactively with ``--step``:: + + ansible-playbook playbook.yml --step + +This will cause ansible to stop on each task, and ask if it should execute that task. +Say you had a task called "configure ssh", the playbook run will stop and ask:: + + Perform task: configure ssh (y/n/c): + +Answering "y" will execute the task, answering "n" will skip the task, and answering "c" +will continue executing all the remaining tasks without asking. + From 897965ce4df7c64c47a62851c8b55dd543632f21 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 15 Nov 2014 17:09:55 -0500 Subject: [PATCH 396/813] removed start/step from intro, added markers to start n step --- docsite/rst/playbooks_intro.rst | 19 ------------------- docsite/rst/playbooks_startnstep.rst | 5 ++++- 2 files changed, 4 insertions(+), 20 deletions(-) diff --git a/docsite/rst/playbooks_intro.rst b/docsite/rst/playbooks_intro.rst index cc55f28261..4bc3bccf2d 100644 --- a/docsite/rst/playbooks_intro.rst +++ b/docsite/rst/playbooks_intro.rst @@ -335,25 +335,6 @@ Let's run a playbook using a parallelism level of 10:: ansible-playbook playbook.yml -f 10 -Playbooks can also be executed interactively with ``--step``:: - - ansible-playbook playbook.yml --step - -This will cause ansible to stop on each task, and ask if it should execute that task. -Say you had a task called "configure ssh", the playbook run will stop and ask:: - - Perform task: configure ssh (y/n/c): - -Answering "y" will execute the task, answering "n" will skip the task, and answering "c" -will continue executing all the remaining tasks without asking. 
- -If you want to start executing your playbook at a particular task, you can do so -with the ``--start-at`` option:: - - ansible-playbook playbook.yml --start-at="install packages" - -The above will start executing your playbook at a task named "install packages". - .. _ansible-pull: Ansible-Pull diff --git a/docsite/rst/playbooks_startnstep.rst b/docsite/rst/playbooks_startnstep.rst index 1238a710d8..c2f0d7514c 100644 --- a/docsite/rst/playbooks_startnstep.rst +++ b/docsite/rst/playbooks_startnstep.rst @@ -1,12 +1,13 @@ Start and Step ====================== -.. versionadded:: 1.8 .. contents:: Topics This shows a few special ways to run playbooks, very useful for testing and debugging. +.. _start_at_task + Start-at-task ````````````` .. versionadded:: 1.2 @@ -19,6 +20,8 @@ with the ``--start-at`` option:: The above will start executing your playbook at a task named "install packages". +.. _step + Step ```` .. versionadded:: 1.1 From d1e80ea1e60a908550474520c2de56954bb7c313 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 17 Nov 2014 11:17:47 -0500 Subject: [PATCH 397/813] removed version added info, too old to matter small rephrasing for clarification --- docsite/rst/playbooks_startnstep.rst | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/docsite/rst/playbooks_startnstep.rst b/docsite/rst/playbooks_startnstep.rst index c2f0d7514c..ac06962cf2 100644 --- a/docsite/rst/playbooks_startnstep.rst +++ b/docsite/rst/playbooks_startnstep.rst @@ -1,19 +1,14 @@ Start and Step ====================== -.. contents:: Topics - -This shows a few special ways to run playbooks, very useful for testing and debugging. +This shows a few alternative ways to run playbooks. These modes are very useful for testing new plays or debugging. .. _start_at_task Start-at-task ````````````` -.. 
versionadded:: 1.2 - -If you want to start executing your playbook at a particular task, you can do so -with the ``--start-at`` option:: +If you want to start executing your playbook at a particular task, you can do so with the ``--start-at`` option:: ansible-playbook playbook.yml --start-at="install packages" @@ -24,8 +19,6 @@ The above will start executing your playbook at a task named "install packages". Step ```` -.. versionadded:: 1.1 - Playbooks can also be executed interactively with ``--step``:: From 9a0f8f015877e8f1ae3c728a035120a25d4e7fa9 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 17 Nov 2014 15:30:22 -0600 Subject: [PATCH 398/813] Split out various vars-related things to avoid merging too early Fixes #9498 --- lib/ansible/playbook/__init__.py | 8 ++- lib/ansible/playbook/play.py | 49 +++++++++++++------ lib/ansible/playbook/task.py | 16 ++++-- lib/ansible/runner/__init__.py | 18 +++++-- test/integration/Makefile | 2 +- .../roles/test_var_precedence/tasks/main.yml | 4 ++ test/units/TestPlayVarsFiles.py | 34 ++++++------- 7 files changed, 88 insertions(+), 43 deletions(-) diff --git a/lib/ansible/playbook/__init__.py b/lib/ansible/playbook/__init__.py index 58e2bafe18..28e1d923eb 100644 --- a/lib/ansible/playbook/__init__.py +++ b/lib/ansible/playbook/__init__.py @@ -399,6 +399,9 @@ class PlayBook(object): remote_user=task.remote_user, remote_port=task.play.remote_port, module_vars=task.module_vars, + play_vars=task.play_vars, + play_file_vars=task.play_file_vars, + role_vars=task.role_vars, default_vars=task.default_vars, extra_vars=self.extra_vars, private_key_file=self.private_key_file, @@ -500,7 +503,7 @@ class PlayBook(object): def _save_play_facts(host, facts): # saves play facts in SETUP_CACHE, unless the module executed was # set_fact, in which case we add them to the VARS_CACHE - if task.module_name == 'set_fact': + if task.module_name in ('set_fact', 'include_vars'): utils.update_hash(self.VARS_CACHE, host, facts) else: 
utils.update_hash(self.SETUP_CACHE, host, facts) @@ -605,6 +608,9 @@ class PlayBook(object): transport=play.transport, is_playbook=True, module_vars=play.vars, + play_vars=play.vars, + play_file_vars=play.vars_file_vars, + role_vars=play.role_vars, default_vars=play.default_vars, check=self.check, diff=self.diff, diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index 742c12b382..b793247826 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -33,7 +33,7 @@ import uuid class Play(object): __slots__ = [ - 'hosts', 'name', 'vars', 'default_vars', 'vars_prompt', 'vars_files', + 'hosts', 'name', 'vars', 'vars_file_vars', 'role_vars', 'default_vars', 'vars_prompt', 'vars_files', 'handlers', 'remote_user', 'remote_port', 'included_roles', 'accelerate', 'accelerate_port', 'accelerate_ipv6', 'sudo', 'sudo_user', 'transport', 'playbook', 'tags', 'gather_facts', 'serial', '_ds', '_handlers', '_tasks', @@ -65,6 +65,8 @@ class Play(object): self.vars_prompt = ds.get('vars_prompt', {}) self.playbook = playbook self.vars = self._get_vars() + self.vars_file_vars = dict() # these are vars read in from vars_files: + self.role_vars = dict() # these are vars read in from vars/main.yml files in roles self.basedir = basedir self.roles = ds.get('roles', None) self.tags = ds.get('tags', None) @@ -108,10 +110,6 @@ class Play(object): self._update_vars_files_for_host(None) - # apply any extra_vars specified on the command line now - if type(self.playbook.extra_vars) == dict: - self.vars = utils.combine_vars(self.vars, self.playbook.extra_vars) - # template everything to be efficient, but do not pre-mature template # tasks/handlers as they may have inventory scope overrides _tasks = ds.pop('tasks', []) @@ -224,6 +222,7 @@ class Play(object): for role in roles: role_path,role_vars = self._get_role_path(role) role_vars = utils.combine_vars(passed_vars, role_vars) + vars = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 
'vars'))) vars_data = {} if os.path.isfile(vars): @@ -232,10 +231,12 @@ class Play(object): if not isinstance(vars_data, dict): raise errors.AnsibleError("vars from '%s' are not a dict" % vars) role_vars = utils.combine_vars(vars_data, role_vars) + defaults = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'defaults'))) defaults_data = {} if os.path.isfile(defaults): defaults_data = utils.parse_yaml_from_file(defaults, vault_password=self.vault_password) + # the meta directory contains the yaml that should # hold the list of dependencies (if any) meta = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'meta'))) @@ -287,13 +288,15 @@ class Play(object): dep_vars = utils.combine_vars(passed_vars, dep_vars) dep_vars = utils.combine_vars(role_vars, dep_vars) + vars = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'vars'))) vars_data = {} if os.path.isfile(vars): vars_data = utils.parse_yaml_from_file(vars, vault_password=self.vault_password) if vars_data: - #dep_vars = utils.combine_vars(vars_data, dep_vars) dep_vars = utils.combine_vars(dep_vars, vars_data) + pass + defaults = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'defaults'))) dep_defaults_data = {} if os.path.isfile(defaults): @@ -338,6 +341,19 @@ class Play(object): dep_stack.append([role,role_path,role_vars,defaults_data]) return dep_stack + def _load_role_vars_files(self, vars_files): + # process variables stored in vars/main.yml files + role_vars = {} + for filename in vars_files: + if os.path.exists(filename): + new_vars = utils.parse_yaml_from_file(filename, vault_password=self.vault_password) + if new_vars: + if type(new_vars) != dict: + raise errors.AnsibleError("%s must be stored as dictionary/hash: %s" % (filename, type(new_vars))) + role_vars = utils.combine_vars(role_vars, new_vars) + + return role_vars + def _load_role_defaults(self, defaults_files): # process default variables default_vars = {} 
@@ -364,10 +380,10 @@ class Play(object): if type(roles) != list: raise errors.AnsibleError("value of 'roles:' must be a list") - new_tasks = [] - new_handlers = [] - new_vars_files = [] - defaults_files = [] + new_tasks = [] + new_handlers = [] + role_vars_files = [] + defaults_files = [] pre_tasks = ds.get('pre_tasks', None) if type(pre_tasks) != list: @@ -434,7 +450,7 @@ class Play(object): nt[k] = special_vars[k] new_handlers.append(nt) if os.path.isfile(vars_file): - new_vars_files.append(vars_file) + role_vars_files.append(vars_file) if os.path.isfile(defaults_file): defaults_files.append(defaults_file) if os.path.isdir(library): @@ -462,13 +478,12 @@ class Play(object): new_tasks.append(dict(meta='flush_handlers')) new_handlers.extend(handlers) - new_vars_files.extend(vars_files) ds['tasks'] = new_tasks ds['handlers'] = new_handlers - ds['vars_files'] = new_vars_files ds['role_names'] = role_names + self.role_vars = self._load_role_vars_files(role_vars_files) self.default_vars = self._load_role_defaults(defaults_files) return ds @@ -535,8 +550,7 @@ class Play(object): results.append(Task(self, x)) continue - task_vars = self.vars.copy() - task_vars.update(vars) + task_vars = vars.copy() if original_file: task_vars['_original_file'] = original_file @@ -601,6 +615,9 @@ class Play(object): task = Task( self, x, module_vars=task_vars, + play_vars=self.vars, + play_file_vars=self.vars_file_vars, + role_vars=self.role_vars, default_vars=default_vars, additional_conditions=list(additional_conditions), role_name=role_name @@ -818,7 +835,7 @@ class Play(object): target_filename = filename4 update_vars_cache(host, data, target_filename=target_filename) else: - self.vars = utils.combine_vars(self.vars, data) + self.vars_file_vars = utils.combine_vars(self.vars_file_vars, data) # we did process this file return True # we did not process this file diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index db10f7c494..ebe43f63c1 100644 --- 
a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -26,7 +26,7 @@ class Task(object): __slots__ = [ 'name', 'meta', 'action', 'when', 'async_seconds', 'async_poll_interval', - 'notify', 'module_name', 'module_args', 'module_vars', 'default_vars', + 'notify', 'module_name', 'module_args', 'module_vars', 'play_vars', 'play_file_vars', 'role_vars', 'default_vars', 'play', 'notified_by', 'tags', 'register', 'role_name', 'delegate_to', 'first_available_file', 'ignore_errors', 'local_action', 'transport', 'sudo', 'remote_user', 'sudo_user', 'sudo_pass', @@ -45,7 +45,7 @@ class Task(object): 'su', 'su_user', 'su_pass', 'no_log', 'run_once', ] - def __init__(self, play, ds, module_vars=None, default_vars=None, additional_conditions=None, role_name=None): + def __init__(self, play, ds, module_vars=None, play_vars=None, play_file_vars=None, role_vars=None, default_vars=None, additional_conditions=None, role_name=None): ''' constructor loads from a task or handler datastructure ''' # meta directives are used to tell things like ansible/playbook to run @@ -119,9 +119,12 @@ class Task(object): elif not x in Task.VALID_KEYS: raise errors.AnsibleError("%s is not a legal parameter in an Ansible task or handler" % x) - self.module_vars = module_vars - self.default_vars = default_vars - self.play = play + self.module_vars = module_vars + self.play_vars = play_vars + self.play_file_vars = play_file_vars + self.role_vars = role_vars + self.default_vars = default_vars + self.play = play # load various attributes self.name = ds.get('name', None) @@ -219,6 +222,9 @@ class Task(object): # combine the default and module vars here for use in templating all_vars = self.default_vars.copy() + all_vars = utils.combine_vars(all_vars, self.play_vars) + all_vars = utils.combine_vars(all_vars, self.play_file_vars) + all_vars = utils.combine_vars(all_vars, self.role_vars) all_vars = utils.combine_vars(all_vars, self.module_vars) self.async_seconds = ds.get('async', 0) # not async 
by default diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 8f271f0500..1d236f5f11 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -134,7 +134,10 @@ class Runner(object): sudo=False, # whether to run sudo or not sudo_user=C.DEFAULT_SUDO_USER, # ex: 'root' module_vars=None, # a playbooks internals thing - default_vars=None, # ditto + play_vars=None, # + play_file_vars=None, # + role_vars=None, # + default_vars=None, # extra_vars=None, # extra vars specified with he playbook(s) is_playbook=False, # running from playbook or not? inventory=None, # reference to Inventory object @@ -176,6 +179,9 @@ class Runner(object): self.inventory = utils.default(inventory, lambda: ansible.inventory.Inventory(host_list)) self.module_vars = utils.default(module_vars, lambda: {}) + self.play_vars = utils.default(play_vars, lambda: {}) + self.play_file_vars = utils.default(play_file_vars, lambda: {}) + self.role_vars = utils.default(role_vars, lambda: {}) self.default_vars = utils.default(default_vars, lambda: {}) self.extra_vars = utils.default(extra_vars, lambda: {}) @@ -629,10 +635,16 @@ class Runner(object): inject = utils.combine_vars(inject, host_variables) # then the setup_cache which contains facts gathered inject = utils.combine_vars(inject, self.setup_cache.get(host, {})) - # followed by vars (vars, vars_files, vars/main.yml) - inject = utils.combine_vars(inject, self.vars_cache.get(host, {})) + # next come variables from vars and vars files + inject = utils.combine_vars(inject, self.play_vars) + inject = utils.combine_vars(inject, self.play_file_vars) + # next come variables from role vars/main.yml files + inject = utils.combine_vars(inject, self.role_vars) # then come the module variables inject = utils.combine_vars(inject, module_vars) + # followed by vars_cache things (set_fact, include_vars, and + # vars_files which had host-specific templating done) + inject = utils.combine_vars(inject, 
self.vars_cache.get(host, {})) # and finally -e vars are the highest priority inject = utils.combine_vars(inject, self.extra_vars) # and then special vars diff --git a/test/integration/Makefile b/test/integration/Makefile index 6568c53017..b03c3eff78 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -19,7 +19,7 @@ TMPDIR = $(shell mktemp -d 2>/dev/null || mktemp -d -t 'mytmpdir') VAULT_PASSWORD_FILE = vault-password -all: non_destructive destructive includes unicode test_var_precedence check_mode test_hash test_handlers test_group_by test_vault parsing +all: parsing test_var_precedence unicode non_destructive destructive includes check_mode test_hash test_handlers test_group_by test_vault parsing: ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags common,scenario1; [ $$? -eq 3 ] diff --git a/test/integration/roles/test_var_precedence/tasks/main.yml b/test/integration/roles/test_var_precedence/tasks/main.yml index 1915ebdb91..7850e6b646 100644 --- a/test/integration/roles/test_var_precedence/tasks/main.yml +++ b/test/integration/roles/test_var_precedence/tasks/main.yml @@ -1,3 +1,7 @@ +- debug: var=extra_var +- debug: var=vars_var +- debug: var=vars_files_var +- debug: var=vars_files_var_role - assert: that: - 'extra_var == "extra_var"' diff --git a/test/units/TestPlayVarsFiles.py b/test/units/TestPlayVarsFiles.py index d1b1f9dfa2..f241936a12 100644 --- a/test/units/TestPlayVarsFiles.py +++ b/test/units/TestPlayVarsFiles.py @@ -82,8 +82,8 @@ class TestMe(unittest.TestCase): os.remove(temp_path) # make sure the variable was loaded - assert 'foo' in play.vars, "vars_file was not loaded into play.vars" - assert play.vars['foo'] == 'bar', "foo was not set to bar in play.vars" + assert 'foo' in play.vars_file_vars, "vars_file was not loaded into play.vars_file_vars" + assert play.vars_file_vars['foo'] == 'bar', "foo was not set to bar in play.vars_file_vars" def 
test_vars_file_nonlist_error(self): @@ -133,10 +133,10 @@ class TestMe(unittest.TestCase): os.remove(temp_path2) # make sure the variables were loaded - assert 'foo' in play.vars, "vars_file was not loaded into play.vars" - assert play.vars['foo'] == 'bar', "foo was not set to bar in play.vars" - assert 'baz' in play.vars, "vars_file2 was not loaded into play.vars" - assert play.vars['baz'] == 'bang', "baz was not set to bang in play.vars" + assert 'foo' in play.vars_file_vars, "vars_file was not loaded into play.vars_file_vars" + assert play.vars_file_vars['foo'] == 'bar', "foo was not set to bar in play.vars_file_vars" + assert 'baz' in play.vars_file_vars, "vars_file2 was not loaded into play.vars_file_vars" + assert play.vars_file_vars['baz'] == 'bang', "baz was not set to bang in play.vars_file_vars" def test_vars_files_first_found(self): @@ -160,8 +160,8 @@ class TestMe(unittest.TestCase): os.remove(temp_path) # make sure the variable was loaded - assert 'foo' in play.vars, "vars_file was not loaded into play.vars" - assert play.vars['foo'] == 'bar', "foo was not set to bar in play.vars" + assert 'foo' in play.vars_file_vars, "vars_file was not loaded into play.vars_file_vars" + assert play.vars_file_vars['foo'] == 'bar', "foo was not set to bar in play.vars_file_vars" def test_vars_files_multiple_found(self): @@ -187,9 +187,9 @@ class TestMe(unittest.TestCase): os.remove(temp_path2) # make sure the variables were loaded - assert 'foo' in play.vars, "vars_file was not loaded into play.vars" - assert play.vars['foo'] == 'bar', "foo was not set to bar in play.vars" - assert 'baz' not in play.vars, "vars_file2 was loaded after vars_file1 was loaded" + assert 'foo' in play.vars_file_vars, "vars_file was not loaded into play.vars_file_vars" + assert play.vars_file_vars['foo'] == 'bar', "foo was not set to bar in play.vars_file_vars" + assert 'baz' not in play.vars_file_vars, "vars_file2 was loaded after vars_file1 was loaded" def 
test_vars_files_assert_all_found(self): @@ -227,7 +227,7 @@ class TestMe(unittest.TestCase): # VARIABLE PRECEDENCE TESTS ######################################## - # On the first run vars_files are loaded into play.vars by host == None + # On the first run vars_files are loaded into play.vars_file_vars by host == None # * only files with vars from host==None will work here # On the secondary run(s), a host is given and the vars_files are loaded into VARS_CACHE # * this only occurs if host is not None, filename2 has vars in the name, and filename3 does not @@ -273,8 +273,8 @@ class TestMe(unittest.TestCase): def test_vars_files_two_vars_in_name(self): - # self.vars = ds['vars'] - # self.vars += _get_vars() ... aka extra_vars + # self.vars_file_vars = ds['vars'] + # self.vars_file_vars += _get_vars() ... aka extra_vars # make a temp dir temp_dir = mkdtemp() @@ -299,7 +299,7 @@ class TestMe(unittest.TestCase): # cleanup shutil.rmtree(temp_dir) - assert 'foo' in play.vars, "double var templated vars_files filename not loaded" + assert 'foo' in play.vars_file_vars, "double var templated vars_files filename not loaded" def test_vars_files_two_vars_different_scope(self): @@ -337,7 +337,7 @@ class TestMe(unittest.TestCase): # cleanup shutil.rmtree(temp_dir) - assert 'foo' not in play.vars, \ + assert 'foo' not in play.vars_file_vars, \ "mixed scope vars_file loaded into play vars" assert 'foo' in play.playbook.VARS_CACHE['localhost'], \ "differently scoped templated vars_files filename not loaded" @@ -376,7 +376,7 @@ class TestMe(unittest.TestCase): # cleanup shutil.rmtree(temp_dir) - assert 'foo' not in play.vars, \ + assert 'foo' not in play.vars_file_vars, \ "mixed scope vars_file loaded into play vars" assert 'foo' in play.playbook.VARS_CACHE['localhost'], \ "differently scoped templated vars_files filename not loaded" From 4ae2d58d728a9185252593d818cf029b208b9b6a Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 17 Nov 2014 13:32:42 -0800 Subject: [PATCH 
399/813] Fix detection of fips mode in test_do_encrypt_md5 --- test/units/TestUtils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/units/TestUtils.py b/test/units/TestUtils.py index 3929ed0788..541849fd66 100644 --- a/test/units/TestUtils.py +++ b/test/units/TestUtils.py @@ -464,7 +464,7 @@ class TestUtils(unittest.TestCase): raise AssertionError('Incorrect exception, expected AnsibleError') def test_do_encrypt_md5(self): - if self._is_fips: + if self._is_fips(): raise SkipTest('MD5 unavailable on FIPS systems') hash = ansible.utils.do_encrypt('ansible', 'md5_crypt', salt_size=4) self.assertTrue(passlib.hash.md5_crypt.verify('ansible', hash)) From 25607e5cf4dec7113182770d2cf771950345922e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 17 Nov 2014 16:36:49 -0800 Subject: [PATCH 400/813] When run in FIPS mode, allow vault to fail only when using legacy format --- lib/ansible/utils/vault.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/lib/ansible/utils/vault.py b/lib/ansible/utils/vault.py index ad2dfab0b7..66f18d5c9b 100644 --- a/lib/ansible/utils/vault.py +++ b/lib/ansible/utils/vault.py @@ -26,9 +26,18 @@ from io import BytesIO from subprocess import call from ansible import errors from hashlib import sha256 + # Note: Only used for loading obsolete VaultAES files. All files are written # using the newer VaultAES256 which does not require md5 -from hashlib import md5 +try: + from hashlib import md5 +except ImportError: + try: + from md5 import md5 + except ImportError: + # MD5 unavailable. Possibly FIPS mode + md5 = None + from binascii import hexlify from binascii import unhexlify from ansible import constants as C @@ -358,6 +367,8 @@ class VaultAES(object): # http://stackoverflow.com/a/16761459 def __init__(self): + if not md5: + raise errors.AnsibleError('md5 hash is unavailable (Could be due to FIPS mode). 
Legacy VaultAES format is unavailable.') if not HAS_AES: raise errors.AnsibleError(CRYPTO_UPGRADE) From 91b0149c5265a9588d040def34747e36b063d95b Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 17 Nov 2014 23:59:55 -0500 Subject: [PATCH 401/813] a cache plugin that stores facts persistently in local json dumps --- lib/ansible/cache/jsonfile.py | 137 ++++++++++++++++++++++++++++++++++ 1 file changed, 137 insertions(+) create mode 100644 lib/ansible/cache/jsonfile.py diff --git a/lib/ansible/cache/jsonfile.py b/lib/ansible/cache/jsonfile.py new file mode 100644 index 0000000000..81918a2836 --- /dev/null +++ b/lib/ansible/cache/jsonfile.py @@ -0,0 +1,137 @@ +# (c) 2014, Brian Coca, Josh Drake, et al +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +import os +import time +import json +import errno + +from ansible import constants as C +from ansible import utils +from ansible.cache.base import BaseCacheModule + + +class CacheModule(BaseCacheModule): + """ + A caching module backed by json files. 
+ """ + def __init__(self, *args, **kwargs): + + self._timeout = float(C.CACHE_PLUGIN_TIMEOUT) + self._cache = {} + self._cache_dir = C.CACHE_PLUGIN_CONNECTION # expects a dir path + + if not os.path.exists(self._cache_dir): + try: + os.makedirs(self._cache_dir) + except (OSError,IOError), e: + utils.warning("error while trying to create cache dir %s : %s" % (self._cache_dir, str(e))) + return None + + def get(self, key): + + if key in self._cache: + return self._cache.get(key) + + if self.has_expired(key): + raise KeyError + + cachefile = "%s/%s" % (self._cache_dir, key) + try: + f = open( cachefile, 'r') + except (OSError,IOError), e: + utils.warning("error while trying to write to %s : %s" % (cachefile, str(e))) + else: + value = json.load(f) + self._cache[key] = value + return value + finally: + f.close() + + def set(self, key, value): + + self._cache[key] = value + + cachefile = "%s/%s" % (self._cache_dir, key) + try: + #TODO: check if valid keys can have invalid FS chars, base32? + f = open(cachefile, 'w') + except (OSError,IOError), e: + utils.warning("error while trying to read %s : %s" % (cachefile, str(e))) + else: + json.dump(value, f, ensure_ascii=False) + finally: + f.close() + + def has_expired(self, key): + + cachefile = "%s/%s" % (self._cache_dir, key) + try: + st = os.stat(cachefile) + except (OSError,IOError), e: + if e.errno == errno.ENOENT: + return False + else: + utils.warning("error while trying to stat %s : %s" % (cachefile, str(e))) + + if time.time() - st.st_mtime <= self._timeout: + return False + + if key in self._cache: + del self._cache[key] + return True + + def keys(self): + keys = [] + for k in os.listdir(self._cache_dir): + if not self.has_expired(k): + keys.append(k) + return keys + + def contains(self, key): + + if key in self._cache: + return True + + if self.has_expired(key): + return False + try: + st = os.stat("%s/%s" % (self._cache_dir, key)) + return True + except (OSError,IOError), e: + if e.errno == errno.ENOENT: + return 
False + else: + utils.warning("error while trying to stat %s : %s" % (cachefile, str(e))) + + def delete(self, key): + del self._cache[key] + try: + os.remove("%s/%s" % (self._cache_dir, key)) + except (OSError,IOError), e: + pass #TODO: only pass on non existing? + + def flush(self): + self._cache = {} + for key in self.keys(): + self.delete(key) + + def copy(self): + ret = dict() + for key in self.keys(): + ret[key] = self.get(key) + return ret From 0defe19c4bcd94e24d52400a89e4ca775f4d0c8b Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 18 Nov 2014 13:11:33 -0800 Subject: [PATCH 402/813] Update core modules for apt fix --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index fb4854ebcb..195e7c5a13 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit fb4854ebcbc35b3038530de91a472ef7d0b7b710 +Subproject commit 195e7c5a1373ffd79d450a4f7da313bdaad18315 From f8ca975c759d1fac55349f515b8cbf1d8bc32273 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Tue, 18 Nov 2014 17:20:53 -0500 Subject: [PATCH 403/813] The Guru program has been folded in with Ansible Tower. Have questions or interested in Ansible support? We'd love to hear from you. Email us at info@ansible.com. --- docsite/rst/guru.rst | 10 ---------- docsite/rst/index.rst | 1 - lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 4 files changed, 2 insertions(+), 13 deletions(-) delete mode 100644 docsite/rst/guru.rst diff --git a/docsite/rst/guru.rst b/docsite/rst/guru.rst deleted file mode 100644 index e4f07fd347..0000000000 --- a/docsite/rst/guru.rst +++ /dev/null @@ -1,10 +0,0 @@ -Ansible Guru -```````````` - -While many users should be able to get on fine with the documentation, mailing list, and IRC, sometimes you want a bit more. 
- -`Ansible Guru `_ is an offering from Ansible, Inc that helps users who would like more dedicated help with Ansible, including building playbooks, best practices, architecture suggestions, and more -- all from our awesome support and services team. It also includes some useful discounts and also some free T-shirts, though you shouldn't get it just for the free shirts! It's a great way to train up to becoming an Ansible expert. - -For those interested, click through the link above. You can sign up in minutes! - -For users looking for more hands-on help, we also have some more information on our `Services page `_, and support is also included with :doc:`tower`. diff --git a/docsite/rst/index.rst b/docsite/rst/index.rst index 8085c509fe..996d324fc9 100644 --- a/docsite/rst/index.rst +++ b/docsite/rst/index.rst @@ -38,5 +38,4 @@ This documentation covers the current released version of Ansible (1.7.2) and al faq glossary YAMLSyntax - guru diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 195e7c5a13..2970b339eb 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 195e7c5a1373ffd79d450a4f7da313bdaad18315 +Subproject commit 2970b339eb8ea6031e6153cabe45459bc2bd5754 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 5a514ccdda..ad181b7aa9 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 5a514ccddae85ccc5802eea8751401600e45c32f +Subproject commit ad181b7aa949848e3085065e09195cb28c34fdf7 From 36401fdb97123b42a31115742e8452d763be61b2 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Tue, 18 Nov 2014 17:22:58 -0500 Subject: [PATCH 404/813] Update git submodules. 
--- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 2970b339eb..195e7c5a13 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 2970b339eb8ea6031e6153cabe45459bc2bd5754 +Subproject commit 195e7c5a1373ffd79d450a4f7da313bdaad18315 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index ad181b7aa9..e34f62bb1f 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit ad181b7aa949848e3085065e09195cb28c34fdf7 +Subproject commit e34f62bb1fe296a91800a73709b60ad394bc50b4 From 662c63f1a08cb143b51887b47b62d4b161a20780 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Tue, 18 Nov 2014 18:42:55 -0500 Subject: [PATCH 405/813] Update contributor info in README --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 96a3c20d46..0e46111c53 100644 --- a/README.md +++ b/README.md @@ -50,7 +50,7 @@ Authors ======= Ansible was created by [Michael DeHaan](https://github.com/mpdehaan) (michael@ansible.com) and has contributions from over -800 users (and growing). Thanks everyone! +900 users (and growing). Thanks everyone! 
Ansible is sponsored by [Ansible, Inc](http://ansible.com) From aa1a46092d93e4292402668261cf8512925447f3 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 19 Nov 2014 09:40:52 -0600 Subject: [PATCH 406/813] Fix missing dep on parsing integration test for preparation role --- test/integration/Makefile | 10 +++++----- test/integration/roles/test_bad_parsing/meta/main.yml | 2 ++ 2 files changed, 7 insertions(+), 5 deletions(-) create mode 100644 test/integration/roles/test_bad_parsing/meta/main.yml diff --git a/test/integration/Makefile b/test/integration/Makefile index b03c3eff78..ac4aafe3f0 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -22,11 +22,11 @@ VAULT_PASSWORD_FILE = vault-password all: parsing test_var_precedence unicode non_destructive destructive includes check_mode test_hash test_handlers test_group_by test_vault parsing: - ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags common,scenario1; [ $$? -eq 3 ] - ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags common,scenario2; [ $$? -eq 3 ] - ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags common,scenario3; [ $$? -eq 3 ] - ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags common,scenario4; [ $$? -eq 3 ] - ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags common,scenario5; [ $$? -eq 3 ] + ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario1; [ $$? -eq 3 ] + ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario2; [ $$? 
-eq 3 ] + ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario3; [ $$? -eq 3 ] + ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario4; [ $$? -eq 3 ] + ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario5; [ $$? -eq 3 ] ansible-playbook good_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) includes: diff --git a/test/integration/roles/test_bad_parsing/meta/main.yml b/test/integration/roles/test_bad_parsing/meta/main.yml new file mode 100644 index 0000000000..c845eccfcd --- /dev/null +++ b/test/integration/roles/test_bad_parsing/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - { role: prepare_tests } From c4c3cc315d1c8f34f2b52b0a3d1af7bdbe5c9ae4 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 19 Nov 2014 11:50:02 -0800 Subject: [PATCH 407/813] Transform both values of a task name into a byte str prior to comparing Fixes #9571 --- lib/ansible/callbacks.py | 2 ++ lib/ansible/utils/__init__.py | 11 +++++++++++ test/integration/Makefile | 5 ++--- test/integration/unicode.yml | 17 +++++++++++++++++ 4 files changed, 32 insertions(+), 3 deletions(-) diff --git a/lib/ansible/callbacks.py b/lib/ansible/callbacks.py index d6dfb3c11c..a4b62fb005 100644 --- a/lib/ansible/callbacks.py +++ b/lib/ansible/callbacks.py @@ -603,11 +603,13 @@ class PlaybookCallbacks(object): call_callback_module('playbook_on_no_hosts_remaining') def on_task_start(self, name, is_conditional): + name = utils.to_bytes(name) msg = "TASK: [%s]" % name if is_conditional: msg = "NOTIFIED: [%s]" % name if hasattr(self, 'start_at'): + self.start_at = utils.to_bytes(self.start_at) if name == self.start_at or fnmatch.fnmatch(name, self.start_at): # we found out match, we can get rid of this now del self.start_at diff --git 
a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index 06ca8144cc..674ca1cb11 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -1265,13 +1265,24 @@ def make_su_cmd(su_user, executable, cmd): ) return ('/bin/sh -c ' + pipes.quote(sudocmd), None, success_key) +# For v2, consider either using kitchen or copying my code from there for +# to_unicode and to_bytes handling (TEK) _TO_UNICODE_TYPES = (unicode, type(None)) def to_unicode(value): + # Use with caution -- this function is not encoding safe (non-utf-8 values + # will cause tracebacks if they contain bytes from 0x80-0xff inclusive) if isinstance(value, _TO_UNICODE_TYPES): return value return value.decode("utf-8") +def to_bytes(value): + # Note: value is assumed to be a basestring to mirror to_unicode. Better + # implementations (like kitchen.text.converters.to_bytes) bring that check + # into the function + if isinstance(value, str): + return value + return value.encode('utf-8') def get_diff(diff): # called by --diff usage in playbook and runner via callbacks diff --git a/test/integration/Makefile b/test/integration/Makefile index ac4aafe3f0..b732eb02f8 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -34,13 +34,12 @@ includes: unicode: ansible-playbook unicode.yml -i $(INVENTORY) -e @$(VARS_FILE) -v $(TEST_FLAGS) + # Test the start-at-task flag #9571 + ansible-playbook unicode.yml -i $(INVENTORY) -e @$(VARS_FILE) -v --start-at-task '*¶' -e 'start_at_task=True' $(TEST_FLAGS) non_destructive: ansible-playbook non_destructive.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) -mine: - ansible-playbook mine.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) - destructive: ansible-playbook destructive.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) diff --git a/test/integration/unicode.yml b/test/integration/unicode.yml index 69c737a8a3..60fb14214b 100644 --- 
a/test/integration/unicode.yml +++ b/test/integration/unicode.yml @@ -41,3 +41,20 @@ tasks: - debug: msg='Unicode is a good thing â„¢' - debug: msg=ÐБВГД + +# Run this test by adding to the CLI: -e start_at_task=True --start-at-task '*¶' +- name: 'Show that we can skip to unicode named tasks' + hosts: localhost + gather_facts: false + vars: + flag: 'original' + start_at_task: False + tasks: + - name: 'Override flag var' + set_fact: flag='new' + + - name: 'A unicode task at the end of the playbook: ¶' + assert: + that: + - 'flag == "original"' + when: start_at_task|bool From 554858f3af8efcd5afeaae560599ecd42b683edc Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 19 Nov 2014 12:31:38 -0800 Subject: [PATCH 408/813] Update core modules to pull in a git module fix --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 195e7c5a13..c46f39f044 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 195e7c5a1373ffd79d450a4f7da313bdaad18315 +Subproject commit c46f39f0442ecaaa5eec60d8d895ee80ff7ba656 From 11b634104f3c989155c40a694476f9ff7718a08d Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 19 Nov 2014 13:05:11 -0800 Subject: [PATCH 409/813] Small fix to hardlink handling in file module --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index c46f39f044..19b328c4df 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit c46f39f0442ecaaa5eec60d8d895ee80ff7ba656 +Subproject commit 19b328c4df2157b6c0191e9144236643ce2be890 From c8bfc61172a5908eacee6656b10c31a79385fa82 Mon Sep 17 00:00:00 2001 From: Brian Schott Date: Thu, 20 Nov 2014 13:15:38 -0500 Subject: [PATCH 410/813] change location of ec2.ini example to moved location --- docsite/rst/intro_dynamic_inventory.rst | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/intro_dynamic_inventory.rst b/docsite/rst/intro_dynamic_inventory.rst index f8a5c92b2d..28536971bf 100644 --- a/docsite/rst/intro_dynamic_inventory.rst +++ b/docsite/rst/intro_dynamic_inventory.rst @@ -87,7 +87,7 @@ marking it executable:: ansible -i ec2.py -u ubuntu us-east-1d -m ping -The second option is to copy the script to `/etc/ansible/hosts` and `chmod +x` it. You will also need to copy the `ec2.ini `_ file to `/etc/ansible/ec2.ini`. Then you can run ansible as you would normally. +The second option is to copy the script to `/etc/ansible/hosts` and `chmod +x` it. You will also need to copy the `ec2.ini `_ file to `/etc/ansible/ec2.ini`. Then you can run ansible as you would normally. To successfully make an API call to AWS, you will need to configure Boto (the Python interface to AWS). There are a `variety of methods `_ available, but the simplest is just to export two environment variables:: From 4b6b58ab11fde889ee5301be773f554fa8f49a41 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 20 Nov 2014 12:20:32 -0600 Subject: [PATCH 411/813] Fix role parameter precedence after 9a0f8f0 Fixes #9497 --- lib/ansible/playbook/__init__.py | 1 + lib/ansible/playbook/play.py | 42 ++++++++++++++++++++++++-------- lib/ansible/playbook/task.py | 6 +++-- lib/ansible/runner/__init__.py | 4 +++ 4 files changed, 41 insertions(+), 12 deletions(-) diff --git a/lib/ansible/playbook/__init__.py b/lib/ansible/playbook/__init__.py index 28e1d923eb..d3c0aa5300 100644 --- a/lib/ansible/playbook/__init__.py +++ b/lib/ansible/playbook/__init__.py @@ -402,6 +402,7 @@ class PlayBook(object): play_vars=task.play_vars, play_file_vars=task.play_file_vars, role_vars=task.role_vars, + role_params=task.role_params, default_vars=task.default_vars, extra_vars=self.extra_vars, private_key_file=self.private_key_file, diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index b793247826..0dcbca8684 100644 
--- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -221,6 +221,14 @@ class Play(object): raise errors.AnsibleError("too many levels of recursion while resolving role dependencies") for role in roles: role_path,role_vars = self._get_role_path(role) + + # save just the role params for this role, which exclude the special + # keywords 'role', 'tags', and 'when'. + role_params = role_vars.copy() + for item in ('role', 'tags', 'when'): + if item in role_params: + del role_params[item] + role_vars = utils.combine_vars(passed_vars, role_vars) vars = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'vars'))) @@ -249,6 +257,13 @@ class Play(object): for dep in dependencies: allow_dupes = False (dep_path,dep_vars) = self._get_role_path(dep) + + # save the dep params, just as we did above + dep_params = dep_vars.copy() + for item in ('role', 'tags', 'when'): + if item in dep_params: + del dep_params[item] + meta = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'meta'))) if os.path.isfile(meta): meta_data = utils.parse_yaml_from_file(meta, vault_password=self.vault_password) @@ -332,13 +347,13 @@ class Play(object): dep_vars['when'] = tmpcond self._build_role_dependencies([dep], dep_stack, passed_vars=dep_vars, level=level+1) - dep_stack.append([dep,dep_path,dep_vars,dep_defaults_data]) + dep_stack.append([dep, dep_path, dep_vars, dep_params, dep_defaults_data]) # only add the current role when we're at the top level, # otherwise we'll end up in a recursive loop if level == 0: self.included_roles.append(role) - dep_stack.append([role,role_path,role_vars,defaults_data]) + dep_stack.append([role, role_path, role_vars, role_params, defaults_data]) return dep_stack def _load_role_vars_files(self, vars_files): @@ -400,12 +415,12 @@ class Play(object): # make role_path available as variable to the task for idx, val in enumerate(roles): this_uuid = str(uuid.uuid4()) - roles[idx][-2]['role_uuid'] = this_uuid - 
roles[idx][-2]['role_path'] = roles[idx][1] + roles[idx][-3]['role_uuid'] = this_uuid + roles[idx][-3]['role_path'] = roles[idx][1] role_names = [] - for (role,role_path,role_vars,default_vars) in roles: + for (role, role_path, role_vars, role_params, default_vars) in roles: # special vars must be extracted from the dict to the included tasks special_keys = [ "sudo", "sudo_user", "when", "with_items" ] special_vars = {} @@ -438,13 +453,13 @@ class Play(object): role_names.append(role_name) if os.path.isfile(task): - nt = dict(include=pipes.quote(task), vars=role_vars, default_vars=default_vars, role_name=role_name) + nt = dict(include=pipes.quote(task), vars=role_vars, role_params=role_params, default_vars=default_vars, role_name=role_name) for k in special_keys: if k in special_vars: nt[k] = special_vars[k] new_tasks.append(nt) if os.path.isfile(handler): - nt = dict(include=pipes.quote(handler), vars=role_vars, role_name=role_name) + nt = dict(include=pipes.quote(handler), vars=role_vars, role_params=role_params, role_name=role_name) for k in special_keys: if k in special_vars: nt[k] = special_vars[k] @@ -509,7 +524,7 @@ class Play(object): # ************************************************* - def _load_tasks(self, tasks, vars=None, default_vars=None, sudo_vars=None, + def _load_tasks(self, tasks, vars=None, role_params=None, default_vars=None, sudo_vars=None, additional_conditions=None, original_file=None, role_name=None): ''' handle task and handler include statements ''' @@ -521,6 +536,8 @@ class Play(object): additional_conditions = [] if vars is None: vars = {} + if role_params is None: + role_params = {} if default_vars is None: default_vars = {} if sudo_vars is None: @@ -572,11 +589,15 @@ class Play(object): included_additional_conditions.append(x[k]) elif type(x[k]) is list: included_additional_conditions.extend(x[k]) - elif k in ("include", "vars", "default_vars", "sudo", "sudo_user", "role_name", "no_log"): + elif k in ("include", "vars", "role_params", 
"default_vars", "sudo", "sudo_user", "role_name", "no_log"): continue else: include_vars[k] = x[k] + # get any role parameters specified + role_params = x.get('role_params', {}) + + # get any role default variables specified default_vars = x.get('default_vars', {}) if not default_vars: default_vars = self.default_vars @@ -609,7 +630,7 @@ class Play(object): for y in data: if isinstance(y, dict) and 'include' in y: y['role_name'] = new_role - loaded = self._load_tasks(data, mv, default_vars, included_sudo_vars, list(included_additional_conditions), original_file=include_filename, role_name=new_role) + loaded = self._load_tasks(data, mv, role_params, default_vars, included_sudo_vars, list(included_additional_conditions), original_file=include_filename, role_name=new_role) results += loaded elif type(x) == dict: task = Task( @@ -618,6 +639,7 @@ class Play(object): play_vars=self.vars, play_file_vars=self.vars_file_vars, role_vars=self.role_vars, + role_params=role_params, default_vars=default_vars, additional_conditions=list(additional_conditions), role_name=role_name diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index ebe43f63c1..783f488fa1 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -26,7 +26,7 @@ class Task(object): __slots__ = [ 'name', 'meta', 'action', 'when', 'async_seconds', 'async_poll_interval', - 'notify', 'module_name', 'module_args', 'module_vars', 'play_vars', 'play_file_vars', 'role_vars', 'default_vars', + 'notify', 'module_name', 'module_args', 'module_vars', 'play_vars', 'play_file_vars', 'role_vars', 'role_params', 'default_vars', 'play', 'notified_by', 'tags', 'register', 'role_name', 'delegate_to', 'first_available_file', 'ignore_errors', 'local_action', 'transport', 'sudo', 'remote_user', 'sudo_user', 'sudo_pass', @@ -45,7 +45,7 @@ class Task(object): 'su', 'su_user', 'su_pass', 'no_log', 'run_once', ] - def __init__(self, play, ds, module_vars=None, play_vars=None, 
play_file_vars=None, role_vars=None, default_vars=None, additional_conditions=None, role_name=None): + def __init__(self, play, ds, module_vars=None, play_vars=None, play_file_vars=None, role_vars=None, role_params=None, default_vars=None, additional_conditions=None, role_name=None): ''' constructor loads from a task or handler datastructure ''' # meta directives are used to tell things like ansible/playbook to run @@ -123,6 +123,7 @@ class Task(object): self.play_vars = play_vars self.play_file_vars = play_file_vars self.role_vars = role_vars + self.role_params = role_params self.default_vars = default_vars self.play = play @@ -226,6 +227,7 @@ class Task(object): all_vars = utils.combine_vars(all_vars, self.play_file_vars) all_vars = utils.combine_vars(all_vars, self.role_vars) all_vars = utils.combine_vars(all_vars, self.module_vars) + all_vars = utils.combine_vars(all_vars, self.role_params) self.async_seconds = ds.get('async', 0) # not async by default self.async_seconds = template.template_from_string(play.basedir, self.async_seconds, all_vars) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 1d236f5f11..0d16746255 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -137,6 +137,7 @@ class Runner(object): play_vars=None, # play_file_vars=None, # role_vars=None, # + role_params=None, # default_vars=None, # extra_vars=None, # extra vars specified with he playbook(s) is_playbook=False, # running from playbook or not? 
@@ -182,6 +183,7 @@ class Runner(object): self.play_vars = utils.default(play_vars, lambda: {}) self.play_file_vars = utils.default(play_file_vars, lambda: {}) self.role_vars = utils.default(role_vars, lambda: {}) + self.role_params = utils.default(role_params, lambda: {}) self.default_vars = utils.default(default_vars, lambda: {}) self.extra_vars = utils.default(extra_vars, lambda: {}) @@ -645,6 +647,8 @@ class Runner(object): # followed by vars_cache things (set_fact, include_vars, and # vars_files which had host-specific templating done) inject = utils.combine_vars(inject, self.vars_cache.get(host, {})) + # role parameters next + inject = utils.combine_vars(inject, self.role_params) # and finally -e vars are the highest priority inject = utils.combine_vars(inject, self.extra_vars) # and then special vars From 9660afd3e05b581641028a059739932d4ea92d51 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sebastian=20G=C3=B6ttschkes?= Date: Thu, 20 Nov 2014 20:18:04 +0100 Subject: [PATCH 412/813] Adding inventory parameter to documentation Adding the inventory parameter `ansible_sudo` to the list of behavioural inventory parameters in the intro_inventory documentation. --- docsite/rst/intro_inventory.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docsite/rst/intro_inventory.rst b/docsite/rst/intro_inventory.rst index 5b409e8e65..920bba816e 100644 --- a/docsite/rst/intro_inventory.rst +++ b/docsite/rst/intro_inventory.rst @@ -205,6 +205,8 @@ mentioned:: The default ssh user name to use. ansible_ssh_pass The ssh password to use (this is insecure, we strongly recommend using --ask-pass or SSH keys) + ansible_sudo + The boolean to decide if sudo should be used for this host. Defaults to false. 
ansible_sudo_pass The sudo password to use (this is insecure, we strongly recommend using --ask-sudo-pass) ansible_sudo_exe (new in version 1.8) From 0ba2298fddfafca3db122cb837c9174739e6f98b Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 12 Nov 2014 17:29:11 -0500 Subject: [PATCH 413/813] used del instead of pop, removed unused pprint import Conflicts: lib/ansible/modules/core lib/ansible/modules/extras --- bin/ansible-galaxy | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/bin/ansible-galaxy b/bin/ansible-galaxy index 9a73023b83..5fd92dde2c 100755 --- a/bin/ansible-galaxy +++ b/bin/ansible-galaxy @@ -697,7 +697,7 @@ def execute_info(args, options, parser): if install_info: if 'version' in install_info: install_info['intalled_version'] = install_info['version'] - install_info.pop('version', None) + del install_info['version'] role_info.update(install_info) remote_data = api_lookup_role_by_name(api_server, role, False) @@ -714,7 +714,6 @@ def execute_info(args, options, parser): if role_info: print "- %s:" % (role) - import pprint for k in sorted(role_info.keys()): if k in SKIP_INFO_KEYS: From f6f02c93e79f6204c9e251edc1f1453a7055c6f9 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 20 Nov 2014 13:23:46 -0800 Subject: [PATCH 414/813] Update to HEAD of hte modules --- lib/ansible/modules/extras | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index e34f62bb1f..e64751b0eb 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit e34f62bb1fe296a91800a73709b60ad394bc50b4 +Subproject commit e64751b0eb44c8ada6a6047eaf2303d98f8f505b From 02f6ca034edbee20fb9c60eb20b9df8da9a43a74 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 20 Nov 2014 16:32:33 -0500 Subject: [PATCH 415/813] zpelling fix --- docsite/rst/playbooks_variables.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git 
a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index 08b8ce60eb..253cee2ba4 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -302,7 +302,7 @@ Shuffle Filter .. versionadded:: 1.8 -This filter will randomize an existing list, giving a differnt order every invocation. +This filter will randomize an existing list, giving a different order every invocation. To get a random list from an existing list:: @@ -317,12 +317,12 @@ Other Useful Filters -------------------- To concatenate a list into a string:: - + {{ list | join(" ") }} To get the last name of a file path, like 'foo.txt' out of '/etc/asdf/foo.txt':: - {{ path | basename }} + {{ path | basename }} To get the directory from a path:: @@ -331,7 +331,7 @@ To get the directory from a path:: To expand a path containing a tilde (`~`) character (new in version 1.5):: {{ path | expanduser }} - + To get the real path of a link (new in version 1.8):: {{ path | readlink }} From be6ef11e96d9a8565a1df2a10a6c5700206fefca Mon Sep 17 00:00:00 2001 From: Rohan McGovern Date: Fri, 21 Nov 2014 10:50:40 +1000 Subject: [PATCH 416/813] test_git: add tests for version= case This test covers a recently fixed bug, https://github.com/ansible/ansible-modules-core/issues/313 --- .../integration/roles/test_git/tasks/main.yml | 46 +++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/test/integration/roles/test_git/tasks/main.yml b/test/integration/roles/test_git/tasks/main.yml index 14623a2ce9..5febae962e 100644 --- a/test/integration/roles/test_git/tasks/main.yml +++ b/test/integration/roles/test_git/tasks/main.yml @@ -153,6 +153,52 @@ that: - 'git_result.changed' +# Test that a specific revision can be checked out + +- name: clean out the checkout_dir + shell: rm -rf {{ checkout_dir }}/* + +- name: clone to specific revision + git: repo={{ repo_format1 }} dest={{ checkout_dir }} version=df4612ba925fbc1b3c51cbb006f51a0443bd2ce9 + +- name: check HEAD after 
clone to revision + command: git rev-parse HEAD chdir="{{ checkout_dir }}" + register: git_result + +- assert: + that: + - 'git_result.stdout == "df4612ba925fbc1b3c51cbb006f51a0443bd2ce9"' + +- name: update to specific revision + git: repo={{ repo_format1 }} dest={{ checkout_dir }} version=4e739a34719654db7b04896966e2354e1256ea5d + register: git_result + +- assert: + that: + - 'git_result.changed' + +- name: check HEAD after update to revision + command: git rev-parse HEAD chdir="{{ checkout_dir }}" + register: git_result + +- assert: + that: + - 'git_result.stdout == "4e739a34719654db7b04896966e2354e1256ea5d"' + +# Test a revision not available under refs/heads/ or refs/tags/ + +- name: attempt to get unavailable revision + git: + repo: https://github.com/ansible/ansible-examples.git + dest: '{{ checkout_dir }}' + version: 2cfde3668b8bb10fbe2b9d5cec486025ad8cc51b + ignore_errors: true + register: git_result + +- assert: + that: + - 'git_result.failed' + # # Submodule tests # From ef6769d6c86f560e71b16c69ff7a66ef29cbe79c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 21 Nov 2014 09:22:12 -0800 Subject: [PATCH 417/813] Clean up the format of pull #9590 --- test/integration/roles/test_git/tasks/main.yml | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/test/integration/roles/test_git/tasks/main.yml b/test/integration/roles/test_git/tasks/main.yml index 5febae962e..cbdd8f9556 100644 --- a/test/integration/roles/test_git/tasks/main.yml +++ b/test/integration/roles/test_git/tasks/main.yml @@ -155,11 +155,14 @@ # Test that a specific revision can be checked out -- name: clean out the checkout_dir - shell: rm -rf {{ checkout_dir }}/* +- name: clear checkout_dir + file: state=absent path={{ checkout_dir }} - name: clone to specific revision - git: repo={{ repo_format1 }} dest={{ checkout_dir }} version=df4612ba925fbc1b3c51cbb006f51a0443bd2ce9 + git: + repo: "{{ repo_format1 }}" + dest: "{{ checkout_dir }}" + version: 
df4612ba925fbc1b3c51cbb006f51a0443bd2ce9 - name: check HEAD after clone to revision command: git rev-parse HEAD chdir="{{ checkout_dir }}" @@ -170,7 +173,10 @@ - 'git_result.stdout == "df4612ba925fbc1b3c51cbb006f51a0443bd2ce9"' - name: update to specific revision - git: repo={{ repo_format1 }} dest={{ checkout_dir }} version=4e739a34719654db7b04896966e2354e1256ea5d + git: + repo: "{{ repo_format1 }}" + dest: "{{ checkout_dir }}" + version: 4e739a34719654db7b04896966e2354e1256ea5d register: git_result - assert: @@ -189,8 +195,8 @@ - name: attempt to get unavailable revision git: - repo: https://github.com/ansible/ansible-examples.git - dest: '{{ checkout_dir }}' + repo: "{{ repo_format1 }}" + dest: "{{ checkout_dir }}" version: 2cfde3668b8bb10fbe2b9d5cec486025ad8cc51b ignore_errors: true register: git_result From ac4dc1f739e813d6542b4e37e968c62f57511de4 Mon Sep 17 00:00:00 2001 From: Kishin Yagami Date: Sun, 17 Aug 2014 11:47:59 +0900 Subject: [PATCH 418/813] enable environment keyword at play level --- lib/ansible/playbook/play.py | 5 +++-- lib/ansible/playbook/task.py | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index 0dcbca8684..1bc0a893ba 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -38,7 +38,7 @@ class Play(object): 'accelerate_port', 'accelerate_ipv6', 'sudo', 'sudo_user', 'transport', 'playbook', 'tags', 'gather_facts', 'serial', '_ds', '_handlers', '_tasks', 'basedir', 'any_errors_fatal', 'roles', 'max_fail_pct', '_play_hosts', 'su', 'su_user', - 'vault_password', 'no_log', + 'vault_password', 'no_log', 'environment', ] # to catch typos and so forth -- these are userland names @@ -48,7 +48,7 @@ class Play(object): 'tasks', 'handlers', 'remote_user', 'user', 'port', 'include', 'accelerate', 'accelerate_port', 'accelerate_ipv6', 'sudo', 'sudo_user', 'connection', 'tags', 'gather_facts', 'serial', 'any_errors_fatal', 'roles', 'role_names', 
'pre_tasks', 'post_tasks', 'max_fail_percentage', - 'su', 'su_user', 'vault_password', 'no_log', + 'su', 'su_user', 'vault_password', 'no_log', 'environment', ] # ************************************************* @@ -71,6 +71,7 @@ class Play(object): self.roles = ds.get('roles', None) self.tags = ds.get('tags', None) self.vault_password = vault_password + self.environment = ds.get('environment', {}) if self.tags is None: self.tags = [] diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index 783f488fa1..bdffba5527 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -133,7 +133,7 @@ class Task(object): self.register = ds.get('register', None) self.sudo = utils.boolean(ds.get('sudo', play.sudo)) self.su = utils.boolean(ds.get('su', play.su)) - self.environment = ds.get('environment', {}) + self.environment = ds.get('environment', play.environment) self.role_name = role_name self.no_log = utils.boolean(ds.get('no_log', "false")) or self.play.no_log self.run_once = utils.boolean(ds.get('run_once', 'false')) From e9b6aaf5d8836ce7ffdca855e006c2131fe19632 Mon Sep 17 00:00:00 2001 From: Chris Church Date: Sun, 23 Nov 2014 22:53:10 -0500 Subject: [PATCH 419/813] Update PowerShell command line processing to handle parameters passed via splatting. 
--- .../runner/connection_plugins/winrm.py | 2 +- .../runner/shell_plugins/powershell.py | 8 +++-- .../roles/test_win_script/defaults/main.yml | 5 +++ .../files/test_script_with_splatting.ps1 | 6 ++++ .../roles/test_win_script/tasks/main.yml | 32 +++++++++++++++++++ 5 files changed, 49 insertions(+), 4 deletions(-) create mode 100644 test/integration/roles/test_win_script/defaults/main.yml create mode 100644 test/integration/roles/test_win_script/files/test_script_with_splatting.ps1 diff --git a/lib/ansible/runner/connection_plugins/winrm.py b/lib/ansible/runner/connection_plugins/winrm.py index d6e51710b5..32b630bc42 100644 --- a/lib/ansible/runner/connection_plugins/winrm.py +++ b/lib/ansible/runner/connection_plugins/winrm.py @@ -143,7 +143,7 @@ class Connection(object): vvv("EXEC %s" % cmd, host=self.host) # For script/raw support. if cmd_parts and cmd_parts[0].lower().endswith('.ps1'): - script = powershell._build_file_cmd(cmd_parts) + script = powershell._build_file_cmd(cmd_parts, quote_args=False) cmd_parts = powershell._encode_script(script, as_list=True) try: result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True) diff --git a/lib/ansible/runner/shell_plugins/powershell.py b/lib/ansible/runner/shell_plugins/powershell.py index 7254df6f7e..93761d321d 100644 --- a/lib/ansible/runner/shell_plugins/powershell.py +++ b/lib/ansible/runner/shell_plugins/powershell.py @@ -53,9 +53,11 @@ def _encode_script(script, as_list=False): return cmd_parts return ' '.join(cmd_parts) -def _build_file_cmd(cmd_parts): +def _build_file_cmd(cmd_parts, quote_args=True): '''Build command line to run a file, given list of file name plus args.''' - return ' '.join(_common_args + ['-ExecutionPolicy', 'Unrestricted', '-File'] + ['"%s"' % x for x in cmd_parts]) + if quote_args: + cmd_parts = ['"%s"' % x for x in cmd_parts] + return ' '.join(['&'] + cmd_parts) class ShellModule(object): @@ -110,7 +112,7 @@ class ShellModule(object): cmd_parts = shlex.split(cmd, 
posix=False) if not cmd_parts[0].lower().endswith('.ps1'): cmd_parts[0] = '%s.ps1' % cmd_parts[0] - script = _build_file_cmd(cmd_parts) + script = _build_file_cmd(cmd_parts, quote_args=False) if rm_tmp: rm_tmp = _escape(rm_tmp) script = '%s; Remove-Item "%s" -Force -Recurse;' % (script, rm_tmp) diff --git a/test/integration/roles/test_win_script/defaults/main.yml b/test/integration/roles/test_win_script/defaults/main.yml new file mode 100644 index 0000000000..a2c6475e75 --- /dev/null +++ b/test/integration/roles/test_win_script/defaults/main.yml @@ -0,0 +1,5 @@ +--- + +# Parameters to pass to test scripts. +test_win_script_value: VaLuE +test_win_script_splat: "@{This='THIS'; That='THAT'; Other='OTHER'}" diff --git a/test/integration/roles/test_win_script/files/test_script_with_splatting.ps1 b/test/integration/roles/test_win_script/files/test_script_with_splatting.ps1 new file mode 100644 index 0000000000..429a9a3b7a --- /dev/null +++ b/test/integration/roles/test_win_script/files/test_script_with_splatting.ps1 @@ -0,0 +1,6 @@ +# Test script to make sure the Ansible script module works when arguments are +# passed via splatting (http://technet.microsoft.com/en-us/magazine/gg675931.aspx) + +Write-Host $args.This +Write-Host $args.That +Write-Host $args.Other diff --git a/test/integration/roles/test_win_script/tasks/main.yml b/test/integration/roles/test_win_script/tasks/main.yml index 1edfd0b006..e1e5f25611 100644 --- a/test/integration/roles/test_win_script/tasks/main.yml +++ b/test/integration/roles/test_win_script/tasks/main.yml @@ -46,6 +46,38 @@ - "not test_script_with_args_result|failed" - "test_script_with_args_result|changed" +- name: run test script that takes parameters passed via splatting + script: test_script_with_splatting.ps1 "@{ This = 'this'; That = '{{ test_win_script_value }}'; Other = 'other'}" + register: test_script_with_splatting_result + +- name: check that script ran and received parameters via splatting + assert: + that: + - 
"test_script_with_splatting_result.rc == 0" + - "test_script_with_splatting_result.stdout" + - "test_script_with_splatting_result.stdout_lines[0] == 'this'" + - "test_script_with_splatting_result.stdout_lines[1] == test_win_script_value" + - "test_script_with_splatting_result.stdout_lines[2] == 'other'" + - "not test_script_with_splatting_result.stderr" + - "not test_script_with_splatting_result|failed" + - "test_script_with_splatting_result|changed" + +- name: run test script that takes splatted parameters from a variable + script: test_script_with_splatting.ps1 {{ test_win_script_splat|quote }} + register: test_script_with_splatting2_result + +- name: check that script ran and received parameters via splatting from a variable + assert: + that: + - "test_script_with_splatting2_result.rc == 0" + - "test_script_with_splatting2_result.stdout" + - "test_script_with_splatting2_result.stdout_lines[0] == 'THIS'" + - "test_script_with_splatting2_result.stdout_lines[1] == 'THAT'" + - "test_script_with_splatting2_result.stdout_lines[2] == 'OTHER'" + - "not test_script_with_splatting2_result.stderr" + - "not test_script_with_splatting2_result|failed" + - "test_script_with_splatting2_result|changed" + - name: run test script that has errors script: test_script_with_errors.ps1 register: test_script_with_errors_result From 067112b5ee8b3d3924cee83eb63ee2faa6418006 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 24 Nov 2014 20:55:08 -0800 Subject: [PATCH 420/813] Code for modules to perform more robust quoting of database identifiers --- lib/ansible/module_utils/database.py | 114 ++++++++++++++++++++++++++ lib/ansible/modules/core | 2 +- test/units/TestModuleUtilsDatabase.py | 103 +++++++++++++++++++++++ 3 files changed, 218 insertions(+), 1 deletion(-) create mode 100644 lib/ansible/module_utils/database.py create mode 100644 test/units/TestModuleUtilsDatabase.py diff --git a/lib/ansible/module_utils/database.py b/lib/ansible/module_utils/database.py new file mode 
100644 index 0000000000..ca7942d048 --- /dev/null +++ b/lib/ansible/module_utils/database.py @@ -0,0 +1,114 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c) 2014, Toshio Kuratomi +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +class SQLParseError(Exception): + pass + +class UnclosedQuoteError(SQLParseError): + pass + +# maps a type of identifier to the maximum number of dot levels that are +# allowed to specifiy that identifier. For example, a database column can be +# specified by up to 4 levels: database.schema.table.column +_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, schema=2, table=3, column=4, role=1) + +def _find_end_quote(identifier): + accumulate = 0 + while True: + try: + quote = identifier.index('"') + except ValueError: + raise UnclosedQuoteError + accumulate = accumulate + quote + try: + next_char = identifier[quote+1] + except IndexError: + return accumulate + if next_char == '"': + try: + identifier = identifier[quote+2:] + accumulate = accumulate + 2 + except IndexError: + raise UnclosedQuoteError + else: + return accumulate + + +def _identifier_parse(identifier): + if not identifier: + raise SQLParseError('Identifier name unspecified or unquoted trailing dot') + + already_quoted = False + if identifier.startswith('"'): + already_quoted = True + try: + end_quote = _find_end_quote(identifier[1:]) + 1 + except UnclosedQuoteError: + already_quoted = False + else: + if end_quote < len(identifier) - 1: + if identifier[end_quote+1] == '.': + dot = end_quote + 1 + first_identifier = identifier[:dot] + next_identifier = identifier[dot+1:] + further_identifiers = _identifier_parse(next_identifier) + further_identifiers.insert(0, first_identifier) + else: + import q ; q.q(identifier) + raise SQLParseError('User escaped identifiers must escape extra double quotes') + else: + further_identifiers = [identifier] + + if not already_quoted: + try: + dot = identifier.index('.') + except ValueError: + identifier = identifier.replace('"', '""') + identifier = ''.join(('"', identifier, '"')) + further_identifiers = [identifier] + else: + if dot == 0 or dot >= len(identifier) - 1: + identifier = identifier.replace('"', '""') + identifier = ''.join(('"', identifier, '"')) + 
further_identifiers = [identifier] + else: + first_identifier = identifier[:dot] + next_identifier = identifier[dot+1:] + further_identifiers = _identifier_parse(next_identifier) + first_identifier = first_identifier.replace('"', '""') + first_identifier = ''.join(('"', first_identifier, '"')) + further_identifiers.insert(0, first_identifier) + + return further_identifiers + + +def pg_quote_identifier(identifier, id_type): + identifier_fragments = _identifier_parse(identifier) + if len(identifier_fragments) > _IDENTIFIER_TO_DOT_LEVEL[id_type]: + raise SQLParseError('PostgreSQL does not support %s with more than %i dots' % (id_type, _IDENTIFIER_TO_DOT_LEVEL[id_type])) + return '.'.join(identifier_fragments) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 19b328c4df..1b0afb137c 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 19b328c4df2157b6c0191e9144236643ce2be890 +Subproject commit 1b0afb137c78383c47b3aaa31f4b849ddcb8783f diff --git a/test/units/TestModuleUtilsDatabase.py b/test/units/TestModuleUtilsDatabase.py new file mode 100644 index 0000000000..635eadb42c --- /dev/null +++ b/test/units/TestModuleUtilsDatabase.py @@ -0,0 +1,103 @@ +import collections +import mock +import os + +from nose import tools + +from ansible.module_utils.database import ( + pg_quote_identifier, + SQLParseError, +) + + +# Note: Using nose's generator test cases here so we can't inherit from +# unittest.TestCase +class TestQuotePgIdentifier(object): + + # These are all valid strings + # The results are based on interpreting the identifier as a table name + valid = { + # User quoted + '"public.table"': '"public.table"', + '"public"."table"': '"public"."table"', + '"schema test"."table test"': '"schema test"."table test"', + + # We quote part + 'public.table': '"public"."table"', + '"public".table': '"public"."table"', + 'public."table"': '"public"."table"', + 'schema test.table test': '"schema test"."table test"', + 
'"schema test".table test': '"schema test"."table test"', + 'schema test."table test"': '"schema test"."table test"', + + # Embedded double quotes + 'table "test"': '"table ""test"""', + 'public."table ""test"""': '"public"."table ""test"""', + 'public.table "test"': '"public"."table ""test"""', + 'schema "test".table': '"schema ""test"""."table"', + '"schema ""test""".table': '"schema ""test"""."table"', + '"""wat"""."""test"""': '"""wat"""."""test"""', + # Sigh, handle these as well: + '"no end quote': '"""no end quote"', + 'schema."table': '"schema"."""table"', + '"schema.table': '"""schema"."table"', + 'schema."table.something': '"schema"."""table"."something"', + + # Embedded dots + '"schema.test"."table.test"': '"schema.test"."table.test"', + '"schema.".table': '"schema."."table"', + '"schema."."table"': '"schema."."table"', + 'schema.".table"': '"schema".".table"', + '"schema".".table"': '"schema".".table"', + '"schema.".".table"': '"schema.".".table"', + # These are valid but maybe not what the user intended + '."table"': '".""table"""', + 'table.': '"table."', + } + + invalid = { + ('test.too.many.dots', 'table'): 'PostgreSQL does not support table with more than 3 dots', + ('"test.too".many.dots', 'database'): 'PostgreSQL does not support database with more than 1 dots', + ('test.too."many.dots"', 'database'): 'PostgreSQL does not support database with more than 1 dots', + ('"test"."too"."many"."dots"', 'database'): "PostgreSQL does not support database with more than 1 dots", + ('"test"."too"."many"."dots"', 'schema'): "PostgreSQL does not support schema with more than 2 dots", + ('"test"."too"."many"."dots"', 'table'): "PostgreSQL does not support table with more than 3 dots", + ('"test"."too"."many"."dots"."for"."column"', 'column'): "PostgreSQL does not support column with more than 4 dots", + ('"table "invalid" double quote"', 'table'): 'User escaped identifiers must escape extra double quotes', + ('"schema "invalid"""."table "invalid"', 'table'): 
'User escaped identifiers must escape extra double quotes', + ('"schema."table"','table'): 'User escaped identifiers must escape extra double quotes', + ('"schema".', 'table'): 'Identifier name unspecified or unquoted trailing dot', + } + + def check_valid_quotes(self, identifier, quoted_identifier): + tools.eq_(pg_quote_identifier(identifier, 'table'), quoted_identifier) + + def test_valid_quotes(self): + for identifier in self.valid: + yield self.check_valid_quotes, identifier, self.valid[identifier] + + def check_invalid_quotes(self, identifier, id_type, msg): + if hasattr(tools, 'assert_raises_regexp'): + tools.assert_raises_regexp(SQLParseError, msg, pg_quote_identifier, *(identifier, id_type)) + else: + tools.assert_raises(SQLParseError, pg_quote_identifier, *(identifier, id_type)) + + def test_invalid_quotes(self): + for test in self.invalid: + yield self.check_invalid_quotes, test[0], test[1], self.invalid[test] + + def test_how_many_dots(self): + tools.eq_(pg_quote_identifier('role', 'role'), '"role"') + tools.assert_raises_regexp(SQLParseError, "PostgreSQL does not support role with more than 1 dots", pg_quote_identifier, *('role.more', 'role')) + + tools.eq_(pg_quote_identifier('db', 'database'), '"db"') + tools.assert_raises_regexp(SQLParseError, "PostgreSQL does not support database with more than 1 dots", pg_quote_identifier, *('db.more', 'database')) + + tools.eq_(pg_quote_identifier('db.schema', 'schema'), '"db"."schema"') + tools.assert_raises_regexp(SQLParseError, "PostgreSQL does not support schema with more than 2 dots", pg_quote_identifier, *('db.schema.more', 'schema')) + + tools.eq_(pg_quote_identifier('db.schema.table', 'table'), '"db"."schema"."table"') + tools.assert_raises_regexp(SQLParseError, "PostgreSQL does not support table with more than 3 dots", pg_quote_identifier, *('db.schema.table.more', 'table')) + + tools.eq_(pg_quote_identifier('db.schema.table.column', 'column'), '"db"."schema"."table"."column"') + 
tools.assert_raises_regexp(SQLParseError, "PostgreSQL does not support column with more than 4 dots", pg_quote_identifier, *('db.schema.table.column.more', 'column')) From 64a8ad9a21861f702ee2c33974cb0fff793c285e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 24 Nov 2014 21:02:24 -0800 Subject: [PATCH 421/813] Remove debugging code --- lib/ansible/module_utils/database.py | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/ansible/module_utils/database.py b/lib/ansible/module_utils/database.py index ca7942d048..cb6c7c46b1 100644 --- a/lib/ansible/module_utils/database.py +++ b/lib/ansible/module_utils/database.py @@ -79,7 +79,6 @@ def _identifier_parse(identifier): further_identifiers = _identifier_parse(next_identifier) further_identifiers.insert(0, first_identifier) else: - import q ; q.q(identifier) raise SQLParseError('User escaped identifiers must escape extra double quotes') else: further_identifiers = [identifier] From 19606afe5f47f044c6d49935e2bd37f3c66b81e3 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 24 Nov 2014 22:57:41 -0800 Subject: [PATCH 422/813] Update postgresql modules so that we get all the quoting of identifier fixes --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 1b0afb137c..fbc4ed7a88 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 1b0afb137c78383c47b3aaa31f4b849ddcb8783f +Subproject commit fbc4ed7a886109b8ba761609f80e6813d85d3e72 From 0287e9a23d29e253054ad6a110c7f5ba6a939595 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 25 Nov 2014 00:45:59 -0800 Subject: [PATCH 423/813] Normalize the identifier quoting so we can reuse the functions for mysql --- lib/ansible/module_utils/database.py | 39 ++++++++++++++++------------ 1 file changed, 23 insertions(+), 16 deletions(-) diff --git a/lib/ansible/module_utils/database.py b/lib/ansible/module_utils/database.py 
index cb6c7c46b1..68b294a436 100644 --- a/lib/ansible/module_utils/database.py +++ b/lib/ansible/module_utils/database.py @@ -35,13 +35,14 @@ class UnclosedQuoteError(SQLParseError): # maps a type of identifier to the maximum number of dot levels that are # allowed to specifiy that identifier. For example, a database column can be # specified by up to 4 levels: database.schema.table.column -_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, schema=2, table=3, column=4, role=1) +_PG_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, schema=2, table=3, column=4, role=1) +_MYSQL_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, table=2, column=3, role=1) -def _find_end_quote(identifier): +def _find_end_quote(identifier, quote_char='"'): accumulate = 0 while True: try: - quote = identifier.index('"') + quote = identifier.index(quote_char) except ValueError: raise UnclosedQuoteError accumulate = accumulate + quote @@ -49,7 +50,7 @@ def _find_end_quote(identifier): next_char = identifier[quote+1] except IndexError: return accumulate - if next_char == '"': + if next_char == quote_char: try: identifier = identifier[quote+2:] accumulate = accumulate + 2 @@ -59,15 +60,15 @@ def _find_end_quote(identifier): return accumulate -def _identifier_parse(identifier): +def _identifier_parse(identifier, quote_char='"'): if not identifier: raise SQLParseError('Identifier name unspecified or unquoted trailing dot') already_quoted = False - if identifier.startswith('"'): + if identifier.startswith(quote_char): already_quoted = True try: - end_quote = _find_end_quote(identifier[1:]) + 1 + end_quote = _find_end_quote(identifier[1:], quote_char=quote_char) + 1 except UnclosedQuoteError: already_quoted = False else: @@ -87,27 +88,33 @@ def _identifier_parse(identifier): try: dot = identifier.index('.') except ValueError: - identifier = identifier.replace('"', '""') - identifier = ''.join(('"', identifier, '"')) + identifier = identifier.replace(quote_char, quote_char*2) + identifier = ''.join((quote_char, 
identifier, quote_char)) further_identifiers = [identifier] else: if dot == 0 or dot >= len(identifier) - 1: - identifier = identifier.replace('"', '""') - identifier = ''.join(('"', identifier, '"')) + identifier = identifier.replace(quote_char, quote_char*2) + identifier = ''.join((quote_char, identifier, quote_char)) further_identifiers = [identifier] else: first_identifier = identifier[:dot] next_identifier = identifier[dot+1:] further_identifiers = _identifier_parse(next_identifier) - first_identifier = first_identifier.replace('"', '""') - first_identifier = ''.join(('"', first_identifier, '"')) + first_identifier = first_identifier.replace(quote_char, quote_char*2) + first_identifier = ''.join((quote_char, first_identifier, quote_char)) further_identifiers.insert(0, first_identifier) return further_identifiers def pg_quote_identifier(identifier, id_type): - identifier_fragments = _identifier_parse(identifier) - if len(identifier_fragments) > _IDENTIFIER_TO_DOT_LEVEL[id_type]: - raise SQLParseError('PostgreSQL does not support %s with more than %i dots' % (id_type, _IDENTIFIER_TO_DOT_LEVEL[id_type])) + identifier_fragments = _identifier_parse(identifier, quote_char='"') + if len(identifier_fragments) > _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]: + raise SQLParseError('PostgreSQL does not support %s with more than %i dots' % (id_type, _PG_IDENTIFIER_TO_DOT_LEVEL[id_type])) + return '.'.join(identifier_fragments) + +def mysql_quote_identifier(identifier, id_type): + identifier_fragments = _identifier_parse(identifier, quote_char='`') + if len(identifier_fragments) > _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]: + raise SQLParseError('MySQL does not support %s with more than %i dots' % (id_type, _IDENTIFIER_TO_DOT_LEVEL[id_type])) return '.'.join(identifier_fragments) From fcf0975c904a64e1fc008e2251b2325bae76ee41 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 25 Nov 2014 01:47:07 -0800 Subject: [PATCH 424/813] Fix a few mysql related typos --- 
lib/ansible/module_utils/database.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/database.py b/lib/ansible/module_utils/database.py index 68b294a436..3c8bdaab80 100644 --- a/lib/ansible/module_utils/database.py +++ b/lib/ansible/module_utils/database.py @@ -36,7 +36,7 @@ class UnclosedQuoteError(SQLParseError): # allowed to specifiy that identifier. For example, a database column can be # specified by up to 4 levels: database.schema.table.column _PG_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, schema=2, table=3, column=4, role=1) -_MYSQL_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, table=2, column=3, role=1) +_MYSQL_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, table=2, column=3, role=1, vars=1) def _find_end_quote(identifier, quote_char='"'): accumulate = 0 @@ -116,5 +116,5 @@ def pg_quote_identifier(identifier, id_type): def mysql_quote_identifier(identifier, id_type): identifier_fragments = _identifier_parse(identifier, quote_char='`') if len(identifier_fragments) > _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]: - raise SQLParseError('MySQL does not support %s with more than %i dots' % (id_type, _IDENTIFIER_TO_DOT_LEVEL[id_type])) + raise SQLParseError('MySQL does not support %s with more than %i dots' % (id_type, _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type])) return '.'.join(identifier_fragments) From 8e7447962e4d53e4cb94602cd76592364ae13740 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 25 Nov 2014 01:47:30 -0800 Subject: [PATCH 425/813] Update the modules to pull in mysql identifier escaping --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index fbc4ed7a88..10ebcccedb 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit fbc4ed7a886109b8ba761609f80e6813d85d3e72 +Subproject commit 10ebcccedb542c7e1c499e77a1f53da98d373bc3 diff --git 
a/lib/ansible/modules/extras b/lib/ansible/modules/extras index e64751b0eb..317654dba5 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit e64751b0eb44c8ada6a6047eaf2303d98f8f505b +Subproject commit 317654dba5cae905b5d6eed78f5c6c6984cc2f02 From 199d6c0b8bbd62d0a378deb157b326fd2e396cc7 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 25 Nov 2014 02:30:32 -0800 Subject: [PATCH 426/813] Fix some problems with the generic quote char --- lib/ansible/module_utils/database.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/ansible/module_utils/database.py b/lib/ansible/module_utils/database.py index 3c8bdaab80..50defb15d6 100644 --- a/lib/ansible/module_utils/database.py +++ b/lib/ansible/module_utils/database.py @@ -38,7 +38,7 @@ class UnclosedQuoteError(SQLParseError): _PG_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, schema=2, table=3, column=4, role=1) _MYSQL_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, table=2, column=3, role=1, vars=1) -def _find_end_quote(identifier, quote_char='"'): +def _find_end_quote(identifier, quote_char): accumulate = 0 while True: try: @@ -60,7 +60,7 @@ def _find_end_quote(identifier, quote_char='"'): return accumulate -def _identifier_parse(identifier, quote_char='"'): +def _identifier_parse(identifier, quote_char): if not identifier: raise SQLParseError('Identifier name unspecified or unquoted trailing dot') @@ -77,10 +77,10 @@ def _identifier_parse(identifier, quote_char='"'): dot = end_quote + 1 first_identifier = identifier[:dot] next_identifier = identifier[dot+1:] - further_identifiers = _identifier_parse(next_identifier) + further_identifiers = _identifier_parse(next_identifier, quote_char) further_identifiers.insert(0, first_identifier) else: - raise SQLParseError('User escaped identifiers must escape extra double quotes') + raise SQLParseError('User escaped identifiers must escape extra quotes') else: further_identifiers = [identifier] @@ 
-99,7 +99,7 @@ def _identifier_parse(identifier, quote_char='"'): else: first_identifier = identifier[:dot] next_identifier = identifier[dot+1:] - further_identifiers = _identifier_parse(next_identifier) + further_identifiers = _identifier_parse(next_identifier, quote_char) first_identifier = first_identifier.replace(quote_char, quote_char*2) first_identifier = ''.join((quote_char, first_identifier, quote_char)) further_identifiers.insert(0, first_identifier) From d36c38c35e78ba49c3c56afe824d69d35c4bed18 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 25 Nov 2014 02:36:38 -0800 Subject: [PATCH 427/813] Exception message changes --- test/units/TestModuleUtilsDatabase.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/units/TestModuleUtilsDatabase.py b/test/units/TestModuleUtilsDatabase.py index 635eadb42c..5278d6db5a 100644 --- a/test/units/TestModuleUtilsDatabase.py +++ b/test/units/TestModuleUtilsDatabase.py @@ -63,9 +63,9 @@ class TestQuotePgIdentifier(object): ('"test"."too"."many"."dots"', 'schema'): "PostgreSQL does not support schema with more than 2 dots", ('"test"."too"."many"."dots"', 'table'): "PostgreSQL does not support table with more than 3 dots", ('"test"."too"."many"."dots"."for"."column"', 'column'): "PostgreSQL does not support column with more than 4 dots", - ('"table "invalid" double quote"', 'table'): 'User escaped identifiers must escape extra double quotes', - ('"schema "invalid"""."table "invalid"', 'table'): 'User escaped identifiers must escape extra double quotes', - ('"schema."table"','table'): 'User escaped identifiers must escape extra double quotes', + ('"table "invalid" double quote"', 'table'): 'User escaped identifiers must escape extra quotes', + ('"schema "invalid"""."table "invalid"', 'table'): 'User escaped identifiers must escape extra quotes', + ('"schema."table"','table'): 'User escaped identifiers must escape extra quotes', ('"schema".', 'table'): 'Identifier name unspecified or 
unquoted trailing dot', } From 9a77aefc338d15c5fe5c1407200cff7eeb8dfd16 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 25 Nov 2014 08:06:15 -0800 Subject: [PATCH 428/813] Special case the lone asterisk fragment in mysql --- lib/ansible/module_utils/database.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/database.py b/lib/ansible/module_utils/database.py index 50defb15d6..0dd1990d3e 100644 --- a/lib/ansible/module_utils/database.py +++ b/lib/ansible/module_utils/database.py @@ -117,4 +117,12 @@ def mysql_quote_identifier(identifier, id_type): identifier_fragments = _identifier_parse(identifier, quote_char='`') if len(identifier_fragments) > _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]: raise SQLParseError('MySQL does not support %s with more than %i dots' % (id_type, _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type])) - return '.'.join(identifier_fragments) + + special_cased_fragments = [] + for fragment in identifier_fragments: + if fragment == '`*`': + special_cased_fragments.append('*') + else: + special_cased_fragments.append(fragment) + + return '.'.join(special_cased_fragments) From 7d2937b1ccde7b49e725d774fba74f1eddfacab4 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 25 Nov 2014 11:23:22 -0500 Subject: [PATCH 429/813] minor fixes to template function - make sure it calls itself correctly, now passes same params as it recieves - vars is reserved, changed for templatevars to avoid confustion - forcing mustaches again since the removal broke 'listification' as per #9622 - fixes incorrectly successful tests using undefined var, now it is defined - now returns empty list if items is None to avoid errors --- lib/ansible/runner/__init__.py | 25 ++++++++++++++----------- lib/ansible/utils/__init__.py | 2 +- lib/ansible/utils/template.py | 16 ++++++++-------- test/units/TestUtils.py | 6 ++---- 4 files changed, 25 insertions(+), 24 deletions(-) diff --git a/lib/ansible/runner/__init__.py 
b/lib/ansible/runner/__init__.py index 0d16746255..082dd44c8a 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -723,18 +723,21 @@ class Runner(object): # strip out any jinja2 template syntax within # the data returned by the lookup plugin items = utils._clean_data_struct(items, from_remote=True) - if type(items) != list: - raise errors.AnsibleError("lookup plugins have to return a list: %r" % items) + if items is None: + items = [] + else: + if type(items) != list: + raise errors.AnsibleError("lookup plugins have to return a list: %r" % items) - if len(items) and utils.is_list_of_strings(items) and self.module_name in [ 'apt', 'yum', 'pkgng', 'zypper' ]: - # hack for apt, yum, and pkgng so that with_items maps back into a single module call - use_these_items = [] - for x in items: - inject['item'] = x - if not self.conditional or utils.check_conditional(self.conditional, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars): - use_these_items.append(x) - inject['item'] = ",".join(use_these_items) - items = None + if len(items) and utils.is_list_of_strings(items) and self.module_name in [ 'apt', 'yum', 'pkgng', 'zypper' ]: + # hack for apt, yum, and pkgng so that with_items maps back into a single module call + use_these_items = [] + for x in items: + inject['item'] = x + if not self.conditional or utils.check_conditional(self.conditional, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars): + use_these_items.append(x) + inject['item'] = ",".join(use_these_items) + items = None def _safe_template_complex_args(args, inject): # Ensure the complex args here are a dictionary, but diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index 674ca1cb11..78133f8ab6 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -1469,7 +1469,7 @@ def listify_lookup_plugin_terms(terms, basedir, inject): # if not already a list, get ready to evaluate with Jinja2 # not 
sure why the "/" is in above code :) try: - new_terms = template.template(basedir, terms, inject, convert_bare=True, fail_on_undefined=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR) + new_terms = template.template(basedir, "{{%s}}" % terms, inject, convert_bare=True, fail_on_undefined=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR) if isinstance(new_terms, basestring) and "{{" in new_terms: pass else: diff --git a/lib/ansible/utils/template.py b/lib/ansible/utils/template.py index 5146057dac..73f03afe7a 100644 --- a/lib/ansible/utils/template.py +++ b/lib/ansible/utils/template.py @@ -100,33 +100,33 @@ def lookup(name, *args, **kwargs): else: raise errors.AnsibleError("lookup plugin (%s) not found" % name) -def template(basedir, varname, vars, lookup_fatal=True, depth=0, expand_lists=True, convert_bare=False, fail_on_undefined=False, filter_fatal=True): +def template(basedir, varname, templatevars, lookup_fatal=True, depth=0, expand_lists=True, convert_bare=False, fail_on_undefined=False, filter_fatal=True): ''' templates a data structure by traversing it and substituting for other data structures ''' from ansible import utils try: if convert_bare and isinstance(varname, basestring): first_part = varname.split(".")[0].split("[")[0] - if first_part in vars and '{{' not in varname and '$' not in varname: + if first_part in templatevars and '{{' not in varname and '$' not in varname: varname = "{{%s}}" % varname - + if isinstance(varname, basestring): if '{{' in varname or '{%' in varname: - varname = template_from_string(basedir, varname, vars, fail_on_undefined) + varname = template_from_string(basedir, varname, templatevars, fail_on_undefined) if (varname.startswith("{") and not varname.startswith("{{")) or varname.startswith("["): - eval_results = utils.safe_eval(varname, locals=vars, include_exceptions=True) + eval_results = utils.safe_eval(varname, locals=templatevars, include_exceptions=True) if eval_results[1] is None: varname = eval_results[0] return varname - + elif 
isinstance(varname, (list, tuple)): - return [template(basedir, v, vars, lookup_fatal, depth, expand_lists, fail_on_undefined=fail_on_undefined) for v in varname] + return [template(basedir, v, templatevars, lookup_fatal, depth, expand_lists, convert_bare, fail_on_undefined, filter_fatal) for v in varname] elif isinstance(varname, dict): d = {} for (k, v) in varname.iteritems(): - d[k] = template(basedir, v, vars, lookup_fatal, depth, expand_lists, fail_on_undefined=fail_on_undefined) + d[k] = template(basedir, v, templatevars, lookup_fatal, depth, expand_lists, convert_bare, fail_on_undefined, filter_fatal) return d else: return varname diff --git a/test/units/TestUtils.py b/test/units/TestUtils.py index 541849fd66..d93fc70329 100644 --- a/test/units/TestUtils.py +++ b/test/units/TestUtils.py @@ -568,10 +568,8 @@ class TestUtils(unittest.TestCase): basedir = os.path.dirname(__file__) # Straight lookups - self.assertEqual(ansible.utils.listify_lookup_plugin_terms('things', basedir, dict()), - ['things']) - self.assertEqual(ansible.utils.listify_lookup_plugin_terms('things', basedir, dict(things=['one', 'two'])), - ['one', 'two']) + self.assertEqual(ansible.utils.listify_lookup_plugin_terms('things', basedir, dict(things=[])), []) + self.assertEqual(ansible.utils.listify_lookup_plugin_terms('things', basedir, dict(things=['one', 'two'])), ['one', 'two']) # Variable interpolation self.assertEqual(ansible.utils.listify_lookup_plugin_terms('things', basedir, dict(things=['{{ foo }}', '{{ bar }}'], foo="hello", bar="world")), From e3feb104c305b1b7e6e97d6be2eb4d83a43515cd Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 25 Nov 2014 15:55:53 -0500 Subject: [PATCH 430/813] fixes issues with listyfing failing too often --- lib/ansible/utils/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index 78133f8ab6..db0653f80b 100644 --- a/lib/ansible/utils/__init__.py +++ 
b/lib/ansible/utils/__init__.py @@ -1469,7 +1469,7 @@ def listify_lookup_plugin_terms(terms, basedir, inject): # if not already a list, get ready to evaluate with Jinja2 # not sure why the "/" is in above code :) try: - new_terms = template.template(basedir, "{{%s}}" % terms, inject, convert_bare=True, fail_on_undefined=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR) + new_terms = template.template(basedir, "{{%s}}" % terms, inject, convert_bare=True) if isinstance(new_terms, basestring) and "{{" in new_terms: pass else: From 968c56d7654b8609ddd0449295503c564446d8f3 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 25 Nov 2014 12:06:58 -0800 Subject: [PATCH 431/813] Expand playbook_dir in time for loading tasks Fixes #9618 --- lib/ansible/playbook/play.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index 0dcbca8684..f63d710be3 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -79,17 +79,21 @@ class Play(object): elif type(self.tags) != list: self.tags = [] - # make sure we have some special internal variables set - self.vars['playbook_dir'] = os.path.abspath(self.basedir) + # make sure we have some special internal variables set, which + # we use later when loading tasks and handlers + load_vars = dict() + load_vars['playbook_dir'] = os.path.abspath(self.basedir) if self.playbook.inventory.basedir() is not None: - self.vars['inventory_dir'] = self.playbook.inventory.basedir() + load_vars['inventory_dir'] = self.playbook.inventory.basedir() if self.playbook.inventory.src() is not None: - self.vars['inventory_file'] = self.playbook.inventory.src() + load_vars['inventory_file'] = self.playbook.inventory.src() # template the play vars with themselves and the extra vars # from the playbook, to make sure they're correct all_vars = utils.combine_vars(self.vars, self.playbook.extra_vars) + all_vars = utils.combine_vars(all_vars, load_vars) self.vars = 
template(basedir, self.vars, all_vars) + self.vars = utils.combine_vars(self.vars, load_vars) # We first load the vars files from the datastructure # so we have the default variables to pass into the roles @@ -157,8 +161,7 @@ class Play(object): raise errors.AnsibleError('sudo params ("sudo", "sudo_user") and su params ' '("su", "su_user") cannot be used together') - load_vars = {} - load_vars['role_names'] = ds.get('role_names',[]) + load_vars['role_names'] = ds.get('role_names', []) self._tasks = self._load_tasks(self._ds.get('tasks', []), load_vars) self._handlers = self._load_tasks(self._ds.get('handlers', []), load_vars) From db145a368d94e3665fd4abec18e2db72cf6fc594 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 25 Nov 2014 16:12:15 -0500 Subject: [PATCH 432/813] now only flattened ignores failonundefined cause of it's special need --- lib/ansible/runner/lookup_plugins/flattened.py | 2 +- lib/ansible/utils/__init__.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/ansible/runner/lookup_plugins/flattened.py b/lib/ansible/runner/lookup_plugins/flattened.py index 831b2e9130..b93573fe40 100644 --- a/lib/ansible/runner/lookup_plugins/flattened.py +++ b/lib/ansible/runner/lookup_plugins/flattened.py @@ -50,7 +50,7 @@ class LookupModule(object): if isinstance(term, basestring): # convert a variable to a list - term2 = utils.listify_lookup_plugin_terms(term, self.basedir, inject) + term2 = utils.listify_lookup_plugin_terms(term, self.basedir, inject, fail_on_undefined=False) # but avoid converting a plain string to a list of one string if term2 != [ term ]: term = term2 diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index db0653f80b..1bfe16c5b9 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -1451,7 +1451,7 @@ def safe_eval(expr, locals={}, include_exceptions=False): return expr -def listify_lookup_plugin_terms(terms, basedir, inject): +def 
listify_lookup_plugin_terms(terms, basedir, inject, fail_on_undefined=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR): from ansible.utils import template @@ -1469,7 +1469,7 @@ def listify_lookup_plugin_terms(terms, basedir, inject): # if not already a list, get ready to evaluate with Jinja2 # not sure why the "/" is in above code :) try: - new_terms = template.template(basedir, "{{%s}}" % terms, inject, convert_bare=True) + new_terms = template.template(basedir, "{{%s}}" % terms, inject, convert_bare=True, fail_on_undefined=fail_on_undefined) if isinstance(new_terms, basestring) and "{{" in new_terms: pass else: From da7e75b8a95235ec0e7eab42d07de5c729dada01 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Tue, 25 Nov 2014 16:17:05 -0500 Subject: [PATCH 433/813] Update CHANGELOG.md --- CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8beea7f154..6c3f5d9b7e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,9 @@ Major changes: * new omit value can be used to leave off a parameter when not set, like so module_name: a=1 b={{ c | default(omit) }}, would not pass value for b (not even an empty value) if c was not set. * developers: 'baby JSON' in module responses, originally intended for writing modules in bash, is removed as a feature to simplify logic, script module remains available for running bash scripts. * async jobs started in "fire & forget" mode can now be checked on at a later time. +* added ability to subcategorize modules for docs.ansible.com +* added ability for shipped modules to have aliases with symlinks +* added ability to deprecate older modules by starting with "_" and including "deprecated: message why" in module docs New Modules: @@ -33,6 +36,7 @@ New Modules: Some other notable changes: * added the ability to set "instance filters" in the ec2.ini to limit results from the inventory plugin. 
+* upgrades for various variable precedence items and parsing related items * added a new "follow" parameter to the file and copy modules, which allows actions to be taken on the target of a symlink rather than the symlink itself. * if a module should ever traceback, it will return a standard error, catchable by ignore_errors, versus an 'unreachable' * ec2_lc: added support for multiple new parameters like kernel_id, ramdisk_id and ebs_optimized. @@ -69,6 +73,7 @@ Some other notable changes: - As a small side effect, the fetch module no longer returns a useful value in remote_md5. If you need a replacement, switch to using remote_checksum which returns the sha1sum of the remote file. +* ansible-doc CLI tool contains various improvements for working with different terminals And various other bug fixes and improvements ... From 75d05168e6c9049aaf543082bfd373764bbee7ec Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 25 Nov 2014 16:49:45 -0600 Subject: [PATCH 434/813] Bumping files for 1.9 --- CHANGELOG.md | 11 ++++++++++- RELEASES.txt | 3 ++- VERSION | 2 +- lib/ansible/__init__.py | 2 +- packaging/debian/changelog | 10 ++++++++-- packaging/rpm/ansible.spec | 3 +++ 6 files changed, 25 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6c3f5d9b7e..16c41a3141 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,7 +1,16 @@ Ansible Changes By Release ========================== -## 1.8 "You Really Got Me" - Active Development +## 1.9 "Dancin In the Streets" - ACTIVE DEVELOPMENT + +Major Changes: + +New Modules: + +Some other notable changes: + + +## 1.8 "You Really Got Me" - Nov 25, 2014 Major changes: diff --git a/RELEASES.txt b/RELEASES.txt index 72323a146f..fe64ddcf06 100644 --- a/RELEASES.txt +++ b/RELEASES.txt @@ -4,11 +4,12 @@ Ansible Releases at a Glance Active Development ++++++++++++++++++ -1.8 "You Really Got Me" ---- FALL 2014 +1.9 "Dancing In the Streets" WINTER 2015 Released ++++++++ +1.8 "You Really Got Me" ---- 11-25-2014 
1.7.2 "Summer Nights" -------- 09-24-2014 1.7.1 "Summer Nights" -------- 08-14-2014 1.7 "Summer Nights" -------- 08-06-2014 diff --git a/VERSION b/VERSION index 6259340971..2e0e38c63a 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.8 +1.9 diff --git a/lib/ansible/__init__.py b/lib/ansible/__init__.py index 2585fdc30f..27e79a41ca 100644 --- a/lib/ansible/__init__.py +++ b/lib/ansible/__init__.py @@ -14,5 +14,5 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -__version__ = '1.8' +__version__ = '1.9' __author__ = 'Michael DeHaan' diff --git a/packaging/debian/changelog b/packaging/debian/changelog index 168b519dbc..5b7cb7c2f7 100644 --- a/packaging/debian/changelog +++ b/packaging/debian/changelog @@ -1,9 +1,15 @@ -ansible (1.8) unstable; urgency=low +ansible (1.9) unstable; urgency=low - * 1.8 release (PENDING) + * 1.9 release (PENDING) -- Michael DeHaan Wed, 21 Oct 2015 04:29:00 -0500 +ansible (1.8) unstable; urgency=low + + * 1.8 release + + -- Michael DeHaan Tue, 25 Nov 2014 17:00:00 -0500 + ansible (1.7.2) unstable; urgency=low * 1.7.2 release diff --git a/packaging/rpm/ansible.spec b/packaging/rpm/ansible.spec index c6b85fd1ab..71061b601b 100644 --- a/packaging/rpm/ansible.spec +++ b/packaging/rpm/ansible.spec @@ -110,6 +110,9 @@ rm -rf %{buildroot} %changelog +* Tue Nov 25 2014 Michael DeHaan - 1.8.0 +- Release 1.8.0 + * Wed Sep 24 2014 Michael DeHaan - 1.7.2 - Release 1.7.2 From b992a183787ecbffe8d8a98b2001637b40d54d97 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 25 Nov 2014 15:35:16 -0800 Subject: [PATCH 435/813] Really update to a newer core module version :-) --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 10ebcccedb..480b68b6f8 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 10ebcccedb542c7e1c499e77a1f53da98d373bc3 +Subproject 
commit 480b68b6f860a4d59479ac90544dba34c03d8461 From 704f7d7b40a346c2b9202ab176a3d1afb6e65332 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 25 Nov 2014 16:20:50 -0800 Subject: [PATCH 436/813] Fix for a traceback in the git module --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 480b68b6f8..41559311d8 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 480b68b6f860a4d59479ac90544dba34c03d8461 +Subproject commit 41559311d8e330d369c764f42c0e0396f626f177 From f27ffdcbf02bcfc900615f58886be77d3b452c3b Mon Sep 17 00:00:00 2001 From: Adrian Lopez Date: Wed, 26 Nov 2014 14:41:39 +0100 Subject: [PATCH 437/813] Is it not possible to set a comment in the same line --- docsite/rst/playbooks_variables.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index 253cee2ba4..84f0a1f5b5 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -759,7 +759,8 @@ To configure fact caching, enable it in ansible.cfg as follows:: [defaults] fact_caching = redis - fact_caching_timeout = 86400 # seconds + fact_caching_timeout = 86400 + # seconds At the time of writing, Redis is the only supported fact caching engine. 
To get redis up and running, perform the equivalent OS commands:: From bc505050b083808320e8c869567a36772727898d Mon Sep 17 00:00:00 2001 From: Hagai Kariti Date: Wed, 26 Nov 2014 15:45:38 +0200 Subject: [PATCH 438/813] Don't template play vars by themselves, it's too early --- lib/ansible/playbook/play.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index f63d710be3..a9700b705b 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -88,13 +88,6 @@ class Play(object): if self.playbook.inventory.src() is not None: load_vars['inventory_file'] = self.playbook.inventory.src() - # template the play vars with themselves and the extra vars - # from the playbook, to make sure they're correct - all_vars = utils.combine_vars(self.vars, self.playbook.extra_vars) - all_vars = utils.combine_vars(all_vars, load_vars) - self.vars = template(basedir, self.vars, all_vars) - self.vars = utils.combine_vars(self.vars, load_vars) - # We first load the vars files from the datastructure # so we have the default variables to pass into the roles self.vars_files = ds.get('vars_files', []) From 4afa7ca1b1b304f883c9ecd92fc6e16794602b58 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 26 Nov 2014 08:34:38 -0800 Subject: [PATCH 439/813] Refresh the core modules to pull in mysql fix --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 41559311d8..7dd2859f9b 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 41559311d8e330d369c764f42c0e0396f626f177 +Subproject commit 7dd2859f9b13e9df3baa9f2ef947e3630a6e7dbc From c697d01151ad485a71936bab06d99f071532ef3f Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 26 Nov 2014 10:55:37 -0800 Subject: [PATCH 440/813] Integration tests for #9242 and #9640 --- test/integration/Makefile | 2 +- 
test/integration/inventory | 4 ++++ test/integration/test_var_precedence.yml | 7 +++++++ 3 files changed, 12 insertions(+), 1 deletion(-) diff --git a/test/integration/Makefile b/test/integration/Makefile index b732eb02f8..77c81a76b9 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -57,7 +57,7 @@ test_hash: ANSIBLE_HASH_BEHAVIOUR=merge ansible-playbook test_hash.yml -i $(INVENTORY) $(CREDENTIALS_ARG) -v -e '{"test_hash":{"extra_args":"this is an extra arg"}}' test_var_precedence: - ansible-playbook test_var_precedence.yml -i $(INVENTORY) $(CREDENTIALS_ARG) -v -e 'extra_var=extra_var' + ansible-playbook test_var_precedence.yml -i $(INVENTORY) $(CREDENTIALS_ARG) -v -e 'extra_var=extra_var' -e 'extra_var_override=extra_var_override' test_vault: ansible-playbook test_vault.yml -i $(INVENTORY) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) --vault-password-file $(VAULT_PASSWORD_FILE) --list-tasks diff --git a/test/integration/inventory b/test/integration/inventory index 59bb395205..a9f160c989 100644 --- a/test/integration/inventory +++ b/test/integration/inventory @@ -9,6 +9,10 @@ testhost2 ansible_ssh_host=127.0.0.1 ansible_connection=local [inven_overridehosts] invenoverride ansible_ssh_host=127.0.0.1 ansible_connection=local +[all:vars] +extra_var_override=FROM_INVENTORY +inven_var=inventory_var + [inven_overridehosts:vars] foo=foo var_dir=vars diff --git a/test/integration/test_var_precedence.yml b/test/integration/test_var_precedence.yml index bbe89a872c..8bddfff447 100644 --- a/test/integration/test_var_precedence.yml +++ b/test/integration/test_var_precedence.yml @@ -4,6 +4,8 @@ - vars_var: "vars_var" - param_var: "BAD!" - vars_files_var: "BAD!" 
+ - extra_var_override_once_removed: "{{ extra_var_override }}" + - from_inventory_once_removed: "{{ inven_var | default('BAD!') }}" vars_files: - vars/test_var_precedence.yml roles: @@ -15,17 +17,22 @@ - name: use set_fact to override the registered_var set_fact: registered_var="this is from set_fact" - debug: var=extra_var + - debug: var=extra_var_override_once_removed - debug: var=vars_var - debug: var=vars_files_var - debug: var=vars_files_var_role - debug: var=registered_var + - debug: var=from_inventory_once_removed - assert: that: - 'extra_var == "extra_var"' + - 'extra_var_override == "extra_var_override"' + - 'extra_var_override_once_removed == "extra_var_override"' - 'vars_var == "vars_var"' - 'vars_files_var == "vars_files_var"' - 'vars_files_var_role == "vars_files_var_role3"' - 'registered_var == "this is from set_fact"' + - 'from_inventory_once_removed == "inventory_var"' - hosts: inven_overridehosts vars_files: From ae054dbc4044ef83546677492723b29ef198dee5 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 26 Nov 2014 14:46:45 -0800 Subject: [PATCH 441/813] Pull in a fix for specifying a single role attribute for postgresql users --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 7dd2859f9b..2a794fa776 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 7dd2859f9b13e9df3baa9f2ef947e3630a6e7dbc +Subproject commit 2a794fa77693a58ed0c2585d3f70f686c38dbe93 From 9a5cbf747a3209bd91aa61b36eaa0d0813a3295e Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 26 Nov 2014 22:06:37 -0500 Subject: [PATCH 442/813] fine tuned lookup/templating errors --- lib/ansible/runner/lookup_plugins/flattened.py | 4 ++-- lib/ansible/utils/__init__.py | 6 ++++-- lib/ansible/utils/template.py | 6 ++++-- 3 files changed, 10 insertions(+), 6 deletions(-) diff --git a/lib/ansible/runner/lookup_plugins/flattened.py 
b/lib/ansible/runner/lookup_plugins/flattened.py index b93573fe40..6d9dd613be 100644 --- a/lib/ansible/runner/lookup_plugins/flattened.py +++ b/lib/ansible/runner/lookup_plugins/flattened.py @@ -50,7 +50,7 @@ class LookupModule(object): if isinstance(term, basestring): # convert a variable to a list - term2 = utils.listify_lookup_plugin_terms(term, self.basedir, inject, fail_on_undefined=False) + term2 = utils.listify_lookup_plugin_terms(term, self.basedir, inject) # but avoid converting a plain string to a list of one string if term2 != [ term ]: term = term2 @@ -59,7 +59,7 @@ class LookupModule(object): # if it's a list, check recursively for items that are a list term = self.flatten(term, inject) ret.extend(term) - else: + else: ret.append(term) return ret diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index 1bfe16c5b9..1541be5783 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -1451,7 +1451,7 @@ def safe_eval(expr, locals={}, include_exceptions=False): return expr -def listify_lookup_plugin_terms(terms, basedir, inject, fail_on_undefined=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR): +def listify_lookup_plugin_terms(terms, basedir, inject): from ansible.utils import template @@ -1469,11 +1469,13 @@ def listify_lookup_plugin_terms(terms, basedir, inject, fail_on_undefined=C.DEFA # if not already a list, get ready to evaluate with Jinja2 # not sure why the "/" is in above code :) try: - new_terms = template.template(basedir, "{{%s}}" % terms, inject, convert_bare=True, fail_on_undefined=fail_on_undefined) + new_terms = template.template(basedir, terms, inject, convert_bare=True, fail_on_undefined=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR) if isinstance(new_terms, basestring) and "{{" in new_terms: pass else: terms = new_terms + except errors.AnsibleUndefinedVariable: + raise except jinja2.exceptions.UndefinedError, e: raise errors.AnsibleUndefinedVariable('undefined variable in items: %s' % e) except: diff --git 
a/lib/ansible/utils/template.py b/lib/ansible/utils/template.py index 73f03afe7a..c2b14d8454 100644 --- a/lib/ansible/utils/template.py +++ b/lib/ansible/utils/template.py @@ -86,12 +86,14 @@ JINJA2_ALLOWED_OVERRIDES = ['trim_blocks', 'lstrip_blocks', 'newline_sequence', def lookup(name, *args, **kwargs): from ansible import utils instance = utils.plugins.lookup_loader.get(name.lower(), basedir=kwargs.get('basedir',None)) - vars = kwargs.get('vars', None) + tvars = kwargs.get('vars', None) if instance is not None: # safely catch run failures per #5059 try: - ran = instance.run(*args, inject=vars, **kwargs) + ran = instance.run(*args, inject=tvars, **kwargs) + except errors.AnsibleUndefinedVariable: + raise except Exception, e: ran = None if ran: From a68a90f01dd15831315e5c176ecbb2ae5fd21bb6 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Wed, 26 Nov 2014 23:23:59 -0500 Subject: [PATCH 443/813] codename fix :) --- CHANGELOG.md | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 16c41a3141..24f331c83b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,14 +1,9 @@ Ansible Changes By Release ========================== -## 1.9 "Dancin In the Streets" - ACTIVE DEVELOPMENT - -Major Changes: - -New Modules: - -Some other notable changes: +## 1.9 "Dancing In the Street" - ACTIVE DEVELOPMENT +in progress, details pending ## 1.8 "You Really Got Me" - Nov 25, 2014 From 43d7f1210112a845995dfba9051b6a015ff0db66 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Wed, 26 Nov 2014 23:24:29 -0500 Subject: [PATCH 444/813] codename fix --- RELEASES.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/RELEASES.txt b/RELEASES.txt index fe64ddcf06..8153dab565 100644 --- a/RELEASES.txt +++ b/RELEASES.txt @@ -4,7 +4,7 @@ Ansible Releases at a Glance Active Development ++++++++++++++++++ -1.9 "Dancing In the Streets" WINTER 2015 +1.9 "Dancing In the Street - in progress Released ++++++++ From 
f64f564fc0b10b5145b0106a73a9029bb19c0268 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Wed, 26 Nov 2014 23:24:45 -0500 Subject: [PATCH 445/813] missing endquote --- RELEASES.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/RELEASES.txt b/RELEASES.txt index 8153dab565..5bc9399822 100644 --- a/RELEASES.txt +++ b/RELEASES.txt @@ -4,7 +4,7 @@ Ansible Releases at a Glance Active Development ++++++++++++++++++ -1.9 "Dancing In the Street - in progress +1.9 "Dancing In the Street" - in progress Released ++++++++ From bcc2d755433d3fef8b577dd812b6664fa3a56147 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Wed, 26 Nov 2014 23:29:05 -0500 Subject: [PATCH 446/813] Have changelog reflect 1.8.1 on devel branch --- CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 24f331c83b..9a6668557d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,13 @@ Ansible Changes By Release in progress, details pending +## 1.8.1 "You Really Got Me" - Nov 26, 2014 + +* Various bug fixes in postgresql and mysql modules. +* Fixed a bug related to lookup plugins used within roles not finding files based on the relative paths to the roles files/ directory. +* Fixed a bug related to vars specified in plays being templated too early, resulting in incorrect variable interpolation. +* Fixed a bug related to git submodules in bare repos. 
+ ## 1.8 "You Really Got Me" - Nov 25, 2014 Major changes: From 86202b9fe3d01e693124f8873ae9fe4e32afcd46 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Wed, 26 Nov 2014 23:29:29 -0500 Subject: [PATCH 447/813] Update releases --- RELEASES.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/RELEASES.txt b/RELEASES.txt index 5bc9399822..ddcce78efa 100644 --- a/RELEASES.txt +++ b/RELEASES.txt @@ -9,7 +9,7 @@ Active Development Released ++++++++ -1.8 "You Really Got Me" ---- 11-25-2014 +1.8.1 "You Really Got Me" -- 11-26-2014 1.7.2 "Summer Nights" -------- 09-24-2014 1.7.1 "Summer Nights" -------- 08-14-2014 1.7 "Summer Nights" -------- 08-06-2014 From 466fa8b3d8b9de2c46ef97f17dc5c4a7e2d53ac0 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Thu, 27 Nov 2014 19:53:37 -0500 Subject: [PATCH 448/813] Make sure .git history doesn't show up in distribution --- MANIFEST.in | 2 ++ 1 file changed, 2 insertions(+) diff --git a/MANIFEST.in b/MANIFEST.in index 5fdfe50f34..9e76e56a65 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -7,3 +7,5 @@ recursive-include docs * include Makefile include VERSION include MANIFEST.in +exclude lib/ansible/modules/core/.git +exclude lib/ansible/modules/extras/.git From 2c364a1d4c26d6e04a7117c1573a704265f97f24 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Thu, 27 Nov 2014 20:07:24 -0500 Subject: [PATCH 449/813] prune vs exclude --- MANIFEST.in | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/MANIFEST.in b/MANIFEST.in index 9e76e56a65..948d176139 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -7,5 +7,5 @@ recursive-include docs * include Makefile include VERSION include MANIFEST.in -exclude lib/ansible/modules/core/.git -exclude lib/ansible/modules/extras/.git +prune lib/ansible/modules/core/.git +prune lib/ansible/modules/extras/.git From 8665f94ecb2805dcb861e4d7e75629cd975f4a6c Mon Sep 17 00:00:00 2001 From: Chris Church Date: Fri, 28 Nov 2014 09:52:39 -0500 Subject: [PATCH 450/813] Make sure 
Windows modules are installed. --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index fd3fb0a8a3..0d1f677ab7 100644 --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ setup(name='ansible', package_dir={ 'ansible': 'lib/ansible' }, packages=find_packages('lib'), package_data={ - '': ['module_utils/*.ps1'], + '': ['module_utils/*.ps1', 'modules/core/windows/*.ps1'], }, scripts=[ 'bin/ansible', From a1c529488251150bba267b916f1f7b8ae4b42117 Mon Sep 17 00:00:00 2001 From: follower Date: Sat, 29 Nov 2014 14:55:25 +1300 Subject: [PATCH 451/813] Fix misspelled "necessarily" --- hacking/module_formatter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py index 04f098fc98..0a7d1c884c 100755 --- a/hacking/module_formatter.py +++ b/hacking/module_formatter.py @@ -384,7 +384,7 @@ def process_category(category, categories, options, env, template, outputname): category_file.write("""\n\n .. note:: - %s: This marks a module as deprecated, which means a module is kept for backwards compatibility but usage is discouraged. The module documentation details page may explain more about this rationale. - - %s: This marks a module as 'extras', which means it ships with ansible but may be a newer module and possibly (but not neccessarily) less activity maintained than 'core' modules. + - %s: This marks a module as 'extras', which means it ships with ansible but may be a newer module and possibly (but not necessarily) less activity maintained than 'core' modules. - Tickets filed on modules are filed to different repos than those on the main open source project. 
Core module tickets should be filed at `ansible/ansible-modules-core on GitHub `_, extras tickets to `ansible/ansible-modules-extras on GitHub `_ """ % (DEPRECATED, NOTCORE)) category_file.close() From f80e766d979a2bb469fa799db2aadad4ef3df1e4 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Sat, 29 Nov 2014 18:11:10 -0500 Subject: [PATCH 452/813] Need to include extras in setup to accomodate future windows extras modules --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 0d1f677ab7..d4ac0c3d4d 100644 --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ setup(name='ansible', package_dir={ 'ansible': 'lib/ansible' }, packages=find_packages('lib'), package_data={ - '': ['module_utils/*.ps1', 'modules/core/windows/*.ps1'], + '': ['module_utils/*.ps1', 'modules/core/windows/*.ps1', 'modules/extras/windows/*.ps1'], }, scripts=[ 'bin/ansible', From 23d959db713de9f08957e0f868e7df100f4f1314 Mon Sep 17 00:00:00 2001 From: Thomas Quinot Date: Sun, 30 Nov 2014 10:33:53 +0100 Subject: [PATCH 453/813] Report location (filename and line number) for inventory syntax errors When AnsibleError is raised for a syntax error in an inventory file, report filename and line number to help pinpointing the error. 
--- lib/ansible/inventory/ini.py | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/lib/ansible/inventory/ini.py b/lib/ansible/inventory/ini.py index 3848696006..2c05253bb3 100644 --- a/lib/ansible/inventory/ini.py +++ b/lib/ansible/inventory/ini.py @@ -36,6 +36,7 @@ class InventoryParser(object): def __init__(self, filename=C.DEFAULT_HOST_LIST): with open(filename) as fh: + self.filename = filename self.lines = fh.readlines() self.groups = {} self.hosts = {} @@ -87,8 +88,8 @@ class InventoryParser(object): self.groups = dict(all=all, ungrouped=ungrouped) active_group_name = 'ungrouped' - for line in self.lines: - line = utils.before_comment(line).strip() + for lineno in range(len(self.lines)): + line = utils.before_comment(self.lines[lineno]).strip() if line.startswith("[") and line.endswith("]"): active_group_name = line.replace("[","").replace("]","") if ":vars" in line or ":children" in line: @@ -142,7 +143,7 @@ class InventoryParser(object): try: (k,v) = t.split("=", 1) except ValueError, e: - raise errors.AnsibleError("Invalid ini entry: %s - %s" % (t, str(e))) + raise errors.AnsibleError("%s:%s: Invalid ini entry: %s - %s" % (self.filename, lineno + 1, t, str(e))) host.set_variable(k, self._parse_value(v)) self.groups[active_group_name].add_host(host) @@ -153,8 +154,8 @@ class InventoryParser(object): def _parse_group_children(self): group = None - for line in self.lines: - line = line.strip() + for lineno in range(len(self.lines)): + line = self.lines[lineno].strip() if line is None or line == '': continue if line.startswith("[") and ":children]" in line: @@ -169,7 +170,7 @@ class InventoryParser(object): elif group: kid_group = self.groups.get(line, None) if kid_group is None: - raise errors.AnsibleError("child group is not defined: (%s)" % line) + raise errors.AnsibleError("%s:%d: child group is not defined: (%s)" % (self.filename, lineno + 1, line)) else: group.add_child_group(kid_group) @@ -180,13 +181,13 @@ class 
InventoryParser(object): def _parse_group_variables(self): group = None - for line in self.lines: - line = line.strip() + for lineno in range(len(self.lines)): + line = self.lines[lineno].strip() if line.startswith("[") and ":vars]" in line: line = line.replace("[","").replace(":vars]","") group = self.groups.get(line, None) if group is None: - raise errors.AnsibleError("can't add vars to undefined group: %s" % line) + raise errors.AnsibleError("%s:%d: can't add vars to undefined group: %s" % (self.filename, lineno + 1, line)) elif line.startswith("#") or line.startswith(";"): pass elif line.startswith("["): @@ -195,7 +196,7 @@ class InventoryParser(object): pass elif group: if "=" not in line: - raise errors.AnsibleError("variables assigned to group must be in key=value form") + raise errors.AnsibleError("%s:%d: variables assigned to group must be in key=value form" % (self.filename, lineno + 1)) else: (k, v) = [e.strip() for e in line.split("=", 1)] group.set_variable(k, self._parse_value(v)) From 9ee367e0441891812fa96bad8bdf010342fef991 Mon Sep 17 00:00:00 2001 From: Tomasz Kontusz Date: Sun, 30 Nov 2014 14:55:59 +0100 Subject: [PATCH 454/813] Add mock and nose to requirements for running unit tests --- test/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/README.md b/test/README.md index 3e746062cd..bb3f229d1f 100644 --- a/test/README.md +++ b/test/README.md @@ -12,7 +12,7 @@ mock interfaces rather than producing side effects. Playbook engine code is better suited for integration tests. 
-Requirements: sudo pip install paramiko PyYAML jinja2 httplib2 passlib +Requirements: sudo pip install paramiko PyYAML jinja2 httplib2 passlib nose mock integration ----------- From e61e8a37f50860534610ef767315d70ba61583a2 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 1 Dec 2014 11:51:09 -0600 Subject: [PATCH 455/813] Use extra vars when creating HostVars Fixes #9667 --- lib/ansible/runner/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 082dd44c8a..ce61e7d90f 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -668,7 +668,7 @@ class Runner(object): ''' executes any module one or more times ''' inject = self.get_inject_vars(host) - hostvars = HostVars(inject['combined_cache'], self.inventory, vault_password=self.vault_pass) + hostvars = HostVars(utils.merge_hash(inject['combined_cache'], self.extra_vars), self.inventory, vault_password=self.vault_pass) inject['hostvars'] = hostvars host_connection = inject.get('ansible_connection', self.transport) From 86b21a1b8d00d50f2d90416a05329f2e7e403345 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 1 Dec 2014 10:46:22 -0800 Subject: [PATCH 456/813] Integration tests for https://github.com/ansible/ansible-modules-core/issues/416 --- lib/ansible/modules/core | 2 +- .../roles/test_mysql_user/tasks/main.yml | 28 +++++++++++++++++++ 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 2a794fa776..3a80b734e6 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 2a794fa77693a58ed0c2585d3f70f686c38dbe93 +Subproject commit 3a80b734e6e4c1ebe8cbd40b4957a7589520caf5 diff --git a/test/integration/roles/test_mysql_user/tasks/main.yml b/test/integration/roles/test_mysql_user/tasks/main.yml index 7ad42d471b..cdfb7c4950 100644 --- 
a/test/integration/roles/test_mysql_user/tasks/main.yml +++ b/test/integration/roles/test_mysql_user/tasks/main.yml @@ -118,6 +118,34 @@ - include: remove_user.yml user_name={{user_name_2}} user_password={{ user_password_1 }} +- name: give user access to database via wildcard + mysql_user: name={{ user_name_1 }} priv=%db.*:SELECT append_privs=yes password={{ user_password_1 }} + +- name: show grants access for user1 on multiple database + command: mysql "-e SHOW GRANTS FOR '{{ user_name_1 }}'@'localhost';" + register: result + +- name: assert grant access for user1 on multiple database + assert: + that: + - "'%db' in result.stdout" + - "'SELECT' in result.stdout" + +- name: change user access to database via wildcard + mysql_user: name={{ user_name_1 }} priv=%db.*:INSERT append_privs=yes password={{ user_password_1 }} + +- name: show grants access for user1 on multiple database + command: mysql "-e SHOW GRANTS FOR '{{ user_name_1 }}'@'localhost';" + register: result + +- name: assert grant access for user1 on multiple database + assert: + that: + - "'%db' in result.stdout" + - "'INSERT' in result.stdout" + +- include: remove_user.yml user_name={{user_name_1}} user_password={{ user_password_1 }} + # ============================================================ # Update user password for a user. # Assert the user password is updated and old password can no longer be used. From fe062419862e97cf658c11d41da273a8fc8819e8 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Mon, 1 Dec 2014 11:56:52 -0500 Subject: [PATCH 457/813] AWS Guide overhaul, WIP. --- docsite/rst/guide_aws.rst | 352 ++++++++++-------------- docsite/rst/intro_dynamic_inventory.rst | 4 +- 2 files changed, 142 insertions(+), 214 deletions(-) diff --git a/docsite/rst/guide_aws.rst b/docsite/rst/guide_aws.rst index 3456a2f4bc..e1bb2e5c83 100644 --- a/docsite/rst/guide_aws.rst +++ b/docsite/rst/guide_aws.rst @@ -6,120 +6,141 @@ Amazon Web Services Guide Introduction ```````````` -.. 
note:: This section of the documentation is under construction. We are in the process of adding more examples about all of the EC2 modules - and how they work together. There's also an ec2 example in the language_features directory of `the ansible-examples github repository `_ that you may wish to consult. Once complete, there will also be new examples of ec2 in ansible-examples. - -Ansible contains a number of core modules for interacting with Amazon Web Services (AWS). These also work with Eucalyptus, which is an AWS compatible private cloud solution. There are other supported cloud types, but this documentation chapter is about AWS API clouds. The purpose of this +Ansible contains a number of modules for controlling Amazon Web Services (AWS). The purpose of this section is to explain how to put Ansible modules together (and use inventory scripts) to use Ansible in AWS context. -Requirements for the AWS modules are minimal. All of the modules require and are tested against boto 2.5 or higher. You'll need this Python module installed on the execution host. If you are using Red Hat Enterprise Linux or CentOS, install boto from `EPEL `_: +Requirements for the AWS modules are minimal. -.. code-block:: bash +All of the modules require and are tested against recent versions of boto. You'll need this Python module installed on your control machine. Boto can be installed from your OS distribution or python's "pip install boto". - $ yum install python-boto +Whereas classically ansible will execute tasks in it's host loop against multiple remote machines, most cloud-control steps occur on your local machine with reference to the regions to control. -You can also install it via pip if you want. - -The following steps will often execute outside the host loop, so it makes sense to add localhost to inventory. 
Ansible -may not require this step in the future:: - - [local] - localhost - -And in your playbook steps we'll typically be using the following pattern for provisioning steps:: +In your playbook steps we'll typically be using the following pattern for provisioning steps:: - hosts: localhost connection: local gather_facts: False + tasks: + - ... + +.. _aws_authentication: + +Authentication +`````````````` + +Authentication with the AWS-related modules is handled by either +specifying your access and secret key as ENV variables or module arguments. + +For environment variables:: + + export AWS_ACCESS_KEY_ID='AK123' + export AWS_SECRET_ACCESS_KEY='abc123' + +For storing these in a vars_file, ideally encrypted with ansible-vault:: + + --- + ec2_access_key: "--REMOVED--" + ec2_secret_key: "--REMOVED--" .. _aws_provisioning: Provisioning ```````````` -The ec2 module provides the ability to provision instances within EC2. Typically the provisioning task will be performed against your Ansible master server in a play that operates on localhost using the ``local`` connection type. If you are doing an EC2 operation mid-stream inside a regular play operating on remote hosts, you may want to use the ``local_action`` keyword for that particular task. Read :doc:`playbooks_delegation` for more about local actions. +The ec2 module provisions and de-provisions instances within EC2. -.. note:: +An example of making sure there are only 5 instances tagged 'Demo' in EC2 follows. - Authentication with the AWS-related modules is handled by either - specifying your access and secret key as ENV variables or passing - them as module arguments. +In the example below, the "exact_count" of instances is set to 5. This means if there are 0 instances already existing, then +5 new instances would be created. If there were 2 instances, only 3 would be created, and if there were 8 instances, 3 instances would +be terminated. -.. note:: +What is being counted is specified by the "count_tag" parameter. 
The parameter "instance_tags" is used to apply tags to the newly created +instance. - To talk to specific endpoints, the environmental variable EC2_URL - can be set. This is useful if using a private cloud like Eucalyptus, - exporting the variable as EC2_URL=https://myhost:8773/services/Eucalyptus. - This can be set using the 'environment' keyword in Ansible if you like. + - hosts: localhost + gather_facts: False -Here is an example of provisioning a number of instances in ad-hoc mode: + tasks: -.. code-block:: bash + - name: Provision a set of instances + ec2: + key_name: my_key + group: test + instance_type: t2.micro + image: "{{ ami_id }}" + wait: true + exact_count: 5 + count_tag: + Name: Demo + instance_tags: + Name: Demo + register: ec2 - # ansible localhost -m ec2 -a "image=ami-6e649707 instance_type=m1.large keypair=mykey group=webservers wait=yes" -c local +The data about what instances are created is being saved by the "register" keyword in the variable named "ec2". -In a play, this might look like (assuming the parameters are held as vars):: +From this, we'll use the add_host module to dynamically create a host group consisting of these new instances. 
This facilitates performing configuration actions on the hosts immediately in a subsequent task:: + + # demo_setup.yml + + - hosts: localhost + gather_facts: False + + tasks: + + - name: Provision a set of instances + ec2: + key_name: my_key + group: test + instance_type: t2.micro + image: "{{ ami_id }}" + wait: true + exact_count: 5 + count_tag: + Name: Demo + instance_tags: + Name: Demo + + - name: Add all instance public IPs to host group + add_host: hostname={{ item.public_ip }} groupname=ec2hosts + with_items: ec2.instances + +With the host group now created, a second play at the bottom of the the same provisioning playbook file might now have some configuration steps:: + + # demo_setup.yml - tasks: - name: Provision a set of instances - ec2: > - keypair={{mykeypair}} - group={{security_group}} - instance_type={{instance_type}} - image={{image}} - wait=true - count={{number}} - register: ec2 + hosts: localhost + # ... AS ABOVE ... - -By registering the return its then possible to dynamically create a host group consisting of these new instances. This facilitates performing configuration actions on the hosts immediately in a subsequent task:: - - - name: Add all instance public IPs to host group - add_host: hostname={{ item.public_ip }} groupname=ec2hosts - with_items: ec2.instances - -With the host group now created, a second play in your provision playbook might now have some configuration steps:: - - - name: Configuration play - hosts: ec2hosts + - hosts: ec2hosts + name: configuration play user: ec2-user gather_facts: true tasks: - - name: Check NTP service - service: name=ntpd state=started -Rather than include configuration inline, you may also choose to just do it as a task include or a role. - -The method above ties the configuration of a host with the provisioning step. This isn't always ideal and leads us onto the next section. - -.. _aws_advanced: - -Advanced Usage -`````````````` + - name: Check NTP service + service: name=ntpd state=started .. 
_aws_host_inventory: Host Inventory -++++++++++++++ +`````````````` -Once your nodes are spun up, you'll probably want to talk to them again. The best way to handle this is to use the ec2 inventory plugin. +Once your nodes are spun up, you'll probably want to talk to them again. With a cloud setup, it's best to not maintain a static list of cloud hostnames +in text files. Rather, the best way to handle this is to use the ec2 dynamic inventory script. -Even for larger environments, you might have nodes spun up from Cloud Formations or other tooling. You don't have to use Ansible to spin up guests. Once these are created and you wish to configure them, the EC2 API can be used to return system grouping with the help of the EC2 inventory script. This script can be used to group resources by their security group or tags. Tagging is highly recommended in EC2 and can provide an easy way to sort between host groups and roles. The inventory script is documented doc:`api` section. +This will also dynamically select nodes that were even created outside of Ansible, and allow Ansible to manage them. -You may wish to schedule a regular refresh of the inventory cache to accommodate for frequent changes in resources: +See the doc:`aws_example` for how to use this, then flip back over to this chapter. -.. code-block:: bash - - # ./ec2.py --refresh-cache +.. _aws_tags_and_groups: -Put this into a crontab as appropriate to make calls from your Ansible master server to the EC2 API endpoints and gather host information. The aim is to keep the view of hosts as up-to-date as possible, so schedule accordingly. Playbook calls could then also be scheduled to act on the refreshed hosts inventory after each refresh. This approach means that machine images can remain "raw", containing no payload and OS-only. Configuration of the workload is handled entirely by Ansible. 
+Tags And Groups And Variables +````````````````````````````` -Tags -++++ - -There's a feature in the ec2 inventory script where hosts tagged with -certain keys and values automatically appear in certain groups. +When using the ec2 inventory script, hosts automatically appear in groups based on how they are tagged in EC2. For instance, if a host is given the "class" tag with the value of "webserver", it will be automatically discoverable via a dynamic group like so:: @@ -128,178 +149,83 @@ it will be automatically discoverable via a dynamic group like so:: tasks: - ping -Using this philosophy can be a great way to manage groups dynamically, without -having to maintain separate inventory. +Using this philosophy can be a great way to keep systems separated by the function they perform. + +In this example, if we wanted to define variables that are automatically applied to each machine tagged with the 'class' of 'webserver', 'group_vars' +in ansible can be used. See :doc:`splitting_out_vars`. + +Similar groups are available for regions and other classifications, and can be similarly assigned variables using the same mechanism. .. _aws_pull: -Pull Configuration -++++++++++++++++++ +Autoscaling with Ansible Pull +````````````````````````````` -For some the delay between refreshing host information and acting on that host information (i.e. running Ansible tasks against the hosts) may be too long. This may be the case in such scenarios where EC2 AutoScaling is being used to scale the number of instances as a result of a particular event. Such an event may require that hosts come online and are configured as soon as possible (even a 1 minute delay may be undesirable). Its possible to pre-bake machine images which contain the necessary ansible-pull script and components to pull and run a playbook via git. The machine images could be configured to run ansible-pull upon boot as part of the bootstrapping procedure. 
+Amazon Autoscaling features automatically increase or decrease capacity based on load. There are also Ansible modules shown in the cloud documentation that +can configure autoscaling policy. + +When nodes come online, it may not be sufficient to wait for the next cycle of an ansible command to come along and configure that node. + +To do this, pre-bake machine images which contain the necessary ansible-pull invocation. Ansible-pull is a command line tool that fetches a playbook from a git server and runs it locally. + +One of the challenges of this approach is that there needs to be a centralized way to store data about the results of pull commands in an autoscaling context. +For this reason, the autoscaling solution provided below in the next section can be a better approach. Read :ref:`ansible-pull` for more information on pull-mode playbooks. -(Various developments around Ansible are also going to make this easier in the near future. Stay tuned!) - .. _aws_autoscale: Autoscaling with Ansible Tower -++++++++++++++++++++++++++++++ +`````````````````````````````` :doc:`tower` also contains a very nice feature for auto-scaling use cases. In this mode, a simple curl script can call a defined URL and the server will "dial out" to the requester and configure an instance that is spinning up. This can be a great way -to reconfigure ephemeral nodes. See the Tower documentation for more details. Click on the Tower link in the sidebar for details. +to reconfigure ephemeral nodes. See the Tower install and product documentation for more details. A benefit of using the callback in Tower over pull mode is that job results are still centrally recorded and less information has to be shared with remote hosts. -.. _aws_use_cases: - -Use Cases -````````` - -This section covers some usage examples built around a specific use case. - .. 
_aws_cloudformation_example: -Example 1 -+++++++++ +Ansible With (And Versus) CloudFormation +```````````````````````````````````````` - Example 1: I'm using CloudFormation to deploy a specific infrastructure stack. I'd like to manage configuration of the instances with Ansible. +CloudFormation is an Amazon technology for defining a cloud stack as a JSON document. -Provision instances with your tool of choice and consider using the inventory plugin to group hosts based on particular tags or security group. Consider tagging instances you wish to managed with Ansible with a suitably unique key=value tag. +Ansible modules provide an easier to use interface than CloudFormation in many examples, without defining a complex JSON document. +This is recommended for most users. -.. note:: Ansible also has a cloudformation module you may wish to explore. +However, for users that have decided to use CloudFormation, there is an Ansible module that can be used to apply a CloudFormation template +to Amazon. -.. _aws_autoscale_example: +When using Ansible with CloudFormation, typically Ansible will be used with a tool like Packer to build images, and CloudFormation will launch +those images, or ansible will be invoked through user data once the image comes online, or a combination of the two. -Example 2 -+++++++++ +Please see the examples in the Ansible CloudFormation module for more details. - Example 2: I'm using AutoScaling to dynamically scale up and scale down the number of instances. This means the number of hosts is constantly fluctuating but I'm letting EC2 automatically handle the provisioning of these instances. I don't want to fully bake a machine image, I'd like to use Ansible to configure the hosts. +.. _aws_image_build: -There are several approaches to this use case. The first is to use the inventory plugin to regularly refresh host information and then target hosts based on the latest inventory data. 
The second is to use ansible-pull triggered by a user-data script (specified in the launch configuration) which would then mean that each instance would fetch Ansible and the latest playbook from a git repository and run locally to configure itself. You could also use the Tower callback feature. +AWS Image Building With Ansible +``````````````````````````````` -.. _aws_builds: +Many users may want to have images boot to a more complete configuration rather than configuring them entirely after instantiation. To do this, +one of many programs can be used with Ansible playbooks to define and upload a base image, which will then get its own AMI ID for usage with +the ec2 module or other Ansible AWS modules such as ec2_asg or the cloudformation module. Possible tools include Packer, aminator, and Ansible's +ec2_ami module. -Example 3 -+++++++++ +Generally speaking, we find most users using Packer. - Example 3: I don't want to use Ansible to manage my instances but I'd like to consider using Ansible to build my fully-baked machine images. +`Documentation for the Ansible Packer provisioner can be found here `_. -There's nothing to stop you doing this. If you like working with Ansible's playbook format then writing a playbook to create an image; create an image file with dd, give it a filesystem and then install packages and finally chroot into it for further configuration. Ansible has the 'chroot' plugin for this purpose, just add the following to your inventory file:: +If you do not want to adopt Packer at this time, configuring a base-image with Ansible after provisioning (as shown above) is acceptable. - /chroot/path ansible_connection=chroot +.. _aws_next_steps: -And in your playbook:: - - hosts: /chroot/path - -Example 4 -+++++++++ - - How would I create a new ec2 instance, provision it and then destroy it all in the same play? - -.. code-block:: yaml - - # Use the ec2 module to create a new host and then add - # it to a special "ec2hosts" group. 
- - - hosts: localhost - connection: local - gather_facts: False - vars: - ec2_access_key: "--REMOVED--" - ec2_secret_key: "--REMOVED--" - keypair: "mykeyname" - instance_type: "t1.micro" - image: "ami-d03ea1e0" - group: "mysecuritygroup" - region: "us-west-2" - zone: "us-west-2c" - tasks: - - name: make one instance - ec2: image={{ image }} - instance_type={{ instance_type }} - aws_access_key={{ ec2_access_key }} - aws_secret_key={{ ec2_secret_key }} - keypair={{ keypair }} - instance_tags='{"foo":"bar"}' - region={{ region }} - group={{ group }} - wait=true - register: ec2_info - - - debug: var=ec2_info - - debug: var=item - with_items: ec2_info.instance_ids - - - add_host: hostname={{ item.public_ip }} groupname=ec2hosts - with_items: ec2_info.instances - - - name: wait for instances to listen on port:22 - wait_for: - state=started - host={{ item.public_dns_name }} - port=22 - with_items: ec2_info.instances - - - # Connect to the node and gather facts, - # including the instance-id. These facts - # are added to inventory hostvars for the - # duration of the playbook's execution - # Typical "provisioning" tasks would go in - # this playbook. 
- - - hosts: ec2hosts - gather_facts: True - user: ec2-user - sudo: True - tasks: - - # fetch instance data from the metadata servers in ec2 - - ec2_facts: - - # show all known facts for this host - - debug: var=hostvars[inventory_hostname] - - # just show the instance-id - - debug: msg="{{ hostvars[inventory_hostname]['ansible_ec2_instance_id'] }}" - - - # Using the instanceid, call the ec2 module - # locally to remove the instance by declaring - # its state is "absent" - - - hosts: ec2hosts - gather_facts: True - connection: local - vars: - ec2_access_key: "--REMOVED--" - ec2_secret_key: "--REMOVED--" - region: "us-west-2" - tasks: - - name: destroy all instances - ec2: state='absent' - aws_access_key={{ ec2_access_key }} - aws_secret_key={{ ec2_secret_key }} - region={{ region }} - instance_ids={{ item }} - wait=true - with_items: hostvars[inventory_hostname]['ansible_ec2_instance_id'] - - -.. note:: more examples of this are pending. You may also be interested in the ec2_ami module for taking AMIs of running instances. - -.. _aws_pending: - -Pending Information -``````````````````` - -In the future look here for more topics. +Next Steps: Explore Modules +``````````````````````````` +Ansible ships with lots of modules for configuring a wide array of EC2 services. Browse the "Cloud" category of the module +documentation for a full list with examples. .. seealso:: @@ -309,7 +235,7 @@ In the future look here for more topics. An introduction to playbooks :doc:`playbooks_delegation` Delegation, useful for working with loud balancers, clouds, and locally executed steps. - `User Mailing List `_ + `User Mailing List `_ Have a question? Stop by the google group! 
`irc.freenode.net `_ #ansible IRC chat channel diff --git a/docsite/rst/intro_dynamic_inventory.rst b/docsite/rst/intro_dynamic_inventory.rst index 28536971bf..e6743c100e 100644 --- a/docsite/rst/intro_dynamic_inventory.rst +++ b/docsite/rst/intro_dynamic_inventory.rst @@ -189,7 +189,9 @@ To see the complete list of variables available for an instance, run the script ./ec2.py --host ec2-12-12-12-12.compute-1.amazonaws.com Note that the AWS inventory script will cache results to avoid repeated API calls, and this cache setting is configurable in ec2.ini. To -explicitly clear the cache, you can run the ec2.py script with the ``--refresh-cache`` parameter. +explicitly clear the cache, you can run the ec2.py script with the ``--refresh-cache`` parameter:: + + # ./ec2.py --refresh-cache .. _other_inventory_scripts: From 9bbfddedf600d149c86aec92001dc0fb049ef650 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Mon, 1 Dec 2014 14:24:41 -0500 Subject: [PATCH 458/813] Best practices docs tweaks. --- docsite/rst/playbooks_best_practices.rst | 85 ++++++++++++++++++------ 1 file changed, 65 insertions(+), 20 deletions(-) diff --git a/docsite/rst/playbooks_best_practices.rst b/docsite/rst/playbooks_best_practices.rst index 473e20db93..de2e27774c 100644 --- a/docsite/rst/playbooks_best_practices.rst +++ b/docsite/rst/playbooks_best_practices.rst @@ -1,7 +1,7 @@ Best Practices ============== -Here are some tips for making the most of Ansible playbooks. +Here are some tips for making the most of Ansible and Ansible playbooks. You can find some example playbooks illustrating these best practices in our `ansible-examples repository `_. (NOTE: These may not use all of the features in the latest release, but are still an excellent reference!). @@ -12,10 +12,13 @@ You can find some example playbooks illustrating these best practices in our `an Content Organization ++++++++++++++++++++++ -The following section shows one of many possible ways to organize playbook content. 
Your usage of Ansible should fit your needs, however, not ours, so feel free to modify this approach and organize as you see fit. +The following section shows one of many possible ways to organize playbook content. -(One thing you will definitely want to do though, is use the "roles" organization feature, which is documented as part -of the main playbooks page. See :doc:`playbooks_roles`). +Your usage of Ansible should fit your needs, however, not ours, so feel free to modify this approach and organize as you see fit. + +One thing you will definitely want to do though, is use the "roles" organization feature, which is documented as part +of the main playbooks page. See :doc:`playbooks_roles`. You absolutely should be using roles. Roles are great. Use roles. Roles! +Did we say that enough? Roles are great. .. _directory_layout: @@ -34,6 +37,9 @@ The top level of the directory would contain files and directories like so:: hostname1 # if systems need specific variables, put them here hostname2 # "" + library/ # if any custom modules, put them here (optional) + filter_plugins/ # if any custom filter plugins, put them here (optional) + site.yml # master playbook webservers.yml # playbook for webserver tier dbservers.yml # playbook for dbserver tier @@ -60,12 +66,30 @@ The top level of the directory would contain files and directories like so:: monitoring/ # "" fooapp/ # "" +.. note: If you find yourself having too many top level playbooks (for instance you have a playbook you wrote for a specific hotfix, etc), it may +make sense to have a playbooks/ directory instead. This can be a good idea as you get larger. If you do this, +configure your roles_path in ansible.cfg to find your roles location. + +.. _use_dynamic_inventory_with_clouds: + +Use Dynamic Inventory With Clouds +````````````````````````````````` + +If you are using a cloud provider, you should not be managing your inventory in a static file. See :doc:`intro_dynamic_inventory`. 
+ +This does not just apply to clouds -- If you have another system maintaining a canonical list of systems +in your infrastructure, usage of dynamic inventory is a great idea in general. + .. _stage_vs_prod: -How to Arrange Inventory, Stage vs Production -````````````````````````````````````````````` +How to Differentiate Stage vs Production +````````````````````````````````````````` -In the example below, the *production* file contains the inventory of all of your production hosts. Of course you can pull inventory from an external data source as well, but this is just a basic example. +If managing static inventory, it is frequently asked how to differentiate different types of environments. The following example +shows a good way to do this. Similar methods of grouping could be adapted to dynamic inventory (for instance, consider applying the AWS +tag "environment:production", and you'll get a group of systems automatically discovered named "ec2_tag_environment_production"). + +Let's show a static inventory example though. Below, the *production* file contains the inventory of all of your production hosts. It is suggested that you define groups based on purpose of the host (roles) and also geography or datacenter location (if applicable):: @@ -106,13 +130,14 @@ It is suggested that you define groups based on purpose of the host (roles) and boston-webservers boston-dbservers - .. _groups_and_hosts: Group And Host Variables ```````````````````````` -Now, groups are nice for organization, but that's not all groups are good for. You can also assign variables to them! For instance, atlanta has its own NTP servers, so when setting up ntp.conf, we should use them. Let's set those now:: +This section extends on the previous example. + +Groups are nice for organization, but that's not all groups are good for. You can also assign variables to them! For instance, atlanta has its own NTP servers, so when setting up ntp.conf, we should use them. 
Let's set those now:: --- # file: group_vars/atlanta @@ -140,6 +165,9 @@ We can define specific hardware variance in systems in a host_vars file, but avo foo_agent_port: 86 bar_agent_port: 99 +Again, if we are using dynamic inventory sources, many dynamic groups are automatically created. So a tag like "class:webserver" would load in +variables from the file "group_vars/ec2_tag_class_webserver" automatically. + .. _split_by_role: Top Level Playbooks Are Separated By Role @@ -162,6 +190,12 @@ In a file like webservers.yml (also at the top level), we simply map the configu - common - webtier +The idea here is that we can choose to configure our whole infrastructure by "running" site.yml or we could just choose to run a subset by running +webservers.yml. This is analogous to the "--limit" parameter to ansible but a little more explicit:: + + ansible-playbook site.yml --limit webservers + ansible-playbook webservers.yml + .. _role_organization: Task And Handler Organization For A Role @@ -286,7 +320,7 @@ parameter in your playbooks to make it clear, especially as some modules support Group By Roles ++++++++++++++ -A system can be in multiple groups. See :doc:`intro_inventory` and :doc:`intro_patterns`. Having groups named after things like +We're somewhat repeating ourselves with this tip, but it's worth repeating. A system can be in multiple groups. See :doc:`intro_inventory` and :doc:`intro_patterns`. Having groups named after things like *webservers* and *dbservers* is repeated in the examples because it's a very powerful concept. This allows playbooks to target machines based on role, as well as to assign role specific variables @@ -299,7 +333,7 @@ See :doc:`playbooks_roles`. 
Operating System and Distribution Variance ++++++++++++++++++++++++++++++++++++++++++ -When dealing with a parameter that is different between two different operating systems, the best way to handle this is +When dealing with a parameter that is different between two different operating systems, a great way to handle this is by using the group_by module. This makes a dynamic group of hosts matching certain criteria, even if that group is not defined in the inventory file:: @@ -307,20 +341,19 @@ This makes a dynamic group of hosts matching certain criteria, even if that grou --- # talk to all hosts just so we can learn about them - - hosts: all - tasks: - - group_by: key={{ ansible_distribution }} + - group_by: key=os_{{ ansible_distribution }} # now just on the CentOS hosts... - - hosts: CentOS + - hosts: os_CentOS gather_facts: False - tasks: - # tasks that only happen on CentOS go here +This will throw all systems into a dynamic group based on the operating system name. + If group-specific settings are needed, this can also be done. For example:: --- @@ -328,20 +361,29 @@ If group-specific settings are needed, this can also be done. For example:: asdf: 10 --- - # file: group_vars/CentOS + # file: group_vars/os_CentOS asdf: 42 In the above example, CentOS machines get the value of '42' for asdf, but other machines get '10'. +This can be used not only to set variables, but also to apply certain roles to only certain systems. + +Alternatively, if only variables are needed: + + - hosts: all + tasks: + - include_vars: "os_{{ ansible_distribution }}.yml" + - debug: var=asdf + +This will pull in variables based on the OS name. .. _ship_modules_with_playbooks: Bundling Ansible Modules With Playbooks +++++++++++++++++++++++++++++++++++++++ -.. versionadded:: 0.5 - If a playbook has a "./library" directory relative to its YAML file, this directory can be used to add ansible modules that will -automatically be in the ansible module path. 
This is a great way to keep modules that go with a playbook together. +automatically be in the ansible module path. This is a great way to keep modules that go with a playbook together. This is shown +in the directory structure example at the start of this section. .. _whitespace: @@ -369,6 +411,8 @@ for you. For example, you will probably not need ``vars``, ``vars_files``, ``vars_prompt`` and ``--extra-vars`` all at once, while also using an external inventory file. +If something feels complicated, it probably is, and may be a good opportunity to simplify things. + .. _version_control: Version Control @@ -395,3 +439,4 @@ changed the rules that are automating your infrastructure. Complete playbook files from the github project source `Mailing List `_ Questions? Help? Ideas? Stop by the list on Google Groups + From 6570a6c6de8734dafd0d14895d4808ec05c6a4ee Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 1 Dec 2014 14:59:25 -0600 Subject: [PATCH 459/813] Use additional vars when templating included file names Fixes #9669 --- lib/ansible/playbook/play.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index a9700b705b..882d174c0a 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -619,8 +619,14 @@ class Play(object): dirname = self.basedir if original_file: dirname = os.path.dirname(original_file) - include_file = template(dirname, tokens[0], mv) + + # temp vars are used here to avoid trampling on the existing vars structures + temp_vars = utils.merge_hash(self.vars, self.vars_file_vars) + temp_vars = utils.merge_hash(temp_vars, mv) + temp_vars = utils.merge_hash(temp_vars, self.playbook.extra_vars) + include_file = template(dirname, tokens[0], temp_vars) include_filename = utils.path_dwim(dirname, include_file) + data = utils.parse_yaml_from_file(include_filename, vault_password=self.vault_password) if 'role_name' in x and data is not None: for y 
in data: From f2b853f7a07358908d56ed36a5af3b5dc09a2735 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 1 Dec 2014 17:36:57 -0500 Subject: [PATCH 460/813] changed plugin load priority to be path based, not suffix based. --- lib/ansible/utils/plugins.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/lib/ansible/utils/plugins.py b/lib/ansible/utils/plugins.py index 1955ade237..29771d0ed9 100644 --- a/lib/ansible/utils/plugins.py +++ b/lib/ansible/utils/plugins.py @@ -167,17 +167,20 @@ class PluginLoader(object): else: suffixes = ['.py', ''] - for suffix in suffixes: - full_name = '%s%s' % (name, suffix) - if full_name in self._plugin_path_cache: - return self._plugin_path_cache[full_name] + # loop over paths and then loop over suffixes to find plugin + for i in self._get_paths(): + for suffix in suffixes: + full_name = '%s%s' % (name, suffix) + + if full_name in self._plugin_path_cache: + return self._plugin_path_cache[full_name] - for i in self._get_paths(): path = os.path.join(i, full_name) if os.path.isfile(path): self._plugin_path_cache[full_name] = path return path + # if nothing is found, try finding alias/deprecated if not name.startswith('_'): return self.find_plugin('_' + name, suffixes, transport) From 7329bcde993161c1338c569932559c4fafeeb886 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 1 Dec 2014 18:57:40 -0800 Subject: [PATCH 461/813] New integration tests for postgresql --- test/integration/destructive.yml | 1 + .../setup_postgresql_db/defaults/main.yml | 5 + .../setup_postgresql_db/files/pg_hba.conf | 10 + .../roles/setup_postgresql_db/tasks/main.yml | 71 ++ .../setup_postgresql_db/vars/Ubuntu-12.yml | 11 + .../setup_postgresql_db/vars/Ubuntu-14.yml | 10 + .../setup_postgresql_db/vars/default.yml | 8 + .../roles/test_postgresql/defaults/main.yml | 8 + .../roles/test_postgresql/meta/main.yml | 3 + .../roles/test_postgresql/tasks/main.yml | 882 ++++++++++++++++++ 10 files changed, 1009 insertions(+) create 
mode 100644 test/integration/roles/setup_postgresql_db/defaults/main.yml create mode 100644 test/integration/roles/setup_postgresql_db/files/pg_hba.conf create mode 100644 test/integration/roles/setup_postgresql_db/tasks/main.yml create mode 100644 test/integration/roles/setup_postgresql_db/vars/Ubuntu-12.yml create mode 100644 test/integration/roles/setup_postgresql_db/vars/Ubuntu-14.yml create mode 100644 test/integration/roles/setup_postgresql_db/vars/default.yml create mode 100644 test/integration/roles/test_postgresql/defaults/main.yml create mode 100644 test/integration/roles/test_postgresql/meta/main.yml create mode 100644 test/integration/roles/test_postgresql/tasks/main.yml diff --git a/test/integration/destructive.yml b/test/integration/destructive.yml index 07e86e36f2..21e1ec047a 100644 --- a/test/integration/destructive.yml +++ b/test/integration/destructive.yml @@ -9,6 +9,7 @@ - { role: test_yum, tags: test_yum } - { role: test_apt, tags: test_apt } - { role: test_apt_repository, tags: test_apt_repository } + - { role: test_postgresql, tags: test_postgresql} - { role: test_mysql_db, tags: test_mysql_db} - { role: test_mysql_user, tags: test_mysql_user} - { role: test_mysql_variables, tags: test_mysql_variables} diff --git a/test/integration/roles/setup_postgresql_db/defaults/main.yml b/test/integration/roles/setup_postgresql_db/defaults/main.yml new file mode 100644 index 0000000000..08f3a91b46 --- /dev/null +++ b/test/integration/roles/setup_postgresql_db/defaults/main.yml @@ -0,0 +1,5 @@ +postgresql_service: postgresql + +postgresql_packages: + - postgresql-server + - python-psycopg2 diff --git a/test/integration/roles/setup_postgresql_db/files/pg_hba.conf b/test/integration/roles/setup_postgresql_db/files/pg_hba.conf new file mode 100644 index 0000000000..a8defb8ee6 --- /dev/null +++ b/test/integration/roles/setup_postgresql_db/files/pg_hba.conf @@ -0,0 +1,10 @@ +# !!! This file managed by Ansible. Any local changes may be overwritten. !!! 
+ +# Database administrative login by UNIX sockets +# note: you may wish to restrict this further later +local all postgres trust + +# TYPE DATABASE USER CIDR-ADDRESS METHOD +local all all md5 +host all all 127.0.0.1/32 md5 +host all all ::1/128 md5 diff --git a/test/integration/roles/setup_postgresql_db/tasks/main.yml b/test/integration/roles/setup_postgresql_db/tasks/main.yml new file mode 100644 index 0000000000..1b3f103961 --- /dev/null +++ b/test/integration/roles/setup_postgresql_db/tasks/main.yml @@ -0,0 +1,71 @@ +- include_vars: '{{ item }}' + with_first_found: + - files: + - '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml' + - '{{ ansible_distribution }}-{{ ansible_distribution_version }}.yml' + - '{{ ansible_os_family }}.yml' + - 'default.yml' + paths: '../vars' + +# Make sure we start fresh +- name: remove rpm dependencies for postgresql test + yum: name={{ item }} state=absent + with_items: postgresql_packages + when: ansible_pkg_mgr == 'yum' + +- name: remove dpkg dependencies for postgresql test + apt: name={{ item }} state=absent + with_items: postgresql_packages + when: ansible_pkg_mgr == 'apt' + +- name: remove old db (red hat) + command: rm -rf "{{ pg_dir }}" + ignore_errors: True + when: ansible_os_family == "RedHat" + +# Theoretically, pg_dropcluster should work but it doesn't so rm files +- name: remove old db config (debian) + command: rm -rf /etc/postgresql + ignore_errors: True + when: ansible_os_family == "Debian" + +- name: remove old db files (debian) + command: rm -rf /var/lib/postgresql + ignore_errors: True + when: ansible_os_family == "Debian" + +- name: install rpm dependencies for postgresql test + yum: name={{ item }} state=latest + with_items: postgresql_packages + when: ansible_pkg_mgr == 'yum' + +- name: install dpkg dependencies for postgresql test + apt: name={{ item }} state=latest + with_items: postgresql_packages + when: ansible_pkg_mgr == 'apt' + +- name: Initialize postgres (systemd) + command: 
postgresql-setup initdb + when: ansible_distribution == "Fedora" or (ansible_os_family == "RedHat" and ansible_distribution_major_version|int >= 7) + +- name: Initialize postgres (sysv) + command: /sbin/service postgresql initdb + when: ansible_os_family == "RedHat" and ansible_distribution_major_version|int <= 6 + +- name: Iniitalize postgres (upstart) + command: /usr/bin/pg_createcluster {{ pg_ver }} main + when: ansible_os_family == 'Debian' + +- name: Copy pg_hba into place + copy: src=pg_hba.conf dest="{{ pg_hba_location }}" owner="postgres" group="root" mode="0644" + +- name: Generate locale on Debian systems + command: locale-gen pt_BR + when: ansible_os_family == 'Debian' + +- name: Generate locale on Debian systems + command: locale-gen es_MX + when: ansible_os_family == 'Debian' + +- name: restart postgresql service + service: name={{ postgresql_service }} state=restarted diff --git a/test/integration/roles/setup_postgresql_db/vars/Ubuntu-12.yml b/test/integration/roles/setup_postgresql_db/vars/Ubuntu-12.yml new file mode 100644 index 0000000000..b2507c9849 --- /dev/null +++ b/test/integration/roles/setup_postgresql_db/vars/Ubuntu-12.yml @@ -0,0 +1,11 @@ +postgresql_service: "postgresql" + +postgresql_packages: + - "postgresql" + - "postgresql-common" + - "python-psycopg2" + +pg_hba_location: "/etc/postgresql/9.1/main/pg_hba.conf" +pg_dir: "/var/lib/postgresql/9.1/main" +pg_ver: 9.1 + diff --git a/test/integration/roles/setup_postgresql_db/vars/Ubuntu-14.yml b/test/integration/roles/setup_postgresql_db/vars/Ubuntu-14.yml new file mode 100644 index 0000000000..7d704264da --- /dev/null +++ b/test/integration/roles/setup_postgresql_db/vars/Ubuntu-14.yml @@ -0,0 +1,10 @@ +postgresql_service: "postgresql" + +postgresql_packages: + - "postgresql" + - "postgresql-common" + - "python-psycopg2" + +pg_hba_location: "/etc/postgresql/9.3/main/pg_hba.conf" +pg_dir: "/var/lib/postgresql/9.3/main" +pg_ver: 9.3 diff --git 
a/test/integration/roles/setup_postgresql_db/vars/default.yml b/test/integration/roles/setup_postgresql_db/vars/default.yml new file mode 100644 index 0000000000..dc7db0fc98 --- /dev/null +++ b/test/integration/roles/setup_postgresql_db/vars/default.yml @@ -0,0 +1,8 @@ +postgresql_service: "postgresql" + +postgresql_packages: + - "postgresql-server" + - "python-psycopg2" + +pg_hba_location: "/var/lib/pgsql/data/pg_hba.conf" +pg_dir: "/var/lib/pgsql/data" diff --git a/test/integration/roles/test_postgresql/defaults/main.yml b/test/integration/roles/test_postgresql/defaults/main.yml new file mode 100644 index 0000000000..cfc50737c6 --- /dev/null +++ b/test/integration/roles/test_postgresql/defaults/main.yml @@ -0,0 +1,8 @@ +--- +# defaults file for test_postgresql_db +db_name: 'ansible_db' +db_user1: 'ansible_db_user1' +db_user2: 'ansible_db_user2' + +tmp_dir: '/tmp' + diff --git a/test/integration/roles/test_postgresql/meta/main.yml b/test/integration/roles/test_postgresql/meta/main.yml new file mode 100644 index 0000000000..85b1dc7e4c --- /dev/null +++ b/test/integration/roles/test_postgresql/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - setup_postgresql_db diff --git a/test/integration/roles/test_postgresql/tasks/main.yml b/test/integration/roles/test_postgresql/tasks/main.yml new file mode 100644 index 0000000000..e814b5fd9e --- /dev/null +++ b/test/integration/roles/test_postgresql/tasks/main.yml @@ -0,0 +1,882 @@ +# +# Create and destroy db +# +- name: Create DB + sudo_user: postgres + sudo: True + postgresql_db: + state: present + name: "{{ db_name }}" + register: result + +- name: assert that module reports the db was created + assert: + that: + - "result.changed == true" + - "result.db =='{{ db_name }}'" + +- name: Check that database created + sudo_user: postgres + sudo: True + shell: echo "select datname from pg_database where datname = '{{ db_name }}';" | psql + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(1 row)'" + +- 
name: Run create on an already created db + sudo_user: postgres + sudo: True + postgresql_db: + state: present + name: "{{ db_name }}" + register: result + +- name: assert that module reports the db was unchanged + assert: + that: + - "result.changed == false" + +- name: Destroy DB + sudo_user: postgres + sudo: True + postgresql_db: + state: absent + name: "{{ db_name }}" + register: result + +- name: assert that module reports the db was changed + assert: + that: + - "result.changed == true" + +- name: Check that database was destroyed + sudo_user: postgres + sudo: True + shell: echo "select datname from pg_database where datname = '{{ db_name }}';" | psql + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(0 rows)'" + +- name: Destroy DB + sudo_user: postgres + sudo: True + postgresql_db: + state: absent + name: "{{ db_name }}" + register: result + +- name: assert that removing an alreaady removed db makes no change + assert: + that: + - "result.changed == false" + + +# This corner case works to add but not to drop. 
This is sufficiently crazy +# that I'm not going to attempt to fix it unless someone lets me know that they +# need the functionality +# +# - postgresql_db: +# state: 'present' +# name: '"silly.""name"' +# - shell: echo "select datname from pg_database where datname = 'silly.""name';" | psql +# register: result +# +# - assert: +# that: "result.stdout_lines[-1] == '(1 row)'" +# - postgresql_db: +# state: absent +# name: '"silly.""name"' +# - shell: echo "select datname from pg_database where datname = 'silly.""name';" | psql +# register: result +# +# - assert: +# that: "result.stdout_lines[-1] == '(0 rows)'" + +# +# Test encoding, collate, ctype, template options +# +- name: Create a DB with encoding, collate, ctype, and template options + sudo_user: postgres + sudo: True + postgresql_db: + name: '{{ db_name }}' + state: 'present' + encoding: 'LATIN1' + lc_collate: 'pt_BR' + lc_ctype: 'es_MX' + template: 'template0' + +- name: Check that the DB has all of our options + sudo_user: postgres + sudo: True + shell: echo "select datname, pg_encoding_to_char(encoding), datcollate, datctype from pg_database where datname = '{{ db_name }}';" | psql + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(1 row)'" + - "'LATIN1' in result.stdout_lines[-2]" + - "'pt_BR' in result.stdout_lines[-2]" + - "'es_MX' in result.stdout_lines[-2]" + - "'UTF8' not in result.stdout_lines[-2]" + - "'en_US' not in result.stdout_lines[-2]" + +- name: Check that running db cration with options a second time does nothing + sudo_user: postgres + sudo: True + postgresql_db: + name: '{{ db_name }}' + state: 'present' + encoding: 'LATIN1' + lc_collate: 'pt_BR' + lc_ctype: 'es_MX' + template: 'template0' + register: result + +- assert: + that: + - 'result.changed == False' + + +- name: Check that attempting to change encoding returns an error + sudo_user: postgres + sudo: True + postgresql_db: + name: '{{ db_name }}' + state: 'present' + encoding: 'UTF8' + lc_collate: 'pt_BR' + 
lc_ctype: 'es_MX' + template: 'template0' + register: result + ignore_errors: True + +- assert: + that: + - 'result.failed == True' + +- name: Cleanup test DB + sudo_user: postgres + sudo: True + postgresql_db: + name: '{{ db_name }}' + state: 'absent' + +- shell: echo "select datname, pg_encoding_to_char(encoding), datcollate, datctype from pg_database where datname = '{{ db_name }}';" | psql + sudo_user: postgres + sudo: True + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(0 rows)'" + +# +# Create and destroy user +# +- name: Create a user + sudo_user: postgres + sudo: True + postgresql_user: + name: "{{ db_user1 }}" + encrypted: 'yes' + password: "md55c8ccfd9d6711fc69a7eae647fc54f51" + register: result + +- name: Check that ansible reports they were created + assert: + that: + - "result.changed == True" + +- name: Check that they were created + sudo_user: postgres + sudo: True + shell: echo "select * from pg_user where usename='{{ db_user1 }}';" | psql + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(1 row)'" + +- name: Check that creating user a second time does nothing + sudo_user: postgres + sudo: True + postgresql_user: + name: "{{ db_user1 }}" + encrypted: 'yes' + password: "md55c8ccfd9d6711fc69a7eae647fc54f51" + register: result + +- name: Check that ansible reports no change + assert: + that: + - "result.changed == False" + +- name: Remove user + sudo_user: postgres + sudo: True + postgresql_user: + name: "{{ db_user1 }}" + state: 'absent' + register: result + +- name: Check that ansible reports they were removed + assert: + that: + - "result.changed == True" + +- name: Check that they were removed + sudo_user: postgres + sudo: True + shell: echo "select * from pg_user where usename='{{ db_user1 }}';" | psql + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(0 rows)'" + +- name: Check that removing user a second time does nothing + sudo_user: postgres + sudo: True + postgresql_user: 
+ name: "{{ db_user1 }}" + state: 'absent' + register: result + +- name: Check that ansible reports no change + assert: + that: + - "result.changed == False" + +- name: Create a user with all role attributes + sudo_user: postgres + sudo: True + postgresql_user: + name: "{{ db_user1 }}" + state: "present" + role_attr_flags: "SUPERUSER,CREATEROLE,CREATEDB,INHERIT,login" + +- name: Check that the user has the requested role attributes + sudo_user: postgres + sudo: True + shell: echo "select 'super:'||rolsuper, 'createrole:'||rolcreaterole, 'create:'||rolcreatedb, 'inherit:'||rolinherit, 'login:'||rolcanlogin from pg_roles where rolname='{{ db_user1 }}';" | psql + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(1 row)'" + - "'super:t' in result.stdout_lines[-2]" + - "'createrole:t' in result.stdout_lines[-2]" + - "'create:t' in result.stdout_lines[-2]" + - "'inherit:t' in result.stdout_lines[-2]" + - "'login:t' in result.stdout_lines[-2]" + +- name: Modify a user to have no role attributes + sudo_user: postgres + sudo: True + postgresql_user: + name: "{{ db_user1 }}" + state: "present" + role_attr_flags: "NOSUPERUSER,NOCREATEROLE,NOCREATEDB,noinherit,NOLOGIN" + register: result + +- name: Check that ansible reports it modified the role + assert: + that: + - "result.changed == True" + +- name: Check that the user has the requested role attributes + sudo_user: postgres + sudo: True + shell: echo "select 'super:'||rolsuper, 'createrole:'||rolcreaterole, 'create:'||rolcreatedb, 'inherit:'||rolinherit, 'login:'||rolcanlogin from pg_roles where rolname='{{ db_user1 }}';" | psql + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(1 row)'" + - "'super:f' in result.stdout_lines[-2]" + - "'createrole:f' in result.stdout_lines[-2]" + - "'create:f' in result.stdout_lines[-2]" + - "'inherit:f' in result.stdout_lines[-2]" + - "'login:f' in result.stdout_lines[-2]" + +- name: Modify a single role attribute on a user + sudo_user: postgres 
+ sudo: True + postgresql_user: + name: "{{ db_user1 }}" + state: "present" + role_attr_flags: "LOGIN" + register: result + +- name: Check that ansible reports it modified the role + assert: + that: + - "result.changed == True" + +- name: Check that the user has the requested role attributes + sudo_user: postgres + sudo: True + shell: echo "select 'super:'||rolsuper, 'createrole:'||rolcreaterole, 'create:'||rolcreatedb, 'inherit:'||rolinherit, 'login:'||rolcanlogin from pg_roles where rolname='{{ db_user1 }}';" | psql + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(1 row)'" + - "'super:f' in result.stdout_lines[-2]" + - "'createrole:f' in result.stdout_lines[-2]" + - "'create:f' in result.stdout_lines[-2]" + - "'inherit:f' in result.stdout_lines[-2]" + - "'login:t' in result.stdout_lines[-2]" + +- name: Cleanup the user + sudo_user: postgres + sudo: True + postgresql_user: + name: "{{ db_user1 }}" + state: 'absent' + +- name: Check that they were removed + sudo_user: postgres + sudo: True + shell: echo "select * from pg_user where usename='{{ db_user1 }}';" | psql + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(0 rows)'" + +### TODO: test expires, fail_on_user + +# +# Test db ownership +# +- name: Create an unprivileged user to own a DB + sudo_user: postgres + sudo: True + postgresql_user: + name: "{{ db_user1 }}" + encrypted: 'yes' + password: "md55c8ccfd9d6711fc69a7eae647fc54f51" + +- name: Create db with user ownership + sudo_user: postgres + sudo: True + postgresql_db: + name: "{{ db_name }}" + state: "present" + owner: "{{ db_user1 }}" + +- name: Check that the user owns the newly created DB + sudo_user: postgres + sudo: True + shell: echo "select pg_catalog.pg_get_userbyid(datdba) from pg_catalog.pg_database where datname = '{{ db_name }}';" | psql + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(1 row)'" + - "'{{ db_user1 }}' == '{{ result.stdout_lines[-2] | trim }}'" + +- name: 
Change the owner on an existing db + sudo_user: postgres + sudo: True + postgresql_db: + name: "{{ db_name }}" + state: "present" + owner: "postgres" + register: result + +- name: assert that ansible says it changed the db + assert: + that: + - "result.changed == True" + +- name: Check that the user owns the newly created DB + sudo_user: postgres + sudo: True + shell: echo "select pg_catalog.pg_get_userbyid(datdba) from pg_catalog.pg_database where datname = '{{ db_name }}';" | psql + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(1 row)'" + - "'postgres' == '{{ result.stdout_lines[-2] | trim }}'" + +- name: Cleanup db + sudo_user: postgres + sudo: True + postgresql_db: + name: "{{ db_name }}" + state: "absent" + +- name: Check that database was destroyed + sudo_user: postgres + sudo: True + shell: echo "select datname from pg_database where datname = '{{ db_name }}';" | psql + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(0 rows)'" + +- name: Cleanup test user + sudo_user: postgres + sudo: True + postgresql_user: + name: "{{ db_user1 }}" + state: 'absent' + +- name: Check that they were removed + sudo_user: postgres + sudo: True + shell: echo "select * from pg_user where usename='{{ db_user1 }}';" | psql + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(0 rows)'" + +# +# Test settings privleges +# +- name: Create db + sudo_user: postgres + sudo: True + postgresql_db: + name: "{{ db_name }}" + state: "present" + +- name: Create some tables on the db + sudo_user: postgres + sudo: True + shell: echo "create table test_table1 (field text);" | psql {{ db_name }} + +- sudo_user: postgres + sudo: True + shell: echo "create table test_table2 (field text);" | psql {{ db_name }} + +- name: Create a user with some permissions on the db + sudo_user: postgres + sudo: True + postgresql_user: + name: "{{ db_user1 }}" + encrypted: 'yes' + password: "md55c8ccfd9d6711fc69a7eae647fc54f51" + db: "{{ db_name 
}}" + priv: 'test_table1:INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER/test_table2:INSERT/CREATE,CONNECT,TEMP' + +- name: Check that the user has the requested permissions (table1) + sudo_user: postgres + sudo: True + shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table1';" | psql {{ db_name }} + register: result_table1 + +- name: Check that the user has the requested permissions (table2) + sudo_user: postgres + sudo: True + shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table2';" | psql {{ db_name }} + register: result_table2 + +- name: Check that the user has the requested permissions (database) + sudo_user: postgres + sudo: True + shell: echo "select datacl from pg_database where datname='{{ db_name }}';" | psql {{ db_name }} + register: result_database + +- assert: + that: + - "result_table1.stdout_lines[-1] == '(7 rows)'" + - "'INSERT' in result_table1.stdout" + - "'SELECT' in result_table1.stdout" + - "'UPDATE' in result_table1.stdout" + - "'DELETE' in result_table1.stdout" + - "'TRUNCATE' in result_table1.stdout" + - "'REFERENCES' in result_table1.stdout" + - "'TRIGGER' in result_table1.stdout" + - "result_table2.stdout_lines[-1] == '(1 row)'" + - "'INSERT' == '{{ result_table2.stdout_lines[-2] | trim }}'" + - "result_database.stdout_lines[-1] == '(1 row)'" + - "'{{ db_user1 }}=CTc/postgres' in result_database.stdout_lines[-2]" + +- name: Add another permission for the user + sudo_user: postgres + sudo: True + postgresql_user: + name: "{{ db_user1 }}" + encrypted: 'yes' + password: "md55c8ccfd9d6711fc69a7eae647fc54f51" + db: "{{ db_name }}" + priv: 'test_table2:select' + register: results + +- name: Check that ansible reports it changed the user + assert: + that: + - "results.changed == True" + +- name: Check that the user has the requested permissions (table2) + sudo_user: postgres + 
sudo: True + shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table2';" | psql {{ db_name }} + register: result_table2 + +- assert: + that: + - "result_table2.stdout_lines[-1] == '(2 rows)'" + - "'INSERT' in result_table2.stdout" + - "'SELECT' in result_table2.stdout" + + +# +# Test priv setting via postgresql_privs module +# (Depends on state from previous _user privs tests) +# + +- name: Revoke a privilege + sudo_user: postgres + sudo: True + postgresql_privs: + type: "table" + state: "absent" + roles: "{{ db_user1 }}" + privs: "INSERT" + objs: "test_table2" + db: "{{ db_name }}" + register: results + +- name: Check that ansible reports it changed the user + assert: + that: + - "results.changed == True" + +- name: Check that the user has the requested permissions (table2) + sudo_user: postgres + sudo: True + shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table2';" | psql {{ db_name }} + register: result_table2 + +- assert: + that: + - "result_table2.stdout_lines[-1] == '(1 row)'" + - "'SELECT' == '{{ result_table2.stdout_lines[-2] | trim }}'" + +- name: Revoke many privileges on multiple tables + sudo_user: postgres + sudo: True + postgresql_privs: + state: "absent" + roles: "{{ db_user1 }}" + privs: "INSERT,select,UPDATE,TRUNCATE,REFERENCES,TRIGGER,delete" + objs: "test_table2,test_table1" + db: "{{ db_name }}" + register: results + +- name: Check that ansible reports it changed the user + assert: + that: + - "results.changed == True" + +- name: Check that permissions were revoked (table1) + sudo_user: postgres + sudo: True + shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table1';" | psql {{ db_name }} + register: result_table1 + +- name: Check that permissions were revoked (table2) + sudo_user: postgres + sudo: True + 
shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table2';" | psql {{ db_name }} + register: result_table2 + +- assert: + that: + - "result_table1.stdout_lines[-1] == '(0 rows)'" + - "result_table2.stdout_lines[-1] == '(0 rows)'" + +- name: Revoke database privileges + sudo_user: postgres + sudo: True + postgresql_privs: + type: "database" + state: "absent" + roles: "{{ db_user1 }}" + privs: "Create,connect,TEMP" + objs: "{{ db_name }}" + db: "{{ db_name }}" + +- name: Check that the user has the requested permissions (database) + sudo_user: postgres + sudo: True + shell: echo "select datacl from pg_database where datname='{{ db_name }}';" | psql {{ db_name }} + register: result_database + +- assert: + that: + - "result_database.stdout_lines[-1] == '(1 row)'" + - "'{{ db_user1 }}' not in result_database.stdout" + +- name: Grant database privileges + sudo_user: postgres + sudo: True + postgresql_privs: + type: "database" + state: "present" + roles: "{{ db_user1 }}" + privs: "CREATE,connect" + objs: "{{ db_name }}" + db: "{{ db_name }}" + register: results + +- name: Check that ansible reports it changed the user + assert: + that: + - "results.changed == True" + +- name: Check that the user has the requested permissions (database) + sudo_user: postgres + sudo: True + shell: echo "select datacl from pg_database where datname='{{ db_name }}';" | psql {{ db_name }} + register: result_database + +- assert: + that: + - "result_database.stdout_lines[-1] == '(1 row)'" + - "'{{ db_user1 }}=Cc' in result_database.stdout" + +- name: Grant a single privilege on a table + sudo_user: postgres + sudo: True + postgresql_privs: + state: "present" + roles: "{{ db_user1 }}" + privs: "INSERT" + objs: "test_table1" + db: "{{ db_name }}" + +- name: Check that permissions were added (table1) + sudo_user: postgres + sudo: True + shell: echo "select privilege_type from information_schema.role_table_grants where 
grantee='{{ db_user1 }}' and table_name='test_table1';" | psql {{ db_name }} + register: result_table1 + +- assert: + that: + - "result_table1.stdout_lines[-1] == '(1 row)'" + - "'{{ result_table1.stdout_lines[-2] | trim }}' == 'INSERT'" + +- name: Grant many privileges on multiple tables + sudo_user: postgres + sudo: True + postgresql_privs: + state: "present" + roles: "{{ db_user1 }}" + privs: 'INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,trigger' + objs: "test_table2,test_table1" + db: "{{ db_name }}" + +- name: Check that permissions were added (table1) + sudo_user: postgres + sudo: True + shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table1';" | psql {{ db_name }} + register: result_table1 + +- name: Check that permissions were added (table2) + sudo_user: postgres + sudo: True + shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table2';" | psql {{ db_name }} + register: result_table2 + +- assert: + that: + - "result_table1.stdout_lines[-1] == '(7 rows)'" + - "'INSERT' in result_table1.stdout" + - "'SELECT' in result_table1.stdout" + - "'UPDATE' in result_table1.stdout" + - "'DELETE' in result_table1.stdout" + - "'TRUNCATE' in result_table1.stdout" + - "'REFERENCES' in result_table1.stdout" + - "'TRIGGER' in result_table1.stdout" + - "result_table2.stdout_lines[-1] == '(7 rows)'" + - "'INSERT' in result_table2.stdout" + - "'SELECT' in result_table2.stdout" + - "'UPDATE' in result_table2.stdout" + - "'DELETE' in result_table2.stdout" + - "'TRUNCATE' in result_table2.stdout" + - "'REFERENCES' in result_table2.stdout" + - "'TRIGGER' in result_table2.stdout" + +# +# Cleanup +# +- name: Cleanup db + sudo_user: postgres + sudo: True + postgresql_db: + name: "{{ db_name }}" + state: "absent" + +- name: Check that database was destroyed + sudo_user: postgres + sudo: True + shell: echo "select datname from 
pg_database where datname = '{{ db_name }}';" | psql + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(0 rows)'" + +- name: Cleanup test user + sudo_user: postgres + sudo: True + postgresql_user: + name: "{{ db_user1 }}" + state: 'absent' + +- name: Check that they were removed + sudo_user: postgres + sudo: True + shell: echo "select * from pg_user where usename='{{ db_user1 }}';" | psql + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(0 rows)'" + +# +# Test login_user functionality +# +- name: Create a user to test login module parameters + sudo: True + sudo_user: postgres + postgresql_user: + name: "{{ db_user1 }}" + state: "present" + encrypted: 'no' + password: "password" + role_attr_flags: "CREATEDB,LOGIN,CREATEROLE" + +- name: Create db + postgresql_db: + name: "{{ db_name }}" + state: "present" + login_user: "{{ db_user1 }}" + login_password: "password" + login_host: "localhost" + +- name: Check that database created + sudo: True + sudo_user: postgres + shell: echo "select datname from pg_database where datname = '{{ db_name }}';" | psql + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(1 row)'" + +- name: Create a user + postgresql_user: + name: "{{ db_user2 }}" + state: "present" + encrypted: 'yes' + password: "md55c8ccfd9d6711fc69a7eae647fc54f51" + db: "{{ db_name }}" + login_user: "{{ db_user1 }}" + login_password: "password" + login_host: "localhost" + +- name: Check that they were created + sudo: True + sudo_user: postgres + shell: echo "select * from pg_user where usename='{{ db_user2 }}';" | psql + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(1 row)'" + +- name: Grant database privileges + postgresql_privs: + type: "database" + state: "present" + roles: "{{ db_user2 }}" + privs: "CREATE,connect" + objs: "{{ db_name }}" + db: "{{ db_name }}" + login: "{{ db_user1 }}" + password: "password" + host: "localhost" + +- name: Check that the user has the 
requested permissions (database) + sudo: True + sudo_user: postgres + shell: echo "select datacl from pg_database where datname='{{ db_name }}';" | psql {{ db_name }} + register: result_database + +- assert: + that: + - "result_database.stdout_lines[-1] == '(1 row)'" + - "'{{ db_user2 }}=Cc' in result_database.stdout" + +- name: Remove user + postgresql_user: + name: "{{ db_user2 }}" + state: 'absent' + priv: "ALL" + db: "{{ db_name }}" + login_user: "{{ db_user1 }}" + login_password: "password" + login_host: "localhost" + +- name: Check that they were removed + sudo: True + sudo_user: postgres + shell: echo "select * from pg_user where usename='{{ db_user2 }}';" | psql + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(0 rows)'" + +- name: Destroy DB + postgresql_db: + state: absent + name: "{{ db_name }}" + login_user: "{{ db_user1 }}" + login_password: "password" + login_host: "localhost" + +- name: Check that database was destroyed + sudo: True + sudo_user: postgres + shell: echo "select datname from pg_database where datname = '{{ db_name }}';" | psql + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(0 rows)'" + +# +# Cleanup +# +- name: Cleanup test user + sudo: True + sudo_user: postgres + postgresql_user: + name: "{{ db_user1 }}" + state: 'absent' + +- name: Check that they were removed + sudo: True + sudo_user: postgres + shell: echo "select * from pg_user where usename='{{ db_user1 }}';" | psql + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(0 rows)'" + From de267b56557f36c93c9f90a89796bdd58c1ca84d Mon Sep 17 00:00:00 2001 From: Chris Church Date: Mon, 1 Dec 2014 22:18:35 -0500 Subject: [PATCH 462/813] Fix PowerShell plugin issues affecting fetch module when used against Windows hosts. 
--- lib/ansible/runner/__init__.py | 2 +- lib/ansible/runner/connection_plugins/winrm.py | 2 +- lib/ansible/runner/shell_plugins/powershell.py | 16 ++++++++++++++-- .../roles/test_win_fetch/tasks/main.yml | 9 +++++---- 4 files changed, 21 insertions(+), 8 deletions(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index ce61e7d90f..0254449e7d 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -1193,7 +1193,7 @@ class Runner(object): return path if len(split_path) > 1: - return os.path.join(initial_fragment, *split_path[1:]) + return conn.shell.join_path(initial_fragment, *split_path[1:]) else: return initial_fragment diff --git a/lib/ansible/runner/connection_plugins/winrm.py b/lib/ansible/runner/connection_plugins/winrm.py index d6e51710b5..7a761e69b2 100644 --- a/lib/ansible/runner/connection_plugins/winrm.py +++ b/lib/ansible/runner/connection_plugins/winrm.py @@ -193,7 +193,7 @@ class Connection(object): def fetch_file(self, in_path, out_path): out_path = out_path.replace('\\', '/') vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host) - buffer_size = 2**20 # 1MB chunks + buffer_size = 2**19 # 0.5MB chunks if not os.path.exists(os.path.dirname(out_path)): os.makedirs(os.path.dirname(out_path)) out_file = None diff --git a/lib/ansible/runner/shell_plugins/powershell.py b/lib/ansible/runner/shell_plugins/powershell.py index 7254df6f7e..0125721c64 100644 --- a/lib/ansible/runner/shell_plugins/powershell.py +++ b/lib/ansible/runner/shell_plugins/powershell.py @@ -84,12 +84,24 @@ class ShellModule(object): # FIXME: Support system temp path! return _encode_script('''(New-Item -Type Directory -Path $env:temp -Name "%s").FullName | Write-Host -Separator '';''' % basefile) - def md5(self, path): + def expand_user(self, user_home_path): + # PowerShell only supports "~" (not "~username"). 
Resolve-Path ~ does + # not seem to work remotely, though by default we are always starting + # in the user's home directory. + if user_home_path == '~': + script = 'Write-Host (Get-Location).Path' + elif user_home_path.startswith('~\\'): + script = 'Write-Host ((Get-Location).Path + "%s")' % _escape(user_home_path[1:]) + else: + script = 'Write-Host "%s"' % _escape(user_home_path) + return _encode_script(script) + + def checksum(self, path, python_interp): path = _escape(path) script = ''' If (Test-Path -PathType Leaf "%(path)s") { - $sp = new-object -TypeName System.Security.Cryptography.MD5CryptoServiceProvider; + $sp = new-object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider; $fp = [System.IO.File]::Open("%(path)s", [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read); [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower(); $fp.Dispose(); diff --git a/test/integration/roles/test_win_fetch/tasks/main.yml b/test/integration/roles/test_win_fetch/tasks/main.yml index b07b681bdd..8c0f5aa21f 100644 --- a/test/integration/roles/test_win_fetch/tasks/main.yml +++ b/test/integration/roles/test_win_fetch/tasks/main.yml @@ -18,11 +18,11 @@ - name: clean out the test directory local_action: file name={{ output_dir|mandatory }} state=absent - tags: me + run_once: true - name: create the test directory local_action: file name={{ output_dir }} state=directory - tags: me + run_once: true - name: fetch a small file fetch: src="C:/Windows/win.ini" dest={{ output_dir }} @@ -145,7 +145,7 @@ - "not fetch_missing_nofail|changed" - name: attempt to fetch a non-existent file - fail on missing - fetch: src="C:/this_file_should_not_exist.txt" dest={{ output_dir }} fail_on_missing=yes + fetch: src="~/this_file_should_not_exist.txt" dest={{ output_dir }} fail_on_missing=yes register: fetch_missing ignore_errors: true @@ -164,5 +164,6 @@ - name: check fetch directory result assert: that: - - "fetch_dir|failed" + # Doesn't fail anymore, only 
returns a message. + - "not fetch_dir|changed" - "fetch_dir.msg" From 29d41bb789383c3ff59269b28877ea0f270f5861 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 1 Dec 2014 21:25:35 -0600 Subject: [PATCH 463/813] Revise patch from earlier using even more variable sources for HostVars Superceeds e61e8a3 Fixes #9684 --- lib/ansible/runner/__init__.py | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index ce61e7d90f..40e9cd4ffa 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -667,8 +667,22 @@ class Runner(object): def _executor_internal(self, host, new_stdin): ''' executes any module one or more times ''' + # We build the proper injected dictionary for all future + # templating operations in this run inject = self.get_inject_vars(host) - hostvars = HostVars(utils.merge_hash(inject['combined_cache'], self.extra_vars), self.inventory, vault_password=self.vault_pass) + + # Then we selectively merge some variable dictionaries down to a + # single dictionary, used to template the HostVars for this host + temp_vars = self.inventory.get_variables(host, vault_password=self.vault_pass) + temp_vars = utils.merge_hash(temp_vars, inject['combined_cache']) + temp_vars = utils.merge_hash(temp_vars, self.play_vars) + temp_vars = utils.merge_hash(temp_vars, self.play_file_vars) + temp_vars = utils.merge_hash(temp_vars, self.extra_vars) + + hostvars = HostVars(temp_vars, self.inventory, vault_password=self.vault_pass) + + # and we save the HostVars in the injected dictionary so they + # may be referenced from playbooks/templates inject['hostvars'] = hostvars host_connection = inject.get('ansible_connection', self.transport) From 9b591d293efaca08f708bc0b95e49ea4abaa5d80 Mon Sep 17 00:00:00 2001 From: Justin Lecher Date: Tue, 2 Dec 2014 08:42:49 +0100 Subject: [PATCH 464/813] Ansible is available in the main Gentoo repository Signed-off-by: Justin 
Lecher --- packaging/gentoo/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packaging/gentoo/README.md b/packaging/gentoo/README.md index 7420860642..991692c9c7 100644 --- a/packaging/gentoo/README.md +++ b/packaging/gentoo/README.md @@ -1,3 +1,3 @@ -Gentoo ebuilds are available here: +Gentoo ebuilds are available in the main tree: -https://github.com/uu/ubuilds +emerge ansible From 1d2e23bc7e7b5b843e0227eec1de37dddc31a61e Mon Sep 17 00:00:00 2001 From: Jonathan Davila Date: Tue, 2 Dec 2014 10:09:31 -0500 Subject: [PATCH 465/813] changed time.py to timetest.py to avoid keyword usage --- docsite/rst/developing_modules.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index aff5fab556..709697c7bb 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -48,7 +48,7 @@ modules. Keep in mind, though, that some modules in ansible's source tree are so look at `service` or `yum`, and don't stare too close into things like `async_wrapper` or you'll turn to stone. Nobody ever executes async_wrapper directly. -Ok, let's get going with an example. We'll use Python. For starters, save this as a file named `time.py`:: +Ok, let's get going with an example. We'll use Python. For starters, save this as a file named `timetest.py`:: #!/usr/bin/python @@ -73,7 +73,7 @@ There's a useful test script in the source checkout for ansible:: Let's run the script you just wrote with that:: - ansible/hacking/test-module -m ./time.py + ansible/hacking/test-module -m ./timetest.py You should see output that looks something like this:: From 8790be31541f468bce4fad05e95fec17d41e7247 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 2 Dec 2014 08:55:13 -0800 Subject: [PATCH 466/813] Now that we have all of the postgres db being blown away the package manager is initializing a new db. 
We don't need to do it manually anymore --- .../integration/roles/setup_postgresql_db/tasks/main.yml | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/test/integration/roles/setup_postgresql_db/tasks/main.yml b/test/integration/roles/setup_postgresql_db/tasks/main.yml index 1b3f103961..91571f49ef 100644 --- a/test/integration/roles/setup_postgresql_db/tasks/main.yml +++ b/test/integration/roles/setup_postgresql_db/tasks/main.yml @@ -52,9 +52,12 @@ command: /sbin/service postgresql initdb when: ansible_os_family == "RedHat" and ansible_distribution_major_version|int <= 6 -- name: Iniitalize postgres (upstart) - command: /usr/bin/pg_createcluster {{ pg_ver }} main - when: ansible_os_family == 'Debian' +# The package install should initialize a db cluster provided that the old db +# cluster was entirely removed. So this shouldn't be needed +#- name: Iniitalize postgres (upstart) +# command: /usr/bin/pg_createcluster {{ pg_ver }} main +# ignore_errors: True +# when: ansible_os_family == 'Debian' - name: Copy pg_hba into place copy: src=pg_hba.conf dest="{{ pg_hba_location }}" owner="postgres" group="root" mode="0644" From a3b5efadd600c0eebf533e9e9891ac854da191f9 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 2 Dec 2014 13:41:52 -0600 Subject: [PATCH 467/813] Remove references to "baby JSON" in module developing documentation --- docsite/rst/developing_modules.rst | 20 ++------------------ 1 file changed, 2 insertions(+), 18 deletions(-) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index aff5fab556..596fa47417 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -309,8 +309,7 @@ You should also never do this in a module:: print "some status message" -Because the output is supposed to be valid JSON. Except that's not quite true, -but we'll get to that later. +Because the output is supposed to be valid JSON. 
Modules must not output anything on standard error, because the system will merge standard out with standard error and prevent the JSON from parsing. Capturing standard @@ -343,7 +342,7 @@ and guidelines: * If packaging modules in an RPM, they only need to be installed on the control machine and should be dropped into /usr/share/ansible. This is entirely optional and up to you. -* Modules should return JSON or key=value results all on one line. JSON is best if you can do JSON. All return types must be hashes (dictionaries) although they can be nested. Lists or simple scalar values are not supported, though they can be trivially contained inside a dictionary. +* Modules should output valid JSON only. All return types must be hashes (dictionaries) although they can be nested. Lists or simple scalar values are not supported, though they can be trivially contained inside a dictionary. * In the event of failure, a key of 'failed' should be included, along with a string explanation in 'msg'. Modules that raise tracebacks (stacktraces) are generally considered 'poor' modules, though Ansible can deal with these returns and will automatically convert anything unparseable into a failed result. If you are using the AnsibleModule common Python code, the 'failed' element will be included for you automatically when you call 'fail_json'. @@ -351,21 +350,6 @@ and guidelines: * As results from many hosts will be aggregated at once, modules should return only relevant output. Returning the entire contents of a log file is generally bad form. -.. _module_dev_shorthand: - -Shorthand Vs JSON -````````````````` - -To make it easier to write modules in bash and in cases where a JSON -module might not be available, it is acceptable for a module to return -key=value output all on one line, like this. 
The Ansible parser -will know what to do:: - - somekey=1 somevalue=2 rc=3 favcolor=red - -If you're writing a module in Python or Ruby or whatever, though, returning -JSON is probably the simplest way to go. - .. _module_documenting: Documenting Your Module From 8130ed8de0bc1174bb9a9c585a73771cf53b4215 Mon Sep 17 00:00:00 2001 From: Devin Christensen Date: Tue, 2 Dec 2014 14:23:38 -0700 Subject: [PATCH 468/813] Allow .gitkeep in the jsonfile fact cache dir --- lib/ansible/cache/jsonfile.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/cache/jsonfile.py b/lib/ansible/cache/jsonfile.py index 81918a2836..8b4c892a40 100644 --- a/lib/ansible/cache/jsonfile.py +++ b/lib/ansible/cache/jsonfile.py @@ -98,7 +98,7 @@ class CacheModule(BaseCacheModule): def keys(self): keys = [] for k in os.listdir(self._cache_dir): - if not self.has_expired(k): + if not (k.startswith('.') or self.has_expired(k)): keys.append(k) return keys From 61a30e5f49c14319c43f9321631a7c3f6f8b6554 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 3 Dec 2014 07:26:42 -0500 Subject: [PATCH 469/813] better exception handling with delegated hosts --- lib/ansible/inventory/__init__.py | 4 ++-- lib/ansible/runner/__init__.py | 28 ++++++++++++++-------------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index 7d279b7b4d..2048046d3c 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -420,7 +420,7 @@ class Inventory(object): group = self.get_group(groupname) if group is None: - raise Exception("group not found: %s" % groupname) + raise errors.AnsibleError("group not found: %s" % groupname) vars = {} @@ -439,7 +439,7 @@ class Inventory(object): host = self.get_host(hostname) if not host: - raise Exception("host not found: %s" % hostname) + raise errors.AnsibleError("host not found: %s" % hostname) return host.get_variables() def get_host_variables(self, 
hostname, update_cached=False, vault_password=None): diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index f0de42764a..fad769c4ed 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -394,20 +394,20 @@ class Runner(object): actual_user = inject.get('ansible_ssh_user', self.remote_user) thisuser = None - if host in inject['hostvars']: - if inject['hostvars'][host].get('ansible_ssh_user'): - # user for delegate host in inventory - thisuser = inject['hostvars'][host].get('ansible_ssh_user') - else: - # look up the variables for the host directly from inventory - try: - host_vars = self.inventory.get_variables(host, vault_password=self.vault_pass) - if 'ansible_ssh_user' in host_vars: - thisuser = host_vars['ansible_ssh_user'] - except Exception, e: - # the hostname was not found in the inventory, so - # we just ignore this and try the next method - pass + try: + if host in inject['hostvars']: + if inject['hostvars'][host].get('ansible_ssh_user'): + # user for delegate host in inventory + thisuser = inject['hostvars'][host].get('ansible_ssh_user') + else: + # look up the variables for the host directly from inventory + host_vars = self.inventory.get_variables(host, vault_password=self.vault_pass) + if 'ansible_ssh_user' in host_vars: + thisuser = host_vars['ansible_ssh_user'] + except errors.AnsibleException, e: + # the hostname was not found in the inventory, so + # we just ignore this and try the next method + pass if thisuser is None and self.remote_user: # user defined by play/runner From 05435f380b89f6576dcdedb2da248be09d3f7306 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 3 Dec 2014 07:16:51 -0800 Subject: [PATCH 470/813] Update to newer core and extras modules --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 3a80b734e6..dda6d89060 160000 --- 
a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 3a80b734e6e4c1ebe8cbd40b4957a7589520caf5 +Subproject commit dda6d89060f01a19efc46b8e4af53e455ad4731f diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 317654dba5..68bd8a55ae 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 317654dba5cae905b5d6eed78f5c6c6984cc2f02 +Subproject commit 68bd8a55aee7079e1e1197654d7db1645a717208 From 00c0d7ce31fbd5a1d1597fe927d184a0ce18ac4c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 3 Dec 2014 07:37:51 -0800 Subject: [PATCH 471/813] Ubuntu still having problems initializing the postgres db --- .../roles/setup_postgresql_db/tasks/main.yml | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/test/integration/roles/setup_postgresql_db/tasks/main.yml b/test/integration/roles/setup_postgresql_db/tasks/main.yml index 91571f49ef..d306ac3b7a 100644 --- a/test/integration/roles/setup_postgresql_db/tasks/main.yml +++ b/test/integration/roles/setup_postgresql_db/tasks/main.yml @@ -52,12 +52,11 @@ command: /sbin/service postgresql initdb when: ansible_os_family == "RedHat" and ansible_distribution_major_version|int <= 6 -# The package install should initialize a db cluster provided that the old db -# cluster was entirely removed. 
So this shouldn't be needed -#- name: Iniitalize postgres (upstart) -# command: /usr/bin/pg_createcluster {{ pg_ver }} main -# ignore_errors: True -# when: ansible_os_family == 'Debian' +- name: Iniitalize postgres (upstart) + command: /usr/bin/pg_createcluster {{ pg_ver }} main + # Sometimes package install creates the db cluster, sometimes this step is needed + ignore_errors: True + when: ansible_os_family == 'Debian' - name: Copy pg_hba into place copy: src=pg_hba.conf dest="{{ pg_hba_location }}" owner="postgres" group="root" mode="0644" From eeec4f73e792380c6a11f84663781a0b245f3a89 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 3 Dec 2014 08:18:39 -0800 Subject: [PATCH 472/813] Pull in doc fixes for modules --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index dda6d89060..5af4463823 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit dda6d89060f01a19efc46b8e4af53e455ad4731f +Subproject commit 5af446382326aa93f89772316a84105b5110817f diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 68bd8a55ae..19e688b017 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 68bd8a55aee7079e1e1197654d7db1645a717208 +Subproject commit 19e688b01750b3b5ad02cbfe51533056068a3224 From f1386bb1141f8d8b3ba05f190753f9d3f39cad78 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 3 Dec 2014 09:27:27 -0600 Subject: [PATCH 473/813] Use more variable sources when templating the play ds Fixes #9699 --- lib/ansible/playbook/play.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index 882d174c0a..6e7cc0fc94 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -108,10 +108,16 @@ class Play(object): 
self._update_vars_files_for_host(None) # template everything to be efficient, but do not pre-mature template - # tasks/handlers as they may have inventory scope overrides + # tasks/handlers as they may have inventory scope overrides. We also + # create a set of temporary variables for templating, so we don't + # trample on the existing vars structures _tasks = ds.pop('tasks', []) _handlers = ds.pop('handlers', []) - ds = template(basedir, ds, self.vars) + + temp_vars = utils.merge_hash(self.vars, self.vars_file_vars) + temp_vars = utils.merge_hash(temp_vars, self.playbook.extra_vars) + + ds = template(basedir, ds, temp_vars) ds['tasks'] = _tasks ds['handlers'] = _handlers From 1ec8b6e3c5a9f9275233f67778be93ccabbb2a02 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 3 Dec 2014 10:45:54 -0800 Subject: [PATCH 474/813] Have remote_expanduser honor sudo and su users. Fixes #9663 --- lib/ansible/runner/__init__.py | 10 ++++- test/integration/destructive.yml | 2 + .../roles/test_sudo/tasks/main.yml | 44 +++++++++++++++++++ .../roles/test_sudo/templates/bar.j2 | 1 + .../roles/test_sudo/vars/default.yml | 1 + 5 files changed, 57 insertions(+), 1 deletion(-) create mode 100644 test/integration/roles/test_sudo/tasks/main.yml create mode 100644 test/integration/roles/test_sudo/templates/bar.j2 create mode 100644 test/integration/roles/test_sudo/vars/default.yml diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index f0de42764a..4d2bd66016 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -1196,8 +1196,16 @@ class Runner(object): ''' takes a remote path and performs tilde expansion on the remote host ''' if not path.startswith('~'): return path + split_path = path.split(os.path.sep, 1) - cmd = conn.shell.expand_user(split_path[0]) + expand_path = split_path[0] + if expand_path == '~': + if self.sudo and self.sudo_user: + expand_path = '~%s' % self.sudo_user + elif self.su and self.su_user: + expand_path = 
'~%s' % self.su_user + + cmd = conn.shell.expand_user(expand_path) data = self._low_level_exec_command(conn, cmd, tmp, sudoable=False, su=False) initial_fragment = utils.last_non_blank_line(data['stdout']) diff --git a/test/integration/destructive.yml b/test/integration/destructive.yml index 21e1ec047a..4720319482 100644 --- a/test/integration/destructive.yml +++ b/test/integration/destructive.yml @@ -1,6 +1,8 @@ - hosts: testhost gather_facts: True roles: + # In destructive because it creates and removes a user + - { role: test_sudo, tags: test_sudo} - { role: test_service, tags: test_service } # Current pip unconditionally uses md5. We can re-enable if pip switches # to a different hash or allows us to not check md5 diff --git a/test/integration/roles/test_sudo/tasks/main.yml b/test/integration/roles/test_sudo/tasks/main.yml new file mode 100644 index 0000000000..0460486d0c --- /dev/null +++ b/test/integration/roles/test_sudo/tasks/main.yml @@ -0,0 +1,44 @@ +- include_vars: default.yml + +- name: Create test user + user: + name: "{{ sudo_test_user }}" + +- name: tilde expansion honors sudo in file + sudo: True + sudo_user: "{{ sudo_test_user }}" + file: + path: "~/foo.txt" + state: touch + +- name: check that the path in the user's home dir was created + stat: + path: "~{{ sudo_test_user }}/foo.txt" + register: results + +- assert: + that: + - "results.stat.exists == True" + +- name: tilde expansion honors sudo in template + sudo: True + sudo_user: "{{ sudo_test_user }}" + template: + src: "bar.j2" + dest: "~/bar.txt" + +- name: check that the path in the user's home dir was created + stat: + path: "~{{ sudo_test_user }}/bar.txt" + register: results + +- assert: + that: + - "results.stat.exists == True" + +- name: Remove test user and their home dir + user: + name: "{{ sudo_test_user }}" + state: "absent" + remove: "yes" + diff --git a/test/integration/roles/test_sudo/templates/bar.j2 b/test/integration/roles/test_sudo/templates/bar.j2 new file mode 100644 index 
0000000000..6f184d1814 --- /dev/null +++ b/test/integration/roles/test_sudo/templates/bar.j2 @@ -0,0 +1 @@ +{{ sudo_test_user }} diff --git a/test/integration/roles/test_sudo/vars/default.yml b/test/integration/roles/test_sudo/vars/default.yml new file mode 100644 index 0000000000..f2f7b728b2 --- /dev/null +++ b/test/integration/roles/test_sudo/vars/default.yml @@ -0,0 +1 @@ +sudo_test_user: ansibletest1 From 1c5f62529521ccf64b4c62629ceb171e6314d6e9 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 3 Dec 2014 14:19:11 -0500 Subject: [PATCH 475/813] corrected exception name --- lib/ansible/runner/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index fad769c4ed..5ee79e609c 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -404,7 +404,7 @@ class Runner(object): host_vars = self.inventory.get_variables(host, vault_password=self.vault_pass) if 'ansible_ssh_user' in host_vars: thisuser = host_vars['ansible_ssh_user'] - except errors.AnsibleException, e: + except errors.AnsibleError, e: # the hostname was not found in the inventory, so # we just ignore this and try the next method pass From 4dfd86d8478d36f1e774c8770a0a8eb610d9ffb1 Mon Sep 17 00:00:00 2001 From: James Keener Date: Wed, 3 Dec 2014 16:28:55 -0500 Subject: [PATCH 476/813] Issue-9704 Better handling of missing python When they python interpreter is set incorrectly for the machine the file is being checked for (e.g. for the local or the remote), the error manifests as a readability or directory missing error which can be very misleading. 
--- lib/ansible/runner/action_plugins/copy.py | 4 ++++ lib/ansible/runner/action_plugins/fetch.py | 2 +- lib/ansible/runner/action_plugins/unarchive.py | 3 +++ 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/lib/ansible/runner/action_plugins/copy.py b/lib/ansible/runner/action_plugins/copy.py index b180448988..bb579e48a8 100644 --- a/lib/ansible/runner/action_plugins/copy.py +++ b/lib/ansible/runner/action_plugins/copy.py @@ -192,6 +192,10 @@ class ActionModule(object): dest_file = conn.shell.join_path(dest, source_rel) remote_checksum = self.runner._remote_checksum(conn, tmp_path, dest_file, inject) + if remote_checksum == '4': + result = dict(msg="python isn't present on the system. Unable to compute checksum", failed=True) + return ReturnData(conn=conn, result=result) + if remote_checksum != '1' and not force: # remote_file does not exist so continue to next iteration. continue diff --git a/lib/ansible/runner/action_plugins/fetch.py b/lib/ansible/runner/action_plugins/fetch.py index 2fb6631536..3fa748ccbd 100644 --- a/lib/ansible/runner/action_plugins/fetch.py +++ b/lib/ansible/runner/action_plugins/fetch.py @@ -129,7 +129,7 @@ class ActionModule(object): elif remote_checksum == '3': result = dict(msg="remote file is a directory, fetch cannot work on directories", file=source, changed=False) elif remote_checksum == '4': - result = dict(msg="python isn't present on the remote system. Unable to fetch file", file=source, changed=False) + result = dict(msg="python isn't present on the system. 
Unable to compute checksum", file=source, changed=False) return ReturnData(conn=conn, result=result) # calculate checksum for the local file diff --git a/lib/ansible/runner/action_plugins/unarchive.py b/lib/ansible/runner/action_plugins/unarchive.py index b528a25a39..cfcaf454bd 100644 --- a/lib/ansible/runner/action_plugins/unarchive.py +++ b/lib/ansible/runner/action_plugins/unarchive.py @@ -83,6 +83,9 @@ class ActionModule(object): source = utils.path_dwim(self.runner.basedir, source) remote_checksum = self.runner._remote_checksum(conn, tmp, dest, inject) + if remote_checksum == '4': + result = dict(failed=True, msg="python isn't present on the system. Unable to compute checksum") + return ReturnData(conn=conn, result=result) if remote_checksum != '3': result = dict(failed=True, msg="dest '%s' must be an existing dir" % dest) return ReturnData(conn=conn, result=result) From a2b2e5499271ee4e510aaa2bbeb6f77835e63c2c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 3 Dec 2014 14:42:01 -0800 Subject: [PATCH 477/813] Fix checksum code to work with delegate_to/local_action Fixes #9704 --- lib/ansible/runner/__init__.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 4d2bd66016..8da794ba6f 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -1223,7 +1223,10 @@ class Runner(object): def _remote_checksum(self, conn, tmp, path, inject): ''' takes a remote checksum and returns 1 if no file ''' - python_interp = inject['hostvars'][inject['inventory_hostname']].get('ansible_python_interpreter', 'python') + if 'delegate_to' in inject and inject['delegate_to']: + python_interp = inject['hostvars'][inject['delegate_to']].get('ansible_python_interpreter', 'python') + else: + python_interp = inject['hostvars'][inject['inventory_hostname']].get('ansible_python_interpreter', 'python') cmd = conn.shell.checksum(path, python_interp) data = 
self._low_level_exec_command(conn, cmd, tmp, sudoable=True) data2 = utils.last_non_blank_line(data['stdout']) From ae17b993d995675d2495aa4628a085541ab37de0 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 3 Dec 2014 14:43:46 -0800 Subject: [PATCH 478/813] Update modules to allow USAGE as a valid grant option for postgres --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 5af4463823..b766390ae2 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 5af446382326aa93f89772316a84105b5110817f +Subproject commit b766390ae2e0fc79a32bb3a55eed959655b76a43 From 2d17d18c1b486a22727b11f6ecae8177e90c76b3 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 4 Dec 2014 07:44:21 -0500 Subject: [PATCH 479/813] mentioned gathering settings in fact caching. --- docsite/rst/playbooks_variables.rst | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index 84f0a1f5b5..06da540452 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -762,7 +762,9 @@ To configure fact caching, enable it in ansible.cfg as follows:: fact_caching_timeout = 86400 # seconds -At the time of writing, Redis is the only supported fact caching engine. +You might also want to change the 'gathering' setting to 'smart' or 'explicit' or set gather_facts to False in most plays. + +At the time of writing, Redis is the only supported fact caching engine. 
To get redis up and running, perform the equivalent OS commands:: yum install redis From e938f554b74c35cc3a13f72bce46ebe5fb3aab3d Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 2 Dec 2014 10:58:14 -0500 Subject: [PATCH 480/813] better exception handling for unexpected exceptions --- lib/ansible/runner/__init__.py | 4 ++++ lib/ansible/utils/template.py | 5 ++--- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 8da794ba6f..b1652d86e3 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -733,6 +733,10 @@ class Runner(object): result = utils.jsonify(dict(changed=False, skipped=True)) self.callbacks.on_skipped(host, None) return ReturnData(host=host, result=result) + except errors.AnsibleError, e: + raise + except Exception, e: + raise errors.AnsibleError("Unexpected error while executing task: %s" % str(e)) # strip out any jinja2 template syntax within # the data returned by the lookup plugin diff --git a/lib/ansible/utils/template.py b/lib/ansible/utils/template.py index c2b14d8454..3e7f5e4d81 100644 --- a/lib/ansible/utils/template.py +++ b/lib/ansible/utils/template.py @@ -89,13 +89,12 @@ def lookup(name, *args, **kwargs): tvars = kwargs.get('vars', None) if instance is not None: - # safely catch run failures per #5059 try: ran = instance.run(*args, inject=tvars, **kwargs) - except errors.AnsibleUndefinedVariable: + except errors.AnsibleError: raise except Exception, e: - ran = None + raise errors.AnsibleError('Unexpected error in during lookup: %s' % e) if ran: ran = ",".join(ran) return ran From 97408fe5b25b0cb2b58dfb34ffffd01c8da8fd51 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 4 Dec 2014 09:14:53 -0500 Subject: [PATCH 481/813] Revert "Make listify respect the global setting for undefined variables." This 'mostly' reverts commit 2769098fe7fcb51302cc8fabe9a1ff3f51aeec6f. 
Conflicts: lib/ansible/utils/__init__.py test/units/TestUtils.py --- lib/ansible/utils/__init__.py | 7 +------ test/units/TestUtils.py | 12 ++---------- 2 files changed, 3 insertions(+), 16 deletions(-) diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index 1541be5783..a735e9c0b0 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -48,7 +48,6 @@ import sys import json import subprocess import contextlib -import jinja2.exceptions from vault import VaultLib @@ -1469,15 +1468,11 @@ def listify_lookup_plugin_terms(terms, basedir, inject): # if not already a list, get ready to evaluate with Jinja2 # not sure why the "/" is in above code :) try: - new_terms = template.template(basedir, terms, inject, convert_bare=True, fail_on_undefined=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR) + new_terms = template.template(basedir, "{{ %s }}" % terms, inject) if isinstance(new_terms, basestring) and "{{" in new_terms: pass else: terms = new_terms - except errors.AnsibleUndefinedVariable: - raise - except jinja2.exceptions.UndefinedError, e: - raise errors.AnsibleUndefinedVariable('undefined variable in items: %s' % e) except: pass diff --git a/test/units/TestUtils.py b/test/units/TestUtils.py index d93fc70329..99dd24565c 100644 --- a/test/units/TestUtils.py +++ b/test/units/TestUtils.py @@ -566,17 +566,9 @@ class TestUtils(unittest.TestCase): def test_listify_lookup_plugin_terms(self): basedir = os.path.dirname(__file__) - # Straight lookups - self.assertEqual(ansible.utils.listify_lookup_plugin_terms('things', basedir, dict(things=[])), []) - self.assertEqual(ansible.utils.listify_lookup_plugin_terms('things', basedir, dict(things=['one', 'two'])), ['one', 'two']) - - # Variable interpolation - self.assertEqual(ansible.utils.listify_lookup_plugin_terms('things', basedir, dict(things=['{{ foo }}', '{{ bar }}'], foo="hello", bar="world")), - ['hello', 'world']) - with self.assertRaises(ansible.errors.AnsibleError) as ex: - 
ansible.utils.listify_lookup_plugin_terms('things', basedir, dict(things=['{{ foo }}', '{{ bar_typo }}'], foo="hello", bar="world")) - self.assertTrue("undefined variable in items: 'bar_typo'" in ex.exception.msg) + #self.assertEqual(ansible.utils.listify_lookup_plugin_terms('things', basedir, dict(things=[])), []) + #self.assertEqual(ansible.utils.listify_lookup_plugin_terms('things', basedir, dict(things=['one', 'two'])), ['one', 'two']) def test_deprecated(self): sys_stderr = sys.stderr From 446cba65093d54ea517739d534101f04a30afeb1 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 4 Dec 2014 12:30:18 -0500 Subject: [PATCH 482/813] fixed integration test as with_items always returns a list, even if empty --- test/integration/roles/test_conditionals/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/roles/test_conditionals/tasks/main.yml b/test/integration/roles/test_conditionals/tasks/main.yml index 3d3c2ec9de..90509d7f63 100644 --- a/test/integration/roles/test_conditionals/tasks/main.yml +++ b/test/integration/roles/test_conditionals/tasks/main.yml @@ -277,7 +277,7 @@ assert: that: - "'skipped' in result" - - result.skipped + - result.results.skipped - name: test a with_items loop skipping a single item debug: var=item From 1eb31249998aec1182533e5737b080e43218db1a Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 4 Dec 2014 10:53:48 -0800 Subject: [PATCH 483/813] Make test for skipping agree with the way current output formats a skip --- test/integration/roles/test_conditionals/tasks/main.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/test/integration/roles/test_conditionals/tasks/main.yml b/test/integration/roles/test_conditionals/tasks/main.yml index 90509d7f63..8d794e497f 100644 --- a/test/integration/roles/test_conditionals/tasks/main.yml +++ b/test/integration/roles/test_conditionals/tasks/main.yml @@ -276,8 +276,9 @@ - name: assert the task was skipped assert: that: - - 
"'skipped' in result" - - result.results.skipped + - "result.results|length == 1" + - "'skipped' in result.results[0]" + - "result.results[0].skipped == True" - name: test a with_items loop skipping a single item debug: var=item From 57c77691ec95f4b9589843600d0becbbbe335cd4 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 4 Dec 2014 11:35:03 -0800 Subject: [PATCH 484/813] Add a check that tilde expansion with copy works --- .../integration/roles/test_sudo/files/baz.txt | 1 + .../roles/test_sudo/tasks/main.yml | 19 +++++++++++++++++++ 2 files changed, 20 insertions(+) create mode 100644 test/integration/roles/test_sudo/files/baz.txt diff --git a/test/integration/roles/test_sudo/files/baz.txt b/test/integration/roles/test_sudo/files/baz.txt new file mode 100644 index 0000000000..a69dd57604 --- /dev/null +++ b/test/integration/roles/test_sudo/files/baz.txt @@ -0,0 +1 @@ +testing tilde expansion with sudo diff --git a/test/integration/roles/test_sudo/tasks/main.yml b/test/integration/roles/test_sudo/tasks/main.yml index 0460486d0c..022e7d7422 100644 --- a/test/integration/roles/test_sudo/tasks/main.yml +++ b/test/integration/roles/test_sudo/tasks/main.yml @@ -19,6 +19,7 @@ - assert: that: - "results.stat.exists == True" + - "results.stat.path|dirname|basename == '{{ sudo_test_user }}'" - name: tilde expansion honors sudo in template sudo: True @@ -35,6 +36,24 @@ - assert: that: - "results.stat.exists == True" + - "results.stat.path|dirname|basename == '{{ sudo_test_user }}'" + +- name: tilde expansion honors sudo in copy + sudo: True + sudo_user: "{{ sudo_test_user }}" + copy: + src: baz.txt + dest: "~/baz.txt" + +- name: check that the path in the user's home dir was created + stat: + path: "~{{ sudo_test_user }}/baz.txt" + register: results + +- assert: + that: + - "results.stat.exists == True" + - "results.stat.path|dirname|basename == '{{ sudo_test_user }}'" - name: Remove test user and their home dir user: From 3b80f63e22ad2a3e8e0c66412bdc79116e093eaa Mon 
Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 4 Dec 2014 11:39:35 -0800 Subject: [PATCH 485/813] Have known_hosts function use the url parameter instead of getting a specific attribute out of the module dict. This makes the function useful in more places --- lib/ansible/module_utils/known_hosts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/known_hosts.py b/lib/ansible/module_utils/known_hosts.py index c997596fd4..99dbf2c03a 100644 --- a/lib/ansible/module_utils/known_hosts.py +++ b/lib/ansible/module_utils/known_hosts.py @@ -40,7 +40,7 @@ def add_git_host_key(module, url, accept_hostkey=True, create_dir=True): """ idempotently add a git url hostkey """ - fqdn = get_fqdn(module.params['repo']) + fqdn = get_fqdn(url) if fqdn: known_host = check_hostkey(module, fqdn) From bf5d8ee678601d6c3cef09c4f735ea0a9e61e70c Mon Sep 17 00:00:00 2001 From: Veres Lajos Date: Thu, 4 Dec 2014 22:23:35 +0000 Subject: [PATCH 486/813] typofixes - https://github.com/vlajos/misspell_fixer --- docsite/rst/developing_modules.rst | 2 +- docsite/rst/developing_test_pr.rst | 2 +- docsite/rst/guide_aws.rst | 2 +- examples/ansible.cfg | 2 +- hacking/test-module | 4 ++-- lib/ansible/module_utils/facts.py | 2 +- lib/ansible/runner/__init__.py | 2 +- test/integration/roles/test_copy/tasks/main.yml | 2 +- test/integration/roles/test_file/tasks/main.yml | 4 ++-- test/integration/roles/test_lineinfile/tasks/main.yml | 2 +- .../roles/test_mysql_db/tasks/state_dump_import.yml | 2 +- test/integration/roles/test_mysql_user/tasks/main.yml | 8 ++++---- test/integration/roles/test_win_stat/tasks/main.yml | 2 +- test/units/TestModuleUtilsBasic.py | 2 +- test/units/TestUtils.py | 2 +- ticket_stubs/great_idea.md | 2 +- ticket_stubs/module_repo.md | 2 +- ticket_stubs/no_thanks.md | 2 +- ticket_stubs/pr_duplicate.md | 2 +- ticket_stubs/thanks.md | 2 +- v2/ansible/compat/__init__.py | 2 +- v2/ansible/errors/__init__.py | 4 ++-- 
v2/ansible/parsing/yaml/__init__.py | 4 ++-- v2/ansible/playbook/play.py | 2 +- 24 files changed, 31 insertions(+), 31 deletions(-) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index 1e5e017e8e..decd5b305c 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -453,7 +453,7 @@ gives them slightly higher development priority (though they'll work in exactly Deprecating and making module aliases `````````````````````````````````````` -Starting in 1.8 you can deprecate modules by renaming them with a preceeding _, i.e. old_cloud.py to +Starting in 1.8 you can deprecate modules by renaming them with a preceding _, i.e. old_cloud.py to _old_cloud.py, This will keep the module available but hide it from the primary docs and listing. You can also rename modules and keep an alias to the old name by using a symlink that starts with _. diff --git a/docsite/rst/developing_test_pr.rst b/docsite/rst/developing_test_pr.rst index 76b0a53eef..ee4520bf6c 100644 --- a/docsite/rst/developing_test_pr.rst +++ b/docsite/rst/developing_test_pr.rst @@ -29,7 +29,7 @@ and then commenting on that particular issue on GitHub. Here's how: or Docker for this, but they are optional. It is also useful to have virtual machines of different Linux or other flavors, since some features (apt vs. yum, for example) are specific to those OS versions. -First, you will need to configure your testing environment with the neccessary tools required to run our test +First, you will need to configure your testing environment with the necessary tools required to run our test suites. 
You will need at least:: git diff --git a/docsite/rst/guide_aws.rst b/docsite/rst/guide_aws.rst index e1bb2e5c83..7f05833550 100644 --- a/docsite/rst/guide_aws.rst +++ b/docsite/rst/guide_aws.rst @@ -149,7 +149,7 @@ it will be automatically discoverable via a dynamic group like so:: tasks: - ping -Using this philosophy can be a great way to keep systems seperated by the function they perform. +Using this philosophy can be a great way to keep systems separated by the function they perform. In this example, if we wanted to define variables that are automatically applied to each machine tagged with the 'class' of 'webserver', 'group_vars' in ansible can be used. See :doc:`splitting_out_vars`. diff --git a/examples/ansible.cfg b/examples/ansible.cfg index b3e862da51..a89fa47664 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -147,7 +147,7 @@ filter_plugins = /usr/share/ansible_plugins/filter_plugins # avoid issues. #http_user_agent = ansible-agent -# if set to a persistant type (not 'memory', for example 'redis') fact values +# if set to a persistent type (not 'memory', for example 'redis') fact values # from previous runs in Ansible will be stored. This may be useful when # wanting to use, for example, IP information from one group of servers # without having to talk to them in the same playbook run to get their diff --git a/hacking/test-module b/hacking/test-module index b6fe1f5cdb..c226f32e88 100755 --- a/hacking/test-module +++ b/hacking/test-module @@ -58,7 +58,7 @@ def parse(): parser.add_option('-D', '--debugger', dest='debugger', help="path to python debugger (e.g. /usr/bin/pdb)") parser.add_option('-I', '--interpreter', dest='interpreter', - help="path to interpeter to use for this module (e.g. ansible_python_interpreter=/usr/bin/python)", + help="path to interpreter to use for this module (e.g. 
ansible_python_interpreter=/usr/bin/python)", metavar='INTERPRETER_TYPE=INTERPRETER_PATH') parser.add_option('-c', '--check', dest='check', action='store_true', help="run the module in check mode") @@ -104,7 +104,7 @@ def boilerplate_module(modfile, args, interpreter, check): inject = {} if interpreter: if '=' not in interpreter: - print 'interpeter must by in the form of ansible_python_interpreter=/usr/bin/python' + print 'interpreter must by in the form of ansible_python_interpreter=/usr/bin/python' sys.exit(1) interpreter_type, interpreter_path = interpreter.split('=') if not interpreter_type.startswith('ansible_'): diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 5ceeb405d5..38082fe854 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -1355,7 +1355,7 @@ class HPUX(Hardware): self.facts['memtotal_mb'] = int(data) / 1024 except AttributeError: #For systems where memory details aren't sent to syslog or the log has rotated, use parsed - #adb output. Unfortunatley /dev/kmem doesn't have world-read, so this only works as root. + #adb output. Unfortunately /dev/kmem doesn't have world-read, so this only works as root. if os.access("/dev/kmem", os.R_OK): rc, out, err = module.run_command("echo 'phys_mem_pages/D' | adb -k /stand/vmunix /dev/kmem | tail -1 | awk '{print $2}'", use_unsafe_shell=True) if not err: diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 8da794ba6f..1065f9c682 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -982,7 +982,7 @@ class Runner(object): # render module_args and complex_args templates try: # When templating module_args, we need to be careful to ensure - # that no variables inadvertantly (or maliciously) add params + # that no variables inadvertently (or maliciously) add params # to the list of args. We do this by counting the number of k=v # pairs before and after templating. 
num_args_pre = self._count_module_args(module_args, allow_dupes=True) diff --git a/test/integration/roles/test_copy/tasks/main.yml b/test/integration/roles/test_copy/tasks/main.yml index 7da4d6ad32..5e77295fbb 100644 --- a/test/integration/roles/test_copy/tasks/main.yml +++ b/test/integration/roles/test_copy/tasks/main.yml @@ -220,7 +220,7 @@ - name: clean up file: dest=/tmp/worldwritable state=absent -# test overwritting a link using "follow=yes" so that the link +# test overwriting a link using "follow=yes" so that the link # is preserved and the link target is updated - name: create a test file to symlink to diff --git a/test/integration/roles/test_file/tasks/main.yml b/test/integration/roles/test_file/tasks/main.yml index 26de23b1ca..2126587e6c 100644 --- a/test/integration/roles/test_file/tasks/main.yml +++ b/test/integration/roles/test_file/tasks/main.yml @@ -188,7 +188,7 @@ - "file11_result.uid == 1235" - name: fail to create soft link to non existent file - file: src=/noneexistant dest={{output_dir}}/soft2.txt state=link force=no + file: src=/noneexistent dest={{output_dir}}/soft2.txt state=link force=no register: file12_result ignore_errors: true @@ -198,7 +198,7 @@ - "file12_result.failed == true" - name: force creation soft link to non existent - file: src=/noneexistant dest={{output_dir}}/soft2.txt state=link force=yes + file: src=/noneexistent dest={{output_dir}}/soft2.txt state=link force=yes register: file13_result - name: verify that link was created diff --git a/test/integration/roles/test_lineinfile/tasks/main.yml b/test/integration/roles/test_lineinfile/tasks/main.yml index 3f8a8dc5ba..d809bf1983 100644 --- a/test/integration/roles/test_lineinfile/tasks/main.yml +++ b/test/integration/roles/test_lineinfile/tasks/main.yml @@ -243,7 +243,7 @@ that: - "result.stat.checksum == 'f9af7008e3cb67575ce653d094c79cabebf6e523'" -# Test EOF with empty file to make sure no unneccessary newline is added +# Test EOF with empty file to make sure no unnecessary 
newline is added - name: testempty deploy the testempty file for lineinfile copy: src=testempty.txt dest={{output_dir}}/testempty.txt register: result diff --git a/test/integration/roles/test_mysql_db/tasks/state_dump_import.yml b/test/integration/roles/test_mysql_db/tasks/state_dump_import.yml index 1980b40638..44267e1edb 100644 --- a/test/integration/roles/test_mysql_db/tasks/state_dump_import.yml +++ b/test/integration/roles/test_mysql_db/tasks/state_dump_import.yml @@ -41,7 +41,7 @@ - "result.changed == true" - "result.db =='{{ db_name }}'" -- name: assert database was backup succesfully +- name: assert database was backup successfully command: file {{ db_file_name }} register: result diff --git a/test/integration/roles/test_mysql_user/tasks/main.yml b/test/integration/roles/test_mysql_user/tasks/main.yml index cdfb7c4950..68042e7491 100644 --- a/test/integration/roles/test_mysql_user/tasks/main.yml +++ b/test/integration/roles/test_mysql_user/tasks/main.yml @@ -153,22 +153,22 @@ - include: user_password_update_test.yml # ============================================================ -# Assert create user with SELECT privileges, attemp to create database and update privileges to create database +# Assert create user with SELECT privileges, attempt to create database and update privileges to create database # - include: test_privs.yml current_privilege=SELECT current_append_privs=no # ============================================================ -# Assert creating user with SELECT privileges, attemp to create database and append privileges to create database +# Assert creating user with SELECT privileges, attempt to create database and append privileges to create database # - include: test_privs.yml current_privilege=DROP current_append_privs=yes # ============================================================ -# Assert create user with SELECT privileges, attemp to create database and update privileges to create database +# Assert create user with SELECT privileges, 
attempt to create database and update privileges to create database # - include: test_privs.yml current_privilege='UPDATE,ALTER' current_append_privs=no # ============================================================ -# Assert creating user with SELECT privileges, attemp to create database and append privileges to create database +# Assert creating user with SELECT privileges, attempt to create database and append privileges to create database # - include: test_privs.yml current_privilege='INSERT,DELETE' current_append_privs=yes diff --git a/test/integration/roles/test_win_stat/tasks/main.yml b/test/integration/roles/test_win_stat/tasks/main.yml index a526976ec9..5069f51a80 100644 --- a/test/integration/roles/test_win_stat/tasks/main.yml +++ b/test/integration/roles/test_win_stat/tasks/main.yml @@ -72,7 +72,7 @@ register: win_stat_no_args ignore_errors: true -- name: check win_stat result witn no path argument +- name: check win_stat result with no path argument assert: that: - "win_stat_no_args|failed" diff --git a/test/units/TestModuleUtilsBasic.py b/test/units/TestModuleUtilsBasic.py index f5962a9478..18a4e0d772 100644 --- a/test/units/TestModuleUtilsBasic.py +++ b/test/units/TestModuleUtilsBasic.py @@ -329,5 +329,5 @@ class TestModuleUtilsBasicHelpers(unittest.TestCase): # The overzealous-ness here may lead to us changing the algorithm in # the future. We could make it consume less of the data (with the - # possiblity of leaving partial passwords exposed) and encourage + # possibility of leaving partial passwords exposed) and encourage # people to use no_log instead of relying on this obfuscation. 
diff --git a/test/units/TestUtils.py b/test/units/TestUtils.py index d93fc70329..9560014e0f 100644 --- a/test/units/TestUtils.py +++ b/test/units/TestUtils.py @@ -725,7 +725,7 @@ class TestUtils(unittest.TestCase): # jinja2 loop blocks with lots of complexity _test_combo( # in memory of neighbors cat - # we preserve line breaks unless a line continuation character preceeds them + # we preserve line breaks unless a line continuation character precedes them 'a {% if x %} y {%else %} {{meow}} {% endif %} "cookie\nchip" \\\ndone\nand done', ['a', '{% if x %}', 'y', '{%else %}', '{{meow}}', '{% endif %}', '"cookie\nchip"', 'done\n', 'and', 'done'] ) diff --git a/ticket_stubs/great_idea.md b/ticket_stubs/great_idea.md index 4ad794e797..b6f35fc10c 100644 --- a/ticket_stubs/great_idea.md +++ b/ticket_stubs/great_idea.md @@ -1,4 +1,4 @@ -Submission Recieved +Submission Received =================== Hi! diff --git a/ticket_stubs/module_repo.md b/ticket_stubs/module_repo.md index 7cfbf6c1de..13791eaaa2 100644 --- a/ticket_stubs/module_repo.md +++ b/ticket_stubs/module_repo.md @@ -6,7 +6,7 @@ Hi! Thanks very much for your interest in Ansible. It sincerely means a lot to us. This appears to be a submission about a module, and aside from action_plugins, if you know what those are, the modules -in ansible are now moved two seperate repos. We would appreciate if you can submit this there instead. +in ansible are now moved two separate repos. We would appreciate if you can submit this there instead. If this is about a new module, submit pull requests or ideas to: diff --git a/ticket_stubs/no_thanks.md b/ticket_stubs/no_thanks.md index 7e43f26695..e9249ba033 100644 --- a/ticket_stubs/no_thanks.md +++ b/ticket_stubs/no_thanks.md @@ -14,7 +14,7 @@ open dialog. 
You can stop by the development list, and we'd be glad to talk abo * https://groups.google.com/forum/#!forum/ansible-devel -In the future, sometimes starting a discussion on the development list prior to implenting a feature can make getting things included a little easier, but it's not always neccessary. +In the future, sometimes starting a discussion on the development list prior to implenting a feature can make getting things included a little easier, but it's not always necessary. Thank you once again for this and your interest in Ansible! diff --git a/ticket_stubs/pr_duplicate.md b/ticket_stubs/pr_duplicate.md index 7294e94ef6..a2c3b48ea2 100644 --- a/ticket_stubs/pr_duplicate.md +++ b/ticket_stubs/pr_duplicate.md @@ -15,7 +15,7 @@ However, we're absolutely always up for discussion. Since this is a really busy * https://groups.google.com/forum/#!forum/ansible-devel -In the future, sometimes starting a discussion on the development list prior to implenting a feature can make getting things included a little easier, but it's not always neccessary. +In the future, sometimes starting a discussion on the development list prior to implenting a feature can make getting things included a little easier, but it's not always necessary. Thank you once again for this and your interest in Ansible! diff --git a/ticket_stubs/thanks.md b/ticket_stubs/thanks.md index c77019889a..646571d568 100644 --- a/ticket_stubs/thanks.md +++ b/ticket_stubs/thanks.md @@ -1,4 +1,4 @@ -Submission Recieved +Submission Received =================== Hi! diff --git a/v2/ansible/compat/__init__.py b/v2/ansible/compat/__init__.py index ab861135c7..e77b77d2a6 100644 --- a/v2/ansible/compat/__init__.py +++ b/v2/ansible/compat/__init__.py @@ -20,7 +20,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type ''' -Compat library for ansible. This contains compatiblity definitions for older python +Compat library for ansible. 
This contains compatibility definitions for older python When we need to import a module differently depending on python version, do it here. Then in the code we can simply import from compat in order to get what we want. ''' diff --git a/v2/ansible/errors/__init__.py b/v2/ansible/errors/__init__.py index d4d93d0e4f..2813507df2 100644 --- a/v2/ansible/errors/__init__.py +++ b/v2/ansible/errors/__init__.py @@ -61,7 +61,7 @@ class AnsibleError(Exception): def _get_error_lines_from_file(self, file_name, line_number): ''' Returns the line in the file which coresponds to the reported error - location, as well as the line preceeding it (if the error did not + location, as well as the line preceding it (if the error did not occur on the first line), to provide context to the error. ''' @@ -82,7 +82,7 @@ class AnsibleError(Exception): Given an object reporting the location of the exception in a file, return detailed information regarding it including: - * the line which caused the error as well as the one preceeding it + * the line which caused the error as well as the one preceding it * causes and suggested remedies for common syntax errors If this error was created with show_content=False, the reporting of content diff --git a/v2/ansible/parsing/yaml/__init__.py b/v2/ansible/parsing/yaml/__init__.py index a6c63feaa7..3f5ebb7c99 100644 --- a/v2/ansible/parsing/yaml/__init__.py +++ b/v2/ansible/parsing/yaml/__init__.py @@ -130,12 +130,12 @@ class DataLoader(): show_content = False return (data, show_content) except (IOError, OSError) as e: - raise AnsibleParserError("an error occured while trying to read the file '%s': %s" % (file_name, str(e))) + raise AnsibleParserError("an error occurred while trying to read the file '%s': %s" % (file_name, str(e))) def _handle_error(self, yaml_exc, file_name, show_content): ''' Optionally constructs an object (AnsibleBaseYAMLObject) to encapsulate the - file name/position where a YAML exception occured, and raises an AnsibleParserError 
+ file name/position where a YAML exception occurred, and raises an AnsibleParserError to display the syntax exception information. ''' diff --git a/v2/ansible/playbook/play.py b/v2/ansible/playbook/play.py index c3d11e6cb2..6dd92ffba0 100644 --- a/v2/ansible/playbook/play.py +++ b/v2/ansible/playbook/play.py @@ -170,7 +170,7 @@ class Play(Base): if len(self.roles) > 0: for ri in self.roles: - # The internal list of roles are actualy RoleInclude objects, + # The internal list of roles are actually RoleInclude objects, # so we load the role from that now role = Role.load(ri) From b042fcc349f965a60943a3bfcf25f143b5fff3dc Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 5 Dec 2014 12:35:43 -0800 Subject: [PATCH 487/813] Fix for delegate_to with hosts that aren't in inventory --- lib/ansible/runner/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index b1652d86e3..4f861e50ee 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -1227,7 +1227,7 @@ class Runner(object): def _remote_checksum(self, conn, tmp, path, inject): ''' takes a remote checksum and returns 1 if no file ''' - if 'delegate_to' in inject and inject['delegate_to']: + if 'delegate_to' in inject and inject['delegate_to'] and inject['delegate_to'] in inject['hostvars']: python_interp = inject['hostvars'][inject['delegate_to']].get('ansible_python_interpreter', 'python') else: python_interp = inject['hostvars'][inject['inventory_hostname']].get('ansible_python_interpreter', 'python') From e86259cdf398eb0caf6c831a4f2c784b944cb6b7 Mon Sep 17 00:00:00 2001 From: Lorin Hochstein Date: Sat, 6 Dec 2014 21:24:50 -0500 Subject: [PATCH 488/813] Docfix: running background tasks without polling Fix the example in the doc to explicitly set the poll interval to zero so the job doesn't poll. To run a background task without polling, you need to set the poll interval to zero. 
However, Ansible's default poll setting is 15 seconds, so not specifying the poll interval will cause a background job to poll. --- docsite/rst/intro_adhoc.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/intro_adhoc.rst b/docsite/rst/intro_adhoc.rst index 2646945be4..770c2bd5be 100644 --- a/docsite/rst/intro_adhoc.rst +++ b/docsite/rst/intro_adhoc.rst @@ -230,7 +230,7 @@ checked on later. The same job ID is given to the same task on all hosts, so you won't lose track. If you kick hosts and don't want to poll, it looks like this:: - $ ansible all -B 3600 -a "/usr/bin/long_running_operation --do-stuff" + $ ansible all -B 3600 -P 0 -a "/usr/bin/long_running_operation --do-stuff" If you do decide you want to check on the job status later, you can:: From 39c488203f7322d4ee45c501e4ac979b0079da59 Mon Sep 17 00:00:00 2001 From: Jesse Buchanan Date: Sun, 7 Dec 2014 14:29:57 -0500 Subject: [PATCH 489/813] Add integration test for ansible-modules-core #460 See https://github.com/ansible/ansible-modules-core/pull/460 --- test/integration/roles/test_file/tasks/main.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/test/integration/roles/test_file/tasks/main.yml b/test/integration/roles/test_file/tasks/main.yml index 26de23b1ca..529a8dfdf1 100644 --- a/test/integration/roles/test_file/tasks/main.yml +++ b/test/integration/roles/test_file/tasks/main.yml @@ -106,6 +106,15 @@ that: - "file6_result.changed == true" +- name: touch a hard link + file: src={{output_file}} dest={{output_dir}}/hard.txt state=touch + register: file6_touch_result + +- name: verify that the hard link was touched + assert: + that: + - "file6_touch_result.changed == true" + - name: create a directory file: path={{output_dir}}/foobar state=directory register: file7_result From 2a288141d3d3ea709d83772b431f6d58dae22198 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 8 Dec 2014 10:55:04 -0800 Subject: [PATCH 490/813] Fix template module to fail if remote 
checksumming failed --- lib/ansible/runner/action_plugins/template.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/ansible/runner/action_plugins/template.py b/lib/ansible/runner/action_plugins/template.py index fd38c61063..15e8e3a9a0 100644 --- a/lib/ansible/runner/action_plugins/template.py +++ b/lib/ansible/runner/action_plugins/template.py @@ -92,6 +92,11 @@ class ActionModule(object): local_checksum = utils.checksum_s(resultant) remote_checksum = self.runner._remote_checksum(conn, tmp, dest, inject) + if remote_checksum in ('0', '1', '2', '3', '4'): + result = dict(failed=True, msg="failed to checksum remote file." + " Checksum error code: %s" % remote_checksum) + return ReturnData(conn=conn, comm_ok=True, result=result) + if local_checksum != remote_checksum: # template is different from the remote value From 3269a349f32a2b5c1fc079a2d174e01c541f444b Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 8 Dec 2014 12:44:44 -0800 Subject: [PATCH 491/813] Fix remote_checksum with delegate_to and add tests for several ways that delegate_to works --- lib/ansible/runner/__init__.py | 24 +++++++++++-- test/integration/Makefile | 5 ++- test/integration/inventory | 3 ++ test/integration/test_delegate_to.yml | 50 +++++++++++++++++++++++++++ 4 files changed, 78 insertions(+), 4 deletions(-) create mode 100644 test/integration/test_delegate_to.yml diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 4f861e50ee..ebf20cb7d3 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -1227,10 +1227,28 @@ class Runner(object): def _remote_checksum(self, conn, tmp, path, inject): ''' takes a remote checksum and returns 1 if no file ''' - if 'delegate_to' in inject and inject['delegate_to'] and inject['delegate_to'] in inject['hostvars']: - python_interp = inject['hostvars'][inject['delegate_to']].get('ansible_python_interpreter', 'python') + + # Lookup the python interp from the host or delegate + + # host 
== inven_host when there is no delegate + host = inject['inventory_hostname'] + if 'delegate_to' in inject: + delegate = inject['delegate_to'] + if delegate: + # host == None when the delegate is not in inventory + host = None + # delegate set, check whether the delegate has inventory vars + delegate = template.template(self.basedir, delegate, inject) + if delegate in inject['hostvars']: + # host == delegate if we need to lookup the + # python_interpreter from the delegate's inventory vars + host = delegate + + if host: + python_interp = inject['hostvars'][host].get('ansible_python_interpreter', 'python') else: - python_interp = inject['hostvars'][inject['inventory_hostname']].get('ansible_python_interpreter', 'python') + python_interp = 'python' + cmd = conn.shell.checksum(path, python_interp) data = self._low_level_exec_command(conn, cmd, tmp, sudoable=True) data2 = utils.last_non_blank_line(data['stdout']) diff --git a/test/integration/Makefile b/test/integration/Makefile index 77c81a76b9..fc973e368f 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -19,7 +19,7 @@ TMPDIR = $(shell mktemp -d 2>/dev/null || mktemp -d -t 'mytmpdir') VAULT_PASSWORD_FILE = vault-password -all: parsing test_var_precedence unicode non_destructive destructive includes check_mode test_hash test_handlers test_group_by test_vault +all: parsing test_var_precedence unicode non_destructive destructive includes check_mode test_hash test_handlers test_group_by test_vault test_delegate_to parsing: ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario1; [ $$? 
-eq 3 ] @@ -65,6 +65,9 @@ test_vault: ansible-playbook test_vault.yml -i $(INVENTORY) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) --vault-password-file $(VAULT_PASSWORD_FILE) --syntax-check ansible-playbook test_vault.yml -i $(INVENTORY) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) --vault-password-file $(VAULT_PASSWORD_FILE) +test_delegate_to: + ansible-playbook test_delegate_to.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) + test_winrm: ansible-playbook test_winrm.yml -i inventory.winrm -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) diff --git a/test/integration/inventory b/test/integration/inventory index a9f160c989..72d80aabeb 100644 --- a/test/integration/inventory +++ b/test/integration/inventory @@ -1,6 +1,9 @@ [local] testhost ansible_ssh_host=127.0.0.1 ansible_connection=local testhost2 ansible_ssh_host=127.0.0.1 ansible_connection=local +# For testing delegate_to +testhost3 ansible_ssh_host=127.0.0.3 +testhost4 ansible_ssh_host=127.0.0.4 # the following inline declarations are accompanied # by (preferred) group_vars/ and host_vars/ variables diff --git a/test/integration/test_delegate_to.yml b/test/integration/test_delegate_to.yml new file mode 100644 index 0000000000..4ffac5568f --- /dev/null +++ b/test/integration/test_delegate_to.yml @@ -0,0 +1,50 @@ +- hosts: testhost3 + roles: + - { role: prepare_tests } + vars: + - template_role: ./roles/test_template + - templated_var: foo + tasks: + - name: Test no delegate_to + setup: + register: setup_results + + - assert: + that: + - '"127.0.0.3" in setup_results.ansible_facts.ansible_env["SSH_CONNECTION"]' + + - name: Test delegate_to with host in inventory + setup: + register: setup_results + delegate_to: testhost4 + + - assert: + that: + - '"127.0.0.4" in setup_results.ansible_facts.ansible_env["SSH_CONNECTION"]' + + - name: Test delegate_to with host not in inventory + setup: + register: setup_results + delegate_to: 127.0.0.254 + + - assert: + that: + - '"127.0.0.254" in 
setup_results.ansible_facts.ansible_env["SSH_CONNECTION"]' +# +# Smoketest some other modules do not error as a canary +# + - name: Test file works with delegate_to and a host in inventory + file: path={{ output_dir }}/foo.txt mode=0644 state=touch + delegate_to: testhost4 + + - name: Test file works with delegate_to and a host not in inventory + file: path={{ output_dir }}/test_follow_link mode=0644 state=touch + delegate_to: 127.0.0.254 + + - name: Test template works with delegate_to and a host in inventory + template: src={{ template_role }}/templates/foo.j2 dest={{ output_dir }}/foo.txt + delegate_to: testhost4 + + - name: Test template works with delegate_to and a host not in inventory + template: src={{ template_role }}/templates/foo.j2 dest={{ output_dir }}/foo.txt + delegate_to: 127.0.0.254 From f2d2f425ae07cff897208f578ef32d9befd1c4e6 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 8 Dec 2014 13:07:34 -0800 Subject: [PATCH 492/813] Fix comment --- lib/ansible/runner/action_plugins/copy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/runner/action_plugins/copy.py b/lib/ansible/runner/action_plugins/copy.py index bb579e48a8..9f6797a02a 100644 --- a/lib/ansible/runner/action_plugins/copy.py +++ b/lib/ansible/runner/action_plugins/copy.py @@ -197,7 +197,7 @@ class ActionModule(object): return ReturnData(conn=conn, result=result) if remote_checksum != '1' and not force: - # remote_file does not exist so continue to next iteration. + # remote_file exists so continue to next iteration. 
continue if local_checksum != remote_checksum: From 3a5aec974308e9779240679e34e5134ba7d27d34 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 8 Dec 2014 13:08:26 -0800 Subject: [PATCH 493/813] file does not exist is not an error when checksumming for the template modules --- lib/ansible/runner/action_plugins/template.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/ansible/runner/action_plugins/template.py b/lib/ansible/runner/action_plugins/template.py index 15e8e3a9a0..11c02796e3 100644 --- a/lib/ansible/runner/action_plugins/template.py +++ b/lib/ansible/runner/action_plugins/template.py @@ -92,7 +92,9 @@ class ActionModule(object): local_checksum = utils.checksum_s(resultant) remote_checksum = self.runner._remote_checksum(conn, tmp, dest, inject) - if remote_checksum in ('0', '1', '2', '3', '4'): + if remote_checksum in ('0', '2', '3', '4'): + # Note: 1 means the file is not present which is fine; template + # will create it result = dict(failed=True, msg="failed to checksum remote file." 
" Checksum error code: %s" % remote_checksum) return ReturnData(conn=conn, comm_ok=True, result=result) From 8d6ea38ee02e88d34b2664068a0abf53564ea3e7 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 8 Dec 2014 13:39:01 -0800 Subject: [PATCH 494/813] Disable automatic running of test_delegate_to with an explanation of what it would take to set this up in our automated test systems --- test/integration/Makefile | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/test/integration/Makefile b/test/integration/Makefile index fc973e368f..cf15c753cf 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -19,7 +19,7 @@ TMPDIR = $(shell mktemp -d 2>/dev/null || mktemp -d -t 'mytmpdir') VAULT_PASSWORD_FILE = vault-password -all: parsing test_var_precedence unicode non_destructive destructive includes check_mode test_hash test_handlers test_group_by test_vault test_delegate_to +all: parsing test_var_precedence unicode non_destructive destructive includes check_mode test_hash test_handlers test_group_by test_vault parsing: ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario1; [ $$? -eq 3 ] @@ -65,6 +65,12 @@ test_vault: ansible-playbook test_vault.yml -i $(INVENTORY) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) --vault-password-file $(VAULT_PASSWORD_FILE) --syntax-check ansible-playbook test_vault.yml -i $(INVENTORY) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) --vault-password-file $(VAULT_PASSWORD_FILE) +# test_delegate_to does not work unless we have permission to ssh to localhost. +# Would take some more effort on our test systems to implement that -- probably +# the test node should create an ssh public-private key pair that allows the +# root user on a node to ssh to itself. Until then, this is not in make all. +# Have to run it manually. Ordinary users should be able to run this test as +# long as they have permissions to login to their local machine via ssh. 
test_delegate_to: ansible-playbook test_delegate_to.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) From c725aa5e4737c706d03a2e680e6a2cda5e27a778 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 8 Dec 2014 17:26:55 -0500 Subject: [PATCH 495/813] updated submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index b766390ae2..db5668b84c 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit b766390ae2e0fc79a32bb3a55eed959655b76a43 +Subproject commit db5668b84c3a19498b843d0bfe34574aef40c193 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 19e688b017..d2d0ed2259 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 19e688b01750b3b5ad02cbfe51533056068a3224 +Subproject commit d2d0ed2259fc37b4d17266f820964d3ff58667c7 From c4d5e9195ba7e33f81383cea8daf4f904fc577a2 Mon Sep 17 00:00:00 2001 From: Lorin Hochstein Date: Mon, 8 Dec 2014 21:52:03 -0500 Subject: [PATCH 496/813] Docfix: checking a background task Minor changes to wording on how to check a background task with async_status. Fixes #9740 --- docsite/rst/intro_adhoc.rst | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docsite/rst/intro_adhoc.rst b/docsite/rst/intro_adhoc.rst index 770c2bd5be..2978343abe 100644 --- a/docsite/rst/intro_adhoc.rst +++ b/docsite/rst/intro_adhoc.rst @@ -225,16 +225,16 @@ Ensure a service is stopped:: Time Limited Background Operations `````````````````````````````````` -Long running operations can be backgrounded, and their status can be -checked on later. The same job ID is given to the same task on all -hosts, so you won't lose track. 
If you kick hosts and don't want -to poll, it looks like this:: +Long running operations can be backgrounded, and their status can be checked on +later. If you kick hosts and don't want to poll, it looks like this:: $ ansible all -B 3600 -P 0 -a "/usr/bin/long_running_operation --do-stuff" -If you do decide you want to check on the job status later, you can:: +If you do decide you want to check on the job status later, you can use the +async_status module, passing it the job id that was returned when you ran +the original job in the background:: - $ ansible all -m async_status -a "jid=123456789" + $ ansible web1.example.com -m async_status -a "jid=488359678239.2844" Polling is built-in and looks like this:: From 050a2dc919f3cd97714375679d2d89f2c07f0d0f Mon Sep 17 00:00:00 2001 From: Thomas Tourlourat Date: Tue, 9 Dec 2014 14:28:57 +0100 Subject: [PATCH 497/813] Add missing connection and header --- docsite/rst/guide_aws.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docsite/rst/guide_aws.rst b/docsite/rst/guide_aws.rst index 7f05833550..623a80fe40 100644 --- a/docsite/rst/guide_aws.rst +++ b/docsite/rst/guide_aws.rst @@ -58,7 +58,10 @@ be terminated. What is being counted is specified by the "count_tag" parameter. The parameter "instance_tags" is used to apply tags to the newly created instance. 
+ # demo_setup.yml + - hosts: localhost + connection: local gather_facts: False tasks: @@ -84,6 +87,7 @@ From this, we'll use the add_host module to dynamically create a host group cons # demo_setup.yml - hosts: localhost + connection: local gather_facts: False tasks: From 55cb55d67ad9e06704d00d07cca6835384c380a0 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 9 Dec 2014 08:01:31 -0800 Subject: [PATCH 498/813] Update the extras submodules to latest --- lib/ansible/modules/extras | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index d2d0ed2259..82aaaa4152 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit d2d0ed2259fc37b4d17266f820964d3ff58667c7 +Subproject commit 82aaaa4152d955c79df00acd184f18c9be3c80cb From 35cfeefdc10ce5e4592927170e84c00add119397 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 9 Dec 2014 08:03:07 -0800 Subject: [PATCH 499/813] Update core modules --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index db5668b84c..abff6c2582 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit db5668b84c3a19498b843d0bfe34574aef40c193 +Subproject commit abff6c2582a657d2622ae97d40e5936dcffeb755 From 58be1b049e819774000e398e9d00b561913e8c87 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 9 Dec 2014 11:31:21 -0500 Subject: [PATCH 500/813] added tests for templating and with_items --- .../roles/test_lookups/tasks/main.yml | 30 +++++++++++++------ test/integration/vars_file.yml | 7 +++++ 2 files changed, 28 insertions(+), 9 deletions(-) diff --git a/test/integration/roles/test_lookups/tasks/main.yml b/test/integration/roles/test_lookups/tasks/main.yml index 6480b18b35..8440ff5772 100644 --- a/test/integration/roles/test_lookups/tasks/main.yml +++ 
b/test/integration/roles/test_lookups/tasks/main.yml @@ -81,7 +81,7 @@ - "wc_result.stdout == '9'" - "cat_result.stdout == newpass" -# ENV LOOKUP +# ENV LOOKUP - name: get first environment var name shell: env | head -n1 | cut -d\= -f1 @@ -92,16 +92,16 @@ register: known_var_value - name: use env lookup to get known var - set_fact: + set_fact: test_val: "{{ lookup('env', known_var_name.stdout) }}" - debug: var=known_var_name.stdout - debug: var=known_var_value.stdout -- debug: var=test_val +- debug: var=test_val - name: compare values assert: - that: + that: - "test_val == known_var_value.stdout" @@ -109,11 +109,23 @@ # https://github.com/ansible/ansible/issues/6550 - name: confirm pipe lookup works with a single positional arg - debug: msg="{{ lookup('pipe', 'ls') }}" - -# https://github.com/ansible/ansible/issues/6550 -- name: confirm pipe lookup works with multiple positional args - debug: msg="{{ lookup('pipe', 'ls -l /tmp') }}" + debug: msg="{{ lookup('pipe', 'ls') }}" +# LOOKUP TEMPLATING + +- name: use bare interpolation + debug: msg="got {{item}}" + with_items: things1 + register: bare_var + +- name: verify that list was interpolated + assert: + that: + - "bare_var.results[0].item == 1" + - "bare_var.results[1].item == 2" + +- name: use list with undefined var in it + debug: msg={{item}} + with_items: things2 diff --git a/test/integration/vars_file.yml b/test/integration/vars_file.yml index bd162327d2..c43bf81866 100644 --- a/test/integration/vars_file.yml +++ b/test/integration/vars_file.yml @@ -2,4 +2,11 @@ # in general define test data in the individual role: # roles/role_name/vars/main.yml +foo: "Hello" +things1: + - 1 + - 2 +things2: + - "{{ foo }}" + - "{{ foob }}" vars_file_var: 321 From c16b83af14b13e84887169ef6470cfb1d3d589c4 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 9 Dec 2014 09:09:29 -0800 Subject: [PATCH 501/813] Updat ecore to pull in new git module fixes --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index abff6c2582..375025d2e3 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit abff6c2582a657d2622ae97d40e5936dcffeb755 +Subproject commit 375025d2e3edf2dca764a50c1c213286f38fc9c2 From ff970eabd8f6a8096d76a6a58ce7ccd8784a7366 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 9 Dec 2014 09:44:57 -0800 Subject: [PATCH 502/813] Add role vars to differentiate with role defaults --- docsite/rst/playbooks_variables.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index 84f0a1f5b5..3a52261360 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -972,7 +972,7 @@ If multiple variables of the same name are defined in different places, they win * extra vars (-e in the command line) always win * then comes connection variables defined in inventory (ansible_ssh_user, etc) - * then comes "most everything else" (command line switches, vars in play, included vars, etc) + * then comes "most everything else" (command line switches, vars in play, included vars, role vars, etc) * then comes the rest of the variables defined in inventory * then comes facts discovered about a system * then "role defaults", which are the most "defaulty" and lose in priority to everything. 
From fa51e8f36ddefbb3e8e59ecd370696b2918b9fab Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 9 Dec 2014 10:49:05 -0800 Subject: [PATCH 503/813] Document the --offline parameter to galaxy init Fixes #9758 --- docs/man/man1/ansible-doc.1 | 8 ++++---- docs/man/man1/ansible-galaxy.1 | 13 +++++++++---- docs/man/man1/ansible-galaxy.1.asciidoc.in | 4 ++++ docs/man/man1/ansible-playbook.1 | 8 ++++---- docs/man/man1/ansible-pull.1 | 10 +++++----- docs/man/man1/ansible-vault.1 | 8 ++++---- docs/man/man1/ansible.1 | 8 ++++---- 7 files changed, 34 insertions(+), 25 deletions(-) diff --git a/docs/man/man1/ansible-doc.1 b/docs/man/man1/ansible-doc.1 index 041cf48099..1b51fa00e6 100644 --- a/docs/man/man1/ansible-doc.1 +++ b/docs/man/man1/ansible-doc.1 @@ -1,13 +1,13 @@ '\" t .\" Title: ansible-doc .\" Author: :doctype:manpage -.\" Generator: DocBook XSL Stylesheets v1.76.1 -.\" Date: 05/26/2014 +.\" Generator: DocBook XSL Stylesheets v1.78.1 +.\" Date: 12/09/2014 .\" Manual: System administration commands -.\" Source: Ansible 1.7 +.\" Source: Ansible 1.9 .\" Language: English .\" -.TH "ANSIBLE\-DOC" "1" "05/26/2014" "Ansible 1\&.7" "System administration commands" +.TH "ANSIBLE\-DOC" "1" "12/09/2014" "Ansible 1\&.9" "System administration commands" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- diff --git a/docs/man/man1/ansible-galaxy.1 b/docs/man/man1/ansible-galaxy.1 index 5bac353505..eac74b6a85 100644 --- a/docs/man/man1/ansible-galaxy.1 +++ b/docs/man/man1/ansible-galaxy.1 @@ -1,13 +1,13 @@ '\" t .\" Title: ansible-galaxy .\" Author: [see the "AUTHOR" section] -.\" Generator: DocBook XSL Stylesheets v1.76.1 -.\" Date: 05/26/2014 +.\" Generator: DocBook XSL Stylesheets v1.78.1 +.\" Date: 12/09/2014 .\" Manual: System administration commands -.\" Source: Ansible 1.7 +.\" Source: Ansible 1.9 .\" Language: English .\" -.TH "ANSIBLE\-GALAXY" 
"1" "05/26/2014" "Ansible 1\&.7" "System administration commands" +.TH "ANSIBLE\-GALAXY" "1" "12/09/2014" "Ansible 1\&.9" "System administration commands" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- @@ -149,6 +149,11 @@ Force overwriting an existing role\&. .RS 4 The path in which the skeleton role will be created\&.The default is the current working directory\&. .RE +.PP +\fB\-\-offline\fR +.RS 4 +Don\(cqt query the galaxy API when creating roles +.RE .SH "LIST" .sp The \fBlist\fR sub\-command is used to show what roles are currently instaled\&. You can specify a role name, and if installed only that role will be shown\&. diff --git a/docs/man/man1/ansible-galaxy.1.asciidoc.in b/docs/man/man1/ansible-galaxy.1.asciidoc.in index b8a80e6b2c..3d59e31706 100644 --- a/docs/man/man1/ansible-galaxy.1.asciidoc.in +++ b/docs/man/man1/ansible-galaxy.1.asciidoc.in @@ -122,6 +122,10 @@ Force overwriting an existing role. The path in which the skeleton role will be created.The default is the current working directory. 
+*--offline*:: + +Don't query the galaxy API when creating roles + LIST ---- diff --git a/docs/man/man1/ansible-playbook.1 b/docs/man/man1/ansible-playbook.1 index 63f8904f0c..ac8466a36a 100644 --- a/docs/man/man1/ansible-playbook.1 +++ b/docs/man/man1/ansible-playbook.1 @@ -1,13 +1,13 @@ '\" t .\" Title: ansible-playbook .\" Author: :doctype:manpage -.\" Generator: DocBook XSL Stylesheets v1.76.1 -.\" Date: 05/26/2014 +.\" Generator: DocBook XSL Stylesheets v1.78.1 +.\" Date: 12/09/2014 .\" Manual: System administration commands -.\" Source: Ansible 1.7 +.\" Source: Ansible 1.9 .\" Language: English .\" -.TH "ANSIBLE\-PLAYBOOK" "1" "05/26/2014" "Ansible 1\&.7" "System administration commands" +.TH "ANSIBLE\-PLAYBOOK" "1" "12/09/2014" "Ansible 1\&.9" "System administration commands" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- diff --git a/docs/man/man1/ansible-pull.1 b/docs/man/man1/ansible-pull.1 index 58029eabb8..d39cfa67a2 100644 --- a/docs/man/man1/ansible-pull.1 +++ b/docs/man/man1/ansible-pull.1 @@ -1,13 +1,13 @@ '\" t .\" Title: ansible .\" Author: :doctype:manpage -.\" Generator: DocBook XSL Stylesheets v1.76.1 -.\" Date: 05/26/2014 +.\" Generator: DocBook XSL Stylesheets v1.78.1 +.\" Date: 12/09/2014 .\" Manual: System administration commands -.\" Source: Ansible 1.7 +.\" Source: Ansible 1.9 .\" Language: English .\" -.TH "ANSIBLE" "1" "05/26/2014" "Ansible 1\&.7" "System administration commands" +.TH "ANSIBLE" "1" "12/09/2014" "Ansible 1\&.9" "System administration commands" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- @@ -31,7 +31,7 @@ ansible-pull \- set up a remote copy of ansible on each managed node .SH "SYNOPSIS" .sp -ansible \-d DEST \-U URL [options] [ ] +ansible\-pull \-d DEST \-U URL 
[options] [ ] .SH "DESCRIPTION" .sp \fBAnsible\fR is an extra\-simple tool/framework/API for doing \*(Aqremote things\*(Aq over SSH\&. diff --git a/docs/man/man1/ansible-vault.1 b/docs/man/man1/ansible-vault.1 index f353e3269f..286e642748 100644 --- a/docs/man/man1/ansible-vault.1 +++ b/docs/man/man1/ansible-vault.1 @@ -1,13 +1,13 @@ '\" t .\" Title: ansible-vault .\" Author: [see the "AUTHOR" section] -.\" Generator: DocBook XSL Stylesheets v1.76.1 -.\" Date: 05/26/2014 +.\" Generator: DocBook XSL Stylesheets v1.78.1 +.\" Date: 12/09/2014 .\" Manual: System administration commands -.\" Source: Ansible 1.7 +.\" Source: Ansible 1.9 .\" Language: English .\" -.TH "ANSIBLE\-VAULT" "1" "05/26/2014" "Ansible 1\&.7" "System administration commands" +.TH "ANSIBLE\-VAULT" "1" "12/09/2014" "Ansible 1\&.9" "System administration commands" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- diff --git a/docs/man/man1/ansible.1 b/docs/man/man1/ansible.1 index 233428782e..6f16a449bf 100644 --- a/docs/man/man1/ansible.1 +++ b/docs/man/man1/ansible.1 @@ -1,13 +1,13 @@ '\" t .\" Title: ansible .\" Author: :doctype:manpage -.\" Generator: DocBook XSL Stylesheets v1.76.1 -.\" Date: 05/26/2014 +.\" Generator: DocBook XSL Stylesheets v1.78.1 +.\" Date: 12/09/2014 .\" Manual: System administration commands -.\" Source: Ansible 1.7 +.\" Source: Ansible 1.9 .\" Language: English .\" -.TH "ANSIBLE" "1" "05/26/2014" "Ansible 1\&.7" "System administration commands" +.TH "ANSIBLE" "1" "12/09/2014" "Ansible 1\&.9" "System administration commands" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- From 74f48ed79d344a021413a9ee450d13bf80cc77a4 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 3 Nov 2014 14:30:14 -0800 Subject: [PATCH 504/813] 
Inventory with docstrings and notes on how to change --- v2/ansible/inventory/__init__.py | 312 ++++++++++++++++++++++++++++--- 1 file changed, 291 insertions(+), 21 deletions(-) diff --git a/v2/ansible/inventory/__init__.py b/v2/ansible/inventory/__init__.py index 5ad688eaf0..8ee44d851a 100644 --- a/v2/ansible/inventory/__init__.py +++ b/v2/ansible/inventory/__init__.py @@ -21,68 +21,338 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +from . group import Group +from . host import Host + +### List of things to change in Inventory +### Replace some lists with sets/frozensets. +### Check where this makes sense to reveal externally +### Rename all caches to *_cache +### Standardize how caches are flushed for all caches if possible +### Think about whether retrieving variables should be methods of the +### Groups/Hosts being queried with caches at that level +### Store things into a VarManager instead of inventory +### Merge list_hosts() and get_hosts() +### Merge list_groups() and groups_list() +### Merge get_variables() and get_host_variables() +### Restrictions: +### Remove get_restriction() +### Prefix restrict_to and lift_restriction with _ and note in docstring that +### only playbook is to use these for implementing failed hosts. This is +### the closest that python has to a "friend function" +### Can we get rid of restrictions altogether? +### If we must keep restrictions, reimplement as a stack of sets. Then +### calling code will push and pop restrictions onto the inventory +### is_file() and basedir() => Change to properties +### Can we move the playbook variable resolving to someplace else? Seems that: +### 1) It can change within a single session +### 2) Inventory shouldn't know about playbook. +### Possibilities: +### Host and groups read the host_vars and group_vars. Both inventory and +### playbook register paths that the hsot_vars and group_vars can read from. 
+### The VariableManager reads the host_vars and group_vars and keeps them +### layered depending on the context from which it's being asked what +### the value of a variable is +### Either of these results in getting rid of/moving to another class +### Inventory.playbook_basedir() and Inventory.set_playbook_basedir() + + +### Questiony things: +### Do we want patterns to apply to both groups and hosts or only to hosts? +### Think about whether we could and want to go through the pattern_cache for +### standard lookups +### Is this the current architecture: +### We have a single Inventory per runner. +### The Inventory may be initialized via: +### an ini file +### a directory of ini files +### a script +### a , separated string of hosts +### a list of hosts +### host_vars/* +### group_vars/* +### Do we want to change this so that multiple sources are allowed? +### ansible -i /etc/ansible,./inventory,/opt/ansible/inventory_plugins/ec2.py,localhost +### What are vars_loaders? What's their scope? Why aren't the parsing of +### inventory files and scripts implemented as a vars_loader? +### If we have add_group(), why no merge_group()? +### group = inven.get_group(name) +### if not group: +### group = Group(name) +### inven.add_group(group) +### +### vs +### group = Group(name) +### try: +### inven.add_group(group) +### except: +### inven.merge_group(group) +### +### vs: +### group = Group(name) +### inven.add_or_merge(group) + class Inventory: + ''' + Collect variables for hosts and groups from inventory + ''' def __init__(self, host_list=C.DEFAULT_HOST_LIST, vault_password=None): + ''' + :kwarg host_list: A filename for an inventory file or script or a list + of hosts + :kwarg vault_password: Password to use if any of the inventory sources + are in an ansible vault + ''' pass + def get_hosts(self, pattern="all"): + ''' + Find all hosts matching a pattern string + + This also takes into account any inventory restrictions or applied + subsets. 
+ + :kwarg pattern: An fnmatch pattern that hosts must match on. Multiple + patterns may be separated by ";" and ":". Defaults to the special + pattern "all" which means to return all hosts. + :returns: list of hosts + ''' pass + def clear_pattern_cache(self): - # Possibly not needed? + ''' + Invalidate the pattern cache + ''' + #### Possibly not needed? + # Former docstring: + # Called exclusively by the add_host plugin to allow patterns to be + # recalculated pass + def groups_for_host(self, host): + ''' + Return the groupnames to which a host belongs + + :arg host: Name of host to lookup + :returns: list of groupnames + ''' pass + def groups_list(self): + ''' + Return a mapping of group name to hostnames which belong to the group + + :returns: dict of groupnames mapped to a list of hostnames within that group + ''' pass + def get_groups(self): + ''' + Retrieve the Group objects known to the Inventory + + :returns: list of :class:`Group`s belonging to the Inventory + ''' pass + def get_host(self, hostname): + ''' + Retrieve the Host object for a hostname + + :arg hostname: hostname associated with the :class:`Host` + :returns: :class:`Host` object whose hostname was requested + ''' pass + def get_group(self, groupname): + ''' + Retrieve the Group object for a groupname + + :arg groupname: groupname associated with the :class:`Group` + :returns: :class:`Group` object whose groupname was requested + ''' pass + def get_group_variables(self, groupname, update_cached=False, vault_password=None): + ''' + Retrieve the variables set on a group + + :arg groupname: groupname to retrieve variables for + :kwarg update_cached: if True, retrieve the variables from the source + and refresh the cache for this variable + :kwarg vault_password: Password to use if any of the inventory sources + are in an ansible vault + :returns: dict mapping group variable names to values + ''' pass + def get_variables(self, hostname, update_cached=False, vault_password=None): + ''' + Retrieve the 
variables set on a host + + :arg hostname: hostname to retrieve variables for + :kwarg update_cached: if True, retrieve the variables from the source + and refresh the cache for this variable + :kwarg vault_password: Password to use if any of the inventory sources + are in an ansible vault + :returns: dict mapping host variable names to values + ''' + ### WARNING: v1 implementation ignores update_cached and vault_password pass + def get_host_variables(self, hostname, update_cached=False, vault_password=None): + ''' + Retrieve the variables set on a host + + :arg hostname: hostname to retrieve variables for + :kwarg update_cached: if True, retrieve the variables from the source + and refresh the cache for this variable + :kwarg vault_password: Password to use if any of the inventory sources + are in an ansible vault + :returns: dict mapping host variable names to values + ''' pass + def add_group(self, group): + ''' + Add a new group to the inventory + + :arg group: Group object to add to the inventory + ''' pass + def list_hosts(self, pattern="all"): + ''' + Retrieve a list of hostnames for a pattern + + :kwarg pattern: Retrieve hosts which match this pattern. The special + pattern "all" matches every host the inventory knows about. + :returns: list of hostnames + ''' + ### Notes: Differences with get_hosts: + ### get_hosts returns hosts, this returns host names + ### This adds the implicit localhost/127.0.0.1 as a name but not as + ### a host pass + def list_groups(self): + ''' + Retrieve list of groupnames + :returns: list of groupnames + ''' pass + def get_restriction(self): + ''' + Accessor for the private _restriction attribute. + ''' + ### Note: In v1, says to be removed. + ### Not used by anything at all. 
pass + def restrict_to(self, restriction): + ''' + Restrict get and list operations to hosts given in the restriction + + :arg restriction: + ''' + ### The v1 docstring says: + ### Used by the main playbook code to exclude failed hosts, don't use + ### this for other reasons pass + + def lift_restriction(self): + ''' + Remove a restriction + ''' + pass + def also_restrict_to(self, restriction): + ''' + Restrict get and list operations to hosts in the additional restriction + ''' + ### Need to explore use case here -- maybe we want to restrict for + ### several different reasons. Within a certain scope we restrict + ### again for a separate reason? pass + + def lift_also_restriction(self): + ''' + Remove an also_restriction + ''' + # HACK -- dead host skipping + pass + def subset(self, subset_pattern): """ Limits inventory results to a subset of inventory that matches a given - pattern, such as to select a given geographic of numeric slice amongst - a previous 'hosts' selection that only select roles, or vice versa... + pattern, such as to select a subset of a hosts selection that also + belongs to a certain geographic group or numeric slice. Corresponds to --limit parameter to ansible-playbook + + :arg subset_pattern: The pattern to limit with. If this is None it + clears the subset. Multiple patterns may be specified as a comma, + semicolon, or colon separated string. """ pass - def lift_restriction(self): - # HACK -- - pass - def lift_also_restriction(self): - # HACK -- dead host skipping - pass + def is_file(self): - pass - def basedir(self): - pass - def src(self): - pass - def playbook_basedir(self): - pass - def set_playbook_basedir(self, dir): - pass - def get_host_vars(self, host, new_pb_basedir=False): - pass - def get_group_vars(self, group, new_pb_basedir=False): + ''' + Did inventory come from a file? 
+ + :returns: True if the inventory is file based, False otherwise + ''' pass + def basedir(self): + ''' + What directory was inventory read from + + :returns: the path to the directory holding the inventory. None if + the inventory is not file based + ''' + pass + + def src(self): + ''' + What's the complete path to the inventory file? + + :returns: Complete path to the inventory file. None if inventory is + not file-based + ''' + pass + + def playbook_basedir(self): + ''' + Retrieve the directory of the current playbook + ''' + ### I want to move this out of inventory + + pass + + def set_playbook_basedir(self, dir): + ''' + Tell Inventory the basedir of the current playbook so Inventory can + look for host_vars and group_vars there. + ''' + ### I want to move this out of inventory + pass + + def get_host_vars(self, host, new_pb_basedir=False): + ''' + Loads variables from host_vars/ + + The variables are loaded from subdirectories located either in the + inventory base directory or the playbook base directory. Variables in + the playbook dir will win over the inventory dir if files are in both. + ''' + pass + + def get_group_vars(self, group, new_pb_basedir=False): + ''' + Loads variables from group_vars/ + + The variables are loaded from subdirectories located either in the + inventory base directory or the playbook base directory. Variables in + the playbook dir will win over the inventory dir if files are in both. + ''' + pass From bdf42104cd7ea064e2e11b56e0328d30401a7ca7 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 7 Nov 2014 14:01:29 -0800 Subject: [PATCH 505/813] Add some more comments from jimi-c and mpdehaan --- v2/ansible/inventory/__init__.py | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/v2/ansible/inventory/__init__.py b/v2/ansible/inventory/__init__.py index 8ee44d851a..dbd733db92 100644 --- a/v2/ansible/inventory/__init__.py +++ b/v2/ansible/inventory/__init__.py @@ -25,16 +25,23 @@ from . 
group import Group from . host import Host ### List of things to change in Inventory + ### Replace some lists with sets/frozensets. ### Check where this makes sense to reveal externally + ### Rename all caches to *_cache + ### Standardize how caches are flushed for all caches if possible + ### Think about whether retrieving variables should be methods of the ### Groups/Hosts being queried with caches at that level + ### Store things into a VarManager instead of inventory + ### Merge list_hosts() and get_hosts() ### Merge list_groups() and groups_list() ### Merge get_variables() and get_host_variables() + ### Restrictions: ### Remove get_restriction() ### Prefix restrict_to and lift_restriction with _ and note in docstring that @@ -43,7 +50,10 @@ from . host import Host ### Can we get rid of restrictions altogether? ### If we must keep restrictions, reimplement as a stack of sets. Then ### calling code will push and pop restrictions onto the inventory +### (mpdehaan +1'd stack idea) + ### is_file() and basedir() => Change to properties + ### Can we move the playbook variable resolving to someplace else? Seems that: ### 1) It can change within a single session ### 2) Inventory shouldn't know about playbook. @@ -55,10 +65,20 @@ from . host import Host ### the value of a variable is ### Either of these results in getting rid of/moving to another class ### Inventory.playbook_basedir() and Inventory.set_playbook_basedir() +### mpdehaan: evaluate caching and make sure we're just caching once. (Toshio: tie +### this in with storing and retrieving variables via Host and Group objects +### mpdehaan: If it's possible, move templating entirely out of inventory +### (Toshio: If it's possible, implement this by storing inside of +### VariableManager which will handle resolving templated variables) ### Questiony things: ### Do we want patterns to apply to both groups and hosts or only to hosts? 
+### jimi-c: Current code should do both as we're parsing things you can +### give to the -i commandline switch which can mix hosts and groups. +### like: `hosts: group1:group2&host3` +### toshio: should we move parsing the commandline out and then have that +### cli parser pass in a distinct list of hosts to add? ### Think about whether we could and want to go through the pattern_cache for ### standard lookups ### Is this the current architecture: @@ -73,8 +93,16 @@ from . host import Host ### group_vars/* ### Do we want to change this so that multiple sources are allowed? ### ansible -i /etc/ansible,./inventory,/opt/ansible/inventory_plugins/ec2.py,localhost +### jimi-c: We don't currently have multiple inventory sources explicitly +### allowed but you can specify an inventory directory and then have multiple +### sources inside of that. +### toshio: So do we want to make that available to people since we have to do it anyway? +### jimi-c: Also, what calls Inventory? TaskExecutor probably makes sense in v2 ### What are vars_loaders? What's their scope? Why aren't the parsing of ### inventory files and scripts implemented as a vars_loader? +### jimi-c: vars_loaders are plugins to do additional variable loading. +### svg has some inhouse. +### Could theoretically rewrite the current loading to be handled by a plugin ### If we have add_group(), why no merge_group()? 
### group = inven.get_group(name) ### if not group: From b4dfcc2d286d87de8a030cf986e0d3bb0e3f3255 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 17 Nov 2014 19:34:56 -0800 Subject: [PATCH 506/813] Start laying out how the pieces of code that parse inventory information into ansible will work --- v2/ansible/plugins/inventory/__init__.py | 59 ++++++++++++++++++++++ v2/ansible/plugins/inventory/aggregate.py | 61 +++++++++++++++++++++++ v2/ansible/plugins/inventory/directory.py | 52 +++++++++++++++++++ v2/ansible/plugins/inventory/ini.py | 53 ++++++++++++++++++++ 4 files changed, 225 insertions(+) create mode 100644 v2/ansible/plugins/inventory/aggregate.py create mode 100644 v2/ansible/plugins/inventory/directory.py create mode 100644 v2/ansible/plugins/inventory/ini.py diff --git a/v2/ansible/plugins/inventory/__init__.py b/v2/ansible/plugins/inventory/__init__.py index 785fc45992..41e8578ee7 100644 --- a/v2/ansible/plugins/inventory/__init__.py +++ b/v2/ansible/plugins/inventory/__init__.py @@ -15,7 +15,66 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +############################################# + # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type +from abc import ABCMeta, abstractmethod + +class InventoryParser: + '''Abstract Base Class for retrieving inventory information + + Any InventoryParser functions by taking an inven_source. The caller then + calls the parser() method. Once parser is called, the caller can access + InventoryParser.hosts for a mapping of Host objects and + InventoryParser.Groups for a mapping of Group objects. + ''' + __metaclass__ = ABCMeta + + def __init__(self, inven_source): + ''' + InventoryParser contructors take a source of inventory information + that they will parse the host and group information from. 
+ ''' + self.inven_source = inven_source + self.reset_parser() + + @abstractmethod + def reset_parser(self): + ''' + InventoryParsers generally cache their data once parser() is + called. This method initializes any parser state before calling parser + again. + ''' + self.hosts = dict() + self.groups = dict() + self.parsed = False + + def _merge(self, target, addition): + ''' + This method is provided to InventoryParsers to merge host or group + dicts since it may take several passes to get all of the data + + Example usage: + self.hosts = self.from_ini(filename) + new_hosts = self.from_script(scriptname) + self._merge(self.hosts, new_hosts) + ''' + for i in addition: + if i in target: + target[i].merge(addition[i]) + else: + target[i] = addition[i] + + @abstractmethod + def parse(self, refresh=False): + if refresh: + self.reset_parser() + if self.parsed: + return self.parsed + + # Parse self.inven_sources here + pass + diff --git a/v2/ansible/plugins/inventory/aggregate.py b/v2/ansible/plugins/inventory/aggregate.py new file mode 100644 index 0000000000..6bdf2ddcb6 --- /dev/null +++ b/v2/ansible/plugins/inventory/aggregate.py @@ -0,0 +1,61 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +############################################# + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from . import InventoryParser +#from . ini import InventoryIniParser +#from . script import InventoryScriptParser + +class InventoryAggregateParser(InventoryParser): + + def __init__(self, inven_sources): + self.inven_source = inven_sources + self.hosts = dict() + self.groups = dict() + + def reset_parser(self): + super(InventoryAggregateParser, self).reset_parser() + + def parse(self, refresh=False): + # InventoryDirectoryParser is a InventoryAggregateParser so we avoid + # a circular import by importing here + from . directory import InventoryAggregateParser + if super(InventoryAggregateParser, self).parse(refresh): + return self.parsed + + for entry in self.inven_sources: + if os.path.sep in entry: + # file or directory + if os.path.isdir(entry): + parser = directory.InventoryDirectoryParser(filename=entry) + elif utils.is_executable(entry): + parser = InventoryScriptParser(filename=entry) + else: + parser = InventoryIniParser(filename=entry) + else: + # hostname + parser = HostnameParser(hostname=entry) + hosts, groups = parser.parse() + self._merge(self.hosts, hosts) + self._merge(self.groups, groups) diff --git a/v2/ansible/plugins/inventory/directory.py b/v2/ansible/plugins/inventory/directory.py new file mode 100644 index 0000000000..d340ed7538 --- /dev/null +++ b/v2/ansible/plugins/inventory/directory.py @@ -0,0 +1,52 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +############################################# + +# Make coding more python3-ish +from __future__ import (division, print_function) +__metaclass__ = type + +import os + +from . aggregate import InventoryAggregateParser + +class InventoryDirectoryParser(InventoryAggregateParser): + + def __init__(self, inven_directory): + directory = inven_directory + names = os.listdir(inven_directory) + filtered_names = [] + + # Clean up the list of filenames + for filename in names: + # Skip files that end with certain extensions or characters + if any(filename.endswith(ext) for ext in ("~", ".orig", ".bak", ".ini", ".retry", ".pyc", ".pyo")): + continue + # Skip hidden files + if filename.startswith('.') and not filename.startswith('.{0}'.format(os.path.sep)): + continue + # These are things inside of an inventory basedir + if filename in ("host_vars", "group_vars", "vars_plugins"): + continue + fullpath = os.path.join(directory, filename) + new_names.append(fullpath) + + super(InventoryDirectoryParser, self).__init__(new_names) + + def parse(self): + return super(InventoryDirectoryParser, self).parse() diff --git a/v2/ansible/plugins/inventory/ini.py b/v2/ansible/plugins/inventory/ini.py new file mode 100644 index 0000000000..2cc062b959 --- /dev/null +++ b/v2/ansible/plugins/inventory/ini.py @@ -0,0 +1,53 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +############################################# + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from . import InventoryParser + +class InventoryIniParser(InventoryAggregateParser): + + def __init__(self, inven_directory): + directory = inven_directory + names = os.listdir(inven_directory) + filtered_names = [] + + # Clean up the list of filenames + for filename in names: + # Skip files that end with certain extensions or characters + if any(filename.endswith(ext) for ext in ("~", ".orig", ".bak", ".ini", ".retry", ".pyc", ".pyo")): + continue + # Skip hidden files + if filename.startswith('.') and not filename.startswith('.{0}'.format(os.path.sep)): + continue + # These are things inside of an inventory basedir + if filename in ("host_vars", "group_vars", "vars_plugins"): + continue + fullpath = os.path.join(directory, filename) + new_names.append(fullpath) + + super(InventoryDirectoryParser, self).__init__(new_names) + + def parse(self): + return super(InventoryDirectoryParser, self).parse() + From b6c3670f8aa60cea87cc5518780b704886a423bb Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 17 Nov 2014 19:36:35 -0800 Subject: [PATCH 507/813] Mark some inventory methods that I'm thinking should go away (and their replacements) --- v2/ansible/inventory/__init__.py | 42 ++++++++++++++++++++++++++------ v2/ansible/vars/__init__.py | 4 +++ 2 files changed, 38 insertions(+), 8 deletions(-) diff --git a/v2/ansible/inventory/__init__.py b/v2/ansible/inventory/__init__.py index dbd733db92..631fddfe68 100644 --- 
a/v2/ansible/inventory/__init__.py +++ b/v2/ansible/inventory/__init__.py @@ -21,9 +21,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from . group import Group -from . host import Host - ### List of things to change in Inventory ### Replace some lists with sets/frozensets. @@ -120,18 +117,36 @@ from . host import Host ### group = Group(name) ### inven.add_or_merge(group) +from .. plugins.inventory.aggregate import InventoryAggregateParser +from . group import Group +from . host import Host + class Inventory: ''' - Collect variables for hosts and groups from inventory + Create hosts and groups from inventory + + Retrieve the hosts and groups that ansible knows about from this + class. + + Retrieve raw variables (non-expanded) from the Group and Host classes + returned from here. ''' - def __init__(self, host_list=C.DEFAULT_HOST_LIST, vault_password=None): + def __init__(self, inventory_list=C.DEFAULT_HOST_LIST, vault_password=None): ''' - :kwarg host_list: A filename for an inventory file or script or a list - of hosts + :kwarg inventory_list: A list of inventory sources. This may be file + names which will be parsed as ini-like files, executable scripts + which return inventory data as json, directories of both of the above, + or hostnames. 
Files and directories are :kwarg vault_password: Password to use if any of the inventory sources are in an ansible vault ''' - pass + self.vault_password = vault_password + + self.parser = InventoryAggregateParser(inventory_list) + self.parser.parse() + self.hosts = self.parser.hosts + self.groups = self.parser.groups + def get_hosts(self, pattern="all"): ''' @@ -158,6 +173,8 @@ class Inventory: pass def groups_for_host(self, host): + ### Remove in favour of + ### inventory.hosts[host].groups.keys() ''' Return the groupnames to which a host belongs @@ -175,6 +192,7 @@ class Inventory: pass def get_groups(self): + ### Remove in favour of inventory.groups.values() ''' Retrieve the Group objects known to the Inventory @@ -183,6 +201,7 @@ class Inventory: pass def get_host(self, hostname): + ### Remove in favour of inventory.hosts.values() ''' Retrieve the Host object for a hostname @@ -192,6 +211,7 @@ class Inventory: pass def get_group(self, groupname): + ### Revmoe in favour of inventory.groups.groupname ''' Retrieve the Group object for a groupname @@ -201,6 +221,7 @@ class Inventory: pass def get_group_variables(self, groupname, update_cached=False, vault_password=None): + ### Remove in favour of inventory.groups[groupname].get_vars() ''' Retrieve the variables set on a group @@ -214,6 +235,7 @@ class Inventory: pass def get_variables(self, hostname, update_cached=False, vault_password=None): + ### Remove in favour of inventory.hosts[hostname].get_vars() ''' Retrieve the variables set on a host @@ -228,6 +250,7 @@ class Inventory: pass def get_host_variables(self, hostname, update_cached=False, vault_password=None): + ### Remove in favour of inventory.hosts[hostname].get_vars() ''' Retrieve the variables set on a host @@ -241,6 +264,7 @@ class Inventory: pass def add_group(self, group): + ### Possibly remove in favour of inventory.groups[groupname] = group ''' Add a new group to the inventory @@ -249,6 +273,7 @@ class Inventory: pass def list_hosts(self, 
pattern="all"): + ### Remove in favour of: inventory.hosts.keys()? Maybe not as pattern is here ''' Retrieve a list of hostnames for a pattern @@ -263,6 +288,7 @@ class Inventory: pass def list_groups(self): + ### Remove in favour of: inventory.groups.keys() ''' Retrieve list of groupnames :returns: list of groupnames diff --git a/v2/ansible/vars/__init__.py b/v2/ansible/vars/__init__.py index af81b12b2e..a804985fa9 100644 --- a/v2/ansible/vars/__init__.py +++ b/v2/ansible/vars/__init__.py @@ -141,6 +141,10 @@ class VariableManager: return vars + ### Note: + ### Planning to move this into the inventory. + ### So when you query the host for the variables in its context, it + ### loads the vars_files and then returns those to the VariableManager. def _get_inventory_basename(self, path): ''' Returns the bsaename minus the extension of the given path, so the From 0ce5d2c8460308e22b8b3e7d92a450644f5d7e1d Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 10 Dec 2014 09:21:49 -0500 Subject: [PATCH 508/813] added complex bare templated conditional test --- test/integration/roles/test_conditionals/tasks/main.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/test/integration/roles/test_conditionals/tasks/main.yml b/test/integration/roles/test_conditionals/tasks/main.yml index 8d794e497f..01a4f960d7 100644 --- a/test/integration/roles/test_conditionals/tasks/main.yml +++ b/test/integration/roles/test_conditionals/tasks/main.yml @@ -293,3 +293,7 @@ that: - result.results|length == 3 - result.results[1].skipped + +- name: test complex templated condition + debug: msg="it works" + when: vars_file_var in things1|union([vars_file_var]) From e507a79b9685b1558981f54ad3c52c0b92f92b9b Mon Sep 17 00:00:00 2001 From: Mike Putnam Date: Wed, 10 Dec 2014 11:41:31 -0600 Subject: [PATCH 509/813] Typo fix --- docsite/rst/playbooks_best_practices.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_best_practices.rst 
b/docsite/rst/playbooks_best_practices.rst index de2e27774c..2eaa8e7736 100644 --- a/docsite/rst/playbooks_best_practices.rst +++ b/docsite/rst/playbooks_best_practices.rst @@ -411,7 +411,7 @@ for you. For example, you will probably not need ``vars``, ``vars_files``, ``vars_prompt`` and ``--extra-vars`` all at once, while also using an external inventory file. -If something feels complicated, it probably is, and may be a good opportunity to simply things. +If something feels complicated, it probably is, and may be a good opportunity to simplify things. .. _version_control: From 09ef8f5722c300088e78512ca403f3d99b08d74d Mon Sep 17 00:00:00 2001 From: Mike Putnam Date: Wed, 10 Dec 2014 12:13:31 -0600 Subject: [PATCH 510/813] Typo fix in playbook delegation docs. --- docsite/rst/playbooks_delegation.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_delegation.rst b/docsite/rst/playbooks_delegation.rst index e4640afbfa..483a24edbc 100644 --- a/docsite/rst/playbooks_delegation.rst +++ b/docsite/rst/playbooks_delegation.rst @@ -161,7 +161,7 @@ This can be optionally paired with "delegate_to" to specify an individual host t When "run_once" is not used with "delegate_to" it will execute on the first host, as defined by inventory, in the group(s) of hosts targeted by the play. e.g. webservers[0] if the play targeted "hosts: webservers". 
-This aproach is similar, although more concise and cleaner than applying a conditional to a task such as:: +This approach is similar, although more concise and cleaner than applying a conditional to a task such as:: - command: /opt/application/upgrade_db.py when: inventory_hostname == webservers[0] From 21bb12ee8430fae5a29f63c3b03218aba44e6fb9 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 10 Dec 2014 14:06:48 -0500 Subject: [PATCH 511/813] updated refs to module repos --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 375025d2e3..7e2fbec944 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 375025d2e3edf2dca764a50c1c213286f38fc9c2 +Subproject commit 7e2fbec9448395be290f1e889994ffdafc9482ee diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 82aaaa4152..b8071a8d5e 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 82aaaa4152d955c79df00acd184f18c9be3c80cb +Subproject commit b8071a8d5eebe405250774a0b7c6c74451bc9532 From 2d266ce401dcd8c3e41a8d5b1c8e4b61b24701d5 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 10 Dec 2014 11:22:40 -0800 Subject: [PATCH 512/813] Update core submodule for docs fix --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 7e2fbec944..467ad65f73 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 7e2fbec9448395be290f1e889994ffdafc9482ee +Subproject commit 467ad65f735ddb33b6302cf0968074c22d153565 From 37d1b3f1cf8cd70401deb609c804cbd0672a9cc5 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 10 Dec 2014 11:37:32 -0800 Subject: [PATCH 513/813] New test that binary contenti. 
(and a few unicode tests because they make clearer what we're testing) works. disabled for now since they mostly do not pass on 1.8. We'll want to enable them when we start integration testing the v2 tree. --- test/integration/non_destructive.yml | 2 + .../roles/test_binary/files/b64_latin1 | 1 + .../roles/test_binary/files/b64_utf8 | 1 + .../roles/test_binary/files/from_playbook | 1 + .../roles/test_binary/meta/main.yml | 3 + .../roles/test_binary/tasks/main.yml | 123 ++++++++++++++++++ .../templates/b64_latin1_template.j2 | 1 + .../templates/b64_utf8_template.j2 | 1 + .../templates/from_playbook_template.j2 | 1 + .../roles/test_binary/vars/main.yml | 3 + 10 files changed, 137 insertions(+) create mode 100644 test/integration/roles/test_binary/files/b64_latin1 create mode 100644 test/integration/roles/test_binary/files/b64_utf8 create mode 100644 test/integration/roles/test_binary/files/from_playbook create mode 100644 test/integration/roles/test_binary/meta/main.yml create mode 100644 test/integration/roles/test_binary/tasks/main.yml create mode 100644 test/integration/roles/test_binary/templates/b64_latin1_template.j2 create mode 100644 test/integration/roles/test_binary/templates/b64_utf8_template.j2 create mode 100644 test/integration/roles/test_binary/templates/from_playbook_template.j2 create mode 100644 test/integration/roles/test_binary/vars/main.yml diff --git a/test/integration/non_destructive.yml b/test/integration/non_destructive.yml index b177763fbf..e520a17ea0 100644 --- a/test/integration/non_destructive.yml +++ b/test/integration/non_destructive.yml @@ -39,4 +39,6 @@ - { role: test_authorized_key, tags: test_authorized_key } - { role: test_get_url, tags: test_get_url } - { role: test_embedded_module, tags: test_embedded_module } + # Turn on test_binary when we start testing v2 + #- { role: test_binary, tags: test_binary } diff --git a/test/integration/roles/test_binary/files/b64_latin1 b/test/integration/roles/test_binary/files/b64_latin1 new file 
mode 100644 index 0000000000..c7fbdeb632 --- /dev/null +++ b/test/integration/roles/test_binary/files/b64_latin1 @@ -0,0 +1 @@ +Café Eñe diff --git a/test/integration/roles/test_binary/files/b64_utf8 b/test/integration/roles/test_binary/files/b64_utf8 new file mode 100644 index 0000000000..c7fbdeb632 --- /dev/null +++ b/test/integration/roles/test_binary/files/b64_utf8 @@ -0,0 +1 @@ +Café Eñe diff --git a/test/integration/roles/test_binary/files/from_playbook b/test/integration/roles/test_binary/files/from_playbook new file mode 100644 index 0000000000..c7fbdeb632 --- /dev/null +++ b/test/integration/roles/test_binary/files/from_playbook @@ -0,0 +1 @@ +Café Eñe diff --git a/test/integration/roles/test_binary/meta/main.yml b/test/integration/roles/test_binary/meta/main.yml new file mode 100644 index 0000000000..1050c23ce3 --- /dev/null +++ b/test/integration/roles/test_binary/meta/main.yml @@ -0,0 +1,3 @@ +dependencies: + - prepare_tests + diff --git a/test/integration/roles/test_binary/tasks/main.yml b/test/integration/roles/test_binary/tasks/main.yml new file mode 100644 index 0000000000..dea1f85348 --- /dev/null +++ b/test/integration/roles/test_binary/tasks/main.yml @@ -0,0 +1,123 @@ +--- +# Various ways users want to use binary data +# Could integrate into individual modules but currently these don't all work. +# Probably easier to see them all in a single block to know what we're testing. +# When we can start testing v2 we should test that all of these work. 
+ +# Expected values of the written files +- name: get checksums that we expect later files to have + copy: + src: from_playbook + dest: "{{ output_dir }}" + +- copy: + src: b64_utf8 + dest: "{{ output_dir }}" + +- copy: + src: b64_latin1 + dest: "{{ output_dir }}" + +- stat: + path: "{{ output_dir }}/from_playbook" + register: from_playbook + +- stat: + path: "{{ output_dir }}/b64_utf8" + register: b64_utf8 + +- stat: + path: "{{ output_dir }}/b64_latin1" + register: b64_latin1 + +- name: copy with utf-8 content in a playbook + copy: + content: "{{ simple_accents }}\n" + dest: "{{ output_dir }}/from_playbook.txt" + +- name: Check that what was written matches + stat: + path: "{{ output_dir }}/from_playbook.txt" + register: results + +- assert: + that: + - 'results.stat.checksum == from_playbook.stat.checksum' + ignore_errors: True + +- name: copy with utf8 in a base64 encoded string + copy: + content: "{{ utf8_simple_accents|b64decode }}\n" + dest: "{{ output_dir }}/b64_utf8.txt" + +- name: Check that what was written matches + stat: + path: "{{ output_dir }}/b64_utf8.txt" + register: results + +- assert: + that: + - 'results.stat.checksum == b64_utf8.stat.checksum' + ignore_errors: True + +#- name: copy with latin1 in a base64 encoded string +# copy: +# content: "{{ latin1_simple_accents|b64decode }}\n" +# dest: "{{ output_dir }}/b64_latin1.txt" +# +#- name: Check that what was written matches +# stat: +# path: "{{ output_dir }}/b64_latin1.txt" +# register: results +# +#- assert: +# that: +# - 'results.stat.checksum == b64_latin1.stat.checksum' +# ignore_errors: True + +- name: Template with a unicode string from the playbook + template: + src: "from_playbook_template.j2" + dest: "{{ output_dir }}/from_playbook_template.txt" + +- name: Check that what was written matches + stat: + path: "{{ output_dir }}/from_playbook_template.txt" + register: results + +- assert: + that: + - 'results.stat.checksum == from_playbook.stat.checksum' + +- name: Template with utf8 in 
a base64 encoded string + template: + src: "b64_utf8_template.j2" + dest: "{{ output_dir }}/b64_utf8_template.txt" + +- name: Check that what was written matches + stat: + path: "{{ output_dir }}/b64_utf8_template.txt" + register: results + +- assert: + that: + - 'results.stat.checksum == b64_utf8.stat.checksum' + +#- name: Template with latin1 in a base64 encoded string +# template: +# src: "b64_latin1_template.j2" +# dest: "{{ output_dir }}/b64_latin1_template.txt" +# +#- name: Check that what was written matches +# stat: +# path: "{{ output_dir }}/b64_latin1_template.txt" +# register: results +# +#- assert: +# that: +# - 'results.stat.checksum == b64_latin1.stat.checksum' + +# These might give garbled output but none of them should traceback +- debug: var=simple_accents +- debug: msg={{ utf8_simple_accents|b64decode}} +#- debug: msg={{ latin1_simple_accents|b64decode}} diff --git a/test/integration/roles/test_binary/templates/b64_latin1_template.j2 b/test/integration/roles/test_binary/templates/b64_latin1_template.j2 new file mode 100644 index 0000000000..ee2fc1b19c --- /dev/null +++ b/test/integration/roles/test_binary/templates/b64_latin1_template.j2 @@ -0,0 +1 @@ +{{ latin1_simple_accents|b64decode }} diff --git a/test/integration/roles/test_binary/templates/b64_utf8_template.j2 b/test/integration/roles/test_binary/templates/b64_utf8_template.j2 new file mode 100644 index 0000000000..9fd3ed48b1 --- /dev/null +++ b/test/integration/roles/test_binary/templates/b64_utf8_template.j2 @@ -0,0 +1 @@ +{{ utf8_simple_accents|b64decode }} diff --git a/test/integration/roles/test_binary/templates/from_playbook_template.j2 b/test/integration/roles/test_binary/templates/from_playbook_template.j2 new file mode 100644 index 0000000000..3be6dd4f0b --- /dev/null +++ b/test/integration/roles/test_binary/templates/from_playbook_template.j2 @@ -0,0 +1 @@ +{{ simple_accents }} diff --git a/test/integration/roles/test_binary/vars/main.yml 
b/test/integration/roles/test_binary/vars/main.yml new file mode 100644 index 0000000000..f6d40232c3 --- /dev/null +++ b/test/integration/roles/test_binary/vars/main.yml @@ -0,0 +1,3 @@ +simple_accents: 'Café Eñe' +utf8_simple_accents: 'Q2Fmw6kgRcOxZQ==' +latin1_simple_accents: 'Q2Fm6SBF8WU=' From 65be0eefcfd633d1fc0e33ea9655e9633abe6b95 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 10 Dec 2014 11:40:33 -0800 Subject: [PATCH 514/813] Enable all the binary tests inside of the role. They're not being run by default so make sure they're ready to show errors when we turn them on for v2 --- .../roles/test_binary/tasks/main.yml | 58 +++++++++---------- 1 file changed, 28 insertions(+), 30 deletions(-) diff --git a/test/integration/roles/test_binary/tasks/main.yml b/test/integration/roles/test_binary/tasks/main.yml index dea1f85348..aaddad8ea2 100644 --- a/test/integration/roles/test_binary/tasks/main.yml +++ b/test/integration/roles/test_binary/tasks/main.yml @@ -30,6 +30,7 @@ path: "{{ output_dir }}/b64_latin1" register: b64_latin1 +# Tests themselves - name: copy with utf-8 content in a playbook copy: content: "{{ simple_accents }}\n" @@ -43,7 +44,6 @@ - assert: that: - 'results.stat.checksum == from_playbook.stat.checksum' - ignore_errors: True - name: copy with utf8 in a base64 encoded string copy: @@ -58,22 +58,20 @@ - assert: that: - 'results.stat.checksum == b64_utf8.stat.checksum' - ignore_errors: True -#- name: copy with latin1 in a base64 encoded string -# copy: -# content: "{{ latin1_simple_accents|b64decode }}\n" -# dest: "{{ output_dir }}/b64_latin1.txt" -# -#- name: Check that what was written matches -# stat: -# path: "{{ output_dir }}/b64_latin1.txt" -# register: results -# -#- assert: -# that: -# - 'results.stat.checksum == b64_latin1.stat.checksum' -# ignore_errors: True +- name: copy with latin1 in a base64 encoded string + copy: + content: "{{ latin1_simple_accents|b64decode }}\n" + dest: "{{ output_dir }}/b64_latin1.txt" + +- name: Check 
that what was written matches + stat: + path: "{{ output_dir }}/b64_latin1.txt" + register: results + +- assert: + that: + - 'results.stat.checksum == b64_latin1.stat.checksum' - name: Template with a unicode string from the playbook template: @@ -103,21 +101,21 @@ that: - 'results.stat.checksum == b64_utf8.stat.checksum' -#- name: Template with latin1 in a base64 encoded string -# template: -# src: "b64_latin1_template.j2" -# dest: "{{ output_dir }}/b64_latin1_template.txt" -# -#- name: Check that what was written matches -# stat: -# path: "{{ output_dir }}/b64_latin1_template.txt" -# register: results -# -#- assert: -# that: -# - 'results.stat.checksum == b64_latin1.stat.checksum' +- name: Template with latin1 in a base64 encoded string + template: + src: "b64_latin1_template.j2" + dest: "{{ output_dir }}/b64_latin1_template.txt" + +- name: Check that what was written matches + stat: + path: "{{ output_dir }}/b64_latin1_template.txt" + register: results + +- assert: + that: + - 'results.stat.checksum == b64_latin1.stat.checksum' # These might give garbled output but none of them should traceback - debug: var=simple_accents - debug: msg={{ utf8_simple_accents|b64decode}} -#- debug: msg={{ latin1_simple_accents|b64decode}} +- debug: msg={{ latin1_simple_accents|b64decode}} From ac71caa0ac35cc61cff8337480cbed6b51aac523 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 10 Dec 2014 11:50:48 -0800 Subject: [PATCH 515/813] Quote debug msgs and find one more wierd bug to test in v2 --- test/integration/roles/test_binary/tasks/main.yml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/test/integration/roles/test_binary/tasks/main.yml b/test/integration/roles/test_binary/tasks/main.yml index aaddad8ea2..7ae9f16dc9 100644 --- a/test/integration/roles/test_binary/tasks/main.yml +++ b/test/integration/roles/test_binary/tasks/main.yml @@ -4,6 +4,10 @@ # Probably easier to see them all in a single block to know what we're testing. 
# When we can start testing v2 we should test that all of these work. +# In v1: The following line will traceback if it's the first task in the role. +# Does not traceback if it's the second or third etc task. +- debug: msg="{{ utf8_simple_accents|b64decode}}" + # Expected values of the written files - name: get checksums that we expect later files to have copy: @@ -117,5 +121,5 @@ # These might give garbled output but none of them should traceback - debug: var=simple_accents -- debug: msg={{ utf8_simple_accents|b64decode}} -- debug: msg={{ latin1_simple_accents|b64decode}} +- debug: msg="{{ utf8_simple_accents|b64decode}}" +- debug: msg="{{ latin1_simple_accents|b64decode}}" From 462471209ec93a4aa4d284b1f3b06cf93de1693c Mon Sep 17 00:00:00 2001 From: jszwedko Date: Wed, 10 Dec 2014 16:54:58 -0500 Subject: [PATCH 516/813] Allow retries to be templatable Fixes #5865 --- lib/ansible/runner/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 47c1faadeb..7912d23462 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -1030,7 +1030,7 @@ class Runner(object): cond = template.template(self.basedir, until, inject, expand_lists=False) if not utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars): - retries = self.module_vars.get('retries') + retries = template.template(self.basedir, self.module_vars.get('retries'), inject, expand_lists=False) delay = self.module_vars.get('delay') for x in range(1, int(retries) + 1): # template the delay, cast to float and sleep From 2dd0e514693250c77668dcdb25be20accb1d8448 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 10 Dec 2014 14:25:40 -0800 Subject: [PATCH 517/813] Possible fix for postgres setup on F21 cloud image --- .../roles/setup_postgresql_db/tasks/main.yml | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git 
a/test/integration/roles/setup_postgresql_db/tasks/main.yml b/test/integration/roles/setup_postgresql_db/tasks/main.yml index d306ac3b7a..970b87d18d 100644 --- a/test/integration/roles/setup_postgresql_db/tasks/main.yml +++ b/test/integration/roles/setup_postgresql_db/tasks/main.yml @@ -61,13 +61,21 @@ - name: Copy pg_hba into place copy: src=pg_hba.conf dest="{{ pg_hba_location }}" owner="postgres" group="root" mode="0644" -- name: Generate locale on Debian systems +- name: Generate pt_BR locale (Debian) command: locale-gen pt_BR when: ansible_os_family == 'Debian' -- name: Generate locale on Debian systems +- name: Generate es_MX locale (Debian) command: locale-gen es_MX when: ansible_os_family == 'Debian' +- name: Generate pt_BR locale (Red Hat) + command: locale-gen -f UTF-8 -i pt_BR pt_BR + when: ansible_os_family == 'RedHat' + +- name: Generate es_MX locale (Red Hat) + command: locale-gen -f UTF-8 -i es_MX es_MX + when: ansible_os_family == 'RedHat' + - name: restart postgresql service service: name={{ postgresql_service }} state=restarted From 367a361a70bf162242ba6259daed88619d6cb5f0 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 10 Dec 2014 14:40:10 -0800 Subject: [PATCH 518/813] Correct the command name on RHT Systems --- test/integration/roles/setup_postgresql_db/tasks/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/roles/setup_postgresql_db/tasks/main.yml b/test/integration/roles/setup_postgresql_db/tasks/main.yml index 970b87d18d..47219875ef 100644 --- a/test/integration/roles/setup_postgresql_db/tasks/main.yml +++ b/test/integration/roles/setup_postgresql_db/tasks/main.yml @@ -70,11 +70,11 @@ when: ansible_os_family == 'Debian' - name: Generate pt_BR locale (Red Hat) - command: locale-gen -f UTF-8 -i pt_BR pt_BR + command: localedef -f UTF-8 -i pt_BR pt_BR when: ansible_os_family == 'RedHat' - name: Generate es_MX locale (Red Hat) - command: locale-gen -f UTF-8 -i es_MX es_MX + command: 
localedef -f UTF-8 -i es_MX es_MX when: ansible_os_family == 'RedHat' - name: restart postgresql service From a0ff0f819875287e90d05bed70cbf3e62e9d6850 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 10 Dec 2014 14:51:02 -0800 Subject: [PATCH 519/813] And the encoding needs to be latin1 for this test --- test/integration/roles/setup_postgresql_db/tasks/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/roles/setup_postgresql_db/tasks/main.yml b/test/integration/roles/setup_postgresql_db/tasks/main.yml index 47219875ef..fbcc9cab72 100644 --- a/test/integration/roles/setup_postgresql_db/tasks/main.yml +++ b/test/integration/roles/setup_postgresql_db/tasks/main.yml @@ -70,11 +70,11 @@ when: ansible_os_family == 'Debian' - name: Generate pt_BR locale (Red Hat) - command: localedef -f UTF-8 -i pt_BR pt_BR + command: localedef -f ISO-8859-1 -i pt_BR pt_BR when: ansible_os_family == 'RedHat' - name: Generate es_MX locale (Red Hat) - command: localedef -f UTF-8 -i es_MX es_MX + command: localedef -f ISO-8859-1 -i es_MX es_MX when: ansible_os_family == 'RedHat' - name: restart postgresql service From 23405b60cbd0b64efc4d60b0a7b40c8a1269cdfa Mon Sep 17 00:00:00 2001 From: Tim Gerla Date: Wed, 10 Dec 2014 19:04:02 -0800 Subject: [PATCH 520/813] fix up formatting for one code section in guide_aws --- docsite/rst/guide_aws.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docsite/rst/guide_aws.rst b/docsite/rst/guide_aws.rst index 623a80fe40..c91c6478e9 100644 --- a/docsite/rst/guide_aws.rst +++ b/docsite/rst/guide_aws.rst @@ -56,7 +56,7 @@ In the example below, the "exact_count" of instances is set to 5. This means if be terminated. What is being counted is specified by the "count_tag" parameter. The parameter "instance_tags" is used to apply tags to the newly created -instance. +instance.:: # demo_setup.yml @@ -82,7 +82,7 @@ instance. 
The data about what instances are created is being saved by the "register" keyword in the variable named "ec2". -From this, we'll use the add_host module to dynamically create a host group consisting of these new instances. This facilitates performing configuration actions on the hosts immediately in a subsequent task:: +From this, we'll use the add_host module to dynamically create a host group consisting of these new instances. This facilitates performing configuration actions on the hosts immediately in a subsequent task.:: # demo_setup.yml From a1a6b8dfbb84823196f0462ab222c30603e65012 Mon Sep 17 00:00:00 2001 From: Donovan Hernandez Date: Thu, 11 Dec 2014 02:10:44 -0600 Subject: [PATCH 521/813] Fix typo for the word "maintaining" --- docsite/rst/playbooks_best_practices.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_best_practices.rst b/docsite/rst/playbooks_best_practices.rst index 2eaa8e7736..cec48679cc 100644 --- a/docsite/rst/playbooks_best_practices.rst +++ b/docsite/rst/playbooks_best_practices.rst @@ -77,7 +77,7 @@ Use Dynamic Inventory With Clouds If you are using a cloud provider, you should not be managing your inventory in a static file. See :doc:`intro_dynamic_inventory`. -This does not just apply to clouds -- If you have another system maintaing a canonical list of systems +This does not just apply to clouds -- If you have another system maintaining a canonical list of systems in your infrastructure, usage of dynamic inventory is a great idea in general. .. 
_stage_vs_prod: From 7bffc1a29ee50a838e7d707bfd1c1c7d8a036ec4 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Wed, 6 Nov 2013 11:55:02 +0100 Subject: [PATCH 522/813] add a default path in ~/.ansible to place plugins, so ansible is a bit more usable out of the box as simple user --- lib/ansible/constants.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 861dd5325c..d00712bcad 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -86,6 +86,9 @@ def shell_expand_path(path): path = os.path.expanduser(os.path.expandvars(path)) return path +def get_plugin_paths(path): + return ':'.join([os.path.join(x, path) for x in [os.path.expanduser('~/.ansible/plugins/'), '/usr/share/ansible_plugins/']]) + p = load_config_file() active_user = pwd.getpwuid(os.geteuid())[0] @@ -135,13 +138,13 @@ DEFAULT_SU_USER = get_config(p, DEFAULTS, 'su_user', 'ANSIBLE_SU_USER' DEFAULT_ASK_SU_PASS = get_config(p, DEFAULTS, 'ask_su_pass', 'ANSIBLE_ASK_SU_PASS', False, boolean=True) DEFAULT_GATHERING = get_config(p, DEFAULTS, 'gathering', 'ANSIBLE_GATHERING', 'implicit').lower() -DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', '/usr/share/ansible_plugins/action_plugins') -DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', '/usr/share/ansible_plugins/cache_plugins') -DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS', '/usr/share/ansible_plugins/callback_plugins') -DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', 'ANSIBLE_CONNECTION_PLUGINS', '/usr/share/ansible_plugins/connection_plugins') -DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', '/usr/share/ansible_plugins/lookup_plugins') -DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 
'ANSIBLE_VARS_PLUGINS', '/usr/share/ansible_plugins/vars_plugins') -DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '/usr/share/ansible_plugins/filter_plugins') +DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', get_plugin_paths('action_plugins')) +DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', get_plugin_paths('cache_plugins')) +DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS', get_plugin_paths('callback_plugins')) +DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', 'ANSIBLE_CONNECTION_PLUGINS', get_plugin_paths('connection_plugins')) +DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', get_plugin_paths('lookup_plugins')) +DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', get_plugin_paths('vars_plugins')) +DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', get_plugin_paths('filter_plugins')) DEFAULT_LOG_PATH = shell_expand_path(get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', '')) CACHE_PLUGIN = get_config(p, DEFAULTS, 'fact_caching', 'ANSIBLE_CACHE_PLUGIN', 'memory') From 72eab3c01987b4363a9520babd328bf23f6ec313 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Wed, 6 Nov 2013 11:55:43 +0100 Subject: [PATCH 523/813] fix the documentation path for latest regarding plugins --- docsite/rst/intro_configuration.rst | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst index a9f50f804f..e2550644c9 100644 --- a/docsite/rst/intro_configuration.rst +++ b/docsite/rst/intro_configuration.rst @@ -70,7 +70,7 @@ Actions are pieces of code in ansible that enable things like module execution, This is a 
developer-centric feature that allows low-level extensions around Ansible to be loaded from different locations:: - action_plugins = /usr/share/ansible_plugins/action_plugins + action_plugins = ~/.ansible/plugins/action_plugins/:/usr/share/ansible_plugins/action_plugins Most users will not need to use this feature. See :doc:`developing_plugins` for more details. @@ -135,10 +135,12 @@ Prior to 1.8, callbacks were never loaded for /usr/bin/ansible. callback_plugins ================ +Callbacks are pieces of code in ansible that get called on specific events, permitting to trigger notifications. + This is a developer-centric feature that allows low-level extensions around Ansible to be loaded from different locations:: - callback_plugins = /usr/share/ansible_plugins/callback_plugins + callback_plugins = ~/.ansible/plugins/callback_plugins/:/usr/share/ansible_plugins/callback_plugins Most users will not need to use this feature. See :doc:`developing_plugins` for more details @@ -171,10 +173,12 @@ parameter string, like so:: connection_plugins ================== +Connections plugin permit to extend the channel used by ansible to transport commands and files. + This is a developer-centric feature that allows low-level extensions around Ansible to be loaded from different locations:: - connection_plugins = /usr/share/ansible_plugins/connection_plugins + connection_plugins = ~/.ansible/plugins/connection_plugins/:/usr/share/ansible_plugins/connection_plugins Most users will not need to use this feature. See :doc:`developing_plugins` for more details @@ -230,10 +234,12 @@ rare instances to /bin/bash in rare instances when sudo is constrained, but in m filter_plugins ============== +Filters are specific functions that can be used to extend the template system. 
+ This is a developer-centric feature that allows low-level extensions around Ansible to be loaded from different locations:: - filter_plugins = /usr/share/ansible_plugins/filter_plugins + filter_plugins = ~/.ansible/plugins/filter_plugins/:/usr/share/ansible_plugins/filter_plugins Most users will not need to use this feature. See :doc:`developing_plugins` for more details @@ -350,7 +356,7 @@ lookup_plugins This is a developer-centric feature that allows low-level extensions around Ansible to be loaded from different locations:: - lookup_plugins = /usr/share/ansible_plugins/lookup_plugins + lookup_plugins = ~/.ansible/plugins/lookup_plugins/:/usr/share/ansible_plugins/lookup_plugins Most users will not need to use this feature. See :doc:`developing_plugins` for more details @@ -553,7 +559,7 @@ vars_plugins This is a developer-centric feature that allows low-level extensions around Ansible to be loaded from different locations:: - vars_plugins = /usr/share/ansible_plugins/vars_plugins + vars_plugins = ~/.ansible/plugins/vars_plugins/:/usr/share/ansible_plugins/vars_plugins Most users will not need to use this feature. 
See :doc:`developing_plugins` for more details From 3cf0c09ce9210b3cf1e986523b99dd1ece9e6583 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolas=20R=C3=A9mond?= Date: Fri, 12 Dec 2014 12:11:17 +0100 Subject: [PATCH 524/813] Variables lookup in a template should handle properly the undefined case --- lib/ansible/utils/template.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/ansible/utils/template.py b/lib/ansible/utils/template.py index 3e7f5e4d81..0098aa8b89 100644 --- a/lib/ansible/utils/template.py +++ b/lib/ansible/utils/template.py @@ -93,6 +93,8 @@ def lookup(name, *args, **kwargs): ran = instance.run(*args, inject=tvars, **kwargs) except errors.AnsibleError: raise + except jinja2.exceptions.UndefinedError, e: + raise errors.AnsibleUndefinedVariable("One or more undefined variables: %s" % str(e)) except Exception, e: raise errors.AnsibleError('Unexpected error in during lookup: %s' % e) if ran: From 12968acd5f553d1b0b46eb2111443e223a7f2e93 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 12 Dec 2014 12:09:06 -0800 Subject: [PATCH 525/813] Update the core modules to pull in some fixes --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 467ad65f73..e1f90635af 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 467ad65f735ddb33b6302cf0968074c22d153565 +Subproject commit e1f90635af0e9ca09449fe47f94471bf9e4ffa5d From 5cd4ac16f75e5bbcec9c3c7fdd0dd947f855e5ca Mon Sep 17 00:00:00 2001 From: Jonathan Mainguy Date: Fri, 12 Dec 2014 15:51:47 -0500 Subject: [PATCH 526/813] Added distribution facts for OpenSuse --- lib/ansible/module_utils/facts.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 8e27b9b882..7a73a32ffb 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -325,11 +325,15 @@ 
class Facts(object): if 'suse' in data.lower(): if path == '/etc/os-release': release = re.search("PRETTY_NAME=[^(]+ \(?([^)]+?)\)", data) + distdata = get_file_content(path).split('\n')[0] + self.facts['distribution'] = distdata.split('=')[1] if release: self.facts['distribution_release'] = release.groups()[0] break elif path == '/etc/SuSE-release': data = data.splitlines() + distdata = get_file_content(path).split('\n')[0] + self.facts['distribution'] = distdata.split()[0] for line in data: release = re.search('CODENAME *= *([^\n]+)', line) if release: From b84fba44391c9fa48c16674d47760cbfd249a102 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Sat, 13 Dec 2014 20:56:06 +0100 Subject: [PATCH 527/813] Strip the line to filter lines composed of only whitespaces Fix #9395 --- lib/ansible/runner/connection_plugins/ssh.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ansible/runner/connection_plugins/ssh.py b/lib/ansible/runner/connection_plugins/ssh.py index c2fd9666eb..104f60fd35 100644 --- a/lib/ansible/runner/connection_plugins/ssh.py +++ b/lib/ansible/runner/connection_plugins/ssh.py @@ -230,6 +230,7 @@ class Connection(object): host_fh.close() for line in data.split("\n"): + line = line.strip() if line is None or " " not in line: continue tokens = line.split() From 38dbce1527d4b5b82af75ec6ae198bd7e36d45dc Mon Sep 17 00:00:00 2001 From: Jason Holland Date: Sat, 13 Dec 2014 21:12:23 -0600 Subject: [PATCH 528/813] Allow Ansible to honor the "no_proxy" environment variable. 
--- lib/ansible/module_utils/urls.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py index c2d87c27bc..962b868ee0 100644 --- a/lib/ansible/module_utils/urls.py +++ b/lib/ansible/module_utils/urls.py @@ -252,9 +252,33 @@ class SSLValidationHandler(urllib2.BaseHandler): except: self.module.fail_json(msg='Connection to proxy failed') + def detect_no_proxy(self, url): + ''' + Detect if the 'no_proxy' environment variable is set and honor those locations. + ''' + env_no_proxy = os.environ.get('no_proxy') + if env_no_proxy: + env_no_proxy = env_no_proxy.split(',') + netloc = urlparse.urlparse(url).netloc + + for host in env_no_proxy: + if netloc.endswith(host) or netloc.split(':')[0].endswith(host): + # Our requested URL matches something in no_proxy, so don't + # use the proxy for this + return False + return True + def http_request(self, req): tmp_ca_cert_path, paths_checked = self.get_ca_certs() https_proxy = os.environ.get('https_proxy') + + # Detect if 'no_proxy' environment variable is set and if our URL is included + use_proxy = self.detect_no_proxy(req.get_full_url()) + + if not use_proxy: + # ignore proxy settings for this host request + return req + try: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) if https_proxy: From eedc51f21302f6ce3247afd91932c19aff272af6 Mon Sep 17 00:00:00 2001 From: Jason Holland Date: Sat, 13 Dec 2014 21:20:33 -0600 Subject: [PATCH 529/813] Add support for SSL protocol version configuration option. Also fix 2 places where the SSL version was not being set properly. 
--- examples/ansible.cfg | 9 +++++++++ lib/ansible/constants.py | 1 + lib/ansible/module_utils/urls.py | 8 ++++---- v2/ansible/constants.py | 1 + 4 files changed, 15 insertions(+), 4 deletions(-) diff --git a/examples/ansible.cfg b/examples/ansible.cfg index a89fa47664..0c43f0e07d 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -212,3 +212,12 @@ accelerate_daemon_timeout = 30 # is "no". #accelerate_multi_key = yes +[ssl] +# SSL/TLS Protocol +# Configure the default protocol strength of any SSL/TLS connections +# made by Ansible. Valid values are +# SSLv2 - 0 +# SSLv3 - 1 +# SSLv23 - 2 +# TLSv1 - 3 +ssl_protocol = 3 diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 861dd5325c..b4b2ff5618 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -166,6 +166,7 @@ ANSIBLE_SSH_ARGS = get_config(p, 'ssh_connection', 'ssh_args', 'AN ANSIBLE_SSH_CONTROL_PATH = get_config(p, 'ssh_connection', 'control_path', 'ANSIBLE_SSH_CONTROL_PATH', "%(directory)s/ansible-ssh-%%h-%%p-%%r") ANSIBLE_SSH_PIPELINING = get_config(p, 'ssh_connection', 'pipelining', 'ANSIBLE_SSH_PIPELINING', False, boolean=True) PARAMIKO_RECORD_HOST_KEYS = get_config(p, 'paramiko_connection', 'record_host_keys', 'ANSIBLE_PARAMIKO_RECORD_HOST_KEYS', True, boolean=True) +SSL_PROTOCOL = get_config(p, 'ssl', 'ssl_protocol', 'SSL_PROTOCOL', 3, integer=True) # obsolete -- will be formally removed in 1.6 ZEROMQ_PORT = get_config(p, 'fireball_connection', 'zeromq_port', 'ANSIBLE_ZEROMQ_PORT', 5099, integer=True) ACCELERATE_PORT = get_config(p, 'accelerate', 'accelerate_port', 'ACCELERATE_PORT', 5099, integer=True) diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py index c2d87c27bc..a0c94f4fa1 100644 --- a/lib/ansible/module_utils/urls.py +++ b/lib/ansible/module_utils/urls.py @@ -55,7 +55,7 @@ import os import re import socket import tempfile - +from ansible import constants as C # This is a dummy cacert provided for Mac OS since you need 
at least 1 # ca cert, regardless of validity, for Python on Mac OS to use the @@ -91,7 +91,7 @@ class CustomHTTPSConnection(httplib.HTTPSConnection): if self._tunnel_host: self.sock = sock self._tunnel() - self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, certfile=self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1) + self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, certfile=self.cert_file, ssl_version=C.SSL_PROTOCOL) class CustomHTTPSHandler(urllib2.HTTPSHandler): @@ -268,12 +268,12 @@ class SSLValidationHandler(urllib2.BaseHandler): s.sendall('\r\n') connect_result = s.recv(4096) self.validate_proxy_response(connect_result) - ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED) + ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=C.SSL_PROTOCOL) else: self.module.fail_json(msg='Unsupported proxy scheme: %s. Currently ansible only supports HTTP proxies.' % proxy_parts.get('scheme')) else: s.connect((self.hostname, self.port)) - ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED) + ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=C.SSL_PROTOCOL) # close the ssl connection #ssl_s.unwrap() s.close() diff --git a/v2/ansible/constants.py b/v2/ansible/constants.py index e74720b8a6..bc48cbf5d4 100644 --- a/v2/ansible/constants.py +++ b/v2/ansible/constants.py @@ -172,6 +172,7 @@ ANSIBLE_SSH_ARGS = get_config(p, 'ssh_connection', 'ssh_args', 'AN ANSIBLE_SSH_CONTROL_PATH = get_config(p, 'ssh_connection', 'control_path', 'ANSIBLE_SSH_CONTROL_PATH', "%(directory)s/ansible-ssh-%%h-%%p-%%r") ANSIBLE_SSH_PIPELINING = get_config(p, 'ssh_connection', 'pipelining', 'ANSIBLE_SSH_PIPELINING', False, boolean=True) PARAMIKO_RECORD_HOST_KEYS = get_config(p, 'paramiko_connection', 'record_host_keys', 'ANSIBLE_PARAMIKO_RECORD_HOST_KEYS', True, boolean=True) +SSL_PROTOCOL = get_config(p, 'ssl', 'ssl_protocol', 'SSL_PROTOCOL', 3, 
integer=True) # obsolete -- will be formally removed in 1.6 ZEROMQ_PORT = get_config(p, 'fireball_connection', 'zeromq_port', 'ANSIBLE_ZEROMQ_PORT', 5099, integer=True) ACCELERATE_PORT = get_config(p, 'accelerate', 'accelerate_port', 'ACCELERATE_PORT', 5099, integer=True) From f7ac0123011a21ce8282fb5450a3799572f15a14 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Sun, 14 Dec 2014 17:56:18 +0100 Subject: [PATCH 530/813] Do not assume that stdin is a tty This can be used from another non-interactive software, see #9695 for details. --- bin/ansible-doc | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/bin/ansible-doc b/bin/ansible-doc index 0ba84b9a30..59d14b6ef1 100755 --- a/bin/ansible-doc +++ b/bin/ansible-doc @@ -165,7 +165,10 @@ def get_snippet_text(doc): return "\n".join(text) def get_module_list_text(module_list): - columns = max(60, int(os.popen('stty size', 'r').read().split()[1])) + tty_size = 0 + if os.isatty(0): + tty_size = int(os.popen('stty size', 'r').read().split()[1]) + columns = max(60, tty_size) displace = max(len(x) for x in module_list) linelimit = columns - displace - 5 text = [] From caefc20f160e2dece37d883fea98c94f5bd89379 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Sun, 14 Dec 2014 18:09:42 +0100 Subject: [PATCH 531/813] Use --version to see if less can be executed (less) 2> /dev/null would fail if stdin is /dev/null. Since less --version does not read anything from stdin, it is perfect for seeing if the software exists or not. Also replace the whole os system detection by directly using subprocess ( as we use it elsewhere, we already depend on it ). 
--- bin/ansible-doc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/ansible-doc b/bin/ansible-doc index 59d14b6ef1..36db3dff42 100755 --- a/bin/ansible-doc +++ b/bin/ansible-doc @@ -71,7 +71,7 @@ def pager(text): pager_print(text) else: pager_pipe(text, os.environ['PAGER']) - elif hasattr(os, 'system') and os.system('(less) 2> /dev/null') == 0: + elif subprocess.call('(less --version) 2> /dev/null', shell = True) == 0: pager_pipe(text, 'less') else: pager_print(text) From 9b8a55032dd19a0a185f8c687d3f095b774083ff Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Sun, 14 Dec 2014 19:27:17 +0100 Subject: [PATCH 532/813] Do not use the variable name as a key for the result of the module Using the variable name can interfere with various systems used to communicate between modules and ansible ( as reported in #7732 , where ansible_facts is a reserved key for updating the fact cache, for example ). --- lib/ansible/runner/action_plugins/debug.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/runner/action_plugins/debug.py b/lib/ansible/runner/action_plugins/debug.py index 75613b9919..eaf1364c3f 100644 --- a/lib/ansible/runner/action_plugins/debug.py +++ b/lib/ansible/runner/action_plugins/debug.py @@ -52,7 +52,7 @@ class ActionModule(object): result = dict(msg=args['msg']) elif 'var' in args and not utils.LOOKUP_REGEX.search(args['var']): results = template.template(self.basedir, args['var'], inject, convert_bare=True) - result[args['var']] = results + result['var'] = { args['var']: results } # force flag to make debug output module always verbose result['verbose_always'] = True From b9761a06923aee6da4f6e3c17ad1711796cb0bc0 Mon Sep 17 00:00:00 2001 From: axiaoxin <254606826@qq.com> Date: Mon, 15 Dec 2014 15:32:49 +0800 Subject: [PATCH 533/813] members of a list must start with ``- `` --- docsite/rst/YAMLSyntax.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/YAMLSyntax.rst 
b/docsite/rst/YAMLSyntax.rst index 3230a39f24..0ebfb1be56 100644 --- a/docsite/rst/YAMLSyntax.rst +++ b/docsite/rst/YAMLSyntax.rst @@ -25,7 +25,7 @@ Ansible or not) should begin with ``---``. This is part of the YAML format and indicates the start of a document. All members of a list are lines beginning at the same indentation level starting -with a ``-`` (dash) character:: +with a ``- `` (dash and whitespace) character:: --- # A list of tasty fruits From c0cb4b3e080ed154b847d1b782c3ca9d0080f0f3 Mon Sep 17 00:00:00 2001 From: axiaoxin <254606826@qq.com> Date: Mon, 15 Dec 2014 15:44:00 +0800 Subject: [PATCH 534/813] in key/value pairs, whitespace is needed after the colon --- docsite/rst/YAMLSyntax.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/YAMLSyntax.rst b/docsite/rst/YAMLSyntax.rst index 0ebfb1be56..f92ba5791e 100644 --- a/docsite/rst/YAMLSyntax.rst +++ b/docsite/rst/YAMLSyntax.rst @@ -34,7 +34,7 @@ with a ``- `` (dash and whitespace) character:: - Strawberry - Mango -A dictionary is represented in a simple ``key:`` and ``value`` form:: +A dictionary is represented in a simple ``key: `` and ``value`` form:: --- # An employee record From 36eab28c7c1661a495120fa3b32f5b86d121c0ba Mon Sep 17 00:00:00 2001 From: axiaoxin <254606826@qq.com> Date: Mon, 15 Dec 2014 15:53:35 +0800 Subject: [PATCH 535/813] making the whitespace explicit in dict --- docsite/rst/YAMLSyntax.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/YAMLSyntax.rst b/docsite/rst/YAMLSyntax.rst index f92ba5791e..1b15f81e2a 100644 --- a/docsite/rst/YAMLSyntax.rst +++ b/docsite/rst/YAMLSyntax.rst @@ -34,7 +34,7 @@ with a ``- `` (dash and whitespace) character:: - Strawberry - Mango -A dictionary is represented in a simple ``key: `` and ``value`` form:: +A dictionary is represented in a simple ``key: `` (colon and whitespac) and ``value`` form:: --- # An employee record From 58388129676f764801b794a1996071fd12b61574 Mon Sep 17 00:00:00 
2001 From: axiaoxin <254606826@qq.com> Date: Mon, 15 Dec 2014 15:55:12 +0800 Subject: [PATCH 536/813] minor --- docsite/rst/YAMLSyntax.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/YAMLSyntax.rst b/docsite/rst/YAMLSyntax.rst index 1b15f81e2a..9e5ef31103 100644 --- a/docsite/rst/YAMLSyntax.rst +++ b/docsite/rst/YAMLSyntax.rst @@ -34,7 +34,7 @@ with a ``- `` (dash and whitespace) character:: - Strawberry - Mango -A dictionary is represented in a simple ``key: `` (colon and whitespac) and ``value`` form:: +A dictionary is represented in a simple ``key: `` (colon and whitespace) and ``value`` form:: --- # An employee record From 8278626dd04b52ffe56a8885c4dc1e9f82873d4a Mon Sep 17 00:00:00 2001 From: Jonathan Mainguy Date: Fri, 12 Dec 2014 15:51:47 -0500 Subject: [PATCH 537/813] Added distribution facts for OpenSuse --- lib/ansible/module_utils/facts.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 8e27b9b882..7a73a32ffb 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -325,11 +325,15 @@ class Facts(object): if 'suse' in data.lower(): if path == '/etc/os-release': release = re.search("PRETTY_NAME=[^(]+ \(?([^)]+?)\)", data) + distdata = get_file_content(path).split('\n')[0] + self.facts['distribution'] = distdata.split('=')[1] if release: self.facts['distribution_release'] = release.groups()[0] break elif path == '/etc/SuSE-release': data = data.splitlines() + distdata = get_file_content(path).split('\n')[0] + self.facts['distribution'] = distdata.split()[0] for line in data: release = re.search('CODENAME *= *([^\n]+)', line) if release: From ac28652602805796211d9a3486e3a0d6d5e73f7e Mon Sep 17 00:00:00 2001 From: Willem Pienaar Date: Tue, 16 Dec 2014 02:29:13 +0200 Subject: [PATCH 538/813] Fixed error handling for the enabling of PS Remoting --- examples/scripts/ConfigureRemotingForAnsible.ps1 | 8 +------- 1 
file changed, 1 insertion(+), 7 deletions(-) diff --git a/examples/scripts/ConfigureRemotingForAnsible.ps1 b/examples/scripts/ConfigureRemotingForAnsible.ps1 index 39601d2a76..1b45ce442b 100644 --- a/examples/scripts/ConfigureRemotingForAnsible.ps1 +++ b/examples/scripts/ConfigureRemotingForAnsible.ps1 @@ -98,13 +98,7 @@ ElseIf ((Get-Service "WinRM").Status -ne "Running") If (!(Get-PSSessionConfiguration -Verbose:$false) -or (!(Get-ChildItem WSMan:\localhost\Listener))) { Write-Verbose "Enabling PS Remoting." - Try - { - Enable-PSRemoting -Force -ErrorAction SilentlyContinue - } - Catch - { - } + Enable-PSRemoting -Force -ErrorAction Stop } Else { From bf916fb58a351ee409ef5bbb3899079712226ab7 Mon Sep 17 00:00:00 2001 From: root Date: Mon, 24 Nov 2014 18:03:32 +0000 Subject: [PATCH 539/813] Adding first pass at win_copy, win_file and win_template modules. --- lib/ansible/module_utils/powershell.ps1 | 22 + lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- lib/ansible/runner/action_plugins/fetch.py | 7 +- lib/ansible/runner/action_plugins/win_copy.py | 377 ++++++++++++++++ .../runner/action_plugins/win_template.py | 147 ++++++ test/integration/integration_config.yml | 1 + .../roles/prepare_win_tests/tasks/main.yml | 30 ++ .../roles/test_win_copy/files/foo.txt | 1 + .../roles/test_win_copy/files/subdir/bar.txt | 1 + .../files/subdir/subdir2/baz.txt | 1 + .../subdir/subdir2/subdir3/subdir4/qux.txt | 1 + .../roles/test_win_copy/meta/main.yml | 3 + .../roles/test_win_copy/tasks/main.yml | 259 +++++++++++ .../roles/test_win_file/files/foo.txt | 1 + .../files/foobar/directory/fileC | 0 .../files/foobar/directory/fileD | 0 .../roles/test_win_file/files/foobar/fileA | 1 + .../roles/test_win_file/files/foobar/fileB | 0 .../roles/test_win_file/meta/main.yml | 3 + .../roles/test_win_file/tasks/main.yml | 421 ++++++++++++++++++ .../roles/test_win_template/files/foo.txt | 1 + .../roles/test_win_template/meta/main.yml | 3 + .../roles/test_win_template/tasks/main.yml 
| 103 +++++ .../roles/test_win_template/templates/foo.j2 | 1 + .../roles/test_win_template/vars/main.yml | 1 + test/integration/test_winrm.yml | 3 + 27 files changed, 1387 insertions(+), 5 deletions(-) create mode 100644 lib/ansible/runner/action_plugins/win_copy.py create mode 100644 lib/ansible/runner/action_plugins/win_template.py create mode 100644 test/integration/roles/prepare_win_tests/tasks/main.yml create mode 100644 test/integration/roles/test_win_copy/files/foo.txt create mode 100644 test/integration/roles/test_win_copy/files/subdir/bar.txt create mode 100644 test/integration/roles/test_win_copy/files/subdir/subdir2/baz.txt create mode 100644 test/integration/roles/test_win_copy/files/subdir/subdir2/subdir3/subdir4/qux.txt create mode 100644 test/integration/roles/test_win_copy/meta/main.yml create mode 100644 test/integration/roles/test_win_copy/tasks/main.yml create mode 100644 test/integration/roles/test_win_file/files/foo.txt create mode 100644 test/integration/roles/test_win_file/files/foobar/directory/fileC create mode 100644 test/integration/roles/test_win_file/files/foobar/directory/fileD create mode 100644 test/integration/roles/test_win_file/files/foobar/fileA create mode 100644 test/integration/roles/test_win_file/files/foobar/fileB create mode 100644 test/integration/roles/test_win_file/meta/main.yml create mode 100644 test/integration/roles/test_win_file/tasks/main.yml create mode 100644 test/integration/roles/test_win_template/files/foo.txt create mode 100644 test/integration/roles/test_win_template/meta/main.yml create mode 100644 test/integration/roles/test_win_template/tasks/main.yml create mode 100644 test/integration/roles/test_win_template/templates/foo.j2 create mode 100644 test/integration/roles/test_win_template/vars/main.yml diff --git a/lib/ansible/module_utils/powershell.ps1 b/lib/ansible/module_utils/powershell.ps1 index c097c69768..57d2c1b101 100644 --- a/lib/ansible/module_utils/powershell.ps1 +++ 
b/lib/ansible/module_utils/powershell.ps1 @@ -142,3 +142,25 @@ Function ConvertTo-Bool return } +# Helper function to calculate md5 of a file in a way which powershell 3 +# and above can handle: +Function Get-FileMd5($path) +{ + $hash = "" + If (Test-Path -PathType Leaf $path) + { + $sp = new-object -TypeName System.Security.Cryptography.MD5CryptoServiceProvider; + $fp = [System.IO.File]::Open($path, [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read); + [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower(); + $fp.Dispose(); + } + ElseIf (Test-Path -PathType Container $path) + { + $hash= "3"; + } + Else + { + $hash = "1"; + } + return $hash +} diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index e1f90635af..08c5cc06c6 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit e1f90635af0e9ca09449fe47f94471bf9e4ffa5d +Subproject commit 08c5cc06c6ad9a1e0016ad89eb0f7ca009cc8108 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index b8071a8d5e..317654dba5 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit b8071a8d5eebe405250774a0b7c6c74451bc9532 +Subproject commit 317654dba5cae905b5d6eed78f5c6c6984cc2f02 diff --git a/lib/ansible/runner/action_plugins/fetch.py b/lib/ansible/runner/action_plugins/fetch.py index 3fa748ccbd..61f9f032a3 100644 --- a/lib/ansible/runner/action_plugins/fetch.py +++ b/lib/ansible/runner/action_plugins/fetch.py @@ -127,13 +127,13 @@ class ActionModule(object): elif remote_checksum == '2': result = dict(msg="no read permission on remote file, not transferring, ignored", file=source, changed=False) elif remote_checksum == '3': - result = dict(msg="remote file is a directory, fetch cannot work on directories", file=source, changed=False) + result = dict(failed=True, msg="remote file is a directory, fetch cannot work on directories", file=source, changed=False) elif remote_checksum == 
'4': result = dict(msg="python isn't present on the system. Unable to compute checksum", file=source, changed=False) return ReturnData(conn=conn, result=result) # calculate checksum for the local file - local_checksum = utils.checksum(dest) + local_checksum = utils.md5(dest) if remote_checksum != local_checksum: # create the containing directories, if needed @@ -147,7 +147,8 @@ class ActionModule(object): f = open(dest, 'w') f.write(remote_data) f.close() - new_checksum = utils.secure_hash(dest) + new_checksum = utils.md5(dest) + # new_checksum = utils.secure_hash(dest) # For backwards compatibility. We'll return None on FIPS enabled # systems try: diff --git a/lib/ansible/runner/action_plugins/win_copy.py b/lib/ansible/runner/action_plugins/win_copy.py new file mode 100644 index 0000000000..28362195c9 --- /dev/null +++ b/lib/ansible/runner/action_plugins/win_copy.py @@ -0,0 +1,377 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +import os + +from ansible import utils +import ansible.constants as C +import ansible.utils.template as template +from ansible import errors +from ansible.runner.return_data import ReturnData +import base64 +import json +import stat +import tempfile +import pipes + +## fixes https://github.com/ansible/ansible/issues/3518 +# http://mypy.pythonblogs.com/12_mypy/archive/1253_workaround_for_python_bug_ascii_codec_cant_encode_character_uxa0_in_position_111_ordinal_not_in_range128.html +import sys +reload(sys) +sys.setdefaultencoding("utf8") + + +class ActionModule(object): + + def __init__(self, runner): + self.runner = runner + + def run(self, conn, tmp_path, module_name, module_args, inject, complex_args=None, **kwargs): + ''' handler for file transfer operations ''' + + # load up options + options = {} + if complex_args: + options.update(complex_args) + options.update(utils.parse_kv(module_args)) + source = options.get('src', None) + content = options.get('content', None) + dest = options.get('dest', None) + raw = utils.boolean(options.get('raw', 'no')) + force = utils.boolean(options.get('force', 'yes')) + + # content with newlines is going to be escaped to safely load in yaml + # now we need to unescape it so that the newlines are evaluated properly + # when writing the file to disk + if content: + if isinstance(content, unicode): + try: + content = content.decode('unicode-escape') + except UnicodeDecodeError: + pass + + if (source is None and content is None and not 'first_available_file' in inject) or dest is None: + result=dict(failed=True, msg="src (or content) and dest are required") + return ReturnData(conn=conn, result=result) + elif (source is not None or 'first_available_file' in inject) and content is not None: + result=dict(failed=True, msg="src and content are mutually exclusive") + return ReturnData(conn=conn, result=result) + + # Check if the source ends with a "/" + source_trailing_slash = False + if source: + source_trailing_slash = 
source.endswith("/") + + # Define content_tempfile in case we set it after finding content populated. + content_tempfile = None + + # If content is defined make a temp file and write the content into it. + if content is not None: + try: + # If content comes to us as a dict it should be decoded json. + # We need to encode it back into a string to write it out. + if type(content) is dict: + content_tempfile = self._create_content_tempfile(json.dumps(content)) + else: + content_tempfile = self._create_content_tempfile(content) + source = content_tempfile + except Exception, err: + result = dict(failed=True, msg="could not write content temp file: %s" % err) + return ReturnData(conn=conn, result=result) + # if we have first_available_file in our vars + # look up the files and use the first one we find as src + elif 'first_available_file' in inject: + found = False + for fn in inject.get('first_available_file'): + fn_orig = fn + fnt = template.template(self.runner.basedir, fn, inject) + fnd = utils.path_dwim(self.runner.basedir, fnt) + if not os.path.exists(fnd) and '_original_file' in inject: + fnd = utils.path_dwim_relative(inject['_original_file'], 'files', fnt, self.runner.basedir, check=False) + if os.path.exists(fnd): + source = fnd + found = True + break + if not found: + results = dict(failed=True, msg="could not find src in first_available_file list") + return ReturnData(conn=conn, result=results) + else: + source = template.template(self.runner.basedir, source, inject) + if '_original_file' in inject: + source = utils.path_dwim_relative(inject['_original_file'], 'files', source, self.runner.basedir) + else: + source = utils.path_dwim(self.runner.basedir, source) + + # A list of source file tuples (full_path, relative_path) which will try to copy to the destination + source_files = [] + + # If source is a directory populate our list else source is a file and translate it to a tuple. 
+ if os.path.isdir(source): + # Get the amount of spaces to remove to get the relative path. + if source_trailing_slash: + sz = len(source) + 1 + else: + sz = len(source.rsplit('/', 1)[0]) + 1 + + # Walk the directory and append the file tuples to source_files. + for base_path, sub_folders, files in os.walk(source): + for file in files: + full_path = os.path.join(base_path, file) + rel_path = full_path[sz:] + source_files.append((full_path, rel_path)) + + # If it's recursive copy, destination is always a dir, + # explicitly mark it so (note - copy module relies on this). + if not conn.shell.path_has_trailing_slash(dest): + dest = conn.shell.join_path(dest, '') + else: + source_files.append((source, os.path.basename(source))) + + changed = False + diffs = [] + module_result = {"changed": False} + + # A register for if we executed a module. + # Used to cut down on command calls when not recursive. + module_executed = False + + # Tell _execute_module to delete the file if there is one file. + delete_remote_tmp = (len(source_files) == 1) + + # If this is a recursive action create a tmp_path that we can share as the _exec_module create is too late. + if not delete_remote_tmp: + if "-tmp-" not in tmp_path: + tmp_path = self.runner._make_tmp_path(conn) + + # expand any user home dir specifier + dest = self.runner._remote_expand_user(conn, dest, tmp_path) + + for source_full, source_rel in source_files: + # Generate a hash of the local file. + local_checksum = utils.checksum(source_full) + + # If local_checksum is not defined we can't find the file so we should fail out. + if local_checksum is None: + result = dict(failed=True, msg="could not find src=%s" % source_full) + return ReturnData(conn=conn, result=result) + + # This is kind of optimization - if user told us destination is + # dir, do path manipulation right away, otherwise we still check + # for dest being a dir via remote call below. 
+ if conn.shell.path_has_trailing_slash(dest): + dest_file = conn.shell.join_path(dest, source_rel) + else: + dest_file = conn.shell.join_path(dest) + + # Attempt to get the remote checksum + remote_checksum = self.runner._remote_checksum(conn, tmp_path, dest_file, inject) + + if remote_checksum == '3': + # The remote_checksum was executed on a directory. + if content is not None: + # If source was defined as content remove the temporary file and fail out. + self._remove_tempfile_if_content_defined(content, content_tempfile) + result = dict(failed=True, msg="can not use content with a dir as dest") + return ReturnData(conn=conn, result=result) + else: + # Append the relative source location to the destination and retry remote_checksum. + dest_file = conn.shell.join_path(dest, source_rel) + remote_checksum = self.runner._remote_checksum(conn, tmp_path, dest_file, inject) + + if remote_checksum != '1' and not force: + # remote_file does not exist so continue to next iteration. + continue + + if local_checksum != remote_checksum: + # The checksums don't match and we will change or error out. + changed = True + + # Create a tmp_path if missing only if this is not recursive. + # If this is recursive we already have a tmp_path. + if delete_remote_tmp: + if "-tmp-" not in tmp_path: + tmp_path = self.runner._make_tmp_path(conn) + + if self.runner.diff and not raw: + diff = self._get_diff_data(conn, tmp_path, inject, dest_file, source_full) + else: + diff = {} + + if self.runner.noop_on_check(inject): + self._remove_tempfile_if_content_defined(content, content_tempfile) + diffs.append(diff) + changed = True + module_result = dict(changed=True) + continue + + # Define a remote directory that we will copy the file to. 
+ tmp_src = tmp_path + 'source' + + if not raw: + conn.put_file(source_full, tmp_src) + else: + conn.put_file(source_full, dest_file) + + # We have copied the file remotely and no longer require our content_tempfile + self._remove_tempfile_if_content_defined(content, content_tempfile) + + # fix file permissions when the copy is done as a different user + if (self.runner.sudo and self.runner.sudo_user != 'root' or self.runner.su and self.runner.su_user != 'root') and not raw: + self.runner._remote_chmod(conn, 'a+r', tmp_src, tmp_path) + + if raw: + # Continue to next iteration if raw is defined. + continue + + # Run the copy module + + # src and dest here come after original and override them + # we pass dest only to make sure it includes trailing slash in case of recursive copy + new_module_args = dict( + src=tmp_src, + dest=dest, + original_basename=source_rel + ) + if self.runner.noop_on_check(inject): + new_module_args['CHECKMODE'] = True + if self.runner.no_log: + new_module_args['NO_LOG'] = True + + module_args_tmp = utils.merge_module_args(module_args, new_module_args) + + module_return = self.runner._execute_module(conn, tmp_path, 'win_copy', module_args_tmp, inject=inject, complex_args=complex_args, delete_remote_tmp=delete_remote_tmp) + module_executed = True + + else: + # no need to transfer the file, already correct md5, but still need to call + # the file module in case we want to change attributes + self._remove_tempfile_if_content_defined(content, content_tempfile) + + if raw: + # Continue to next iteration if raw is defined. + # self.runner._remove_tmp_path(conn, tmp_path) + continue + + tmp_src = tmp_path + source_rel + + # Build temporary module_args. 
+ new_module_args = dict( + src=tmp_src, + dest=dest, + original_basename=source_rel + ) + if self.runner.noop_on_check(inject): + new_module_args['CHECKMODE'] = True + if self.runner.no_log: + new_module_args['NO_LOG'] = True + + module_args_tmp = utils.merge_module_args(module_args, new_module_args) + + # Execute the file module. + module_return = self.runner._execute_module(conn, tmp_path, 'win_file', module_args_tmp, inject=inject, complex_args=complex_args, delete_remote_tmp=delete_remote_tmp) + module_executed = True + + module_result = module_return.result + if not module_result.get('checksum'): + module_result['checksum'] = local_checksum + if module_result.get('failed') == True: + return module_return + if module_result.get('changed') == True: + changed = True + + # Delete tmp_path if we were recursive or if we did not execute a module. + if (not C.DEFAULT_KEEP_REMOTE_FILES and not delete_remote_tmp) \ + or (not C.DEFAULT_KEEP_REMOTE_FILES and delete_remote_tmp and not module_executed): + self.runner._remove_tmp_path(conn, tmp_path) + + # the file module returns the file path as 'path', but + # the copy module uses 'dest', so add it if it's not there + if 'path' in module_result and 'dest' not in module_result: + module_result['dest'] = module_result['path'] + + # TODO: Support detailed status/diff for multiple files + if len(source_files) == 1: + result = module_result + else: + result = dict(dest=dest, src=source, changed=changed) + if len(diffs) == 1: + return ReturnData(conn=conn, result=result, diff=diffs[0]) + else: + return ReturnData(conn=conn, result=result) + + def _create_content_tempfile(self, content): + ''' Create a tempfile containing defined content ''' + fd, content_tempfile = tempfile.mkstemp() + f = os.fdopen(fd, 'w') + try: + f.write(content) + except Exception, err: + os.remove(content_tempfile) + raise Exception(err) + finally: + f.close() + return content_tempfile + + def _get_diff_data(self, conn, tmp, inject, destination, source): 
+ peek_result = self.runner._execute_module(conn, tmp, 'win_file', "path=%s diff_peek=1" % destination, inject=inject, persist_files=True) + + if not peek_result.is_successful(): + return {} + + diff = {} + if peek_result.result['state'] == 'absent': + diff['before'] = '' + elif peek_result.result['appears_binary']: + diff['dst_binary'] = 1 + elif peek_result.result['size'] > utils.MAX_FILE_SIZE_FOR_DIFF: + diff['dst_larger'] = utils.MAX_FILE_SIZE_FOR_DIFF + else: + dest_result = self.runner._execute_module(conn, tmp, 'slurp', "path=%s" % destination, inject=inject, persist_files=True) + if 'content' in dest_result.result: + dest_contents = dest_result.result['content'] + if dest_result.result['encoding'] == 'base64': + dest_contents = base64.b64decode(dest_contents) + else: + raise Exception("unknown encoding, failed: %s" % dest_result.result) + diff['before_header'] = destination + diff['before'] = dest_contents + + src = open(source) + src_contents = src.read(8192) + st = os.stat(source) + if "\x00" in src_contents: + diff['src_binary'] = 1 + elif st[stat.ST_SIZE] > utils.MAX_FILE_SIZE_FOR_DIFF: + diff['src_larger'] = utils.MAX_FILE_SIZE_FOR_DIFF + else: + src.seek(0) + diff['after_header'] = source + diff['after'] = src.read() + + return diff + + def _remove_tempfile_if_content_defined(self, content, content_tempfile): + if content is not None: + os.remove(content_tempfile) + + + def _result_key_merge(self, options, results): + # add keys to file module results to mimic copy + if 'path' in results.result and 'dest' not in results.result: + results.result['dest'] = results.result['path'] + del results.result['path'] + return results diff --git a/lib/ansible/runner/action_plugins/win_template.py b/lib/ansible/runner/action_plugins/win_template.py new file mode 100644 index 0000000000..e284316191 --- /dev/null +++ b/lib/ansible/runner/action_plugins/win_template.py @@ -0,0 +1,147 @@ +# (c) 2012, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is 
free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +import os +import pipes +from ansible.utils import template +from ansible import utils +from ansible import errors +from ansible.runner.return_data import ReturnData +import base64 + +class ActionModule(object): + + TRANSFERS_FILES = True + + def __init__(self, runner): + self.runner = runner + + def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs): + ''' handler for template operations ''' + + if not self.runner.is_playbook: + raise errors.AnsibleError("in current versions of ansible, templates are only usable in playbooks") + + # load up options + options = {} + if complex_args: + options.update(complex_args) + options.update(utils.parse_kv(module_args)) + + source = options.get('src', None) + dest = options.get('dest', None) + + if (source is None and 'first_available_file' not in inject) or dest is None: + result = dict(failed=True, msg="src and dest are required") + return ReturnData(conn=conn, comm_ok=False, result=result) + + # if we have first_available_file in our vars + # look up the files and use the first one we find as src + + if 'first_available_file' in inject: + found = False + for fn in self.runner.module_vars.get('first_available_file'): + fn_orig = fn + fnt = template.template(self.runner.basedir, fn, inject) + fnd = utils.path_dwim(self.runner.basedir, fnt) + if not os.path.exists(fnd) and '_original_file' in inject: + fnd = 
utils.path_dwim_relative(inject['_original_file'], 'templates', fnt, self.runner.basedir, check=False) + if os.path.exists(fnd): + source = fnd + found = True + break + if not found: + result = dict(failed=True, msg="could not find src in first_available_file list") + return ReturnData(conn=conn, comm_ok=False, result=result) + else: + source = template.template(self.runner.basedir, source, inject) + + if '_original_file' in inject: + source = utils.path_dwim_relative(inject['_original_file'], 'templates', source, self.runner.basedir) + else: + source = utils.path_dwim(self.runner.basedir, source) + + + if dest.endswith("\\"): # TODO: Check that this fixes the path for Windows hosts. + base = os.path.basename(source) + dest = os.path.join(dest, base) + + # template the source data locally & get ready to transfer + try: + resultant = template.template_from_file(self.runner.basedir, source, inject, vault_password=self.runner.vault_pass) + except Exception, e: + result = dict(failed=True, msg=type(e).__name__ + ": " + str(e)) + return ReturnData(conn=conn, comm_ok=False, result=result) + + local_checksum = utils.checksum_s(resultant) + remote_checksum = self.runner._remote_checksum(conn, tmp, dest, inject) + + if local_checksum != remote_checksum: + + # template is different from the remote value + + # if showing diffs, we need to get the remote value + dest_contents = '' + + if self.runner.diff: + # using persist_files to keep the temp directory around to avoid needing to grab another + dest_result = self.runner._execute_module(conn, tmp, 'slurp', "path=%s" % dest, inject=inject, persist_files=True) + if 'content' in dest_result.result: + dest_contents = dest_result.result['content'] + if dest_result.result['encoding'] == 'base64': + dest_contents = base64.b64decode(dest_contents) + else: + raise Exception("unknown encoding, failed: %s" % dest_result.result) + + xfered = self.runner._transfer_str(conn, tmp, 'source', resultant) + + # fix file permissions when the 
copy is done as a different user + if self.runner.sudo and self.runner.sudo_user != 'root' or self.runner.su and self.runner.su_user != 'root': + self.runner._remote_chmod(conn, 'a+r', xfered, tmp) + + # run the copy module + new_module_args = dict( + src=xfered, + dest=dest, + original_basename=os.path.basename(source), + follow=True, + ) + module_args_tmp = utils.merge_module_args(module_args, new_module_args) + + if self.runner.noop_on_check(inject): + return ReturnData(conn=conn, comm_ok=True, result=dict(changed=True), diff=dict(before_header=dest, after_header=source, before=dest_contents, after=resultant)) + else: + res = self.runner._execute_module(conn, tmp, 'win_copy', module_args_tmp, inject=inject, complex_args=complex_args) + if res.result.get('changed', False): + res.diff = dict(before=dest_contents, after=resultant) + return res + else: + # when running the file module based on the template data, we do + # not want the source filename (the name of the template) to be used, + # since this would mess up links, so we clear the src param and tell + # the module to follow links + new_module_args = dict( + src=None, + follow=True, + ) + # be sure to inject the check mode param into the module args and + # rely on the file module to report its changed status + if self.runner.noop_on_check(inject): + new_module_args['CHECKMODE'] = True + module_args = utils.merge_module_args(module_args, new_module_args) + return self.runner._execute_module(conn, tmp, 'win_file', module_args, inject=inject, complex_args=complex_args) + diff --git a/test/integration/integration_config.yml b/test/integration/integration_config.yml index 4c2fb2a0a5..bf5d6db3de 100644 --- a/test/integration/integration_config.yml +++ b/test/integration/integration_config.yml @@ -1,4 +1,5 @@ --- +win_output_dir: 'C:/temp/' output_dir: ~/ansible_testing non_root_test_user: ansible pip_test_package: epdb diff --git a/test/integration/roles/prepare_win_tests/tasks/main.yml 
b/test/integration/roles/prepare_win_tests/tasks/main.yml new file mode 100644 index 0000000000..756c977fb1 --- /dev/null +++ b/test/integration/roles/prepare_win_tests/tasks/main.yml @@ -0,0 +1,30 @@ +# test code for the windows versions of copy, file and template module +# originally +# (c) 2014, Michael DeHaan + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + + +- name: clean out the test directory + win_file: name={{win_output_dir|mandatory}} state=absent + tags: + - prepare + +- name: create the test directory + win_file: name={{win_output_dir}} state=directory + tags: + - prepare + diff --git a/test/integration/roles/test_win_copy/files/foo.txt b/test/integration/roles/test_win_copy/files/foo.txt new file mode 100644 index 0000000000..7c6ded14ec --- /dev/null +++ b/test/integration/roles/test_win_copy/files/foo.txt @@ -0,0 +1 @@ +foo.txt diff --git a/test/integration/roles/test_win_copy/files/subdir/bar.txt b/test/integration/roles/test_win_copy/files/subdir/bar.txt new file mode 100644 index 0000000000..76018072e0 --- /dev/null +++ b/test/integration/roles/test_win_copy/files/subdir/bar.txt @@ -0,0 +1 @@ +baz diff --git a/test/integration/roles/test_win_copy/files/subdir/subdir2/baz.txt b/test/integration/roles/test_win_copy/files/subdir/subdir2/baz.txt new file mode 100644 index 0000000000..76018072e0 --- /dev/null +++ 
b/test/integration/roles/test_win_copy/files/subdir/subdir2/baz.txt @@ -0,0 +1 @@ +baz diff --git a/test/integration/roles/test_win_copy/files/subdir/subdir2/subdir3/subdir4/qux.txt b/test/integration/roles/test_win_copy/files/subdir/subdir2/subdir3/subdir4/qux.txt new file mode 100644 index 0000000000..78df5b06bd --- /dev/null +++ b/test/integration/roles/test_win_copy/files/subdir/subdir2/subdir3/subdir4/qux.txt @@ -0,0 +1 @@ +qux \ No newline at end of file diff --git a/test/integration/roles/test_win_copy/meta/main.yml b/test/integration/roles/test_win_copy/meta/main.yml new file mode 100644 index 0000000000..55200b3fc6 --- /dev/null +++ b/test/integration/roles/test_win_copy/meta/main.yml @@ -0,0 +1,3 @@ +dependencies: + - prepare_win_tests + diff --git a/test/integration/roles/test_win_copy/tasks/main.yml b/test/integration/roles/test_win_copy/tasks/main.yml new file mode 100644 index 0000000000..f0fe2d04c4 --- /dev/null +++ b/test/integration/roles/test_win_copy/tasks/main.yml @@ -0,0 +1,259 @@ +# test code for the copy module and action plugin +# (c) 2014, Michael DeHaan + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +- name: record the output directory + set_fact: output_file={{win_output_dir}}/foo.txt + +- name: initiate a basic copy +#- name: initiate a basic copy, and also test the mode +# win_copy: src=foo.txt dest={{output_file}} mode=0444 + win_copy: src=foo.txt dest={{output_file}} + register: copy_result + +- debug: var=copy_result + +#- name: check the presence of the output file +- name: check the mode of the output file + win_file: name={{output_file}} state=file + register: file_result_check + +- debug: var=file_result_check + + +#- name: assert the mode is correct +# assert: +# that: +# - "file_result_check.mode == '0444'" + +- name: assert basic copy worked + assert: + that: + - "'changed' in copy_result" +# - "'dest' in copy_result" +# - "'group' in copy_result" +# - "'gid' in copy_result" + - "'checksum' in copy_result" +# - "'owner' in copy_result" +# - "'size' in copy_result" +# - "'src' in copy_result" +# - "'state' in copy_result" +# - "'uid' in copy_result" + +- name: verify that the file was marked as changed + assert: + that: + - "copy_result.changed == true" + +- name: verify that the file checksum is correct + assert: + that: + - "copy_result.checksum[0] == 'c47397529fe81ab62ba3f85e9f4c71f2'" + +- name: check the stat results of the file + win_stat: path={{output_file}} + register: stat_results + +- name: assert the stat results are correct + assert: + that: + - "stat_results.stat.exists == true" +# - "stat_results.stat.isblk == false" +# - "stat_results.stat.isfifo == false" +# - "stat_results.stat.isreg == true" +# - "stat_results.stat.issock == false" + - "stat_results.stat.md5[0] == 'c47397529fe81ab62ba3f85e9f4c71f2'" + +- name: overwrite the file via same means + win_copy: src=foo.txt dest={{output_file}} + register: copy_result2 + +- name: assert that the file was not changed + assert: + that: + - "not copy_result2|changed" + +# content system not available in win_copy right now +#- name: overwrite the file using the content system +# win_copy: 
content="modified" dest={{output_file}} +# register: copy_result3 +# +#- name: assert that the file has changed +# assert: +# that: +# - "copy_result3|changed" +# - "'content' not in copy_result3" + +# test recursive copy + +- name: set the output subdirectory + set_fact: output_subdir={{win_output_dir}}/sub/ + +- name: make an output subdirectory + win_file: name={{output_subdir}} state=directory + +- name: test recursive copy to directory +# win_copy: src=subdir dest={{output_subdir}} directory_mode=0700 + win_copy: src=subdir dest={{output_subdir}} + register: recursive_copy_result + +- debug: var=recursive_copy_result + +- name: check that a file in a directory was transferred + win_stat: path={{win_output_dir}}/sub/subdir/bar.txt + register: stat_bar + +- name: check that a file in a deeper directory was transferred + win_stat: path={{win_output_dir}}/sub/subdir/subdir2/baz.txt + register: stat_bar2 + +- name: check that a file in a directory whose parent contains a directory alone was transferred + win_stat: path={{win_output_dir}}/sub/subdir/subdir2/subdir3/subdir4/qux.txt + register: stat_bar3 + +- name: assert recursive copy things + assert: + that: + - "stat_bar.stat.exists" + - "stat_bar2.stat.exists" + - "stat_bar3.stat.exists" + +- name: stat the recursively copied directories + win_stat: path={{win_output_dir}}/sub/{{item}} + register: dir_stats + with_items: + - "subdir" + - "subdir/subdir2" + - "subdir/subdir2/subdir3" + - "subdir/subdir2/subdir3/subdir4" + +# can't check file mode on windows so commenting this one out. +#- name: assert recursive copied directories mode +# assert: +# that: +# - "{{item.stat.mode}} == 0700" +# with_items: dir_stats.results + + +# errors on this aren't presently ignored so this test is commented out. But it would be nice to fix. 
+# + +# content param not available in win_copy +#- name: overwrite the file again using the content system, also passing along file params +# win_copy: content="modified" dest={{output_file}} +# register: copy_result4 + +#- name: assert invalid copy input location fails +# win_copy: src=invalid_file_location_does_not_exist dest={{win_output_dir}}/file.txt +# ignore_errors: True +# register: failed_copy + +# owner not available in win_copy, commenting out +#- name: copy already copied directory again +# win_copy: src=subdir dest={{output_subdir | expanduser}} owner={{ansible_ssh_user}} +# register: copy_result5 + +#- name: assert that the directory was not changed +# assert: +# that: +# - "not copy_result5|changed" + +# content not available in win_copy, commenting out. +# issue 8394 +#- name: create a file with content and a literal multiline block +# win_copy: | +# content='this is the first line +# this is the second line +# +# this line is after an empty line +# this line is the last line +# ' +# dest={{win_output_dir}}/multiline.txt +# register: copy_result6 + +#- debug: var=copy_result6 + +#- name: assert the multiline file was created correctly +# assert: +# that: +# - "copy_result6.changed" +# - "copy_result6.dest == '{{win_output_dir|expanduser}}/multiline.txt'" +# - "copy_result6.checksum == '1627d51e7e607c92cf1a502bf0c6cce3'" + +# test overwriting a file as an unprivileged user (pull request #8624) +# this can't be relative to {{win_output_dir}} as ~root usually has mode 700 + +#- name: create world writable directory + #win_file: dest=/tmp/worldwritable state=directory mode=0777 + +#- name: create world writable file +# win_copy: dest=/tmp/worldwritable/file.txt content="bar" mode=0666 + +#- name: overwrite the file as user nobody +# win_copy: dest=/tmp/worldwritable/file.txt content="baz" +# sudo: yes +# sudo_user: nobody +# register: copy_result7 + +#- name: assert the file was overwritten +# assert: +# that: +# - "copy_result7.changed" +# - 
"copy_result7.dest == '/tmp/worldwritable/file.txt'"
+# - "copy_result7.checksum == '73feffa4b7f6bb68e44cf984c85f6e88'"

+#- name: clean up
+# win_file: dest=/tmp/worldwritable state=absent

+# test overwriting a link using "follow=yes" so that the link
+# is preserved and the link target is updated

+#- name: create a test file to symlink to
+# win_copy: dest={{win_output_dir}}/follow_test content="this is the follow test file\n"
+#
+#- name: create a symlink to the test file
+# win_file: path={{win_output_dir}}/follow_link src='./follow_test' state=link
+#
+#- name: update the test file using follow=True to preserve the link
+# win_copy: dest={{win_output_dir}}/follow_link content="this is the new content\n" follow=yes
+# register: replace_follow_result

+#- name: stat the link path
+# win_stat: path={{win_output_dir}}/follow_link
+# register: stat_link_result
+#
+#- name: assert that the link is still a link
+# assert:
+# that:
+# - stat_link_result.stat.islnk
+#
+#- name: get the md5 of the link target
+# shell: checksum {{win_output_dir}}/follow_test | cut -f1 -sd ' '
+# register: target_file_result

+#- name: assert that the link target was updated
+# assert:
+# that:
+# - replace_follow_result.checksum == target_file_result.stdout

+- name: clean up sub
+ win_file: path={{win_output_dir}}/sub state=absent

+- name: clean up foo.txt
+ win_file: path={{win_output_dir}}/foo.txt state=absent

+ diff --git a/test/integration/roles/test_win_file/files/foo.txt b/test/integration/roles/test_win_file/files/foo.txt new file mode 100644 index 0000000000..7c6ded14ec --- /dev/null +++ b/test/integration/roles/test_win_file/files/foo.txt @@ -0,0 +1 @@ +foo.txt diff --git a/test/integration/roles/test_win_file/files/foobar/directory/fileC b/test/integration/roles/test_win_file/files/foobar/directory/fileC new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/integration/roles/test_win_file/files/foobar/directory/fileD 
b/test/integration/roles/test_win_file/files/foobar/directory/fileD new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/integration/roles/test_win_file/files/foobar/fileA b/test/integration/roles/test_win_file/files/foobar/fileA new file mode 100644 index 0000000000..ab47708c98 --- /dev/null +++ b/test/integration/roles/test_win_file/files/foobar/fileA @@ -0,0 +1 @@ +fileA diff --git a/test/integration/roles/test_win_file/files/foobar/fileB b/test/integration/roles/test_win_file/files/foobar/fileB new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/integration/roles/test_win_file/meta/main.yml b/test/integration/roles/test_win_file/meta/main.yml new file mode 100644 index 0000000000..55200b3fc6 --- /dev/null +++ b/test/integration/roles/test_win_file/meta/main.yml @@ -0,0 +1,3 @@ +dependencies: + - prepare_win_tests + diff --git a/test/integration/roles/test_win_file/tasks/main.yml b/test/integration/roles/test_win_file/tasks/main.yml new file mode 100644 index 0000000000..35ecfb6387 --- /dev/null +++ b/test/integration/roles/test_win_file/tasks/main.yml @@ -0,0 +1,421 @@ +# Test code for the file module. +# (c) 2014, Richard Isaacson + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +- set_fact: output_file={{win_output_dir}}\\foo.txt + +- name: prep with a basic win copy + win_copy: src=foo.txt dest={{output_file}} + +- name: verify that we are checking a file and it is present + win_file: path={{output_file}} state=file + register: file_result + +- name: verify that the file was marked as changed + assert: + that: + - "file_result.changed == false" +# - "file_result.state == 'file'" + +- name: verify that we are checking an absent file + win_file: path={{win_output_dir}}\bar.txt state=absent + register: file2_result + +- name: verify that the file was marked as changed + assert: + that: + - "file2_result.changed == false" +# - "file2_result.state == 'absent'" + +- name: verify we can touch a file + win_file: path={{win_output_dir}}\baz.txt state=touch + register: file3_result + +- name: verify that the file was marked as changed + assert: + that: + - "file3_result.changed == true" +# - "file3_result.state == 'file'" +# - "file3_result.mode == '0644'" + +#- name: change file mode +# win_file: path={{win_output_dir}}/baz.txt mode=0600 +# register: file4_result + +#- name: verify that the file was marked as changed +# assert: +# that: +# - "file4_result.changed == true" +# - "file4_result.mode == '0600'" +# +#- name: change ownership and group +# win_file: path={{win_output_dir}}/baz.txt owner=1234 group=1234 +# +#- name: setup a tmp-like directory for ownership test +# win_file: path=/tmp/worldwritable mode=1777 state=directory + +#- name: Ask to create a file without enough perms to change ownership +# win_file: path=/tmp/worldwritable/baz.txt state=touch owner=root +# sudo: yes +# sudo_user: nobody +# register: chown_result +# ignore_errors: True + +#- name: Ask whether the new file exists +# win_stat: path=/tmp/worldwritable/baz.txt +# register: file_exists_result + +#- name: Verify that the file doesn't exist on failure +# assert: +# that: +# - "chown_result.failed == True" +# - "file_exists_result.stat.exists == False" +# +- name: clean 
up + win_file: path=/tmp/worldwritable state=absent +
+#- name: create soft link to file
+# win_file: src={{output_file}} dest={{win_output_dir}}/soft.txt state=link
+# register: file5_result

+#- name: verify that the file was marked as changed
+# assert:
+# that:
+# - "file5_result.changed == true"
+#
+#- name: create hard link to file
+# win_file: src={{output_file}} dest={{win_output_dir}}/hard.txt state=hard
+# register: file6_result
+#
+#- name: verify that the file was marked as changed
+# assert:
+# that:
+# - "file6_result.changed == true"
+#
+- name: create a directory
+ win_file: path={{win_output_dir}}\foobar state=directory
+ register: file7_result
+
+- debug: var=file7_result
+
+- name: verify that the file was marked as changed
+ assert:
+ that:
+ - "file7_result.changed == true"
+# - "file7_result.state == 'directory'"
+
+# windows and selinux unlikely to ever mix, removing these tests:
+#- name: determine if selinux is installed
+# shell: which getenforce || exit 0
+# register: selinux_installed

+#- name: determine if selinux is enabled
+# shell: getenforce
+# register: selinux_enabled
+# when: selinux_installed.stdout != ""
+# ignore_errors: true

+#- name: decide to include or not include selinux tests
+# include: selinux_tests.yml
+# when: selinux_installed.stdout != "" and selinux_enabled.stdout != "Disabled"

+- name: remove directory foobar
+ win_file: path={{win_output_dir}}\foobar state=absent
+
+- name: remove file foo.txt
+ win_file: path={{win_output_dir}}\foo.txt state=absent
+
+- name: remove file bar.txt
+ win_file: path={{win_output_dir}}\bar.txt state=absent
+
+- name: remove file baz.txt
+ win_file: path={{win_output_dir}}\baz.txt state=absent
+
+- name: win copy directory structure over
+ win_copy: src=foobar dest={{win_output_dir}}
+
+- name: remove directory foobar
+ win_file: path={{win_output_dir}}\foobar state=absent
+ register: file14_result
+
+- debug: var=file14_result
+
+- name: verify that the directory was removed 
+ assert: + that: + - 'file14_result.changed == true' +# - 'file14_result.state == "absent"' + +- name: create a test sub-directory + win_file: dest={{win_output_dir}}/sub1 state=directory + register: file15_result + +- name: verify that the new directory was created + assert: + that: + - 'file15_result.changed == true' +# - 'file15_result.state == "directory"' + +- name: create test files in the sub-directory + win_file: dest={{win_output_dir}}/sub1/{{item}} state=touch + with_items: + - file1 + - file2 + - file3 + register: file16_result + +- name: verify the files were created + assert: + that: + - 'item.changed == true' +# - 'item.state == "file"' + with_items: file16_result.results + +#- name: try to force the sub-directory to a link +# win_file: src={{win_output_dir}}/testing dest={{win_output_dir}}/sub1 state=link force=yes +# register: file17_result +# ignore_errors: true + +#- name: verify the directory was not replaced with a link +# assert: +# that: +# - 'file17_result.failed == true' +# - 'file17_result.state == "directory"' + +#- name: create soft link to directory using absolute path +# win_file: src=/ dest={{win_output_dir}}/root state=link +# register: file18_result +# +#- name: verify that the result was marked as changed +# assert: +# that: +# - "file18_result.changed == true" +# +- name: create another test sub-directory + win_file: dest={{win_output_dir}}/sub2 state=directory + register: file19_result + +- name: verify that the new directory was created + assert: + that: + - 'file19_result.changed == true' +# - 'file19_result.state == "directory"' + +#- name: create soft link to relative file +# win_file: src=../sub1/file1 dest={{win_output_dir}}/sub2/link1 state=link +# register: file20_result +# +#- name: verify that the result was marked as changed +# assert: +# that: +# - "file20_result.changed == true" + +#- name: create soft link to relative directory +# win_file: src=sub1 dest={{win_output_dir}}/sub1-link state=link +# register: 
file21_result +# +#- name: verify that the result was marked as changed +# assert: +# that: +# - "file21_result.changed == true" +# +#- name: test file creation with symbolic mode +# win_file: dest={{win_output_dir}}/test_symbolic state=touch mode=u=rwx,g=rwx,o=rwx +# register: result +# +#- name: assert file mode +# assert: +# that: +# - result.mode == '0777' + +#- name: modify symbolic mode for all +# win_file: dest={{win_output_dir}}/test_symbolic state=touch mode=a=r +# register: result +# +#- name: assert file mode +# assert: +# that: +# - result.mode == '0444' + +#- name: modify symbolic mode for owner +# win_file: dest={{win_output_dir}}/test_symbolic state=touch mode=u+w +# register: result +# +#- name: assert file mode +# assert: +# that: +# - result.mode == '0644' + +#- name: modify symbolic mode for group +# win_file: dest={{win_output_dir}}/test_symbolic state=touch mode=g+w +# register: result +# +#- name: assert file mode +# assert: +# that: +# - result.mode == '0664' +# +#- name: modify symbolic mode for world +# win_file: dest={{win_output_dir}}/test_symbolic state=touch mode=o+w +# register: result +# +#- name: assert file mode +# assert: +# that: +# - result.mode == '0666' +# +#- name: modify symbolic mode for owner +# win_file: dest={{win_output_dir}}/test_symbolic state=touch mode=u+x +# register: result +# +#- name: assert file mode +# assert: +# that: +# - result.mode == '0766' +## +#- name: modify symbolic mode for group +# win_file: dest={{win_output_dir}}/test_symbolic state=touch mode=g+x +# register: result +# +#- name: assert file mode +# assert: +# that: +# - result.mode == '0776' +# +#- name: modify symbolic mode for world +# win_file: dest={{win_output_dir}}/test_symbolic state=touch mode=o+x +# register: result +# +#- name: assert file mode +# assert: +# that: +# - result.mode == '0777' + +#- name: remove symbolic mode for world +# win_file: dest={{win_output_dir}}/test_symbolic state=touch mode=o-wx +# register: result +# +#- name: 
assert file mode +# assert: +# that: +# - result.mode == '0774' +# +#- name: remove symbolic mode for group +# win_file: dest={{win_output_dir}}/test_symbolic state=touch mode=g-wx +# register: result +# +#- name: assert file mode +### assert: +# that: +# - result.mode == '0744' + +#- name: remove symbolic mode for owner +# win_file: dest={{win_output_dir}}/test_symbolic state=touch mode=u-wx +# register: result + +#- name: assert file mode +# assert: +# that: +# - result.mode == '0444' +# +#- name: set sticky bit with symbolic mode +# win_file: dest={{win_output_dir}}/test_symbolic state=touch mode=o+t +# register: result + +#- name: assert file mode +# assert: +# that: +# - result.mode == '01444' +# +#- name: remove sticky bit with symbolic mode +# win_file: dest={{win_output_dir}}/test_symbolic state=touch mode=o-t +# register: result +# +#- name: assert file mode +# assert: +# that: +# - result.mode == '0444' + +#- name: add setgid with symbolic mode +# win_file: dest={{win_output_dir}}/test_symbolic state=touch mode=g+s +# register: result +# +#- name: assert file mode +# assert: +# that: +# - result.mode == '02444' +# +#- name: remove setgid with symbolic mode +# win_file: dest={{win_output_dir}}/test_symbolic state=touch mode=g-s +# register: result +# +#- name: assert file mode +# assert: +# that: +# - result.mode == '0444' + +#- name: add setuid with symbolic mode +# win_file: dest={{win_output_dir}}/test_symbolic state=touch mode=u+s +# register: result +# +#- name: assert file mode +# assert: +# that: +# - result.mode == '04444' + +#- name: remove setuid with symbolic mode +# win_file: dest={{win_output_dir}}/test_symbolic state=touch mode=u-s +# register: result +# +#- name: assert file mode +# assert: +# that: +# - result.mode == '0444' + +# test the file module using follow=yes, so that the target of a +# symlink is modified, rather than the link itself + +#- name: create a test file +# win_copy: dest={{win_output_dir}}\test_follow content="this is a 
test file\n" mode=0666 + +#- name: create a symlink to the test file +# win_file: path={{win_output_dir}}\test_follow_link src="./test_follow" state=link +# +#- name: modify the permissions on the link using follow=yes +# win_file: path={{win_output_dir}}\test_follow_link mode=0644 follow=yes +# register: result + +#- name: assert that the chmod worked +# assert: +# that: +# - result.changed +# +#- name: stat the link target +# win_stat: path={{win_output_dir}}/test_follow +# register: result +# +#- name: assert that the link target was modified correctly +# assert: +# that: +## - result.stat.mode == '0644' + +- name: clean up sub1 + win_file: path={{win_output_dir}}/sub1 state=absent + +- name: clean up sub2 + win_file: path={{win_output_dir}}/sub2 state=absent + diff --git a/test/integration/roles/test_win_template/files/foo.txt b/test/integration/roles/test_win_template/files/foo.txt new file mode 100644 index 0000000000..3e96db9b3e --- /dev/null +++ b/test/integration/roles/test_win_template/files/foo.txt @@ -0,0 +1 @@ +templated_var_loaded diff --git a/test/integration/roles/test_win_template/meta/main.yml b/test/integration/roles/test_win_template/meta/main.yml new file mode 100644 index 0000000000..55200b3fc6 --- /dev/null +++ b/test/integration/roles/test_win_template/meta/main.yml @@ -0,0 +1,3 @@ +dependencies: + - prepare_win_tests + diff --git a/test/integration/roles/test_win_template/tasks/main.yml b/test/integration/roles/test_win_template/tasks/main.yml new file mode 100644 index 0000000000..9c2ea920ff --- /dev/null +++ b/test/integration/roles/test_win_template/tasks/main.yml @@ -0,0 +1,103 @@ +# test code for the template module +# (c) 2014, Michael DeHaan + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +- name: fill in a basic template +# win_template: src=foo.j2 dest={{win_output_dir}}/foo.templated mode=0644 + win_template: src=foo.j2 dest={{win_output_dir}}/foo.templated + register: template_result + +- assert: + that: + - "'changed' in template_result" +# - "'dest' in template_result" +# - "'group' in template_result" +# - "'gid' in template_result" +# - "'checksum' in template_result" +# - "'owner' in template_result" +# - "'size' in template_result" +# - "'src' in template_result" +# - "'state' in template_result" +# - "'uid' in template_result" + +- name: verify that the file was marked as changed + assert: + that: + - "template_result.changed == true" + +# VERIFY CONTENTS + +- name: copy known good into place + win_copy: src=foo.txt dest={{win_output_dir}}\foo.txt + +- name: compare templated file to known good + raw: fc.exe {{win_output_dir}}\foo.templated {{win_output_dir}}\foo.txt + register: diff_result + +- debug: var=diff_result + +- name: verify templated file matches known good + assert: + that: +# - 'diff_result.stdout == ""' + - 'diff_result.stdout_lines[1] == "FC: no differences encountered"' + - "diff_result.rc == 0" + +# VERIFY MODE +# can't set file mode on windows so commenting this test out +#- name: set file mode +# win_file: path={{win_output_dir}}/foo.templated mode=0644 +# register: file_result + +#- name: ensure file mode did not change +# assert: +# that: +# - "file_result.changed != True" + +# commenting out all the following tests as expanduser and file modes not windows concepts. 
+ +# VERIFY dest as a directory does not break file attributes +# Note: expanduser is needed to go down the particular codepath that was broken before +#- name: setup directory for test +# win_file: state=directory dest={{win_output_dir | expanduser}}/template-dir mode=0755 owner=nobody group=root + +#- name: set file mode when the destination is a directory +# win_template: src=foo.j2 dest={{win_output_dir | expanduser}}/template-dir/ mode=0600 owner=root group=root + +#- name: set file mode when the destination is a directory +# win_template: src=foo.j2 dest={{win_output_dir | expanduser}}/template-dir/ mode=0600 owner=root group=root +# register: file_result +# +#- name: check that the file has the correct attributes +# win_stat: path={{win_output_dir | expanduser}}/template-dir/foo.j2 +# register: file_attrs +# +#- assert: +# that: +# - "file_attrs.stat.uid == 0" +# - "file_attrs.stat.pw_name == 'root'" +# - "file_attrs.stat.mode == '0600'" +# +#- name: check that the containing directory did not change attributes +# win_stat: path={{win_output_dir | expanduser}}/template-dir/ +# register: dir_attrs +# +#- assert: +# that: +# - "dir_attrs.stat.uid != 0" +# - "dir_attrs.stat.pw_name == 'nobody'" +# - "dir_attrs.stat.mode == '0755'" diff --git a/test/integration/roles/test_win_template/templates/foo.j2 b/test/integration/roles/test_win_template/templates/foo.j2 new file mode 100644 index 0000000000..55aab8f1ea --- /dev/null +++ b/test/integration/roles/test_win_template/templates/foo.j2 @@ -0,0 +1 @@ +{{ templated_var }} diff --git a/test/integration/roles/test_win_template/vars/main.yml b/test/integration/roles/test_win_template/vars/main.yml new file mode 100644 index 0000000000..1e8f64ccf4 --- /dev/null +++ b/test/integration/roles/test_win_template/vars/main.yml @@ -0,0 +1 @@ +templated_var: templated_var_loaded diff --git a/test/integration/test_winrm.yml b/test/integration/test_winrm.yml index 415f381d46..e2a282e061 100644 --- 
a/test/integration/test_winrm.yml +++ b/test/integration/test_winrm.yml @@ -30,3 +30,6 @@ - { role: test_win_msi, tags: test_win_msi } - { role: test_win_service, tags: test_win_service } - { role: test_win_feature, tags: test_win_feature } + - { role: test_win_file, tags: test_win_file } + - { role: test_win_copy, tags: test_win_copy } + - { role: test_win_template, tags: test_win_template } From e37b63386c0c77e8ab8216e5520be80400ea6170 Mon Sep 17 00:00:00 2001 From: Jon Hawkesworth Date: Tue, 9 Dec 2014 23:38:35 +0000 Subject: [PATCH 540/813] Revised following comments from Chris Church. Now uses sha1 checksums following merge of 9688. Also I undid the changes I made to fetch.py win_template.py now uses conn.shell.has_trailing_slash and conn.shell.join_path updated integration tests. --- lib/ansible/module_utils/powershell.ps1 | 6 +++--- lib/ansible/runner/action_plugins/fetch.py | 7 +++---- lib/ansible/runner/action_plugins/win_template.py | 5 ++--- test/integration/roles/test_win_copy/tasks/main.yml | 6 ++++-- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/lib/ansible/module_utils/powershell.ps1 b/lib/ansible/module_utils/powershell.ps1 index 57d2c1b101..ee7d3ddeca 100644 --- a/lib/ansible/module_utils/powershell.ps1 +++ b/lib/ansible/module_utils/powershell.ps1 @@ -142,14 +142,14 @@ Function ConvertTo-Bool return } -# Helper function to calculate md5 of a file in a way which powershell 3 +# Helper function to calculate a hash of a file in a way which powershell 3 # and above can handle: -Function Get-FileMd5($path) +Function Get-FileChecksum($path) { $hash = "" If (Test-Path -PathType Leaf $path) { - $sp = new-object -TypeName System.Security.Cryptography.MD5CryptoServiceProvider; + $sp = new-object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider; $fp = [System.IO.File]::Open($path, [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read); [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower(); 
$fp.Dispose(); diff --git a/lib/ansible/runner/action_plugins/fetch.py b/lib/ansible/runner/action_plugins/fetch.py index 61f9f032a3..3fa748ccbd 100644 --- a/lib/ansible/runner/action_plugins/fetch.py +++ b/lib/ansible/runner/action_plugins/fetch.py @@ -127,13 +127,13 @@ class ActionModule(object): elif remote_checksum == '2': result = dict(msg="no read permission on remote file, not transferring, ignored", file=source, changed=False) elif remote_checksum == '3': - result = dict(failed=True, msg="remote file is a directory, fetch cannot work on directories", file=source, changed=False) + result = dict(msg="remote file is a directory, fetch cannot work on directories", file=source, changed=False) elif remote_checksum == '4': result = dict(msg="python isn't present on the system. Unable to compute checksum", file=source, changed=False) return ReturnData(conn=conn, result=result) # calculate checksum for the local file - local_checksum = utils.md5(dest) + local_checksum = utils.checksum(dest) if remote_checksum != local_checksum: # create the containing directories, if needed @@ -147,8 +147,7 @@ class ActionModule(object): f = open(dest, 'w') f.write(remote_data) f.close() - new_checksum = utils.md5(dest) - # new_checksum = utils.secure_hash(dest) + new_checksum = utils.secure_hash(dest) # For backwards compatibility. We'll return None on FIPS enabled # systems try: diff --git a/lib/ansible/runner/action_plugins/win_template.py b/lib/ansible/runner/action_plugins/win_template.py index e284316191..e32a5806c4 100644 --- a/lib/ansible/runner/action_plugins/win_template.py +++ b/lib/ansible/runner/action_plugins/win_template.py @@ -75,10 +75,9 @@ class ActionModule(object): else: source = utils.path_dwim(self.runner.basedir, source) - - if dest.endswith("\\"): # TODO: Check that this fixes the path for Windows hosts. 
+ if conn.shell.path_has_trailing_slash(dest): base = os.path.basename(source) - dest = os.path.join(dest, base) + dest = conn.shell.join_path(dest, base) # template the source data locally & get ready to transfer try: diff --git a/test/integration/roles/test_win_copy/tasks/main.yml b/test/integration/roles/test_win_copy/tasks/main.yml index f0fe2d04c4..d898219a85 100644 --- a/test/integration/roles/test_win_copy/tasks/main.yml +++ b/test/integration/roles/test_win_copy/tasks/main.yml @@ -62,12 +62,14 @@ - name: verify that the file checksum is correct assert: that: - - "copy_result.checksum[0] == 'c47397529fe81ab62ba3f85e9f4c71f2'" + - "copy_result.checksum[0] == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6'" - name: check the stat results of the file win_stat: path={{output_file}} register: stat_results +- debug: var=stat_results + - name: assert the stat results are correct assert: that: @@ -76,7 +78,7 @@ # - "stat_results.stat.isfifo == false" # - "stat_results.stat.isreg == true" # - "stat_results.stat.issock == false" - - "stat_results.stat.md5[0] == 'c47397529fe81ab62ba3f85e9f4c71f2'" + - "stat_results.stat.checksum[0] == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6'" - name: overwrite the file via same means win_copy: src=foo.txt dest={{output_file}} From 28e69b90178fb169db39f02bf9adef5ea0ced102 Mon Sep 17 00:00:00 2001 From: Kale Franz Date: Mon, 15 Dec 2014 22:31:29 -0800 Subject: [PATCH 541/813] Allow ec2 tags to be used to address servers in ec2 dynamic inventory. --- plugins/inventory/ec2.ini | 7 +++++-- plugins/inventory/ec2.py | 8 ++++++-- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/plugins/inventory/ec2.ini b/plugins/inventory/ec2.ini index c66bf309b1..66f65a69d2 100644 --- a/plugins/inventory/ec2.ini +++ b/plugins/inventory/ec2.ini @@ -24,14 +24,17 @@ regions_exclude = us-gov-west-1,cn-north-1 # This is the normal destination variable to use. If you are running Ansible # from outside EC2, then 'public_dns_name' makes the most sense. 
If you are # running Ansible from within EC2, then perhaps you want to use the internal -# address, and should set this to 'private_dns_name'. +# address, and should set this to 'private_dns_name'. The key of an EC2 tag +# may optionally be used; however the boto instance variables hold precedence +# in the event of a collision. destination_variable = public_dns_name # For server inside a VPC, using DNS names may not make sense. When an instance # has 'subnet_id' set, this variable is used. If the subnet is public, setting # this to 'ip_address' will return the public IP address. For instances in a # private subnet, this should be set to 'private_ip_address', and Ansible must -# be run from with EC2. +# be run from with EC2. The key of an EC2 tag may optionally be used; however +# the boto instance variables hold precedence in the event of a collision. vpc_destination_variable = ip_address # To tag instances on EC2 with the resource records that point to them from diff --git a/plugins/inventory/ec2.py b/plugins/inventory/ec2.py index 9d2dec38d3..573a4cbb21 100755 --- a/plugins/inventory/ec2.py +++ b/plugins/inventory/ec2.py @@ -385,9 +385,13 @@ class Ec2Inventory(object): # Select the best destination address if instance.subnet_id: - dest = getattr(instance, self.vpc_destination_variable) + dest = getattr(instance, self.vpc_destination_variable, None) + if dest is None: + dest = getattr(instance, 'tags').get(self.vpc_destination_variable, None) else: - dest = getattr(instance, self.destination_variable) + dest = getattr(instance, self.destination_variable, None) + if dest is None: + dest = getattr(instance, 'tags').get(self.destination_variable, None) if not dest: # Skip instances we cannot address (e.g. private VPC subnet) From fbadcfd44fb86752abd19616cb56acb25b89cae4 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 16 Dec 2014 09:20:11 -0800 Subject: [PATCH 542/813] Fix for not all platforms having optional libraries at the proper versions. 
--- bin/ansible | 11 +++++++++++ bin/ansible-playbook | 10 +++++++++- bin/ansible-vault | 10 +++++++++- 3 files changed, 29 insertions(+), 2 deletions(-) diff --git a/bin/ansible b/bin/ansible index b82a18d3d3..7ba615dbc0 100755 --- a/bin/ansible +++ b/bin/ansible @@ -19,6 +19,17 @@ ######################################################## +__requires__ = ['ansible'] +try: + import pkg_resources +except Exception: + # Use pkg_resources to find the correct versions of libraries and set + # sys.path appropriately when there are multiversion installs. But we + # have code that better expresses the errors in the places where the code + # is actually used (the deps are optional for many code paths) so we don't + # want to fail here. + pass + import os import sys diff --git a/bin/ansible-playbook b/bin/ansible-playbook index 93645903f7..d25a3710d8 100755 --- a/bin/ansible-playbook +++ b/bin/ansible-playbook @@ -19,7 +19,15 @@ ####################################################### __requires__ = ['ansible'] -import pkg_resources +try: + import pkg_resources +except Exception: + # Use pkg_resources to find the correct versions of libraries and set + # sys.path appropriately when there are multiversion installs. But we + # have code that better expresses the errors in the places where the code + # is actually used (the deps are optional for many code paths) so we don't + # want to fail here. + pass import sys import os diff --git a/bin/ansible-vault b/bin/ansible-vault index 3079b31d9e..22cfc0e148 100755 --- a/bin/ansible-vault +++ b/bin/ansible-vault @@ -19,7 +19,15 @@ # http://docs.ansible.com/playbooks_vault.html for more details. __requires__ = ['ansible'] -import pkg_resources +try: + import pkg_resources +except Exception: + # Use pkg_resources to find the correct versions of libraries and set + # sys.path appropriately when there are multiversion installs. 
But we + # have code that better expresses the errors in the places where the code + # is actually used (the deps are optional for many code paths) so we don't + # want to fail here. + pass import os import sys From a5b3b59bd1f2e3ec405377a2f5450e1c20a303bc Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 16 Dec 2014 11:09:42 -0800 Subject: [PATCH 543/813] Comment remote_port in the example ansible.cfg so users do not override their .ssh/config settings by default --- examples/ansible.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/ansible.cfg b/examples/ansible.cfg index a89fa47664..9f1d3c53cb 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -21,7 +21,7 @@ sudo_user = root #ask_sudo_pass = True #ask_pass = True transport = smart -remote_port = 22 +#remote_port = 22 module_lang = C # plays will gather facts by default, which contain information about From 07e483b0c02f2500d0326bc270c306c3c80d4deb Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 16 Dec 2014 11:17:29 -0800 Subject: [PATCH 544/813] Update core modules --- lib/ansible/modules/core | 2 +- test/integration/Makefile | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 08c5cc06c6..54b836f0b8 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 08c5cc06c6ad9a1e0016ad89eb0f7ca009cc8108 +Subproject commit 54b836f0b885543b6cb982e4e1155d97d8b487a1 diff --git a/test/integration/Makefile b/test/integration/Makefile index cf15c753cf..1985f38de6 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -37,6 +37,9 @@ unicode: # Test the start-at-task flag #9571 ansible-playbook unicode.yml -i $(INVENTORY) -e @$(VARS_FILE) -v --start-at-task '*¶' -e 'start_at_task=True' $(TEST_FLAGS) +mine: + ansible-playbook mine.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) + non_destructive: ansible-playbook 
non_destructive.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) From 735c8d3c4bef44e008fc7ef92e889addba8b2564 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 16 Dec 2014 11:51:02 -0800 Subject: [PATCH 545/813] Update core so docs build --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 54b836f0b8..51ed13b887 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 54b836f0b885543b6cb982e4e1155d97d8b487a1 +Subproject commit 51ed13b8874a90ad3191301647901234eb10f02b From b74a02301bd65264c681c267fa8396585541d327 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 16 Dec 2014 12:25:41 -0800 Subject: [PATCH 546/813] Fix documentation syntax problems --- docsite/rst/YAMLSyntax.rst | 2 +- docsite/rst/guide_aws.rst | 2 +- docsite/rst/playbooks_best_practices.rst | 6 ++---- docsite/rst/playbooks_startnstep.rst | 4 ++-- docsite/rst/playbooks_variables.rst | 2 +- 5 files changed, 7 insertions(+), 9 deletions(-) diff --git a/docsite/rst/YAMLSyntax.rst b/docsite/rst/YAMLSyntax.rst index 9e5ef31103..4b85ebac30 100644 --- a/docsite/rst/YAMLSyntax.rst +++ b/docsite/rst/YAMLSyntax.rst @@ -25,7 +25,7 @@ Ansible or not) should begin with ``---``. This is part of the YAML format and indicates the start of a document. All members of a list are lines beginning at the same indentation level starting -with a ``- `` (dash and whitespace) character:: +with a ``"- "`` (dash and whitespace) character:: --- # A list of tasty fruits diff --git a/docsite/rst/guide_aws.rst b/docsite/rst/guide_aws.rst index c91c6478e9..2daf8ec27e 100644 --- a/docsite/rst/guide_aws.rst +++ b/docsite/rst/guide_aws.rst @@ -223,7 +223,7 @@ Generally speaking, we find most users using Packer. If you do not want to adopt Packer at this time, configuring a base-image with Ansible after provisioning (as shown above) is acceptable. -.. 
aws_next_steps:: +.. _aws_next_steps: Next Steps: Explore Modules ``````````````````````````` diff --git a/docsite/rst/playbooks_best_practices.rst b/docsite/rst/playbooks_best_practices.rst index cec48679cc..43c642d583 100644 --- a/docsite/rst/playbooks_best_practices.rst +++ b/docsite/rst/playbooks_best_practices.rst @@ -66,9 +66,7 @@ The top level of the directory would contain files and directories like so:: monitoring/ # "" fooapp/ # "" -.. note: If you find yourself having too many top level playbooks (for instance you have a playbook you wrote for a specific hotfix, etc), it may -make sense to have a playbooks/ directory instead. This can be a good idea as you get larger. If you do this, -configure your roles_path in ansible.cfg to find your roles location. +.. note: If you find yourself having too many top level playbooks (for instance you have a playbook you wrote for a specific hotfix, etc), it may make sense to have a playbooks/ directory instead. This can be a good idea as you get larger. If you do this, configure your roles_path in ansible.cfg to find your roles location. .. _use_dynamic_inventory_with_clouds: @@ -367,7 +365,7 @@ If group-specific settings are needed, this can also be done. For example:: In the above example, CentOS machines get the value of '42' for asdf, but other machines get '10'. This can be used not only to set variables, but also to apply certain roles to only certain systems. -Alternatively, if only variables are needed: +Alternatively, if only variables are needed:: - hosts: all tasks: diff --git a/docsite/rst/playbooks_startnstep.rst b/docsite/rst/playbooks_startnstep.rst index ac06962cf2..1067c3e121 100644 --- a/docsite/rst/playbooks_startnstep.rst +++ b/docsite/rst/playbooks_startnstep.rst @@ -4,7 +4,7 @@ Start and Step This shows a few alternative ways to run playbooks. These modes are very useful for testing new plays or debugging. -.. _start_at_task +.. 
_start_at_task: Start-at-task ````````````` @@ -15,7 +15,7 @@ If you want to start executing your playbook at a particular task, you can do so The above will start executing your playbook at a task named "install packages". -.. _step +.. _step: Step ```` diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index 3a52261360..e198a45472 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -911,7 +911,7 @@ The contents of each variables file is a simple YAML dictionary, like this:: .. note:: It's also possible to keep per-host and per-group variables in very - similar files, this is covered in :doc:`intro_patterns`. + similar files, this is covered in :ref:`splitting_out_vars`. .. _passing_variables_on_the_command_line: From cc5a5978ef7642137b2d439c410345e86fa211ed Mon Sep 17 00:00:00 2001 From: Nathan Cahill Date: Tue, 16 Dec 2014 13:30:59 -0700 Subject: [PATCH 547/813] fix vars_prompt no/false values --- lib/ansible/callbacks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/callbacks.py b/lib/ansible/callbacks.py index a4b62fb005..21ca4a49c9 100644 --- a/lib/ansible/callbacks.py +++ b/lib/ansible/callbacks.py @@ -672,7 +672,7 @@ class PlaybookCallbacks(object): result = prompt(msg, private) # if result is false and default is not None - if not result and default: + if not result and default is not None: result = default From 7a43d4005026234cc8227147387d4782e2289d9e Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 16 Dec 2014 17:47:50 -0500 Subject: [PATCH 548/813] math filters! 
--- docsite/rst/playbooks_variables.rst | 39 +++++++++ lib/ansible/runner/filter_plugins/math.py | 96 +++++++++++++++++++++++ 2 files changed, 135 insertions(+) create mode 100644 lib/ansible/runner/filter_plugins/math.py diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index 3a52261360..e42fdce22d 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -310,6 +310,45 @@ To get a random list from an existing list:: {{ ['a','b','c']|shuffle }} => ['b','c','a'] note that when used with a non 'listable' item it is a noop, otherwise it always returns a list +j + +.. _math_stuff: + +Math +-------------------- +.. versionadded:: 1.9 + +To get the absolute value of a number:: + + {{ -23 | abs }} + +To see if something is actually a number:: + + {{ myvar | isnan }} + +Rounding:: + + {{ myvar | ceil }} + {{ myvar | floor }} + +Get the logarithm (default is e):: + + {{ myvar | log }} + +Get the base 10 logarithm:: + + {{ myvar | log(10) }} + +Give me the power of 2! (or 5):: + + {{ myvar | pow(2) }} + {{ myvar | pow(5) }} + +Square root, or the 5th:: + + {{ myvar | root }} + {{ myvar | root(5) }} + .. _other_useful_filters: diff --git a/lib/ansible/runner/filter_plugins/math.py b/lib/ansible/runner/filter_plugins/math.py new file mode 100644 index 0000000000..f49635af72 --- /dev/null +++ b/lib/ansible/runner/filter_plugins/math.py @@ -0,0 +1,96 @@ +# (c) 2014, Brian Coca +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +import math +from ansible import errors + +def absolute(x): + + if isinstance(x, float): + return math.fabs(x) + elif isinstance(x, int): + return abs(x) + else + raise errors.AnsibleFilterError('abs() can only be used on numbers') + + +def cieling(x): + try: + return math.ciel(x) + except TypeError, e: + raise errors.AnsibleFilterError('ciel() can only be used on floats: %s' % str(e)) + + +def flooring(x): + try: + return math.floor(x) + except TypeError, e: + raise errors.AnsibleFilterError('floor() can only be used on floats: %s' % str(e)) + + +def isnotanumber(x): + try: + return math.isnan(x) + except TypeError, e: + return False + + +def logarithm(x, base=math.e): + try: + if base == 10: + return math.log10(x) + else: + return = math.log(x, base) + except TypeError, e: + raise errors.AnsibleFilterError('log() can only be used on numbers: %s' % str(e)) + + +def power(x): + try: + return math.pow(x,y) + except TypeError, e: + raise errors.AnsibleFilterError('pow() can only be used on numbers: %s' % str(e)) + + +def inversepower(x, base=2): + try: + if base == 2: + return math.sqrt(x) + else: + return math.pow(x, 1.0/float(base)) + except TypeError, e: + raise errors.AnsibleFilterError('root() can only be used on numbers: %s' % str(e)) + + +class FilterModule(object): + ''' Ansible math jinja2 filters ''' + + def filters(self): + return { + # general math + 'abs': absolute, + 'isnan': isnotanumber, + + # rounding + 'ceil': cieling, + 'floor': flooring, + + # exponents and logarithms + 'log': logarithm, + 'pow': power, + 'root': inversepower, + } From 6a3c26eb7022af5e78e23b44df738083d459a7a6 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 16 Dec 2014 17:49:32 -0500 Subject: [PATCH 549/813] removed stray j --- docsite/rst/playbooks_variables.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_variables.rst 
b/docsite/rst/playbooks_variables.rst index e42fdce22d..a341fa44e7 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -310,7 +310,7 @@ To get a random list from an existing list:: {{ ['a','b','c']|shuffle }} => ['b','c','a'] note that when used with a non 'listable' item it is a noop, otherwise it always returns a list -j + .. _math_stuff: From b07ce8b942d1d659257b0aebb6a17e1425e583d1 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 16 Dec 2014 17:57:21 -0500 Subject: [PATCH 550/813] removed redundant math functions as jinja2 provides abs() and round() already --- docsite/rst/playbooks_variables.rst | 10 ++------ lib/ansible/runner/filter_plugins/math.py | 29 ----------------------- 2 files changed, 2 insertions(+), 37 deletions(-) diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index a341fa44e7..c272f160a5 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -318,19 +318,11 @@ Math -------------------- .. versionadded:: 1.9 -To get the absolute value of a number:: - - {{ -23 | abs }} To see if something is actually a number:: {{ myvar | isnan }} -Rounding:: - - {{ myvar | ceil }} - {{ myvar | floor }} - Get the logarithm (default is e):: {{ myvar | log }} @@ -349,6 +341,8 @@ Square root, or the 5th:: {{ myvar | root }} {{ myvar | root(5) }} +Note that jinja2 already provides some like abs() and round(). + .. 
_other_useful_filters: diff --git a/lib/ansible/runner/filter_plugins/math.py b/lib/ansible/runner/filter_plugins/math.py index f49635af72..ce01ae573b 100644 --- a/lib/ansible/runner/filter_plugins/math.py +++ b/lib/ansible/runner/filter_plugins/math.py @@ -18,30 +18,6 @@ import math from ansible import errors -def absolute(x): - - if isinstance(x, float): - return math.fabs(x) - elif isinstance(x, int): - return abs(x) - else - raise errors.AnsibleFilterError('abs() can only be used on numbers') - - -def cieling(x): - try: - return math.ciel(x) - except TypeError, e: - raise errors.AnsibleFilterError('ciel() can only be used on floats: %s' % str(e)) - - -def flooring(x): - try: - return math.floor(x) - except TypeError, e: - raise errors.AnsibleFilterError('floor() can only be used on floats: %s' % str(e)) - - def isnotanumber(x): try: return math.isnan(x) @@ -82,13 +58,8 @@ class FilterModule(object): def filters(self): return { # general math - 'abs': absolute, 'isnan': isnotanumber, - # rounding - 'ceil': cieling, - 'floor': flooring, - # exponents and logarithms 'log': logarithm, 'pow': power, From c808c8a22ad40de15e1f3877212358fd2eacceb9 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 16 Dec 2014 16:09:17 -0800 Subject: [PATCH 551/813] Fix some of the new math filters --- lib/ansible/runner/filter_plugins/math.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/ansible/runner/filter_plugins/math.py b/lib/ansible/runner/filter_plugins/math.py index ce01ae573b..d069fbd391 100644 --- a/lib/ansible/runner/filter_plugins/math.py +++ b/lib/ansible/runner/filter_plugins/math.py @@ -21,7 +21,7 @@ from ansible import errors def isnotanumber(x): try: return math.isnan(x) - except TypeError, e: + except TypeError: return False @@ -30,14 +30,14 @@ def logarithm(x, base=math.e): if base == 10: return math.log10(x) else: - return = math.log(x, base) + return math.log(x, base) except TypeError, e: raise errors.AnsibleFilterError('log() 
can only be used on numbers: %s' % str(e)) -def power(x): +def power(x, y): try: - return math.pow(x,y) + return math.pow(x, y) except TypeError, e: raise errors.AnsibleFilterError('pow() can only be used on numbers: %s' % str(e)) From fa6e587654fb1bdb5090de940e644a5f98afb5ac Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 16 Dec 2014 16:09:50 -0800 Subject: [PATCH 552/813] Pull in some new fixes to core modules --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 51ed13b887..9c6826e928 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 51ed13b8874a90ad3191301647901234eb10f02b +Subproject commit 9c6826e9286f2e683c583ff11ccd562bfb5eed8c From 2664de55fb7bd36459575acd62762d3ae4155ea7 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 16 Dec 2014 16:59:22 -0800 Subject: [PATCH 553/813] Fix math filters --- lib/ansible/runner/filter_plugins/math.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/ansible/runner/filter_plugins/math.py b/lib/ansible/runner/filter_plugins/math.py index d069fbd391..7f6cc19555 100644 --- a/lib/ansible/runner/filter_plugins/math.py +++ b/lib/ansible/runner/filter_plugins/math.py @@ -15,6 +15,8 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+from __future__ import absolute_import + import math from ansible import errors From 6d785ca081d8e5aa141ff0b6b6b8a1cfd968f6d5 Mon Sep 17 00:00:00 2001 From: Nathan Cahill Date: Tue, 16 Dec 2014 20:58:38 -0700 Subject: [PATCH 554/813] support variables with dashes - fixes #9786 --- lib/ansible/utils/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index a735e9c0b0..b2e921eea2 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -269,7 +269,7 @@ def check_conditional(conditional, basedir, inject, fail_on_undefined=False): conditional = conditional.replace("jinja2_compare ","") # allow variable names - if conditional in inject and '-' not in str(inject[conditional]): + if conditional in inject: conditional = inject[conditional] conditional = template.template(basedir, conditional, inject, fail_on_undefined=fail_on_undefined) original = str(conditional).replace("jinja2_compare ","") From 64c256b3567c3b331cf2445bcc09514117090d5a Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 17 Dec 2014 13:46:14 -0800 Subject: [PATCH 555/813] git module now with clone parameter and update=no reverted --- lib/ansible/modules/core | 2 +- test/integration/roles/test_git/tasks/main.yml | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 9c6826e928..dfe7f6c6d6 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 9c6826e9286f2e683c583ff11ccd562bfb5eed8c +Subproject commit dfe7f6c6d631d665232f1f033eba2e2fe5542364 diff --git a/test/integration/roles/test_git/tasks/main.yml b/test/integration/roles/test_git/tasks/main.yml index cbdd8f9556..7e67c82882 100644 --- a/test/integration/roles/test_git/tasks/main.yml +++ b/test/integration/roles/test_git/tasks/main.yml @@ -146,6 +146,7 @@ git: repo: '{{ repo_format1 }}' update: no + clone: no 
accept_hostkey: yes register: git_result From ade083a2e3953f14cb37a93e0adaf9be1e0b204a Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 17 Dec 2014 13:54:39 -0800 Subject: [PATCH 556/813] Update changelog for the git clone parameter --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9a6668557d..a989cdcd44 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,8 @@ Ansible Changes By Release in progress, details pending +* Add a clone parameter to git module that allows you to get information about a remote repo even if it doesn't exist locally. + ## 1.8.1 "You Really Got Me" - Nov 26, 2014 * Various bug fixes in postgresql and mysql modules. From 1ac57a8a8fec74a65d30d935d17c18c74f5f5911 Mon Sep 17 00:00:00 2001 From: Rohan McGovern Date: Fri, 21 Nov 2014 15:55:31 +1000 Subject: [PATCH 557/813] test_git: add tests for `refspec' argument Includes a basic test of the clone and update cases. --- .../integration/roles/test_git/tasks/main.yml | 35 +++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/test/integration/roles/test_git/tasks/main.yml b/test/integration/roles/test_git/tasks/main.yml index 7e67c82882..4bdc1d8bd8 100644 --- a/test/integration/roles/test_git/tasks/main.yml +++ b/test/integration/roles/test_git/tasks/main.yml @@ -206,6 +206,41 @@ that: - 'git_result.failed' +# Same as the previous test, but this time we specify which ref +# contains the SHA1 +- name: update to revision by specifying the refspec + git: + repo: https://github.com/ansible/ansible-examples.git + dest: '{{ checkout_dir }}' + version: 2cfde3668b8bb10fbe2b9d5cec486025ad8cc51b + refspec: refs/pull/7/merge + +- name: check HEAD after update with refspec + command: git rev-parse HEAD chdir="{{ checkout_dir }}" + register: git_result + +- assert: + that: + - 'git_result.stdout == "2cfde3668b8bb10fbe2b9d5cec486025ad8cc51b"' + +- name: clear checkout_dir + file: state=absent path={{ checkout_dir }} + +- name: clone to 
revision by specifying the refspec + git: + repo: https://github.com/ansible/ansible-examples.git + dest: '{{ checkout_dir }}' + version: 2cfde3668b8bb10fbe2b9d5cec486025ad8cc51b + refspec: refs/pull/7/merge + +- name: check HEAD after update with refspec + command: git rev-parse HEAD chdir="{{ checkout_dir }}" + register: git_result + +- assert: + that: + - 'git_result.stdout == "2cfde3668b8bb10fbe2b9d5cec486025ad8cc51b"' + # # Submodule tests # From d7b36dd9e7652755951579788ae226bb98ee78a4 Mon Sep 17 00:00:00 2001 From: Dan Jenkins Date: Tue, 25 Mar 2014 09:54:04 +0000 Subject: [PATCH 558/813] Add a new inventory group so you can see instances in a particular VPC --- plugins/inventory/ec2.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/plugins/inventory/ec2.py b/plugins/inventory/ec2.py index 9d2dec38d3..37b2c5b05c 100755 --- a/plugins/inventory/ec2.py +++ b/plugins/inventory/ec2.py @@ -432,7 +432,11 @@ class Ec2Inventory(object): self.push(self.inventory, key_name, dest) if self.nested_groups: self.push_group(self.inventory, 'keys', key_name) - + + # Inventory: Group by VPC + if instance.vpc_id: + self.push(self.inventory, self.to_safe('vpc_id_' + instance.vpc_id), dest) + # Inventory: Group by security group try: for group in instance.groups: @@ -504,13 +508,13 @@ class Ec2Inventory(object): self.push(self.inventory, instance.availability_zone, dest) if self.nested_groups: self.push_group(self.inventory, region, instance.availability_zone) - + # Inventory: Group by instance type type_name = self.to_safe('type_' + instance.instance_class) self.push(self.inventory, type_name, dest) if self.nested_groups: self.push_group(self.inventory, 'types', type_name) - + # Inventory: Group by security group try: if instance.security_group: From d4bf3127ec2d0353f69d255c621613d4d7d8bfef Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Fri, 19 Dec 2014 13:21:55 +0100 Subject: [PATCH 559/813] Add a explicit error when fact_caching_connection 
is not set By default, jsonfile is not documented, and the error message when fact_caching_connection is not set is a bit puzzling, so a error message would be beeter ( documentation too ). While redis is faster for bigger setup, jsonfile is fine for a small setup and is easier to deploy. The module will then stop ansible-playbook, as this match better the philosophy of Ansible being a fail-fast system. --- lib/ansible/cache/jsonfile.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/ansible/cache/jsonfile.py b/lib/ansible/cache/jsonfile.py index 8b4c892a40..ca18974d3c 100644 --- a/lib/ansible/cache/jsonfile.py +++ b/lib/ansible/cache/jsonfile.py @@ -34,6 +34,8 @@ class CacheModule(BaseCacheModule): self._timeout = float(C.CACHE_PLUGIN_TIMEOUT) self._cache = {} self._cache_dir = C.CACHE_PLUGIN_CONNECTION # expects a dir path + if not self._cache_dir: + utils.exit("error, fact_caching_connection is not set, cannot use fact cache") if not os.path.exists(self._cache_dir): try: From ed380136bcd4657b852618bf0c1e471995e5fe79 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 19 Dec 2014 20:05:00 -0500 Subject: [PATCH 560/813] removed uneeded and posibly error producing json import now uses utils.jsonify which does proper utf8 encoding --- lib/ansible/cache/jsonfile.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/lib/ansible/cache/jsonfile.py b/lib/ansible/cache/jsonfile.py index ca18974d3c..a3768209bc 100644 --- a/lib/ansible/cache/jsonfile.py +++ b/lib/ansible/cache/jsonfile.py @@ -17,14 +17,12 @@ import os import time -import json import errno from ansible import constants as C from ansible import utils from ansible.cache.base import BaseCacheModule - class CacheModule(BaseCacheModule): """ A caching module backed by json files. @@ -70,12 +68,11 @@ class CacheModule(BaseCacheModule): cachefile = "%s/%s" % (self._cache_dir, key) try: - #TODO: check if valid keys can have invalid FS chars, base32? 
f = open(cachefile, 'w') except (OSError,IOError), e: utils.warning("error while trying to read %s : %s" % (cachefile, str(e))) else: - json.dump(value, f, ensure_ascii=False) + f.write(utils.jsonify(value)) finally: f.close() From e0f72d58610aeea198195851292d6f561aad9606 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 19 Dec 2014 20:08:06 -0500 Subject: [PATCH 561/813] fixed json encoding issue with redis --- lib/ansible/cache/redis.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/cache/redis.py b/lib/ansible/cache/redis.py index c55b74469d..776c6c7f80 100644 --- a/lib/ansible/cache/redis.py +++ b/lib/ansible/cache/redis.py @@ -20,9 +20,9 @@ import collections # FIXME: can we store these as something else before we ship it? import sys import time -import json from ansible import constants as C +from ansible.utils import jsonify from ansible.cache.base import BaseCacheModule try: @@ -65,7 +65,7 @@ class CacheModule(BaseCacheModule): return json.loads(value) def set(self, key, value): - value2 = json.dumps(value) + value2 = jsonify(value) if self._timeout > 0: # a timeout of 0 is handled as meaning 'never expire' self._cache.setex(self._make_key(key), int(self._timeout), value2) else: From 35b94948b9cbb48eac325c5e669c6adadc9f0be5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A9r=C3=A9mie=20Astori?= Date: Sun, 21 Dec 2014 23:42:01 -0500 Subject: [PATCH 562/813] Fix conditionals doc example Use most recent parameters for yum --- docsite/rst/playbooks_conditionals.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docsite/rst/playbooks_conditionals.rst b/docsite/rst/playbooks_conditionals.rst index a00ec916c4..d71a0d3c7a 100644 --- a/docsite/rst/playbooks_conditionals.rst +++ b/docsite/rst/playbooks_conditionals.rst @@ -166,11 +166,11 @@ To use this conditional import feature, you'll need facter or ohai installed pri you can of course push this out with Ansible if you like:: # for facter - ansible -m yum 
-a "pkg=facter ensure=installed" - ansible -m yum -a "pkg=ruby-json ensure=installed" + ansible -m yum -a "pkg=facter state=present" + ansible -m yum -a "pkg=ruby-json state=present" # for ohai - ansible -m yum -a "pkg=ohai ensure=installed" + ansible -m yum -a "pkg=ohai state=present" Ansible's approach to configuration -- separating variables from tasks, keeps your playbooks from turning into arbitrary code with ugly nested ifs, conditionals, and so on - and results From a93db1948e38217ef4d7e928754e9a9bd59412ac Mon Sep 17 00:00:00 2001 From: Jonathan Davila Date: Wed, 10 Dec 2014 11:43:37 -0500 Subject: [PATCH 563/813] expanded facts --- lib/ansible/module_utils/facts.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 38082fe854..198b93a282 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -571,6 +571,8 @@ class LinuxHardware(Hardware): platform = 'Linux' MEMORY_FACTS = ['MemTotal', 'SwapTotal', 'MemFree', 'SwapFree'] + EXTRA_MEMORY_FACTS = ['Buffers', 'Cached', 'SwapCached'] + def __init__(self): Hardware.__init__(self) @@ -587,6 +589,7 @@ class LinuxHardware(Hardware): return self.facts def get_memory_facts(self): + memstats = {} if not os.access("/proc/meminfo", os.R_OK): return for line in open("/proc/meminfo").readlines(): @@ -595,6 +598,26 @@ class LinuxHardware(Hardware): if key in LinuxHardware.MEMORY_FACTS: val = data[1].strip().split(' ')[0] self.facts["%s_mb" % key.lower()] = long(val) / 1024 + if key in LinuxHardware.MEMORY_FACTS or key in LinuxHardware.EXTRA_MEMORY_FACTS: + val = data[1].strip().split(' ')[0] + memstats[key.lower()] = long(val) / 1024 + self.facts['memory_mb'] = { + 'real' : { + 'total': memstats['memtotal'], + 'used': (memstats['memtotal'] - memstats['memfree']), + 'free': memstats['memfree'] + }, + 'nocache' : { + 'free': memstats['cached'] + memstats['memfree'] + memstats['buffers'], + 
'used': memstats['memtotal'] - (memstats['cached'] + memstats['memfree'] + memstats['buffers']) + }, + 'swap' : { + 'total': memstats['swaptotal'], + 'free': memstats['swapfree'], + 'used': memstats['swaptotal'] - memstats['swapfree'], + 'cached': memstats['swapcached'] + } + } def get_cpu_facts(self): i = 0 From 18536d68854d3eb03fad0b6f6e11a165492bfea6 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 22 Dec 2014 11:34:04 -0800 Subject: [PATCH 564/813] Pull in new refspec param for git module --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index dfe7f6c6d6..2cbe13a21d 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit dfe7f6c6d631d665232f1f033eba2e2fe5542364 +Subproject commit 2cbe13a21d4b4e0adfc643e9d9554fddd5e4f475 From 89da873dfffb85a5f8d66d533933a9cb6c9be2e2 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 22 Dec 2014 12:12:25 -0800 Subject: [PATCH 565/813] Fix bug in merged git refspec code --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 2cbe13a21d..8a03af6608 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 2cbe13a21d4b4e0adfc643e9d9554fddd5e4f475 +Subproject commit 8a03af66083da993c47a970cde44ab8fc39744b6 From f9c203feb68e224cd3d445568b39293f8a3d32ad Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 22 Dec 2014 15:15:29 -0800 Subject: [PATCH 566/813] Another try at a git fetch fix --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 8a03af6608..8f6ae92cf8 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 8a03af66083da993c47a970cde44ab8fc39744b6 +Subproject commit 
8f6ae92cf88beda287c6c11d8b4127239c3168e0 From b5e99c852e0d91a4f26e6103e9270ef7f970d893 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Tue, 23 Dec 2014 16:15:26 +0100 Subject: [PATCH 567/813] facts caching: fix missing json in jsonfile caching --- lib/ansible/cache/jsonfile.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/ansible/cache/jsonfile.py b/lib/ansible/cache/jsonfile.py index a3768209bc..1ccf9b4a55 100644 --- a/lib/ansible/cache/jsonfile.py +++ b/lib/ansible/cache/jsonfile.py @@ -19,6 +19,11 @@ import os import time import errno +try: + import json +except ImportError: + import simplejson as json + from ansible import constants as C from ansible import utils from ansible.cache.base import BaseCacheModule From fb5b68298954062ad7e8a36d58e6c0d04e2d1484 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 23 Dec 2014 12:01:35 -0800 Subject: [PATCH 568/813] Use "override" instead of "loaded second" to be clear about what happens Need for clarification brought up here: https://github.com/ansible/ansible/issues/9877 --- docsite/rst/intro_inventory.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/intro_inventory.rst b/docsite/rst/intro_inventory.rst index 5b409e8e65..a830b6b4b6 100644 --- a/docsite/rst/intro_inventory.rst +++ b/docsite/rst/intro_inventory.rst @@ -184,7 +184,7 @@ variables. Note that this only works on Ansible 1.4 or later. Tip: In Ansible 1.2 or later the group_vars/ and host_vars/ directories can exist in either the playbook directory OR the inventory directory. If both paths exist, variables in the playbook -directory will be loaded second. +directory will override variables set in the inventory directory. Tip: Keeping your inventory file and variables in a git repo (or other version control) is an excellent way to track changes to your inventory and host variables. 
From 5ed7a55990e446b0f4a214e3e7228e3483390635 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 23 Dec 2014 13:14:14 -0800 Subject: [PATCH 569/813] Restore json import for redis as well. Switch preference to simplejson for speed --- lib/ansible/cache/jsonfile.py | 4 ++-- lib/ansible/cache/redis.py | 5 +++++ lib/ansible/utils/__init__.py | 6 +++--- 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/lib/ansible/cache/jsonfile.py b/lib/ansible/cache/jsonfile.py index 1ccf9b4a55..b7d72c8d2e 100644 --- a/lib/ansible/cache/jsonfile.py +++ b/lib/ansible/cache/jsonfile.py @@ -20,9 +20,9 @@ import time import errno try: - import json -except ImportError: import simplejson as json +except ImportError: + import json from ansible import constants as C from ansible import utils diff --git a/lib/ansible/cache/redis.py b/lib/ansible/cache/redis.py index 776c6c7f80..7ae5ef74c1 100644 --- a/lib/ansible/cache/redis.py +++ b/lib/ansible/cache/redis.py @@ -21,6 +21,11 @@ import collections import sys import time +try: + import simplejson as json +except ImportError: + import json + from ansible import constants as C from ansible.utils import jsonify from ansible.cache.base import BaseCacheModule diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index a735e9c0b0..44db63e276 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -45,7 +45,6 @@ import warnings import traceback import getpass import sys -import json import subprocess import contextlib @@ -63,9 +62,10 @@ CODE_REGEX = re.compile(r'(?:{%|%})') try: - import json -except ImportError: + # simplejson can be much faster if it's available import simplejson as json +except ImportError: + import json # Note, sha1 is the only hash algorithm compatible with python2.4 and with # FIPS-140 mode (as of 11-2014) From 1ed9b6629e6413e9ddc3218f76adb61308b1d18f Mon Sep 17 00:00:00 2001 From: Costi Ciudatu Date: Thu, 11 Dec 2014 18:47:24 +0200 Subject: [PATCH 570/813] 
run_once tasks are skipped without checking the delegate_to host #9784 --- lib/ansible/runner/__init__.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 47c1faadeb..b926caf827 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -1461,9 +1461,15 @@ class Runner(object): # Expose the current hostgroup to the bypassing plugins self.host_set = hosts # We aren't iterating over all the hosts in this - # group. So, just pick the first host in our group to + # group. So, just choose the "delegate_to" host if that is defined and is + # one of the targeted hosts, otherwise pick the first host in our group to # construct the conn object with. - result_data = self._executor(hosts[0], None).result + if self.delegate_to is not None and self.delegate_to in hosts: + host = self.delegate_to + else: + host = hosts[0] + + result_data = self._executor(host, None).result # Create a ResultData item for each host in this group # using the returned result. If we didn't do this we would # get false reports of dark hosts. 
From dea0ee663f65d958365ab86c9c0e2bdf68efe786 Mon Sep 17 00:00:00 2001 From: Luke Macken Date: Wed, 24 Dec 2014 11:31:44 -0700 Subject: [PATCH 571/813] Use send instead of sendv on the systemd.journal (fixes #9886) --- lib/ansible/module_utils/basic.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index cee6510f34..1d5dfcdf31 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1104,12 +1104,11 @@ class AnsibleModule(object): msg = msg.encode('utf-8') if (has_journal): - journal_args = ["MESSAGE=%s %s" % (module, msg)] - journal_args.append("MODULE=%s" % os.path.basename(__file__)) + journal_args = [("MODULE", os.path.basename(__file__))] for arg in log_args: - journal_args.append(arg.upper() + "=" + str(log_args[arg])) + journal_args.append((arg.upper(), str(log_args[arg]))) try: - journal.sendv(*journal_args) + journal.send("%s %s" % (module, msg), **dict(journal_args)) except IOError, e: # fall back to syslog since logging to journal failed syslog.openlog(str(module), 0, syslog.LOG_USER) From 641c6a28599525b3ed7dba31c8dba00325e9d541 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 25 Dec 2014 00:25:51 -0800 Subject: [PATCH 572/813] Pull in apt changes --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 8f6ae92cf8..170457413d 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 8f6ae92cf88beda287c6c11d8b4127239c3168e0 +Subproject commit 170457413dd179c3154a4184cbe12ad1ab14c86e From a88e928bf0fc95eaf48272b4c5b2f6c139bf4ece Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ren=C3=A9=20Moser?= Date: Thu, 25 Dec 2014 23:33:56 +0100 Subject: [PATCH 573/813] doc: mention smart gathering for facts caching --- docsite/rst/playbooks_variables.rst | 1 + 1 file changed, 1 insertion(+) diff --git 
a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index b4c5943ffb..8b353f14cb 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -791,6 +791,7 @@ the fact that they have not been communicated with in the current execution of / To configure fact caching, enable it in ansible.cfg as follows:: [defaults] + gathering = smart fact_caching = redis fact_caching_timeout = 86400 # seconds From 41399dedaf923a9b95dd0c047803b9e8fd738e89 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sat, 27 Dec 2014 17:12:22 -0800 Subject: [PATCH 574/813] Update core modules for docker fix --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 170457413d..f9574cc318 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 170457413dd179c3154a4184cbe12ad1ab14c86e +Subproject commit f9574cc31862194a0350c142ad1616c2912e3946 From d2cae91dcfdf9da33a29fce34c61e8ca01cfaae2 Mon Sep 17 00:00:00 2001 From: Nate Eagleson Date: Mon, 29 Dec 2014 08:03:38 -0500 Subject: [PATCH 575/813] Improve wording in playbooks_intro.rst I was reading the docs and thought "that wording seems off." --- docsite/rst/playbooks_intro.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_intro.rst b/docsite/rst/playbooks_intro.rst index 4bc3bccf2d..7bcbbc9694 100644 --- a/docsite/rst/playbooks_intro.rst +++ b/docsite/rst/playbooks_intro.rst @@ -151,7 +151,7 @@ Just `Control-C` to kill it and run it again with `-K`. These are deleted immediately after the command is executed. This only occurs when sudoing from a user like 'bob' to 'timmy', not when going from 'bob' to 'root', or logging in directly as 'bob' or - 'root'. If this concerns you that this data is briefly readable + 'root'. 
If it concerns you that this data is briefly readable (not writable), avoid transferring uncrypted passwords with `sudo_user` set. In other cases, '/tmp' is not used and this does not come into play. Ansible also takes care to not log password From 410c8fc8251e2dd26a57f7468189eb199a053440 Mon Sep 17 00:00:00 2001 From: Nate Eagleson Date: Mon, 29 Dec 2014 08:39:39 -0500 Subject: [PATCH 576/813] Fix some typos in developing_modules.rst --- docsite/rst/developing_modules.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index decd5b305c..82edea9de8 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -441,12 +441,12 @@ Getting Your Module Into Ansible ```````````````````````````````` High-quality modules with minimal dependencies -can be included in the ansible, but modules (just due to the programming +can be included in Ansible, but modules (just due to the programming preferences of the developers) will need to be implemented in Python and use the AnsibleModule common code, and should generally use consistent arguments with the rest of the program. Stop by the mailing list to inquire about requirements if you like, and submit a github pull request to the `extras `_ project. -Included modules will ship with ansible, and also have a change to be promoted to 'core' status, which +Included modules will ship with ansible, and also have a chance to be promoted to 'core' status, which gives them slightly higher development priority (though they'll work in exactly the same way). 
From 62d79568be16084718bda2d890b2b4e1d10cc41d Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 14 Nov 2014 16:14:08 -0600 Subject: [PATCH 577/813] Creating playbook executor and dependent classes --- v2/ansible/__init__.py | 2 + v2/ansible/constants.py | 1 + v2/ansible/errors/__init__.py | 21 +- .../strings.py => errors/yaml_strings.py} | 0 v2/ansible/executor/connection_info.py | 167 ++ v2/ansible/executor/manager.py | 66 + v2/ansible/executor/module_common.py | 185 ++ v2/ansible/executor/play_iterator.py | 258 ++ v2/ansible/executor/playbook_executor.py | 113 +- v2/ansible/executor/playbook_iterator.py | 125 - .../executor/process}/__init__.py | 0 v2/ansible/executor/process/result.py | 155 + v2/ansible/executor/process/worker.py | 141 + v2/ansible/executor/task_executor.py | 194 +- v2/ansible/executor/task_queue_manager.py | 193 +- v2/ansible/executor/task_result.py | 36 + v2/ansible/inventory/__init__.py | 912 +++--- v2/ansible/inventory/dir.py | 229 ++ v2/ansible/inventory/expand_hosts.py | 116 + v2/ansible/inventory/group.py | 159 ++ v2/ansible/inventory/host.py | 127 + v2/ansible/inventory/ini.py | 215 ++ v2/ansible/inventory/script.py | 150 + v2/ansible/inventory/vars_plugins/__init__.py | 0 v2/ansible/inventory/vars_plugins/noop.py | 48 + v2/ansible/module_utils/__init__.py | 17 + v2/ansible/module_utils/a10.py | 103 + v2/ansible/module_utils/basic.py | 1556 ++++++++++ v2/ansible/module_utils/ec2.py | 194 ++ v2/ansible/module_utils/facts.py | 2451 ++++++++++++++++ v2/ansible/module_utils/gce.py | 87 + v2/ansible/module_utils/known_hosts.py | 176 ++ v2/ansible/module_utils/openstack.py | 69 + v2/ansible/module_utils/powershell.ps1 | 144 + v2/ansible/module_utils/rax.py | 277 ++ v2/ansible/module_utils/redhat.py | 280 ++ v2/ansible/module_utils/splitter.py | 201 ++ v2/ansible/module_utils/urls.py | 456 +++ v2/ansible/modules/core | 2 +- v2/ansible/new_inventory/__init__.py | 341 +++ .../inventory => new_inventory}/aggregate.py | 0 .../group.py} | 0 
v2/ansible/new_inventory/host.py | 51 + v2/ansible/parsing/__init__.py | 200 ++ v2/ansible/parsing/mod_args.py | 14 +- .../env.py => parsing/utils/__init__.py} | 28 +- v2/ansible/parsing/utils/jsonify.py | 26 + v2/ansible/parsing/yaml/__init__.py | 153 - v2/ansible/playbook/__init__.py | 40 +- v2/ansible/playbook/attribute.py | 3 +- v2/ansible/playbook/base.py | 150 +- v2/ansible/playbook/block.py | 105 +- v2/ansible/playbook/conditional.py | 81 +- v2/ansible/playbook/handler.py | 37 +- v2/ansible/playbook/helpers.py | 33 +- v2/ansible/playbook/play.py | 103 +- v2/ansible/playbook/role/__init__.py | 202 +- v2/ansible/playbook/role/definition.py | 8 +- v2/ansible/playbook/role/include.py | 7 +- v2/ansible/playbook/role/metadata.py | 16 +- v2/ansible/playbook/tag.py | 55 - v2/ansible/playbook/taggable.py | 46 + v2/ansible/playbook/task.py | 122 +- v2/ansible/playbook/task_include.py | 9 +- v2/ansible/plugins/__init__.py | 19 +- v2/ansible/plugins/action/__init__.py | 407 +++ v2/ansible/plugins/action/assemble.py | 159 ++ v2/ansible/plugins/action/assert.py | 54 + v2/ansible/plugins/action/copy.py | 384 +++ v2/ansible/plugins/action/debug.py | 46 + v2/ansible/plugins/action/include_vars.py | 48 + v2/ansible/plugins/action/normal.py | 40 + .../template.py => action/set_fact.py} | 21 +- v2/ansible/plugins/callback/__init__.py | 83 + v2/ansible/plugins/callback/default.py | 120 + v2/ansible/plugins/callback/minimal.py | 111 + v2/ansible/plugins/connections/__init__.py | 21 + v2/ansible/plugins/connections/accelerate.py | 371 +++ v2/ansible/plugins/connections/chroot.py | 130 + v2/ansible/plugins/connections/fireball.py | 151 + v2/ansible/plugins/connections/funcd.py | 99 + v2/ansible/plugins/connections/jail.py | 151 + v2/ansible/plugins/connections/libvirt_lxc.py | 127 + v2/ansible/plugins/connections/local.py | 138 + .../plugins/connections/paramiko_ssh.py | 417 +++ v2/ansible/plugins/connections/ssh.py | 487 ++++ v2/ansible/plugins/connections/winrm.py | 258 ++ 
v2/ansible/plugins/filter/core.py | 323 +++ v2/ansible/plugins/inventory/ini.py | 7 + v2/ansible/plugins/lookup/csvfile.py | 82 - v2/ansible/plugins/lookup/dict.py | 39 - v2/ansible/plugins/lookup/dnstxt.py | 68 - v2/ansible/plugins/lookup/etcd.py | 78 - v2/ansible/plugins/lookup/file.py | 59 - v2/ansible/plugins/lookup/first_found.py | 194 -- v2/ansible/plugins/lookup/flattened.py | 78 - .../plugins/lookup/inventory_hostnames.py | 48 - v2/ansible/plugins/lookup/items.py | 14 +- v2/ansible/plugins/lookup/lines.py | 38 - v2/ansible/plugins/lookup/nested.py | 73 - v2/ansible/plugins/lookup/password.py | 129 - v2/ansible/plugins/lookup/pipe.py | 52 - v2/ansible/plugins/lookup/redis_kv.py | 72 - v2/ansible/plugins/lookup/sequence.py | 204 -- v2/ansible/plugins/lookup/subelements.py | 67 - v2/ansible/plugins/lookup/together.py | 64 - v2/ansible/plugins/shell/csh.py | 23 + v2/ansible/plugins/shell/fish.py | 23 + v2/ansible/plugins/shell/powershell.py | 117 + v2/ansible/plugins/shell/sh.py | 115 + v2/ansible/plugins/strategies/__init__.py | 282 ++ v2/ansible/plugins/strategies/free.py | 105 + v2/ansible/plugins/strategies/linear.py | 84 + v2/ansible/template/__init__.py | 260 ++ v2/ansible/template/safe_eval.py | 118 + .../indexed_items.py => template/template.py} | 35 +- v2/ansible/template/vars.py | 87 + .../include.py => utils/__init__.py} | 0 .../lookup/fileglob.py => utils/boolean.py} | 30 +- v2/ansible/utils/cli.py | 214 ++ v2/ansible/utils/color.py | 75 + v2/ansible/utils/debug.py | 15 + v2/ansible/utils/display.py | 114 + v2/ansible/utils/hashing.py | 90 + v2/ansible/utils/vars.py | 51 + v2/ansible/vars/__init__.py | 145 +- v2/bin/ansible | 197 ++ v2/bin/ansible-playbook | 162 ++ v2/hacking/README.md | 48 + v2/hacking/authors.sh | 14 + v2/hacking/env-setup | 42 + v2/hacking/env-setup.fish | 57 + .../get_library.py} | 32 +- v2/hacking/module_formatter.py | 442 +++ v2/hacking/templates/rst.j2 | 153 + v2/hacking/test-module | 193 ++ v2/samples/README.md | 1 + 
v2/samples/inv_lg | 2540 +++++++++++++++++ v2/samples/inv_md | 1270 +++++++++ v2/samples/inv_sm | 254 ++ v2/samples/multi.py | 160 ++ v2/samples/multi_queues.py | 175 ++ v2/samples/roles/test_role/tasks/main.yml | 1 + v2/samples/src | 5 + v2/samples/test_big_debug.yml | 4 + v2/samples/test_big_ping.yml | 5 + v2/samples/test_fact_gather.yml | 7 + v2/samples/test_pb.yml | 70 + v2/samples/test_role.yml | 8 + v2/samples/testing/extra_vars.yml | 1 + v2/samples/testing/frag1 | 1 + v2/samples/testing/frag2 | 1 + v2/samples/testing/frag3 | 1 + v2/samples/testing/vars.yml | 1 + v2/test/errors/test_errors.py | 10 +- ...book_iterator.py => test_play_iterator.py} | 10 +- v2/test/mock/loader.py | 2 +- .../parsing/{yaml => }/test_data_loader.py | 2 +- 158 files changed, 22486 insertions(+), 2353 deletions(-) rename v2/ansible/{parsing/yaml/strings.py => errors/yaml_strings.py} (100%) create mode 100644 v2/ansible/executor/connection_info.py create mode 100644 v2/ansible/executor/manager.py create mode 100644 v2/ansible/executor/module_common.py create mode 100644 v2/ansible/executor/play_iterator.py delete mode 100644 v2/ansible/executor/playbook_iterator.py rename v2/{test/parsing/yaml => ansible/executor/process}/__init__.py (100%) create mode 100644 v2/ansible/executor/process/result.py create mode 100644 v2/ansible/executor/process/worker.py create mode 100644 v2/ansible/inventory/dir.py create mode 100644 v2/ansible/inventory/expand_hosts.py create mode 100644 v2/ansible/inventory/group.py create mode 100644 v2/ansible/inventory/host.py create mode 100644 v2/ansible/inventory/ini.py create mode 100644 v2/ansible/inventory/script.py create mode 100644 v2/ansible/inventory/vars_plugins/__init__.py create mode 100644 v2/ansible/inventory/vars_plugins/noop.py create mode 100644 v2/ansible/module_utils/__init__.py create mode 100644 v2/ansible/module_utils/a10.py create mode 100644 v2/ansible/module_utils/basic.py create mode 100644 v2/ansible/module_utils/ec2.py create mode 
100644 v2/ansible/module_utils/facts.py create mode 100644 v2/ansible/module_utils/gce.py create mode 100644 v2/ansible/module_utils/known_hosts.py create mode 100644 v2/ansible/module_utils/openstack.py create mode 100644 v2/ansible/module_utils/powershell.ps1 create mode 100644 v2/ansible/module_utils/rax.py create mode 100644 v2/ansible/module_utils/redhat.py create mode 100644 v2/ansible/module_utils/splitter.py create mode 100644 v2/ansible/module_utils/urls.py create mode 100644 v2/ansible/new_inventory/__init__.py rename v2/ansible/{plugins/inventory => new_inventory}/aggregate.py (100%) rename v2/ansible/{executor/template_engine.py => new_inventory/group.py} (100%) create mode 100644 v2/ansible/new_inventory/host.py rename v2/ansible/{plugins/lookup/env.py => parsing/utils/__init__.py} (51%) create mode 100644 v2/ansible/parsing/utils/jsonify.py delete mode 100644 v2/ansible/playbook/tag.py create mode 100644 v2/ansible/playbook/taggable.py create mode 100644 v2/ansible/plugins/action/assemble.py create mode 100644 v2/ansible/plugins/action/assert.py create mode 100644 v2/ansible/plugins/action/copy.py create mode 100644 v2/ansible/plugins/action/debug.py create mode 100644 v2/ansible/plugins/action/include_vars.py create mode 100644 v2/ansible/plugins/action/normal.py rename v2/ansible/plugins/{lookup/template.py => action/set_fact.py} (56%) create mode 100644 v2/ansible/plugins/callback/default.py create mode 100644 v2/ansible/plugins/callback/minimal.py create mode 100644 v2/ansible/plugins/connections/accelerate.py create mode 100644 v2/ansible/plugins/connections/chroot.py create mode 100644 v2/ansible/plugins/connections/fireball.py create mode 100644 v2/ansible/plugins/connections/funcd.py create mode 100644 v2/ansible/plugins/connections/jail.py create mode 100644 v2/ansible/plugins/connections/libvirt_lxc.py create mode 100644 v2/ansible/plugins/connections/local.py create mode 100644 v2/ansible/plugins/connections/paramiko_ssh.py create mode 
100644 v2/ansible/plugins/connections/ssh.py create mode 100644 v2/ansible/plugins/connections/winrm.py create mode 100644 v2/ansible/plugins/filter/core.py delete mode 100644 v2/ansible/plugins/lookup/csvfile.py delete mode 100644 v2/ansible/plugins/lookup/dict.py delete mode 100644 v2/ansible/plugins/lookup/dnstxt.py delete mode 100644 v2/ansible/plugins/lookup/etcd.py delete mode 100644 v2/ansible/plugins/lookup/file.py delete mode 100644 v2/ansible/plugins/lookup/first_found.py delete mode 100644 v2/ansible/plugins/lookup/flattened.py delete mode 100644 v2/ansible/plugins/lookup/inventory_hostnames.py delete mode 100644 v2/ansible/plugins/lookup/lines.py delete mode 100644 v2/ansible/plugins/lookup/nested.py delete mode 100644 v2/ansible/plugins/lookup/password.py delete mode 100644 v2/ansible/plugins/lookup/pipe.py delete mode 100644 v2/ansible/plugins/lookup/redis_kv.py delete mode 100644 v2/ansible/plugins/lookup/sequence.py delete mode 100644 v2/ansible/plugins/lookup/subelements.py delete mode 100644 v2/ansible/plugins/lookup/together.py create mode 100644 v2/ansible/plugins/shell/csh.py create mode 100644 v2/ansible/plugins/shell/fish.py create mode 100644 v2/ansible/plugins/shell/powershell.py create mode 100644 v2/ansible/plugins/shell/sh.py create mode 100644 v2/ansible/plugins/strategies/__init__.py create mode 100644 v2/ansible/plugins/strategies/free.py create mode 100644 v2/ansible/plugins/strategies/linear.py create mode 100644 v2/ansible/template/__init__.py create mode 100644 v2/ansible/template/safe_eval.py rename v2/ansible/{plugins/lookup/indexed_items.py => template/template.py} (50%) create mode 100644 v2/ansible/template/vars.py rename v2/ansible/{playbook/include.py => utils/__init__.py} (100%) rename v2/ansible/{plugins/lookup/fileglob.py => utils/boolean.py} (53%) create mode 100644 v2/ansible/utils/cli.py create mode 100644 v2/ansible/utils/color.py create mode 100644 v2/ansible/utils/debug.py create mode 100644 
v2/ansible/utils/display.py create mode 100644 v2/ansible/utils/hashing.py create mode 100644 v2/ansible/utils/vars.py create mode 100755 v2/bin/ansible create mode 100755 v2/bin/ansible-playbook create mode 100644 v2/hacking/README.md create mode 100755 v2/hacking/authors.sh create mode 100755 v2/hacking/env-setup create mode 100644 v2/hacking/env-setup.fish rename v2/{ansible/plugins/lookup/random_choice.py => hacking/get_library.py} (51%) mode change 100644 => 100755 create mode 100755 v2/hacking/module_formatter.py create mode 100644 v2/hacking/templates/rst.j2 create mode 100755 v2/hacking/test-module create mode 100644 v2/samples/README.md create mode 100644 v2/samples/inv_lg create mode 100644 v2/samples/inv_md create mode 100644 v2/samples/inv_sm create mode 100644 v2/samples/multi.py create mode 100644 v2/samples/multi_queues.py create mode 100644 v2/samples/roles/test_role/tasks/main.yml create mode 100644 v2/samples/src create mode 100644 v2/samples/test_big_debug.yml create mode 100644 v2/samples/test_big_ping.yml create mode 100644 v2/samples/test_fact_gather.yml create mode 100644 v2/samples/test_pb.yml create mode 100644 v2/samples/test_role.yml create mode 100644 v2/samples/testing/extra_vars.yml create mode 100644 v2/samples/testing/frag1 create mode 100644 v2/samples/testing/frag2 create mode 100644 v2/samples/testing/frag3 create mode 100644 v2/samples/testing/vars.yml rename v2/test/executor/{test_playbook_iterator.py => test_play_iterator.py} (90%) rename v2/test/parsing/{yaml => }/test_data_loader.py (98%) diff --git a/v2/ansible/__init__.py b/v2/ansible/__init__.py index ae8ccff595..26869775ea 100644 --- a/v2/ansible/__init__.py +++ b/v2/ansible/__init__.py @@ -18,3 +18,5 @@ # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type + +__version__ = '1.v2' diff --git a/v2/ansible/constants.py b/v2/ansible/constants.py index e74720b8a6..6adcdd0a9f 100644 --- a/v2/ansible/constants.py 
+++ b/v2/ansible/constants.py @@ -104,6 +104,7 @@ YAML_FILENAME_EXTENSIONS = [ "", ".yml", ".yaml", ".json" ] DEFAULTS='defaults' # configurable things +DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE_DEBUG', False, boolean=True) DEFAULT_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS, 'hostfile', 'ANSIBLE_HOSTS', '/etc/ansible/hosts')) DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None) DEFAULT_ROLES_PATH = shell_expand_path(get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles')) diff --git a/v2/ansible/errors/__init__.py b/v2/ansible/errors/__init__.py index 2813507df2..7effe41df7 100644 --- a/v2/ansible/errors/__init__.py +++ b/v2/ansible/errors/__init__.py @@ -21,7 +21,7 @@ __metaclass__ = type import os -from ansible.parsing.yaml.strings import * +from ansible.errors.yaml_strings import * class AnsibleError(Exception): ''' @@ -45,12 +45,12 @@ class AnsibleError(Exception): self._obj = obj self._show_content = show_content - if isinstance(self._obj, AnsibleBaseYAMLObject): + if obj and isinstance(obj, AnsibleBaseYAMLObject): extended_error = self._get_extended_error() if extended_error: - self.message = '%s\n\n%s' % (message, extended_error) + self.message = 'ERROR! %s\n\n%s' % (message, extended_error) else: - self.message = message + self.message = 'ERROR! 
%s' % message def __str__(self): return self.message @@ -98,8 +98,9 @@ class AnsibleError(Exception): (target_line, prev_line) = self._get_error_lines_from_file(src_file, line_number - 1) if target_line: stripped_line = target_line.replace(" ","") - arrow_line = (" " * (col_number-1)) + "^" - error_message += "%s\n%s\n%s\n" % (prev_line.rstrip(), target_line.rstrip(), arrow_line) + arrow_line = (" " * (col_number-1)) + "^ here" + #header_line = ("=" * 73) + error_message += "\nThe offending line appears to be:\n\n%s\n%s\n%s\n" % (prev_line.rstrip(), target_line.rstrip(), arrow_line) # common error/remediation checking here: # check for unquoted vars starting lines @@ -158,3 +159,11 @@ class AnsibleModuleError(AnsibleRuntimeError): class AnsibleConnectionFailure(AnsibleRuntimeError): ''' the transport / connection_plugin had a fatal error ''' pass + +class AnsibleFilterError(AnsibleRuntimeError): + ''' a templating failure ''' + pass + +class AnsibleUndefinedVariable(AnsibleRuntimeError): + ''' a templating failure ''' + pass diff --git a/v2/ansible/parsing/yaml/strings.py b/v2/ansible/errors/yaml_strings.py similarity index 100% rename from v2/ansible/parsing/yaml/strings.py rename to v2/ansible/errors/yaml_strings.py diff --git a/v2/ansible/executor/connection_info.py b/v2/ansible/executor/connection_info.py new file mode 100644 index 0000000000..dbc988d723 --- /dev/null +++ b/v2/ansible/executor/connection_info.py @@ -0,0 +1,167 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pipes +import random + +from ansible import constants as C + + +__all__ = ['ConnectionInformation'] + + +class ConnectionInformation: + + ''' + This class is used to consolidate the connection information for + hosts in a play and child tasks, where the task may override some + connection/authentication information. + ''' + + def __init__(self, play=None, options=None): + # FIXME: implement the new methodology here for supporting + # various different auth escalation methods (becomes, etc.) + + self.connection = C.DEFAULT_TRANSPORT + self.remote_user = 'root' + self.password = '' + self.port = 22 + self.su = False + self.su_user = '' + self.su_pass = '' + self.sudo = False + self.sudo_user = '' + self.sudo_pass = '' + self.verbosity = 0 + self.only_tags = set() + self.skip_tags = set() + + if play: + self.set_play(play) + + if options: + self.set_options(options) + + def set_play(self, play): + ''' + Configures this connection information instance with data from + the play class. + ''' + + if play.connection: + self.connection = play.connection + + self.remote_user = play.remote_user + self.password = '' + self.port = int(play.port) if play.port else 22 + self.su = play.su + self.su_user = play.su_user + self.su_pass = play.su_pass + self.sudo = play.sudo + self.sudo_user = play.sudo_user + self.sudo_pass = play.sudo_pass + + def set_options(self, options): + ''' + Configures this connection information instance with data from + options specified by the user on the command line. These have a + higher precedence than those set on the play or host. + ''' + + # FIXME: set other values from options here? 
+ + self.verbosity = options.verbosity + if options.connection: + self.connection = options.connection + + # get the tag info from options, converting a comma-separated list + # of values into a proper list if need be + if isinstance(options.tags, list): + self.only_tags.update(options.tags) + elif isinstance(options.tags, basestring): + self.only_tags.update(options.tags.split(',')) + if isinstance(options.skip_tags, list): + self.skip_tags.update(options.skip_tags) + elif isinstance(options.skip_tags, basestring): + self.skip_tags.update(options.skip_tags.split(',')) + + def copy(self, ci): + ''' + Copies the connection info from another connection info object, used + when merging in data from task overrides. + ''' + + self.connection = ci.connection + self.remote_user = ci.remote_user + self.password = ci.password + self.port = ci.port + self.su = ci.su + self.su_user = ci.su_user + self.su_pass = ci.su_pass + self.sudo = ci.sudo + self.sudo_user = ci.sudo_user + self.sudo_pass = ci.sudo_pass + self.verbosity = ci.verbosity + self.only_tags = ci.only_tags.copy() + self.skip_tags = ci.skip_tags.copy() + + def set_task_override(self, task): + ''' + Sets attributes from the task if they are set, which will override + those from the play. + ''' + + new_info = ConnectionInformation() + new_info.copy(self) + + for attr in ('connection', 'remote_user', 'su', 'su_user', 'su_pass', 'sudo', 'sudo_user', 'sudo_pass'): + if hasattr(task, attr): + attr_val = getattr(task, attr) + if attr_val: + setattr(new_info, attr, attr_val) + + return new_info + + def make_sudo_cmd(self, sudo_exe, executable, cmd): + """ + Helper function for wrapping commands with sudo. + + Rather than detect if sudo wants a password this time, -k makes + sudo always ask for a password if one is required. Passing a quoted + compound command to sudo (or sudo -s) directly doesn't work, so we + shellquote it with pipes.quote() and pass the quoted string to the + user's shell. 
We loop reading output until we see the randomly- + generated sudo prompt set with the -p option. + """ + + randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32)) + prompt = '[sudo via ansible, key=%s] password: ' % randbits + success_key = 'SUDO-SUCCESS-%s' % randbits + + sudocmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % ( + sudo_exe, sudo_exe, C.DEFAULT_SUDO_FLAGS, prompt, + self.sudo_user, executable or '$SHELL', + pipes.quote('echo %s; %s' % (success_key, cmd)) + ) + + #return ('/bin/sh -c ' + pipes.quote(sudocmd), prompt, success_key) + return (sudocmd, prompt, success_key) + diff --git a/v2/ansible/executor/manager.py b/v2/ansible/executor/manager.py new file mode 100644 index 0000000000..33a76e143b --- /dev/null +++ b/v2/ansible/executor/manager.py @@ -0,0 +1,66 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from multiprocessing.managers import SyncManager, BaseProxy +from ansible.playbook.handler import Handler +from ansible.playbook.task import Task +from ansible.playbook.play import Play +from ansible.errors import AnsibleError + +__all__ = ['AnsibleManager'] + + +class VariableManagerWrapper: + ''' + This class simply acts as a wrapper around the VariableManager class, + since manager proxies expect a new object to be returned rather than + any existing one. Using this wrapper, a shared proxy can be created + and an existing VariableManager class assigned to it, which can then + be accessed through the exposed proxy methods. + ''' + + def __init__(self): + self._vm = None + + def get_vars(self, loader, play=None, host=None, task=None): + return self._vm.get_vars(loader=loader, play=play, host=host, task=task) + + def set_variable_manager(self, vm): + self._vm = vm + + def set_host_variable(self, host, varname, value): + self._vm.set_host_variable(host, varname, value) + + def set_host_facts(self, host, facts): + self._vm.set_host_facts(host, facts) + +class AnsibleManager(SyncManager): + ''' + This is our custom manager class, which exists only so we may register + the new proxy below + ''' + pass + +AnsibleManager.register( + typeid='VariableManagerWrapper', + callable=VariableManagerWrapper, +) + diff --git a/v2/ansible/executor/module_common.py b/v2/ansible/executor/module_common.py new file mode 100644 index 0000000000..e438099295 --- /dev/null +++ b/v2/ansible/executor/module_common.py @@ -0,0 +1,185 @@ +# (c) 2013-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# from python and deps +from cStringIO import StringIO +import inspect +import json +import os +import shlex + +# from Ansible +from ansible import __version__ +from ansible import constants as C +from ansible.errors import AnsibleError +from ansible.parsing.utils.jsonify import jsonify + +REPLACER = "#<>" +REPLACER_ARGS = "\"<>\"" +REPLACER_COMPLEX = "\"<>\"" +REPLACER_WINDOWS = "# POWERSHELL_COMMON" +REPLACER_VERSION = "\"<>\"" + +class ModuleReplacer(object): + + """ + The Replacer is used to insert chunks of code into modules before + transfer. Rather than doing classical python imports, this allows for more + efficient transfer in a no-bootstrapping scenario by not moving extra files + over the wire, and also takes care of embedding arguments in the transferred + modules. + + This version is done in such a way that local imports can still be + used in the module code, so IDEs don't have to be aware of what is going on. + + Example: + + from ansible.module_utils.basic import * + + ... will result in the insertion basic.py into the module + + from the module_utils/ directory in the source tree. + + All modules are required to import at least basic, though there will also + be other snippets. 
+ + # POWERSHELL_COMMON + + Also results in the inclusion of the common code in powershell.ps1 + + """ + + # ****************************************************************************** + + def __init__(self, strip_comments=False): + # FIXME: these members need to be prefixed with '_' and the rest of the file fixed + this_file = inspect.getfile(inspect.currentframe()) + # we've moved the module_common relative to the snippets, so fix the path + self.snippet_path = os.path.join(os.path.dirname(this_file), '..', 'module_utils') + self.strip_comments = strip_comments + + # ****************************************************************************** + + + def slurp(self, path): + if not os.path.exists(path): + raise AnsibleError("imported module support code does not exist at %s" % path) + fd = open(path) + data = fd.read() + fd.close() + return data + + def _find_snippet_imports(self, module_data, module_path): + """ + Given the source of the module, convert it to a Jinja2 template to insert + module code and return whether it's a new or old style module. + """ + + module_style = 'old' + if REPLACER in module_data: + module_style = 'new' + elif 'from ansible.module_utils.' 
in module_data: + module_style = 'new' + elif 'WANT_JSON' in module_data: + module_style = 'non_native_want_json' + + output = StringIO() + lines = module_data.split('\n') + snippet_names = [] + + for line in lines: + + if REPLACER in line: + output.write(self.slurp(os.path.join(self.snippet_path, "basic.py"))) + snippet_names.append('basic') + if REPLACER_WINDOWS in line: + ps_data = self.slurp(os.path.join(self.snippet_path, "powershell.ps1")) + output.write(ps_data) + snippet_names.append('powershell') + elif line.startswith('from ansible.module_utils.'): + tokens=line.split(".") + import_error = False + if len(tokens) != 3: + import_error = True + if " import *" not in line: + import_error = True + if import_error: + raise AnsibleError("error importing module in %s, expecting format like 'from ansible.module_utils.basic import *'" % module_path) + snippet_name = tokens[2].split()[0] + snippet_names.append(snippet_name) + output.write(self.slurp(os.path.join(self.snippet_path, snippet_name + ".py"))) + else: + if self.strip_comments and line.startswith("#") or line == '': + pass + output.write(line) + output.write("\n") + + if not module_path.endswith(".ps1"): + # Unixy modules + if len(snippet_names) > 0 and not 'basic' in snippet_names: + raise AnsibleError("missing required import in %s: from ansible.module_utils.basic import *" % module_path) + else: + # Windows modules + if len(snippet_names) > 0 and not 'powershell' in snippet_names: + raise AnsibleError("missing required import in %s: # POWERSHELL_COMMON" % module_path) + + return (output.getvalue(), module_style) + + # ****************************************************************************** + + def modify_module(self, module_path, module_args): + + with open(module_path) as f: + + # read in the module source + module_data = f.read() + + (module_data, module_style) = self._find_snippet_imports(module_data, module_path) + + #module_args_json = jsonify(module_args) + module_args_json = 
json.dumps(module_args) + encoded_args = repr(module_args_json.encode('utf-8')) + + # these strings should be part of the 'basic' snippet which is required to be included + module_data = module_data.replace(REPLACER_VERSION, repr(__version__)) + module_data = module_data.replace(REPLACER_ARGS, "''") + module_data = module_data.replace(REPLACER_COMPLEX, encoded_args) + + # FIXME: we're not passing around an inject dictionary anymore, so + # this needs to be fixed with whatever method we use for vars + # like this moving forward + #if module_style == 'new': + # facility = C.DEFAULT_SYSLOG_FACILITY + # if 'ansible_syslog_facility' in inject: + # facility = inject['ansible_syslog_facility'] + # module_data = module_data.replace('syslog.LOG_USER', "syslog.%s" % facility) + + lines = module_data.split("\n") + shebang = None + if lines[0].startswith("#!"): + shebang = lines[0].strip() + args = shlex.split(str(shebang[2:])) + interpreter = args[0] + interpreter_config = 'ansible_%s_interpreter' % os.path.basename(interpreter) + + # FIXME: more inject stuff here... + #if interpreter_config in inject: + # lines[0] = shebang = "#!%s %s" % (inject[interpreter_config], " ".join(args[1:])) + # module_data = "\n".join(lines) + + return (module_data, module_style, shebang) + diff --git a/v2/ansible/executor/play_iterator.py b/v2/ansible/executor/play_iterator.py new file mode 100644 index 0000000000..4f3d0e2320 --- /dev/null +++ b/v2/ansible/executor/play_iterator.py @@ -0,0 +1,258 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.errors import * +from ansible.playbook.task import Task + +from ansible.utils.boolean import boolean + +__all__ = ['PlayIterator'] + + +# the primary running states for the play iteration +ITERATING_SETUP = 0 +ITERATING_TASKS = 1 +ITERATING_RESCUE = 2 +ITERATING_ALWAYS = 3 +ITERATING_COMPLETE = 4 + +# the failure states for the play iteration +FAILED_NONE = 0 +FAILED_SETUP = 1 +FAILED_TASKS = 2 +FAILED_RESCUE = 3 +FAILED_ALWAYS = 4 + +class PlayState: + + ''' + A helper class, which keeps track of the task iteration + state for a given playbook. This is used in the PlaybookIterator + class on a per-host basis. + ''' + + # FIXME: this class is the representation of a finite state machine, + # so we really should have a well defined state representation + # documented somewhere... + + def __init__(self, parent_iterator, host): + ''' + Create the initial state, which tracks the running state as well + as the failure state, which are used when executing block branches + (rescue/always) + ''' + + self._run_state = ITERATING_SETUP + self._failed_state = FAILED_NONE + self._task_list = parent_iterator._play.compile() + self._gather_facts = parent_iterator._play.gather_facts + self._host = host + + self._cur_block = None + self._cur_role = None + self._cur_task_pos = 0 + self._cur_rescue_pos = 0 + self._cur_always_pos = 0 + self._cur_handler_pos = 0 + + def next(self, peek=False): + ''' + Determines and returns the next available task from the playbook, + advancing through the list of plays as it goes. If peek is set to True, + the internal state is not stored. 
+ ''' + + task = None + + # save this locally so that we can peek at the next task + # without updating the internal state of the iterator + run_state = self._run_state + failed_state = self._failed_state + cur_block = self._cur_block + cur_role = self._cur_role + cur_task_pos = self._cur_task_pos + cur_rescue_pos = self._cur_rescue_pos + cur_always_pos = self._cur_always_pos + cur_handler_pos = self._cur_handler_pos + + + while True: + if run_state == ITERATING_SETUP: + if failed_state == FAILED_SETUP: + run_state = ITERATING_COMPLETE + else: + run_state = ITERATING_TASKS + + if self._gather_facts == 'smart' and not self._host.gathered_facts or boolean(self._gather_facts): + self._host.set_gathered_facts(True) + task = Task() + task.action = 'setup' + break + elif run_state == ITERATING_TASKS: + # if there is any failure state besides FAILED_NONE, we should + # change to some other running state + if failed_state != FAILED_NONE or cur_task_pos > len(self._task_list) - 1: + # if there is a block (and there always should be), start running + # the rescue portion if it exists (and if we haven't failed that + # already), or the always portion (if it exists and we didn't fail + # there too). Otherwise, we're done iterating. + if cur_block: + if failed_state != FAILED_RESCUE and cur_block.rescue: + run_state = ITERATING_RESCUE + cur_rescue_pos = 0 + elif failed_state != FAILED_ALWAYS and cur_block.always: + run_state = ITERATING_ALWAYS + cur_always_pos = 0 + else: + run_state = ITERATING_COMPLETE + else: + run_state = ITERATING_COMPLETE + else: + task = self._task_list[cur_task_pos] + if cur_block is not None and cur_block != task._block: + run_state = ITERATING_ALWAYS + continue + else: + cur_block = task._block + cur_task_pos += 1 + + # Break out of the while loop now that we have our task + break + + elif run_state == ITERATING_RESCUE: + # If we're iterating through the rescue tasks, make sure we haven't + # failed yet. 
If so, move on to the always block or if not get the + # next rescue task (if one exists) + if failed_state == FAILED_RESCUE or cur_block.rescue is None or cur_rescue_pos > len(cur_block.rescue) - 1: + run_state = ITERATING_ALWAYS + else: + task = cur_block.rescue[cur_rescue_pos] + cur_rescue_pos += 1 + break + + elif run_state == ITERATING_ALWAYS: + # If we're iterating through the always tasks, make sure we haven't + # failed yet. If so, we're done iterating otherwise get the next always + # task (if one exists) + if failed_state == FAILED_ALWAYS or cur_block.always is None or cur_always_pos > len(cur_block.always) - 1: + cur_block = None + if failed_state == FAILED_ALWAYS or cur_task_pos > len(self._task_list) - 1: + run_state = ITERATING_COMPLETE + else: + run_state = ITERATING_TASKS + else: + task = cur_block.always[cur_always_pos] + cur_always_pos += 1 + break + + elif run_state == ITERATING_COMPLETE: + # done iterating, return None to signify that + return None + + if task._role: + if cur_role and task._role != cur_role: + cur_role._completed = True + cur_role = task._role + + # If we're not just peeking at the next task, save the internal state + if not peek: + self._run_state = run_state + self._failed_state = failed_state + self._cur_block = cur_block + self._cur_role = cur_role + self._cur_task_pos = cur_task_pos + self._cur_rescue_pos = cur_rescue_pos + self._cur_always_pos = cur_always_pos + self._cur_handler_pos = cur_handler_pos + + return task + + def mark_failed(self): + ''' + Escalates the failed state relative to the running state. 
+ ''' + if self._run_state == ITERATING_SETUP: + self._failed_state = FAILED_SETUP + elif self._run_state == ITERATING_TASKS: + self._failed_state = FAILED_TASKS + elif self._run_state == ITERATING_RESCUE: + self._failed_state = FAILED_RESCUE + elif self._run_state == ITERATING_ALWAYS: + self._failed_state = FAILED_ALWAYS + + +class PlayIterator: + + ''' + The main iterator class, which keeps the state of the playbook + on a per-host basis using the above PlaybookState class. + ''' + + def __init__(self, inventory, play): + self._play = play + self._inventory = inventory + self._host_entries = dict() + self._first_host = None + + # Build the per-host dictionary of playbook states, using a copy + # of the play object so we can post_validate it to ensure any templated + # fields are filled in without modifying the original object, since + # post_validate() saves the templated values. + + # FIXME: this is a hacky way of doing this, the iterator should + # instead get the loader and variable manager directly + # as args to __init__ + all_vars = inventory._variable_manager.get_vars(loader=inventory._loader, play=play) + new_play = play.copy() + new_play.post_validate(all_vars, ignore_undefined=True) + + for host in inventory.get_hosts(new_play.hosts): + if self._first_host is None: + self._first_host = host + self._host_entries[host.get_name()] = PlayState(parent_iterator=self, host=host) + + # FIXME: remove, probably not required anymore + #def get_next_task(self, peek=False): + # ''' returns the next task for host[0] ''' + # + # first_entry = self._host_entries[self._first_host.get_name()] + # if not peek: + # for entry in self._host_entries: + # if entry != self._first_host.get_name(): + # target_entry = self._host_entries[entry] + # if target_entry._cur_task_pos == first_entry._cur_task_pos: + # target_entry.next() + # return first_entry.next(peek=peek) + + def get_next_task_for_host(self, host, peek=False): + ''' fetch the next task for the given host ''' + if 
host.get_name() not in self._host_entries: + raise AnsibleError("invalid host (%s) specified for playbook iteration" % host) + + return self._host_entries[host.get_name()].next(peek=peek) + + def mark_host_failed(self, host): + ''' mark the given host as failed ''' + if host.get_name() not in self._host_entries: + raise AnsibleError("invalid host (%s) specified for playbook iteration" % host) + + self._host_entries[host.get_name()].mark_failed() + diff --git a/v2/ansible/executor/playbook_executor.py b/v2/ansible/executor/playbook_executor.py index 7031e51142..96c0fa3cbb 100644 --- a/v2/ansible/executor/playbook_executor.py +++ b/v2/ansible/executor/playbook_executor.py @@ -19,17 +19,110 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +import signal + +from ansible import constants as C +from ansible.errors import * +from ansible.executor.task_queue_manager import TaskQueueManager +from ansible.playbook import Playbook + +from ansible.utils.debug import debug + class PlaybookExecutor: - def __init__(self, list_of_plays=[]): - # self.tqm = TaskQueueManager(forks) - assert False + ''' + This is the primary class for executing playbooks, and thus the + basis for bin/ansible-playbook operation. + ''' - def run(self): - # for play in list_of_plays: - # for block in play.blocks: - # # block must know it’s playbook class and context - # tqm.enqueue(block) - # tqm.go()... - assert False + def __init__(self, playbooks, inventory, variable_manager, loader, options): + self._playbooks = playbooks + self._inventory = inventory + self._variable_manager = variable_manager + self._loader = loader + self._options = options + self._tqm = TaskQueueManager(inventory=inventory, callback='default', variable_manager=variable_manager, loader=loader, options=options) + + def run(self): + + ''' + Run the given playbook, based on the settings in the play which + may limit the runs to serialized groups, etc. 
+ ''' + + signal.signal(signal.SIGINT, self._cleanup) + + try: + for playbook_path in self._playbooks: + pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader) + + # FIXME: playbook entries are just plays, so we should rename them + for play in pb.get_entries(): + self._inventory.remove_restriction() + + # Create a temporary copy of the play here, so we can run post_validate + # on it without the templating changes affecting the original object. + all_vars = self._variable_manager.get_vars(loader=self._loader, play=play) + new_play = play.copy() + new_play.post_validate(all_vars, ignore_undefined=True) + + result = True + for batch in self._get_serialized_batches(new_play): + if len(batch) == 0: + raise AnsibleError("No hosts matched the list specified in the play", obj=play._ds) + # restrict the inventory to the hosts in the serialized batch + self._inventory.restrict_to_hosts(batch) + # and run it... + result = self._tqm.run(play=play) + if not result: + break + + if not result: + # FIXME: do something here, to signify the playbook execution failed + self._cleanup() + return 1 + except: + self._cleanup() + raise + + self._cleanup() + return 0 + + def _cleanup(self, signum=None, framenum=None): + self._tqm.cleanup() + + def _get_serialized_batches(self, play): + ''' + Returns a list of hosts, subdivided into batches based on + the serial size specified in the play. 
+ ''' + + # make sure we have a unique list of hosts + all_hosts = self._inventory.get_hosts(play.hosts) + + # check to see if the serial number was specified as a percentage, + # and convert it to an integer value based on the number of hosts + if isinstance(play.serial, basestring) and play.serial.endswith('%'): + serial_pct = int(play.serial.replace("%","")) + serial = int((serial_pct/100.0) * len(all_hosts)) + else: + serial = int(play.serial) + + # if the serial count was not specified or is invalid, default to + # a list of all hosts, otherwise split the list of hosts into chunks + # which are based on the serial size + if serial <= 0: + return [all_hosts] + else: + serialized_batches = [] + + while len(all_hosts) > 0: + play_hosts = [] + for x in range(serial): + if len(all_hosts) > 0: + play_hosts.append(all_hosts.pop(0)) + + serialized_batches.append(play_hosts) + + return serialized_batches diff --git a/v2/ansible/executor/playbook_iterator.py b/v2/ansible/executor/playbook_iterator.py deleted file mode 100644 index 88bec5a331..0000000000 --- a/v2/ansible/executor/playbook_iterator.py +++ /dev/null @@ -1,125 +0,0 @@ -# (c) 2012-2014, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -class PlaybookState: - - ''' - A helper class, which keeps track of the task iteration - state for a given playbook. This is used in the PlaybookIterator - class on a per-host basis. - ''' - def __init__(self, parent_iterator): - self._parent_iterator = parent_iterator - self._cur_play = 0 - self._task_list = None - self._cur_task_pos = 0 - self._done = False - - def next(self, peek=False): - ''' - Determines and returns the next available task from the playbook, - advancing through the list of plays as it goes. - ''' - - task = None - - # we save these locally so that we can peek at the next task - # without updating the internal state of the iterator - cur_play = self._cur_play - task_list = self._task_list - cur_task_pos = self._cur_task_pos - - while True: - # when we hit the end of the playbook entries list, we set a flag - # and return None to indicate we're there - # FIXME: accessing the entries and parent iterator playbook members - # should be done through accessor functions - if self._done or cur_play > len(self._parent_iterator._playbook._entries) - 1: - self._done = True - return None - - # initialize the task list by calling the .compile() method - # on the play, which will call compile() for all child objects - if task_list is None: - task_list = self._parent_iterator._playbook._entries[cur_play].compile() - - # if we've hit the end of this plays task list, move on to the next - # and reset the position values for the next iteration - if cur_task_pos > len(task_list) - 1: - cur_play += 1 - task_list = None - cur_task_pos = 0 - continue - else: - # FIXME: do tag/conditional evaluation here and advance - # the task position if it should be skipped without - # returning a task - task = task_list[cur_task_pos] - cur_task_pos += 1 - - # Skip the task if it is the member of a role which has already - # been run, unless the role 
allows multiple executions - if task._role: - # FIXME: this should all be done via member functions - # instead of direct access to internal variables - if task._role.has_run() and not task._role._metadata._allow_duplicates: - continue - - # Break out of the while loop now that we have our task - break - - # If we're not just peeking at the next task, save the internal state - if not peek: - self._cur_play = cur_play - self._task_list = task_list - self._cur_task_pos = cur_task_pos - - return task - -class PlaybookIterator: - - ''' - The main iterator class, which keeps the state of the playbook - on a per-host basis using the above PlaybookState class. - ''' - - def __init__(self, inventory, log_manager, playbook): - self._playbook = playbook - self._log_manager = log_manager - self._host_entries = dict() - self._first_host = None - - # build the per-host dictionary of playbook states - for host in inventory.get_hosts(): - if self._first_host is None: - self._first_host = host - self._host_entries[host.get_name()] = PlaybookState(parent_iterator=self) - - def get_next_task(self, peek=False): - ''' returns the next task for host[0] ''' - return self._host_entries[self._first_host.get_name()].next(peek=peek) - - def get_next_task_for_host(self, host, peek=False): - ''' fetch the next task for the given host ''' - if host.get_name() not in self._host_entries: - raise AnsibleError("invalid host specified for playbook iteration") - - return self._host_entries[host.get_name()].next(peek=peek) diff --git a/v2/test/parsing/yaml/__init__.py b/v2/ansible/executor/process/__init__.py similarity index 100% rename from v2/test/parsing/yaml/__init__.py rename to v2/ansible/executor/process/__init__.py diff --git a/v2/ansible/executor/process/result.py b/v2/ansible/executor/process/result.py new file mode 100644 index 0000000000..71bfdd7e02 --- /dev/null +++ b/v2/ansible/executor/process/result.py @@ -0,0 +1,155 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of 
Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import Queue +import multiprocessing +import os +import signal +import sys +import time +import traceback + +HAS_ATFORK=True +try: + from Crypto.Random import atfork +except ImportError: + HAS_ATFORK=False + +from ansible.executor.task_result import TaskResult +from ansible.playbook.handler import Handler +from ansible.playbook.task import Task + +from ansible.utils.debug import debug + +__all__ = ['ResultProcess'] + + +class ResultProcess(multiprocessing.Process): + ''' + The result worker thread, which reads results from the results + queue and fires off callbacks/etc. as necessary. 
+ ''' + + def __init__(self, final_q, workers): + + # takes a task queue manager as the sole param: + self._final_q = final_q + self._workers = workers + self._cur_worker = 0 + self._terminated = False + + super(ResultProcess, self).__init__() + + def _send_result(self, result): + debug("sending result: %s" % (result,)) + self._final_q.put(result, block=False) + debug("done sending result") + + def _read_worker_result(self): + result = None + starting_point = self._cur_worker + while True: + (worker_prc, main_q, rslt_q) = self._workers[self._cur_worker] + self._cur_worker += 1 + if self._cur_worker >= len(self._workers): + self._cur_worker = 0 + + try: + if not rslt_q.empty(): + debug("worker %d has data to read" % self._cur_worker) + result = rslt_q.get(block=False) + debug("got a result from worker %d: %s" % (self._cur_worker, result)) + break + except Queue.Empty: + pass + + if self._cur_worker == starting_point: + break + + return result + + def terminate(self): + self._terminated = True + super(ResultProcess, self).terminate() + + def run(self): + ''' + The main thread execution, which reads from the results queue + indefinitely and sends callbacks/etc. when results are received. 
+ ''' + + if HAS_ATFORK: + atfork() + + while True: + try: + result = self._read_worker_result() + if result is None: + time.sleep(0.1) + continue + + host_name = result._host.get_name() + + # send callbacks, execute other options based on the result status + if result.is_failed(): + #self._callback.runner_on_failed(result._task, result) + self._send_result(('host_task_failed', result)) + elif result.is_unreachable(): + #self._callback.runner_on_unreachable(result._task, result) + self._send_result(('host_unreachable', result)) + elif result.is_skipped(): + #self._callback.runner_on_skipped(result._task, result) + self._send_result(('host_task_skipped', result)) + else: + #self._callback.runner_on_ok(result._task, result) + self._send_result(('host_task_ok', result)) + + # if this task is notifying a handler, do it now + if result._task.notify: + # The shared dictionary for notified handlers is a proxy, which + # does not detect when sub-objects within the proxy are modified. + # So, per the docs, we reassign the list so the proxy picks up and + # notifies all other threads + for notify in result._task.notify: + self._send_result(('notify_handler', notify, result._host)) + + # if this task is registering facts, do that now + if 'ansible_facts' in result._result: + if result._task.action in ('set_fact', 'include_vars'): + for (key, value) in result._result['ansible_facts'].iteritems(): + self._send_result(('set_host_var', result._host, key, value)) + else: + self._send_result(('set_host_facts', result._host, result._result['ansible_facts'])) + + # if this task is registering a result, do it now + if result._task.register: + self._send_result(('set_host_var', result._host, result._task.register, result._result)) + + except Queue.Empty: + pass + except (KeyboardInterrupt, IOError, EOFError): + break + except: + # FIXME: we should probably send a proper callback here instead of + # simply dumping a stack trace on the screen + traceback.print_exc() + break + diff --git 
a/v2/ansible/executor/process/worker.py b/v2/ansible/executor/process/worker.py new file mode 100644 index 0000000000..dcb8e4e924 --- /dev/null +++ b/v2/ansible/executor/process/worker.py @@ -0,0 +1,141 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import Queue +import multiprocessing +import os +import signal +import sys +import time +import traceback + +HAS_ATFORK=True +try: + from Crypto.Random import atfork +except ImportError: + HAS_ATFORK=False + +from ansible.errors import AnsibleError, AnsibleConnectionFailure +from ansible.executor.task_executor import TaskExecutor +from ansible.executor.task_result import TaskResult +from ansible.playbook.handler import Handler +from ansible.playbook.task import Task + +from ansible.utils.debug import debug + +__all__ = ['ExecutorProcess'] + + +class WorkerProcess(multiprocessing.Process): + ''' + The worker thread class, which uses TaskExecutor to run tasks + read from a job queue and pushes results into a results queue + for reading later. 
+ ''' + + def __init__(self, tqm, main_q, rslt_q, loader, new_stdin): + + # takes a task queue manager as the sole param: + self._main_q = main_q + self._rslt_q = rslt_q + self._loader = loader + + # dupe stdin, if we have one + try: + fileno = sys.stdin.fileno() + except ValueError: + fileno = None + + self._new_stdin = new_stdin + if not new_stdin and fileno is not None: + try: + self._new_stdin = os.fdopen(os.dup(fileno)) + except OSError, e: + # couldn't dupe stdin, most likely because it's + # not a valid file descriptor, so we just rely on + # using the one that was passed in + pass + + super(WorkerProcess, self).__init__() + + def run(self): + ''' + Called when the process is started, and loops indefinitely + until an error is encountered (typically an IOerror from the + queue pipe being disconnected). During the loop, we attempt + to pull tasks off the job queue and run them, pushing the result + onto the results queue. We also remove the host from the blocked + hosts list, to signify that they are ready for their next task. 
+ ''' + + if HAS_ATFORK: + atfork() + + while True: + task = None + try: + if not self._main_q.empty(): + debug("there's work to be done!") + (host, task, job_vars, connection_info) = self._main_q.get(block=False) + debug("got a task/handler to work on: %s" % task) + + new_connection_info = connection_info.set_task_override(task) + + # execute the task and build a TaskResult from the result + debug("running TaskExecutor() for %s/%s" % (host, task)) + executor_result = TaskExecutor(host, task, job_vars, new_connection_info, self._loader).run() + debug("done running TaskExecutor() for %s/%s" % (host, task)) + task_result = TaskResult(host, task, executor_result) + + # put the result on the result queue + debug("sending task result") + self._rslt_q.put(task_result, block=False) + debug("done sending task result") + + else: + time.sleep(0.1) + + except Queue.Empty: + pass + except (IOError, EOFError, KeyboardInterrupt): + break + except AnsibleConnectionFailure: + try: + if task: + task_result = TaskResult(host, task, dict(unreachable=True)) + self._rslt_q.put(task_result, block=False) + except: + # FIXME: most likely an abort, catch those kinds of errors specifically + break + except Exception, e: + debug("WORKER EXCEPTION: %s" % e) + debug("WORKER EXCEPTION: %s" % traceback.format_exc()) + try: + if task: + task_result = TaskResult(host, task, dict(failed=True, exception=True, stdout=traceback.format_exc())) + self._rslt_q.put(task_result, block=False) + except: + # FIXME: most likely an abort, catch those kinds of errors specifically + break + + debug("WORKER PROCESS EXITING") + + diff --git a/v2/ansible/executor/task_executor.py b/v2/ansible/executor/task_executor.py index 878c15c489..cc3e04a814 100644 --- a/v2/ansible/executor/task_executor.py +++ b/v2/ansible/executor/task_executor.py @@ -19,14 +19,196 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +from ansible import constants as C +from ansible.errors import 
AnsibleError +from ansible.executor.connection_info import ConnectionInformation +from ansible.plugins import lookup_loader, connection_loader, action_loader + +from ansible.utils.debug import debug + +__all__ = ['TaskExecutor'] + +import json +import time + class TaskExecutor: - def __init__(self, task, host): - pass + ''' + This is the main worker class for the executor pipeline, which + handles loading an action plugin to actually dispatch the task to + a given host. This class roughly corresponds to the old Runner() + class. + ''' - def run(self): - # returns TaskResult - pass + def __init__(self, host, task, job_vars, connection_info, loader): + self._host = host + self._task = task + self._job_vars = job_vars + self._connection_info = connection_info + self._loader = loader - + def run(self): + ''' + The main executor entrypoint, where we determine if the specified + task requires looping and either runs the task with + ''' + debug("in run()") + items = self._get_loop_items() + if items: + if len(items) > 0: + item_results = self._run_loop(items) + res = dict(results=item_results) + else: + res = dict(changed=False, skipped=True, skipped_reason='No items in the list', results=[]) + else: + debug("calling self._execute()") + res = self._execute() + debug("_execute() done") + + debug("dumping result to json") + result = json.dumps(res) + debug("done dumping result, returning") + return result + + def _get_loop_items(self): + ''' + Loads a lookup plugin to handle the with_* portion of a task (if specified), + and returns the items result. + ''' + + items = None + if self._task.loop and self._task.loop in lookup_loader: + items = lookup_loader.get(self._task.loop).run(self._task.loop_args) + + return items + + def _run_loop(self, items): + ''' + Runs the task with the loop items specified and collates the result + into an array named 'results' which is inserted into the final result + along with the item for which the loop ran. 
+ ''' + + results = [] + + # FIXME: squash items into a flat list here for those modules + # which support it (yum, apt, etc.) but make it smarter + # than it is today? + + for item in items: + res = self._execute() + res['item'] = item + results.append(res) + + return results + + def _execute(self): + ''' + The primary workhorse of the executor system, this runs the task + on the specified host (which may be the delegated_to host) and handles + the retry/until and block rescue/always execution + ''' + + connection = self._get_connection() + handler = self._get_action_handler(connection=connection) + + # check to see if this task should be skipped, due to it being a member of a + # role which has already run (and whether that role allows duplicate execution) + if self._task._role and self._task._role.has_run(): + # If there is no metadata, the default behavior is to not allow duplicates, + # if there is metadata, check to see if the allow_duplicates flag was set to true + if self._task._role._metadata is None or self._task._role._metadata and not self._task._role._metadata.allow_duplicates: + debug("task belongs to a role which has already run, but does not allow duplicate execution") + return dict(skipped=True, skip_reason='This role has already been run, but does not allow duplicates') + + if not self._task.evaluate_conditional(self._job_vars): + debug("when evaulation failed, skipping this task") + return dict(skipped=True, skip_reason='Conditional check failed') + + if not self._task.evaluate_tags(self._connection_info.only_tags, self._connection_info.skip_tags): + debug("Tags don't match, skipping this task") + return dict(skipped=True, skip_reason='Skipped due to specified tags') + + retries = self._task.retries + if retries <= 0: + retries = 1 + + delay = self._task.delay + if delay < 0: + delay = 0 + + debug("starting attempt loop") + result = None + for attempt in range(retries): + if attempt > 0: + # FIXME: this should use the callback mechanism + 
print("FAILED - RETRYING: %s (%d retries left)" % (self._task, retries-attempt)) + result['attempts'] = attempt + 1 + + debug("running the handler") + result = handler.run(task_vars=self._job_vars) + debug("handler run complete") + if self._task.until: + # TODO: implement until logic (pseudo logic follows...) + # if VariableManager.check_conditional(cond, extra_vars=(dict(result=result))): + # break + pass + elif 'failed' not in result and result.get('rc', 0) == 0: + # if the result is not failed, stop trying + break + + if attempt < retries - 1: + time.sleep(delay) + + debug("attempt loop complete, returning result") + return result + + def _get_connection(self): + ''' + Reads the connection property for the host, and returns the + correct connection object from the list of connection plugins + ''' + + # FIXME: delegate_to calculation should be done here + # FIXME: calculation of connection params/auth stuff should be done here + + # FIXME: add all port/connection type munging here (accelerated mode, + # fixing up options for ssh, etc.)? 
and 'smart' conversion + conn_type = self._connection_info.connection + if conn_type == 'smart': + conn_type = 'ssh' + + connection = connection_loader.get(conn_type, self._host, self._connection_info) + if not connection: + raise AnsibleError("the connection plugin '%s' was not found" % conn_type) + + connection.connect() + + return connection + + def _get_action_handler(self, connection): + ''' + Returns the correct action plugin to handle the requestion task action + ''' + + if self._task.action in action_loader: + if self._task.async != 0: + raise AnsibleError("async mode is not supported with the %s module" % module_name) + handler_name = self._task.action + elif self._task.async == 0: + handler_name = 'normal' + else: + handler_name = 'async' + + handler = action_loader.get( + handler_name, + task=self._task, + connection=connection, + connection_info=self._connection_info, + loader=self._loader + ) + if not handler: + raise AnsibleError("the handler '%s' was not found" % handler_name) + + return handler diff --git a/v2/ansible/executor/task_queue_manager.py b/v2/ansible/executor/task_queue_manager.py index a79235bfd0..72ff04d53d 100644 --- a/v2/ansible/executor/task_queue_manager.py +++ b/v2/ansible/executor/task_queue_manager.py @@ -19,18 +19,191 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -class TaskQueueManagerHostPlaybookIterator: +import multiprocessing +import os +import socket +import sys - def __init__(self, host, playbook): - pass +from ansible.errors import AnsibleError +from ansible.executor.connection_info import ConnectionInformation +#from ansible.executor.manager import AnsibleManager +from ansible.executor.play_iterator import PlayIterator +from ansible.executor.process.worker import WorkerProcess +from ansible.executor.process.result import ResultProcess +from ansible.plugins import callback_loader, strategy_loader - def get_next_task(self): - assert False +from ansible.utils.debug import debug 
- def is_blocked(self): - # depending on strategy, either - # ‘linear’ -- all prev tasks must be completed for all hosts - # ‘free’ -- this host doesn’t have any more work to do - assert False +__all__ = ['TaskQueueManager'] +class TaskQueueManager: + + ''' + This class handles the multiprocessing requirements of Ansible by + creating a pool of worker forks, a result handler fork, and a + manager object with shared datastructures/queues for coordinating + work between all processes. + + The queue manager is responsible for loading the play strategy plugin, + which dispatches the Play's tasks to hosts. + ''' + + def __init__(self, inventory, callback, variable_manager, loader, options): + + self._inventory = inventory + self._variable_manager = variable_manager + self._loader = loader + self._options = options + + # a special flag to help us exit cleanly + self._terminated = False + + # create and start the multiprocessing manager + #self._manager = AnsibleManager() + #self._manager.start() + + # this dictionary is used to keep track of notified handlers + self._notified_handlers = dict() + + # dictionaries to keep track of failed/unreachable hosts + self._failed_hosts = dict() + self._unreachable_hosts = dict() + + self._final_q = multiprocessing.Queue() + + # FIXME: hard-coded the default callback plugin here, which + # should be configurable. 
+ self._callback = callback_loader.get(callback) + + # create the pool of worker threads, based on the number of forks specified + try: + fileno = sys.stdin.fileno() + except ValueError: + fileno = None + + self._workers = [] + for i in range(self._options.forks): + # duplicate stdin, if possible + new_stdin = None + if fileno is not None: + try: + new_stdin = os.fdopen(os.dup(fileno)) + except OSError, e: + # couldn't dupe stdin, most likely because it's + # not a valid file descriptor, so we just rely on + # using the one that was passed in + pass + + main_q = multiprocessing.Queue() + rslt_q = multiprocessing.Queue() + + prc = WorkerProcess(self, main_q, rslt_q, loader, new_stdin) + prc.start() + + self._workers.append((prc, main_q, rslt_q)) + + self._result_prc = ResultProcess(self._final_q, self._workers) + self._result_prc.start() + + def _initialize_notified_handlers(self, handlers): + ''' + Clears and initializes the shared notified handlers dict with entries + for each handler in the play, which is an empty array that will contain + inventory hostnames for those hosts triggering the handler. + ''' + + # Zero the dictionary first by removing any entries there. + # Proxied dicts don't support iteritems, so we have to use keys() + for key in self._notified_handlers.keys(): + del self._notified_handlers[key] + + # FIXME: there is a block compile helper for this... + handler_list = [] + for handler_block in handlers: + handler_list.extend(handler_block.compile()) + + # then initalize it with the handler names from the handler list + for handler in handler_list: + self._notified_handlers[handler.get_name()] = [] + + def run(self, play): + ''' + Iterates over the roles/tasks in a play, using the given (or default) + strategy for queueing tasks. The default is the linear strategy, which + operates like classic Ansible by keeping all hosts in lock-step with + a given task (meaning no hosts move on to the next task until all hosts + are done with the current task). 
+ ''' + + connection_info = ConnectionInformation(play, self._options) + self._callback.set_connection_info(connection_info) + + # run final validation on the play now, to make sure fields are templated + # FIXME: is this even required? Everything is validated and merged at the + # task level, so else in the play needs to be templated + #all_vars = self._vmw.get_vars(loader=self._dlw, play=play) + #all_vars = self._vmw.get_vars(loader=self._loader, play=play) + #play.post_validate(all_vars=all_vars) + + self._callback.playbook_on_play_start(play.name) + + # initialize the shared dictionary containing the notified handlers + self._initialize_notified_handlers(play.handlers) + + # load the specified strategy (or the default linear one) + strategy = strategy_loader.get(play.strategy, self) + if strategy is None: + raise AnsibleError("Invalid play strategy specified: %s" % play.strategy, obj=play._ds) + + # build the iterator + iterator = PlayIterator(inventory=self._inventory, play=play) + + # and run the play using the strategy + return strategy.run(iterator, connection_info) + + def cleanup(self): + debug("RUNNING CLEANUP") + + self.terminate() + + self._final_q.close() + self._result_prc.terminate() + + for (worker_prc, main_q, rslt_q) in self._workers: + rslt_q.close() + main_q.close() + worker_prc.terminate() + + def get_inventory(self): + return self._inventory + + def get_callback(self): + return self._callback + + def get_variable_manager(self): + return self._variable_manager + + def get_loader(self): + return self._loader + + def get_server_pipe(self): + return self._server_pipe + + def get_client_pipe(self): + return self._client_pipe + + def get_pending_results(self): + return self._pending_results + + def get_allow_processing(self): + return self._allow_processing + + def get_notified_handlers(self): + return self._notified_handlers + + def get_workers(self): + return self._workers[:] + + def terminate(self): + self._terminated = True diff --git 
a/v2/ansible/executor/task_result.py b/v2/ansible/executor/task_result.py index 785fc45992..d911713651 100644 --- a/v2/ansible/executor/task_result.py +++ b/v2/ansible/executor/task_result.py @@ -19,3 +19,39 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +from ansible.parsing import DataLoader + +class TaskResult: + ''' + This class is responsible for interpretting the resulting data + from an executed task, and provides helper methods for determining + the result of a given task. + ''' + + def __init__(self, host, task, return_data): + self._host = host + self._task = task + if isinstance(return_data, dict): + self._result = return_data.copy() + else: + self._result = DataLoader().load(return_data) + + def is_changed(self): + return self._check_key('changed') + + def is_skipped(self): + return self._check_key('skipped') + + def is_failed(self): + return self._check_key('failed') or self._result.get('rc', 0) != 0 + + def is_unreachable(self): + return self._check_key('unreachable') + + def _check_key(self, key): + if 'results' in self._result: + flag = False + for res in self._result.get('results', []): + flag |= res.get(key, False) + else: + return self._result.get(key, False) diff --git a/v2/ansible/inventory/__init__.py b/v2/ansible/inventory/__init__.py index 631fddfe68..0c43133b92 100644 --- a/v2/ansible/inventory/__init__.py +++ b/v2/ansible/inventory/__init__.py @@ -16,397 +16,661 @@ # along with Ansible. If not, see . 
############################################# +import fnmatch +import os +import sys +import re +import stat +import subprocess -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from ansible import constants as C +from ansible.errors import * -### List of things to change in Inventory +from ansible.inventory.ini import InventoryParser +from ansible.inventory.script import InventoryScript +from ansible.inventory.dir import InventoryDirectory +from ansible.inventory.group import Group +from ansible.inventory.host import Host +from ansible.plugins import vars_loader +from ansible.utils.vars import combine_vars -### Replace some lists with sets/frozensets. -### Check where this makes sense to reveal externally +# FIXME: these defs need to be somewhere else +def is_executable(path): + '''is the given path executable?''' + return (stat.S_IXUSR & os.stat(path)[stat.ST_MODE] + or stat.S_IXGRP & os.stat(path)[stat.ST_MODE] + or stat.S_IXOTH & os.stat(path)[stat.ST_MODE]) -### Rename all caches to *_cache +class Inventory(object): + """ + Host inventory for ansible. 
+ """ -### Standardize how caches are flushed for all caches if possible + #__slots__ = [ 'host_list', 'groups', '_restriction', '_also_restriction', '_subset', + # 'parser', '_vars_per_host', '_vars_per_group', '_hosts_cache', '_groups_list', + # '_pattern_cache', '_vault_password', '_vars_plugins', '_playbook_basedir'] -### Think about whether retrieving variables should be methods of the -### Groups/Hosts being queried with caches at that level + def __init__(self, loader, variable_manager, host_list=C.DEFAULT_HOST_LIST): -### Store things into a VarManager instead of inventory + # the host file file, or script path, or list of hosts + # if a list, inventory data will NOT be loaded + self.host_list = host_list + self._loader = loader + self._variable_manager = variable_manager -### Merge list_hosts() and get_hosts() -### Merge list_groups() and groups_list() -### Merge get_variables() and get_host_variables() + # caching to avoid repeated calculations, particularly with + # external inventory scripts. -### Restrictions: -### Remove get_restriction() -### Prefix restrict_to and lift_restriction with _ and note in docstring that -### only playbook is to use these for implementing failed hosts. This is -### the closest that python has to a "friend function" -### Can we get rid of restrictions altogether? -### If we must keep restrictions, reimplement as a stack of sets. Then -### calling code will push and pop restrictions onto the inventory -### (mpdehaan +1'd stack idea) + self._vars_per_host = {} + self._vars_per_group = {} + self._hosts_cache = {} + self._groups_list = {} + self._pattern_cache = {} -### is_file() and basedir() => Change to properties + # to be set by calling set_playbook_basedir by playbook code + self._playbook_basedir = None -### Can we move the playbook variable resolving to someplace else? Seems that: -### 1) It can change within a single session -### 2) Inventory shouldn't know about playbook. 
-### Possibilities: -### Host and groups read the host_vars and group_vars. Both inventory and -### playbook register paths that the hsot_vars and group_vars can read from. -### The VariableManager reads the host_vars and group_vars and keeps them -### layered depending on the context from which it's being asked what -### the value of a variable is -### Either of these results in getting rid of/moving to another class -### Inventory.playbook_basedir() and Inventory.set_playbook_basedir() -### mpdehaan: evaluate caching and make sure we're just caching once. (Toshio: tie -### this in with storing and retrieving variables via Host and Group objects -### mpdehaan: If it's possible, move templating entirely out of inventory -### (Toshio: If it's possible, implement this by storing inside of -### VariableManager which will handle resolving templated variables) + # the inventory object holds a list of groups + self.groups = [] + + # a list of host(names) to contain current inquiries to + self._restriction = None + self._also_restriction = None + self._subset = None + + if isinstance(host_list, basestring): + if "," in host_list: + host_list = host_list.split(",") + host_list = [ h for h in host_list if h and h.strip() ] + + if host_list is None: + self.parser = None + elif isinstance(host_list, list): + self.parser = None + all = Group('all') + self.groups = [ all ] + ipv6_re = re.compile('\[([a-f:A-F0-9]*[%[0-z]+]?)\](?::(\d+))?') + for x in host_list: + m = ipv6_re.match(x) + if m: + all.add_host(Host(m.groups()[0], m.groups()[1])) + else: + if ":" in x: + tokens = x.rsplit(":", 1) + # if there is ':' in the address, then this is an ipv6 + if ':' in tokens[0]: + all.add_host(Host(x)) + else: + all.add_host(Host(tokens[0], tokens[1])) + else: + all.add_host(Host(x)) + elif os.path.exists(host_list): + if os.path.isdir(host_list): + # Ensure basedir is inside the directory + self.host_list = os.path.join(self.host_list, "") + self.parser = 
InventoryDirectory(filename=host_list) + self.groups = self.parser.groups.values() + else: + # check to see if the specified file starts with a + # shebang (#!/), so if an error is raised by the parser + # class we can show a more apropos error + shebang_present = False + try: + inv_file = open(host_list) + first_line = inv_file.readlines()[0] + inv_file.close() + if first_line.startswith('#!'): + shebang_present = True + except: + pass + + # FIXME: utils is_executable + if is_executable(host_list): + try: + self.parser = InventoryScript(filename=host_list) + self.groups = self.parser.groups.values() + except: + if not shebang_present: + raise errors.AnsibleError("The file %s is marked as executable, but failed to execute correctly. " % host_list + \ + "If this is not supposed to be an executable script, correct this with `chmod -x %s`." % host_list) + else: + raise + else: + try: + self.parser = InventoryParser(filename=host_list) + self.groups = self.parser.groups.values() + except: + if shebang_present: + raise errors.AnsibleError("The file %s looks like it should be an executable inventory script, but is not marked executable. " % host_list + \ + "Perhaps you want to correct this with `chmod +x %s`?" 
% host_list) + else: + raise + + vars_loader.add_directory(self.basedir(), with_subdir=True) + else: + raise errors.AnsibleError("Unable to find an inventory file, specify one with -i ?") + + self._vars_plugins = [ x for x in vars_loader.all(self) ] + + # FIXME: shouldn't be required, since the group/host vars file + # management will be done in VariableManager + # get group vars from group_vars/ files and vars plugins + for group in self.groups: + # FIXME: combine_vars + group.vars = combine_vars(group.vars, self.get_group_variables(group.name)) + + # get host vars from host_vars/ files and vars plugins + for host in self.get_hosts(): + # FIXME: combine_vars + host.vars = combine_vars(host.vars, self.get_host_variables(host.name)) -### Questiony things: -### Do we want patterns to apply to both groups and hosts or only to hosts? -### jimi-c: Current code should do both as we're parsing things you can -### give to the -i commandline switch which can mix hosts and groups. -### like: `hosts: group1:group2&host3` -### toshio: should we move parsing the commandline out and then have that -### cli parser pass in a distinct list of hosts to add? -### Think about whether we could and want to go through the pattern_cache for -### standard lookups -### Is this the current architecture: -### We have a single Inventory per runner. -### The Inventory may be initialized via: -### an ini file -### a directory of ini files -### a script -### a , separated string of hosts -### a list of hosts -### host_vars/* -### group_vars/* -### Do we want to change this so that multiple sources are allowed? -### ansible -i /etc/ansible,./inventory,/opt/ansible/inventory_plugins/ec2.py,localhost -### jimi-c: We don't currently have multiple inventory sources explicitly -### allowed but you can specify an inventory directory and then have multiple -### sources inside of that. -### toshio: So do we want to make that available to people since we have to do it anyway? 
-### jimi-c: Also, what calls Inventory? TaskExecutor probably makes sense in v2 -### What are vars_loaders? What's their scope? Why aren't the parsing of -### inventory files and scripts implemented as a vars_loader? -### jimi-c: vars_loaders are plugins to do additional variable loading. -### svg has some inhouse. -### Could theoretically rewrite the current loading to be handled by a plugin -### If we have add_group(), why no merge_group()? -### group = inven.get_group(name) -### if not group: -### group = Group(name) -### inven.add_group(group) -### -### vs -### group = Group(name) -### try: -### inven.add_group(group) -### except: -### inven.merge_group(group) -### -### vs: -### group = Group(name) -### inven.add_or_merge(group) + def _match(self, str, pattern_str): + try: + if pattern_str.startswith('~'): + return re.search(pattern_str[1:], str) + else: + return fnmatch.fnmatch(str, pattern_str) + except Exception, e: + raise errors.AnsibleError('invalid host pattern: %s' % pattern_str) -from .. plugins.inventory.aggregate import InventoryAggregateParser -from . group import Group -from . host import Host - -class Inventory: - ''' - Create hosts and groups from inventory - - Retrieve the hosts and groups that ansible knows about from this - class. - - Retrieve raw variables (non-expanded) from the Group and Host classes - returned from here. - ''' - def __init__(self, inventory_list=C.DEFAULT_HOST_LIST, vault_password=None): - ''' - :kwarg inventory_list: A list of inventory sources. This may be file - names which will be parsed as ini-like files, executable scripts - which return inventory data as json, directories of both of the above, - or hostnames. 
Files and directories are - :kwarg vault_password: Password to use if any of the inventory sources - are in an ansible vault - ''' - self.vault_password = vault_password - - self.parser = InventoryAggregateParser(inventory_list) - self.parser.parse() - self.hosts = self.parser.hosts - self.groups = self.parser.groups + def _match_list(self, items, item_attr, pattern_str): + results = [] + try: + if not pattern_str.startswith('~'): + pattern = re.compile(fnmatch.translate(pattern_str)) + else: + pattern = re.compile(pattern_str[1:]) + except Exception, e: + raise errors.AnsibleError('invalid host pattern: %s' % pattern_str) + for item in items: + if pattern.match(getattr(item, item_attr)): + results.append(item) + return results def get_hosts(self, pattern="all"): - ''' - Find all hosts matching a pattern string + """ + find all host names matching a pattern string, taking into account any inventory restrictions or + applied subsets. + """ - This also takes into account any inventory restrictions or applied - subsets. + # process patterns + if isinstance(pattern, list): + pattern = ';'.join(pattern) + patterns = pattern.replace(";",":").split(":") + hosts = self._get_hosts(patterns) - :kwarg pattern: An fnmatch pattern that hosts must match on. Multiple - patterns may be separated by ";" and ":". Defaults to the special - pattern "all" which means to return all hosts. - :returns: list of hosts - ''' - pass + # exclude hosts not in a subset, if defined + if self._subset: + subset = self._get_hosts(self._subset) + hosts = [ h for h in hosts if h in subset ] + + # exclude hosts mentioned in any restriction (ex: failed hosts) + if self._restriction is not None: + hosts = [ h for h in hosts if h in self._restriction ] + if self._also_restriction is not None: + hosts = [ h for h in hosts if h in self._also_restriction ] + + return hosts + + def _get_hosts(self, patterns): + """ + finds hosts that match a list of patterns. 
Handles negative + matches as well as intersection matches. + """ + + # Host specifiers should be sorted to ensure consistent behavior + pattern_regular = [] + pattern_intersection = [] + pattern_exclude = [] + for p in patterns: + if p.startswith("!"): + pattern_exclude.append(p) + elif p.startswith("&"): + pattern_intersection.append(p) + elif p: + pattern_regular.append(p) + + # if no regular pattern was given, hence only exclude and/or intersection + # make that magically work + if pattern_regular == []: + pattern_regular = ['all'] + + # when applying the host selectors, run those without the "&" or "!" + # first, then the &s, then the !s. + patterns = pattern_regular + pattern_intersection + pattern_exclude + + hosts = [] + + for p in patterns: + # avoid resolving a pattern that is a plain host + if p in self._hosts_cache: + hosts.append(self.get_host(p)) + else: + that = self.__get_hosts(p) + if p.startswith("!"): + hosts = [ h for h in hosts if h not in that ] + elif p.startswith("&"): + hosts = [ h for h in hosts if h in that ] + else: + to_append = [ h for h in that if h.name not in [ y.name for y in hosts ] ] + hosts.extend(to_append) + return hosts + + def __get_hosts(self, pattern): + """ + finds hosts that positively match a particular pattern. Does not + take into account negative matches. + """ + + if pattern in self._pattern_cache: + return self._pattern_cache[pattern] + + (name, enumeration_details) = self._enumeration_info(pattern) + hpat = self._hosts_in_unenumerated_pattern(name) + result = self._apply_ranges(pattern, hpat) + self._pattern_cache[pattern] = result + return result + + def _enumeration_info(self, pattern): + """ + returns (pattern, limits) taking a regular pattern and finding out + which parts of it correspond to start/stop offsets. 
limits is + a tuple of (start, stop) or None + """ + + # Do not parse regexes for enumeration info + if pattern.startswith('~'): + return (pattern, None) + + # The regex used to match on the range, which can be [x] or [x-y]. + pattern_re = re.compile("^(.*)\[([-]?[0-9]+)(?:(?:-)([0-9]+))?\](.*)$") + m = pattern_re.match(pattern) + if m: + (target, first, last, rest) = m.groups() + first = int(first) + if last: + if first < 0: + raise errors.AnsibleError("invalid range: negative indices cannot be used as the first item in a range") + last = int(last) + else: + last = first + return (target, (first, last)) + else: + return (pattern, None) + + def _apply_ranges(self, pat, hosts): + """ + given a pattern like foo, that matches hosts, return all of hosts + given a pattern like foo[0:5], where foo matches hosts, return the first 6 hosts + """ + + # If there are no hosts to select from, just return the + # empty set. This prevents trying to do selections on an empty set. + # issue#6258 + if not hosts: + return hosts + + (loose_pattern, limits) = self._enumeration_info(pat) + if not limits: + return hosts + + (left, right) = limits + + if left == '': + left = 0 + if right == '': + right = 0 + left=int(left) + right=int(right) + try: + if left != right: + return hosts[left:right] + else: + return [ hosts[left] ] + except IndexError: + raise errors.AnsibleError("no hosts matching the pattern '%s' were found" % pat) + + def _create_implicit_localhost(self, pattern): + new_host = Host(pattern) + new_host.set_variable("ansible_python_interpreter", sys.executable) + new_host.set_variable("ansible_connection", "local") + new_host.ipv4_address = '127.0.0.1' + + ungrouped = self.get_group("ungrouped") + if ungrouped is None: + self.add_group(Group('ungrouped')) + ungrouped = self.get_group('ungrouped') + self.get_group('all').add_child_group(ungrouped) + ungrouped.add_host(new_host) + return new_host + + def _hosts_in_unenumerated_pattern(self, pattern): + """ Get all host names 
matching the pattern """ + + results = [] + hosts = [] + hostnames = set() + + # ignore any negative checks here, this is handled elsewhere + pattern = pattern.replace("!","").replace("&", "") + + def __append_host_to_results(host): + if host not in results and host.name not in hostnames: + hostnames.add(host.name) + results.append(host) + + groups = self.get_groups() + for group in groups: + if pattern == 'all': + for host in group.get_hosts(): + __append_host_to_results(host) + else: + if self._match(group.name, pattern): + for host in group.get_hosts(): + __append_host_to_results(host) + else: + matching_hosts = self._match_list(group.get_hosts(), 'name', pattern) + for host in matching_hosts: + __append_host_to_results(host) + + if pattern in ["localhost", "127.0.0.1"] and len(results) == 0: + new_host = self._create_implicit_localhost(pattern) + results.append(new_host) + return results def clear_pattern_cache(self): - ''' - Invalidate the pattern cache - ''' - #### Possibly not needed? 
- # Former docstring: - # Called exclusively by the add_host plugin to allow patterns to be - # recalculated - pass + ''' called exclusively by the add_host plugin to allow patterns to be recalculated ''' + self._pattern_cache = {} def groups_for_host(self, host): - ### Remove in favour of - ### inventory.hosts[host].groups.keys() - ''' - Return the groupnames to which a host belongs - - :arg host: Name of host to lookup - :returns: list of groupnames - ''' - pass + if host in self._hosts_cache: + return self._hosts_cache[host].get_groups() + else: + return [] def groups_list(self): - ''' - Return a mapping of group name to hostnames which belong to the group - - :returns: dict of groupnames mapped to a list of hostnames within that group - ''' - pass + if not self._groups_list: + groups = {} + for g in self.groups: + groups[g.name] = [h.name for h in g.get_hosts()] + ancestors = g.get_ancestors() + for a in ancestors: + if a.name not in groups: + groups[a.name] = [h.name for h in a.get_hosts()] + self._groups_list = groups + return self._groups_list def get_groups(self): - ### Remove in favour of inventory.groups.values() - ''' - Retrieve the Group objects known to the Inventory - - :returns: list of :class:`Group`s belonging to the Inventory - ''' - pass + return self.groups def get_host(self, hostname): - ### Remove in favour of inventory.hosts.values() - ''' - Retrieve the Host object for a hostname + if hostname not in self._hosts_cache: + self._hosts_cache[hostname] = self._get_host(hostname) + return self._hosts_cache[hostname] - :arg hostname: hostname associated with the :class:`Host` - :returns: :class:`Host` object whose hostname was requested - ''' - pass + def _get_host(self, hostname): + if hostname in ['localhost','127.0.0.1']: + for host in self.get_group('all').get_hosts(): + if host.name in ['localhost', '127.0.0.1']: + return host + return self._create_implicit_localhost(hostname) + else: + for group in self.groups: + for host in 
group.get_hosts(): + if hostname == host.name: + return host + return None def get_group(self, groupname): - ### Revmoe in favour of inventory.groups.groupname - ''' - Retrieve the Group object for a groupname - - :arg groupname: groupname associated with the :class:`Group` - :returns: :class:`Group` object whose groupname was requested - ''' - pass + for group in self.groups: + if group.name == groupname: + return group + return None def get_group_variables(self, groupname, update_cached=False, vault_password=None): - ### Remove in favour of inventory.groups[groupname].get_vars() - ''' - Retrieve the variables set on a group + if groupname not in self._vars_per_group or update_cached: + self._vars_per_group[groupname] = self._get_group_variables(groupname, vault_password=vault_password) + return self._vars_per_group[groupname] - :arg groupname: groupname to retrieve variables for - :kwarg update_cached: if True, retrieve the variables from the source - and refresh the cache for this variable - :kwarg vault_password: Password to use if any of the inventory sources - are in an ansible vault - :returns: dict mapping group variable names to values - ''' - pass + def _get_group_variables(self, groupname, vault_password=None): + + group = self.get_group(groupname) + if group is None: + raise Exception("group not found: %s" % groupname) + + vars = {} + + # plugin.get_group_vars retrieves just vars for specific group + vars_results = [ plugin.get_group_vars(group, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'get_group_vars')] + for updated in vars_results: + if updated is not None: + # FIXME: combine_vars + vars = combine_vars(vars, updated) + + # Read group_vars/ files + # FIXME: combine_vars + vars = combine_vars(vars, self.get_group_vars(group)) + + return vars def get_variables(self, hostname, update_cached=False, vault_password=None): - ### Remove in favour of inventory.hosts[hostname].get_vars() - ''' - Retrieve the variables 
set on a host - :arg hostname: hostname to retrieve variables for - :kwarg update_cached: if True, retrieve the variables from the source - and refresh the cache for this variable - :kwarg vault_password: Password to use if any of the inventory sources - are in an ansible vault - :returns: dict mapping host variable names to values - ''' - ### WARNING: v1 implementation ignores update_cached and vault_password - pass + host = self.get_host(hostname) + if not host: + raise Exception("host not found: %s" % hostname) + return host.get_variables() def get_host_variables(self, hostname, update_cached=False, vault_password=None): - ### Remove in favour of inventory.hosts[hostname].get_vars() - ''' - Retrieve the variables set on a host - :arg hostname: hostname to retrieve variables for - :kwarg update_cached: if True, retrieve the variables from the source - and refresh the cache for this variable - :kwarg vault_password: Password to use if any of the inventory sources - are in an ansible vault - :returns: dict mapping host variable names to values - ''' - pass + if hostname not in self._vars_per_host or update_cached: + self._vars_per_host[hostname] = self._get_host_variables(hostname, vault_password=vault_password) + return self._vars_per_host[hostname] + + def _get_host_variables(self, hostname, vault_password=None): + + host = self.get_host(hostname) + if host is None: + raise errors.AnsibleError("host not found: %s" % hostname) + + vars = {} + + # plugin.run retrieves all vars (also from groups) for host + vars_results = [ plugin.run(host, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'run')] + for updated in vars_results: + if updated is not None: + # FIXME: combine_vars + vars = combine_vars(vars, updated) + + # plugin.get_host_vars retrieves just vars for specific host + vars_results = [ plugin.get_host_vars(host, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'get_host_vars')] + for 
updated in vars_results: + if updated is not None: + # FIXME: combine_vars + vars = combine_vars(vars, updated) + + # still need to check InventoryParser per host vars + # which actually means InventoryScript per host, + # which is not performant + if self.parser is not None: + # FIXME: combine_vars + vars = combine_vars(vars, self.parser.get_host_variables(host)) + + # Read host_vars/ files + # FIXME: combine_vars + vars = combine_vars(vars, self.get_host_vars(host)) + + return vars def add_group(self, group): - ### Possibly remove in favour of inventory.groups[groupname] = group - ''' - Add a new group to the inventory - - :arg group: Group object to add to the inventory - ''' - pass + if group.name not in self.groups_list(): + self.groups.append(group) + self._groups_list = None # invalidate internal cache + else: + raise errors.AnsibleError("group already in inventory: %s" % group.name) def list_hosts(self, pattern="all"): - ### Remove in favour of: inventory.hosts.keys()? Maybe not as pattern is here - ''' - Retrieve a list of hostnames for a pattern - :kwarg pattern: Retrieve hosts which match this pattern. The special - pattern "all" matches every host the inventory knows about. - :returns: list of hostnames - ''' - ### Notes: Differences with get_hosts: - ### get_hosts returns hosts, this returns host names - ### This adds the implicit localhost/127.0.0.1 as a name but not as - ### a host - pass + """ return a list of hostnames for a pattern """ + + result = [ h for h in self.get_hosts(pattern) ] + if len(result) == 0 and pattern in ["localhost", "127.0.0.1"]: + result = [pattern] + return result def list_groups(self): - ### Remove in favour of: inventory.groups.keys() - ''' - Retrieve list of groupnames - :returns: list of groupnames - ''' - pass + return sorted([ g.name for g in self.groups ], key=lambda x: x) - def get_restriction(self): - ''' - Accessor for the private _restriction attribute. - ''' - ### Note: In v1, says to be removed. 
- ### Not used by anything at all. - pass - - def restrict_to(self, restriction): - ''' - Restrict get and list operations to hosts given in the restriction - - :arg restriction: - ''' - ### The v1 docstring says: - ### Used by the main playbook code to exclude failed hosts, don't use - ### this for other reasons - pass - - def lift_restriction(self): - ''' - Remove a restriction - ''' - pass + def restrict_to_hosts(self, restriction): + """ + Restrict list operations to the hosts given in restriction. This is used + to exclude failed hosts in main playbook code, don't use this for other + reasons. + """ + if not isinstance(restriction, list): + restriction = [ restriction ] + self._restriction = restriction def also_restrict_to(self, restriction): - ''' - Restrict get and list operations to hosts in the additional restriction - ''' - ### Need to explore use case here -- maybe we want to restrict for - ### several different reasons. Within a certain scope we restrict - ### again for a separate reason? - pass - - def lift_also_restriction(self): - ''' - Remove an also_restriction - ''' - # HACK -- dead host skipping - pass - + """ + Works like restict_to but offers an additional restriction. Playbooks use this + to implement serial behavior. + """ + if not isinstance(restriction, list): + restriction = [ restriction ] + self._also_restriction = restriction + def subset(self, subset_pattern): - """ + """ Limits inventory results to a subset of inventory that matches a given - pattern, such as to select a subset of a hosts selection that also - belongs to a certain geographic group or numeric slice. + pattern, such as to select a given geographic of numeric slice amongst + a previous 'hosts' selection that only select roles, or vice versa. 
Corresponds to --limit parameter to ansible-playbook + """ + if subset_pattern is None: + self._subset = None + else: + subset_pattern = subset_pattern.replace(',',':') + subset_pattern = subset_pattern.replace(";",":").split(":") + results = [] + # allow Unix style @filename data + for x in subset_pattern: + if x.startswith("@"): + fd = open(x[1:]) + results.extend(fd.read().split("\n")) + fd.close() + else: + results.append(x) + self._subset = results - :arg subset_pattern: The pattern to limit with. If this is None it - clears the subset. Multiple patterns may be specified as a comma, - semicolon, or colon separated string. - """ - pass + def remove_restriction(self): + """ Do not restrict list operations """ + self._restriction = None + + def lift_also_restriction(self): + """ Clears the also restriction """ + self._also_restriction = None def is_file(self): - ''' - Did inventory come from a file? - - :returns: True if the inventory is file based, False otherwise - ''' - pass + """ did inventory come from a file? """ + if not isinstance(self.host_list, basestring): + return False + return os.path.exists(self.host_list) def basedir(self): - ''' - What directory was inventory read from - - :returns: the path to the directory holding the inventory. None if - the inventory is not file based - ''' - pass + """ if inventory came from a file, what's the directory? """ + if not self.is_file(): + return None + dname = os.path.dirname(self.host_list) + if dname is None or dname == '' or dname == '.': + cwd = os.getcwd() + return os.path.abspath(cwd) + return os.path.abspath(dname) def src(self): - ''' - What's the complete path to the inventory file? - - :returns: Complete path to the inventory file. None if inventory is - not file-based - ''' - pass + """ if inventory came from a file, what's the directory and file name? 
""" + if not self.is_file(): + return None + return self.host_list def playbook_basedir(self): - ''' - Retrieve the directory of the current playbook - ''' - ### I want to move this out of inventory - - pass + """ returns the directory of the current playbook """ + return self._playbook_basedir def set_playbook_basedir(self, dir): - ''' - Tell Inventory the basedir of the current playbook so Inventory can - look for host_vars and group_vars there. - ''' - ### I want to move this out of inventory - pass + """ + sets the base directory of the playbook so inventory can use it as a + basedir for host_ and group_vars, and other things. + """ + # Only update things if dir is a different playbook basedir + if dir != self._playbook_basedir: + self._playbook_basedir = dir + # get group vars from group_vars/ files + for group in self.groups: + # FIXME: combine_vars + group.vars = combine_vars(group.vars, self.get_group_vars(group, new_pb_basedir=True)) + # get host vars from host_vars/ files + for host in self.get_hosts(): + # FIXME: combine_vars + host.vars = combine_vars(host.vars, self.get_host_vars(host, new_pb_basedir=True)) + # invalidate cache + self._vars_per_host = {} + self._vars_per_group = {} def get_host_vars(self, host, new_pb_basedir=False): - ''' - Loads variables from host_vars/ - - The variables are loaded from subdirectories located either in the - inventory base directory or the playbook base directory. Variables in - the playbook dir will win over the inventory dir if files are in both. 
- ''' - pass + """ Read host_vars/ files """ + return self._get_hostgroup_vars(host=host, group=None, new_pb_basedir=new_pb_basedir) def get_group_vars(self, group, new_pb_basedir=False): - ''' - Loads variables from group_vars/ + """ Read group_vars/ files """ + return self._get_hostgroup_vars(host=None, group=group, new_pb_basedir=new_pb_basedir) + + def _get_hostgroup_vars(self, host=None, group=None, new_pb_basedir=False): + """ + Loads variables from group_vars/ and host_vars/ in directories parallel + to the inventory base directory or in the same directory as the playbook. Variables in the playbook + dir will win over the inventory dir if files are in both. + """ + + results = {} + scan_pass = 0 + _basedir = self.basedir() + + # look in both the inventory base directory and the playbook base directory + # unless we do an update for a new playbook base dir + if not new_pb_basedir: + basedirs = [_basedir, self._playbook_basedir] + else: + basedirs = [self._playbook_basedir] + + for basedir in basedirs: + + # this can happen from particular API usages, particularly if not run + # from /usr/bin/ansible-playbook + if basedir is None: + continue + + scan_pass = scan_pass + 1 + + # it's not an eror if the directory does not exist, keep moving + if not os.path.exists(basedir): + continue + + # save work of second scan if the directories are the same + if _basedir == self._playbook_basedir and scan_pass != 1: + continue + + # FIXME: these should go to VariableManager + if group and host is None: + # load vars in dir/group_vars/name_of_group + base_path = os.path.join(basedir, "group_vars/%s" % group.name) + self._variable_manager.add_group_vars_file(base_path, self._loader) + elif host and group is None: + # same for hostvars in dir/host_vars/name_of_host + base_path = os.path.join(basedir, "host_vars/%s" % host.name) + self._variable_manager.add_host_vars_file(base_path, self._loader) + + # all done, results is a dictionary of variables for this particular host. 
+ return results - The variables are loaded from subdirectories located either in the - inventory base directory or the playbook base directory. Variables in - the playbook dir will win over the inventory dir if files are in both. - ''' - pass diff --git a/v2/ansible/inventory/dir.py b/v2/ansible/inventory/dir.py new file mode 100644 index 0000000000..9ac23fff89 --- /dev/null +++ b/v2/ansible/inventory/dir.py @@ -0,0 +1,229 @@ +# (c) 2013, Daniel Hokka Zakrisson +# (c) 2014, Serge van Ginderachter +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +############################################# + +import os +import ansible.constants as C +from ansible.inventory.host import Host +from ansible.inventory.group import Group +from ansible.inventory.ini import InventoryParser +from ansible.inventory.script import InventoryScript +from ansible import utils +from ansible import errors + +class InventoryDirectory(object): + ''' Host inventory parser for ansible using a directory of inventories. 
class InventoryDirectory(object):
    ''' Host inventory parser for ansible using a directory of inventories.

    Every file in the directory is parsed with a sub-parser chosen by type
    (directory -> nested InventoryDirectory, executable -> InventoryScript,
    anything else -> InventoryParser) and the resulting hosts/groups are
    merged into a single, de-duplicated namespace on this object.
    '''

    def __init__(self, filename=C.DEFAULT_HOST_LIST):
        # ``filename`` is actually a directory path here; each entry becomes
        # a potential inventory source.
        self.names = os.listdir(filename)
        self.names.sort()
        self.directory = filename
        self.parsers = []
        self.hosts = {}
        self.groups = {}

        for i in self.names:

            # Skip files that end with certain extensions or characters
            if any(i.endswith(ext) for ext in ("~", ".orig", ".bak", ".ini", ".retry", ".pyc", ".pyo")):
                continue
            # Skip hidden files
            if i.startswith('.') and not i.startswith('./'):
                continue
            # These are things inside of an inventory basedir
            if i in ("host_vars", "group_vars", "vars_plugins"):
                continue
            fullpath = os.path.join(self.directory, i)
            if os.path.isdir(fullpath):
                parser = InventoryDirectory(filename=fullpath)
            elif utils.is_executable(fullpath):
                parser = InventoryScript(filename=fullpath)
            else:
                parser = InventoryParser(filename=fullpath)
            self.parsers.append(parser)

            # retrieve all groups and hosts from the parser and add them to
            # self, don't look at group lists yet, to avoid
            # recursion trouble, but just make sure all objects exist in self
            newgroups = parser.groups.values()
            for group in newgroups:
                for host in group.hosts:
                    self._add_host(host)
            for group in newgroups:
                self._add_group(group)

        # now check the objects lists so they contain only objects from
        # self; membership data in groups is already fine (except all &
        # ungrouped, see later), but might still reference objects not in self
        for group in self.groups.values():
            # iterate on a copy of the lists, as those lists get changed in
            # the loop
            # list with group's child group objects:
            for child in group.child_groups[:]:
                if child != self.groups[child.name]:
                    group.child_groups.remove(child)
                    group.child_groups.append(self.groups[child.name])
            # list with group's parent group objects:
            for parent in group.parent_groups[:]:
                if parent != self.groups[parent.name]:
                    group.parent_groups.remove(parent)
                    group.parent_groups.append(self.groups[parent.name])
            # list with group's host objects:
            for host in group.hosts[:]:
                if host != self.hosts[host.name]:
                    group.hosts.remove(host)
                    group.hosts.append(self.hosts[host.name])
                # also check here that the group that contains host, is
                # also contained in the host's group list
                if group not in self.hosts[host.name].groups:
                    self.hosts[host.name].groups.append(group)

        # extra checks on special groups all and ungrouped
        # remove hosts from 'ungrouped' if they became member of other groups
        if 'ungrouped' in self.groups:
            ungrouped = self.groups['ungrouped']
            # loop on a copy of ungrouped hosts, as we want to change that list
            for host in ungrouped.hosts[:]:
                if len(host.groups) > 1:
                    host.groups.remove(ungrouped)
                    ungrouped.hosts.remove(host)

        # remove hosts from 'all' if they became member of other groups
        # all should only contain direct children, not grandchildren
        # direct children should have depth == 1
        if 'all' in self.groups:
            allgroup = self.groups['all']
            # loop on a copy of all's child groups, as we want to change that list
            for group in allgroup.child_groups[:]:
                # groups might once have been added to all, and later be added
                # to another group: we need to remove the link with all then
                if len(group.parent_groups) > 1 and allgroup in group.parent_groups:
                    # real children of all have just 1 parent, all
                    # this one has more, so not a direct child of all anymore
                    group.parent_groups.remove(allgroup)
                    allgroup.child_groups.remove(group)
                elif allgroup not in group.parent_groups:
                    # this group was once added to all, but doesn't list it as
                    # a parent any more; the info in the group is the correct
                    # info
                    allgroup.child_groups.remove(group)

    def _add_group(self, group):
        """ Merge an existing group or add a new one;
            Track parent and child groups, and hosts of the new one """

        if group.name not in self.groups:
            # it's brand new, add him!
            self.groups[group.name] = group
        if self.groups[group.name] != group:
            # different object, merge
            self._merge_groups(self.groups[group.name], group)

    def _add_host(self, host):
        """ Merge an existing host or add a new one, by name. """
        if host.name not in self.hosts:
            # Papa's got a brand new host
            self.hosts[host.name] = host
        if self.hosts[host.name] != host:
            # different object, merge
            self._merge_hosts(self.hosts[host.name], host)

    def _merge_groups(self, group, newgroup):
        """ Merge all of instance newgroup into group,
            update parent/child relationships
            group lists may still contain group objects that exist in self with
            same name, but was instantiated as a different object in some other
            inventory parser; these are handled later """

        # name
        if group.name != newgroup.name:
            raise errors.AnsibleError("Cannot merge group %s with %s" % (group.name, newgroup.name))

        # depth: keep the deeper of the two nesting levels
        group.depth = max([group.depth, newgroup.depth])

        # hosts list (host objects are by now already added to self.hosts)
        for host in newgroup.hosts:
            grouphosts = dict([(h.name, h) for h in group.hosts])
            if host.name in grouphosts:
                # same host name but different object, merge
                self._merge_hosts(grouphosts[host.name], host)
            else:
                # new membership, add host to group from self
                # group from self will also be added again to host.groups, but
                # as different object
                group.add_host(self.hosts[host.name])
                # now remove the old group object from host.groups
                for hostgroup in [g for g in host.groups]:
                    if hostgroup.name == group.name and hostgroup != self.groups[group.name]:
                        self.hosts[host.name].groups.remove(hostgroup)

        # group child membership relation
        for newchild in newgroup.child_groups:
            # dict with existing child groups:
            childgroups = dict([(g.name, g) for g in group.child_groups])
            # check if child of new group is already known as a child
            if newchild.name not in childgroups:
                self.groups[group.name].add_child_group(newchild)

        # group parent membership relation
        for newparent in newgroup.parent_groups:
            # dict with existing parent groups:
            parentgroups = dict([(g.name, g) for g in group.parent_groups])
            # check if parent of new group is already known as a parent
            if newparent.name not in parentgroups:
                if newparent.name not in self.groups:
                    # group does not exist yet in self, import him
                    self.groups[newparent.name] = newparent
                # group now exists but not yet as a parent here
                self.groups[newparent.name].add_child_group(group)

        # variables: newgroup's vars win on key collisions
        group.vars = utils.combine_vars(group.vars, newgroup.vars)

    def _merge_hosts(self, host, newhost):
        """ Merge all of instance newhost into host """

        # name
        if host.name != newhost.name:
            raise errors.AnsibleError("Cannot merge host %s with %s" % (host.name, newhost.name))

        # group membership relation
        for newgroup in newhost.groups:
            # dict with existing groups:
            hostgroups = dict([(g.name, g) for g in host.groups])
            # check if new group is already known as a group
            if newgroup.name not in hostgroups:
                if newgroup.name not in self.groups:
                    # group does not exist yet in self, import him
                    self.groups[newgroup.name] = newgroup
                # group now exists but doesn't have host yet
                self.groups[newgroup.name].add_host(host)

        # variables: newhost's vars win on key collisions
        host.vars = utils.combine_vars(host.vars, newhost.vars)

    def get_host_variables(self, host):
        """ Gets additional host variables from all inventories """
        vars = {}
        for i in self.parsers:
            vars.update(i.get_host_variables(host))
        return vars
def detect_range(line = None):
    '''
    Check whether a host line contains a [beg:end] range pattern as
    described in the module docstring.

    Returns True if the given line contains a pattern, else False.
    '''
    # A pattern is present exactly when '[' is found (index >= 0) before
    # ':' which in turn comes before ']'.
    return 0 <= line.find("[") < line.find(":") < line.find("]")

def expand_hostname_range(line = None):
    '''
    Expand a line containing a [beg:end] (or [beg:end:step]) pattern into
    the list of hostnames it denotes.

    The '[' and ']' characters are used to maintain the pseudo-code
    appearance. They are replaced in this function with '|' to ease
    string splitting.

    References: http://ansible.github.com/patterns.html#hosts-and-groups
    '''
    expanded = []
    if line:
        # A hostname such as db[1:6]-node has three parts: head 'db',
        # the range '1:6', and tail '-node'.  Only the FIRST [...] group
        # is split off here; any further ranges left in the tail are
        # handled by recursing on each candidate below, so forms like
        # db[01:10:3]node-[01:10] work too.
        # FIXME: make the step work for alphabetic sequences too.
        head, middle, tail = line.replace('[', '|', 1).replace(']', '|', 1).split('|')

        bounds = middle.split(":")
        if len(bounds) not in (2, 3):
            raise errors.AnsibleError("host range incorrectly specified")
        beg = bounds[0]
        end = bounds[1]
        # optional third field is the step (default 1), e.g. [01:10:2]
        step = bounds[2] if len(bounds) == 3 else 1
        if not beg:
            beg = "0"
        if not end:
            raise errors.AnsibleError("host range end value missing")

        # A left-zero-padded start (e.g. '001') fixes the width of every
        # generated number; the end bound must then use the same width.
        if beg[0] == '0' and len(beg) > 1:
            width = len(beg)
            if width != len(end):
                raise errors.AnsibleError("host range format incorrectly specified!")
            fill = lambda value: str(value).zfill(width)
        else:
            fill = str

        try:
            # alphabetic range, e.g. [a:c] -> a, b, c (step is ignored here)
            lo = string.ascii_letters.index(beg)
            hi = string.ascii_letters.index(end)
            if lo > hi:
                raise errors.AnsibleError("host range format incorrectly specified!")
            seq = string.ascii_letters[lo:hi + 1]
        except ValueError:  # not an alpha range, so it must be numeric
            seq = range(int(beg), int(end) + 1, int(step))

        for item in seq:
            candidate = ''.join((head, fill(item), tail))
            if detect_range(candidate):
                # tail still holds another [...] group: expand recursively
                expanded.extend(expand_hostname_range(candidate))
            else:
                expanded.append(candidate)

    return expanded
class Group:
    ''' a group of ansible hosts '''

    #__slots__ = [ 'name', 'hosts', 'vars', 'child_groups', 'parent_groups', 'depth', '_hosts_cache' ]

    def __init__(self, name=None):
        # nesting depth: 0 for top-level groups, bumped by add_child_group
        self.depth = 0
        self.name = name
        self.hosts = []
        self.vars = {}
        self.child_groups = []
        self.parent_groups = []
        # memoized result of _get_hosts(); invalidated by clear_hosts_cache()
        self._hosts_cache = None

        #self.clear_hosts_cache()
        #if self.name is None:
        #    raise Exception("group name is required")

    def __repr__(self):
        return self.get_name()

    def __getstate__(self):
        return self.serialize()

    def __setstate__(self, data):
        return self.deserialize(data)

    def serialize(self):
        ''' Build a plain-dict representation for pickling (parents serialized recursively). '''
        result = dict(
            name=self.name,
            vars=self.vars.copy(),
            parent_groups=[parent.serialize() for parent in self.parent_groups],
            depth=self.depth,
        )

        debug("serializing group, result is: %s" % result)
        return result

    def deserialize(self, data):
        ''' Restore state from a dict produced by serialize(). '''
        debug("deserializing group, data is: %s" % data)
        self.__init__()
        self.name = data.get('name')
        self.vars = data.get('vars', dict())

        for parent_data in data.get('parent_groups', []):
            parent = Group()
            parent.deserialize(parent_data)
            self.parent_groups.append(parent)

    def get_name(self):
        return self.name

    def add_child_group(self, group):
        ''' Link group under self, keeping depths and back-references consistent. '''
        if self == group:
            raise Exception("can't add group to itself")

        # a group that is already a child is left completely untouched
        if group not in self.child_groups:
            self.child_groups.append(group)

            # the child sits at least one level below us...
            group.depth = max(self.depth + 1, group.depth)

            # ...and so do all of its descendants
            group._check_children_depth()

            # record the reverse link too, but only if there isn't already
            # a parent with the same name
            if self.name not in [g.name for g in group.parent_groups]:
                group.parent_groups.append(self)

            self.clear_hosts_cache()

    def _check_children_depth(self):
        ''' Recursively push depth updates down the child tree. '''
        for child in self.child_groups:
            child.depth = max(self.depth + 1, child.depth)
            child._check_children_depth()

    def add_host(self, host):
        self.hosts.append(host)
        host.add_group(self)
        self.clear_hosts_cache()

    def set_variable(self, key, value):
        self.vars[key] = value

    def clear_hosts_cache(self):
        ''' Invalidate the cached host list, here and in every ancestor. '''
        self._hosts_cache = None
        for parent in self.parent_groups:
            parent.clear_hosts_cache()

    def get_hosts(self):
        if self._hosts_cache is None:
            self._hosts_cache = self._get_hosts()
        return self._hosts_cache

    def _get_hosts(self):
        ''' Collect hosts of all child groups first, then our own, without duplicates. '''
        collected = []
        seen = set()
        for child in self.child_groups:
            for child_host in child.get_hosts():
                if child_host not in seen:
                    seen.add(child_host)
                    collected.append(child_host)
        for own_host in self.hosts:
            if own_host not in seen:
                seen.add(own_host)
                collected.append(own_host)
        return collected

    def get_vars(self):
        return self.vars.copy()

    def _get_ancestors(self):
        ''' Map of every transitive parent group, keyed by name. '''
        ancestors = {}
        for parent in self.parent_groups:
            ancestors[parent.name] = parent
            ancestors.update(parent._get_ancestors())
        return ancestors

    def get_ancestors(self):
        return self._get_ancestors().values()
class Host:
    ''' a single ansible host '''

    #__slots__ = [ 'name', 'vars', 'groups' ]

    def __getstate__(self):
        return self.serialize()

    def __setstate__(self, data):
        return self.deserialize(data)

    def serialize(self):
        ''' Build a plain-dict representation for pickling (groups serialized recursively). '''
        return dict(
            name=self.name,
            vars=self.vars.copy(),
            ipv4_address=self.ipv4_address,
            ipv6_address=self.ipv6_address,
            port=self.port,
            gathered_facts=self._gathered_facts,
            groups=[group.serialize() for group in self.groups],
        )

    def deserialize(self, data):
        ''' Restore state from a dict produced by serialize(). '''
        self.__init__()

        self.name = data.get('name')
        self.vars = data.get('vars', dict())
        self.ipv4_address = data.get('ipv4_address', '')
        self.ipv6_address = data.get('ipv6_address', '')
        self.port = data.get('port')

        for group_data in data.get('groups', []):
            group = Group()
            group.deserialize(group_data)
            self.groups.append(group)

    def __init__(self, name=None, port=None):
        self.name = name
        self.vars = {}
        self.groups = []

        # with no better information, both addresses default to the host name
        self.ipv4_address = name
        self.ipv6_address = name

        # only an explicit, non-default port is converted and kept
        self.port = int(port) if port and port != C.DEFAULT_REMOTE_PORT else C.DEFAULT_REMOTE_PORT

        self._gathered_facts = False

    def __repr__(self):
        return self.get_name()

    def get_name(self):
        return self.name

    @property
    def gathered_facts(self):
        return self._gathered_facts

    def set_gathered_facts(self, gathered):
        self._gathered_facts = gathered

    def add_group(self, group):
        self.groups.append(group)

    def set_variable(self, key, value):
        self.vars[key] = value

    def get_groups(self):
        ''' Direct groups plus all of their ancestors, unique by name. '''
        by_name = {}
        for direct in self.groups:
            by_name[direct.name] = direct
            for ancestor in direct.get_ancestors():
                by_name[ancestor.name] = ancestor
        return by_name.values()

    def get_vars(self):
        ''' Flatten group vars (shallowest groups first) with host vars layered on top,
            then add the implicit magic variables. '''
        memberships = self.get_groups()
        results = {}
        for group in sorted(memberships, key=lambda g: g.depth):
            results = combine_vars(results, group.get_vars())
        results = combine_vars(results, self.vars)
        results['inventory_hostname'] = self.name
        results['inventory_hostname_short'] = self.name.split('.')[0]
        results['group_names'] = sorted([g.name for g in memberships if g.name != 'all'])
        return results
class InventoryParser(object):
    """
    Host inventory for ansible.

    Parses an INI-style inventory file in four passes over self.lines:
    base groups/hosts, then [x:children] sections, then linking top-level
    groups under 'all', then [x:vars] sections.
    """

    def __init__(self, filename=C.DEFAULT_HOST_LIST):

        with open(filename) as fh:
            self.lines = fh.readlines()
            self.groups = {}
            self.hosts = {}
            self._parse()

    def _parse(self):
        """ Run all parsing passes; returns the resulting group dict. """

        self._parse_base_groups()
        self._parse_group_children()
        self._add_allgroup_children()
        self._parse_group_variables()
        return self.groups

    @staticmethod
    def _parse_value(v):
        """ Coerce an ini value to a Python literal when possible,
            otherwise return it unchanged as a string. """
        if "#" not in v:
            try:
                return ast.literal_eval(v)
            # Using explicit exceptions.
            # Likely a string that literal_eval does not like. We will then just set it.
            except ValueError:
                # For some reason this was thought to be malformed.
                pass
            except SyntaxError:
                # Is this a hash with an equals at the end?
                pass
        return v

    # [webservers]
    # alpha
    # beta:2345
    # gamma sudo=True user=root
    # delta asdf=jkl favcolor=red

    def _add_allgroup_children(self):
        """ Make every top-level group (depth 0) a child of 'all'. """

        for group in self.groups.values():
            if group.depth == 0 and group.name != 'all':
                self.groups['all'].add_child_group(group)

    def _parse_base_groups(self):
        # FIXME: refactor

        ungrouped = Group(name='ungrouped')
        all = Group(name='all')
        all.add_child_group(ungrouped)

        self.groups = dict(all=all, ungrouped=ungrouped)
        active_group_name = 'ungrouped'

        for line in self.lines:
            line = self._before_comment(line).strip()
            if line.startswith("[") and line.endswith("]"):
                active_group_name = line.replace("[","").replace("]","")
                if ":vars" in line or ":children" in line:
                    # :vars/:children sections are handled by later passes;
                    # just make sure the group exists, then stop collecting
                    # hosts until the next plain [section]
                    active_group_name = active_group_name.rsplit(":", 1)[0]
                    if active_group_name not in self.groups:
                        new_group = self.groups[active_group_name] = Group(name=active_group_name)
                    active_group_name = None
                elif active_group_name not in self.groups:
                    new_group = self.groups[active_group_name] = Group(name=active_group_name)
            elif line.startswith(";") or line == '':
                pass
            elif active_group_name:
                tokens = shlex.split(line)
                if len(tokens) == 0:
                    continue
                hostname = tokens[0]
                port = C.DEFAULT_REMOTE_PORT
                # Three cases to check:
                # 0. A hostname that contains a range pseudo-code and a port
                # 1. A hostname that contains just a port
                if hostname.count(":") > 1:
                    # Possible an IPv6 address, or maybe a host line with multiple ranges
                    # IPv6 with Port  XXX:XXX::XXX.port
                    # FQDN            foo.example.com
                    if hostname.count(".") == 1:
                        (hostname, port) = hostname.rsplit(".", 1)
                elif ("[" in hostname and
                    "]" in hostname and
                    ":" in hostname and
                    (hostname.rindex("]") < hostname.rindex(":")) or
                    ("]" not in hostname and ":" in hostname)):
                        (hostname, port) = hostname.rsplit(":", 1)

                hostnames = []
                if detect_range(hostname):
                    hostnames = expand_hostname_range(hostname)
                else:
                    hostnames = [hostname]

                for hn in hostnames:
                    host = None
                    if hn in self.hosts:
                        host = self.hosts[hn]
                    else:
                        host = Host(name=hn, port=port)
                        self.hosts[hn] = host
                    if len(tokens) > 1:
                        # remaining tokens are key=value host variables
                        for t in tokens[1:]:
                            if t.startswith('#'):
                                break
                            try:
                                (k,v) = t.split("=", 1)
                            except ValueError, e:
                                raise AnsibleError("Invalid ini entry: %s - %s" % (t, str(e)))
                            if k == 'ansible_ssh_host':
                                host.ipv4_address = self._parse_value(v)
                            else:
                                host.set_variable(k, self._parse_value(v))
                    self.groups[active_group_name].add_host(host)

    # [southeast:children]
    # atlanta
    # raleigh

    def _parse_group_children(self):
        """ Second pass: wire up [group:children] sections. """
        group = None

        for line in self.lines:
            line = line.strip()
            if line is None or line == '':
                continue
            if line.startswith("[") and ":children]" in line:
                line = line.replace("[","").replace(":children]","")
                group = self.groups.get(line, None)
                if group is None:
                    group = self.groups[line] = Group(name=line)
            elif line.startswith("#") or line.startswith(";"):
                pass
            elif line.startswith("["):
                # a different kind of section ends the current children block
                group = None
            elif group:
                kid_group = self.groups.get(line, None)
                if kid_group is None:
                    raise AnsibleError("child group is not defined: (%s)" % line)
                else:
                    group.add_child_group(kid_group)

    # [webservers:vars]
    # http_port=1234
    # maxRequestsPerChild=200

    def _parse_group_variables(self):
        """ Final pass: apply [group:vars] sections. """
        group = None
        for line in self.lines:
            line = line.strip()
            if line.startswith("[") and ":vars]" in line:
                line = line.replace("[","").replace(":vars]","")
                group = self.groups.get(line, None)
                if group is None:
                    raise AnsibleError("can't add vars to undefined group: %s" % line)
            elif line.startswith("#") or line.startswith(";"):
                pass
            elif line.startswith("["):
                # a different kind of section ends the current vars block
                group = None
            elif line == '':
                pass
            elif group:
                if "=" not in line:
                    raise AnsibleError("variables assigned to group must be in key=value form")
                else:
                    (k, v) = [e.strip() for e in line.split("=", 1)]
                    group.set_variable(k, self._parse_value(v))

    def get_host_variables(self, host):
        # ini inventories carry no per-host extra variables
        return {}

    def _before_comment(self, msg):
        ''' what's the part of a string before a comment? '''
        # escaped \# survives as a literal '#'
        msg = msg.replace("\#","**NOT_A_COMMENT**")
        msg = msg.split("#")[0]
        msg = msg.replace("**NOT_A_COMMENT**","#")
        return msg
class InventoryScript(object):
    ''' Host inventory parser for ansible using external inventory scripts.

    NOTE(review): the trailing get_host_variables method is truncated in this
    chunk and is therefore not reproduced here.
    '''

    def __init__(self, filename=C.DEFAULT_HOST_LIST):

        # Support inventory scripts that are not prefixed with some
        # path information but happen to be in the current working
        # directory when '.' is not in PATH.
        self.filename = os.path.abspath(filename)
        cmd = [ self.filename, "--list" ]
        try:
            sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except OSError, e:
            raise errors.AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
        (stdout, stderr) = sp.communicate()
        self.data = stdout
        # see comment about _meta below
        self.host_vars_from_top = None
        self.groups = self._parse(stderr)

    def _parse(self, err):
        """ Parse the script's --list JSON output into a dict of Group objects.

        :arg err: the script's stderr, echoed when the output reports failure
        """

        all_hosts = {}

        # not passing from_remote because data from CMDB is trusted
        self.raw = utils.parse_json(self.data)
        self.raw = json_dict_unicode_to_bytes(self.raw)

        all = Group('all')
        groups = dict(all=all)
        group = None

        if 'failed' in self.raw:
            sys.stderr.write(err + "\n")
            raise errors.AnsibleError("failed to parse executable inventory script results: %s" % self.raw)

        for (group_name, data) in self.raw.items():

            # in Ansible 1.3 and later, a "_meta" subelement may contain
            # a variable "hostvars" which contains a hash for each host
            # if this "hostvars" exists at all then do not call --host for each
            # host. This is for efficiency and scripts should still return data
            # if called with --host for backwards compat with 1.2 and earlier.

            if group_name == '_meta':
                if 'hostvars' in data:
                    self.host_vars_from_top = data['hostvars']
                    continue

            if group_name != all.name:
                group = groups[group_name] = Group(group_name)
            else:
                group = all
            host = None

            if not isinstance(data, dict):
                # bare list means a plain host list for this group
                data = {'hosts': data}
            # is not those subkeys, then simplified syntax, host with vars
            elif not any(k in data for k in ('hosts','vars')):
                data = {'hosts': [group_name], 'vars': data}

            if 'hosts' in data:
                if not isinstance(data['hosts'], list):
                    raise errors.AnsibleError("You defined a group \"%s\" with bad "
                        "data for the host list:\n %s" % (group_name, data))

                for hostname in data['hosts']:
                    if not hostname in all_hosts:
                        all_hosts[hostname] = Host(hostname)
                    host = all_hosts[hostname]
                    group.add_host(host)

            if 'vars' in data:
                if not isinstance(data['vars'], dict):
                    raise errors.AnsibleError("You defined a group \"%s\" with bad "
                        "data for variables:\n %s" % (group_name, data))

                for k, v in data['vars'].iteritems():
                    if group.name == all.name:
                        all.set_variable(k, v)
                    else:
                        group.set_variable(k, v)

        # Separate loop to ensure all groups are defined
        for (group_name, data) in self.raw.items():
            if group_name == '_meta':
                continue
            if isinstance(data, dict) and 'children' in data:
                for child_name in data['children']:
                    if child_name in groups:
                        groups[group_name].add_child_group(groups[child_name])

        # any group still at depth 0 (other than 'all' itself) is top-level
        for group in groups.values():
            if group.depth == 0 and group.name != 'all':
                all.add_child_group(group)

        return groups