Some more module categorization.

commit 5d814d9fb2
Author: Michael DeHaan
Date:   2014-11-04 17:23:22 -05:00
Committed by: Matt Clay

22 changed files with 0 additions and 0 deletions


@@ -0,0 +1,562 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Flowroute LLC
# Written by Matthew Williams <matthew@flowroute.com>
# Based on yum module written by Seth Vidal <skvidal at fedoraproject.org>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: apt
short_description: Manages apt packages
description:
- Manages I(apt) packages (such as for Debian/Ubuntu).
version_added: "0.0.2"
options:
name:
description:
- A package name, like C(foo), or package specifier with version, like C(foo=1.0). Wildcards (fnmatch) like apt* are also supported.
required: false
default: null
state:
description:
- Indicates the desired package state. C(latest) ensures that the latest version is installed.
required: false
default: present
choices: [ "latest", "absent", "present" ]
update_cache:
description:
- Run the equivalent of C(apt-get update) before the operation. Can be run as part of the package installation or as a separate step.
required: false
default: no
choices: [ "yes", "no" ]
cache_valid_time:
description:
- If C(update_cache) is specified and the last C(apt-get update) ran no more than I(cache_valid_time) seconds ago, the C(update_cache) step is skipped.
required: false
default: no
purge:
description:
- Will force purging of configuration files if the module state is set to I(absent).
required: false
default: no
choices: [ "yes", "no" ]
default_release:
description:
- Corresponds to the C(-t) option for I(apt) and sets pin priorities
required: false
default: null
install_recommends:
description:
- Corresponds to the C(--no-install-recommends) option for I(apt). Default behavior (C(yes)) replicates apt's default behavior; C(no) does not install recommended packages. Suggested packages are never installed.
required: false
default: yes
choices: [ "yes", "no" ]
force:
description:
- If C(yes), force installs/removes.
required: false
default: "no"
choices: [ "yes", "no" ]
upgrade:
description:
- 'If yes or safe, performs an aptitude safe-upgrade.'
- 'If full, performs an aptitude full-upgrade.'
- 'If dist, performs an apt-get dist-upgrade.'
- 'Note: This does not upgrade a specific package, use state=latest for that.'
version_added: "1.1"
required: false
default: "yes"
choices: [ "yes", "safe", "full", "dist"]
dpkg_options:
description:
- Add dpkg options to apt command. Defaults to '-o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold"'
- Options should be supplied as a comma-separated list
required: false
default: 'force-confdef,force-confold'
deb:
description:
- Path to a .deb package on the remote machine.
required: false
version_added: "1.6"
requirements: [ python-apt, aptitude ]
author: Matthew Williams
notes:
- Three of the upgrade modes (C(full), C(safe) and its alias C(yes)) require C(aptitude), otherwise
C(apt-get) suffices.
'''
EXAMPLES = '''
# Update repositories cache and install "foo" package
- apt: name=foo update_cache=yes
# Remove "foo" package
- apt: name=foo state=absent
# Install the package "foo"
- apt: name=foo state=present
# Install the version '1.00' of package "foo"
- apt: name=foo=1.00 state=present
# Update the repository cache and update package "nginx" to latest version using default release squeeze-backports
- apt: name=nginx state=latest default_release=squeeze-backports update_cache=yes
# Install latest version of "openjdk-6-jdk" ignoring "install-recommends"
- apt: name=openjdk-6-jdk state=latest install_recommends=no
# Update all packages to the latest version
- apt: upgrade=dist
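# Illustrative addition (not in the original examples): run an aptitude safe-upgrade;
# the "safe" and "full" upgrade modes require aptitude to be installed
- apt: upgrade=safe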
# Run the equivalent of "apt-get update" as a separate step
- apt: update_cache=yes
# Only run "update_cache=yes" if the last one is more than 3600 seconds ago
- apt: update_cache=yes cache_valid_time=3600
# Pass options to dpkg on run
- apt: upgrade=dist update_cache=yes dpkg_options='force-confold,force-confdef'
# Install a .deb package
- apt: deb=/tmp/mypackage.deb
'''
import traceback
# added to stave off future warnings about apt api
import warnings
warnings.filterwarnings('ignore', "apt API not stable yet", FutureWarning)
import os
import datetime
import fnmatch
# APT related constants
APT_ENV_VARS = dict(
DEBIAN_FRONTEND = 'noninteractive',
DEBIAN_PRIORITY = 'critical',
LANG = 'C'
)
DPKG_OPTIONS = 'force-confdef,force-confold'
APT_GET_ZERO = "0 upgraded, 0 newly installed"
APTITUDE_ZERO = "0 packages upgraded, 0 newly installed"
APT_LISTS_PATH = "/var/lib/apt/lists"
APT_UPDATE_SUCCESS_STAMP_PATH = "/var/lib/apt/periodic/update-success-stamp"
HAS_PYTHON_APT = True
try:
import apt
import apt.debfile
import apt_pkg
except ImportError:
HAS_PYTHON_APT = False
def package_split(pkgspec):
parts = pkgspec.split('=')
if len(parts) > 1:
return parts[0], parts[1]
else:
return parts[0], None
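# Illustrative note (not part of the original source): package_split('foo=1.00')
# returns ('foo', '1.00'), while a bare name such as package_split('foo')
# returns ('foo', None).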
def package_status(m, pkgname, version, cache, state):
try:
# get the package from the cache, as well as the
# low-level apt_pkg.Package object, which contains
# state fields not directly accessible from the
# higher-level apt.package.Package object.
pkg = cache[pkgname]
ll_pkg = cache._cache[pkgname] # the low-level package object
except KeyError:
if state == 'install':
if cache.get_providing_packages(pkgname):
return False, True, False
m.fail_json(msg="No package matching '%s' is available" % pkgname)
else:
return False, False, False
try:
has_files = len(pkg.installed_files) > 0
except UnicodeDecodeError:
has_files = True
except AttributeError:
has_files = False # older python-apt cannot be used to determine non-purged
try:
package_is_installed = ll_pkg.current_state == apt_pkg.CURSTATE_INSTALLED
except AttributeError: # python-apt 0.7.X has very weak low-level object
try:
# might not be necessary as python-apt post-0.7.X should have current_state property
package_is_installed = pkg.is_installed
except AttributeError:
# assume older version of python-apt is installed
package_is_installed = pkg.isInstalled
if version and package_is_installed:
try:
installed_version = pkg.installed.version
except AttributeError:
installed_version = pkg.installedVersion
return package_is_installed and fnmatch.fnmatch(installed_version, version), False, has_files
else:
try:
package_is_upgradable = pkg.is_upgradable
except AttributeError:
# assume older version of python-apt is installed
package_is_upgradable = pkg.isUpgradable
return package_is_installed, package_is_upgradable, has_files
def expand_dpkg_options(dpkg_options_compressed):
options_list = dpkg_options_compressed.split(',')
dpkg_options = ""
for dpkg_option in options_list:
dpkg_options = '%s -o "Dpkg::Options::=--%s"' \
% (dpkg_options, dpkg_option)
return dpkg_options.strip()
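# Illustrative note (not part of the original source): with the default options,
# expand_dpkg_options('force-confdef,force-confold') evaluates to
# '-o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold"',
# which is the string appended to the apt-get command lines below.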
def expand_pkgspec_from_fnmatches(m, pkgspec, cache):
new_pkgspec = []
for pkgname_or_fnmatch_pattern in pkgspec:
# note that none of these chars is allowed in a (debian) pkgname
if [c for c in pkgname_or_fnmatch_pattern if c in "*?[]!"]:
if "=" in pkgname_or_fnmatch_pattern:
m.fail_json(msg="pkgname wildcard and version can not be mixed")
# handle multiarch pkgnames, the idea is that "apt*" should
# only select native packages. But "apt*:i386" should still work
if not ":" in pkgname_or_fnmatch_pattern:
matches = fnmatch.filter(
[pkg.name for pkg in cache
if not ":" in pkg.name], pkgname_or_fnmatch_pattern)
else:
matches = fnmatch.filter(
[pkg.name for pkg in cache], pkgname_or_fnmatch_pattern)
if len(matches) == 0:
m.fail_json(msg="No package(s) matching '%s' available" % str(pkgname_or_fnmatch_pattern))
else:
new_pkgspec.extend(matches)
else:
new_pkgspec.append(pkgname_or_fnmatch_pattern)
return new_pkgspec
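# Illustrative note (not part of the original source): a spec such as ['apt*']
# is expanded against the cache to the matching native package names (for
# example apt and apt-utils, if present), while a plain name like ['foo'] is
# passed through unchanged because it contains no fnmatch metacharacters.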
def install(m, pkgspec, cache, upgrade=False, default_release=None,
install_recommends=True, force=False,
dpkg_options=expand_dpkg_options(DPKG_OPTIONS)):
packages = ""
pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache)
for package in pkgspec:
name, version = package_split(package)
installed, upgradable, has_files = package_status(m, name, version, cache, state='install')
if not installed or (upgrade and upgradable):
packages += "'%s' " % package
if len(packages) != 0:
if force:
force_yes = '--force-yes'
else:
force_yes = ''
if m.check_mode:
check_arg = '--simulate'
else:
check_arg = ''
for (k,v) in APT_ENV_VARS.iteritems():
os.environ[k] = v
cmd = "%s -y %s %s %s install %s" % (APT_GET_CMD, dpkg_options, force_yes, check_arg, packages)
if default_release:
cmd += " -t '%s'" % (default_release,)
if not install_recommends:
cmd += " --no-install-recommends"
rc, out, err = m.run_command(cmd)
if rc:
return (False, dict(msg="'apt-get install %s' failed: %s" % (packages, err), stdout=out, stderr=err))
else:
return (True, dict(changed=True, stdout=out, stderr=err))
else:
return (True, dict(changed=False))
def install_deb(m, debs, cache, force, install_recommends, dpkg_options):
changed=False
deps_to_install = []
pkgs_to_install = []
for deb_file in debs.split(','):
pkg = apt.debfile.DebPackage(deb_file)
# Check if it's already installed
if pkg.compare_to_version_in_cache() == pkg.VERSION_SAME:
continue
# Check if package is installable
if not pkg.check():
m.fail_json(msg=pkg._failure_string)
# add any missing deps to the list of deps we need
# to install so they're all done in one shot
deps_to_install.extend(pkg.missing_deps)
# and add this deb to the list of packages to install
pkgs_to_install.append(deb_file)
# install the deps through apt
retvals = {}
if len(deps_to_install) > 0:
(success, retvals) = install(m=m, pkgspec=deps_to_install, cache=cache,
install_recommends=install_recommends,
dpkg_options=expand_dpkg_options(dpkg_options))
if not success:
m.fail_json(**retvals)
changed = retvals.get('changed', False)
if len(pkgs_to_install) > 0:
options = ' '.join(["--%s"% x for x in dpkg_options.split(",")])
if m.check_mode:
options += " --simulate"
if force:
options += " --force-yes"
cmd = "dpkg %s -i %s" % (options, " ".join(pkgs_to_install))
rc, out, err = m.run_command(cmd)
if "stdout" in retvals:
stdout = retvals["stdout"] + out
else:
stdout = out
if "stderr" in retvals:
stderr = retvals["stderr"] + err
else:
stderr = err
if rc == 0:
m.exit_json(changed=True, stdout=stdout, stderr=stderr)
else:
m.fail_json(msg="%s failed" % cmd, stdout=stdout, stderr=stderr)
else:
m.exit_json(changed=changed, stdout=retvals.get('stdout',''), stderr=retvals.get('stderr',''))
def remove(m, pkgspec, cache, purge=False,
dpkg_options=expand_dpkg_options(DPKG_OPTIONS)):
packages = ""
pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache)
for package in pkgspec:
name, version = package_split(package)
installed, upgradable, has_files = package_status(m, name, version, cache, state='remove')
if installed or (has_files and purge):
packages += "'%s' " % package
if len(packages) == 0:
m.exit_json(changed=False)
else:
if purge:
purge = '--purge'
else:
purge = ''
for (k,v) in APT_ENV_VARS.iteritems():
os.environ[k] = v
cmd = "%s -q -y %s %s remove %s" % (APT_GET_CMD, dpkg_options, purge, packages)
if m.check_mode:
m.exit_json(changed=True)
rc, out, err = m.run_command(cmd)
if rc:
m.fail_json(msg="'apt-get remove %s' failed: %s" % (packages, err), stdout=out, stderr=err)
m.exit_json(changed=True, stdout=out, stderr=err)
def upgrade(m, mode="yes", force=False, default_release=None,
dpkg_options=expand_dpkg_options(DPKG_OPTIONS)):
if m.check_mode:
check_arg = '--simulate'
else:
check_arg = ''
apt_cmd = None
if mode == "dist":
# apt-get dist-upgrade
apt_cmd = APT_GET_CMD
upgrade_command = "dist-upgrade"
elif mode == "full":
# aptitude full-upgrade
apt_cmd = APTITUDE_CMD
upgrade_command = "full-upgrade"
else:
# aptitude safe-upgrade # mode=yes # default
apt_cmd = APTITUDE_CMD
upgrade_command = "safe-upgrade"
if force:
if apt_cmd == APT_GET_CMD:
force_yes = '--force-yes'
else:
force_yes = ''
else:
force_yes = ''
apt_cmd_path = m.get_bin_path(apt_cmd, required=True)
for (k,v) in APT_ENV_VARS.iteritems():
os.environ[k] = v
cmd = '%s -y %s %s %s %s' % (apt_cmd_path, dpkg_options,
force_yes, check_arg, upgrade_command)
if default_release:
cmd += " -t '%s'" % (default_release,)
rc, out, err = m.run_command(cmd)
if rc:
m.fail_json(msg="'%s %s' failed: %s" % (apt_cmd, upgrade_command, err), stdout=out)
if (apt_cmd == APT_GET_CMD and APT_GET_ZERO in out) or (apt_cmd == APTITUDE_CMD and APTITUDE_ZERO in out):
m.exit_json(changed=False, msg=out, stdout=out, stderr=err)
m.exit_json(changed=True, msg=out, stdout=out, stderr=err)
def main():
module = AnsibleModule(
argument_spec = dict(
state = dict(default='installed', choices=['installed', 'latest', 'removed', 'absent', 'present']),
update_cache = dict(default=False, aliases=['update-cache'], type='bool'),
cache_valid_time = dict(type='int'),
purge = dict(default=False, type='bool'),
package = dict(default=None, aliases=['pkg', 'name'], type='list'),
deb = dict(default=None),
default_release = dict(default=None, aliases=['default-release']),
install_recommends = dict(default='yes', aliases=['install-recommends'], type='bool'),
force = dict(default='no', type='bool'),
upgrade = dict(choices=['yes', 'safe', 'full', 'dist']),
dpkg_options = dict(default=DPKG_OPTIONS)
),
mutually_exclusive = [['package', 'upgrade', 'deb']],
required_one_of = [['package', 'upgrade', 'update_cache', 'deb']],
supports_check_mode = True
)
if not HAS_PYTHON_APT:
try:
module.run_command('apt-get update && apt-get install python-apt -y -q', use_unsafe_shell=True, check_rc=True)
global apt, apt_pkg
import apt
import apt_pkg
except ImportError:
module.fail_json(msg="Could not import python modules: apt, apt_pkg. Please install python-apt package.")
global APTITUDE_CMD
APTITUDE_CMD = module.get_bin_path("aptitude", False)
global APT_GET_CMD
APT_GET_CMD = module.get_bin_path("apt-get")
p = module.params
if not APTITUDE_CMD and p.get('upgrade', None) in [ 'full', 'safe', 'yes' ]:
module.fail_json(msg="Could not find aptitude. Please ensure it is installed.")
install_recommends = p['install_recommends']
dpkg_options = expand_dpkg_options(p['dpkg_options'])
try:
cache = apt.Cache()
if p['default_release']:
try:
apt_pkg.config['APT::Default-Release'] = p['default_release']
except AttributeError:
apt_pkg.Config['APT::Default-Release'] = p['default_release']
# reopen cache w/ modified config
cache.open(progress=None)
if p['update_cache']:
# Default is: always update the cache
cache_valid = False
if p['cache_valid_time']:
tdelta = datetime.timedelta(seconds=p['cache_valid_time'])
try:
mtime = os.stat(APT_UPDATE_SUCCESS_STAMP_PATH).st_mtime
except:
mtime = False
if mtime is False:
# Looks like the update-success-stamp is not available
# Fallback: Checking the mtime of the lists
try:
mtime = os.stat(APT_LISTS_PATH).st_mtime
except:
mtime = False
if mtime is False:
# No mtime could be read - looks like lists are not there
# We update the cache to be safe
cache_valid = False
else:
mtimestamp = datetime.datetime.fromtimestamp(mtime)
if mtimestamp + tdelta >= datetime.datetime.now():
# don't update the cache
# the old cache is less than cache_valid_time seconds old - so still valid
cache_valid = True
if cache_valid is not True:
cache.update()
cache.open(progress=None)
if not p['package'] and not p['upgrade'] and not p['deb']:
module.exit_json(changed=False)
force_yes = p['force']
if p['upgrade']:
upgrade(module, p['upgrade'], force_yes,
p['default_release'], dpkg_options)
if p['deb']:
if p['state'] != "installed":
module.fail_json(msg="deb only supports state=installed")
install_deb(module, p['deb'], cache,
install_recommends=install_recommends,
force=force_yes, dpkg_options=p['dpkg_options'])
packages = p['package']
latest = p['state'] == 'latest'
for package in packages:
if package.count('=') > 1:
module.fail_json(msg="invalid package spec: %s" % package)
if latest and '=' in package:
module.fail_json(msg='version number inconsistent with state=latest: %s' % package)
if p['state'] == 'latest':
result = install(module, packages, cache, upgrade=True,
default_release=p['default_release'],
install_recommends=install_recommends,
force=force_yes, dpkg_options=dpkg_options)
(success, retvals) = result
if success:
module.exit_json(**retvals)
else:
module.fail_json(**retvals)
elif p['state'] in [ 'installed', 'present' ]:
result = install(module, packages, cache, default_release=p['default_release'],
install_recommends=install_recommends,force=force_yes,
dpkg_options=dpkg_options)
(success, retvals) = result
if success:
module.exit_json(**retvals)
else:
module.fail_json(**retvals)
elif p['state'] in [ 'removed', 'absent' ]:
remove(module, packages, cache, p['purge'], dpkg_options)
except apt.cache.LockFailedException:
module.fail_json(msg="Failed to lock apt for exclusive operation")
# import module snippets
from ansible.module_utils.basic import *
main()


@@ -0,0 +1,280 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2012, Jayson Vantuyl <jayson@aggressive.ly>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: apt_key
author: Jayson Vantuyl & others
version_added: "1.0"
short_description: Add or remove an apt key
description:
- Add or remove an I(apt) key, optionally downloading it
notes:
- doesn't download the key unless it really needs it
- as a sanity check, downloaded key id must match the one specified
- best practice is to specify the key id and the url
options:
id:
required: false
default: none
description:
- identifier of key
data:
required: false
default: none
description:
- keyfile contents
file:
required: false
default: none
description:
- keyfile path
keyring:
required: false
default: none
description:
- path to specific keyring file in /etc/apt/trusted.gpg.d
version_added: "1.3"
url:
required: false
default: none
description:
- url to retrieve key from.
keyserver:
version_added: "1.6"
required: false
default: none
description:
- keyserver to retrieve key from.
state:
required: false
choices: [ absent, present ]
default: present
description:
- used to specify if the key is being added or removed
validate_certs:
description:
- If C(no), SSL certificates for the target url will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
'''
EXAMPLES = '''
# Add an apt key by id from a keyserver
- apt_key: keyserver=keyserver.ubuntu.com id=36A1D7869245C8950F966E92D8576A8BA88D21E9
# Add an Apt signing key, uses whichever key is at the URL
- apt_key: url=https://ftp-master.debian.org/keys/archive-key-6.0.asc state=present
# Add an Apt signing key, will not download if present
- apt_key: id=473041FA url=https://ftp-master.debian.org/keys/archive-key-6.0.asc state=present
# Remove an Apt signing key, uses whichever key is at the URL
- apt_key: url=https://ftp-master.debian.org/keys/archive-key-6.0.asc state=absent
# Remove an Apt-specific signing key; a leading 0x is valid
- apt_key: id=0x473041FA state=absent
# Add a key from a file on the Ansible server
- apt_key: data="{{ lookup('file', 'apt.gpg') }}" state=present
# Add an Apt signing key to a specific keyring file
- apt_key: id=473041FA url=https://ftp-master.debian.org/keys/archive-key-6.0.asc keyring=/etc/apt/trusted.gpg.d/debian.gpg state=present
'''
# FIXME: standardize into module_common
from traceback import format_exc
from re import compile as re_compile
# FIXME: standardize into module_common
from distutils.spawn import find_executable
from os import environ
from sys import exc_info
import traceback
match_key = re_compile("^gpg:.*key ([0-9a-fA-F]+):.*$")
REQUIRED_EXECUTABLES=['gpg', 'grep', 'apt-key']
def check_missing_binaries(module):
missing = [e for e in REQUIRED_EXECUTABLES if not find_executable(e)]
if len(missing):
module.fail_json(msg="binaries are missing", names=missing)
def all_keys(module, keyring, short_format):
if keyring:
cmd = "apt-key --keyring %s adv --list-public-keys --keyid-format=long" % keyring
else:
cmd = "apt-key adv --list-public-keys --keyid-format=long"
(rc, out, err) = module.run_command(cmd)
results = []
lines = out.split('\n')
for line in lines:
if line.startswith("pub"):
tokens = line.split()
code = tokens[1]
(len_type, real_code) = code.split("/")
results.append(real_code)
if short_format:
results = shorten_key_ids(results)
return results
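# Illustrative note (not part of the original source): the parsing above expects
# "pub" lines roughly of the form "pub   4096R/D8576A8BA88D21E9 2012-05-08";
# tokens[1] is then "4096R/D8576A8BA88D21E9" and real_code is the long key id.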
def shorten_key_ids(key_id_list):
"""
Takes a list of key ids, and converts them to the 'short' format,
by reducing them to their last 8 characters.
"""
short = []
for key in key_id_list:
short.append(key[-8:])
return short
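# Illustrative note (not part of the original source): using the key id from the
# examples above, shorten_key_ids(['36A1D7869245C8950F966E92D8576A8BA88D21E9'])
# returns ['A88D21E9'], i.e. the short form made of the last 8 hex characters.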
def download_key(module, url):
# FIXME: move get_url code to common, allow for in-memory D/L, support proxies
# and reuse here
if url is None:
module.fail_json(msg="needed a URL but was not specified")
try:
rsp, info = fetch_url(module, url)
if info['status'] != 200:
module.fail_json(msg="Failed to download key at %s: %s" % (url, info['msg']))
return rsp.read()
except Exception:
module.fail_json(msg="error getting key id from url: %s" % url, traceback=format_exc())
def import_key(module, keyserver, key_id):
cmd = "apt-key adv --keyserver %s --recv %s" % (keyserver, key_id)
(rc, out, err) = module.run_command(cmd, check_rc=True)
return True
def add_key(module, keyfile, keyring, data=None):
if data is not None:
if keyring:
cmd = "apt-key --keyring %s add -" % keyring
else:
cmd = "apt-key add -"
(rc, out, err) = module.run_command(cmd, data=data, check_rc=True, binary_data=True)
else:
if keyring:
cmd = "apt-key --keyring %s add %s" % (keyring, keyfile)
else:
cmd = "apt-key add %s" % (keyfile)
(rc, out, err) = module.run_command(cmd, check_rc=True)
return True
def remove_key(module, key_id, keyring):
# FIXME: use module.run_command, fail at point of error and don't discard useful stdin/stdout
if keyring:
cmd = 'apt-key --keyring %s del %s' % (keyring, key_id)
else:
cmd = 'apt-key del %s' % key_id
(rc, out, err) = module.run_command(cmd, check_rc=True)
return True
def main():
module = AnsibleModule(
argument_spec=dict(
id=dict(required=False, default=None),
url=dict(required=False),
data=dict(required=False),
file=dict(required=False),
key=dict(required=False),
keyring=dict(required=False),
validate_certs=dict(default='yes', type='bool'),
keyserver=dict(required=False),
state=dict(required=False, choices=['present', 'absent'], default='present')
),
supports_check_mode=True
)
key_id = module.params['id']
url = module.params['url']
data = module.params['data']
filename = module.params['file']
keyring = module.params['keyring']
state = module.params['state']
keyserver = module.params['keyserver']
changed = False
if key_id:
try:
_ = int(key_id, 16)
if key_id.startswith('0x'):
key_id = key_id[2:]
key_id = key_id.upper()
except ValueError:
module.fail_json(msg="Invalid key_id", id=key_id)
# FIXME: I think we have a common facility for this; if not, we want one
check_missing_binaries(module)
short_format = (key_id is not None and len(key_id) == 8)
keys = all_keys(module, keyring, short_format)
return_values = {}
if state == 'present':
if key_id and key_id in keys:
module.exit_json(changed=False)
else:
if not filename and not data and not keyserver:
data = download_key(module, url)
if key_id and key_id in keys:
module.exit_json(changed=False)
else:
if module.check_mode:
module.exit_json(changed=True)
if filename:
add_key(module, filename, keyring)
elif keyserver:
import_key(module, keyserver, key_id)
else:
add_key(module, "-", keyring, data)
changed=False
keys2 = all_keys(module, keyring, short_format)
if len(keys) != len(keys2):
changed=True
if key_id and key_id[-16:] not in keys2:
module.fail_json(msg="key does not seem to have been added", id=key_id)
module.exit_json(changed=changed)
elif state == 'absent':
if not key_id:
module.fail_json(msg="key is required")
if key_id in keys:
if module.check_mode:
module.exit_json(changed=True)
if remove_key(module, key_id, keyring):
changed=True
else:
# FIXME: module.fail_json or exit-json immediately at point of failure
module.fail_json(msg="error removing key_id", **return_values)
module.exit_json(changed=changed, **return_values)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()


@@ -0,0 +1,446 @@
#!/usr/bin/python
# encoding: utf-8
# (c) 2012, Matt Wright <matt@nobien.net>
# (c) 2013, Alexander Saltanov <asd@mokote.com>
# (c) 2014, Rutger Spiertz <rutger@kumina.nl>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: apt_repository
short_description: Add and remove APT repositories
description:
- Add or remove an APT repository in Ubuntu and Debian.
notes:
- This module works on Debian and Ubuntu and requires C(python-apt).
- This module supports Debian Squeeze (version 6) as well as its successors.
- This module treats Debian and Ubuntu distributions separately, so PPAs can be added only on Ubuntu machines.
options:
repo:
required: true
default: none
description:
- A source string for the repository.
state:
required: false
choices: [ "absent", "present" ]
default: "present"
description:
- A source string state.
mode:
required: false
default: 0644
description:
- The octal mode for newly created files in sources.list.d
version_added: "1.6"
update_cache:
description:
- Run the equivalent of C(apt-get update) when a change occurs. Cache updates are run after making changes.
required: false
default: "yes"
choices: [ "yes", "no" ]
validate_certs:
version_added: '1.8'
description:
- If C(no), SSL certificates for the target repo will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
author: Alexander Saltanov
version_added: "0.7"
requirements: [ python-apt ]
'''
EXAMPLES = '''
# Add specified repository into sources list.
apt_repository: repo='deb http://archive.canonical.com/ubuntu hardy partner' state=present
# Add source repository into sources list.
apt_repository: repo='deb-src http://archive.canonical.com/ubuntu hardy partner' state=present
# Remove specified repository from sources list.
apt_repository: repo='deb http://archive.canonical.com/ubuntu hardy partner' state=absent
# On Ubuntu target: add nginx stable repository from PPA and install its signing key.
# On Debian target: adding a PPA is not available, so this will fail immediately.
apt_repository: repo='ppa:nginx/stable'
'''
import glob
import os
import re
import tempfile
try:
import apt
import apt_pkg
import aptsources.distro as aptsources_distro
distro = aptsources_distro.get_distro()
HAVE_PYTHON_APT = True
except ImportError:
distro = None
HAVE_PYTHON_APT = False
VALID_SOURCE_TYPES = ('deb', 'deb-src')
def install_python_apt(module):
if not module.check_mode:
apt_get_path = module.get_bin_path('apt-get')
if apt_get_path:
rc, so, se = module.run_command('%s update && %s install python-apt -y -q' % (apt_get_path, apt_get_path), use_unsafe_shell=True)
if rc == 0:
global apt, apt_pkg, aptsources_distro, distro, HAVE_PYTHON_APT
import apt
import apt_pkg
import aptsources.distro as aptsources_distro
distro = aptsources_distro.get_distro()
HAVE_PYTHON_APT = True
else:
module.fail_json(msg="Failed to auto-install python-apt. Error was: '%s'" % se.strip())
class InvalidSource(Exception):
pass
# Simple version of aptsources.sourceslist.SourcesList.
# No advanced logic and no backups inside.
class SourcesList(object):
def __init__(self):
self.files = {} # group sources by file
self.default_file = self._apt_cfg_file('Dir::Etc::sourcelist')
# read sources.list if it exists
if os.path.isfile(self.default_file):
self.load(self.default_file)
# read sources.list.d
for file in glob.iglob('%s/*.list' % self._apt_cfg_dir('Dir::Etc::sourceparts')):
self.load(file)
def __iter__(self):
'''Simple iterator to go over all sources. Empty, non-source, and otherwise invalid lines will be skipped.'''
for file, sources in self.files.items():
for n, valid, enabled, source, comment in sources:
if valid:
yield file, n, enabled, source, comment
raise StopIteration
def _expand_path(self, filename):
if '/' in filename:
return filename
else:
return os.path.abspath(os.path.join(self._apt_cfg_dir('Dir::Etc::sourceparts'), filename))
def _suggest_filename(self, line):
def _cleanup_filename(s):
return '_'.join(re.sub('[^a-zA-Z0-9]', ' ', s).split())
def _strip_username_password(s):
if '@' in s:
s = s.split('@', 1)
s = s[-1]
return s
# Drop options and protocols.
line = re.sub(r'\[[^\]]+\]', '', line)
line = re.sub(r'\w+://', '', line)
# split line into valid keywords
parts = [part for part in line.split() if part not in VALID_SOURCE_TYPES]
# Drop usernames and passwords
parts[0] = _strip_username_password(parts[0])
return '%s.list' % _cleanup_filename(' '.join(parts[:1]))
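# Illustrative note (not part of the original source): for the source line used
# in the examples, 'deb http://archive.canonical.com/ubuntu hardy partner', this
# suggests the filename 'archive_canonical_com_ubuntu.list' (only the host/path
# part is kept and non-alphanumeric characters become underscores).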
def _parse(self, line, raise_if_invalid_or_disabled=False):
valid = False
enabled = True
source = ''
comment = ''
line = line.strip()
if line.startswith('#'):
enabled = False
line = line[1:]
# Check for another "#" in the line and treat a part after it as a comment.
i = line.find('#')
if i > 0:
comment = line[i+1:].strip()
line = line[:i]
# Split a source into substrings to make sure that it is a source spec.
# Duplicated whitespace in a valid source spec will be removed.
source = line.strip()
if source:
chunks = source.split()
if chunks[0] in VALID_SOURCE_TYPES:
valid = True
source = ' '.join(chunks)
if raise_if_invalid_or_disabled and (not valid or not enabled):
raise InvalidSource(line)
return valid, enabled, source, comment
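# Illustrative note (not part of the original source): a line such as
# '# deb http://archive.canonical.com/ubuntu hardy partner # added by admin'
# parses to valid=True, enabled=False,
# source='deb http://archive.canonical.com/ubuntu hardy partner' and
# comment='added by admin'.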
@staticmethod
def _apt_cfg_file(filespec):
'''
Wrapper for `apt_pkg` module for running with Python 2.5
'''
try:
result = apt_pkg.config.find_file(filespec)
except AttributeError:
result = apt_pkg.Config.FindFile(filespec)
return result
@staticmethod
def _apt_cfg_dir(dirspec):
'''
Wrapper for `apt_pkg` module for running with Python 2.5
'''
try:
result = apt_pkg.config.find_dir(dirspec)
except AttributeError:
result = apt_pkg.Config.FindDir(dirspec)
return result
def load(self, file):
group = []
f = open(file, 'r')
for n, line in enumerate(f):
valid, enabled, source, comment = self._parse(line)
group.append((n, valid, enabled, source, comment))
self.files[file] = group
def save(self, module):
for filename, sources in self.files.items():
if sources:
d, fn = os.path.split(filename)
fd, tmp_path = tempfile.mkstemp(prefix=".%s-" % fn, dir=d)
# allow the user to override the default mode
this_mode = module.params['mode']
module.set_mode_if_different(tmp_path, this_mode, False)
f = os.fdopen(fd, 'w')
for n, valid, enabled, source, comment in sources:
chunks = []
if not enabled:
chunks.append('# ')
chunks.append(source)
if comment:
chunks.append(' # ')
chunks.append(comment)
chunks.append('\n')
line = ''.join(chunks)
try:
f.write(line)
except IOError, err:
module.fail_json(msg="Failed to write to file %s: %s" % (tmp_path, unicode(err)))
module.atomic_move(tmp_path, filename)
else:
del self.files[filename]
if os.path.exists(filename):
os.remove(filename)
def dump(self):
return '\n'.join([str(i) for i in self])
def modify(self, file, n, enabled=None, source=None, comment=None):
'''
This function is meant to be used with the iterator, so we don't care about invalid sources.
If source, enabled, or comment is None, original value from line ``n`` will be preserved.
'''
valid, enabled_old, source_old, comment_old = self.files[file][n][1:]
choice = lambda new, old: old if new is None else new
self.files[file][n] = (n, valid, choice(enabled, enabled_old), choice(source, source_old), choice(comment, comment_old))
def _add_valid_source(self, source_new, comment_new, file):
# We'll try to reuse disabled source if we have it.
# If we have more than one entry, we will enable them all - no advanced logic, remember.
found = False
for filename, n, enabled, source, comment in self:
if source == source_new:
self.modify(filename, n, enabled=True)
found = True
if not found:
if file is None:
file = self.default_file
else:
file = self._expand_path(file)
if file not in self.files:
self.files[file] = []
files = self.files[file]
files.append((len(files), True, True, source_new, comment_new))
def add_source(self, line, comment='', file=None):
source = self._parse(line, raise_if_invalid_or_disabled=True)[2]
# Prefer separate files for new sources.
self._add_valid_source(source, comment, file=file or self._suggest_filename(source))
def _remove_valid_source(self, source):
# If we have more than one entry, we will remove them all (not comment, remove!)
for filename, n, enabled, src, comment in self:
if source == src and enabled:
self.files[filename].pop(n)
def remove_source(self, line):
source = self._parse(line, raise_if_invalid_or_disabled=True)[2]
self._remove_valid_source(source)
class UbuntuSourcesList(SourcesList):
LP_API = 'https://launchpad.net/api/1.0/~%s/+archive/%s'
def __init__(self, module, add_ppa_signing_keys_callback=None):
self.module = module
self.add_ppa_signing_keys_callback = add_ppa_signing_keys_callback
super(UbuntuSourcesList, self).__init__()
def _get_ppa_info(self, owner_name, ppa_name):
lp_api = self.LP_API % (owner_name, ppa_name)
headers = dict(Accept='application/json')
response, info = fetch_url(self.module, lp_api, headers=headers)
if info['status'] != 200:
self.module.fail_json(msg="failed to fetch PPA information, error was: %s" % info['msg'])
return json.load(response)
def _expand_ppa(self, path):
ppa = path.split(':')[1]
ppa_owner = ppa.split('/')[0]
try:
ppa_name = ppa.split('/')[1]
except IndexError:
ppa_name = 'ppa'
line = 'deb http://ppa.launchpad.net/%s/%s/ubuntu %s main' % (ppa_owner, ppa_name, distro.codename)
return line, ppa_owner, ppa_name
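# Illustrative note (not part of the original source): for the example
# 'ppa:nginx/stable', this returns
# ('deb http://ppa.launchpad.net/nginx/stable/ubuntu <codename> main', 'nginx', 'stable'),
# where <codename> is the detected Ubuntu release codename (e.g. 'trusty').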
def _key_already_exists(self, key_fingerprint):
rc, out, err = self.module.run_command('apt-key export %s' % key_fingerprint, check_rc=True)
return len(err) == 0
def add_source(self, line, comment='', file=None):
if line.startswith('ppa:'):
source, ppa_owner, ppa_name = self._expand_ppa(line)
if self.add_ppa_signing_keys_callback is not None:
info = self._get_ppa_info(ppa_owner, ppa_name)
if not self._key_already_exists(info['signing_key_fingerprint']):
command = ['apt-key', 'adv', '--recv-keys', '--keyserver', 'hkp://keyserver.ubuntu.com:80', info['signing_key_fingerprint']]
self.add_ppa_signing_keys_callback(command)
file = file or self._suggest_filename('%s_%s' % (line, distro.codename))
else:
source = self._parse(line, raise_if_invalid_or_disabled=True)[2]
file = file or self._suggest_filename(source)
self._add_valid_source(source, comment, file)
def remove_source(self, line):
if line.startswith('ppa:'):
source = self._expand_ppa(line)[0]
else:
source = self._parse(line, raise_if_invalid_or_disabled=True)[2]
self._remove_valid_source(source)
def get_add_ppa_signing_key_callback(module):
def _run_command(command):
module.run_command(command, check_rc=True)
if module.check_mode:
return None
else:
return _run_command
def main():
module = AnsibleModule(
argument_spec=dict(
repo=dict(required=True),
state=dict(choices=['present', 'absent'], default='present'),
mode=dict(required=False, default=0644),
update_cache = dict(aliases=['update-cache'], type='bool', default='yes'),
# this should not be needed, but exists as a failsafe
install_python_apt=dict(required=False, default="yes", type='bool'),
validate_certs = dict(default='yes', type='bool'),
),
supports_check_mode=True,
)
params = module.params
if params['install_python_apt'] and not HAVE_PYTHON_APT and not module.check_mode:
install_python_apt(module)
repo = module.params['repo']
state = module.params['state']
update_cache = module.params['update_cache']
sourceslist = None
if HAVE_PYTHON_APT:
if isinstance(distro, aptsources_distro.UbuntuDistribution):
sourceslist = UbuntuSourcesList(module,
add_ppa_signing_keys_callback=get_add_ppa_signing_key_callback(module))
elif HAVE_PYTHON_APT and \
isinstance(distro, aptsources_distro.DebianDistribution) or isinstance(distro, aptsources_distro.Distribution):
sourceslist = SourcesList()
else:
module.fail_json(msg='Module apt_repository supports only Debian and Ubuntu. ' + \
'You may be seeing this because python-apt is not installed, but you requested that it not be auto-installed')
sources_before = sourceslist.dump()
try:
if state == 'present':
sourceslist.add_source(repo)
elif state == 'absent':
sourceslist.remove_source(repo)
except InvalidSource, err:
module.fail_json(msg='Invalid repository string: %s' % unicode(err))
sources_after = sourceslist.dump()
changed = sources_before != sources_after
if not module.check_mode and changed:
try:
sourceslist.save(module)
if update_cache:
cache = apt.Cache()
cache.update()
except OSError, err:
module.fail_json(msg=unicode(err))
module.exit_json(changed=changed, repo=repo, state=state)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()


@@ -0,0 +1,172 @@
#!/usr/bin/python -tt
# -*- coding: utf-8 -*-
# (c) 2013, Evgenii Terechkov
# Written by Evgenii Terechkov <evg@altlinux.org>
# Based on urpmi module written by Philippe Makowski <philippem@mageia.org>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: apt_rpm
short_description: apt_rpm package manager
description:
- Manages packages with I(apt-rpm). Both the low-level (I(rpm)) and high-level (I(apt-get)) package manager binaries are required.
version_added: "1.5"
options:
pkg:
description:
- name of package to install, upgrade or remove.
required: true
default: null
state:
description:
- Indicates the desired package state
required: false
default: present
choices: [ "absent", "present" ]
update_cache:
description:
- update the package database first (runs C(apt-get update)).
required: false
default: no
choices: [ "yes", "no" ]
author: Evgenii Terechkov
notes: []
'''
EXAMPLES = '''
# install package foo
- apt_rpm: pkg=foo state=present
# remove package foo
- apt_rpm: pkg=foo state=absent
# remove packages foo and bar
- apt_rpm: pkg=foo,bar state=absent
# update the package database and install bar (bar will be upgraded if a newer version exists)
- apt_rpm: name=bar state=present update_cache=yes
'''
try:
import json
except ImportError:
import simplejson as json
import shlex
import os
import sys
APT_PATH="/usr/bin/apt-get"
RPM_PATH="/usr/bin/rpm"
def query_package(module, name):
# rpm -q returns 0 if the package is installed,
# 1 if it is not installed
rc = os.system("%s -q %s" % (RPM_PATH,name))
if rc == 0:
return True
else:
return False
def query_package_provides(module, name):
# rpm -q returns 0 if the package is installed,
# 1 if it is not installed
rc = os.system("%s -q --provides %s >/dev/null" % (RPM_PATH,name))
return rc == 0
def update_package_db(module):
rc = os.system("%s update" % APT_PATH)
if rc != 0:
module.fail_json(msg="could not update package db")
def remove_packages(module, packages):
remove_c = 0
# Using a for loop so that, in case of an error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
if not query_package(module, package):
continue
rc = os.system("%s -y remove %s > /dev/null" % (APT_PATH,package))
if rc != 0:
module.fail_json(msg="failed to remove %s" % (package))
remove_c += 1
if remove_c > 0:
module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
module.exit_json(changed=False, msg="package(s) already absent")
def install_packages(module, pkgspec):
packages = ""
for package in pkgspec:
if not query_package_provides(module, package):
packages += "'%s' " % package
if len(packages) != 0:
cmd = ("%s -y install %s > /dev/null" % (APT_PATH, packages))
rc, out, err = module.run_command(cmd)
installed = True
for package in pkgspec:
if not query_package_provides(module, package):
installed = False
# apt-rpm always has a 0 exit code if --force is used
if rc or not installed:
module.fail_json(msg="'apt-get -y install %s' failed: %s" % (packages, err))
else:
module.exit_json(changed=True, msg="%s present(s)" % packages)
else:
module.exit_json(changed=False)
def main():
module = AnsibleModule(
argument_spec = dict(
state = dict(default='installed', choices=['installed', 'removed', 'absent', 'present']),
update_cache = dict(default=False, aliases=['update-cache'], type='bool'),
package = dict(aliases=['pkg', 'name'], required=True)))
if not os.path.exists(APT_PATH) or not os.path.exists(RPM_PATH):
module.fail_json(msg="cannot find /usr/bin/apt-get and/or /usr/bin/rpm")
p = module.params
if p['update_cache']:
update_package_db(module)
packages = p['package'].split(',')
if p['state'] in [ 'installed', 'present' ]:
install_packages(module, packages)
elif p['state'] in [ 'removed', 'absent' ]:
remove_packages(module, packages)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
main()


@@ -0,0 +1,396 @@
#!/usr/bin/python
DOCUMENTATION = '''
---
module: redhat_subscription
short_description: Manage Red Hat Network registration and subscriptions using the C(subscription-manager) command
description:
- Manage registration and subscription to the Red Hat Network entitlement platform.
version_added: "1.2"
author: James Laska
notes:
- In order to register a system, subscription-manager requires either a username and password, or an activationkey.
requirements:
- subscription-manager
options:
state:
description:
- whether to register and subscribe (C(present)), or unregister (C(absent)) a system
required: false
choices: [ "present", "absent" ]
default: "present"
username:
description:
- Red Hat Network username
required: False
default: null
password:
description:
- Red Hat Network password
required: False
default: null
server_hostname:
description:
- Specify an alternative Red Hat Network server
required: False
default: Current value from C(/etc/rhsm/rhsm.conf) is the default
server_insecure:
description:
- Allow traffic over insecure http
required: False
default: Current value from C(/etc/rhsm/rhsm.conf) is the default
rhsm_baseurl:
description:
- Specify CDN baseurl
required: False
default: Current value from C(/etc/rhsm/rhsm.conf) is the default
autosubscribe:
description:
- Upon successful registration, auto-consume available subscriptions
required: False
default: False
activationkey:
description:
- supply an activation key for use with registration
required: False
default: null
pool:
description:
- Specify a subscription pool name to consume. Regular expressions accepted.
required: False
default: '^$'
'''
EXAMPLES = '''
# Register as user (joe_user) with password (somepass) and auto-subscribe to available content.
- redhat_subscription: action=register username=joe_user password=somepass autosubscribe=true
# Register with activationkey (1-222333444) and consume subscriptions matching
# the names (Red Hat Enterprise Server) and (Red Hat Virtualization)
- redhat_subscription: action=register
activationkey=1-222333444
pool='^(Red Hat Enterprise Server|Red Hat Virtualization)$'
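# Illustrative addition (not in the original examples): unsubscribe and
# unregister the system
- redhat_subscription: state=absent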
'''
import os
import re
import types
import ConfigParser
import shlex
class RegistrationBase(object):
def __init__(self, module, username=None, password=None):
self.module = module
self.username = username
self.password = password
def configure(self):
raise NotImplementedError("Must be implemented by a sub-class")
def enable(self):
# Remove any existing redhat.repo
redhat_repo = '/etc/yum.repos.d/redhat.repo'
if os.path.isfile(redhat_repo):
os.unlink(redhat_repo)
def register(self):
raise NotImplementedError("Must be implemented by a sub-class")
def unregister(self):
raise NotImplementedError("Must be implemented by a sub-class")
def unsubscribe(self):
raise NotImplementedError("Must be implemented by a sub-class")
def update_plugin_conf(self, plugin, enabled=True):
plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin
if os.path.isfile(plugin_conf):
cfg = ConfigParser.ConfigParser()
cfg.read([plugin_conf])
if enabled:
cfg.set('main', 'enabled', 1)
else:
cfg.set('main', 'enabled', 0)
fd = open(plugin_conf, 'w')
cfg.write(fd)
fd.close()
def subscribe(self, **kwargs):
raise NotImplementedError("Must be implemented by a sub-class")
class Rhsm(RegistrationBase):
def __init__(self, module, username=None, password=None):
RegistrationBase.__init__(self, module, username, password)
self.config = self._read_config()
self.module = module
def _read_config(self, rhsm_conf='/etc/rhsm/rhsm.conf'):
'''
Load RHSM configuration from /etc/rhsm/rhsm.conf.
Returns:
* ConfigParser object
'''
# Read RHSM defaults ...
cp = ConfigParser.ConfigParser()
cp.read(rhsm_conf)
# Add support for specifying a default value without having to stand up some configuration
# Yeah, I know this should be subclassed ... but, oh well
def get_option_default(self, key, default=''):
sect, opt = key.split('.', 1)
if self.has_section(sect) and self.has_option(sect, opt):
return self.get(sect, opt)
else:
return default
cp.get_option = types.MethodType(get_option_default, cp, ConfigParser.ConfigParser)
return cp
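# Illustrative note (not part of the original source): the injected helper lets
# callers write cp.get_option('server.hostname'), which returns the value of
# 'hostname' in the [server] section of rhsm.conf, or '' if the section or
# option is missing.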
def enable(self):
'''
Enable the system to receive updates from subscription-manager.
This involves updating affected yum plugins and removing any
conflicting yum repositories.
'''
RegistrationBase.enable(self)
self.update_plugin_conf('rhnplugin', False)
self.update_plugin_conf('subscription-manager', True)
def configure(self, **kwargs):
'''
Configure the system as directed for registration with RHN
Raises:
* Exception - if error occurs while running command
'''
args = ['subscription-manager', 'config']
# Pass supplied **kwargs as parameters to subscription-manager. Ignore
# non-configuration parameters and replace '_' with '.'. For example,
# 'rhsm_baseurl' becomes '--rhsm.baseurl'.
for k,v in kwargs.items():
if re.search(r'^(system|rhsm)_', k):
args.append('--%s=%s' % (k.replace('_','.'), v))
self.module.run_command(args, check_rc=True)
@property
def is_registered(self):
'''
Determine whether the current system is registered.
Returns:
* Boolean - whether the current system is currently registered to
RHN.
'''
# Quick version...
if False:
return os.path.isfile('/etc/pki/consumer/cert.pem') and \
os.path.isfile('/etc/pki/consumer/key.pem')
args = ['subscription-manager', 'identity']
rc, stdout, stderr = self.module.run_command(args, check_rc=False)
if rc == 0:
return True
else:
return False
def register(self, username, password, autosubscribe, activationkey):
'''
Register the current system to the provided RHN server
Raises:
* Exception - if error occurs while running command
'''
args = ['subscription-manager', 'register']
# Generate command arguments
if activationkey:
args.extend(['--activationkey', activationkey])
else:
if autosubscribe:
args.append('--autosubscribe')
if username:
args.extend(['--username', username])
if password:
args.extend(['--password', password])
rc, stdout, stderr = self.module.run_command(args, check_rc=True)
def unsubscribe(self):
'''
Unsubscribe a system from all subscribed channels
Raises:
* Exception - if error occurs while running command
'''
args = ['subscription-manager', 'unsubscribe', '--all']
rc, stdout, stderr = self.module.run_command(args, check_rc=True)
def unregister(self):
'''
Unregister a currently registered system
Raises:
* Exception - if error occurs while running command
'''
args = ['subscription-manager', 'unregister']
rc, stdout, stderr = self.module.run_command(args, check_rc=True)
def subscribe(self, regexp):
'''
Subscribe current system to available pools matching the specified
regular expression
Raises:
* Exception - if error occurs while running command
'''
# Available pools ready for subscription
available_pools = RhsmPools(self.module)
for pool in available_pools.filter(regexp):
pool.subscribe()
class RhsmPool(object):
'''
Convenience class for housing subscription information
'''
def __init__(self, module, **kwargs):
self.module = module
for k,v in kwargs.items():
setattr(self, k, v)
def __str__(self):
return str(self.__getattribute__('_name'))
def subscribe(self):
args = "subscription-manager subscribe --pool %s" % self.PoolId
rc, stdout, stderr = self.module.run_command(args, check_rc=True)
if rc == 0:
return True
else:
return False
class RhsmPools(object):
"""
This class is used for manipulating pool subscriptions with RHSM
"""
def __init__(self, module):
self.module = module
self.products = self._load_product_list()
def __iter__(self):
return self.products.__iter__()
def _load_product_list(self):
"""
Loads list of all available pools for system in data structure
"""
args = "subscription-manager list --available"
rc, stdout, stderr = self.module.run_command(args, check_rc=True)
products = []
for line in stdout.split('\n'):
# Remove leading+trailing whitespace
line = line.strip()
# An empty line implies the end of an output group
if len(line) == 0:
continue
# If a colon ':' is found, parse
elif ':' in line:
(key, value) = line.split(':',1)
key = key.strip().replace(" ", "") # To unify
value = value.strip()
if key in ['ProductName', 'SubscriptionName']:
# Remember the name for later processing
products.append(RhsmPool(self.module, _name=value, key=value))
elif products:
# Associate value with most recently recorded product
products[-1].__setattr__(key, value)
# FIXME - log some warning?
#else:
# warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value))
return products
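# Illustrative note (not part of the original source): a block of output such as
#   Subscription Name: Red Hat Enterprise Server
#   Pool ID:           1234567890
# becomes one RhsmPool whose _name is 'Red Hat Enterprise Server' and which gains
# a PoolId attribute of '1234567890' (spaces are stripped from the key names).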
def filter(self, regexp='^$'):
'''
Return a list of RhsmPools whose name matches the provided regular expression
'''
r = re.compile(regexp)
for product in self.products:
if r.search(product._name):
yield product
def main():
# Load RHSM configuration from file
rhn = Rhsm(None)
module = AnsibleModule(
argument_spec = dict(
state = dict(default='present', choices=['present', 'absent']),
username = dict(default=None, required=False),
password = dict(default=None, required=False),
server_hostname = dict(default=rhn.config.get_option('server.hostname'), required=False),
server_insecure = dict(default=rhn.config.get_option('server.insecure'), required=False),
rhsm_baseurl = dict(default=rhn.config.get_option('rhsm.baseurl'), required=False),
autosubscribe = dict(default=False, type='bool'),
activationkey = dict(default=None, required=False),
pool = dict(default='^$', required=False, type='str'),
)
)
rhn.module = module
state = module.params['state']
username = module.params['username']
password = module.params['password']
server_hostname = module.params['server_hostname']
server_insecure = module.params['server_insecure']
rhsm_baseurl = module.params['rhsm_baseurl']
autosubscribe = module.params['autosubscribe']
activationkey = module.params['activationkey']
pool = module.params['pool']
# Ensure system is registered
if state == 'present':
# Check for missing parameters ...
if not (activationkey or username or password):
module.fail_json(msg="Missing arguments, must supply an activationkey (%s) or username (%s) and password (%s)" % (activationkey, username, password))
if not activationkey and not (username and password):
module.fail_json(msg="Missing arguments, If registering without an activationkey, must supply username or password")
# Register system
if rhn.is_registered:
module.exit_json(changed=False, msg="System already registered.")
else:
try:
rhn.enable()
rhn.configure(**module.params)
rhn.register(username, password, autosubscribe, activationkey)
rhn.subscribe(pool)
except Exception, e:
module.fail_json(msg="Failed to register with '%s': %s" % (server_hostname, e))
else:
module.exit_json(changed=True, msg="System successfully registered to '%s'." % server_hostname)
# Ensure system is *not* registered
if state == 'absent':
if not rhn.is_registered:
module.exit_json(changed=False, msg="System already unregistered.")
else:
try:
rhn.unsubscribe()
rhn.unregister()
except Exception, e:
module.fail_json(msg="Failed to unregister: %s" % e)
else:
module.exit_json(changed=True, msg="System successfully unregistered from %s." % server_hostname)
# import module snippets
from ansible.module_utils.basic import *
main()


@@ -0,0 +1,169 @@
#!/usr/bin/python
# (c) Vincent Van de Kussen
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: rhn_channel
short_description: Adds or removes Red Hat software channels
description:
- Adds or removes Red Hat software channels
version_added: "1.1"
author: Vincent Van der Kussen
notes:
- this module fetches the system id from RHN.
requirements:
- none
options:
name:
description:
- name of the software channel
required: true
default: null
sysname:
description:
- name of the system as it is known in RHN/Satellite
required: true
default: null
state:
description:
- whether the channel should be present or not
required: false
default: present
url:
description:
- The full URL to the RHN/Satellite API
required: true
user:
description:
- RHN/Satellite user
required: true
password:
description:
- "the user's password"
required: true
'''
EXAMPLES = '''
- rhn_channel: name=rhel-x86_64-server-v2vwin-6 sysname=server01 url=https://rhn.redhat.com/rpc/api user=rhnuser password=guessme
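# Illustrative addition (not in the original examples): remove the same channel
- rhn_channel: name=rhel-x86_64-server-v2vwin-6 sysname=server01 url=https://rhn.redhat.com/rpc/api user=rhnuser password=guessme state=absent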
'''
import xmlrpclib
from operator import itemgetter
import re
# ------------------------------------------------------- #
def get_systemid(client, session, sysname):
systems = client.system.listUserSystems(session)
for system in systems:
if system.get('name') == sysname:
idres = system.get('id')
idd = int(idres)
return idd
# ------------------------------------------------------- #
# unused:
#
#def get_localsystemid():
# f = open("/etc/sysconfig/rhn/systemid", "r")
# content = f.read()
# loc_id = re.search(r'\b(ID-)(\d{10})' ,content)
# return loc_id.group(2)
# ------------------------------------------------------- #
def subscribe_channels(channels, client, session, sysname, sys_id):
c = base_channels(client, session, sys_id)
c.append(channels)
return client.channel.software.setSystemChannels(session, sys_id, c)
# ------------------------------------------------------- #
def unsubscribe_channels(channels, client, session, sysname, sys_id):
c = base_channels(client, session, sys_id)
c.remove(channels)
return client.channel.software.setSystemChannels(session, sys_id, c)
# ------------------------------------------------------- #
def base_channels(client, session, sys_id):
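# return the labels of the channels the system is currently subscribed to;
# the API exposes the label as either 'label' or 'channel_label' depending on
# the server version, hence the KeyError fallback below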
basechan = client.channel.software.listSystemChannels(session, sys_id)
try:
chans = [item['label'] for item in basechan]
except KeyError:
chans = [item['channel_label'] for item in basechan]
return chans
# ------------------------------------------------------- #
def main():
module = AnsibleModule(
argument_spec = dict(
state = dict(default='present', choices=['present', 'absent']),
name = dict(required=True),
sysname = dict(required=True),
url = dict(required=True),
user = dict(required=True),
password = dict(required=True, aliases=['pwd']),
)
# supports_check_mode=True
)
state = module.params['state']
channelname = module.params['name']
systname = module.params['sysname']
saturl = module.params['url']
user = module.params['user']
password = module.params['password']
#initialize connection
client = xmlrpclib.Server(saturl, verbose=0)
session = client.auth.login(user, password)
# get systemid
sys_id = get_systemid(client, session, systname)
# get channels for system
chans = base_channels(client, session, sys_id)
# module.exit_json() terminates the process, so build the result first and
# close the API session before exiting
try:
if state == 'present':
if channelname in chans:
result = dict(changed=False, msg="Channel %s already exists" % channelname)
else:
subscribe_channels(channelname, client, session, systname, sys_id)
result = dict(changed=True, msg="Channel %s added" % channelname)
if state == 'absent':
if channelname not in chans:
result = dict(changed=False, msg="Not subscribed to channel %s." % channelname)
else:
unsubscribe_channels(channelname, client, session, systname, sys_id)
result = dict(changed=True, msg="Channel %s removed" % channelname)
finally:
client.auth.logout(session)
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
main()

View file

@@ -0,0 +1,336 @@
#!/usr/bin/python
# (c) James Laska
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: rhn_register
short_description: Manage Red Hat Network registration using the C(rhnreg_ks) command
description:
- Manage registration to the Red Hat Network.
version_added: "1.2"
author: James Laska
notes:
- In order to register a system, rhnreg_ks requires either a username and password, or an activationkey.
requirements:
- rhnreg_ks
options:
state:
description:
- whether to register (C(present)), or unregister (C(absent)) a system
required: false
choices: [ "present", "absent" ]
default: "present"
username:
description:
- Red Hat Network username
required: False
default: null
password:
description:
- Red Hat Network password
required: False
default: null
server_url:
description:
- Specify an alternative Red Hat Network server URL
required: False
default: The current value of I(serverURL) from C(/etc/sysconfig/rhn/up2date)
activationkey:
description:
- supply an activation key for use with registration
required: False
default: null
channels:
description:
- Optionally specify a comma-separated list of channels to subscribe to upon successful registration.
required: false
default: []
'''
EXAMPLES = '''
# Unregister system from RHN.
- rhn_register: state=absent username=joe_user password=somepass
# Register as user (joe_user) with password (somepass) and auto-subscribe to available content.
- rhn_register: state=present username=joe_user password=somepass
# Register with activationkey (1-222333444) and enable extended update support.
- rhn_register: state=present activationkey=1-222333444 enable_eus=true
# Register as user (joe_user) with password (somepass) against a satellite
# server specified by (server_url).
- rhn_register: >
state=present
username=joe_user
password=somepass
server_url=https://xmlrpc.my.satellite/XMLRPC
# Register as user (joe_user) with password (somepass) and enable
# channels (rhel-x86_64-server-6-foo-1) and (rhel-x86_64-server-6-bar-1).
- rhn_register: state=present username=joe_user
password=somepass
channels=rhel-x86_64-server-6-foo-1,rhel-x86_64-server-6-bar-1
'''
import sys
import types
import xmlrpclib
import urlparse
# Attempt to import rhn client tools
sys.path.insert(0, '/usr/share/rhn')
try:
import up2date_client
import up2date_client.config
except ImportError, e:
# AnsibleModule has not been instantiated yet, so module.fail_json() is not
# available here; emit a module-style failure and exit instead
import json
print json.dumps(dict(failed=True, msg="Unable to import up2date_client. Is 'rhn-client-tools' installed?\n%s" % e))
sys.exit(1)
# INSERT REDHAT SNIPPETS
from ansible.module_utils.redhat import *
# INSERT COMMON SNIPPETS
from ansible.module_utils.basic import *
class Rhn(RegistrationBase):
def __init__(self, username=None, password=None):
RegistrationBase.__init__(self, username, password)
self.config = self.load_config()
def load_config(self):
'''
Read configuration from /etc/sysconfig/rhn/up2date
'''
self.config = up2date_client.config.initUp2dateConfig()
# Add support for specifying a default value w/o having to standup some
# configuration. Yeah, I know this should be subclassed ... but, oh
# well
def get_option_default(self, key, default=''):
# ignore pep8 W601 errors for this line
# setting this to use 'in' does not work in the rhn library
if self.has_key(key):
return self[key]
else:
return default
self.config.get_option = types.MethodType(get_option_default, self.config, up2date_client.config.Config)
return self.config
@property
def hostname(self):
'''
Return the non-xmlrpc RHN hostname. This is a convenience method
used for displaying a more readable RHN hostname.
Returns: str
'''
url = urlparse.urlparse(self.config['serverURL'])
return url[1].replace('xmlrpc.','')
@property
def systemid(self):
systemid = None
xpath_str = "//member[name='system_id']/value/string"
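# the systemid file is an XML-RPC style document containing, roughly,
# <member><name>system_id</name><value><string>ID-1000010000</string></value></member>
# (illustrative ID); the xpath above extracts that string value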
if os.path.isfile(self.config['systemIdPath']):
fd = open(self.config['systemIdPath'], 'r')
xml_data = fd.read()
fd.close()
# Ugh, xml parsing time ...
# First, try parsing with libxml2 ...
if systemid is None:
try:
import libxml2
doc = libxml2.parseDoc(xml_data)
ctxt = doc.xpathNewContext()
systemid = ctxt.xpathEval(xpath_str)[0].content
doc.freeDoc()
ctxt.xpathFreeContext()
except ImportError:
pass
# m-kay, let's try with lxml now ...
if systemid is None:
try:
from lxml import etree
root = etree.fromstring(xml_data)
systemid = root.xpath(xpath_str)[0].text
except ImportError:
pass
# Strip the 'ID-' prefix
if systemid is not None and systemid.startswith('ID-'):
systemid = systemid[3:]
return int(systemid)
@property
def is_registered(self):
'''
Determine whether the current system is registered.
Returns: True|False
'''
return os.path.isfile(self.config['systemIdPath'])
def configure(self, server_url):
'''
Configure system for registration
'''
self.config.set('serverURL', server_url)
self.config.save()
def enable(self):
'''
Prepare the system for RHN registration. This includes ...
* enabling the rhnplugin yum plugin
* disabling the subscription-manager yum plugin
'''
RegistrationBase.enable(self)
self.update_plugin_conf('rhnplugin', True)
self.update_plugin_conf('subscription-manager', False)
def register(self, enable_eus=False, activationkey=None):
'''
Register system to RHN. If enable_eus=True, extended update
support will be requested.
'''
register_cmd = "/usr/sbin/rhnreg_ks --username='%s' --password='%s' --force" % (self.username, self.password)
if self.module.params.get('server_url', None):
register_cmd += " --serverUrl=%s" % self.module.params.get('server_url')
if enable_eus:
register_cmd += " --use-eus-channel"
if activationkey is not None:
register_cmd += " --activationkey '%s'" % activationkey
# FIXME - support --profilename
# FIXME - support --systemorgid
rc, stdout, stderr = self.module.run_command(register_cmd, check_rc=True, use_unsafe_shell=True)
def api(self, method, *args):
'''
Convenience RPC wrapper
'''
if not hasattr(self, 'server') or self.server is None:
if self.hostname != 'rhn.redhat.com':
url = "https://%s/rpc/api" % self.hostname
else:
url = "https://xmlrpc.%s/rpc/api" % self.hostname
self.server = xmlrpclib.Server(url, verbose=0)
self.session = self.server.auth.login(self.username, self.password)
func = getattr(self.server, method)
return func(self.session, *args)
def unregister(self):
'''
Unregister a previously registered system
'''
# Initiate RPC connection
self.api('system.deleteSystems', [self.systemid])
# Remove systemid file
os.unlink(self.config['systemIdPath'])
def subscribe(self, channels=[]):
if len(channels) <= 0:
return
current_channels = self.api('channel.software.listSystemChannels', self.systemid)
new_channels = [item['channel_label'] for item in current_channels]
new_channels.extend(channels)
return self.api('channel.software.setSystemChannels', self.systemid, new_channels)
def _subscribe(self, channels=[]):
'''
Subscribe to requested yum repositories using 'rhn-channel' command
'''
rhn_channel_cmd = "rhn-channel --user='%s' --password='%s'" % (self.username, self.password)
rc, stdout, stderr = self.module.run_command(rhn_channel_cmd + " --available-channels", check_rc=True)
# Enable requested repoid's
for wanted_channel in channels:
# Each inserted repo regexp will be matched. If no match, no success.
for available_channel in stdout.rstrip().split('\n'): # .rstrip() because of \n at the end -> empty string at the end
if re.search(wanted_channel, available_channel):
rc, stdout, stderr = self.module.run_command(rhn_channel_cmd + " --add --channel=%s" % available_channel, check_rc=True)
def main():
# Read system RHN configuration
rhn = Rhn()
module = AnsibleModule(
argument_spec = dict(
state = dict(default='present', choices=['present', 'absent']),
username = dict(default=None, required=False),
password = dict(default=None, required=False),
server_url = dict(default=rhn.config.get_option('serverURL'), required=False),
activationkey = dict(default=None, required=False),
enable_eus = dict(default=False, type='bool'),
channels = dict(default=[], type='list'),
)
)
state = module.params['state']
rhn.username = module.params['username']
rhn.password = module.params['password']
rhn.configure(module.params['server_url'])
activationkey = module.params['activationkey']
channels = module.params['channels']
rhn.module = module
# Ensure system is registered
if state == 'present':
# Check for missing parameters ...
if not (activationkey or rhn.username or rhn.password):
module.fail_json(msg="Missing arguments, must supply an activationkey (%s) or username (%s) and password (%s)" % (activationkey, rhn.username, rhn.password))
if not activationkey and not (rhn.username and rhn.password):
module.fail_json(msg="Missing arguments, If registering without an activationkey, must supply username or password")
# Register system
if rhn.is_registered:
module.exit_json(changed=False, msg="System already registered.")
else:
try:
rhn.enable()
rhn.register(module.params['enable_eus'], activationkey)
rhn.subscribe(channels)
except Exception, e:
module.fail_json(msg="Failed to register with '%s': %s" % (rhn.hostname, e))
module.exit_json(changed=True, msg="System successfully registered to '%s'." % rhn.hostname)
# Ensure system is *not* registered
if state == 'absent':
if not rhn.is_registered:
module.exit_json(changed=False, msg="System already unregistered.")
else:
try:
rhn.unregister()
except Exception, e:
module.fail_json(msg="Failed to unregister: %s" % e)
module.exit_json(changed=True, msg="System successfully unregistered from %s." % rhn.hostname)
main()

View file

@@ -0,0 +1,206 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Ansible module to import third party repo keys to your rpm db
# (c) 2013, Héctor Acosta <hector.acosta@gazzang.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: rpm_key
author: Hector Acosta <hector.acosta@gazzang.com>
short_description: Adds or removes a gpg key from the rpm db
description:
- Adds or removes a gpg key (C(rpm --import)) from your rpm database.
version_added: "1.3"
options:
key:
required: true
default: null
aliases: []
description:
- Key that will be modified. Can be a URL, a file, or a keyid if the key already exists in the database.
state:
required: false
default: "present"
choices: [present, absent]
description:
- Whether the key will be imported or removed from the rpm db.
validate_certs:
description:
- If C(no) and the C(key) is a URL starting with https, SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
'''
EXAMPLES = '''
# Example action to import a key from a url
- rpm_key: state=present key=http://apt.sw.be/RPM-GPG-KEY.dag.txt
# Example action to import a key from a file
- rpm_key: state=present key=/path/to/key.gpg
# Example action to ensure a key is not present in the db
- rpm_key: state=absent key=DEADB33F
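# Example action to import a key over https without certificate validation
# (hypothetical URL, shown only to illustrate validate_certs)
- rpm_key: state=present key=https://keys.internal.example.com/RPM-GPG-KEY validate_certs=no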
'''
import syslog
import os.path
import re
import tempfile
def is_pubkey(string):
"""Verifies if string is a pubkey"""
pgp_regex = ".*?(-----BEGIN PGP PUBLIC KEY BLOCK-----.*?-----END PGP PUBLIC KEY BLOCK-----).*"
return re.match(pgp_regex, string, re.DOTALL)
class RpmKey:
def __init__(self, module):
self.syslogging = False
# If the key is a url, we need to check if it's present to be idempotent,
# to do that, we need to check the keyid, which we can get from the armor.
keyfile = None
should_cleanup_keyfile = False
self.module = module
self.rpm = self.module.get_bin_path('rpm', True)
state = module.params['state']
key = module.params['key']
if '://' in key:
keyfile = self.fetch_key(key)
keyid = self.getkeyid(keyfile)
should_cleanup_keyfile = True
elif self.is_keyid(key):
keyid = key
elif os.path.isfile(key):
keyfile = key
keyid = self.getkeyid(keyfile)
else:
self.module.fail_json(msg="Not a valid key %s" % key)
keyid = self.normalize_keyid(keyid)
if state == 'present':
if self.is_key_imported(keyid):
module.exit_json(changed=False)
else:
if not keyfile:
self.module.fail_json(msg="When importing a key, a valid file must be given")
self.import_key(keyfile, dryrun=module.check_mode)
if should_cleanup_keyfile:
self.module.cleanup(keyfile)
module.exit_json(changed=True)
else:
if self.is_key_imported(keyid):
self.drop_key(keyid, dryrun=module.check_mode)
module.exit_json(changed=True)
else:
module.exit_json(changed=False)
def fetch_key(self, url):
"""Downloads a key from url, returns a valid path to a gpg key"""
try:
rsp, info = fetch_url(self.module, url)
key = rsp.read()
if not is_pubkey(key):
self.module.fail_json(msg="Not a public key: %s" % url)
tmpfd, tmpname = tempfile.mkstemp()
tmpfile = os.fdopen(tmpfd, "w+b")
tmpfile.write(key)
tmpfile.close()
return tmpname
except urllib2.URLError, e:
self.module.fail_json(msg=str(e))
def normalize_keyid(self, keyid):
"""Ensure a keyid doesn't have a leading 0x, has leading or trailing whitespace, and make sure is lowercase"""
ret = keyid.strip().lower()
if ret.startswith('0x'):
return ret[2:]
elif ret.startswith('0X'):
return ret[2:]
else:
return ret
def getkeyid(self, keyfile):
gpg = self.module.get_bin_path('gpg', True)
stdout, stderr = self.execute_command([gpg, '--no-tty', '--batch', '--with-colons', '--fixed-list-mode', '--list-packets', keyfile])
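# gpg --list-packets prints a line roughly like
# ':signature packet: algo 1, keyid 0123456789ABCDEF' (illustrative); the
# keyid is the last whitespace-separated token on that line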
for line in stdout.splitlines():
line = line.strip()
if line.startswith(':signature packet:'):
# We want just the last 8 characters of the keyid
keyid = line.split()[-1].strip()[8:]
return keyid
self.module.fail_json(msg="Unexpected gpg output")
def is_keyid(self, keystr):
"""Verifies if a key, as provided by the user is a keyid"""
return re.match('(0x)?[0-9a-f]{8}', keystr, flags=re.IGNORECASE)
def execute_command(self, cmd):
if self.syslogging:
syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'Command %s' % '|'.join(cmd))
rc, stdout, stderr = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg=stderr)
return stdout, stderr
def is_key_imported(self, keyid):
stdout, stderr = self.execute_command([self.rpm, '-qa', 'gpg-pubkey'])
for line in stdout.splitlines():
line = line.strip()
if not line:
continue
match = re.match('gpg-pubkey-([0-9a-f]+)-([0-9a-f]+)', line)
if not match:
self.module.fail_json(msg="rpm returned unexpected output [%s]" % line)
else:
if keyid == match.group(1):
return True
return False
def import_key(self, keyfile, dryrun=False):
if not dryrun:
self.execute_command([self.rpm, '--import', keyfile])
def drop_key(self, key, dryrun=False):
if not dryrun:
self.execute_command([self.rpm, '--erase', '--allmatches', "gpg-pubkey-%s" % key])
def main():
module = AnsibleModule(
argument_spec = dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
key=dict(required=True, type='str'),
validate_certs=dict(default='yes', type='bool'),
),
supports_check_mode=True
)
RpmKey(module)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()

View file

@@ -0,0 +1,838 @@
#!/usr/bin/python -tt
# -*- coding: utf-8 -*-
# (c) 2012, Red Hat, Inc
# Written by Seth Vidal <skvidal at fedoraproject.org>
# (c) 2014, Epic Games, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import traceback
import os
import yum
try:
from yum.misc import find_unfinished_transactions, find_ts_remaining
from rpmUtils.miscutils import splitFilename
transaction_helpers = True
except:
transaction_helpers = False
DOCUMENTATION = '''
---
module: yum
version_added: historical
short_description: Manages packages with the I(yum) package manager
description:
- Installs, upgrades, removes, and lists packages and groups with the I(yum) package manager.
options:
name:
description:
- "Package name, or package specifier with version, like C(name-1.0). When using state=latest, this can be '*' which means run: yum -y update. You can also pass a url or a local path to a rpm file."
required: true
default: null
aliases: []
list:
description:
- Various (non-idempotent) commands for usage with C(/usr/bin/ansible) and I(not) playbooks. See examples.
required: false
default: null
state:
description:
- Whether to install (C(present), C(latest)), or remove (C(absent)) a package.
required: false
choices: [ "present", "latest", "absent" ]
default: "present"
enablerepo:
description:
- I(Repoid) of repositories to enable for the install/update operation.
These repos will not persist beyond the transaction.
When specifying multiple repos, separate them with a ",".
required: false
version_added: "0.9"
default: null
aliases: []
disablerepo:
description:
- I(Repoid) of repositories to disable for the install/update operation.
These repos will not persist beyond the transaction.
When specifying multiple repos, separate them with a ",".
required: false
version_added: "0.9"
default: null
aliases: []
conf_file:
description:
- The remote yum configuration file to use for the transaction.
required: false
version_added: "0.6"
default: null
aliases: []
disable_gpg_check:
description:
- Whether to disable the GPG checking of signatures of packages being
installed. Has an effect only if state is I(present) or I(latest).
required: false
version_added: "1.2"
default: "no"
choices: ["yes", "no"]
aliases: []
notes: []
# informational: requirements for nodes
requirements: [ yum, rpm ]
author: Seth Vidal
'''
EXAMPLES = '''
- name: install the latest version of Apache
yum: name=httpd state=latest
- name: remove the Apache package
yum: name=httpd state=absent
- name: install the latest version of Apache from the testing repo
yum: name=httpd enablerepo=testing state=present
- name: upgrade all packages
yum: name=* state=latest
- name: install the nginx rpm from a remote repo
yum: name=http://nginx.org/packages/centos/6/noarch/RPMS/nginx-release-centos-6-0.el6.ngx.noarch.rpm state=present
- name: install nginx rpm from a local file
yum: name=/usr/local/src/nginx-release-centos-6-0.el6.ngx.noarch.rpm state=present
- name: install the 'Development tools' package group
yum: name="@Development tools" state=present
'''
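# def_qf renders a package as e.g. 'httpd-2.2.15-39.el6.x86_64'
# (illustrative name-version-release.arch)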
def_qf = "%{name}-%{version}-%{release}.%{arch}"
repoquery='/usr/bin/repoquery'
if not os.path.exists(repoquery):
repoquery = None
yumbin='/usr/bin/yum'
import syslog
def log(msg):
syslog.openlog('ansible-yum', 0, syslog.LOG_USER)
syslog.syslog(syslog.LOG_NOTICE, msg)
def yum_base(conf_file=None, cachedir=False):
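# build a YumBase preconfigured with the optional conf file; when not running
# as root (or when cachedir is requested) it is pointed at a user-writable
# cache dir so queries still work without root privileges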
my = yum.YumBase()
my.preconf.debuglevel=0
my.preconf.errorlevel=0
if conf_file and os.path.exists(conf_file):
my.preconf.fn = conf_file
if cachedir or os.geteuid() != 0:
if hasattr(my, 'setCacheDir'):
my.setCacheDir()
else:
cachedir = yum.misc.getCacheDir()
my.repos.setCacheDir(cachedir)
my.conf.cache = 0
return my
def install_yum_utils(module):
if not module.check_mode:
yum_path = module.get_bin_path('yum')
if yum_path:
rc, so, se = module.run_command('%s -y install yum-utils' % yum_path)
if rc == 0:
this_path = module.get_bin_path('repoquery')
global repoquery
repoquery = this_path
def po_to_nevra(po):
if hasattr(po, 'ui_nevra'):
return po.ui_nevra
else:
return '%s-%s-%s.%s' % (po.name, po.version, po.release, po.arch)
def is_installed(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=[], dis_repos=[], is_pkg=False):
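# repoq is either None (fall back to the yum python API) or the repoquery
# argv prefix built in ensure()/list_stuff(); the same convention applies to
# the other query helpers below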
if not repoq:
pkgs = []
try:
my = yum_base(conf_file)
for rid in en_repos:
my.repos.enableRepo(rid)
for rid in dis_repos:
my.repos.disableRepo(rid)
e,m,u = my.rpmdb.matchPackageNames([pkgspec])
pkgs = e + m
if not pkgs:
pkgs.extend(my.returnInstalledPackagesByDep(pkgspec))
except Exception, e:
module.fail_json(msg="Failure talking to yum: %s" % e)
return [ po_to_nevra(p) for p in pkgs ]
else:
cmd = repoq + ["--disablerepo=*", "--pkgnarrow=installed", "--qf", qf, pkgspec]
rc,out,err = module.run_command(cmd)
if not is_pkg:
cmd = repoq + ["--disablerepo=*", "--pkgnarrow=installed", "--qf", qf, "--whatprovides", pkgspec]
rc2,out2,err2 = module.run_command(cmd)
else:
rc2,out2,err2 = (0, '', '')
if rc == 0 and rc2 == 0:
out += out2
return [ p for p in out.split('\n') if p.strip() ]
else:
module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err + err2))
return []
def is_available(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=[], dis_repos=[]):
if not repoq:
pkgs = []
try:
my = yum_base(conf_file)
for rid in en_repos:
my.repos.enableRepo(rid)
for rid in dis_repos:
my.repos.disableRepo(rid)
e,m,u = my.pkgSack.matchPackageNames([pkgspec])
pkgs = e + m
if not pkgs:
pkgs.extend(my.returnPackagesByDep(pkgspec))
except Exception, e:
module.fail_json(msg="Failure talking to yum: %s" % e)
return [ po_to_nevra(p) for p in pkgs ]
else:
myrepoq = list(repoq)
for repoid in dis_repos:
r_cmd = ['--disablerepo', repoid]
myrepoq.extend(r_cmd)
for repoid in en_repos:
r_cmd = ['--enablerepo', repoid]
myrepoq.extend(r_cmd)
cmd = myrepoq + ["--qf", qf, pkgspec]
rc,out,err = module.run_command(cmd)
if rc == 0:
return [ p for p in out.split('\n') if p.strip() ]
else:
module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err))
return []
def is_update(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=[], dis_repos=[]):
if not repoq:
retpkgs = []
pkgs = []
updates = []
try:
my = yum_base(conf_file)
for rid in en_repos:
my.repos.enableRepo(rid)
for rid in dis_repos:
my.repos.disableRepo(rid)
pkgs = my.returnPackagesByDep(pkgspec) + my.returnInstalledPackagesByDep(pkgspec)
if not pkgs:
e,m,u = my.pkgSack.matchPackageNames([pkgspec])
pkgs = e + m
updates = my.doPackageLists(pkgnarrow='updates').updates
except Exception, e:
module.fail_json(msg="Failure talking to yum: %s" % e)
for pkg in pkgs:
if pkg in updates:
retpkgs.append(pkg)
return set([ po_to_nevra(p) for p in retpkgs ])
else:
myrepoq = list(repoq)
for repoid in dis_repos:
r_cmd = ['--disablerepo', repoid]
myrepoq.extend(r_cmd)
for repoid in en_repos:
r_cmd = ['--enablerepo', repoid]
myrepoq.extend(r_cmd)
cmd = myrepoq + ["--pkgnarrow=updates", "--qf", qf, pkgspec]
rc,out,err = module.run_command(cmd)
if rc == 0:
return set([ p for p in out.split('\n') if p.strip() ])
else:
module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err))
return []
def what_provides(module, repoq, req_spec, conf_file, qf=def_qf, en_repos=[], dis_repos=[]):
if not repoq:
pkgs = []
try:
my = yum_base(conf_file)
for rid in en_repos:
my.repos.enableRepo(rid)
for rid in dis_repos:
my.repos.disableRepo(rid)
pkgs = my.returnPackagesByDep(req_spec) + my.returnInstalledPackagesByDep(req_spec)
if not pkgs:
e,m,u = my.pkgSack.matchPackageNames([req_spec])
pkgs.extend(e)
pkgs.extend(m)
e,m,u = my.rpmdb.matchPackageNames([req_spec])
pkgs.extend(e)
pkgs.extend(m)
except Exception, e:
module.fail_json(msg="Failure talking to yum: %s" % e)
return set([ po_to_nevra(p) for p in pkgs ])
else:
myrepoq = list(repoq)
for repoid in dis_repos:
r_cmd = ['--disablerepo', repoid]
myrepoq.extend(r_cmd)
for repoid in en_repos:
r_cmd = ['--enablerepo', repoid]
myrepoq.extend(r_cmd)
cmd = myrepoq + ["--qf", qf, "--whatprovides", req_spec]
rc,out,err = module.run_command(cmd)
cmd = myrepoq + ["--qf", qf, req_spec]
rc2,out2,err2 = module.run_command(cmd)
if rc == 0 and rc2 == 0:
out += out2
pkgs = set([ p for p in out.split('\n') if p.strip() ])
if not pkgs:
pkgs = is_installed(module, repoq, req_spec, conf_file, qf=qf)
return pkgs
else:
module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err + err2))
return []
def transaction_exists(pkglist):
"""
checks the package list to see if any packages are
involved in an incomplete transaction
"""
conflicts = []
if not transaction_helpers:
return conflicts
# first, we create a list of the package 'nvreas'
# so we can compare the pieces later more easily
pkglist_nvreas = []
for pkg in pkglist:
pkglist_nvreas.append(splitFilename(pkg))
# next, we build the list of packages that are
# contained within an unfinished transaction
unfinished_transactions = find_unfinished_transactions()
for trans in unfinished_transactions:
steps = find_ts_remaining(trans)
for step in steps:
# the action is install/erase/etc., but we only
# care about the package spec contained in the step
(action, step_spec) = step
(n,v,r,e,a) = splitFilename(step_spec)
# and see if that spec is in the list of packages
# requested for installation/updating
for pkg in pkglist_nvreas:
# if the name and arch match, we're going to assume
# this package is part of a pending transaction
# the label is just for display purposes
label = "%s-%s" % (n,a)
if n == pkg[0] and a == pkg[4]:
if label not in conflicts:
conflicts.append("%s-%s" % (n,a))
break
return conflicts
def local_nvra(module, path):
"""return nvra of a local rpm passed in"""
cmd = ['/bin/rpm', '-qp', '--qf',
'%{name}-%{version}-%{release}.%{arch}\n', path ]
rc, out, err = module.run_command(cmd)
if rc != 0:
return None
nvra = out.split('\n')[0]
return nvra
def pkg_to_dict(pkgstr):
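# pkgstr is one line of repoquery output in the
# '%{name}|%{epoch}|%{version}|%{release}|%{arch}|%{repoid}' format used by
# list_stuff(), e.g. 'httpd|0|2.2.15|39.el6|x86_64|updates' (illustrative)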
if pkgstr.strip():
n,e,v,r,a,repo = pkgstr.split('|')
else:
return {'error_parsing': pkgstr}
d = {
'name':n,
'arch':a,
'epoch':e,
'release':r,
'version':v,
'repo':repo,
'nevra': '%s:%s-%s-%s.%s' % (e,n,v,r,a)
}
if repo == 'installed':
d['yumstate'] = 'installed'
else:
d['yumstate'] = 'available'
return d
def repolist(module, repoq, qf="%{repoid}"):
cmd = repoq + ["--qf", qf, "-a"]
rc,out,err = module.run_command(cmd)
ret = []
if rc == 0:
ret = set([ p for p in out.split('\n') if p.strip() ])
return ret
def list_stuff(module, conf_file, stuff):
qf = "%{name}|%{epoch}|%{version}|%{release}|%{arch}|%{repoid}"
repoq = [repoquery, '--show-duplicates', '--plugins', '--quiet', '-q']
if conf_file and os.path.exists(conf_file):
repoq += ['-c', conf_file]
if stuff == 'installed':
return [ pkg_to_dict(p) for p in is_installed(module, repoq, '-a', conf_file, qf=qf) if p.strip() ]
elif stuff == 'updates':
return [ pkg_to_dict(p) for p in is_update(module, repoq, '-a', conf_file, qf=qf) if p.strip() ]
elif stuff == 'available':
return [ pkg_to_dict(p) for p in is_available(module, repoq, '-a', conf_file, qf=qf) if p.strip() ]
elif stuff == 'repos':
return [ dict(repoid=name, state='enabled') for name in repolist(module, repoq) if name.strip() ]
else:
return [ pkg_to_dict(p) for p in is_installed(module, repoq, stuff, conf_file, qf=qf) + is_available(module, repoq, stuff, conf_file, qf=qf) if p.strip() ]
def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos):
res = {}
res['results'] = []
res['msg'] = ''
res['rc'] = 0
res['changed'] = False
for spec in items:
pkg = None
# check if pkgspec is installed (if possible for idempotence)
# localpkg
if spec.endswith('.rpm') and '://' not in spec:
# get the pkg name-v-r.arch
if not os.path.exists(spec):
res['msg'] += "No Package file matching '%s' found on system" % spec
module.fail_json(**res)
nvra = local_nvra(module, spec)
# look for them in the rpmdb
if is_installed(module, repoq, nvra, conf_file, en_repos=en_repos, dis_repos=dis_repos):
# if they are there, skip it
continue
pkg = spec
# URL
elif '://' in spec:
pkg = spec
#groups :(
elif spec.startswith('@'):
# complete wild ass guess b/c it's a group
pkg = spec
# range requires or file-requires or pkgname :(
else:
# most common case is the pkg is already installed and done
# short circuit all the bs - and search for it as a pkg in is_installed
# if you find it then we're done
if not set(['*','?']).intersection(set(spec)):
pkgs = is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos, is_pkg=True)
if pkgs:
res['results'].append('%s providing %s is already installed' % (pkgs[0], spec))
continue
# look up what pkgs provide this
pkglist = what_provides(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos)
if not pkglist:
res['msg'] += "No Package matching '%s' found available, installed or updated" % spec
module.fail_json(**res)
# if any of the packages are involved in a transaction, fail now
# so that we don't hang on the yum operation later
conflicts = transaction_exists(pkglist)
if len(conflicts) > 0:
res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts)
module.fail_json(**res)
# if any of them are installed
# then nothing to do
found = False
for this in pkglist:
if is_installed(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos, is_pkg=True):
found = True
res['results'].append('%s providing %s is already installed' % (this, spec))
break
# if the version of the pkg you have installed is not in ANY repo, but there are
# other versions in the repos (both higher and lower) then the previous checks won't work.
# so we check one more time. This really only works for pkgname - not for file provides or virt provides
# but virt provides should be all caught in what_provides on its own.
# highly irritating
if not found:
if is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos):
found = True
res['results'].append('package providing %s is already installed' % (spec))
if found:
continue
# if not - then pass in the spec as what to install
# we could get here if nothing provides it but that's not
# the error we're catching here
pkg = spec
cmd = yum_basecmd + ['install', pkg]
if module.check_mode:
module.exit_json(changed=True)
changed = True
rc, out, err = module.run_command(cmd)
# Fail on invalid urls:
if (rc == 1 and '://' in spec and ('No package %s available.' % spec in out or 'Cannot open: %s. Skipping.' % spec in err)):
err = 'Package at %s could not be installed' % spec
module.fail_json(changed=False,msg=err,rc=1)
elif (rc != 0 and 'Nothing to do' in err) or 'Nothing to do' in out:
# avoid failing in the 'Nothing To Do' case
# this may happen with an URL spec.
# for an already installed group,
# we get rc = 0 and 'Nothing to do' in out, not in err.
rc = 0
err = ''
out = '%s: Nothing to do' % spec
changed = False
res['rc'] += rc
res['results'].append(out)
res['msg'] += err
# FIXME - if we did an install - go and check the rpmdb to see if it actually installed
# look for the pkg in rpmdb
# look for the pkg via obsoletes
# accumulate any changes
res['changed'] |= changed
module.exit_json(**res)
def remove(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos):
res = {}
res['results'] = []
res['msg'] = ''
res['changed'] = False
res['rc'] = 0
for pkg in items:
is_group = False
# group remove - this is doom on a stick
if pkg.startswith('@'):
is_group = True
else:
if not is_installed(module, repoq, pkg, conf_file, en_repos=en_repos, dis_repos=dis_repos):
res['results'].append('%s is not installed' % pkg)
continue
# run an actual yum transaction
cmd = yum_basecmd + ["remove", pkg]
if module.check_mode:
module.exit_json(changed=True)
rc, out, err = module.run_command(cmd)
res['rc'] += rc
res['results'].append(out)
res['msg'] += err
# compile the results into one batch. If anything is changed
# then mark changed
# at the end - if we've end up failed then fail out of the rest
# of the process
# at this point we should check to see if the pkg is no longer present
if not is_group: # we can't sensibly check for a group being uninstalled reliably
# look to see if the pkg shows up from is_installed. If it doesn't
if not is_installed(module, repoq, pkg, conf_file, en_repos=en_repos, dis_repos=dis_repos):
res['changed'] = True
else:
module.fail_json(**res)
if rc != 0:
module.fail_json(**res)
module.exit_json(**res)
def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos):
res = {}
res['results'] = []
res['msg'] = ''
res['changed'] = False
res['rc'] = 0
for spec in items:
pkg = None
basecmd = 'update'
cmd = ''
# groups, again
if spec.startswith('@'):
pkg = spec
elif spec == '*': #update all
# use check-update to see if there is any need
rc,out,err = module.run_command(yum_basecmd + ['check-update'])
if rc == 100:
cmd = yum_basecmd + [basecmd]
else:
res['results'].append('All packages up to date')
continue
# dep/pkgname - find it
else:
if is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos):
basecmd = 'update'
else:
basecmd = 'install'
pkglist = what_provides(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos)
if not pkglist:
res['msg'] += "No Package matching '%s' found available, installed or updated" % spec
module.fail_json(**res)
nothing_to_do = True
for this in pkglist:
if basecmd == 'install' and is_available(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos):
nothing_to_do = False
break
if basecmd == 'update' and is_update(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos):
nothing_to_do = False
break
if nothing_to_do:
res['results'].append("All packages providing %s are up to date" % spec)
continue
# if any of the packages are involved in a transaction, fail now
# so that we don't hang on the yum operation later
conflicts = transaction_exists(pkglist)
if len(conflicts) > 0:
res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts)
module.fail_json(**res)
pkg = spec
if not cmd:
cmd = yum_basecmd + [basecmd, pkg]
if module.check_mode:
return module.exit_json(changed=True)
rc, out, err = module.run_command(cmd)
res['rc'] += rc
res['results'].append(out)
res['msg'] += err
# FIXME if it is - update it and check to see if it applied
# check to see if there is no longer an update available for the pkgspec
if rc:
res['failed'] = True
else:
res['changed'] = True
module.exit_json(**res)
def ensure(module, state, pkgspec, conf_file, enablerepo, disablerepo,
disable_gpg_check):
# take multiple args comma separated
items = pkgspec.split(',')
# need debug level 2 to get 'Nothing to do' for groupinstall.
yum_basecmd = [yumbin, '-d', '2', '-y']
if not repoquery:
repoq = None
else:
repoq = [repoquery, '--show-duplicates', '--plugins', '--quiet', '-q']
if conf_file and os.path.exists(conf_file):
yum_basecmd += ['-c', conf_file]
if repoq:
repoq += ['-c', conf_file]
dis_repos =[]
en_repos = []
if disablerepo:
dis_repos = disablerepo.split(',')
if enablerepo:
en_repos = enablerepo.split(',')
for repoid in dis_repos:
r_cmd = ['--disablerepo=%s' % repoid]
yum_basecmd.extend(r_cmd)
for repoid in en_repos:
r_cmd = ['--enablerepo=%s' % repoid]
yum_basecmd.extend(r_cmd)
if state in ['installed', 'present', 'latest']:
my = yum_base(conf_file)
try:
for r in dis_repos:
my.repos.disableRepo(r)
current_repos = my.repos.repos.keys()
for r in en_repos:
try:
my.repos.enableRepo(r)
new_repos = my.repos.repos.keys()
for i in new_repos:
if not i in current_repos:
rid = my.repos.getRepo(i)
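# touching repoXML forces the metadata of a newly enabled repo to be fetched,
# so a bad repo id or unreachable baseurl fails here with a clear error
# instead of partway through the transaction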
a = rid.repoXML.repoid
current_repos = new_repos
except yum.Errors.YumBaseError, e:
module.fail_json(msg="Error setting/accessing repo %s: %s" % (r, e))
except yum.Errors.YumBaseError, e:
module.fail_json(msg="Error accessing repos: %s" % e)
if state in ['installed', 'present']:
if disable_gpg_check:
yum_basecmd.append('--nogpgcheck')
install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos)
elif state in ['removed', 'absent']:
remove(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos)
elif state == 'latest':
if disable_gpg_check:
yum_basecmd.append('--nogpgcheck')
latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos)
# should be caught by AnsibleModule argument_spec
return dict(changed=False, failed=True, results='', errors='unexpected state')
def main():
# state=installed name=pkgspec
# state=removed name=pkgspec
# state=latest name=pkgspec
#
# informational commands:
# list=installed
# list=updates
# list=available
# list=repos
# list=pkgspec
module = AnsibleModule(
argument_spec = dict(
name=dict(aliases=['pkg']),
# removed==absent, installed==present, these are accepted as aliases
state=dict(default='installed', choices=['absent','present','installed','removed','latest']),
enablerepo=dict(),
disablerepo=dict(),
list=dict(),
conf_file=dict(default=None),
disable_gpg_check=dict(required=False, default="no", type='bool'),
# this should not be needed, but exists as a failsafe
install_repoquery=dict(required=False, default="yes", type='bool'),
),
required_one_of = [['name','list']],
mutually_exclusive = [['name','list']],
supports_check_mode = True
)
# this should not be needed, but exists as a failsafe
params = module.params
if params['install_repoquery'] and not repoquery and not module.check_mode:
install_yum_utils(module)
if params['list']:
if not repoquery:
module.fail_json(msg="repoquery is required to use list= with this module. Please install the yum-utils package.")
results = dict(results=list_stuff(module, params['conf_file'], params['list']))
module.exit_json(**results)
else:
pkg = params['name']
state = params['state']
enablerepo = params.get('enablerepo', '')
disablerepo = params.get('disablerepo', '')
disable_gpg_check = params['disable_gpg_check']
res = ensure(module, state, pkg, params['conf_file'], enablerepo,
disablerepo, disable_gpg_check)
module.fail_json(msg="we should never get here unless this all failed", **res)
# import module snippets
from ansible.module_utils.basic import *
main()