Initial commit

This commit is contained in:
Ansible Core Team 2020-03-09 09:11:07 +00:00
commit aebc1b03fd
4861 changed files with 812621 additions and 0 deletions

View file

@ -0,0 +1,376 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, 2018 Kairo Araujo <kairo@kairo.eti.br>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
author:
- Kairo Araujo (@kairoaraujo)
module: aix_devices
short_description: Manages AIX devices
description:
- This module discovers, defines, removes and modifies attributes of AIX devices.
options:
attributes:
description:
- A list of device attributes.
type: dict
device:
description:
- The name of the device.
- C(all) is valid to rescan C(available) all devices (AIX cfgmgr command).
type: str
required: true
force:
description:
- Forces action.
type: bool
default: no
recursive:
description:
- Removes or defines a device and children devices.
type: bool
default: no
state:
description:
- Controls the device state.
- C(available) (alias C(present)) rescan a specific device or all devices (when C(device) is not specified).
- C(removed) (alias C(absent) removes a device.
- C(defined) changes device to Defined state.
type: str
choices: [ available, defined, removed ]
default: available
'''
EXAMPLES = r'''
- name: Scan new devices
aix_devices:
device: all
state: available
- name: Scan new virtual devices (vio0)
aix_devices:
device: vio0
state: available
- name: Removing IP alias to en0
aix_devices:
device: en0
attributes:
delalias4: 10.0.0.100,255.255.255.0
- name: Removes ent2
aix_devices:
device: ent2
state: removed
- name: Put device en2 in Defined
aix_devices:
device: en2
state: defined
- name: Removes ent4 (inexistent).
aix_devices:
device: ent4
state: removed
- name: Put device en4 in Defined (inexistent)
aix_devices:
device: en4
state: defined
- name: Put vscsi1 and children devices in Defined state.
aix_devices:
device: vscsi1
recursive: yes
state: defined
- name: Removes vscsi1 and children devices.
aix_devices:
device: vscsi1
recursive: yes
state: removed
- name: Changes en1 mtu to 9000 and disables arp.
aix_devices:
device: en1
attributes:
mtu: 900
arp: off
state: available
- name: Configure IP, netmask and set en1 up.
aix_devices:
device: en1
attributes:
netaddr: 192.168.0.100
netmask: 255.255.255.0
state: up
state: available
- name: Adding IP alias to en0
aix_devices:
device: en0
attributes:
alias4: 10.0.0.100,255.255.255.0
state: available
'''
RETURN = r''' # '''
from ansible.module_utils.basic import AnsibleModule
def _check_device(module, device):
"""
Check if device already exists and the state.
Args:
module: Ansible module.
device: device to be checked.
Returns: bool, device state
"""
lsdev_cmd = module.get_bin_path('lsdev', True)
rc, lsdev_out, err = module.run_command(["%s" % lsdev_cmd, '-C', '-l', "%s" % device])
if rc != 0:
module.fail_json(msg="Failed to run lsdev", rc=rc, err=err)
if lsdev_out:
device_state = lsdev_out.split()[1]
return True, device_state
device_state = None
return False, device_state
def _check_device_attr(module, device, attr):
"""
Args:
module: Ansible module.
device: device to check attributes.
attr: attribute to be checked.
Returns:
"""
lsattr_cmd = module.get_bin_path('lsattr', True)
rc, lsattr_out, err = module.run_command(["%s" % lsattr_cmd, '-El', "%s" % device, '-a', "%s" % attr])
hidden_attrs = ['delalias4', 'delalias6']
if rc == 255:
if attr in hidden_attrs:
current_param = ''
else:
current_param = None
return current_param
elif rc != 0:
module.fail_json(msg="Failed to run lsattr: %s" % err, rc=rc, err=err)
current_param = lsattr_out.split()[1]
return current_param
def discover_device(module, device):
    """
    Run cfgmgr to discover devices, optionally limited to a single device.

    :param module: AnsibleModule instance.
    :param device: device name to rescan, or 'all'/None for a full scan.
    :return: tuple (changed, msg) where msg is the cfgmgr output.
    """
    cfgmgr_cmd = module.get_bin_path('cfgmgr', True)
    cmd = [cfgmgr_cmd]
    # Fixed: the flag and its value must be separate argv entries; the
    # original passed a single "-l <device>" string, which cfgmgr received
    # as one bogus argument (and an empty '' argument when device was None).
    # 'all' means a full system scan - there is no device named 'all'.
    if device is not None and device != 'all':
        cmd += ['-l', device]
    changed = True
    msg = ''
    if not module.check_mode:
        # NOTE(review): cfgmgr's return code is deliberately ignored here,
        # matching the original best-effort behaviour.
        rc, cfgmgr_out, err = module.run_command(cmd)
        changed = True
        msg = cfgmgr_out
    return changed, msg
def change_device_attr(module, attributes, device, force):
    """
    Change AIX device attributes with chdev.

    :param module: AnsibleModule instance.
    :param attributes: dict mapping attribute names to desired values.
    :param device: device to modify.
    :param force: chdev force flag ('-f') or '' when not forced.
    :return: tuple (changed, msg) summarising changed / already-set /
        invalid attributes.
    """
    attr_changed = []
    attr_not_changed = []
    attr_invalid = []
    chdev_cmd = module.get_bin_path('chdev', True)
    for attr in list(attributes.keys()):
        # Values may arrive as non-string YAML scalars (e.g. mtu: 9000);
        # lsattr always reports strings, so compare like with like. This
        # also keeps ','.join() below from failing on ints.
        new_param = str(attributes[attr])
        current_param = _check_device_attr(module, device, attr)
        if current_param is None:
            attr_invalid.append(attr)
        elif current_param != new_param:
            cmd = [chdev_cmd, '-l', device, '-a', "%s=%s" % (attr, new_param)]
            if force:
                cmd.append(force)
            if not module.check_mode:
                rc, chdev_out, err = module.run_command(cmd)
                if rc != 0:
                    # Fixed: the original called exit_json here, which
                    # reported SUCCESS to Ansible even though chdev failed.
                    module.fail_json(msg="Failed to run chdev.", rc=rc, err=err)
            attr_changed.append(new_param)
        else:
            attr_not_changed.append(new_param)

    if attr_changed:
        changed = True
        attr_changed_msg = "Attributes changed: %s. " % ','.join(attr_changed)
    else:
        changed = False
        attr_changed_msg = ''
    if attr_not_changed:
        attr_not_changed_msg = "Attributes already set: %s. " % ','.join(attr_not_changed)
    else:
        attr_not_changed_msg = ''
    if attr_invalid:
        attr_invalid_msg = "Invalid attributes: %s " % ', '.join(attr_invalid)
    else:
        attr_invalid_msg = ''
    msg = "%s%s%s" % (attr_changed_msg, attr_not_changed_msg, attr_invalid_msg)
    return changed, msg
def remove_device(module, device, force, recursive, state):
    """
    Put a device into Defined state or remove it entirely via rmdev.

    :param module: AnsibleModule instance.
    :param device: device to act on.
    :param force: '-f' to force the operation, or '' (as mapped in main()).
    :param recursive: True to also act on all child devices.
    :param state: 'removed'/'absent' deletes the device definition ('-d'),
        'defined' only moves it to the Defined state.
    :return: tuple (changed, msg).
    """
    state_opt = {
        'removed': '-d',
        'absent': '-d',
        'defined': ''
    }
    rmdev_cmd = module.get_bin_path('rmdev', True)
    # Build the argv without empty-string placeholders: the original passed
    # '' entries for unset options, which rmdev received as bogus empty
    # arguments. It also never actually added the '-d' flag it computed,
    # so 'removed' behaved exactly like 'defined'. Both fixed here.
    cmd = [rmdev_cmd, '-l', device]
    if recursive:
        cmd.append('-R')
    if state_opt[state]:
        cmd.append(state_opt[state])
        # The original only honoured force together with removal; preserved.
        if force:
            cmd.append(force)
    changed = True
    msg = ''
    if not module.check_mode:
        rc, rmdev_out, err = module.run_command(cmd)
        if rc != 0:
            module.fail_json(msg="Failed to run rmdev", rc=rc, err=err)
        msg = rmdev_out
    return changed, msg
def main():
    """Entry point: parse parameters and dispatch on the requested state."""
    module = AnsibleModule(
        argument_spec=dict(
            attributes=dict(type='dict'),
            device=dict(type='str'),
            force=dict(type='bool', default=False),
            recursive=dict(type='bool', default=False),
            state=dict(type='str', default='available', choices=['available', 'defined', 'removed']),
        ),
        supports_check_mode=True,
    )
    # Translate the boolean force parameter into the CLI flag consumed by
    # chdev/rmdev ('' means "no flag").
    force_opt = {
        True: '-f',
        False: '',
    }
    attributes = module.params['attributes']
    device = module.params['device']
    force = force_opt[module.params['force']]
    recursive = module.params['recursive']
    state = module.params['state']
    result = dict(
        changed=False,
        msg='',
    )
    # NOTE(review): 'present' and 'absent' are handled below but are not in
    # the argument_spec choices, so those aliases can never be selected.
    if state == 'available' or state == 'present':
        if attributes:
            # change attributes on device
            device_status, device_state = _check_device(module, device)
            if device_status:
                result['changed'], result['msg'] = change_device_attr(module, attributes, device, force)
            else:
                result['msg'] = "Device %s does not exist." % device
        else:
            # discovery devices (cfgmgr)
            if device and device != 'all':
                device_status, device_state = _check_device(module, device)
                if device_status:
                    # run cfgmgr on specific device
                    result['changed'], result['msg'] = discover_device(module, device)
                else:
                    result['msg'] = "Device %s does not exist." % device
            else:
                # device is 'all' or unset: rescan the whole system
                result['changed'], result['msg'] = discover_device(module, device)
    elif state == 'removed' or state == 'absent' or state == 'defined':
        if not device:
            result['msg'] = "device is required to removed or defined state."
        else:
            # Remove device
            check_device, device_state = _check_device(module, device)
            if check_device:
                if state == 'defined' and device_state == 'Defined':
                    # Already in the requested state; nothing to do.
                    result['changed'] = False
                    result['msg'] = 'Device %s already in Defined' % device
                else:
                    result['changed'], result['msg'] = remove_device(module, device, force, recursive, state)
            else:
                result['msg'] = "Device %s does not exist." % device
    else:
        # Unreachable: argument_spec restricts state to the choices above.
        result['msg'] = "Unexpected state %s." % state
        module.fail_json(**result)
    module.exit_json(**result)
if __name__ == '__main__':
    main()

View file

@ -0,0 +1,573 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Kairo Araujo <kairo@kairo.eti.br>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
author:
- Kairo Araujo (@kairoaraujo)
module: aix_filesystem
short_description: Configure LVM and NFS file systems for AIX
description:
- This module creates, removes, mount and unmount LVM and NFS file system for
AIX using C(/etc/filesystems).
- For LVM file systems is possible to resize a file system.
options:
account_subsystem:
description:
- Specifies whether the file system is to be processed by the accounting subsystem.
type: bool
default: no
attributes:
description:
- Specifies attributes for files system separated by comma.
type: list
default: agblksize='4096',isnapshot='no'
auto_mount:
description:
- File system is automatically mounted at system restart.
type: bool
default: yes
device:
description:
- Logical volume (LV) device name or remote export device to create a NFS file system.
- It is used to create a file system on an already existing logical volume or the exported NFS file system.
- If not mentioned a new logical volume name will be created following AIX standards (LVM).
type: str
fs_type:
description:
- Specifies the virtual file system type.
type: str
default: jfs2
permissions:
description:
- Set file system permissions. C(rw) (read-write) or C(ro) (read-only).
type: str
choices: [ ro, rw ]
default: rw
mount_group:
description:
- Specifies the mount group.
type: str
filesystem:
description:
- Specifies the mount point, which is the directory where the file system will be mounted.
type: str
required: true
nfs_server:
description:
- Specifies a Network File System (NFS) server.
type: str
rm_mount_point:
description:
- Removes the mount point directory when used with state C(absent).
type: bool
default: no
size:
description:
- Specifies the file system size.
- For already C(present) it will be resized.
- 512-byte blocks, Megabytes or Gigabytes. If the value has M specified
it will be in Megabytes. If the value has G specified it will be in
Gigabytes.
- If no M or G the value will be 512-byte blocks.
- If "+" is specified in begin of value, the value will be added.
- If "-" is specified in begin of value, the value will be removed.
- If "+" or "-" is not specified, the total value will be the specified.
- Size will respects the LVM AIX standards.
type: str
state:
description:
- Controls the file system state.
- C(present) check if file system exists, creates or resize.
- C(absent) removes existing file system if already C(unmounted).
- C(mounted) checks if the file system is mounted or mount the file system.
- C(unmounted) check if the file system is unmounted or unmount the file system.
type: str
required: true
choices: [ absent, mounted, present, unmounted ]
default: present
vg:
description:
- Specifies an existing volume group (VG).
type: str
notes:
- For more C(attributes), please check "crfs" AIX manual.
'''
EXAMPLES = r'''
- name: Create filesystem in a previously defined logical volume.
aix_filesystem:
device: testlv
filesystem: /testfs
state: present
- name: Creating NFS filesystem from nfshost.
aix_filesystem:
device: /home/ftp
nfs_server: nfshost
filesystem: /home/ftp
state: present
- name: Creating a new file system without a previously logical volume.
aix_filesystem:
filesystem: /newfs
size: 1G
state: present
vg: datavg
- name: Unmounting /testfs.
aix_filesystem:
filesystem: /testfs
state: unmounted
- name: Resizing /mksysb to +512M.
aix_filesystem:
filesystem: /mksysb
size: +512M
state: present
- name: Resizing /mksysb to 11G.
aix_filesystem:
filesystem: /mksysb
size: 11G
state: present
- name: Resizing /mksysb to -2G.
aix_filesystem:
filesystem: /mksysb
size: -2G
state: present
- name: Remove NFS filesystem /home/ftp.
aix_filesystem:
filesystem: /home/ftp
rm_mount_point: yes
state: absent
- name: Remove /newfs.
aix_filesystem:
filesystem: /newfs
rm_mount_point: yes
state: absent
'''
RETURN = r'''
changed:
description: Return changed for aix_filesystems actions as true or false.
returned: always
type: bool
msg:
description: Return message regarding the action.
returned: always
type: str
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ansible.posix.plugins.module_utils.ismount import ismount
import re
def _fs_exists(module, filesystem):
"""
Check if file system already exists on /etc/filesystems.
:param module: Ansible module.
:param filesystem: filesystem name.
:return: True or False.
"""
lsfs_cmd = module.get_bin_path('lsfs', True)
rc, lsfs_out, err = module.run_command("%s -l %s" % (lsfs_cmd, filesystem))
if rc == 1:
if re.findall("No record matching", err):
return False
else:
module.fail_json(msg="Failed to run lsfs. Error message: %s" % err)
else:
return True
def _check_nfs_device(module, nfs_host, device):
"""
Validate if NFS server is exporting the device (remote export).
:param module: Ansible module.
:param nfs_host: nfs_host parameter, NFS server.
:param device: device parameter, remote export.
:return: True or False.
"""
showmount_cmd = module.get_bin_path('showmount', True)
rc, showmount_out, err = module.run_command(
"%s -a %s" % (showmount_cmd, nfs_host))
if rc != 0:
module.fail_json(msg="Failed to run showmount. Error message: %s" % err)
else:
showmount_data = showmount_out.splitlines()
for line in showmount_data:
if line.split(':')[1] == device:
return True
return False
def _validate_vg(module, vg):
"""
Check the current state of volume group.
:param module: Ansible module argument spec.
:param vg: Volume Group name.
:return: True (VG in varyon state) or False (VG in varyoff state) or
None (VG does not exist), message.
"""
lsvg_cmd = module.get_bin_path('lsvg', True)
rc, current_active_vgs, err = module.run_command("%s -o" % lsvg_cmd)
if rc != 0:
module.fail_json(msg="Failed executing %s command." % lsvg_cmd)
rc, current_all_vgs, err = module.run_command("%s" % lsvg_cmd)
if rc != 0:
module.fail_json(msg="Failed executing %s command." % lsvg_cmd)
if vg in current_all_vgs and vg not in current_active_vgs:
msg = "Volume group %s is in varyoff state." % vg
return False, msg
elif vg in current_active_vgs:
msg = "Volume group %s is in varyon state." % vg
return True, msg
else:
msg = "Volume group %s does not exist." % vg
return None, msg
def resize_fs(module, filesystem, size):
    """
    Resize an LVM file system with chfs.

    :param module: AnsibleModule instance.
    :param filesystem: mount point of the file system to resize.
    :param size: new size specification passed to chfs -a size=.
    :return: tuple (changed, message).
    """
    chfs_cmd = module.get_bin_path('chfs', True)
    if module.check_mode:
        # Assume a change would happen; nothing is executed.
        return True, ''
    rc, chfs_out, err = module.run_command('%s -a size="%s" %s' % (chfs_cmd, size, filesystem))
    if rc == 28:
        # chfs exit code 28: nothing needed to change.
        return False, chfs_out
    if rc != 0:
        if re.findall('Maximum allocation for logical', err):
            # Cannot grow past the LV maximum; report without failing.
            return False, err
        module.fail_json(msg="Failed to run chfs. Error message: %s" % err)
    # chfs succeeded; it still tells us when the size was already correct.
    already_sized = bool(re.findall('The filesystem size is already', chfs_out))
    return not already_sized, chfs_out
def create_fs(
        module, fs_type, filesystem, vg, device, size, mount_group, auto_mount,
        account_subsystem, permissions, nfs_server, attributes):
    """ Create LVM file system or NFS remote mount point. """
    # Join extra attributes into "attr1 -a attr2 ..."; the first '-a' is
    # supplied by the crfs command template below.
    attributes = ' -a '.join(attributes)
    # Parameters definition.
    account_subsys_opt = {
        True: '-t yes',
        False: '-t no'
    }
    if nfs_server is not None:
        # mknfsmnt expresses mount-at-boot as -A (yes) / -a (no).
        auto_mount_opt = {
            True: '-A',
            False: '-a'
        }
    else:
        # crfs expects an explicit "-A yes/no".
        auto_mount_opt = {
            True: '-A yes',
            False: '-A no'
        }
    if size is None:
        size = ''
    else:
        size = "-a size=%s" % size
    if device is None:
        device = ''
    else:
        device = "-d %s" % device
    if vg is None:
        vg = ''
    else:
        # Only build on a volume group that exists and is varied on.
        vg_state, msg = _validate_vg(module, vg)
        if vg_state:
            vg = "-g %s" % vg
        else:
            # VG is varied off or missing: report and change nothing.
            changed = False
            return changed, msg
    if mount_group is None:
        mount_group = ''
    else:
        mount_group = "-u %s" % mount_group
    auto_mount = auto_mount_opt[auto_mount]
    account_subsystem = account_subsys_opt[account_subsystem]
    if nfs_server is not None:
        # Creates a NFS file system.
        mknfsmnt_cmd = module.get_bin_path('mknfsmnt', True)
        if not module.check_mode:
            rc, mknfsmnt_out, err = module.run_command('%s -f "%s" %s -h "%s" -t "%s" "%s" -w "bg"' % (
                mknfsmnt_cmd, filesystem, device, nfs_server, permissions, auto_mount))
            if rc != 0:
                module.fail_json(msg="Failed to run mknfsmnt. Error message: %s" % err)
            else:
                changed = True
                msg = "NFS file system %s created." % filesystem
                return changed, msg
        else:
            # Check mode: assume a change would be made.
            changed = True
            msg = ''
            return changed, msg
    else:
        # Creates a LVM file system.
        crfs_cmd = module.get_bin_path('crfs', True)
        if not module.check_mode:
            cmd = "%s -v %s -m %s %s %s %s %s %s -p %s %s -a %s" % (
                crfs_cmd, fs_type, filesystem, vg, device, mount_group, auto_mount, account_subsystem, permissions, size, attributes)
            rc, crfs_out, err = module.run_command(cmd)
            if rc == 10:
                # NOTE(review): per the message below, rc 10 appears to mean
                # the pre-existing LV's VG is not empty - confirm on AIX.
                module.exit_json(
                    msg="Using a existent previously defined logical volume, "
                        "volume group needs to be empty. %s" % err)
            elif rc != 0:
                module.fail_json(msg="Failed to run %s. Error message: %s" % (cmd, err))
            else:
                changed = True
                return changed, crfs_out
        else:
            # Check mode: assume a change would be made.
            changed = True
            msg = ''
            return changed, msg
def remove_fs(module, filesystem, rm_mount_point):
    """
    Remove an LVM file system or NFS entry with rmfs.

    :param module: AnsibleModule instance.
    :param filesystem: mount point to remove.
    :param rm_mount_point: when True, also remove the mount point directory.
    :return: tuple (changed, msg).
    """
    rmfs_cmd = module.get_bin_path('rmfs', True)
    if not module.check_mode:
        # Fixed: the original command template hardcoded '-r' in addition to
        # the optional flag, so the mount point directory was ALWAYS removed
        # regardless of rm_mount_point.
        cmd = [rmfs_cmd]
        if rm_mount_point:
            cmd.append('-r')
        cmd.append(filesystem)
        rc, rmfs_out, err = module.run_command(cmd)
        if rc != 0:
            module.fail_json(msg="Failed to run %s. Error message: %s" % (' '.join(cmd), err))
        changed = True
        msg = rmfs_out
        if not rmfs_out:
            msg = "File system %s removed." % filesystem
        return changed, msg
    # Check mode: assume a change would be made.
    return True, ''
def mount_fs(module, filesystem):
    """
    Mount a file system.

    :param module: AnsibleModule instance.
    :param filesystem: mount point to mount.
    :return: tuple (changed, message).
    """
    mount_cmd = module.get_bin_path('mount', True)
    if module.check_mode:
        # Nothing executed; assume a change would be made.
        return True, ''
    rc, mount_out, err = module.run_command("%s %s" % (mount_cmd, filesystem))
    if rc != 0:
        module.fail_json(msg="Failed to run mount. Error message: %s" % err)
    return True, "File system %s mounted." % filesystem
def unmount_fs(module, filesystem):
    """
    Unmount a file system.

    :param module: AnsibleModule instance.
    :param filesystem: mount point to unmount.
    :return: tuple (changed, message).
    """
    unmount_cmd = module.get_bin_path('unmount', True)
    if module.check_mode:
        # Nothing executed; assume a change would be made.
        return True, ''
    rc, unmount_out, err = module.run_command("%s %s" % (unmount_cmd, filesystem))
    if rc != 0:
        module.fail_json(msg="Failed to run unmount. Error message: %s" % err)
    return True, "File system %s unmounted." % filesystem
def main():
    """Entry point: create/resize, remove, mount or unmount AIX file systems."""
    module = AnsibleModule(
        argument_spec=dict(
            account_subsystem=dict(type='bool', default=False),
            attributes=dict(type='list', default=["agblksize='4096'", "isnapshot='no'"]),
            auto_mount=dict(type='bool', default=True),
            device=dict(type='str'),
            filesystem=dict(type='str', required=True),
            fs_type=dict(type='str', default='jfs2'),
            permissions=dict(type='str', default='rw', choices=['rw', 'ro']),
            mount_group=dict(type='str'),
            nfs_server=dict(type='str'),
            rm_mount_point=dict(type='bool', default=False),
            size=dict(type='str'),
            state=dict(type='str', default='present', choices=['absent', 'mounted', 'present', 'unmounted']),
            vg=dict(type='str'),
        ),
        supports_check_mode=True,
    )
    account_subsystem = module.params['account_subsystem']
    attributes = module.params['attributes']
    auto_mount = module.params['auto_mount']
    device = module.params['device']
    fs_type = module.params['fs_type']
    permissions = module.params['permissions']
    mount_group = module.params['mount_group']
    filesystem = module.params['filesystem']
    nfs_server = module.params['nfs_server']
    rm_mount_point = module.params['rm_mount_point']
    size = module.params['size']
    state = module.params['state']
    vg = module.params['vg']
    result = dict(
        changed=False,
        msg='',
    )
    if state == 'present':
        fs_mounted = ismount(filesystem)
        fs_exists = _fs_exists(module, filesystem)
        # Check if fs is mounted or exists.
        if fs_mounted or fs_exists:
            result['msg'] = "File system %s already exists." % filesystem
            result['changed'] = False
            # If parameter size was passed, resize fs.
            if size is not None:
                result['changed'], result['msg'] = resize_fs(module, filesystem, size)
        # If fs doesn't exist, create it.
        else:
            # Check if fs will be a NFS device.
            if nfs_server is not None:
                if device is None:
                    result['msg'] = 'Parameter "device" is required when "nfs_server" is defined.'
                    module.fail_json(**result)
                else:
                    # Create a fs from NFS export.
                    if _check_nfs_device(module, nfs_server, device):
                        result['changed'], result['msg'] = create_fs(
                            module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes)
            # The two branches below are mutually exclusive with the NFS case
            # above, so create_fs runs at most once per invocation.
            if device is None:
                if vg is None:
                    result['msg'] = 'Required parameter "device" and/or "vg" is missing for filesystem creation.'
                    module.fail_json(**result)
                else:
                    # Create a fs from a volume group, letting LVM pick the LV.
                    result['changed'], result['msg'] = create_fs(
                        module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes)
            if device is not None and nfs_server is None:
                # Create a fs from a previously lv device.
                result['changed'], result['msg'] = create_fs(
                    module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes)
    elif state == 'absent':
        if ismount(filesystem):
            # Refuse to remove a mounted file system.
            result['msg'] = "File system %s mounted." % filesystem
        else:
            fs_status = _fs_exists(module, filesystem)
            if not fs_status:
                result['msg'] = "File system %s does not exist." % filesystem
            else:
                result['changed'], result['msg'] = remove_fs(module, filesystem, rm_mount_point)
    elif state == 'mounted':
        if ismount(filesystem):
            result['changed'] = False
            result['msg'] = "File system %s already mounted." % filesystem
        else:
            result['changed'], result['msg'] = mount_fs(module, filesystem)
    elif state == 'unmounted':
        if not ismount(filesystem):
            result['changed'] = False
            result['msg'] = "File system %s already unmounted." % filesystem
        else:
            result['changed'], result['msg'] = unmount_fs(module, filesystem)
    else:
        # Unreachable codeblock
        result['msg'] = "Unexpected state %s." % state
        module.fail_json(**result)
    module.exit_json(**result)
if __name__ == '__main__':
    main()

View file

@ -0,0 +1,252 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Joris Weijters <joris.weijters@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
author:
- Joris Weijters (@molekuul)
module: aix_inittab
short_description: Manages the inittab on AIX
description:
- Manages the inittab on AIX.
options:
name:
description:
- Name of the inittab entry.
type: str
required: yes
aliases: [ service ]
runlevel:
description:
- Runlevel of the entry.
type: str
required: yes
action:
description:
- Action what the init has to do with this entry.
type: str
required: yes
choices:
- boot
- bootwait
- hold
- initdefault
- 'off'
- once
- ondemand
- powerfail
- powerwait
- respawn
- sysinit
- wait
command:
description:
- What command has to run.
type: str
required: yes
insertafter:
description:
- After which inittabline should the new entry inserted.
type: str
state:
description:
- Whether the entry should be present or absent in the inittab file.
type: str
choices: [ absent, present ]
default: present
notes:
- The changes are persistent across reboots.
- You need root rights to read or adjust the inittab with the C(lsitab), C(chitab), C(mkitab) or C(rmitab) commands.
- Tested on AIX 7.1.
requirements:
- itertools
'''
EXAMPLES = '''
# Add service startmyservice to the inittab, directly after service existingservice.
- name: Add startmyservice to inittab
aix_inittab:
name: startmyservice
runlevel: 4
action: once
command: echo hello
insertafter: existingservice
state: present
become: yes
# Change inittab entry startmyservice to runlevel "2" and processaction "wait".
- name: Change startmyservice to inittab
aix_inittab:
name: startmyservice
runlevel: 2
action: wait
command: echo hello
state: present
become: yes
- name: Remove startmyservice from inittab
aix_inittab:
name: startmyservice
runlevel: 2
action: wait
command: echo hello
state: absent
become: yes
'''
RETURN = '''
name:
description: Name of the adjusted inittab entry
returned: always
type: str
sample: startmyservice
msg:
description: Action done with the inittab entry
returned: changed
type: str
sample: changed inittab entry startmyservice
changed:
description: Whether the inittab changed or not
returned: always
type: bool
sample: true
'''
# Import necessary libraries
try:
# python 2
from itertools import izip
except ImportError:
izip = zip
from ansible.module_utils.basic import AnsibleModule
# end import modules
# start defining the functions
def check_current_entry(module):
    """
    Look up the inittab entry named in module.params['name'].

    :param module: AnsibleModule instance.
    :return: dict with key 'exist' (bool); when the entry exists it also
        carries the 'name', 'runlevel', 'action' and 'command' fields.
    """
    existsdict = {'exist': False}
    lsitab = module.get_bin_path('lsitab')
    (rc, out, err) = module.run_command([lsitab, module.params['name']])
    if rc == 0:
        keys = ('name', 'runlevel', 'action', 'command')
        # Fixed: split at most 3 times - the command field may itself contain
        # ':' characters and must not be cut apart.
        values = out.split(":", 3)
        # strip non readable characters as \n
        values = [value.strip() for value in values]
        # zip works on both Python 2 and 3, so the izip shim is not needed.
        existsdict = dict(zip(keys, values))
        existsdict.update({'exist': True})
    return existsdict
def main():
    """Entry point: add, change or remove an AIX inittab entry."""
    # initialize
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True, aliases=['service']),
            runlevel=dict(type='str', required=True),
            action=dict(type='str', choices=[
                'boot',
                'bootwait',
                'hold',
                'initdefault',
                'off',
                'once',
                'ondemand',
                'powerfail',
                'powerwait',
                'respawn',
                'sysinit',
                'wait',
            ]),
            command=dict(type='str', required=True),
            insertafter=dict(type='str'),
            state=dict(type='str', default='present', choices=['absent', 'present']),
        ),
        supports_check_mode=True,
    )
    result = {
        'name': module.params['name'],
        'changed': False,
        'msg': ""
    }
    # Find commandline strings
    mkitab = module.get_bin_path('mkitab')
    rmitab = module.get_bin_path('rmitab')
    chitab = module.get_bin_path('chitab')
    # rc defaults to 0 so the checks below pass in check mode, where no
    # command is actually executed.
    rc = 0
    # check if the new entry exists
    current_entry = check_current_entry(module)
    # if action is install or change,
    if module.params['state'] == 'present':
        # create new entry string
        new_entry = module.params['name'] + ":" + module.params['runlevel'] + \
            ":" + module.params['action'] + ":" + module.params['command']
        # If current entry exists or fields are different(if the entry does not
        # exists, then the entry wil be created
        if (not current_entry['exist']) or (
                module.params['runlevel'] != current_entry['runlevel'] or
                module.params['action'] != current_entry['action'] or
                module.params['command'] != current_entry['command']):
            # If the entry does exist then change the entry
            if current_entry['exist']:
                if not module.check_mode:
                    (rc, out, err) = module.run_command([chitab, new_entry])
                    if rc != 0:
                        module.fail_json(
                            msg="could not change inittab", rc=rc, err=err)
                result['msg'] = "changed inittab entry" + " " + current_entry['name']
                result['changed'] = True
            # If the entry does not exist create the entry
            elif not current_entry['exist']:
                if module.params['insertafter']:
                    if not module.check_mode:
                        (rc, out, err) = module.run_command(
                            [mkitab, '-i', module.params['insertafter'], new_entry])
                else:
                    if not module.check_mode:
                        (rc, out, err) = module.run_command(
                            [mkitab, new_entry])
                if rc != 0:
                    module.fail_json(msg="could not adjust inittab", rc=rc, err=err)
                result['msg'] = "add inittab entry" + " " + module.params['name']
                result['changed'] = True
    elif module.params['state'] == 'absent':
        # If the action is remove and the entry exists then remove the entry
        if current_entry['exist']:
            if not module.check_mode:
                (rc, out, err) = module.run_command(
                    [rmitab, module.params['name']])
                if rc != 0:
                    # NOTE(review): message contains a typo ("grom inittab)");
                    # left untouched here since it is runtime output.
                    module.fail_json(
                        msg="could not remove entry grom inittab)", rc=rc, err=err)
            result['msg'] = "removed inittab entry" + " " + current_entry['name']
            result['changed'] = True
    module.exit_json(**result)
if __name__ == '__main__':
    main()

View file

@ -0,0 +1,368 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Kairo Araujo <kairo@kairo.eti.br>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
author:
- Kairo Araujo (@kairoaraujo)
module: aix_lvg
short_description: Manage LVM volume groups on AIX
description:
- This module creates, removes or resize volume groups on AIX LVM.
options:
force:
description:
- Force volume group creation.
type: bool
default: no
pp_size:
description:
- The size of the physical partition in megabytes.
type: int
pvs:
description:
- List of comma-separated devices to use as physical devices in this volume group.
- Required when creating or extending (C(present) state) the volume group.
- If not informed reducing (C(absent) state) the volume group will be removed.
type: list
state:
description:
- Control if the volume group exists and volume group AIX state varyonvg C(varyon) or varyoffvg C(varyoff).
type: str
choices: [ absent, present, varyoff, varyon ]
default: present
vg:
description:
- The name of the volume group.
type: str
required: true
vg_type:
description:
- The type of the volume group.
type: str
choices: [ big, normal, scalable ]
default: normal
notes:
- AIX will permit remove VG only if all LV/Filesystems are not busy.
- Module does not modify PP size for already present volume group.
'''
EXAMPLES = r'''
- name: Create a volume group datavg
aix_lvg:
vg: datavg
pp_size: 128
vg_type: scalable
state: present
- name: Removing a volume group datavg
aix_lvg:
vg: datavg
state: absent
- name: Extending rootvg
aix_lvg:
vg: rootvg
pvs: hdisk1
state: present
- name: Reducing rootvg
aix_lvg:
vg: rootvg
pvs: hdisk1
state: absent
'''
RETURN = r''' # '''
from ansible.module_utils.basic import AnsibleModule
def _validate_pv(module, vg, pvs):
"""
Function to validate if the physical volume (PV) is not already in use by
another volume group or Oracle ASM.
:param module: Ansible module argument spec.
:param vg: Volume group name.
:param pvs: Physical volume list.
:return: [bool, message] or module.fail_json for errors.
"""
lspv_cmd = module.get_bin_path('lspv', True)
rc, current_lspv, stderr = module.run_command("%s" % lspv_cmd)
if rc != 0:
module.fail_json(msg="Failed executing 'lspv' command.", rc=rc, stdout=current_lspv, stderr=stderr)
for pv in pvs:
# Get pv list.
lspv_list = {}
for line in current_lspv.splitlines():
pv_data = line.split()
lspv_list[pv_data[0]] = pv_data[2]
# Check if pv exists and is free.
if pv not in lspv_list.keys():
module.fail_json(msg="Physical volume '%s' doesn't exist." % pv)
if lspv_list[pv] == 'None':
# Disk None, looks free.
# Check if PV is not already in use by Oracle ASM.
lquerypv_cmd = module.get_bin_path('lquerypv', True)
rc, current_lquerypv, stderr = module.run_command("%s -h /dev/%s 20 10" % (lquerypv_cmd, pv))
if rc != 0:
module.fail_json(msg="Failed executing lquerypv command.", rc=rc, stdout=current_lquerypv, stderr=stderr)
if 'ORCLDISK' in current_lquerypv:
module.fail_json("Physical volume '%s' is already used by Oracle ASM." % pv)
msg = "Physical volume '%s' is ok to be used." % pv
return True, msg
# Check if PV is already in use for the same vg.
elif vg != lspv_list[pv]:
module.fail_json(msg="Physical volume '%s' is in use by another volume group '%s'." % (pv, lspv_list[pv]))
msg = "Physical volume '%s' is already used by volume group '%s'." % (pv, lspv_list[pv])
return False, msg
def _validate_vg(module, vg):
"""
Check the current state of volume group.
:param module: Ansible module argument spec.
:param vg: Volume Group name.
:return: True (VG in varyon state) or False (VG in varyoff state) or
None (VG does not exist), message.
"""
lsvg_cmd = module.get_bin_path('lsvg', True)
rc, current_active_vgs, err = module.run_command("%s -o" % lsvg_cmd)
if rc != 0:
module.fail_json(msg="Failed executing '%s' command." % lsvg_cmd)
rc, current_all_vgs, err = module.run_command("%s" % lsvg_cmd)
if rc != 0:
module.fail_json(msg="Failed executing '%s' command." % lsvg_cmd)
if vg in current_all_vgs and vg not in current_active_vgs:
msg = "Volume group '%s' is in varyoff state." % vg
return False, msg
if vg in current_active_vgs:
msg = "Volume group '%s' is in varyon state." % vg
return True, msg
msg = "Volume group '%s' does not exist." % vg
return None, msg
def create_extend_vg(module, vg, pvs, pp_size, vg_type, force, vg_validation):
    """Create a new volume group, or extend an existing one with extra PVs."""
    force_flag = '-f' if force else ''
    type_flag = {'normal': '', 'big': '-B', 'scalable': '-S'}[vg_type]

    # Refuse PVs that are already claimed elsewhere.
    pv_state, msg = _validate_pv(module, vg, pvs)
    if not pv_state:
        return False, msg

    vg_state, msg = vg_validation
    if vg_state is False:
        # VG exists but is varied off: leave it alone.
        return False, msg

    if vg_state is True:
        # VG exists and is active: extend it with the new PVs.
        if module.check_mode:
            return True, ""
        extendvg_cmd = module.get_bin_path('extendvg', True)
        rc, dummy_out, dummy_err = module.run_command("%s %s %s" % (extendvg_cmd, vg, ' '.join(pvs)))
        if rc != 0:
            return False, "Extending volume group '%s' has failed." % vg
        return True, "Volume group '%s' extended." % vg

    # vg_state is None: the VG does not exist yet, so create it.
    if module.check_mode:
        return True, ''
    mkvg_cmd = module.get_bin_path('mkvg', True)
    rc, dummy_out, dummy_err = module.run_command(
        "%s %s %s %s -y %s %s" % (mkvg_cmd, type_flag, pp_size, force_flag, vg, ' '.join(pvs)))
    if rc != 0:
        return False, "Creating volume group '%s' failed." % vg
    return True, "Volume group '%s' created." % vg
def reduce_vg(module, vg, pvs, vg_validation):
    """Remove PVs from a volume group, or delete the VG entirely when *pvs* is None."""
    vg_state, msg = vg_validation
    if vg_state is False or vg_state is None:
        # Varied-off or non-existent VG: nothing to reduce.
        return False, msg

    if pvs is None:
        # No PV list given: remove the whole VG by reducing away every PV.
        # Note: AIX only permits this when the VG holds no logical volumes.
        lsvg_cmd = module.get_bin_path('lsvg', True)
        rc, current_pvs, err = module.run_command("%s -p %s" % (lsvg_cmd, vg))
        if rc != 0:
            module.fail_json(msg="Failing to execute '%s' command." % lsvg_cmd)

        # Skip the two header lines of 'lsvg -p' output.
        pvs_to_remove = [line.split()[0] for line in current_pvs.splitlines()[2:]]
        success_msg = "Volume group '%s' removed." % vg
    else:
        pvs_to_remove = pvs
        success_msg = ("Physical volume(s) '%s' removed from Volume group '%s'." % (' '.join(pvs_to_remove), vg))

    if len(pvs_to_remove) <= 0:
        return False, "No physical volumes to remove."

    if not module.check_mode:
        reducevg_cmd = module.get_bin_path('reducevg', True)
        rc, stdout, stderr = module.run_command("%s -df %s %s" % (reducevg_cmd, vg, ' '.join(pvs_to_remove)))
        if rc != 0:
            module.fail_json(msg="Unable to remove '%s'." % vg, rc=rc, stdout=stdout, stderr=stderr)

    return True, success_msg
def state_vg(module, vg, state, vg_validation):
    """Vary a volume group on or off, reporting whether a change was made."""
    vg_state, msg = vg_validation
    if vg_state is None:
        # Cannot change the state of a VG that does not exist.
        module.fail_json(msg=msg)

    if state == 'varyon':
        if vg_state:
            # Already varied on: nothing to do.
            return False, msg
        if not module.check_mode:
            varyonvg_cmd = module.get_bin_path('varyonvg', True)
            rc, varyonvg_out, err = module.run_command("%s %s" % (varyonvg_cmd, vg))
            if rc != 0:
                module.fail_json(msg="Command 'varyonvg' failed.", rc=rc, err=err)
        return True, "Varyon volume group %s completed." % vg

    if state == 'varyoff':
        if not vg_state:
            # Already varied off: nothing to do.
            return False, msg
        if not module.check_mode:
            varyoffvg_cmd = module.get_bin_path('varyoffvg', True)
            rc, varyoffvg_out, stderr = module.run_command("%s %s" % (varyoffvg_cmd, vg))
            if rc != 0:
                module.fail_json(msg="Command 'varyoffvg' failed.", rc=rc, stdout=varyoffvg_out, stderr=stderr)
        return True, "Varyoff volume group %s completed." % vg
def main():
    """Entry point: parse arguments and dispatch to the requested VG action."""
    module = AnsibleModule(
        argument_spec=dict(
            force=dict(type='bool', default=False),
            pp_size=dict(type='int'),
            pvs=dict(type='list'),
            state=dict(type='str', default='present', choices=['absent', 'present', 'varyoff', 'varyon']),
            vg=dict(type='str', required=True),
            vg_type=dict(type='str', default='normal', choices=['big', 'normal', 'scalable'])
        ),
        supports_check_mode=True,
    )

    force = module.params['force']
    pp_size = module.params['pp_size']
    pvs = module.params['pvs']
    state = module.params['state']
    vg = module.params['vg']
    vg_type = module.params['vg_type']

    # Pre-format the physical partition size as the mkvg '-s' option
    # (empty string when not specified).
    if pp_size is None:
        pp_size = ''
    else:
        pp_size = "-s %s" % pp_size

    # (state, msg) tuple shared by all actions below.
    vg_validation = _validate_vg(module, vg)

    if state == 'present':
        if not pvs:
            changed = False
            msg = "pvs is required to state 'present'."
            module.fail_json(msg=msg)
        else:
            changed, msg = create_extend_vg(module, vg, pvs, pp_size, vg_type, force, vg_validation)
    elif state == 'absent':
        changed, msg = reduce_vg(module, vg, pvs, vg_validation)
    elif state == 'varyon' or state == 'varyoff':
        changed, msg = state_vg(module, vg, state, vg_validation)
    else:
        # Defensive default; argument_spec choices should prevent this.
        changed = False
        msg = "Unexpected state"

    module.exit_json(changed=changed, msg=msg, state=state)
if __name__ == '__main__':
main()

View file

@ -0,0 +1,340 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Alain Dejoux <adejoux@djouxtech.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
author:
- Alain Dejoux (@adejoux)
module: aix_lvol
short_description: Configure AIX LVM logical volumes
description:
- This module creates, removes or resizes AIX logical volumes. Inspired by lvol module.
options:
vg:
description:
- The volume group this logical volume is part of.
type: str
required: true
lv:
description:
- The name of the logical volume.
type: str
required: true
lv_type:
description:
- The type of the logical volume.
type: str
default: jfs2
size:
description:
- The size of the logical volume with one of the [MGT] units.
type: str
copies:
description:
- The number of copies of the logical volume.
- Maximum copies are 3.
type: int
default: 1
policy:
description:
- Sets the interphysical volume allocation policy.
- C(maximum) allocates logical partitions across the maximum number of physical volumes.
- C(minimum) allocates logical partitions across the minimum number of physical volumes.
type: str
choices: [ maximum, minimum ]
default: maximum
state:
description:
- Control if the logical volume exists. If C(present) and the
volume does not already exist then the C(size) option is required.
type: str
choices: [ absent, present ]
default: present
opts:
description:
- Free-form options to be passed to the mklv command.
type: str
pvs:
description:
- A list of physical volumes e.g. C(hdisk1,hdisk2).
type: list
'''
EXAMPLES = r'''
- name: Create a logical volume of 512M
aix_lvol:
vg: testvg
lv: testlv
size: 512M
- name: Create a logical volume of 512M with disks hdisk1 and hdisk2
aix_lvol:
vg: testvg
lv: test2lv
size: 512M
pvs: [ hdisk1, hdisk2 ]
- name: Create a logical volume of 512M mirrored
aix_lvol:
vg: testvg
lv: test3lv
size: 512M
copies: 2
- name: Create a logical volume of 1G with a minimum placement policy
aix_lvol:
vg: rootvg
lv: test4lv
size: 1G
policy: minimum
- name: Create a logical volume with special options like mirror pool
aix_lvol:
vg: testvg
lv: testlv
size: 512M
opts: -p copy1=poolA -p copy2=poolB
- name: Extend the logical volume to 1200M
aix_lvol:
vg: testvg
lv: test4lv
size: 1200M
- name: Remove the logical volume
aix_lvol:
vg: testvg
lv: testlv
state: absent
'''
RETURN = r'''
msg:
type: str
description: A friendly message describing the task result.
returned: always
sample: Logical volume testlv created.
'''
import re
from ansible.module_utils.basic import AnsibleModule
def convert_size(module, size):
    """Convert a size string like '512M', '2G' or '1T' to megabytes."""
    suffix = size[-1].upper()
    try:
        exponent = ['M', 'G', 'T'].index(suffix)
    except ValueError:
        # Unknown unit suffix: abort the module run.
        module.fail_json(msg="No valid size unit specified.")
    return int(size[:-1]) * 1024 ** exponent
def round_ppsize(x, base=16):
    """Round *x* to a multiple of *base*, never returning less than *x*."""
    rounded = int(base * round(float(x) / base))
    # Nearest multiple may fall below x; bump up one step in that case.
    return rounded + base if rounded < x else rounded
def parse_lv(data):
    """Parse 'lslv' output into a dict of name, vg, size (MB) and policy.

    Returns None when no 'LOGICAL VOLUME:' header is present in *data*.
    """
    name = None

    for line in data.splitlines():
        m = re.search(r"LOGICAL VOLUME:\s+(\w+)\s+VOLUME GROUP:\s+(\w+)", line)
        if m is not None:
            name, vg = m.group(1), m.group(2)
            continue
        m = re.search(r"LPs:\s+(\d+).*PPs", line)
        if m is not None:
            lps = int(m.group(1))
            continue
        m = re.search(r"PP SIZE:\s+(\d+)", line)
        if m is not None:
            pp_size = int(m.group(1))
            continue
        m = re.search(r"INTER-POLICY:\s+(\w+)", line)
        if m is not None:
            policy = m.group(1)
            continue

    if not name:
        return None

    # Size in megabytes: logical partitions times physical partition size.
    return {'name': name, 'vg': vg, 'size': lps * pp_size, 'policy': policy}
def parse_vg(data):
    """Parse 'lsvg' output into a dict of name, total/free megabytes and PP size.

    'size' and 'free' are taken from the parenthesised megabyte figures of
    the TOTAL PPs / FREE PPs lines.
    """
    for line in data.splitlines():
        m = re.search(r"VOLUME GROUP:\s+(\w+)", line)
        if m is not None:
            name = m.group(1)
            continue
        m = re.search(r"TOTAL PP.*\((\d+)", line)
        if m is not None:
            size = int(m.group(1))
            continue
        m = re.search(r"PP SIZE:\s+(\d+)", line)
        if m is not None:
            pp_size = int(m.group(1))
            continue
        m = re.search(r"FREE PP.*\((\d+)", line)
        if m is not None:
            free = int(m.group(1))
            continue

    return {'name': name, 'size': size, 'free': free, 'pp_size': pp_size}
def main():
    """Entry point: create, resize, re-policy or remove an AIX logical volume."""
    module = AnsibleModule(
        argument_spec=dict(
            vg=dict(type='str', required=True),
            lv=dict(type='str', required=True),
            lv_type=dict(type='str', default='jfs2'),
            size=dict(type='str'),
            opts=dict(type='str', default=''),
            copies=dict(type='int', default=1),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            policy=dict(type='str', default='maximum', choices=['maximum', 'minimum']),
            pvs=dict(type='list', default=list())
        ),
        supports_check_mode=True,
    )

    vg = module.params['vg']
    lv = module.params['lv']
    lv_type = module.params['lv_type']
    size = module.params['size']
    opts = module.params['opts']
    copies = module.params['copies']
    policy = module.params['policy']
    state = module.params['state']
    pvs = module.params['pvs']

    pv_list = ' '.join(pvs)

    # mklv/chlv '-e' flag: 'x' = maximum inter-PV spread, 'm' = minimum.
    if policy == 'maximum':
        lv_policy = 'x'
    else:
        lv_policy = 'm'

    # Add echo command when running in check-mode, so the real command
    # is printed instead of executed.
    if module.check_mode:
        test_opt = 'echo '
    else:
        test_opt = ''

    # check if system commands are available
    lsvg_cmd = module.get_bin_path("lsvg", required=True)
    lslv_cmd = module.get_bin_path("lslv", required=True)

    # Get information on volume group requested
    rc, vg_info, err = module.run_command("%s %s" % (lsvg_cmd, vg))

    if rc != 0:
        if state == 'absent':
            module.exit_json(changed=False, msg="Volume group %s does not exist." % vg)
        else:
            module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, out=vg_info, err=err)

    this_vg = parse_vg(vg_info)

    if size is not None:
        # Calculate pp size and round it up based on pp size.
        lv_size = round_ppsize(convert_size(module, size), base=this_vg['pp_size'])

    # Get information on logical volume requested
    rc, lv_info, err = module.run_command(
        "%s %s" % (lslv_cmd, lv))

    if rc != 0:
        if state == 'absent':
            module.exit_json(changed=False, msg="Logical Volume %s does not exist." % lv)

    changed = False

    this_lv = parse_lv(lv_info)

    # Creating a new LV requires a size; an existing LV may omit it.
    if state == 'present' and not size:
        if this_lv is None:
            module.fail_json(msg="No size given.")

    if this_lv is None:
        if state == 'present':
            # Refuse creation when the VG lacks space for the requested size.
            if lv_size > this_vg['free']:
                module.fail_json(msg="Not enough free space in volume group %s: %s MB free." % (this_vg['name'], this_vg['free']))

            # create LV
            mklv_cmd = module.get_bin_path("mklv", required=True)

            cmd = "%s %s -t %s -y %s -c %s -e %s %s %s %sM %s" % (test_opt, mklv_cmd, lv_type, lv, copies, lv_policy, opts, vg, lv_size, pv_list)
            rc, out, err = module.run_command(cmd)
            if rc == 0:
                module.exit_json(changed=True, msg="Logical volume %s created." % lv)
            else:
                module.fail_json(msg="Creating logical volume %s failed." % lv, rc=rc, out=out, err=err)
    else:
        if state == 'absent':
            # remove LV
            rmlv_cmd = module.get_bin_path("rmlv", required=True)
            rc, out, err = module.run_command("%s %s -f %s" % (test_opt, rmlv_cmd, this_lv['name']))
            if rc == 0:
                module.exit_json(changed=True, msg="Logical volume %s deleted." % lv)
            else:
                module.fail_json(msg="Failed to remove logical volume %s." % lv, rc=rc, out=out, err=err)
        else:
            if this_lv['policy'] != policy:
                # change lv allocation policy
                chlv_cmd = module.get_bin_path("chlv", required=True)
                rc, out, err = module.run_command("%s %s -e %s %s" % (test_opt, chlv_cmd, lv_policy, this_lv['name']))
                if rc == 0:
                    module.exit_json(changed=True, msg="Logical volume %s policy changed: %s." % (lv, policy))
                else:
                    module.fail_json(msg="Failed to change logical volume %s policy." % lv, rc=rc, out=out, err=err)

            if vg != this_lv['vg']:
                module.fail_json(msg="Logical volume %s already exist in volume group %s" % (lv, this_lv['vg']))

            # from here the last remaining action is to resize it, if no size parameter is passed we do nothing.
            if not size:
                module.exit_json(changed=False, msg="Logical volume %s already exist." % (lv))

            # resize LV based on absolute values
            if int(lv_size) > this_lv['size']:
                extendlv_cmd = module.get_bin_path("extendlv", required=True)
                cmd = "%s %s %s %sM" % (test_opt, extendlv_cmd, lv, lv_size - this_lv['size'])
                rc, out, err = module.run_command(cmd)
                if rc == 0:
                    module.exit_json(changed=True, msg="Logical volume %s size extended to %sMB." % (lv, lv_size))
                else:
                    module.fail_json(msg="Unable to resize %s to %sMB." % (lv, lv_size), rc=rc, out=out, err=err)
            elif lv_size < this_lv['size']:
                # Shrinking is destructive on AIX; refuse it outright.
                module.fail_json(msg="No shrinking of Logical Volume %s permitted. Current size: %s MB" % (lv, this_lv['size']))
            else:
                module.exit_json(changed=False, msg="Logical volume %s size is already %sMB." % (lv, lv_size))
if __name__ == '__main__':
main()

View file

@ -0,0 +1,164 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, Gabe Mulley <gabe.mulley@gmail.com>
# Copyright: (c) 2015, David Wittman <dwittman@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: alternatives
short_description: Manages alternative programs for common commands
description:
- Manages symbolic links using the 'update-alternatives' tool.
- Useful when multiple programs are installed but provide similar functionality (e.g. different editors).
author:
- David Wittman (@DavidWittman)
- Gabe Mulley (@mulby)
options:
name:
description:
- The generic name of the link.
type: str
required: true
path:
description:
- The path to the real executable that the link should point to.
type: path
required: true
link:
description:
- The path to the symbolic link that should point to the real executable.
- This option is always required on RHEL-based distributions. On Debian-based distributions this option is
required when the alternative I(name) is unknown to the system.
type: path
priority:
description:
- The priority of the alternative.
type: int
default: 50
requirements: [ update-alternatives ]
'''
EXAMPLES = r'''
- name: Correct java version selected
alternatives:
name: java
path: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
- name: Alternatives link created
alternatives:
name: hadoop-conf
link: /etc/hadoop/conf
path: /etc/hadoop/conf.ansible
- name: Make java 32 bit an alternative with low priority
alternatives:
name: java
path: /usr/lib/jvm/java-7-openjdk-i386/jre/bin/java
priority: -10
'''
import os
import re
import subprocess
from ansible.module_utils.basic import AnsibleModule
def main():
    """Entry point: ensure an update-alternatives entry points at the given path."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True),
            path=dict(type='path', required=True),
            link=dict(type='path'),
            priority=dict(type='int', default=50),
        ),
        supports_check_mode=True,
    )

    params = module.params
    name = params['name']
    path = params['path']
    link = params['link']
    priority = params['priority']

    UPDATE_ALTERNATIVES = module.get_bin_path('update-alternatives', True)

    current_path = None
    all_alternatives = []

    # Run `update-alternatives --display <name>` to find existing alternatives
    (rc, display_output, _) = module.run_command(
        ['env', 'LC_ALL=C', UPDATE_ALTERNATIVES, '--display', name]
    )

    if rc == 0:
        # Alternatives already exist for this link group
        # Parse the output to determine the current path of the symlink and
        # available alternatives
        current_path_regex = re.compile(r'^\s*link currently points to (.*)$',
                                        re.MULTILINE)
        alternative_regex = re.compile(r'^(\/.*)\s-\spriority', re.MULTILINE)

        match = current_path_regex.search(display_output)
        if match:
            current_path = match.group(1)
        all_alternatives = alternative_regex.findall(display_output)

        if not link:
            # Read the current symlink target from `update-alternatives --query`
            # in case we need to install the new alternative before setting it.
            #
            # This is only compatible on Debian-based systems, as the other
            # alternatives don't have --query available
            rc, query_output, _ = module.run_command(
                ['env', 'LC_ALL=C', UPDATE_ALTERNATIVES, '--query', name]
            )
            if rc == 0:
                for line in query_output.splitlines():
                    if line.startswith('Link:'):
                        link = line.split()[1]
                        break

    if current_path != path:
        if module.check_mode:
            module.exit_json(changed=True, current_path=current_path)
        try:
            # install the requested path if necessary
            if path not in all_alternatives:
                if not os.path.exists(path):
                    module.fail_json(msg="Specified path %s does not exist" % path)
                if not link:
                    module.fail_json(msg="Needed to install the alternative, but unable to do so as we are missing the link")
                module.run_command(
                    [UPDATE_ALTERNATIVES, '--install', link, name, path, str(priority)],
                    check_rc=True
                )

            # select the requested path
            module.run_command(
                [UPDATE_ALTERNATIVES, '--set', name, path],
                check_rc=True
            )

            module.exit_json(changed=True)
        except subprocess.CalledProcessError as cpe:
            # NOTE(review): run_command(check_rc=True) reports failures via
            # fail_json itself rather than raising CalledProcessError, so this
            # handler looks unreachable — confirm before relying on it.
            module.fail_json(msg=str(dir(cpe)))
    else:
        module.exit_json(changed=False)
if __name__ == '__main__':
main()

View file

@ -0,0 +1,156 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ted Trask <ttrask01@yahoo.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: awall
short_description: Manage awall policies
author: Ted Trask (@tdtrask) <ttrask01@yahoo.com>
description:
- This module allows for enable/disable/activate of I(awall) policies.
- Alpine Wall (I(awall)) generates a firewall configuration from the enabled policy files
and activates the configuration on the system.
options:
name:
description:
- One or more policy names.
type: list
state:
description:
- Whether the policies should be enabled or disabled.
type: str
choices: [ disabled, enabled ]
default: enabled
activate:
description:
- Activate the new firewall rules.
- Can be run with other steps or on its own.
type: bool
default: no
'''
EXAMPLES = r'''
- name: Enable "foo" and "bar" policy
awall:
name: [ foo bar ]
state: enabled
- name: Disable "foo" and "bar" policy and activate new rules
awall:
name:
- foo
- bar
state: disabled
activate: no
- name: Activate currently enabled firewall rules
awall:
activate: yes
'''
RETURN = ''' # '''
import re
from ansible.module_utils.basic import AnsibleModule
def activate(module):
    """Apply the currently enabled awall policies ('awall activate --force')."""
    rc, stdout, stderr = module.run_command("%s activate --force" % (AWALL_PATH))
    if rc != 0:
        module.fail_json(msg="could not activate new rules", stdout=stdout, stderr=stderr)
    return True
def is_policy_enabled(module, name):
    """Return True when 'awall list' reports the named policy as enabled."""
    rc, stdout, stderr = module.run_command("%s list" % (AWALL_PATH))
    return bool(re.search(r"^%s\s+enabled" % name, stdout, re.MULTILINE))
def enable_policy(module, names, act):
    """Enable the given awall policies, optionally activating the new rules."""
    pending = [policy for policy in names if not is_policy_enabled(module, policy)]
    if not pending:
        module.exit_json(changed=False, msg="policy(ies) already enabled")

    names = " ".join(pending)
    if module.check_mode:
        # Harmless stand-in command while in check mode.
        cmd = "%s list" % (AWALL_PATH)
    else:
        cmd = "%s enable %s" % (AWALL_PATH, names)
    rc, stdout, stderr = module.run_command(cmd)
    if rc != 0:
        module.fail_json(msg="failed to enable %s" % names, stdout=stdout, stderr=stderr)
    if act and not module.check_mode:
        activate(module)
    module.exit_json(changed=True, msg="enabled awall policy(ies): %s" % names)
def disable_policy(module, names, act):
    """Disable the given awall policies, optionally activating the new rules."""
    pending = [policy for policy in names if is_policy_enabled(module, policy)]
    if not pending:
        module.exit_json(changed=False, msg="policy(ies) already disabled")

    names = " ".join(pending)
    if module.check_mode:
        # Harmless stand-in command while in check mode.
        cmd = "%s list" % (AWALL_PATH)
    else:
        cmd = "%s disable %s" % (AWALL_PATH, names)
    rc, stdout, stderr = module.run_command(cmd)
    if rc != 0:
        module.fail_json(msg="failed to disable %s" % names, stdout=stdout, stderr=stderr)
    if act and not module.check_mode:
        activate(module)
    module.exit_json(changed=True, msg="disabled awall policy(ies): %s" % names)
def main():
    """Entry point: enable/disable awall policies and/or activate the rules."""
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(type='str', default='enabled', choices=['disabled', 'enabled']),
            name=dict(type='list'),
            activate=dict(type='bool', default=False),
        ),
        required_one_of=[['name', 'activate']],
        supports_check_mode=True,
    )

    # Helper functions read the awall binary path from this module global.
    global AWALL_PATH
    AWALL_PATH = module.get_bin_path('awall', required=True)

    p = module.params

    # enable_policy/disable_policy exit_json on their own when they act.
    if p['name']:
        if p['state'] == 'enabled':
            enable_policy(module, p['name'], p['activate'])
        elif p['state'] == 'disabled':
            disable_policy(module, p['name'], p['activate'])

    if p['activate']:
        if not module.check_mode:
            activate(module)
        module.exit_json(changed=True, msg="activated awall rules")

    # Unreachable in practice: required_one_of guarantees name or activate.
    module.fail_json(msg="no action defined")
if __name__ == '__main__':
main()

View file

@ -0,0 +1,424 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Adam Števko <adam.stevko@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: beadm
short_description: Manage ZFS boot environments on FreeBSD/Solaris/illumos systems.
description:
- Create, delete or activate ZFS boot environments.
- Mount and unmount ZFS boot environments.
author: Adam Števko (@xen0l)
options:
name:
description:
- ZFS boot environment name.
type: str
required: True
aliases: [ "be" ]
snapshot:
description:
- If specified, the new boot environment will be cloned from the given
snapshot or inactive boot environment.
type: str
description:
description:
- Associate a description with a new boot environment. This option is
available only on Solarish platforms.
type: str
options:
description:
- Create the datasets for new BE with specific ZFS properties.
- Multiple options can be specified.
- This option is available only on Solarish platforms.
type: str
mountpoint:
description:
- Path where to mount the ZFS boot environment.
type: path
state:
description:
- Create or delete ZFS boot environment.
type: str
choices: [ absent, activated, mounted, present, unmounted ]
default: present
force:
description:
- Specifies if the unmount should be forced.
type: bool
default: false
'''
EXAMPLES = r'''
- name: Create ZFS boot environment
beadm:
name: upgrade-be
state: present
- name: Create ZFS boot environment from existing inactive boot environment
beadm:
name: upgrade-be
snapshot: be@old
state: present
- name: Create ZFS boot environment with compression enabled and description "upgrade"
beadm:
name: upgrade-be
options: "compression=on"
description: upgrade
state: present
- name: Delete ZFS boot environment
beadm:
name: old-be
state: absent
- name: Mount ZFS boot environment on /tmp/be
beadm:
name: BE
mountpoint: /tmp/be
state: mounted
- name: Unmount ZFS boot environment
beadm:
name: BE
state: unmounted
- name: Activate ZFS boot environment
beadm:
name: upgrade-be
state: activated
'''
RETURN = r'''
name:
description: BE name
returned: always
type: str
sample: pre-upgrade
snapshot:
description: ZFS snapshot to create BE from
returned: always
type: str
sample: rpool/ROOT/oi-hipster@fresh
description:
description: BE description
returned: always
type: str
sample: Upgrade from 9.0 to 10.0
options:
description: BE additional options
returned: always
type: str
sample: compression=on
mountpoint:
description: BE mountpoint
returned: always
type: str
sample: /mnt/be
state:
description: state of the target
returned: always
type: str
sample: present
force:
description: If forced action is wanted
returned: always
type: bool
sample: False
'''
import os
import re
from ansible.module_utils.basic import AnsibleModule
class BE(object):
    """Wrapper around the 'beadm' CLI for managing ZFS boot environments.

    'beadm list -H' output is tab-separated on FreeBSD and semicolon-separated
    on Solarish systems; the is_freebsd flag selects the parsing mode.
    """

    def __init__(self, module):
        self.module = module

        # Parameters copied from the Ansible argument spec.
        self.name = module.params['name']
        self.snapshot = module.params['snapshot']
        self.description = module.params['description']
        self.options = module.params['options']
        self.mountpoint = module.params['mountpoint']
        self.state = module.params['state']
        self.force = module.params['force']

        # Output format and feature set differ on FreeBSD.
        self.is_freebsd = os.uname()[0] == 'FreeBSD'

    def _beadm_list(self):
        """Run 'beadm list -H' (plus '-s' for snapshot names); return (rc, out, err)."""
        cmd = [self.module.get_bin_path('beadm')]
        cmd.append('list')
        cmd.append('-H')
        if '@' in self.name:
            # A name containing '@' refers to a snapshot; list those too.
            cmd.append('-s')
        return self.module.run_command(cmd)

    def _find_be_by_name(self, out):
        """Locate this BE in 'beadm list' output *out*.

        Returns None when not found. NOTE(review): the return type varies —
        a re.Match for the FreeBSD snapshot case, otherwise a list of fields;
        callers split/index accordingly.
        """
        if '@' in self.name:
            # Snapshot lookup.
            for line in out.splitlines():
                if self.is_freebsd:
                    check = re.match(r'.+/({0})\s+\-'.format(self.name), line)
                    if check:
                        return check
                else:
                    check = line.split(';')
                    if check[1] == self.name:
                        return check
        else:
            # Plain BE lookup: first field is the BE name.
            splitter = '\t' if self.is_freebsd else ';'
            for line in out.splitlines():
                check = line.split(splitter)
                if check[0] == self.name:
                    return check

        return None

    def exists(self):
        """Return True when the boot environment is present."""
        (rc, out, _) = self._beadm_list()

        if rc == 0:
            if self._find_be_by_name(out):
                return True
            else:
                return False
        else:
            return False

    def is_activated(self):
        """Return True when the BE carries the 'R' (active on reboot) flag."""
        (rc, out, _) = self._beadm_list()

        if rc == 0:
            line = self._find_be_by_name(out)
            if self.is_freebsd:
                # NOTE(review): on FreeBSD the non-snapshot lookup returns a
                # list, which has no .split(); confirm which shape reaches here.
                if line is not None and 'R' in line.split('\t')[1]:
                    return True
            else:
                # NOTE(review): 'line' may be None when the BE disappeared
                # between calls, which would raise here.
                if 'R' in line.split(';')[2]:
                    return True

        return False

    def activate_be(self):
        """Activate the BE for next boot; return (rc, out, err)."""
        cmd = [self.module.get_bin_path('beadm')]
        cmd.append('activate')
        cmd.append(self.name)
        return self.module.run_command(cmd)

    def create_be(self):
        """Create the BE, optionally cloning a snapshot; return (rc, out, err)."""
        cmd = [self.module.get_bin_path('beadm')]
        cmd.append('create')

        if self.snapshot:
            cmd.append('-e')
            cmd.append(self.snapshot)
        if not self.is_freebsd:
            # Description and ZFS dataset options are Solarish-only flags.
            if self.description:
                cmd.append('-d')
                cmd.append(self.description)
            if self.options:
                cmd.append('-o')
                cmd.append(self.options)

        cmd.append(self.name)

        return self.module.run_command(cmd)

    def destroy_be(self):
        """Destroy the BE without confirmation ('-F'); return (rc, out, err)."""
        cmd = [self.module.get_bin_path('beadm')]
        cmd.append('destroy')
        cmd.append('-F')
        cmd.append(self.name)
        return self.module.run_command(cmd)

    def is_mounted(self):
        """Return True when the BE is mounted (FreeBSD: anywhere but '/')."""
        (rc, out, _) = self._beadm_list()

        if rc == 0:
            line = self._find_be_by_name(out)
            if self.is_freebsd:
                # On FreeBSD, we exclude currently mounted BE on /, as it is
                # special and can be activated even if it is mounted. That is not
                # possible with non-root BEs.
                if line.split('\t')[2] != '-' and \
                        line.split('\t')[2] != '/':
                    return True
            else:
                # Fourth semicolon field is the mountpoint (empty when unmounted).
                if line.split(';')[3]:
                    return True

        return False

    def mount_be(self):
        """Mount the BE (at self.mountpoint when given); return (rc, out, err)."""
        cmd = [self.module.get_bin_path('beadm')]
        cmd.append('mount')
        cmd.append(self.name)

        if self.mountpoint:
            cmd.append(self.mountpoint)

        return self.module.run_command(cmd)

    def unmount_be(self):
        """Unmount the BE, forced with '-f' when self.force; return (rc, out, err)."""
        cmd = [self.module.get_bin_path('beadm')]
        cmd.append('unmount')
        if self.force:
            cmd.append('-f')
        cmd.append(self.name)

        return self.module.run_command(cmd)
def main():
    """Entry point: drive a ZFS boot environment to the requested state."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True, aliases=['be']),
            snapshot=dict(type='str'),
            description=dict(type='str'),
            options=dict(type='str'),
            mountpoint=dict(type='path'),
            state=dict(type='str', default='present', choices=['absent', 'activated', 'mounted', 'present', 'unmounted']),
            force=dict(type='bool', default=False),
        ),
        supports_check_mode=True,
    )

    be = BE(module)

    # rc stays None when no beadm command ran; used to derive 'changed'.
    rc = None
    out = ''
    err = ''
    result = {}
    result['name'] = be.name
    result['state'] = be.state

    if be.snapshot:
        result['snapshot'] = be.snapshot

    if be.description:
        result['description'] = be.description

    if be.options:
        result['options'] = be.options

    if be.mountpoint:
        result['mountpoint'] = be.mountpoint

    if be.state == 'absent':
        # beadm on FreeBSD and Solarish systems differs in delete behaviour in
        # that we are not allowed to delete activated BE on FreeBSD while on
        # Solarish systems we cannot delete BE if it is mounted. We add mount
        # check for both platforms as BE should be explicitly unmounted before
        # being deleted. On FreeBSD, we also check if the BE is activated.
        if be.exists():
            if not be.is_mounted():
                if module.check_mode:
                    module.exit_json(changed=True)

                if be.is_freebsd:
                    if be.is_activated():
                        module.fail_json(msg='Unable to remove active BE!')

                (rc, out, err) = be.destroy_be()

                if rc != 0:
                    module.fail_json(msg='Error while destroying BE: "%s"' % err,
                                     name=be.name,
                                     stderr=err,
                                     rc=rc)
            else:
                module.fail_json(msg='Unable to remove BE as it is mounted!')

    elif be.state == 'present':
        if not be.exists():
            if module.check_mode:
                module.exit_json(changed=True)

            (rc, out, err) = be.create_be()

            if rc != 0:
                module.fail_json(msg='Error while creating BE: "%s"' % err,
                                 name=be.name,
                                 stderr=err,
                                 rc=rc)

    elif be.state == 'activated':
        if not be.is_activated():
            if module.check_mode:
                module.exit_json(changed=True)

            # On FreeBSD, beadm is unable to activate mounted BEs, so we add
            # an explicit check for that case.
            if be.is_freebsd:
                if be.is_mounted():
                    module.fail_json(msg='Unable to activate mounted BE!')

            (rc, out, err) = be.activate_be()

            if rc != 0:
                module.fail_json(msg='Error while activating BE: "%s"' % err,
                                 name=be.name,
                                 stderr=err,
                                 rc=rc)

    elif be.state == 'mounted':
        if not be.is_mounted():
            if module.check_mode:
                module.exit_json(changed=True)

            (rc, out, err) = be.mount_be()

            if rc != 0:
                module.fail_json(msg='Error while mounting BE: "%s"' % err,
                                 name=be.name,
                                 stderr=err,
                                 rc=rc)

    elif be.state == 'unmounted':
        if be.is_mounted():
            if module.check_mode:
                module.exit_json(changed=True)

            (rc, out, err) = be.unmount_be()

            if rc != 0:
                module.fail_json(msg='Error while unmounting BE: "%s"' % err,
                                 name=be.name,
                                 stderr=err,
                                 rc=rc)

    # 'changed' reflects whether any beadm command was executed above.
    if rc is None:
        result['changed'] = False
    else:
        result['changed'] = True

    if out:
        result['stdout'] = out
    if err:
        result['stderr'] = err

    module.exit_json(**result)
if __name__ == '__main__':
main()

View file

@ -0,0 +1,178 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, Nate Coraor <nate@bx.psu.edu>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: capabilities
short_description: Manage Linux capabilities
description:
- This module manipulates files privileges using the Linux capabilities(7) system.
options:
path:
description:
- Specifies the path to the file to be managed.
type: str
required: yes
aliases: [ key ]
capability:
description:
- Desired capability to set (with operator and flags, if state is C(present)) or remove (if state is C(absent))
type: str
required: yes
aliases: [ cap ]
state:
description:
- Whether the entry should be present or absent in the file's capabilities.
type: str
choices: [ absent, present ]
default: present
notes:
- The capabilities system will automatically transform operators and flags into the effective set,
so for example, C(cap_foo=ep) will probably become C(cap_foo+ep).
- This module does not attempt to determine the final operator and flags to compare,
so you will want to ensure that your capabilities argument matches the final capabilities.
author:
- Nate Coraor (@natefoo)
'''
EXAMPLES = r'''
- name: Set cap_sys_chroot+ep on /foo
capabilities:
path: /foo
capability: cap_sys_chroot+ep
state: present
- name: Remove cap_net_bind_service from /bar
capabilities:
path: /bar
capability: cap_net_bind_service
state: absent
'''
from ansible.module_utils.basic import AnsibleModule
OPS = ('=', '-', '+')
class CapabilitiesModule(object):
    """Drive getcap/setcap to converge a file's capabilities to the requested state.

    All work happens from the constructor; every code path ends in
    module.exit_json()/fail_json(), so instantiating this class never returns
    control to the caller in the success case.
    """
    platform = 'Linux'
    distribution = None

    def __init__(self, module):
        self.module = module
        self.path = module.params['path'].strip()
        # Capabilities are case-insensitive; normalize for comparison.
        self.capability = module.params['capability'].strip().lower()
        self.state = module.params['state']
        self.getcap_cmd = module.get_bin_path('getcap', required=True)
        self.setcap_cmd = module.get_bin_path('setcap', required=True)
        # An operator (=, -, +) is only mandatory when adding a capability.
        self.capability_tup = self._parse_cap(self.capability, op_required=self.state == 'present')
        self.run()

    def run(self):
        """Compare current capabilities to the request and apply/exit accordingly."""
        current = self.getcap(self.path)
        # Bare capability names, ignoring operator/flags, for 'absent' matching.
        caps = [cap[0] for cap in current]
        if self.state == 'present' and self.capability_tup not in current:
            # need to add capability
            if self.module.check_mode:
                self.module.exit_json(changed=True, msg='capabilities changed')
            else:
                # remove from current cap list if it's already set (but op/flags differ)
                current = list(filter(lambda x: x[0] != self.capability_tup[0], current))
                # add new cap with correct op/flags
                current.append(self.capability_tup)
                self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current))
        elif self.state == 'absent' and self.capability_tup[0] in caps:
            # need to remove capability
            if self.module.check_mode:
                self.module.exit_json(changed=True, msg='capabilities changed')
            else:
                # remove from current cap list and then set current list
                current = filter(lambda x: x[0] != self.capability_tup[0], current)
                self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current))
        # Nothing to do: already converged.
        self.module.exit_json(changed=False, state=self.state)

    def getcap(self, path):
        """Return the file's capabilities as a list of (cap, op, flags) tuples."""
        rval = []
        cmd = "%s -v %s" % (self.getcap_cmd, path)
        rc, stdout, stderr = self.module.run_command(cmd)
        # If file xattrs are set but no caps are set the output will be:
        #   '/foo ='
        # If file xattrs are unset the output will be:
        #   '/foo'
        # If the file does not exist the output will be (with rc == 0...):
        #   '/foo (No such file or directory)'
        if rc != 0 or (stdout.strip() != path and stdout.count(' =') != 1):
            self.module.fail_json(msg="Unable to get capabilities of %s" % path, stdout=stdout.strip(), stderr=stderr)
        if stdout.strip() != path:
            caps = stdout.split(' =')[1].strip().split()
            for cap in caps:
                cap = cap.lower()
                # getcap condenses capabilities with the same op/flags into a
                # comma-separated list, so we have to parse that
                if ',' in cap:
                    cap_group = cap.split(',')
                    cap_group[-1], op, flags = self._parse_cap(cap_group[-1])
                    for subcap in cap_group:
                        rval.append((subcap, op, flags))
                else:
                    rval.append(self._parse_cap(cap))
        return rval

    def setcap(self, path, caps):
        """Apply the (cap, op, flags) tuples to the file via setcap; return its stdout."""
        caps = ' '.join([''.join(cap) for cap in caps])
        cmd = "%s '%s' %s" % (self.setcap_cmd, caps, path)
        rc, stdout, stderr = self.module.run_command(cmd)
        if rc != 0:
            self.module.fail_json(msg="Unable to set capabilities of %s" % path, stdout=stdout, stderr=stderr)
        else:
            return stdout

    def _parse_cap(self, cap, op_required=True):
        """Split 'cap_foo+ep' into ('cap_foo', '+', 'ep'); returns (cap, None, None) when no operator and not required."""
        opind = -1
        try:
            # Scan for the first operator present; OPS index overflow
            # (IndexError) means no operator was found at all.
            i = 0
            while opind == -1:
                opind = cap.find(OPS[i])
                i += 1
        except Exception:
            if op_required:
                self.module.fail_json(msg="Couldn't find operator (one of: %s)" % str(OPS))
            else:
                return (cap, None, None)
        op = cap[opind]
        cap, flags = cap.split(op)
        return (cap, op, flags)
# ==============================================================
# main
def main():
    """Entry point: build the module object and delegate all work.

    CapabilitiesModule performs everything (including exiting the process)
    from its constructor, so nothing follows the instantiation.
    """
    argument_spec = dict(
        path=dict(type='str', required=True, aliases=['key']),
        capability=dict(type='str', required=True, aliases=['cap']),
        state=dict(type='str', default='present', choices=['absent', 'present']),
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    CapabilitiesModule(module)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()

View file

@ -0,0 +1,427 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Cronvar Plugin: The goal of this plugin is to provide an idempotent
# method for setting cron variable values. It should play well with the
# existing cron module as well as allow for manually added variables.
# Each variable entered will be preceded with a comment describing the
# variable so that it can be found later. This is required to be
# present in order for this plugin to find/modify the variable
# This module is based on the crontab module.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: cronvar
short_description: Manage variables in crontabs
description:
- Use this module to manage crontab variables.
- This module allows you to create, update, or delete cron variable definitions.
options:
name:
description:
- Name of the crontab variable.
type: str
required: yes
value:
description:
- The value to set this variable to.
- Required if C(state=present).
type: str
insertafter:
description:
- If specified, the variable will be inserted after the variable specified.
- Used with C(state=present).
type: str
insertbefore:
description:
- Used with C(state=present). If specified, the variable will be inserted
just before the variable specified.
type: str
state:
description:
- Whether to ensure that the variable is present or absent.
type: str
choices: [ absent, present ]
default: present
user:
description:
- The specific user whose crontab should be modified.
- This parameter defaults to C(root) when unset.
type: str
cron_file:
description:
- If specified, uses this file instead of an individual user's crontab.
- Without a leading C(/), this is assumed to be in I(/etc/cron.d).
- With a leading C(/), this is taken as absolute.
type: str
backup:
description:
- If set, create a backup of the crontab before it is modified.
The location of the backup is returned in the C(backup) variable by this module.
type: bool
default: no
requirements:
- cron
author:
- Doug Luce (@dougluce)
'''
EXAMPLES = r'''
- name: Ensure entry like "EMAIL=doug@ansibmod.con.com" exists
cronvar:
name: EMAIL
value: doug@ansibmod.con.com
- name: Ensure a variable does not exist. This may remove any variable named "LEGACY"
cronvar:
name: LEGACY
state: absent
- name: Add a variable to a file under /etc/cron.d
cronvar:
name: LOGFILE
value: /var/log/yum-autoupdate.log
user: root
cron_file: ansible_yum-autoupdate
'''
import os
import platform
import pwd
import re
import shlex
import sys
import tempfile
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves import shlex_quote
class CronVarError(Exception):
    """Raised for crontab read/parse problems within this module."""
    pass
class CronVar(object):
    """
    CronVar object to write variables to crontabs.

    user - the user of the crontab (defaults to root)
    cron_file - a cron file under /etc/cron.d
    """

    def __init__(self, module, user=None, cron_file=None):
        self.module = module
        self.user = user
        # Parsed crontab lines; populated by read().
        self.lines = None
        # shlex word characters: all ASCII except '=' and quote characters, so
        # that a 'NAME=value' line tokenizes into NAME, '=', value.
        self.wordchars = ''.join(chr(x) for x in range(128) if chr(x) not in ('=', "'", '"',))
        self.cron_cmd = self.module.get_bin_path('crontab', required=True)
        if cron_file:
            self.cron_file = ""
            if os.path.isabs(cron_file):
                self.cron_file = cron_file
            else:
                # Relative cron_file names are taken to live under /etc/cron.d.
                self.cron_file = os.path.join('/etc/cron.d', cron_file)
        else:
            self.cron_file = None
        self.read()

    def read(self):
        """Load the crontab (cron_file or the user crontab) into self.lines."""
        # Read in the crontab from the system
        self.lines = []
        if self.cron_file:
            # read the cronfile
            try:
                f = open(self.cron_file, 'r')
                self.lines = f.read().splitlines()
                f.close()
            except IOError:
                # cron file does not exist
                return
            except Exception:
                raise CronVarError("Unexpected error:", sys.exc_info()[0])
        else:
            # using safely quoted shell for now, but this really should be two non-shell calls instead. FIXME
            (rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True)
            if rc != 0 and rc != 1:  # 1 can mean that there are no jobs.
                raise CronVarError("Unable to read crontab")
            lines = out.splitlines()
            count = 0
            # Skip the up-to-three header comment lines that some cron
            # implementations prepend to `crontab -l` output; keep the rest.
            for l in lines:
                if count > 2 or (not re.match(r'# DO NOT EDIT THIS FILE - edit the master and reinstall.', l
                                              ) and not re.match(r'# \(/tmp/.*installed on.*\)', l) and not re.match(r'# \(.*version.*\)', l)):
                    self.lines.append(l)
                count += 1

    def log_message(self, message):
        """Emit a debug-level message through the module."""
        self.module.debug('ansible: "%s"' % message)

    def write(self, backup_file=None):
        """
        Write the crontab to the system. Saves all information.
        """
        if backup_file:
            fileh = open(backup_file, 'w')
        elif self.cron_file:
            fileh = open(self.cron_file, 'w')
        else:
            # User crontab: render to a temp file, then install it below.
            filed, path = tempfile.mkstemp(prefix='crontab')
            fileh = os.fdopen(filed, 'w')
        fileh.write(self.render())
        fileh.close()
        # return if making a backup
        if backup_file:
            return
        # Add the entire crontab back to the user crontab
        if not self.cron_file:
            # quoting shell args for now but really this should be two non-shell calls. FIXME
            (rc, out, err) = self.module.run_command(self._write_execute(path), use_unsafe_shell=True)
            os.unlink(path)
            if rc != 0:
                self.module.fail_json(msg=err)

    def remove_variable_file(self):
        """Delete the cron_file; return True if it was removed, False if absent."""
        try:
            os.unlink(self.cron_file)
            return True
        except OSError:
            # cron file does not exist
            return False
        except Exception:
            raise CronVarError("Unexpected error:", sys.exc_info()[0])

    def parse_for_var(self, line):
        """Parse a 'NAME=value' line; return (name, value) or raise CronVarError."""
        lexer = shlex.shlex(line)
        lexer.wordchars = self.wordchars
        varname = lexer.get_token()
        is_env_var = lexer.get_token() == '='
        value = ''.join(lexer)
        if is_env_var:
            return (varname, value)
        raise CronVarError("Not a variable.")

    def find_variable(self, name):
        """Return the value of variable *name*, or None if not defined."""
        for l in self.lines:
            try:
                (varname, value) = self.parse_for_var(l)
                if varname == name:
                    return value
            except CronVarError:
                pass
        return None

    def get_var_names(self):
        """Return the names of all variables defined in the crontab."""
        var_names = []
        for l in self.lines:
            try:
                (var_name, _) = self.parse_for_var(l)
                var_names.append(var_name)
            except CronVarError:
                pass
        return var_names

    def add_variable(self, name, value, insertbefore, insertafter):
        """Insert NAME=value at the top, or relative to another variable."""
        if insertbefore is None and insertafter is None:
            # Add the variable to the top of the file.
            self.lines.insert(0, "%s=%s" % (name, value))
        else:
            newlines = []
            for l in self.lines:
                try:
                    (varname, _) = self.parse_for_var(l)  # Throws if not a var line
                    if varname == insertbefore:
                        newlines.append("%s=%s" % (name, value))
                        newlines.append(l)
                    elif varname == insertafter:
                        newlines.append(l)
                        newlines.append("%s=%s" % (name, value))
                    else:
                        raise CronVarError  # Append.
                except CronVarError:
                    newlines.append(l)
            self.lines = newlines

    def remove_variable(self, name):
        """Drop variable *name* from the crontab."""
        self.update_variable(name, None, remove=True)

    def update_variable(self, name, value, remove=False):
        """Rewrite (or, with remove=True, delete) the line defining *name*."""
        newlines = []
        for l in self.lines:
            try:
                (varname, _) = self.parse_for_var(l)  # Throws if not a var line
                if varname != name:
                    raise CronVarError  # Append.
                if not remove:
                    newlines.append("%s=%s" % (name, value))
            except CronVarError:
                newlines.append(l)
        self.lines = newlines

    def render(self):
        """
        Render a proper crontab
        """
        result = '\n'.join(self.lines)
        if result and result[-1] not in ['\n', '\r']:
            result += '\n'
        return result

    def _read_user_execute(self):
        """
        Returns the command line for reading a crontab
        """
        user = ''
        if self.user:
            # Platform-specific invocations: some crontabs take the user as an
            # argument, others require running as that user.
            if platform.system() == 'SunOS':
                return "su %s -c '%s -l'" % (shlex_quote(self.user), shlex_quote(self.cron_cmd))
            elif platform.system() == 'AIX':
                return "%s -l %s" % (shlex_quote(self.cron_cmd), shlex_quote(self.user))
            elif platform.system() == 'HP-UX':
                return "%s %s %s" % (self.cron_cmd, '-l', shlex_quote(self.user))
            elif pwd.getpwuid(os.getuid())[0] != self.user:
                user = '-u %s' % shlex_quote(self.user)
        return "%s %s %s" % (self.cron_cmd, user, '-l')

    def _write_execute(self, path):
        """
        Return the command line for writing a crontab
        """
        user = ''
        if self.user:
            if platform.system() in ['SunOS', 'HP-UX', 'AIX']:
                return "chown %s %s ; su '%s' -c '%s %s'" % (
                    shlex_quote(self.user), shlex_quote(path), shlex_quote(self.user), self.cron_cmd, shlex_quote(path))
            elif pwd.getpwuid(os.getuid())[0] != self.user:
                user = '-u %s' % shlex_quote(self.user)
        return "%s %s %s" % (self.cron_cmd, user, shlex_quote(path))
# ==================================================
def main():
    """Entry point: ensure a crontab variable is present with a value, or absent."""
    # The following example playbooks:
    #
    # - cronvar: name="SHELL" value="/bin/bash"
    #
    # - name: Set the email
    #   cronvar: name="EMAILTO" value="doug@ansibmod.con.com"
    #
    # - name: Get rid of the old new host variable
    #   cronvar: name="NEW_HOST" state=absent
    #
    # Would produce:
    # SHELL = /bin/bash
    # EMAILTO = doug@ansibmod.con.com
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True),
            value=dict(type='str'),
            user=dict(type='str'),
            cron_file=dict(type='str'),
            insertafter=dict(type='str'),
            insertbefore=dict(type='str'),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            backup=dict(type='bool', default=False),
        ),
        mutually_exclusive=[['insertbefore', 'insertafter']],
        supports_check_mode=False,
    )

    name = module.params['name']
    value = module.params['value']
    user = module.params['user']
    cron_file = module.params['cron_file']
    insertafter = module.params['insertafter']
    insertbefore = module.params['insertbefore']
    state = module.params['state']
    backup = module.params['backup']
    ensure_present = state == 'present'

    changed = False
    res_args = dict()

    # Ensure all files generated are only writable by the owning user. Primarily relevant for the cron_file option.
    os.umask(int('022', 8))
    cronvar = CronVar(module, user, cron_file)

    module.debug('cronvar instantiated - name: "%s"' % name)

    # --- user input validation ---
    if name is None and ensure_present:
        module.fail_json(msg="You must specify 'name' to insert a new cron variable")
    if value is None and ensure_present:
        module.fail_json(msg="You must specify 'value' to insert a new cron variable")
    if name is None and not ensure_present:
        module.fail_json(msg="You must specify 'name' to remove a cron variable")

    # if requested make a backup before making a change
    if backup:
        (_, backup_file) = tempfile.mkstemp(prefix='cronvar')
        cronvar.write(backup_file)

    if cronvar.cron_file and not name and not ensure_present:
        # BUG FIX: this previously called cronvar.remove_job_file(), a method
        # that does not exist on CronVar (it was carried over from the cron
        # module) and raised AttributeError. The correct method is
        # remove_variable_file().
        changed = cronvar.remove_variable_file()
        module.exit_json(changed=changed, cron_file=cron_file, state=state)

    old_value = cronvar.find_variable(name)

    if ensure_present:
        if old_value is None:
            cronvar.add_variable(name, value, insertbefore, insertafter)
            changed = True
        elif old_value != value:
            cronvar.update_variable(name, value)
            changed = True
    else:
        if old_value is not None:
            cronvar.remove_variable(name)
            changed = True

    res_args = {
        "vars": cronvar.get_var_names(),
        "changed": changed
    }

    if changed:
        cronvar.write()

    # retain the backup only if crontab or cron file have changed
    if backup:
        if changed:
            res_args['backup_file'] = backup_file
        else:
            os.unlink(backup_file)

    if cron_file:
        res_args['cron_file'] = cron_file

    module.exit_json(**res_args)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()

View file

@ -0,0 +1,358 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, Steve <yo@groks.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: crypttab
short_description: Encrypted Linux block devices
description:
- Control Linux encrypted block devices that are set up during system boot in C(/etc/crypttab).
options:
name:
description:
- Name of the encrypted block device as it appears in the C(/etc/crypttab) file, or
optionally prefixed with C(/dev/mapper/), as it appears in the filesystem. I(/dev/mapper/)
will be stripped from I(name).
type: str
required: yes
state:
description:
- Use I(present) to add a line to C(/etc/crypttab) or update its definition
if already present.
- Use I(absent) to remove a line with matching I(name).
- Use I(opts_present) to add options to those already present; options with
different values will be updated.
- Use I(opts_absent) to remove options from the existing set.
type: str
required: yes
choices: [ absent, opts_absent, opts_present, present ]
backing_device:
description:
- Path to the underlying block device or file, or the UUID of a block-device
prefixed with I(UUID=).
type: str
password:
description:
- Encryption password, the path to a file containing the password, or
C(-) or unset if the password should be entered at boot.
type: path
opts:
description:
- A comma-delimited list of options. See C(crypttab(5)) for details.
type: str
path:
description:
- Path to file to use instead of C(/etc/crypttab).
- This might be useful in a chroot environment.
type: path
default: /etc/crypttab
author:
- Steve (@groks)
'''
EXAMPLES = r'''
- name: Set the options explicitly for a device which must already exist
crypttab:
name: luks-home
state: present
opts: discard,cipher=aes-cbc-essiv:sha256
- name: Add the 'discard' option to any existing options for all devices
crypttab:
name: '{{ item.device }}'
state: opts_present
opts: discard
loop: '{{ ansible_mounts }}'
when: "'/dev/mapper/luks-' in {{ item.device }}"
'''
import os
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes, to_native
def main():
    """Entry point: converge one crypttab entry (line, or its options) to the requested state."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True),
            state=dict(type='str', required=True, choices=['absent', 'opts_absent', 'opts_present', 'present']),
            backing_device=dict(type='str'),
            password=dict(type='path'),
            opts=dict(type='str'),
            path=dict(type='path', default='/etc/crypttab')
        ),
        supports_check_mode=True,
    )

    backing_device = module.params['backing_device']
    password = module.params['password']
    opts = module.params['opts']
    state = module.params['state']
    path = module.params['path']
    name = module.params['name']
    # Accept names given as /dev/mapper/<name> and normalize to <name>.
    if name.startswith('/dev/mapper/'):
        name = name[len('/dev/mapper/'):]

    # Unless removing the line entirely, at least one field must be supplied.
    if state != 'absent' and backing_device is None and password is None and opts is None:
        module.fail_json(msg="expected one or more of 'backing_device', 'password' or 'opts'",
                         **module.params)

    # opts_present/opts_absent touch only the options field.
    if 'opts' in state and (backing_device is not None or password is not None):
        module.fail_json(msg="cannot update 'backing_device' or 'password' when state=%s" % state,
                         **module.params)

    # crypttab fields are whitespace-delimited, so embedded whitespace (or an
    # empty value) would corrupt the file.
    for arg_name, arg in (('name', name),
                          ('backing_device', backing_device),
                          ('password', password),
                          ('opts', opts)):
        if (arg is not None and (' ' in arg or '\t' in arg or arg == '')):
            module.fail_json(msg="invalid '%s': contains white space or is empty" % arg_name,
                             **module.params)

    try:
        crypttab = Crypttab(path)
        existing_line = crypttab.match(name)
    except Exception as e:
        module.fail_json(msg="failed to open and parse crypttab file: %s" % to_native(e),
                         exception=traceback.format_exc(), **module.params)

    if 'present' in state and existing_line is None and backing_device is None:
        module.fail_json(msg="'backing_device' required to add a new entry",
                         **module.params)

    changed, reason = False, '?'

    if state == 'absent':
        if existing_line is not None:
            changed, reason = existing_line.remove()
    elif state == 'present':
        if existing_line is not None:
            changed, reason = existing_line.set(backing_device, password, opts)
        else:
            changed, reason = crypttab.add(Line(None, name, backing_device, password, opts))
    elif state == 'opts_present':
        if existing_line is not None:
            changed, reason = existing_line.opts.add(opts)
        else:
            changed, reason = crypttab.add(Line(None, name, backing_device, password, opts))
    elif state == 'opts_absent':
        if existing_line is not None:
            changed, reason = existing_line.opts.remove(opts)

    if changed and not module.check_mode:
        # BUG FIX: the original used "f = open(...)" inside try/finally; if
        # open() itself raised, 'f' was unbound and the finally clause raised
        # a NameError that masked the real error. The context manager closes
        # the file safely on every path.
        with open(path, 'wb') as f:
            f.write(to_bytes(crypttab, errors='surrogate_or_strict'))

    module.exit_json(changed=changed, msg=reason, **module.params)
class Crypttab(object):
    """In-memory representation of a crypttab(5) file."""

    def __init__(self, path):
        """Load the file at *path*, creating it (and its directory) if missing.

        :param path: path to the crypttab file to manage.
        """
        # BUG FIX: _lines used to be a CLASS attribute, so every Crypttab
        # instance shared (and kept appending to) the same list, leaking
        # entries between instances. It must be per-instance state.
        self._lines = []
        self.path = path
        if not os.path.exists(path):
            if not os.path.exists(os.path.dirname(path)):
                os.makedirs(os.path.dirname(path))
            open(path, 'a').close()
        # Context manager replaces the old try/finally, which raised a
        # NameError (masking the real error) if open() itself failed.
        with open(path, 'r') as f:
            for line in f.readlines():
                self._lines.append(Line(line))

    def add(self, line):
        """Append a Line object; always reported as a change."""
        self._lines.append(line)
        return True, 'added line'

    def lines(self):
        """Yield only the valid lines (those with a name and backing device)."""
        for line in self._lines:
            if line.valid():
                yield line

    def match(self, name):
        """Return the valid Line whose name equals *name*, or None."""
        for line in self.lines():
            if line.name == name:
                return line
        return None

    def __str__(self):
        """Render the whole file, guaranteeing a trailing newline."""
        lines = []
        for line in self._lines:
            lines.append(str(line))
        crypttab = '\n'.join(lines)
        if len(crypttab) == 0:
            crypttab += '\n'
        if crypttab[-1] != '\n':
            crypttab += '\n'
        return crypttab
class Line(object):
    """One crypttab line: name, backing device, optional password and options.

    Invalid/comment lines are kept verbatim in self.line and rendered back
    unchanged by __str__.
    """

    def __init__(self, line=None, name=None, backing_device=None, password=None, opts=None):
        self.line = line
        self.name = name
        self.backing_device = backing_device
        self.password = password
        self.opts = Options(opts)
        if line is not None:
            self.line = self.line.rstrip('\n')
            # Only parse lines that look like real entries; anything else
            # (comments, blanks) is preserved as raw text.
            if self._line_valid(line):
                self.name, backing_device, password, opts = self._split_line(line)
        self.set(backing_device, password, opts)

    def set(self, backing_device, password, opts):
        """Update the given fields (None means leave as-is); report whether anything changed."""
        changed = False
        if backing_device is not None and self.backing_device != backing_device:
            self.backing_device = backing_device
            changed = True
        if password is not None and self.password != password:
            self.password = password
            changed = True
        if opts is not None:
            opts = Options(opts)
            if opts != self.opts:
                self.opts = opts
                changed = True
        return changed, 'updated line'

    def _line_valid(self, line):
        """A parseable entry is non-blank, not a comment, and has 2-4 fields."""
        if not line.strip() or line.startswith('#') or len(line.split()) not in (2, 3, 4):
            return False
        return True

    def _split_line(self, line):
        """Split an entry into (name, backing_device, password, opts); missing fields become None."""
        fields = line.split()
        try:
            field2 = fields[2]
        except IndexError:
            field2 = None
        try:
            field3 = fields[3]
        except IndexError:
            field3 = None
        return (fields[0],
                fields[1],
                field2,
                field3)

    def remove(self):
        """Blank out the line so it disappears from the rendered file."""
        self.line, self.name, self.backing_device = '', None, None
        return True, 'removed line'

    def valid(self):
        """A line is valid once it has both a name and a backing device."""
        if self.name is not None and self.backing_device is not None:
            return True
        return False

    def __str__(self):
        """Render the entry; 'none' is emitted as password placeholder when only opts are set."""
        if self.valid():
            fields = [self.name, self.backing_device]
            if self.password is not None or self.opts:
                if self.password is not None:
                    fields.append(self.password)
                else:
                    fields.append('none')
                if self.opts:
                    fields.append(str(self.opts))
            return ' '.join(fields)
        return self.line
class Options(dict):
    """opts_string looks like: 'discard,foo=bar,baz=greeble' """

    def __init__(self, opts_string):
        # A dict that also remembers insertion order explicitly (itemlist), so
        # rendering preserves the order options were first seen in.
        super(Options, self).__init__()
        self.itemlist = []
        if opts_string is None:
            return
        for token in opts_string.split(','):
            parts = token.split('=')
            # 'key=value' keeps only the first value segment; bare 'key' maps to None.
            self[parts[0]] = parts[1] if len(parts) > 1 else None

    def add(self, opts_string):
        """Merge options in; report a change when a key is new or its value differs."""
        changed = False
        for key, val in Options(opts_string).items():
            if key not in self or self[key] != val:
                changed = True
            self[key] = val
        return changed, 'updated options'

    def remove(self, opts_string):
        """Drop the listed option keys; report a change when any was present."""
        changed = False
        for key in Options(opts_string):
            if key in self:
                del self[key]
                changed = True
        return changed, 'removed options'

    def keys(self):
        return self.itemlist

    def values(self):
        return [self[key] for key in self.itemlist]

    def items(self):
        return [(key, self[key]) for key in self.itemlist]

    def __iter__(self):
        return iter(self.itemlist)

    def __setitem__(self, key, value):
        # Track first-insertion order alongside the dict storage.
        if key not in self:
            self.itemlist.append(key)
        super(Options, self).__setitem__(key, value)

    def __delitem__(self, key):
        self.itemlist.remove(key)
        super(Options, self).__delitem__(key)

    def __ne__(self, obj):
        # Two option sets differ unless both are Options with the same
        # key/value pairs (order-insensitive).
        if not isinstance(obj, Options):
            return True
        return sorted(self.items()) != sorted(obj.items())

    def __str__(self):
        rendered = []
        for key, val in self.items():
            rendered.append(key if val is None else '%s=%s' % (key, val))
        return ','.join(rendered)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()

View file

@ -0,0 +1,381 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Branko Majic <branko@majic.rs>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: dconf
author:
- "Branko Majic (@azaghal)"
short_description: Modify and read dconf database
description:
- This module allows modifications and reading of dconf database. The module
is implemented as a wrapper around dconf tool. Please see the dconf(1) man
page for more details.
- Since C(dconf) requires a running D-Bus session to change values, the module
will try to detect an existing session and reuse it, or run the tool via
C(dbus-run-session).
notes:
- This module depends on C(psutil) Python library (version 4.0.0 and upwards),
C(dconf), C(dbus-send), and C(dbus-run-session) binaries. Depending on
distribution you are using, you may need to install additional packages to
have these available.
- Detection of existing, running D-Bus session, required to change settings
via C(dconf), is not 100% reliable due to implementation details of D-Bus
daemon itself. This might lead to running applications not picking-up
changes on the fly if options are changed via Ansible and
C(dbus-run-session).
- Keep in mind that the C(dconf) CLI tool, which this module wraps around,
utilises an unusual syntax for the values (GVariant). For example, if you
wanted to provide a string value, the correct syntax would be
C(value="'myvalue'") - with single quotes as part of the Ansible parameter
value.
- When using loops in combination with a value like
:code:`"[('xkb', 'us'), ('xkb', 'se')]"`, you need to be aware of possible
type conversions. Applying a filter :code:`"{{ item.value | string }}"`
to the parameter variable can avoid potential conversion problems.
- The easiest way to figure out exact syntax/value you need to provide for a
key is by making the configuration change in application affected by the
key, and then having a look at value set via commands C(dconf dump
/path/to/dir/) or C(dconf read /path/to/key).
options:
key:
required: true
description:
- A dconf key to modify or read from the dconf database.
value:
required: false
description:
- Value to set for the specified dconf key. Value should be specified in
GVariant format. Due to complexity of this format, it is best to have a
look at existing values in the dconf database. Required for
C(state=present).
state:
required: false
default: present
choices:
- read
- present
- absent
description:
- The action to take upon the key/value.
'''
RETURN = """
value:
description: value associated with the requested key
returned: success, state was "read"
type: str
sample: "'Default'"
"""
EXAMPLES = """
- name: Configure available keyboard layouts in Gnome
dconf:
key: "/org/gnome/desktop/input-sources/sources"
value: "[('xkb', 'us'), ('xkb', 'se')]"
state: present
- name: Read currently available keyboard layouts in Gnome
dconf:
key: "/org/gnome/desktop/input-sources/sources"
state: read
register: keyboard_layouts
- name: Reset the available keyboard layouts in Gnome
dconf:
key: "/org/gnome/desktop/input-sources/sources"
state: absent
- name: Configure available keyboard layouts in Cinnamon
dconf:
key: "/org/gnome/libgnomekbd/keyboard/layouts"
value: "['us', 'se']"
state: present
- name: Read currently available keyboard layouts in Cinnamon
dconf:
key: "/org/gnome/libgnomekbd/keyboard/layouts"
state: read
register: keyboard_layouts
- name: Reset the available keyboard layouts in Cinnamon
dconf:
key: "/org/gnome/libgnomekbd/keyboard/layouts"
state: absent
- name: Disable desktop effects in Cinnamon
dconf:
key: "/org/cinnamon/desktop-effects"
value: "false"
state: present
"""
import os
import traceback
PSUTIL_IMP_ERR = None
try:
import psutil
psutil_found = True
except ImportError:
PSUTIL_IMP_ERR = traceback.format_exc()
psutil_found = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
class DBusWrapper(object):
    """
    Helper class that can be used for running a command with a working D-Bus
    session.

    If possible, command will be run against an existing D-Bus session,
    otherwise the session will be spawned via dbus-run-session.

    Example usage:

    dbus_wrapper = DBusWrapper(ansible_module)
    dbus_wrapper.run_command(["printenv", "DBUS_SESSION_BUS_ADDRESS"])
    """

    def __init__(self, module):
        """
        Initialises an instance of the class.

        :param module: Ansible module instance used to signal failures and run commands.
        :type module: AnsibleModule
        """
        # Store passed-in arguments and set-up some defaults.
        self.module = module
        # Try to extract existing D-Bus session address.
        self.dbus_session_bus_address = self._get_existing_dbus_session()
        # If no existing D-Bus session was detected, check if dbus-run-session
        # is available so we fail early rather than at command time.
        if self.dbus_session_bus_address is None:
            self.module.get_bin_path('dbus-run-session', required=True)

    def _get_existing_dbus_session(self):
        """
        Detects and returns an existing D-Bus session bus address.

        :returns: string -- D-Bus session bus address. If a running D-Bus session was not detected, returns None.
        """
        # We'll be checking the processes of current user only.
        uid = os.getuid()
        # Go through all the pids for this user, try to extract the D-Bus
        # session bus address from environment, and ensure it is possible to
        # connect to it.
        self.module.debug("Trying to detect existing D-Bus user session for user: %d" % uid)
        for pid in psutil.pids():
            try:
                # BUG FIX: Process construction and uids() used to live
                # outside the try block, so a process exiting between pids()
                # and Process(pid) (psutil.NoSuchProcess), or an inaccessible
                # process raising AccessDenied from uids(), aborted the whole
                # scan instead of just skipping that process.
                process = psutil.Process(pid)
                process_real_uid, _, _ = process.uids()
                if process_real_uid == uid and 'DBUS_SESSION_BUS_ADDRESS' in process.environ():
                    dbus_session_bus_address_candidate = process.environ()['DBUS_SESSION_BUS_ADDRESS']
                    self.module.debug("Found D-Bus user session candidate at address: %s" % dbus_session_bus_address_candidate)
                    # Send a no-op signal to verify the bus is actually usable.
                    command = ['dbus-send', '--address=%s' % dbus_session_bus_address_candidate, '--type=signal', '/', 'com.example.test']
                    rc, _, _ = self.module.run_command(command)
                    if rc == 0:
                        self.module.debug("Verified D-Bus user session candidate as usable at address: %s" % dbus_session_bus_address_candidate)
                        return dbus_session_bus_address_candidate
            # This can happen with things like SSH sessions etc.
            except (psutil.AccessDenied, psutil.NoSuchProcess):
                pass
        self.module.debug("Failed to find running D-Bus user session, will use dbus-run-session")
        return None

    def run_command(self, command):
        """
        Runs the specified command within a functional D-Bus session. Command is
        effectively passed-on to AnsibleModule.run_command() method, with
        modification for using dbus-run-session if necessary.

        :param command: Command to run, including parameters. Each element of the list should be a string.
        :type module: list

        :returns: tuple(result_code, standard_output, standard_error) -- Result code, standard output, and standard error from running the command.
        """
        if self.dbus_session_bus_address is None:
            self.module.debug("Using dbus-run-session wrapper for running commands.")
            command = ['dbus-run-session'] + command
            rc, out, err = self.module.run_command(command)
            # rc 127 from the wrapper itself means dbus-run-session failed,
            # not the wrapped command.
            if self.dbus_session_bus_address is None and rc == 127:
                self.module.fail_json(msg="Failed to run passed-in command, dbus-run-session faced an internal error: %s" % err)
        else:
            # Have the command connect to the verified existing session.
            extra_environment = {'DBUS_SESSION_BUS_ADDRESS': self.dbus_session_bus_address}
            rc, out, err = self.module.run_command(command, environ_update=extra_environment)
        return rc, out, err
class DconfPreference(object):
    """Thin wrapper around the dconf command-line tool.

    Provides read/write/reset operations for individual dconf keys. Write
    and reset operations require a working D-Bus session, which is obtained
    through DBusWrapper (reusing an existing user session or spawning a
    temporary one via dbus-run-session).
    """

    def __init__(self, module, check_mode=False):
        """
        Initialises instance of the class.

        :param module: Ansible module instance used to signal failures and run commands.
        :type module: AnsibleModule

        :param check_mode: Specify whether to only check if a change should be made or if to actually make a change.
        :type check_mode: bool
        """
        self.module = module
        self.check_mode = check_mode

    def read(self, key):
        """
        Retrieves current value associated with the dconf key.

        If an error occurs, a call will be made to AnsibleModule.fail_json.

        :param key: dconf key to read. Should be a full path.
        :type key: str

        :returns: string -- Value assigned to the provided key. If the value is not set for specified key, returns None.
        """
        command = ["dconf", "read", key]

        rc, out, err = self.module.run_command(command)

        if rc != 0:
            self.module.fail_json(msg='dconf failed while reading the value with error: %s' % err)

        # dconf prints nothing at all for unset keys.
        if out == '':
            value = None
        else:
            value = out.rstrip('\n')

        return value

    def write(self, key, value):
        """
        Writes the value for specified key.

        If an error occurs, a call will be made to AnsibleModule.fail_json.

        :param key: dconf key for which the value should be set. Should be a full path.
        :type key: str

        :param value: Value to set for the specified dconf key. Should be specified in GVariant format.
        :type value: str

        :returns: bool -- True if a change was made, False if no change was required.
        """
        # If no change is needed (or won't be done due to check_mode), notify
        # caller straight away.
        if value == self.read(key):
            return False
        elif self.check_mode:
            return True

        # Set-up command to run. Since DBus is needed for write operation, wrap
        # dconf command dbus-run-session (via DBusWrapper).
        command = ["dconf", "write", key, value]

        # Run the command and fetch standard return code, stdout, and stderr.
        dbus_wrapper = DBusWrapper(self.module)
        rc, out, err = dbus_wrapper.run_command(command)

        if rc != 0:
            # Fixed message wording: "while write" -> "while writing".
            self.module.fail_json(msg='dconf failed while writing the value with error: %s' % err)

        # Value was changed.
        return True

    def reset(self, key):
        """
        Returns value for the specified key (removes it from user configuration).

        If an error occurs, a call will be made to AnsibleModule.fail_json.

        :param key: dconf key to reset. Should be a full path.
        :type key: str

        :returns: bool -- True if a change was made, False if no change was required.
        """
        # Read the current value first.
        current_value = self.read(key)

        # No change was needed, key is not set at all, or just notify user if we
        # are in check mode.
        if current_value is None:
            return False
        elif self.check_mode:
            return True

        # Set-up command to run. Since DBus is needed for reset operation, wrap
        # dconf command dbus-run-session (via DBusWrapper).
        command = ["dconf", "reset", key]

        # Run the command and fetch standard return code, stdout, and stderr.
        dbus_wrapper = DBusWrapper(self.module)
        rc, out, err = dbus_wrapper.run_command(command)

        if rc != 0:
            # Fixed typo: "reseting" -> "resetting".
            self.module.fail_json(msg='dconf failed while resetting the value with error: %s' % err)

        # Value was changed.
        return True
def main():
    """Module entry point: parse parameters and dispatch to DconfPreference."""
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent', 'read']),
            key=dict(required=True, type='str'),
            value=dict(required=False, default=None, type='str'),
        ),
        supports_check_mode=True
    )

    # psutil is needed for D-Bus session discovery; bail out early if absent.
    if not psutil_found:
        module.fail_json(msg=missing_required_lib("psutil"), exception=PSUTIL_IMP_ERR)

    state = module.params['state']
    key = module.params['key']
    value = module.params['value']

    # "present" without a value is a user error -- catch it before doing work.
    if state == 'present' and value is None:
        module.fail_json(msg='State "present" requires "value" to be set.')

    # Create wrapper instance.
    dconf = DconfPreference(module, module.check_mode)

    # Dispatch to the requested operation.
    if state == 'read':
        module.exit_json(changed=False, value=dconf.read(key))
    elif state == 'present':
        module.exit_json(changed=dconf.write(key, value))
    elif state == 'absent':
        module.exit_json(changed=dconf.reset(key))


if __name__ == '__main__':
    main()

View file

@ -0,0 +1,53 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: facter
short_description: Runs the discovery program I(facter) on the remote system
description:
- Runs the I(facter) discovery program
(U(https://github.com/puppetlabs/facter)) on the remote system, returning
JSON data that can be useful for inventory purposes.
requirements:
- facter
- ruby-json
author:
- Ansible Core Team
- Michael DeHaan
'''
EXAMPLES = '''
# Example command-line invocation
ansible www.example.net -m facter
'''
import json
from ansible.module_utils.basic import AnsibleModule
def main():
    """Run facter with JSON output and return its facts as the module result.

    The module takes no parameters; whatever facter reports is passed back
    verbatim to Ansible via exit_json.
    """
    module = AnsibleModule(
        argument_spec=dict()
    )

    # required=True makes the module fail with a clear error message when
    # facter is not installed, instead of building a command list containing
    # None and crashing with an opaque TypeError in run_command.
    facter_path = module.get_bin_path(
        'facter',
        opt_dirs=['/opt/puppetlabs/bin'],
        required=True,
    )

    cmd = [facter_path, "--json"]

    # check_rc=True: a non-zero facter exit aborts the module with stderr shown.
    rc, out, err = module.run_command(cmd, check_rc=True)
    module.exit_json(**json.loads(out))


if __name__ == '__main__':
    main()

View file

@ -0,0 +1,405 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, Alexander Bulimov <lazywolf0@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
author:
- Alexander Bulimov (@abulimov)
module: filesystem
short_description: Makes a filesystem
description:
- This module creates a filesystem.
options:
fstype:
choices: [ btrfs, ext2, ext3, ext4, ext4dev, f2fs, lvm, ocfs2, reiserfs, xfs, vfat, swap ]
description:
- Filesystem type to be created.
- reiserfs support was added in 2.2.
- lvm support was added in 2.5.
- since 2.5, I(dev) can be an image file.
- vfat support was added in 2.5
- ocfs2 support was added in 2.6
- f2fs support was added in 2.7
- swap support was added in 2.8
required: yes
aliases: [type]
dev:
description:
- Target path to device or image file.
required: yes
aliases: [device]
force:
description:
- If C(yes), allows to create new filesystem on devices that already has filesystem.
type: bool
default: 'no'
resizefs:
description:
- If C(yes), if the block device and filesystem size differ, grow the filesystem into the space.
- Supported for C(ext2), C(ext3), C(ext4), C(ext4dev), C(f2fs), C(lvm), C(xfs), C(vfat), C(swap) filesystems.
- XFS Will only grow if mounted.
- vFAT will likely fail if fatresize < 1.04.
type: bool
default: 'no'
opts:
description:
- List of options to be passed to mkfs command.
requirements:
- Uses tools related to the I(fstype) (C(mkfs)) and C(blkid) command. When I(resizefs) is enabled, C(blockdev) command is required too.
notes:
- Potential filesystem on I(dev) are checked using C(blkid), in case C(blkid) isn't able to detect an existing filesystem,
this filesystem is overwritten even if I(force) is C(no).
'''
EXAMPLES = '''
- name: Create a ext2 filesystem on /dev/sdb1
filesystem:
fstype: ext2
dev: /dev/sdb1
- name: Create a ext4 filesystem on /dev/sdb1 and check disk blocks
filesystem:
fstype: ext4
dev: /dev/sdb1
opts: -cc
'''
from distutils.version import LooseVersion
import os
import platform
import re
import stat
from ansible.module_utils.basic import AnsibleModule
class Device(object):
    """Target of mkfs: either a block device node or a regular image file."""

    def __init__(self, module, path):
        self.module = module
        self.path = path

    def size(self):
        """ Return size in bytes of device. Returns int """
        info = os.stat(self.path)
        if stat.S_ISBLK(info.st_mode):
            # Block devices do not report a useful st_size; ask blockdev.
            blockdev_cmd = self.module.get_bin_path("blockdev", required=True)
            dummy, devsize_in_bytes, dummy2 = self.module.run_command([blockdev_cmd, "--getsize64", self.path], check_rc=True)
            return int(devsize_in_bytes)
        if os.path.isfile(self.path):
            # Image files can be sized directly.
            return os.path.getsize(self.path)
        self.module.fail_json(changed=False, msg="Target device not supported: %s" % self)

    def __str__(self):
        return self.path
class Filesystem(object):
    """Base class for per-filesystem tooling: creation (mkfs) and growing."""

    GROW = None
    MKFS = None
    MKFS_FORCE_FLAGS = ''

    # Force the C locale so tool output can be parsed reliably.
    LANG_ENV = {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'}

    def __init__(self, module):
        self.module = module

    @property
    def fstype(self):
        # The subclass name doubles as the filesystem type label in messages.
        return type(self).__name__

    def get_fs_size(self, dev):
        """ Return size in bytes of filesystem on device. Returns int """
        raise NotImplementedError()

    def create(self, opts, dev):
        """Run the mkfs tool on *dev*; no-op in check mode."""
        if self.module.check_mode:
            return

        mkfs_bin = self.module.get_bin_path(self.MKFS, required=True)
        pieces = [mkfs_bin, self.MKFS_FORCE_FLAGS]
        if opts is not None:
            pieces.append(opts)
        pieces.append("'%s'" % dev)
        self.module.run_command(' '.join(pieces), check_rc=True)

    def grow_cmd(self, dev):
        """Build the grow command line for *dev*."""
        grow_bin = self.module.get_bin_path(self.GROW, required=True)
        return [grow_bin, str(dev)]

    def grow(self, dev):
        """Get dev and fs size and compare. Returns stdout of used command."""
        devsize_in_bytes = dev.size()

        try:
            fssize_in_bytes = self.get_fs_size(dev)
        except NotImplementedError:
            self.module.fail_json(changed=False, msg="module does not support resizing %s filesystem yet." % self.fstype)

        if fssize_in_bytes >= devsize_in_bytes:
            # Filesystem already fills the device: nothing to do.
            self.module.exit_json(changed=False, msg="%s filesystem is using the whole device %s" % (self.fstype, dev))
        if self.module.check_mode:
            self.module.exit_json(changed=True, msg="Resizing filesystem %s on device %s" % (self.fstype, dev))

        dummy, out, dummy2 = self.module.run_command(self.grow_cmd(dev), check_rc=True)
        return out
class Ext(Filesystem):
    """Shared logic for the ext2/ext3/ext4 family (tune2fs / resize2fs)."""

    MKFS_FORCE_FLAGS = '-F'
    GROW = 'resize2fs'

    def get_fs_size(self, dev):
        """Return the filesystem size in bytes, parsed from tune2fs -l."""
        tune2fs_bin = self.module.get_bin_path('tune2fs', required=True)
        # tune2fs -l reports both "Block count:" and "Block size:"; their
        # product is the filesystem size in bytes.
        dummy, output, dummy2 = self.module.run_command([tune2fs_bin, '-l', str(dev)], check_rc=True, environ_update=self.LANG_ENV)
        for line in output.splitlines():
            if 'Block count:' in line:
                block_count = int(line.split(':')[1].strip())
            elif 'Block size:' in line:
                block_size = int(line.split(':')[1].strip())
        return block_size * block_count
class Ext2(Ext):
    # Creation tool for ext2 filesystems; sizing/growing comes from Ext.
    MKFS = 'mkfs.ext2'
class Ext3(Ext):
    # Creation tool for ext3 filesystems; sizing/growing comes from Ext.
    MKFS = 'mkfs.ext3'
class Ext4(Ext):
    # Creation tool for ext4 (also used for the 'ext4dev' fstype alias).
    MKFS = 'mkfs.ext4'
class XFS(Filesystem):
    # Per the module DOCUMENTATION, XFS will only grow while mounted.
    MKFS = 'mkfs.xfs'
    MKFS_FORCE_FLAGS = '-f'
    GROW = 'xfs_growfs'

    def get_fs_size(self, dev):
        """Return the size in bytes of the XFS filesystem on *dev*.

        Parses `xfs_growfs -n` output, expecting a geometry line of the form:
            data     =          bsize=4096   blocks=262144, imaxpct=25
        and returns block size multiplied by block count.
        """
        cmd = self.module.get_bin_path('xfs_growfs', required=True)
        # -n: report-only mode -- print the geometry without growing anything.
        _, size, _ = self.module.run_command([cmd, '-n', str(dev)], check_rc=True, environ_update=self.LANG_ENV)
        for line in size.splitlines():
            col = line.split('=')
            if col[0].strip() == 'data':
                # Sanity-check the expected "bsize=... blocks=..." layout
                # before trusting the parsed numbers.
                if col[1].strip() != 'bsize':
                    self.module.fail_json(msg='Unexpected output format from xfs_growfs (could not locate "bsize")')
                if col[2].split()[1] != 'blocks':
                    self.module.fail_json(msg='Unexpected output format from xfs_growfs (could not locate "blocks")')
                block_size = int(col[2].split()[0])
                block_count = int(col[3].split(',')[0])
                return block_size * block_count
class Reiserfs(Filesystem):
    # No GROW tool is configured, so resizefs is unsupported for reiserfs.
    MKFS = 'mkfs.reiserfs'
    MKFS_FORCE_FLAGS = '-f'
class Btrfs(Filesystem):
    """Btrfs support; probes mkfs.btrfs at init to decide on the -f flag."""

    MKFS = 'mkfs.btrfs'

    def __init__(self, module):
        super(Btrfs, self).__init__(module)
        dummy, stdout, stderr = self.module.run_command('%s --version' % self.MKFS, check_rc=True)
        version_pattern = r" v([0-9.]+)"
        # Old releases (v0.20-rc1) print the version on stderr instead of stdout.
        match = re.search(version_pattern, stdout) or re.search(version_pattern, stderr)
        if match is None:
            # Could not detect the version: assume a modern tool (>= 3.12) and warn.
            self.MKFS_FORCE_FLAGS = '-f'
            self.module.warn('Unable to identify mkfs.btrfs version (%r, %r)' % (stdout, stderr))
        elif LooseVersion(match.group(1)) >= LooseVersion('3.12'):
            # The --force parameter only exists from v3.12 onwards.
            self.MKFS_FORCE_FLAGS = '-f'
        else:
            self.MKFS_FORCE_FLAGS = ''
class Ocfs2(Filesystem):
    # '-Fx' makes mkfs.ocfs2 non-interactive ('-F' force; '-x' -- presumably
    # a companion non-interactive switch, confirm against mkfs.ocfs2(8)).
    MKFS = 'mkfs.ocfs2'
    MKFS_FORCE_FLAGS = '-Fx'
class F2fs(Filesystem):
    """F2FS support (mkfs.f2fs / resize.f2fs / dump.f2fs)."""

    MKFS = 'mkfs.f2fs'
    GROW = 'resize.f2fs'

    @property
    def MKFS_FORCE_FLAGS(self):
        """Return '-f' when the installed mkfs.f2fs needs it, else ''.

        mkfs.f2fs only checks for an existing filesystem (and therefore needs
        -f to overwrite) since version 1.9.0; the version string itself is
        only printed since 1.2.0, so an undetectable version gets no flag.
        """
        mkfs = self.module.get_bin_path(self.MKFS, required=True)
        cmd = "%s %s" % (mkfs, os.devnull)
        dummy, out, dummy2 = self.module.run_command(cmd, check_rc=False, environ_update=self.LANG_ENV)
        # Looking for "        F2FS-tools: mkfs.f2fs Ver: 1.10.0 (2018-01-30)"
        match = re.search(r"F2FS-tools: mkfs.f2fs Ver: ([0-9.]+) \(", out)
        if match is not None and LooseVersion(match.group(1)) >= LooseVersion('1.9.0'):
            return '-f'
        return ''

    def get_fs_size(self, dev):
        """Return size in bytes of the F2FS filesystem on *dev*.

        Parses dump.f2fs output for the sector size and total sector count
        and returns their product.
        """
        cmd = self.module.get_bin_path('dump.f2fs', required=True)
        dummy, dump, dummy2 = self.module.run_command([cmd, str(dev)], check_rc=True, environ_update=self.LANG_ENV)
        sector_size = None
        sector_count = None
        for line in dump.splitlines():
            if 'Info: sector size = ' in line:
                # expected: 'Info: sector size = 512'
                sector_size = int(line.split()[4])
            elif 'Info: total FS sectors = ' in line:
                # expected: 'Info: total FS sectors = 102400 (50 MB)'
                sector_count = int(line.split()[5])
            if None not in (sector_size, sector_count):
                break
        else:
            # Fix: AnsibleModule.warn() takes a single string. The original
            # code passed printf-style arguments (a TypeError at runtime) and
            # '\n'.join(dump) over a *string*, which joins its characters.
            self.module.warn("Unable to process dump.f2fs output '%s'" % dump)
            self.module.fail_json(msg="Unable to process dump.f2fs output for %s" % dev)
        return sector_size * sector_count
class VFAT(Filesystem):
    """VFAT support; FreeBSD ships the creation tool under a different name."""

    if platform.system() == 'FreeBSD':
        MKFS = "newfs_msdos"
    else:
        MKFS = 'mkfs.vfat'
    GROW = 'fatresize'

    def get_fs_size(self, dev):
        """Return size in bytes of the FAT filesystem on *dev* (fatresize --info)."""
        cmd = self.module.get_bin_path(self.GROW, required=True)
        dummy, output, dummy2 = self.module.run_command([cmd, '--info', str(dev)], check_rc=True, environ_update=self.LANG_ENV)
        # Skip the first (header) line; look for the "Size:" field.
        for line in output.splitlines()[1:]:
            param, value = line.split(':', 1)
            if param.strip() == 'Size':
                return int(value.strip())
        self.module.fail_json(msg="fatresize failed to provide filesystem size for %s" % dev)

    def grow_cmd(self, dev):
        """Build the fatresize command line for *dev*.

        Fix: required=True for consistency with the base class, so a missing
        fatresize fails with a clear message instead of passing a None
        executable path to run_command.
        """
        cmd = self.module.get_bin_path(self.GROW, required=True)
        # fatresize needs an explicit target size: grow up to the device size.
        return [cmd, "-s", str(dev.size()), str(dev.path)]
class LVM(Filesystem):
    """Treats an LVM physical volume like a filesystem (pvcreate / pvresize)."""

    MKFS = 'pvcreate'
    MKFS_FORCE_FLAGS = '-f'
    GROW = 'pvresize'

    def get_fs_size(self, dev):
        """Return the physical volume size in bytes, as reported by pvs."""
        pvs_bin = self.module.get_bin_path('pvs', required=True)
        # --units b --nosuffix: raw byte count, no header, easy to parse.
        pvs_cmd = [pvs_bin, '--noheadings', '-o', 'pv_size', '--units', 'b', '--nosuffix', str(dev)]
        dummy, raw_size, dummy2 = self.module.run_command(pvs_cmd, check_rc=True)
        return int(raw_size)
class Swap(Filesystem):
    # Swap areas are created with mkswap; '-f' is its force flag.
    MKFS = 'mkswap'
    MKFS_FORCE_FLAGS = '-f'
# Maps the fstype module parameter (and, for LVM, blkid's reported TYPE
# 'LVM2_member') to the class implementing that filesystem's operations.
FILESYSTEMS = {
    'ext2': Ext2,
    'ext3': Ext3,
    'ext4': Ext4,
    'ext4dev': Ext4,
    'f2fs': F2fs,
    'reiserfs': Reiserfs,
    'xfs': XFS,
    'btrfs': Btrfs,
    'vfat': VFAT,
    'ocfs2': Ocfs2,
    'LVM2_member': LVM,
    'swap': Swap,
}
def main():
    """Module entry point: create or resize a filesystem on a device/image."""
    # "lvm" is accepted as a friendly alias for blkid's "LVM2_member" type.
    friendly_names = {
        'lvm': 'LVM2_member',
    }

    # Offer the friendly aliases (not the raw blkid names) as choices.
    fstypes = set(FILESYSTEMS.keys()) - set(friendly_names.values()) | set(friendly_names.keys())

    # There is no "single command" to manipulate filesystems, so we map them all out and their options
    module = AnsibleModule(
        argument_spec=dict(
            fstype=dict(required=True, aliases=['type'],
                        choices=list(fstypes)),
            dev=dict(required=True, aliases=['device']),
            opts=dict(),
            force=dict(type='bool', default=False),
            resizefs=dict(type='bool', default=False),
        ),
        supports_check_mode=True,
    )

    dev = module.params['dev']
    fstype = module.params['fstype']
    opts = module.params['opts']
    force = module.params['force']
    resizefs = module.params['resizefs']

    # Translate the alias to the canonical (blkid-reported) name.
    if fstype in friendly_names:
        fstype = friendly_names[fstype]

    changed = False

    try:
        klass = FILESYSTEMS[fstype]
    except KeyError:
        module.fail_json(changed=False, msg="module does not support this filesystem (%s) yet." % fstype)

    if not os.path.exists(dev):
        module.fail_json(msg="Device %s not found." % dev)
    dev = Device(module, dev)

    # Probe for an existing filesystem signature on the target.
    cmd = module.get_bin_path('blkid', required=True)
    rc, raw_fs, err = module.run_command("%s -c /dev/null -o value -s TYPE %s" % (cmd, dev))
    # In case blkid isn't able to identify an existing filesystem, device is considered as empty,
    # then this existing filesystem would be overwritten even if force isn't enabled.
    fs = raw_fs.strip()

    filesystem = klass(module)

    # Same filesystem family already present on the device?
    same_fs = fs and FILESYSTEMS.get(fs) == FILESYSTEMS[fstype]
    if same_fs and not resizefs and not force:
        # Nothing to do: the requested filesystem is already in place.
        module.exit_json(changed=False)
    elif same_fs and resizefs:
        if not filesystem.GROW:
            module.fail_json(changed=False, msg="module does not support resizing %s filesystem yet." % fstype)

        out = filesystem.grow(dev)

        module.exit_json(changed=True, msg=out)
    elif fs and not force:
        # A different filesystem exists: refuse to clobber it without force.
        module.fail_json(msg="'%s' is already used as %s, use force=yes to overwrite" % (dev, fs), rc=rc, err=err)

    # create fs
    filesystem.create(opts, dev)
    changed = True

    module.exit_json(changed=changed)


if __name__ == '__main__':
    main()

View file

@ -0,0 +1,863 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, Adam Miller <maxamillion@fedoraproject.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: firewalld
short_description: Manage arbitrary ports/services with firewalld
description:
- This module allows for addition or deletion of services and ports (either TCP or UDP) in either running or permanent firewalld rules.
options:
service:
description:
- Name of a service to add/remove to/from firewalld.
- The service must be listed in output of firewall-cmd --get-services.
type: str
port:
description:
- Name of a port or port range to add/remove to/from firewalld.
- Must be in the form PORT/PROTOCOL or PORT-PORT/PROTOCOL for port ranges.
type: str
rich_rule:
description:
- Rich rule to add/remove to/from firewalld.
type: str
source:
description:
- The source/network you would like to add/remove to/from firewalld.
type: str
interface:
description:
- The interface you would like to add/remove to/from a zone in firewalld.
type: str
icmp_block:
description:
- The ICMP block you would like to add/remove to/from a zone in firewalld.
type: str
icmp_block_inversion:
description:
- Enable/Disable inversion of ICMP blocks for a zone in firewalld.
type: str
zone:
description:
- The firewalld zone to add/remove to/from.
- Note that the default zone can be configured per system but C(public) is default from upstream.
- Available choices can be extended based on per-system configs, listed here are "out of the box" defaults.
- Possible values include C(block), C(dmz), C(drop), C(external), C(home), C(internal), C(public), C(trusted), C(work).
type: str
permanent:
description:
- Should this configuration be in the running firewalld configuration or persist across reboots.
- As of Ansible 2.3, permanent operations can operate on firewalld configs when it is not running (requires firewalld >= 3.0.9).
- Note that if this is C(no), immediate is assumed C(yes).
type: bool
immediate:
description:
- Should this configuration be applied immediately, if set as permanent.
type: bool
default: no
state:
description:
- Enable or disable a setting.
- 'For ports: Should this port accept (enabled) or reject (disabled) connections.'
- The states C(present) and C(absent) can only be used in zone level operations (i.e. when no other parameters but zone and state are set).
type: str
required: true
choices: [ absent, disabled, enabled, present ]
timeout:
description:
- The amount of time the rule should be in effect for when non-permanent.
type: int
default: 0
masquerade:
description:
- The masquerade setting you would like to enable/disable to/from zones within firewalld.
type: str
offline:
description:
- Whether to run this module even when firewalld is offline.
type: bool
notes:
- Not tested on any Debian based system.
- Requires the python2 bindings of firewalld, which may not be installed by default.
- For distributions where the python2 firewalld bindings are unavailable (e.g Fedora 28 and later) you will have to set the
ansible_python_interpreter for these hosts to the python3 interpreter path and install the python3 bindings.
- Zone transactions (creating, deleting) can be performed by using only the zone and state parameters "present" or "absent".
Note that zone transactions must explicitly be permanent. This is a limitation in firewalld.
This also means that you will have to reload firewalld after adding a zone that you wish to perform immediate actions on.
The module will not take care of this for you implicitly because that would undo any previously performed immediate actions which were not
permanent. Therefore, if you require immediate access to a newly created zone it is recommended you reload firewalld immediately after the zone
creation returns with a changed state and before you perform any other immediate, non-permanent actions on that zone.
requirements:
- firewalld >= 0.2.11
author:
- Adam Miller (@maxamillion)
'''
EXAMPLES = r'''
- firewalld:
service: https
permanent: yes
state: enabled
- firewalld:
port: 8081/tcp
permanent: yes
state: disabled
- firewalld:
port: 161-162/udp
permanent: yes
state: enabled
- firewalld:
zone: dmz
service: http
permanent: yes
state: enabled
- firewalld:
rich_rule: rule service name="ftp" audit limit value="1/m" accept
permanent: yes
state: enabled
- firewalld:
source: 192.0.2.0/24
zone: internal
state: enabled
- firewalld:
zone: trusted
interface: eth2
permanent: yes
state: enabled
- firewalld:
masquerade: yes
state: enabled
permanent: yes
zone: dmz
- firewalld:
zone: custom
state: present
permanent: yes
- firewalld:
zone: drop
state: enabled
permanent: yes
icmp_block_inversion: yes
- firewalld:
zone: drop
state: enabled
permanent: yes
icmp_block: echo-request
- name: Redirect port 443 to 8443 with Rich Rule
firewalld:
rich_rule: rule family=ipv4 forward-port port=443 protocol=tcp to-port=8443
zone: public
permanent: yes
immediate: yes
state: enabled
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.firewalld import FirewallTransaction, fw_offline
try:
from firewall.client import Rich_Rule
from firewall.client import FirewallClientZoneSettings
except ImportError:
# The import errors are handled via FirewallTransaction, don't need to
# duplicate that here
pass
class IcmpBlockTransaction(FirewallTransaction):
    """
    IcmpBlockTransaction

    Adds/removes an ICMP block (by ICMP type name) to/from a firewalld zone,
    in the runtime ("immediate") and/or permanent configuration.
    """

    def __init__(self, module, action_args=None, zone=None, desired_state=None, permanent=False, immediate=False):
        super(IcmpBlockTransaction, self).__init__(
            module, action_args=action_args, desired_state=desired_state, zone=zone, permanent=permanent, immediate=immediate
        )

    def get_enabled_immediate(self, icmp_block, timeout):
        # Runtime state: query the firewalld daemon directly.
        return icmp_block in self.fw.getIcmpBlocks(self.zone)

    def get_enabled_permanent(self, icmp_block, timeout):
        fw_zone, fw_settings = self.get_fw_zone_settings()
        return icmp_block in fw_settings.getIcmpBlocks()

    def set_enabled_immediate(self, icmp_block, timeout):
        self.fw.addIcmpBlock(self.zone, icmp_block, timeout)

    def set_enabled_permanent(self, icmp_block, timeout):
        fw_zone, fw_settings = self.get_fw_zone_settings()
        fw_settings.addIcmpBlock(icmp_block)
        self.update_fw_settings(fw_zone, fw_settings)

    def set_disabled_immediate(self, icmp_block, timeout):
        self.fw.removeIcmpBlock(self.zone, icmp_block)

    def set_disabled_permanent(self, icmp_block, timeout):
        fw_zone, fw_settings = self.get_fw_zone_settings()
        fw_settings.removeIcmpBlock(icmp_block)
        self.update_fw_settings(fw_zone, fw_settings)
class IcmpBlockInversionTransaction(FirewallTransaction):
    """
    IcmpBlockInversionTransaction

    Enables/disables ICMP block inversion on a firewalld zone.
    """

    def __init__(self, module, action_args=None, zone=None, desired_state=None, permanent=False, immediate=False):
        super(IcmpBlockInversionTransaction, self).__init__(
            module, action_args=action_args, desired_state=desired_state, zone=zone, permanent=permanent, immediate=immediate
        )

    def get_enabled_immediate(self):
        # Ask the running daemon whether inversion is currently active.
        return self.fw.queryIcmpBlockInversion(self.zone) is True

    def get_enabled_permanent(self):
        dummy, fw_settings = self.get_fw_zone_settings()
        return fw_settings.getIcmpBlockInversion() is True

    def set_enabled_immediate(self):
        self.fw.addIcmpBlockInversion(self.zone)

    def set_enabled_permanent(self):
        fw_zone, fw_settings = self.get_fw_zone_settings()
        fw_settings.setIcmpBlockInversion(True)
        self.update_fw_settings(fw_zone, fw_settings)

    def set_disabled_immediate(self):
        self.fw.removeIcmpBlockInversion(self.zone)

    def set_disabled_permanent(self):
        fw_zone, fw_settings = self.get_fw_zone_settings()
        fw_settings.setIcmpBlockInversion(False)
        self.update_fw_settings(fw_zone, fw_settings)
class ServiceTransaction(FirewallTransaction):
    """
    ServiceTransaction

    Adds/removes a named firewalld service to/from a zone.
    """

    def __init__(self, module, action_args=None, zone=None, desired_state=None, permanent=False, immediate=False):
        super(ServiceTransaction, self).__init__(
            module, action_args=action_args, desired_state=desired_state, zone=zone, permanent=permanent, immediate=immediate
        )

    def get_enabled_immediate(self, service, timeout):
        # Membership test against the daemon's runtime configuration.
        return service in self.fw.getServices(self.zone)

    def get_enabled_permanent(self, service, timeout):
        dummy, fw_settings = self.get_fw_zone_settings()
        return service in fw_settings.getServices()

    def set_enabled_immediate(self, service, timeout):
        self.fw.addService(self.zone, service, timeout)

    def set_enabled_permanent(self, service, timeout):
        fw_zone, fw_settings = self.get_fw_zone_settings()
        fw_settings.addService(service)
        self.update_fw_settings(fw_zone, fw_settings)

    def set_disabled_immediate(self, service, timeout):
        self.fw.removeService(self.zone, service)

    def set_disabled_permanent(self, service, timeout):
        fw_zone, fw_settings = self.get_fw_zone_settings()
        fw_settings.removeService(service)
        self.update_fw_settings(fw_zone, fw_settings)
class MasqueradeTransaction(FirewallTransaction):
    """
    MasqueradeTransaction

    Enables/disables masquerading on a firewalld zone.
    """

    def __init__(self, module, action_args=None, zone=None, desired_state=None, permanent=False, immediate=False):
        super(MasqueradeTransaction, self).__init__(
            module, action_args=action_args, desired_state=desired_state, zone=zone, permanent=permanent, immediate=immediate
        )

        self.enabled_msg = "Added masquerade to zone %s" % self.zone
        self.disabled_msg = "Removed masquerade from zone %s" % self.zone

    def get_enabled_immediate(self):
        return self.fw.queryMasquerade(self.zone) is True

    def get_enabled_permanent(self):
        dummy, fw_settings = self.get_fw_zone_settings()
        return fw_settings.getMasquerade() is True

    def set_enabled_immediate(self):
        self.fw.addMasquerade(self.zone)

    def set_enabled_permanent(self):
        fw_zone, fw_settings = self.get_fw_zone_settings()
        fw_settings.setMasquerade(True)
        self.update_fw_settings(fw_zone, fw_settings)

    def set_disabled_immediate(self):
        self.fw.removeMasquerade(self.zone)

    def set_disabled_permanent(self):
        fw_zone, fw_settings = self.get_fw_zone_settings()
        fw_settings.setMasquerade(False)
        self.update_fw_settings(fw_zone, fw_settings)
class PortTransaction(FirewallTransaction):
    """
    PortTransaction

    Adds/removes a port (or port range) with a protocol to/from a zone.
    """

    def __init__(self, module, action_args=None, zone=None, desired_state=None, permanent=False, immediate=False):
        super(PortTransaction, self).__init__(
            module, action_args=action_args, desired_state=desired_state, zone=zone, permanent=permanent, immediate=immediate
        )

    def get_enabled_immediate(self, port, protocol, timeout):
        port_proto = [port, protocol]
        # In offline mode the daemon cannot be queried; fall back to reading
        # the permanent zone configuration instead.
        if self.fw_offline:
            fw_zone, fw_settings = self.get_fw_zone_settings()
            ports_list = fw_settings.getPorts()
        else:
            ports_list = self.fw.getPorts(self.zone)

        if port_proto in ports_list:
            return True
        else:
            return False

    def get_enabled_permanent(self, port, protocol, timeout):
        # NOTE(review): the membership probe uses a tuple here but a list in
        # get_enabled_immediate() -- presumably mirroring the element types
        # the two firewalld APIs return; confirm against python-firewall.
        port_proto = (port, protocol)
        fw_zone, fw_settings = self.get_fw_zone_settings()

        if port_proto in fw_settings.getPorts():
            return True
        else:
            return False

    def set_enabled_immediate(self, port, protocol, timeout):
        self.fw.addPort(self.zone, port, protocol, timeout)

    def set_enabled_permanent(self, port, protocol, timeout):
        fw_zone, fw_settings = self.get_fw_zone_settings()
        fw_settings.addPort(port, protocol)
        self.update_fw_settings(fw_zone, fw_settings)

    def set_disabled_immediate(self, port, protocol, timeout):
        self.fw.removePort(self.zone, port, protocol)

    def set_disabled_permanent(self, port, protocol, timeout):
        fw_zone, fw_settings = self.get_fw_zone_settings()
        fw_settings.removePort(port, protocol)
        self.update_fw_settings(fw_zone, fw_settings)
class InterfaceTransaction(FirewallTransaction):
    """
    InterfaceTransaction

    Moves a network interface into/out of a firewalld zone. An interface may
    belong to at most one zone, so enabling it here also removes it from any
    zone that previously claimed it.
    """

    def __init__(self, module, action_args=None, zone=None, desired_state=None, permanent=False, immediate=False):
        super(InterfaceTransaction, self).__init__(
            module, action_args=action_args, desired_state=desired_state, zone=zone, permanent=permanent, immediate=immediate
        )

        self.enabled_msg = "Changed %s to zone %s" % \
            (self.action_args[0], self.zone)

        self.disabled_msg = "Removed %s from zone %s" % \
            (self.action_args[0], self.zone)

    def get_enabled_immediate(self, interface):
        # In offline mode the daemon cannot be queried; read the permanent
        # zone settings instead.
        if self.fw_offline:
            fw_zone, fw_settings = self.get_fw_zone_settings()
            interface_list = fw_settings.getInterfaces()
        else:
            interface_list = self.fw.getInterfaces(self.zone)
        if interface in interface_list:
            return True
        else:
            return False

    def get_enabled_permanent(self, interface):
        fw_zone, fw_settings = self.get_fw_zone_settings()

        if interface in fw_settings.getInterfaces():
            return True
        else:
            return False

    def set_enabled_immediate(self, interface):
        self.fw.changeZoneOfInterface(self.zone, interface)

    def set_enabled_permanent(self, interface):
        fw_zone, fw_settings = self.get_fw_zone_settings()
        if self.fw_offline:
            # Offline path: manipulate zone XML configs directly. Collect
            # every zone file that currently claims this interface.
            iface_zone_objs = []
            for zone in self.fw.config.get_zones():
                old_zone_obj = self.fw.config.get_zone(zone)
                if interface in old_zone_obj.interfaces:
                    iface_zone_objs.append(old_zone_obj)
            if len(iface_zone_objs) > 1:
                # Even it shouldn't happen, it's actually possible that
                # the same interface is in several zone XML files
                self.module.fail_json(
                    msg='ERROR: interface {0} is in {1} zone XML file, can only be in one'.format(
                        interface,
                        len(iface_zone_objs)
                    )
                )

            # NOTE(review): assumes the interface appears in at least one
            # zone XML file; an empty list would raise IndexError here.
            old_zone_obj = iface_zone_objs[0]
            if old_zone_obj.name != self.zone:
                old_zone_settings = FirewallClientZoneSettings(
                    self.fw.config.get_zone_config(old_zone_obj)
                )
                old_zone_settings.removeInterface(interface)    # remove from old
                self.fw.config.set_zone_config(
                    old_zone_obj,
                    old_zone_settings.settings
                )

                fw_settings.addInterface(interface)             # add to new
                self.fw.config.set_zone_config(fw_zone, fw_settings.settings)
        else:
            # Online path: go through the daemon's permanent configuration.
            old_zone_name = self.fw.config().getZoneOfInterface(interface)
            if old_zone_name != self.zone:
                if old_zone_name:
                    old_zone_obj = self.fw.config().getZoneByName(old_zone_name)
                    old_zone_settings = old_zone_obj.getSettings()
                    old_zone_settings.removeInterface(interface)  # remove from old
                    old_zone_obj.update(old_zone_settings)

                fw_settings.addInterface(interface)               # add to new
                fw_zone.update(fw_settings)

    def set_disabled_immediate(self, interface):
        self.fw.removeInterface(self.zone, interface)

    def set_disabled_permanent(self, interface):
        fw_zone, fw_settings = self.get_fw_zone_settings()
        fw_settings.removeInterface(interface)
        self.update_fw_settings(fw_zone, fw_settings)
class RichRuleTransaction(FirewallTransaction):
    """
    RichRuleTransaction

    Adds/removes a firewalld rich rule to/from a zone.
    """

    def __init__(self, module, action_args=None, zone=None, desired_state=None, permanent=False, immediate=False):
        super(RichRuleTransaction, self).__init__(
            module, action_args=action_args, desired_state=desired_state, zone=zone, permanent=permanent, immediate=immediate
        )

    def get_enabled_immediate(self, rule, timeout):
        # Normalise the rule text through Rich_Rule so textual variants of
        # the same rule compare equal to what firewalld stores.
        normalised_rule = str(Rich_Rule(rule_str=rule))
        return normalised_rule in self.fw.getRichRules(self.zone)

    def get_enabled_permanent(self, rule, timeout):
        dummy, fw_settings = self.get_fw_zone_settings()
        # Same normalisation as the immediate variant.
        normalised_rule = str(Rich_Rule(rule_str=rule))
        return normalised_rule in fw_settings.getRichRules()

    def set_enabled_immediate(self, rule, timeout):
        self.fw.addRichRule(self.zone, rule, timeout)

    def set_enabled_permanent(self, rule, timeout):
        fw_zone, fw_settings = self.get_fw_zone_settings()
        fw_settings.addRichRule(rule)
        self.update_fw_settings(fw_zone, fw_settings)

    def set_disabled_immediate(self, rule, timeout):
        self.fw.removeRichRule(self.zone, rule)

    def set_disabled_permanent(self, rule, timeout):
        fw_zone, fw_settings = self.get_fw_zone_settings()
        fw_settings.removeRichRule(rule)
        self.update_fw_settings(fw_zone, fw_settings)
class SourceTransaction(FirewallTransaction):
    """Transaction that adds or removes a source (address/range) on a zone."""

    def __init__(self, module, action_args=None, zone=None, desired_state=None, permanent=False, immediate=False):
        super(SourceTransaction, self).__init__(
            module, action_args=action_args, desired_state=desired_state, zone=zone, permanent=permanent, immediate=immediate
        )
        source = self.action_args[0]
        self.enabled_msg = "Added %s to zone %s" % (source, self.zone)
        self.disabled_msg = "Removed %s from zone %s" % (source, self.zone)

    def get_enabled_immediate(self, source):
        """Return True if the source is attached in the runtime configuration."""
        return source in self.fw.getSources(self.zone)

    def get_enabled_permanent(self, source):
        """Return True if the source is attached in the permanent configuration."""
        dummy, settings = self.get_fw_zone_settings()
        return source in settings.getSources()

    def set_enabled_immediate(self, source):
        self.fw.addSource(self.zone, source)

    def set_enabled_permanent(self, source):
        zone_obj, settings = self.get_fw_zone_settings()
        settings.addSource(source)
        self.update_fw_settings(zone_obj, settings)

    def set_disabled_immediate(self, source):
        self.fw.removeSource(self.zone, source)

    def set_disabled_permanent(self, source):
        zone_obj, settings = self.get_fw_zone_settings()
        settings.removeSource(source)
        self.update_fw_settings(zone_obj, settings)
class ZoneTransaction(FirewallTransaction):
    """Transaction that creates or deletes a whole zone.

    Zone create/delete only exists in the permanent configuration, so every
    immediate-mode hook fails the module with an explanatory message.
    """

    def __init__(self, module, action_args=None, zone=None, desired_state=None,
                 permanent=True, immediate=False, enabled_values=None, disabled_values=None):
        super(ZoneTransaction, self).__init__(
            module, action_args=action_args, desired_state=desired_state, zone=zone,
            permanent=permanent, immediate=immediate,
            enabled_values=enabled_values or ["present"],
            disabled_values=disabled_values or ["absent"])
        self.enabled_msg = "Added zone %s" % (self.zone)
        self.disabled_msg = "Removed zone %s" % (self.zone)
        self.tx_not_permanent_error_msg = "Zone operations must be permanent. " \
            "Make sure you didn't set the 'permanent' flag to 'false' or the 'immediate' flag to 'true'."

    def get_enabled_immediate(self):
        self.module.fail_json(msg=self.tx_not_permanent_error_msg)

    def get_enabled_permanent(self):
        """Return True if a zone with our name exists in the permanent config."""
        config = self.fw.config()
        existing = [config.getZone(path).get_property("name") for path in config.listZones()]
        return self.zone in existing

    def set_enabled_immediate(self):
        self.module.fail_json(msg=self.tx_not_permanent_error_msg)

    def set_enabled_permanent(self):
        # Create the zone with empty (default) settings.
        self.fw.config().addZone(self.zone, FirewallClientZoneSettings())

    def set_disabled_immediate(self):
        self.module.fail_json(msg=self.tx_not_permanent_error_msg)

    def set_disabled_permanent(self):
        self.fw.config().getZoneByName(self.zone).remove()
def main():
    """Module entry point: validate parameters and run exactly one firewalld
    transaction (service, port, rich rule, source, interface, masquerade,
    icmp-block, icmp-block-inversion, or a zone-level create/delete)."""
    module = AnsibleModule(
        argument_spec=dict(
            icmp_block=dict(type='str'),
            icmp_block_inversion=dict(type='str'),
            service=dict(type='str'),
            port=dict(type='str'),
            rich_rule=dict(type='str'),
            zone=dict(type='str'),
            immediate=dict(type='bool', default=False),
            source=dict(type='str'),
            permanent=dict(type='bool'),
            state=dict(type='str', required=True, choices=['absent', 'disabled', 'enabled', 'present']),
            timeout=dict(type='int', default=0),
            interface=dict(type='str'),
            masquerade=dict(type='str'),
            offline=dict(type='bool'),
        ),
        supports_check_mode=True,
        required_by=dict(
            interface=('zone',),
            source=('permanent',),
        ),
    )
    permanent = module.params['permanent']
    desired_state = module.params['state']
    immediate = module.params['immediate']
    timeout = module.params['timeout']
    interface = module.params['interface']
    masquerade = module.params['masquerade']
    # Sanity checks
    FirewallTransaction.sanity_check(module)
    # If neither permanent or immediate is provided, assume immediate (as
    # written in the module's docs)
    if not permanent and not immediate:
        immediate = True
    # Verify required params are provided
    # Immediate (runtime) actions require a running firewalld daemon.
    if immediate and fw_offline:
        module.fail_json(msg='firewall is not currently running, unable to perform immediate actions without a running firewall daemon')
    changed = False
    msgs = []
    icmp_block = module.params['icmp_block']
    icmp_block_inversion = module.params['icmp_block_inversion']
    service = module.params['service']
    rich_rule = module.params['rich_rule']
    source = module.params['source']
    zone = module.params['zone']
    # Split "port/protocol" into its components; a missing protocol is an
    # error.  NOTE(review): 'port' is left unbound in the missing-protocol
    # branch, but fail_json terminates the module, so it is never read.
    if module.params['port'] is not None:
        if '/' in module.params['port']:
            port, protocol = module.params['port'].strip().split('/')
        else:
            protocol = None
        if not protocol:
            module.fail_json(msg='improper port format (missing protocol?)')
    else:
        port = None
    # Count how many of the mutually-exclusive operations were requested;
    # only one may be used per task invocation.
    modification_count = 0
    if icmp_block is not None:
        modification_count += 1
    if icmp_block_inversion is not None:
        modification_count += 1
    if service is not None:
        modification_count += 1
    if port is not None:
        modification_count += 1
    if rich_rule is not None:
        modification_count += 1
    if interface is not None:
        modification_count += 1
    if masquerade is not None:
        modification_count += 1
    if source is not None:
        modification_count += 1
    if modification_count > 1:
        module.fail_json(
            msg='can only operate on port, service, rich_rule, masquerade, icmp_block, icmp_block_inversion, interface or source at once'
        )
    elif modification_count > 0 and desired_state in ['absent', 'present']:
        # absent/present are reserved for zone-level operations below.
        module.fail_json(
            msg='absent and present state can only be used in zone level operations'
        )
    # Exactly one of the following branches runs (modification_count <= 1).
    if icmp_block is not None:
        transaction = IcmpBlockTransaction(
            module,
            action_args=(icmp_block, timeout),
            zone=zone,
            desired_state=desired_state,
            permanent=permanent,
            immediate=immediate,
        )
        changed, transaction_msgs = transaction.run()
        msgs = msgs + transaction_msgs
        if changed is True:
            msgs.append("Changed icmp-block %s to %s" % (icmp_block, desired_state))
    if icmp_block_inversion is not None:
        transaction = IcmpBlockInversionTransaction(
            module,
            action_args=(),
            zone=zone,
            desired_state=desired_state,
            permanent=permanent,
            immediate=immediate,
        )
        changed, transaction_msgs = transaction.run()
        msgs = msgs + transaction_msgs
        if changed is True:
            msgs.append("Changed icmp-block-inversion %s to %s" % (icmp_block_inversion, desired_state))
    if service is not None:
        transaction = ServiceTransaction(
            module,
            action_args=(service, timeout),
            zone=zone,
            desired_state=desired_state,
            permanent=permanent,
            immediate=immediate,
        )
        changed, transaction_msgs = transaction.run()
        msgs = msgs + transaction_msgs
        if changed is True:
            msgs.append("Changed service %s to %s" % (service, desired_state))
    if source is not None:
        # SourceTransaction composes its own enabled/disabled messages.
        transaction = SourceTransaction(
            module,
            action_args=(source,),
            zone=zone,
            desired_state=desired_state,
            permanent=permanent,
            immediate=immediate,
        )
        changed, transaction_msgs = transaction.run()
        msgs = msgs + transaction_msgs
    if port is not None:
        transaction = PortTransaction(
            module,
            action_args=(port, protocol, timeout),
            zone=zone,
            desired_state=desired_state,
            permanent=permanent,
            immediate=immediate,
        )
        changed, transaction_msgs = transaction.run()
        msgs = msgs + transaction_msgs
        if changed is True:
            msgs.append(
                "Changed port %s to %s" % (
                    "%s/%s" % (port, protocol), desired_state
                )
            )
    if rich_rule is not None:
        transaction = RichRuleTransaction(
            module,
            action_args=(rich_rule, timeout),
            zone=zone,
            desired_state=desired_state,
            permanent=permanent,
            immediate=immediate,
        )
        changed, transaction_msgs = transaction.run()
        msgs = msgs + transaction_msgs
        if changed is True:
            msgs.append("Changed rich_rule %s to %s" % (rich_rule, desired_state))
    if interface is not None:
        transaction = InterfaceTransaction(
            module,
            action_args=(interface,),
            zone=zone,
            desired_state=desired_state,
            permanent=permanent,
            immediate=immediate,
        )
        changed, transaction_msgs = transaction.run()
        msgs = msgs + transaction_msgs
    if masquerade is not None:
        transaction = MasqueradeTransaction(
            module,
            action_args=(),
            zone=zone,
            desired_state=desired_state,
            permanent=permanent,
            immediate=immediate,
        )
        changed, transaction_msgs = transaction.run()
        msgs = msgs + transaction_msgs
    ''' If there are no changes within the zone we are operating on the zone itself '''
    if modification_count == 0 and desired_state in ['absent', 'present']:
        transaction = ZoneTransaction(
            module,
            action_args=(),
            zone=zone,
            desired_state=desired_state,
            permanent=permanent,
            immediate=immediate,
        )
        changed, transaction_msgs = transaction.run()
        msgs = msgs + transaction_msgs
        if changed is True:
            msgs.append("Changed zone %s to %s" % (zone, desired_state))
    # When the daemon is down only the on-disk configuration was touched.
    if fw_offline:
        msgs.append("(offline operation: only on-disk configs were altered)")
    module.exit_json(changed=changed, msg=', '.join(msgs))
# Run the module entry point only when executed directly (not on import).
if __name__ == '__main__':
    main()

View file

@ -0,0 +1,232 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Kenneth D. Evensen <kevensen@redhat.com>
# Copyright: (c) 2017, Abhijeet Kasurde <akasurde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: gconftool2
author:
- Kenneth D. Evensen (@kevensen)
short_description: Edit GNOME Configurations
description:
- This module allows for the manipulation of GNOME 2 Configuration via
gconftool-2. Please see the gconftool-2(1) man pages for more details.
options:
key:
description:
- A GConf preference key is an element in the GConf repository
that corresponds to an application preference. See man gconftool-2(1)
required: yes
value:
description:
- Preference keys typically have simple values such as strings,
integers, or lists of strings and integers. This is ignored if the state
is "get". See man gconftool-2(1)
value_type:
description:
- The type of value being set. This is ignored if the state is "get".
choices: [ bool, float, int, string ]
state:
description:
- The action to take upon the key/value.
required: yes
choices: [ absent, get, present ]
config_source:
description:
- Specify a configuration source to use rather than the default path.
See man gconftool-2(1)
direct:
description:
- Access the config database directly, bypassing server. If direct is
specified then the config_source must be specified as well.
See man gconftool-2(1)
type: bool
default: 'no'
'''
EXAMPLES = """
- name: Change the widget font to "Serif 12"
gconftool2:
key: "/desktop/gnome/interface/font_name"
value_type: "string"
value: "Serif 12"
"""
RETURN = '''
key:
description: The key specified in the module parameters
returned: success
type: str
sample: /desktop/gnome/interface/font_name
value_type:
description: The type of the value that was changed
returned: success
type: str
sample: string
value:
description: The value of the preference key after executing the module
returned: success
type: str
sample: "Serif 12"
...
'''
from ansible.module_utils.basic import AnsibleModule
class GConf2Preference(object):
    """Thin wrapper around the ``gconftool-2`` command line tool for
    getting, setting and unsetting a single GConf preference key."""
    def __init__(self, ansible, key, value_type, value,
                 direct=False, config_source=""):
        # ansible: the AnsibleModule instance (used for run_command/fail_json)
        self.ansible = ansible
        # key: the GConf preference key to operate on
        self.key = key
        # value_type: gconftool-2 --type argument (bool/float/int/string)
        self.value_type = value_type
        # value: the desired value (ignored for "get"/"unset")
        self.value = value
        # config_source: alternate configuration source, '' means default
        self.config_source = config_source
        # direct: access the config database directly, bypassing the server
        self.direct = direct
    def value_already_set(self):
        # NOTE(review): always returns False and appears unused; callers
        # compare the "get" output themselves to detect changes.
        return False
    def call(self, call_type, fail_onerr=True):
        """ Helper function to perform gconftool-2 operations.

        call_type is one of 'get', 'set' or 'unset'.  Returns a tuple
        (changed, output): changed is True only when gconftool-2 produced
        no stderr output; output is stripped stdout.  When fail_onerr is
        True any stderr output fails the module.

        NOTE(review): the command is built by string interpolation and run
        with use_unsafe_shell=True, so key/value content is subject to
        shell interpretation -- confirm inputs are trusted.
        """
        config_source = ''
        direct = ''
        changed = False
        out = ''
        # If the configuration source is different from the default, create
        # the argument
        if self.config_source is not None and len(self.config_source) > 0:
            config_source = "--config-source " + self.config_source
        # If direct is true, create the argument
        if self.direct:
            direct = "--direct"
        # Execute the call
        cmd = "gconftool-2 "
        try:
            # If the call is "get", then we don't need as many parameters and
            # we can ignore some
            if call_type == 'get':
                cmd += "--get {0}".format(self.key)
            # Otherwise, we will use all relevant parameters
            elif call_type == 'set':
                cmd += "{0} {1} --type {2} --{3} {4} \"{5}\"".format(direct,
                                                                     config_source,
                                                                     self.value_type,
                                                                     call_type,
                                                                     self.key,
                                                                     self.value)
            elif call_type == 'unset':
                cmd += "--unset {0}".format(self.key)
            # Start external command
            rc, out, err = self.ansible.run_command(cmd, use_unsafe_shell=True)
            # Any stderr output is treated as failure; success implies a change.
            if len(err) > 0:
                if fail_onerr:
                    self.ansible.fail_json(msg='gconftool-2 failed with '
                                               'error: %s' % (str(err)))
            else:
                changed = True
        except OSError as exception:
            self.ansible.fail_json(msg='gconftool-2 failed with exception: '
                                       '%s' % exception)
        return changed, out.rstrip()
def main():
    """Module entry point for gconftool2.

    Parses parameters, normalizes boolean-like values, validates the
    parameter combination for the requested state and then gets, sets or
    unsets the GConf key, reporting the previous and new values as facts.
    """
    # Setup the Ansible module
    module = AnsibleModule(
        argument_spec=dict(
            key=dict(type='str', required=True),
            value_type=dict(type='str', choices=['bool', 'float', 'int', 'string']),
            value=dict(type='str'),
            state=dict(type='str', required=True, choices=['absent', 'get', 'present']),
            direct=dict(type='bool', default=False),
            config_source=dict(type='str'),
        ),
        supports_check_mode=True
    )
    # Map module states to gconftool-2 sub-commands.
    state_values = {"present": "set", "absent": "unset", "get": "get"}
    # Assign module values to dictionary values
    key = module.params['key']
    value_type = module.params['value_type']
    value = module.params['value']
    # Normalize boolean-like values to the lowercase spelling gconftool-2
    # expects.  Guard against value being None (it is optional, e.g. when
    # state=get): the previous code called .lower() unconditionally and
    # crashed with AttributeError.  "False" is now matched case-insensitively,
    # consistent with how "True" was already handled.
    if value is not None:
        if value.lower() == "true":
            value = "true"
        elif value.lower() == "false":
            value = "false"
    state = state_values[module.params['state']]
    direct = module.params['direct']
    config_source = module.params['config_source']
    # Initialize some variables for later
    change = False
    new_value = ''
    # set/unset require a value and value_type; "get" does not.
    if state != "get":
        if value is None or value == "":
            module.fail_json(msg='State %s requires "value" to be set'
                             % str(state))
        elif value_type is None or value_type == "":
            module.fail_json(msg='State %s requires "value_type" to be set'
                             % str(state))
        if direct and config_source is None:
            module.fail_json(msg='If "direct" is "yes" then the ' +
                             '"config_source" must be specified')
        elif not direct and config_source is not None:
            module.fail_json(msg='If the "config_source" is specified ' +
                             'then "direct" must be "yes"')
    # Create a gconf2 preference
    gconf_pref = GConf2Preference(module, key, value_type,
                                  value, direct, config_source)
    # Now we get the current value, if not found don't fail
    _, current_value = gconf_pref.call("get", fail_onerr=False)
    # Check if the current value equals the value we want to set. If not, make
    # a change
    if current_value != value:
        # If check mode, we know a change would have occurred.
        if module.check_mode:
            # So we will set the change to True
            change = True
            # And set the new_value to the value that would have been set
            new_value = value
        # If not check mode make the change.
        else:
            change, new_value = gconf_pref.call(state)
    # If the value we want to set is the same as the current_value, we will
    # set the new_value to the current_value for reporting
    else:
        new_value = current_value
    facts = dict(gconftool2={'changed': change,
                             'key': key,
                             'value_type': value_type,
                             'new_value': new_value,
                             'previous_value': current_value,
                             'playbook_value': module.params['value']})
    module.exit_json(changed=change, ansible_facts=facts)
# Run the module entry point only when executed directly (not on import).
if __name__ == '__main__':
    main()

View file

@ -0,0 +1,397 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2016, Roman Belyakovsky <ihryamzik () gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: interfaces_file
short_description: Tweak settings in /etc/network/interfaces files
extends_documentation_fragment: files
description:
- Manage (add, remove, change) individual interface options in an interfaces-style file without having
to manage the file as a whole with, say, M(template) or M(assemble). Interface has to be presented in a file.
- Read information about interfaces from interfaces-styled files
options:
dest:
description:
- Path to the interfaces file
default: /etc/network/interfaces
iface:
description:
- Name of the interface, required for value changes or option remove
address_family:
description:
- Address family of the interface, useful if same interface name is used for both inet and inet6
option:
description:
- Name of the option, required for value changes or option remove
value:
description:
- If I(option) is not presented for the I(interface) and I(state) is C(present) option will be added.
        If I(option) already exists and is not C(pre-up), C(up), C(post-up) or C(down), its value will be updated.
C(pre-up), C(up), C(post-up) and C(down) options can't be updated, only adding new options, removing existing
ones or cleaning the whole option set are supported
backup:
description:
- Create a backup file including the timestamp information so you can get
the original file back if you somehow clobbered it incorrectly.
type: bool
default: 'no'
state:
description:
- If set to C(absent) the option or section will be removed if present instead of created.
default: "present"
choices: [ "present", "absent" ]
notes:
   - If an option is defined multiple times, the last occurrence will be updated, but all occurrences will be deleted when I(state) is C(absent)
requirements: []
author: "Roman Belyakovsky (@hryamzik)"
'''
RETURN = '''
dest:
description: destination file/path
returned: success
type: str
sample: "/etc/network/interfaces"
ifaces:
description: interfaces dictionary
returned: success
type: complex
contains:
ifaces:
description: interface dictionary
returned: success
type: dict
contains:
eth0:
description: Name of the interface
returned: success
type: dict
contains:
address_family:
description: interface address family
returned: success
type: str
sample: "inet"
method:
description: interface method
returned: success
type: str
sample: "manual"
mtu:
description: other options, all values returned as strings
returned: success
type: str
sample: "1500"
pre-up:
description: list of C(pre-up) scripts
returned: success
type: list
sample:
- "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
- "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
up:
description: list of C(up) scripts
returned: success
type: list
sample:
- "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
- "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
post-up:
description: list of C(post-up) scripts
returned: success
type: list
sample:
- "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
- "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
down:
description: list of C(down) scripts
returned: success
type: list
sample:
- "route del -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
- "route del -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
...
'''
EXAMPLES = '''
# Set eth1 mtu configuration value to 8000
- interfaces_file:
dest: /etc/network/interfaces.d/eth1.cfg
iface: eth1
option: mtu
value: 8000
backup: yes
state: present
register: eth1_cfg
'''
import os
import re
import tempfile
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes
def lineDict(line):
    """Wrap a raw file line in the parser's generic line record."""
    return dict(line=line, line_type='unknown')
def optionDict(line, iface, option, value, address_family):
    """Build the parser record for an interface option line."""
    return dict(
        line=line,
        iface=iface,
        option=option,
        value=value,
        line_type='option',
        address_family=address_family,
    )
def getValueFromLine(s):
    """Return the value portion of an interfaces(5) option line.

    The value is everything after the first whitespace-separated token,
    with leading/trailing whitespace (including the newline) removed but
    internal spacing preserved.  Returns '' for a line with no value.

    The previous implementation located the last whitespace run in the
    raw string, which returned '' (or raised NameError) for lines that
    lack a trailing newline, e.g. the final line of a file.
    """
    parts = s.rstrip().split(None, 1)
    if len(parts) < 2:
        # Option with no value (or an empty/whitespace-only line).
        return ''
    return parts[1]
def read_interfaces_file(module, filename):
    """Open *filename* and parse it with read_interfaces_lines.

    Returns the (lines, ifaces) tuple produced by the parser.  The file is
    now closed deterministically via a context manager; the previous code
    leaked the file handle.
    """
    with open(filename, 'r') as f:
        return read_interfaces_lines(module, f)
def read_interfaces_lines(module, line_strings):
    """Parse an iterable of raw interfaces(5) lines.

    Returns a tuple (lines, ifaces):
      - lines: a list of dicts, one per input line, tagging each line as
        'iface', 'option' or 'unknown' (see lineDict/optionDict).
      - ifaces: dict mapping interface name -> its parsed parameters,
        where pre-up/up/down/post-up accumulate into lists and any other
        option stores its (last) string value.

    Fails the module on an option line that appears outside an iface or
    mapping stanza.
    """
    lines = []
    ifaces = {}
    # Tracks which stanza type the current line belongs to:
    # None (file start), "IFACE", "MAPPING" or "NONE".
    currently_processing = None
    i = 0  # 1-based line counter, used only for error reporting
    for line in line_strings:
        i += 1
        words = line.split()
        # Blank lines and comments are kept verbatim as 'unknown' lines.
        if len(words) < 1:
            lines.append(lineDict(line))
            continue
        if words[0][0] == "#":
            lines.append(lineDict(line))
            continue
        if words[0] == "mapping":
            # currmap = calloc(1, sizeof *currmap);
            lines.append(lineDict(line))
            currently_processing = "MAPPING"
        elif words[0] == "source":
            lines.append(lineDict(line))
            currently_processing = "NONE"
        elif words[0] == "source-dir":
            lines.append(lineDict(line))
            currently_processing = "NONE"
        elif words[0] == "source-directory":
            lines.append(lineDict(line))
            currently_processing = "NONE"
        elif words[0] == "iface":
            # Start of an interface stanza: "iface <name> [family] [method]".
            currif = {
                "pre-up": [],
                "up": [],
                "down": [],
                "post-up": []
            }
            iface_name = words[1]
            # Family and method are optional positional words.
            try:
                currif['address_family'] = words[2]
            except IndexError:
                currif['address_family'] = None
            address_family = currif['address_family']
            try:
                currif['method'] = words[3]
            except IndexError:
                currif['method'] = None
            ifaces[iface_name] = currif
            lines.append({'line': line, 'iface': iface_name, 'line_type': 'iface', 'params': currif, 'address_family': address_family})
            currently_processing = "IFACE"
        elif words[0] == "auto":
            lines.append(lineDict(line))
            currently_processing = "NONE"
        elif words[0].startswith("allow-"):
            lines.append(lineDict(line))
            currently_processing = "NONE"
        elif words[0] == "no-auto-down":
            lines.append(lineDict(line))
            currently_processing = "NONE"
        elif words[0] == "no-scripts":
            lines.append(lineDict(line))
            currently_processing = "NONE"
        else:
            # Any other first word is an option line whose meaning depends
            # on the stanza we are currently inside.
            if currently_processing == "IFACE":
                option_name = words[0]
                # TODO: if option_name in currif.options
                value = getValueFromLine(line)
                lines.append(optionDict(line, iface_name, option_name, value, address_family))
                # Script hooks may repeat and accumulate; other options
                # keep only their last value.
                if option_name in ["pre-up", "up", "down", "post-up"]:
                    currif[option_name].append(value)
                else:
                    currif[option_name] = value
            elif currently_processing == "MAPPING":
                lines.append(lineDict(line))
            elif currently_processing == "NONE":
                lines.append(lineDict(line))
            else:
                # Option before any stanza header: malformed file.
                module.fail_json(msg="misplaced option %s in line %d" % (line, i))
                # Defensive return in case fail_json does not exit (tests).
                return None, None
    return lines, ifaces
def setInterfaceOption(module, lines, iface, option, raw_value, state, address_family=None):
    """Add, update or remove one option of *iface* in the parsed *lines*.

    lines is the list produced by read_interfaces_lines; raw_value is
    coerced to str.  state is 'present' (add/update) or 'absent' (remove).
    Returns (changed, lines); fails the module if the interface is not
    present or the state is unsupported.
    """
    value = str(raw_value)
    changed = False
    # All parsed records belonging to this interface (header + options).
    iface_lines = [item for item in lines if "iface" in item and item["iface"] == iface]
    if address_family is not None:
        iface_lines = [item for item in iface_lines
                       if "address_family" in item and item["address_family"] == address_family]
    if len(iface_lines) < 1:
        # interface not found
        module.fail_json(msg="Error: interface %s not found" % iface)
        # Defensive return in case fail_json does not exit (tests).
        return changed, None
    iface_options = list(filter(lambda i: i['line_type'] == 'option', iface_lines))
    target_options = list(filter(lambda i: i['option'] == option, iface_options))
    if state == "present":
        if len(target_options) < 1:
            changed = True
            # add new option
            last_line_dict = iface_lines[-1]
            changed, lines = addOptionAfterLine(option, value, iface, lines, last_line_dict, iface_options, address_family)
        else:
            # Script hooks (pre-up/up/down/post-up) are append-only: a new
            # value is added as an extra line, never edited in place.
            if option in ["pre-up", "up", "down", "post-up"]:
                if len(list(filter(lambda i: i['value'] == value, target_options))) < 1:
                    changed, lines = addOptionAfterLine(option, value, iface, lines, target_options[-1], iface_options, address_family)
            else:
                # if more than one option found edit the last one
                if target_options[-1]['value'] != value:
                    changed = True
                    target_option = target_options[-1]
                    old_line = target_option['line']
                    old_value = target_option['value']
                    address_family = target_option['address_family']
                    # Locate the old value inside the raw line (whitespace
                    # runs in the value are matched flexibly) and splice the
                    # new value in, preserving surrounding formatting.
                    prefix_start = old_line.find(option)
                    optionLen = len(option)
                    old_value_position = re.search(r"\s+".join(old_value.split()), old_line[prefix_start + optionLen:])
                    start = old_value_position.start() + prefix_start + optionLen
                    end = old_value_position.end() + prefix_start + optionLen
                    line = old_line[:start] + value + old_line[end:]
                    # Replace the last occurrence of this record in lines.
                    index = len(lines) - lines[::-1].index(target_option) - 1
                    lines[index] = optionDict(line, iface, option, value, address_family)
    elif state == "absent":
        if len(target_options) >= 1:
            # For script hooks with an explicit value, remove only matching
            # entries; otherwise remove every occurrence of the option.
            if option in ["pre-up", "up", "down", "post-up"] and value is not None and value != "None":
                for target_option in filter(lambda i: i['value'] == value, target_options):
                    changed = True
                    lines = list(filter(lambda ln: ln != target_option, lines))
            else:
                changed = True
                for target_option in target_options:
                    lines = list(filter(lambda ln: ln != target_option, lines))
    else:
        module.fail_json(msg="Error: unsupported state %s, has to be either present or absent" % state)
    return changed, lines
def addOptionAfterLine(option, value, iface, lines, last_line_dict, iface_options, address_family):
    """Insert a new "<option> <value>" line right after *last_line_dict*.

    The new line copies the indentation (prefix) and trailing text (suffix,
    e.g. the newline) of the reference line.  Returns (changed, lines).
    The 'method' option is special-cased: it lives on the iface header line
    itself, so it is rewritten in place instead of inserted.
    """
    # Changing method of interface is not an addition
    if option == 'method':
        changed = False
        for ln in lines:
            if ln.get('line_type', '') == 'iface' and ln.get('iface', '') == iface and value != ln.get('params', {}).get('method', ''):
                changed = True
                # The method is the last word of the iface header line.
                ln['line'] = re.sub(ln.get('params', {}).get('method', '') + '$', value, ln.get('line'))
                ln['params']['method'] = value
        return changed, lines
    last_line = last_line_dict['line']
    # prefix = leading indentation; suffix = everything after the last word.
    prefix_start = last_line.find(last_line.split()[0])
    suffix_start = last_line.rfind(last_line.split()[-1]) + len(last_line.split()[-1])
    prefix = last_line[:prefix_start]
    if len(iface_options) < 1:
        # interface has no options, ident
        prefix += "    "
    line = prefix + "%s %s" % (option, value) + last_line[suffix_start:]
    option_dict = optionDict(line, iface, option, value, address_family)
    # Insert immediately after the reference line's last occurrence.
    index = len(lines) - lines[::-1].index(last_line_dict)
    lines.insert(index, option_dict)
    return True, lines
def write_changes(module, lines, dest):
    """Atomically replace *dest* with the joined raw lines.

    The content is written to a temporary file first and then moved into
    place with Ansible's atomic_move, so readers never observe a partially
    written file.
    """
    tmp_fd, tmp_name = tempfile.mkstemp()
    payload = to_bytes(''.join(lines), errors='surrogate_or_strict')
    with os.fdopen(tmp_fd, 'wb') as tmp_file:
        tmp_file.write(payload)
    module.atomic_move(tmp_name, os.path.realpath(dest))
def main():
    """Module entry point: parse parameters, apply the requested option
    change to the interfaces file and report the parsed interfaces."""
    module = AnsibleModule(
        argument_spec=dict(
            dest=dict(type='path', default='/etc/network/interfaces'),
            iface=dict(type='str'),
            address_family=dict(type='str'),
            option=dict(type='str'),
            value=dict(type='str'),
            backup=dict(type='bool', default=False),
            state=dict(type='str', default='present', choices=['absent', 'present']),
        ),
        add_file_common_args=True,
        supports_check_mode=True,
        required_by=dict(
            option=('iface',),
        ),
    )
    dest = module.params['dest']
    iface = module.params['iface']
    address_family = module.params['address_family']
    option = module.params['option']
    value = module.params['value']
    backup = module.params['backup']
    state = module.params['state']
    # Setting an option requires a value (removal does not).
    if option is not None and state == "present" and value is None:
        module.fail_json(msg="Value must be set if option is defined and state is 'present'")
    lines, ifaces = read_interfaces_file(module, dest)
    changed = False
    if option is not None:
        changed, lines = setInterfaceOption(module, lines, iface, option, value, state, address_family)
    # Re-parse the edited lines so the returned ifaces reflect the change.
    if changed:
        _, ifaces = read_interfaces_lines(module, [d['line'] for d in lines if 'line' in d])
    # Only touch the file outside check mode; optionally keep a backup.
    if changed and not module.check_mode:
        if backup:
            module.backup_local(dest)
        write_changes(module, [d['line'] for d in lines if 'line' in d], dest)
    module.exit_json(dest=dest, changed=changed, ifaces=ifaces)
# Run the module entry point only when executed directly (not on import).
if __name__ == '__main__':
    main()

View file

@ -0,0 +1,403 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, RSD Services S.A
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: java_cert
short_description: Uses keytool to import/remove key from java keystore (cacerts)
description:
- This is a wrapper module around keytool, which can be used to import/remove
certificates from a given java keystore.
options:
cert_url:
description:
- Basic URL to fetch SSL certificate from.
- One of C(cert_url) or C(cert_path) is required to load certificate.
type: str
cert_port:
description:
- Port to connect to URL.
- This will be used to create server URL:PORT.
type: int
default: 443
cert_path:
description:
- Local path to load certificate from.
- One of C(cert_url) or C(cert_path) is required to load certificate.
type: path
cert_alias:
description:
- Imported certificate alias.
- The alias is used when checking for the presence of a certificate in the keystore.
type: str
trust_cacert:
description:
- Trust imported cert as CAcert.
type: bool
default: False
pkcs12_path:
description:
- Local path to load PKCS12 keystore from.
type: path
pkcs12_password:
description:
- Password for importing from PKCS12 keystore.
type: str
default: ''
pkcs12_alias:
description:
- Alias in the PKCS12 keystore.
type: str
keystore_path:
description:
- Path to keystore.
type: path
keystore_pass:
description:
- Keystore password.
type: str
required: true
keystore_create:
description:
- Create keystore if it does not exist.
type: bool
keystore_type:
description:
- Keystore type (JCEKS, JKS).
type: str
executable:
description:
- Path to keytool binary if not used we search in PATH for it.
type: str
default: keytool
state:
description:
- Defines action which can be either certificate import or removal.
type: str
choices: [ absent, present ]
default: present
author:
- Adam Hamsik (@haad)
'''
EXAMPLES = r'''
- name: Import SSL certificate from google.com to a given cacerts keystore
java_cert:
cert_url: google.com
cert_port: 443
keystore_path: /usr/lib/jvm/jre7/lib/security/cacerts
keystore_pass: changeit
state: present
- name: Remove certificate with given alias from a keystore
java_cert:
cert_url: google.com
keystore_path: /usr/lib/jvm/jre7/lib/security/cacerts
keystore_pass: changeit
executable: /usr/lib/jvm/jre7/bin/keytool
state: absent
- name: Import trusted CA from SSL certificate
java_cert:
cert_path: /opt/certs/rootca.crt
keystore_path: /tmp/cacerts
keystore_pass: changeit
keystore_create: yes
state: present
cert_alias: LE_RootCA
trust_cacert: True
- name: Import SSL certificate from google.com to a keystore, create it if it doesn't exist
java_cert:
cert_url: google.com
keystore_path: /tmp/cacerts
keystore_pass: changeit
keystore_create: yes
state: present
- name: Import a pkcs12 keystore with a specified alias, create it if it doesn't exist
java_cert:
pkcs12_path: "/tmp/importkeystore.p12"
cert_alias: default
keystore_path: /opt/wildfly/standalone/configuration/defaultkeystore.jks
keystore_pass: changeit
keystore_create: yes
state: present
- name: Import SSL certificate to JCEKS keystore
java_cert:
pkcs12_path: "/tmp/importkeystore.p12"
pkcs12_alias: default
pkcs12_password: somepass
cert_alias: default
keystore_path: /opt/someapp/security/keystore.jceks
keystore_type: "JCEKS"
keystore_pass: changeit
keystore_create: yes
state: present
'''
RETURN = r'''
msg:
description: Output from stdout of keytool command after execution of given command.
returned: success
type: str
sample: "Module require existing keystore at keystore_path '/tmp/test/cacerts'"
rc:
description: Keytool command execution return value.
returned: success
type: int
sample: "0"
cmd:
description: Executed command to get action done.
returned: success
type: str
sample: "keytool -importcert -noprompt -keystore"
'''
import os
import re
# import module snippets
from ansible.module_utils.basic import AnsibleModule
def get_keystore_type(keystore_type):
    """Return the keytool ``-storetype`` fragment for a custom keystore type.

    An empty string is returned when no custom type was requested, so the
    result can always be appended to a keytool command line unconditionally.
    """
    if not keystore_type:
        return ''
    return " -storetype '%s'" % keystore_type
def check_cert_present(module, executable, keystore_path, keystore_pass, alias, keystore_type):
    """Return True when a certificate with *alias* exists in the keystore.

    Runs ``keytool -list`` restricted to the alias; keytool exits with 0
    only when the alias is present.
    """
    list_cmd = ("%s -noprompt -list -keystore '%s' -storepass '%s' "
                "-alias '%s' %s") % (executable, keystore_path, keystore_pass, alias, get_keystore_type(keystore_type))
    (rc, dummy_out, dummy_err) = module.run_command(list_cmd)
    return rc == 0
def import_cert_url(module, executable, url, port, keystore_path, keystore_pass, alias, keystore_type, trust_cacert):
    """Fetch the SSL certificate served at url:port and import it into the keystore.

    Always exits the module: exit_json on success, fail_json when keytool
    rejects the import.
    """
    https_proxy = os.getenv("https_proxy")
    no_proxy = os.getenv("no_proxy")

    proxy_opts = ''
    if https_proxy is not None:
        # https_proxy may be 'host:port' or '[scheme://]host[:port]'.  The
        # previous str.split(':') raised ValueError for scheme-prefixed
        # values such as 'http://proxy:3128'; strip an optional scheme and
        # split on the *last* colon instead.
        proxy_netloc = https_proxy.rsplit('://', 1)[-1].rstrip('/')
        (proxy_host, sep, proxy_port) = proxy_netloc.rpartition(':')
        if not sep:
            # No explicit port: keytool needs one, fall back to the common
            # squid default.
            proxy_host, proxy_port = proxy_netloc, '3128'
        proxy_opts = "-J-Dhttps.proxyHost=%s -J-Dhttps.proxyPort=%s" % (proxy_host, proxy_port)

        if no_proxy is not None:
            # For Java's nonProxyHosts property, items are separated by '|',
            # and patterns have to start with "*".
            non_proxy_hosts = no_proxy.replace(',', '|')
            non_proxy_hosts = re.sub(r'(^|\|)\.', r'\1*.', non_proxy_hosts)

            # The property name is http.nonProxyHosts, there is no
            # separate setting for HTTPS.
            proxy_opts += " -J-Dhttp.nonProxyHosts='%s'" % non_proxy_hosts

    fetch_cmd = "%s -printcert -rfc -sslserver %s %s:%d" % (executable, proxy_opts, url, port)
    import_cmd = ("%s -importcert -noprompt -keystore '%s' "
                  "-storepass '%s' -alias '%s' %s") % (executable, keystore_path,
                                                       keystore_pass, alias,
                                                       get_keystore_type(keystore_type))
    if trust_cacert:
        import_cmd = import_cmd + " -trustcacerts"

    # Fetch SSL certificate from remote host.
    (dummy_rc, fetch_out, dummy_err) = module.run_command(fetch_cmd, check_rc=True)

    # Feed the fetched PEM to keytool on stdin and import it.
    (import_rc, import_out, import_err) = module.run_command(import_cmd,
                                                             data=fetch_out,
                                                             check_rc=False)
    diff = {'before': '\n', 'after': '%s\n' % alias}
    if import_rc == 0:
        module.exit_json(changed=True, msg=import_out,
                         rc=import_rc, cmd=import_cmd, stdout=import_out,
                         diff=diff)
    else:
        module.fail_json(msg=import_out, rc=import_rc, cmd=import_cmd,
                         error=import_err)
def import_cert_path(module, executable, path, keystore_path, keystore_pass, alias, keystore_type, trust_cacert):
    """Import the certificate file at *path* into the keystore under *alias*.

    Always exits the module: exit_json on success, fail_json on keytool failure.
    """
    import_cmd = ("%s -importcert -noprompt -keystore '%s' "
                  "-storepass '%s' -file '%s' -alias '%s' %s") % (executable, keystore_path,
                                                                  keystore_pass, path, alias,
                                                                  get_keystore_type(keystore_type))
    if trust_cacert:
        import_cmd += " -trustcacerts"

    # Use local certificate from local path and import it to a java keystore
    (import_rc, import_out, import_err) = module.run_command(import_cmd, check_rc=False)

    diff = {'before': '\n', 'after': '%s\n' % alias}
    if import_rc != 0:
        module.fail_json(msg=import_out, rc=import_rc, cmd=import_cmd)
    module.exit_json(changed=True, msg=import_out,
                     rc=import_rc, cmd=import_cmd, stdout=import_out,
                     error=import_err, diff=diff)
def import_pkcs12_path(module, executable, path, keystore_path, keystore_pass, pkcs12_pass, pkcs12_alias, alias, keystore_type):
    """Import a PKCS#12 bundle at *path* into the destination keystore as *alias*.

    Always exits the module: exit_json on success, fail_json on keytool failure.
    """
    import_cmd = ("%s -importkeystore -noprompt -destkeystore '%s' -srcstoretype PKCS12 "
                  "-deststorepass '%s' -destkeypass '%s' -srckeystore '%s' -srcstorepass '%s' "
                  "-srcalias '%s' -destalias '%s' %s") % (executable, keystore_path, keystore_pass,
                                                          keystore_pass, path, pkcs12_pass, pkcs12_alias,
                                                          alias, get_keystore_type(keystore_type))

    # Import the source PKCS#12 store into the (possibly new) java keystore.
    (import_rc, import_out, import_err) = module.run_command(import_cmd, check_rc=False)

    diff = {'before': '\n', 'after': '%s\n' % alias}
    if import_rc != 0:
        module.fail_json(msg=import_out, rc=import_rc, cmd=import_cmd)
    module.exit_json(changed=True, msg=import_out,
                     rc=import_rc, cmd=import_cmd, stdout=import_out,
                     error=import_err, diff=diff)
def delete_cert(module, executable, keystore_path, keystore_pass, alias, keystore_type):
    """Delete the certificate stored under *alias*; always exits the module."""
    del_cmd = ("%s -delete -keystore '%s' -storepass '%s' "
               "-alias '%s' %s") % (executable, keystore_path, keystore_pass, alias, get_keystore_type(keystore_type))

    # check_rc=True: a failing keytool aborts the module with its output.
    (del_rc, del_out, del_err) = module.run_command(del_cmd, check_rc=True)

    module.exit_json(changed=True, msg=del_out,
                     rc=del_rc, cmd=del_cmd, stdout=del_out,
                     error=del_err,
                     diff={'before': '%s\n' % alias, 'after': None})
def test_keytool(module, executable):
    """Abort the module run (via check_rc) when *executable* cannot be invoked."""
    probe_cmd = "%s" % executable
    module.run_command(probe_cmd, check_rc=True)
def test_keystore(module, keystore_path):
    """Fail the module unless *keystore_path* points at an existing keystore file."""
    path = keystore_path if keystore_path is not None else ''
    if not os.path.exists(path) and not os.path.isfile(path):
        # Caller did not ask for keystore creation, so a missing file is fatal.
        module.fail_json(changed=False, msg="Module require existing keystore at keystore_path '%s'" % path)
def main():
    """Entry point: manage presence/absence of a certificate in a Java keystore."""
    argument_spec = dict(
        cert_url=dict(type='str'),
        cert_path=dict(type='path'),
        pkcs12_path=dict(type='path'),
        pkcs12_password=dict(type='str', no_log=True),
        pkcs12_alias=dict(type='str'),
        cert_alias=dict(type='str'),
        cert_port=dict(type='int', default=443),
        keystore_path=dict(type='path'),
        keystore_pass=dict(type='str', required=True, no_log=True),
        trust_cacert=dict(type='bool', default=False),
        keystore_create=dict(type='bool', default=False),
        keystore_type=dict(type='str'),
        executable=dict(type='str', default='keytool'),
        state=dict(type='str', default='present', choices=['absent', 'present']),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_one_of=[['cert_path', 'cert_url', 'pkcs12_path']],
        required_together=[['keystore_path', 'keystore_pass']],
        mutually_exclusive=[
            ['cert_url', 'cert_path', 'pkcs12_path']
        ],
        supports_check_mode=True,
    )

    url = module.params.get('cert_url')
    path = module.params.get('cert_path')
    port = module.params.get('cert_port')

    pkcs12_path = module.params.get('pkcs12_path')
    # NOTE(review): module.params always contains every declared key, so the
    # fallback arguments ('' and '1') of .get() below are effectively dead;
    # real defaults come from argument_spec.
    pkcs12_pass = module.params.get('pkcs12_password', '')
    pkcs12_alias = module.params.get('pkcs12_alias', '1')

    # When importing from a URL the alias defaults to the URL itself.
    cert_alias = module.params.get('cert_alias') or url

    trust_cacert = module.params.get('trust_cacert')
    keystore_path = module.params.get('keystore_path')
    keystore_pass = module.params.get('keystore_pass')
    keystore_create = module.params.get('keystore_create')
    keystore_type = module.params.get('keystore_type')
    executable = module.params.get('executable')
    state = module.params.get('state')

    if path and not cert_alias:
        module.fail_json(changed=False,
                         msg="Using local path import from %s requires alias argument."
                             % keystore_path)

    # Sanity checks: keytool must be runnable; the keystore must exist unless
    # the caller asked for it to be created on import.
    test_keytool(module, executable)

    if not keystore_create:
        test_keystore(module, keystore_path)

    cert_present = check_cert_present(module, executable, keystore_path,
                                      keystore_pass, cert_alias, keystore_type)

    if state == 'absent' and cert_present:
        if module.check_mode:
            module.exit_json(changed=True)

        # delete_cert exits the module itself.
        delete_cert(module, executable, keystore_path, keystore_pass, cert_alias, keystore_type)

    elif state == 'present' and not cert_present:
        if module.check_mode:
            module.exit_json(changed=True)

        # Exactly one source is set (mutually_exclusive above); each import
        # helper exits the module on completion.
        if pkcs12_path:
            import_pkcs12_path(module, executable, pkcs12_path, keystore_path,
                               keystore_pass, pkcs12_pass, pkcs12_alias, cert_alias, keystore_type)

        if path:
            import_cert_path(module, executable, path, keystore_path,
                             keystore_pass, cert_alias, keystore_type, trust_cacert)

        if url:
            import_cert_url(module, executable, url, port, keystore_path,
                            keystore_pass, cert_alias, keystore_type, trust_cacert)

    # Nothing to do: state already satisfied.
    module.exit_json(changed=False)


if __name__ == "__main__":
    main()

View file

@ -0,0 +1,288 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Guillaume Grossetie <ggrossetie@yuzutech.fr>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: java_keystore
short_description: Create or delete a Java keystore in JKS format.
description:
- Create or delete a Java keystore in JKS format for a given certificate.
options:
name:
description:
- Name of the certificate.
required: true
certificate:
description:
- Certificate that should be used to create the key store.
required: true
private_key:
description:
- Private key that should be used to create the key store.
required: true
password:
description:
- Password that should be used to secure the key store.
required: true
dest:
description:
- Absolute path where the jks should be generated.
required: true
owner:
description:
- Name of the user that should own jks file.
required: false
group:
description:
- Name of the group that should own jks file.
required: false
mode:
description:
- Mode the file should be.
required: false
force:
description:
- Key store will be created even if it already exists.
required: false
type: bool
default: 'no'
requirements: [openssl, keytool]
author: Guillaume Grossetie (@Mogztter)
'''
EXAMPLES = '''
# Create a key store for the given certificate (inline)
- java_keystore:
name: example
certificate: |
-----BEGIN CERTIFICATE-----
h19dUZ2co2fI/ibYiwxWk4aeNE6KWvCaTQOMQ8t6Uo2XKhpL/xnjoAgh1uCQN/69
MG+34+RhUWzCfdZH7T8/qDxJw2kEPKluaYh7KnMsba+5jHjmtzix5QIDAQABo4IB
-----END CERTIFICATE-----
private_key: |
-----BEGIN RSA PRIVATE KEY-----
DBVFTEVDVFJJQ0lURSBERSBGUkFOQ0UxFzAVBgNVBAsMDjAwMDIgNTUyMDgxMzE3
GLlDNMw/uHyME7gHFsqJA7O11VY6O5WQ4IDP3m/s5ZV6s+Nn6Lerz17VZ99
-----END RSA PRIVATE KEY-----
password: changeit
dest: /etc/security/keystore.jks
# Create a key store for the given certificate (lookup)
- java_keystore:
name: example
certificate: "{{lookup('file', '/path/to/certificate.crt') }}"
private_key: "{{lookup('file', '/path/to/private.key') }}"
password: changeit
dest: /etc/security/keystore.jks
'''
RETURN = '''
msg:
description: Output from stdout of keytool/openssl command after execution of given command or an error.
returned: changed and failure
type: str
sample: "Unable to find the current certificate fingerprint in ..."
rc:
description: keytool/openssl command execution return value
returned: changed and failure
type: int
sample: "0"
cmd:
description: Executed command to get action done
returned: changed and failure
type: str
sample: "openssl x509 -noout -in /tmp/cert.crt -fingerprint -sha256"
'''
from ansible.module_utils.basic import AnsibleModule
import os
import re
def read_certificate_fingerprint(module, openssl_bin, certificate_path):
    """Return the SHA-256 fingerprint of the certificate at *certificate_path*,
    or fail the module when openssl cannot produce one.
    """
    fingerprint_cmd = "%s x509 -noout -in %s -fingerprint -sha256" % (openssl_bin, certificate_path)
    (rc, fingerprint_out, fingerprint_err) = run_commands(module, fingerprint_cmd)
    if rc != 0:
        return module.fail_json(msg=fingerprint_out,
                                err=fingerprint_err,
                                rc=rc,
                                cmd=fingerprint_cmd)

    # openssl prints e.g. "SHA256 Fingerprint=AB:CD:..."; keep the hex part.
    fingerprint_match = re.search(r"=([\w:]+)", fingerprint_out)
    if not fingerprint_match:
        return module.fail_json(
            msg="Unable to find the current certificate fingerprint in %s" % fingerprint_out,
            rc=rc,
            cmd=fingerprint_err
        )
    return fingerprint_match.group(1)
def read_stored_certificate_fingerprint(module, keytool_bin, alias, keystore_path, keystore_password):
    """Return the SHA-256 fingerprint stored under *alias* in the keystore,
    None when the alias does not exist, or fail the module on other errors.
    """
    stored_cmd = "%s -list -alias '%s' -keystore '%s' -storepass '%s' -v" % (
        keytool_bin, alias, keystore_path, keystore_password)
    (rc, stored_out, stored_err) = run_commands(module, stored_cmd)

    if rc != 0:
        # A missing alias is expected (keystore without our cert yet).
        if "keytool error: java.lang.Exception: Alias <%s> does not exist" % alias in stored_out:
            return None
        return module.fail_json(msg=stored_out,
                                err=stored_err,
                                rc=rc,
                                cmd=stored_cmd)

    stored_match = re.search(r"SHA256: ([\w:]+)", stored_out)
    if not stored_match:
        return module.fail_json(
            msg="Unable to find the stored certificate fingerprint in %s" % stored_out,
            rc=rc,
            cmd=stored_cmd
        )
    return stored_match.group(1)
def run_commands(module, cmd, check_rc=True):
    """Thin wrapper around AnsibleModule.run_command; returns (rc, stdout, stderr)."""
    return module.run_command(cmd, check_rc=check_rc)
def create_file(path, content):
    """Write *content* to *path* (truncating any existing file) and return the path."""
    with open(path, 'w') as handle:
        handle.write(content)
    return path
def create_tmp_certificate(module):
    """Materialise the 'certificate' parameter as /tmp/<name>.crt; return its path."""
    cert_name = module.params['name']
    return create_file("/tmp/%s.crt" % cert_name, module.params['certificate'])
def create_tmp_private_key(module):
    """Materialise the 'private_key' parameter as /tmp/<name>.key; return its path."""
    key_name = module.params['name']
    return create_file("/tmp/%s.key" % key_name, module.params['private_key'])
def cert_changed(module, openssl_bin, keytool_bin, keystore_path, keystore_pass, alias):
    """Return True when the configured certificate differs from the stored one."""
    certificate_path = create_tmp_certificate(module)
    try:
        current_fp = read_certificate_fingerprint(module, openssl_bin, certificate_path)
        stored_fp = read_stored_certificate_fingerprint(module, keytool_bin, alias, keystore_path, keystore_pass)
        return current_fp != stored_fp
    finally:
        # Never leave the temporary PEM lying around in /tmp.
        os.remove(certificate_path)
def create_jks(module, name, openssl_bin, keytool_bin, keystore_path, password):
    """(Re)create the JKS at *keystore_path* from the module's cert/key params.

    Always terminates the module run: exit_json on success or in check mode,
    fail_json when openssl or keytool fails.
    """
    if module.check_mode:
        module.exit_json(changed=True)
    else:
        certificate_path = create_tmp_certificate(module)
        private_key_path = create_tmp_private_key(module)
        try:
            # Start from a clean slate: drop any stale destination keystore
            # and intermediate PKCS#12 bundle.
            if os.path.exists(keystore_path):
                os.remove(keystore_path)
            keystore_p12_path = "/tmp/keystore.p12"
            if os.path.exists(keystore_p12_path):
                os.remove(keystore_p12_path)
            # Step 1: bundle cert + key into an intermediate PKCS#12 store.
            export_p12_cmd = "%s pkcs12 -export -name '%s' -in '%s' -inkey '%s' -out '%s' -passout 'pass:%s'" % (
                openssl_bin, name, certificate_path, private_key_path, keystore_p12_path, password)
            (rc, export_p12_out, export_p12_err) = run_commands(module, export_p12_cmd)
            if rc != 0:
                return module.fail_json(msg=export_p12_out,
                                        rc=rc,
                                        cmd=export_p12_cmd)
            # Step 2: convert the PKCS#12 bundle into the destination JKS.
            import_keystore_cmd = "%s -importkeystore " \
                                  "-destkeystore '%s' " \
                                  "-srckeystore '%s' " \
                                  "-srcstoretype pkcs12 " \
                                  "-alias '%s' " \
                                  "-deststorepass '%s' " \
                                  "-srcstorepass '%s' " \
                                  "-noprompt" % (keytool_bin, keystore_path, keystore_p12_path, name, password, password)
            (rc, import_keystore_out, import_keystore_err) = run_commands(module, import_keystore_cmd)
            if rc == 0:
                # Apply owner/group/mode options before reporting success.
                update_jks_perm(module, keystore_path)
                return module.exit_json(changed=True,
                                        msg=import_keystore_out,
                                        rc=rc,
                                        cmd=import_keystore_cmd,
                                        stdout_lines=import_keystore_out)
            else:
                return module.fail_json(msg=import_keystore_out,
                                        rc=rc,
                                        cmd=import_keystore_cmd)
        finally:
            # The temp cert/key files contain secret material -- always clean up.
            os.remove(certificate_path)
            os.remove(private_key_path)
def update_jks_perm(module, keystore_path):
    """Apply the module's owner/group/mode file arguments to the keystore."""
    common_args = module.load_file_common_arguments(module.params, path=keystore_path)
    module.set_fs_attributes_if_different(common_args, False)
def process_jks(module):
    """Decide whether the keystore must be (re)created and act accordingly.

    Note: create_jks() terminates the module run itself.
    """
    name = module.params['name']
    password = module.params['password']
    keystore_path = module.params['dest']
    openssl_bin = module.get_bin_path('openssl', True)
    keytool_bin = module.get_bin_path('keytool', True)

    # Missing keystore: always create.
    if not os.path.exists(keystore_path):
        create_jks(module, name, openssl_bin, keytool_bin, keystore_path, password)
        return
    # Existing keystore: recreate when forced or when the cert changed.
    if module.params['force']:
        create_jks(module, name, openssl_bin, keytool_bin, keystore_path, password)
        return
    if cert_changed(module, openssl_bin, keytool_bin, keystore_path, password, name):
        create_jks(module, name, openssl_bin, keytool_bin, keystore_path, password)
        return
    # Up to date: still enforce file permissions (outside check mode).
    if not module.check_mode:
        update_jks_perm(module, keystore_path)
    return module.exit_json(changed=False)
class ArgumentSpec(object):
    """Bundles the AnsibleModule construction arguments for this module."""

    def __init__(self):
        self.supports_check_mode = True
        self.add_file_common_args = True
        self.argument_spec = dict(
            name=dict(required=True),
            certificate=dict(required=True, no_log=True),
            private_key=dict(required=True, no_log=True),
            password=dict(required=True, no_log=True),
            dest=dict(required=True),
            force=dict(required=False, default=False, type='bool'),
        )
def main():
    """Entry point: wire the argument spec into AnsibleModule and run the logic."""
    arg_holder = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=arg_holder.argument_spec,
        add_file_common_args=arg_holder.add_file_common_args,
        supports_check_mode=arg_holder.supports_check_mode,
    )
    process_jks(module)


if __name__ == '__main__':
    main()

View file

@ -0,0 +1,154 @@
#!/usr/bin/python
# encoding: utf-8 -*-
# Copyright: (c) 2013, Matthias Vogelgesang <matthias.vogelgesang@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: kernel_blacklist
author:
- Matthias Vogelgesang (@matze)
short_description: Blacklist kernel modules
description:
- Add or remove kernel modules from blacklist.
options:
name:
description:
- Name of kernel module to black- or whitelist.
required: true
state:
description:
- Whether the module should be present in the blacklist or absent.
choices: [ absent, present ]
default: present
blacklist_file:
description:
- If specified, use this blacklist file instead of
C(/etc/modprobe.d/blacklist-ansible.conf).
'''
EXAMPLES = '''
- name: Blacklist the nouveau driver module
kernel_blacklist:
name: nouveau
state: present
'''
import os
import re
from ansible.module_utils.basic import AnsibleModule
class Blacklist(object):
    """Manipulates 'blacklist <module>' entries in a modprobe blacklist file."""

    def __init__(self, module, filename, checkmode):
        # NOTE: 'module' is the kernel module *name*, not an AnsibleModule.
        self.filename = filename
        self.module = module
        self.checkmode = checkmode

    def create_file(self):
        """Ensure the blacklist file exists; return True when it was missing."""
        if os.path.exists(self.filename):
            return False
        if self.checkmode:
            # Don't touch the filesystem in check mode; redirect all further
            # writes to /dev/null instead.
            self.filename = os.devnull
        else:
            open(self.filename, 'a').close()
        return True

    def get_pattern(self):
        """Regex matching a blacklist line for this kernel module."""
        return r'^blacklist\s*' + self.module + '$'

    def readlines(self):
        """Return all lines of the blacklist file."""
        with open(self.filename, 'r') as handle:
            return handle.readlines()

    def module_listed(self):
        """Return True when a non-comment line blacklists this module."""
        pattern = self.get_pattern()
        for raw_line in self.readlines():
            candidate = raw_line.strip()
            if not candidate.startswith('#') and re.match(pattern, candidate):
                return True
        return False

    def remove_module(self):
        """Drop every line blacklisting this module (writes /dev/null in check mode)."""
        pattern = self.get_pattern()
        kept = [line for line in self.readlines()
                if not re.match(pattern, line.strip())]
        target = os.devnull if self.checkmode else self.filename
        with open(target, 'w') as handle:
            handle.writelines(kept)

    def add_module(self):
        """Append a blacklist entry (written to /dev/null in check mode)."""
        target = os.devnull if self.checkmode else self.filename
        with open(target, 'a') as handle:
            handle.write('blacklist %s\n' % self.module)
def main():
    """Entry point: ensure the requested blacklist state and report the result."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            blacklist_file=dict(type='str'),
        ),
        supports_check_mode=True,
    )

    result = dict(changed=False, failed=False,
                  name=module.params['name'], state=module.params['state'])

    filename = module.params['blacklist_file'] or '/etc/modprobe.d/blacklist-ansible.conf'

    # First positional argument is the kernel module name, not the AnsibleModule.
    blacklist = Blacklist(result['name'], filename, module.check_mode)

    # Creating a missing blacklist file already counts as a change.
    result['changed'] = blacklist.create_file()

    if blacklist.module_listed():
        if result['state'] == 'absent':
            blacklist.remove_module()
            result['changed'] = True
    elif result['state'] == 'present':
        blacklist.add_module()
        result['changed'] = True

    module.exit_json(**result)


if __name__ == '__main__':
    main()

View file

@ -0,0 +1,130 @@
#!/usr/bin/python
# Copyright: (c) 2019, Kaarle Ritvanen <kaarle.ritvanen@datakunkku.fi>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: lbu
short_description: Local Backup Utility for Alpine Linux
description:
- Manage Local Backup Utility of Alpine Linux in run-from-RAM mode
options:
commit:
description:
- Control whether to commit changed files.
type: bool
exclude:
description:
- List of paths to exclude.
type: list
include:
description:
- List of paths to include.
type: list
author:
- Kaarle Ritvanen (@kunkku)
'''
EXAMPLES = '''
# Commit changed files (if any)
- name: Commit
lbu:
commit: true
# Exclude path and commit
- name: Exclude directory
lbu:
commit: true
exclude:
- /etc/opt
# Include paths without committing
- name: Include file and directory
lbu:
include:
- /root/.ssh/authorized_keys
- /var/lib/misc
'''
RETURN = '''
msg:
description: Error message
type: str
returned: on failure
'''
from ansible.module_utils.basic import AnsibleModule
import os.path
def run_module():
    """Manage Alpine's Local Backup Utility (lbu): include/exclude paths and commit."""
    module = AnsibleModule(
        argument_spec={
            'commit': {'type': 'bool'},
            'exclude': {'type': 'list', 'elements': 'str'},
            'include': {'type': 'list', 'elements': 'str'}
        },
        supports_check_mode=True
    )

    changed = False

    def run_lbu(*args):
        # Run `lbu <args...>`; any non-zero exit aborts the module with stderr.
        code, stdout, stderr = module.run_command(
            [module.get_bin_path('lbu', required=True)] + list(args)
        )
        if code:
            module.fail_json(changed=changed, msg=stderr)
        return stdout

    update = False
    commit = False

    for param in ('include', 'exclude'):
        if module.params[param]:
            # `lbu include -l` / `lbu exclude -l` list currently tracked paths.
            paths = run_lbu(param, '-l').split('\n')
            for path in module.params[param]:
                # Normalise to an absolute path without the leading '/', the
                # same form lbu prints, before membership testing.
                if os.path.normpath('/' + path)[1:] not in paths:
                    update = True

    if module.params['commit']:
        # Commit is needed when we are about to change the lists or when
        # `lbu status` reports pending (non-empty) output.
        commit = update or run_lbu('status') > ''

    if module.check_mode:
        module.exit_json(changed=update or commit)

    if update:
        for param in ('include', 'exclude'):
            if module.params[param]:
                run_lbu(param, *module.params[param])
        changed = True

    if commit:
        run_lbu('commit')
        changed = True

    module.exit_json(changed=changed)
def main():
    """Module entry point."""
    run_module()


if __name__ == '__main__':
    main()

View file

@ -0,0 +1,248 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, Nathan Davison <ndavison85@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: listen_ports_facts
author:
- Nathan Davison (@ndavison)
description:
- Gather facts on processes listening on TCP and UDP ports.
short_description: Gather facts on processes listening on TCP and UDP ports.
'''
EXAMPLES = r'''
- name: Gather facts on listening ports
listen_ports_facts:
- name: TCP whitelist violation
debug:
msg: TCP port {{ item.port }} by pid {{ item.pid }} violates the whitelist
vars:
tcp_listen_violations: "{{ ansible_facts.tcp_listen | selectattr('port', 'in', tcp_whitelist) | list }}"
tcp_whitelist:
- 22
- 25
loop: "{{ tcp_listen_violations }}"
- name: List TCP ports
debug:
msg: "{{ ansible_facts.tcp_listen | map(attribute='port') | sort | list }}"
- name: List UDP ports
debug:
msg: "{{ ansible_facts.udp_listen | map(attribute='port') | sort | list }}"
- name: List all ports
debug:
msg: "{{ (ansible_facts.tcp_listen + ansible_facts.udp_listen) | map(attribute='port') | unique | sort | list }}"
'''
RETURN = r'''
ansible_facts:
description: Dictionary containing details of TCP and UDP ports with listening servers
returned: always
type: complex
contains:
tcp_listen:
description: A list of processes that are listening on a TCP port.
returned: if TCP servers were found
type: list
contains:
address:
description: The address the server is listening on.
returned: always
type: str
sample: "0.0.0.0"
name:
description: The name of the listening process.
returned: if user permissions allow
type: str
sample: "mysqld"
pid:
description: The pid of the listening process.
returned: always
type: int
sample: 1223
port:
description: The port the server is listening on.
returned: always
type: int
sample: 3306
protocol:
description: The network protocol of the server.
returned: always
type: str
sample: "tcp"
stime:
description: The start time of the listening process.
returned: always
type: str
sample: "Thu Feb 2 13:29:45 2017"
user:
description: The user who is running the listening process.
returned: always
type: str
sample: "mysql"
udp_listen:
description: A list of processes that are listening on a UDP port.
returned: if UDP servers were found
type: list
contains:
address:
description: The address the server is listening on.
returned: always
type: str
sample: "0.0.0.0"
name:
description: The name of the listening process.
returned: if user permissions allow
type: str
sample: "rsyslogd"
pid:
description: The pid of the listening process.
returned: always
type: int
sample: 609
port:
description: The port the server is listening on.
returned: always
type: int
sample: 514
protocol:
description: The network protocol of the server.
returned: always
type: str
sample: "udp"
stime:
description: The start time of the listening process.
returned: always
type: str
sample: "Thu Feb 2 13:29:45 2017"
user:
description: The user who is running the listening process.
returned: always
type: str
sample: "root"
'''
import re
import platform
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import AnsibleModule
def netStatParse(raw):
    """Parse `netstat -plunt` output into a list of listener dicts.

    Each dict carries: pid, address, port, protocol, name.
    Raises EnvironmentError when a listening line cannot be parsed (e.g.
    netstat ran without the privileges needed to expose PID/program).
    """
    results = list()
    for line in raw.splitlines():
        # Only rows that contain an 'address:port' token are socket rows;
        # header lines are skipped entirely.
        listening_search = re.search('[^ ]+:[0-9]+', line)
        if listening_search:
            splitted = line.split()
            # Column 3 holds the local address:port pair.
            conns = re.search('([^ ]+):([0-9]+)', splitted[3])
            pidstr = ''
            if 'tcp' in splitted[0]:
                protocol = 'tcp'
                # tcp rows have an extra State column before PID/Program.
                pidstr = splitted[6]
            elif 'udp' in splitted[0]:
                protocol = 'udp'
                pidstr = splitted[5]
            # 'pid/name', or '-' when the process is not visible to this user.
            pids = re.search(r'(([0-9]+)/(.*)|-)', pidstr)
            if conns and pids:
                address = conns.group(1)
                port = conns.group(2)
                if (pids.group(2)):
                    pid = pids.group(2)
                else:
                    pid = 0
                if (pids.group(3)):
                    name = pids.group(3)
                else:
                    name = ''
                result = {
                    'pid': int(pid),
                    'address': address,
                    'port': int(port),
                    'protocol': protocol,
                    'name': name,
                }
                # De-duplicate identical listeners (e.g. dual-stack sockets).
                if result not in results:
                    results.append(result)
            else:
                raise EnvironmentError('Could not get process information for the listening ports.')
    return results
def main():
    """Gather listening TCP/UDP socket facts from `netstat -plunt` (Linux only)."""
    module = AnsibleModule(
        argument_spec={},
        supports_check_mode=True,
    )

    if platform.system() != 'Linux':
        module.fail_json(msg='This module requires Linux.')

    def getPidSTime(pid):
        # `ps -o lstart` prints a header containing 'started'; keep only the
        # data line (the last non-header line wins).
        ps_cmd = module.get_bin_path('ps', True)
        rc, ps_output, stderr = module.run_command([ps_cmd, '-o', 'lstart', '-p', str(pid)])
        stime = ''
        if rc == 0:
            for line in ps_output.splitlines():
                if 'started' not in line:
                    stime = line
        return stime

    def getPidUser(pid):
        # Same header-skipping approach as above, for the 'USER' column.
        ps_cmd = module.get_bin_path('ps', True)
        rc, ps_output, stderr = module.run_command([ps_cmd, '-o', 'user', '-p', str(pid)])
        user = ''
        if rc == 0:
            for line in ps_output.splitlines():
                if line != 'USER':
                    user = line
        return user

    result = {
        'changed': False,
        'ansible_facts': {
            'tcp_listen': [],
            'udp_listen': [],
        },
    }

    try:
        netstat_cmd = module.get_bin_path('netstat', True)

        # which ports are listening for connections?
        rc, stdout, stderr = module.run_command([netstat_cmd, '-plunt'])
        if rc == 0:
            netstatOut = netStatParse(stdout)
            for p in netstatOut:
                # Enrich each listener with process start time and owner.
                p['stime'] = getPidSTime(p['pid'])
                p['user'] = getPidUser(p['pid'])
                if p['protocol'] == 'tcp':
                    result['ansible_facts']['tcp_listen'].append(p)
                elif p['protocol'] == 'udp':
                    result['ansible_facts']['udp_listen'].append(p)
    except (KeyError, EnvironmentError) as e:
        module.fail_json(msg=to_native(e))

    module.exit_json(**result)


if __name__ == '__main__':
    main()

View file

@ -0,0 +1,236 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: locale_gen
short_description: Creates or removes locales
description:
- Manages locales by editing /etc/locale.gen and invoking locale-gen.
author:
- Augustus Kling (@AugustusKling)
options:
name:
description:
- Name and encoding of the locale, such as "en_GB.UTF-8".
required: true
state:
description:
- Whether the locale shall be present.
choices: [ absent, present ]
default: present
'''
EXAMPLES = '''
- name: Ensure a locale exists
locale_gen:
name: de_CH.UTF-8
state: present
'''
import os
import re
from subprocess import Popen, PIPE, call
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
# Maps lower-cased encoding suffixes (as printed by `locale -a`) to the
# canonical spelling used in /etc/locale.gen, so the two can be compared.
LOCALE_NORMALIZATION = {
    ".utf8": ".UTF-8",
    ".eucjp": ".EUC-JP",
    ".iso885915": ".ISO-8859-15",
    ".cp1251": ".CP1251",
    ".koi8r": ".KOI8-R",
    ".armscii8": ".ARMSCII-8",
    ".euckr": ".EUC-KR",
    ".gbk": ".GBK",
    ".gb18030": ".GB18030",
    ".euctw": ".EUC-TW",
}
# ===========================================
# location module specific support methods.
#
def is_available(name, ubuntuMode):
    """Check if the given locale is available on the system. This is done by
    checking either :
    * if the locale is present in /etc/locale.gen
    * or if the locale is present in /usr/share/i18n/SUPPORTED"""
    if ubuntuMode:
        __regexp = r'^(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
        __locales_available = '/usr/share/i18n/SUPPORTED'
    else:
        # Non-Ubuntu files may comment out entries with a leading '#'.
        __regexp = r'^#{0,1}\s*(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
        __locales_available = '/etc/locale.gen'

    re_compiled = re.compile(__regexp)
    # 'with' guarantees the file is closed even on the early 'return True';
    # the previous code leaked the handle in that branch.
    with open(__locales_available, 'r') as fd:
        for line in fd:
            result = re_compiled.match(line)
            if result and result.group('locale') == name:
                return True
    return False
def is_present(name):
    """Checks if the given locale is currently installed."""
    raw_output = Popen(["locale", "-a"], stdout=PIPE).communicate()[0]
    installed = to_native(raw_output).splitlines()
    wanted = fix_case(name)
    return any(wanted == fix_case(candidate) for candidate in installed)
def fix_case(name):
    """locale -a might return the encoding in either lower or upper case.
    Passing through this function makes them uniform for comparisons."""
    for lowered, canonical in LOCALE_NORMALIZATION.items():
        name = name.replace(lowered, canonical)
    return name
def replace_line(existing_line, new_line):
    """Replace occurrences of *existing_line* with *new_line* in /etc/locale.gen.

    Uses context managers so the file is always closed; the old try/finally
    referenced 'f' before assignment whenever open() itself failed.
    """
    with open("/etc/locale.gen", "r") as f:
        lines = [line.replace(existing_line, new_line) for line in f]
    with open("/etc/locale.gen", "w") as f:
        f.write("".join(lines))
def set_locale(name, enabled=True):
    """ Sets the state of the locale. Defaults to enabled. """
    # NOTE(review): 'name' is interpolated unescaped into the regex; locale
    # names contain '.', which matches any character -- harmless in practice.
    search_string = r'#{0,1}\s*%s (?P<charset>.+)' % name
    if enabled:
        new_string = r'%s \g<charset>' % (name)
    else:
        new_string = r'# %s \g<charset>' % (name)
    # Context managers replace the old try/finally pairs, which referenced
    # 'f' before assignment whenever open() itself failed.
    with open("/etc/locale.gen", "r") as f:
        lines = [re.sub(search_string, new_string, line) for line in f]
    with open("/etc/locale.gen", "w") as f:
        f.write("".join(lines))
def apply_change(targetState, name):
    """Create or remove locale.

    Keyword arguments:
    targetState -- Desired state, either present or absent.
    name -- Name including encoding such as de_CH.UTF-8.
    """
    # Enable the /etc/locale.gen entry for 'present', comment it for 'absent'.
    set_locale(name, enabled=(targetState == "present"))

    localeGenExitValue = call("locale-gen")
    if localeGenExitValue != 0:
        raise EnvironmentError(localeGenExitValue, "locale.gen failed to execute, it returned " + str(localeGenExitValue))
def apply_change_ubuntu(targetState, name):
    """Create or remove locale.

    Keyword arguments:
    targetState -- Desired state, either present or absent.
    name -- Name including encoding such as de_CH.UTF-8.
    """
    if targetState == "present":
        # Create locale.
        # Ubuntu's patched locale-gen automatically adds the new locale to /var/lib/locales/supported.d/local
        localeGenExitValue = call(["locale-gen", name])
    else:
        # Delete locale involves discarding the locale from /var/lib/locales/supported.d/local and regenerating all locales.
        try:
            f = open("/var/lib/locales/supported.d/local", "r")
            content = f.readlines()
        finally:
            f.close()
        try:
            f = open("/var/lib/locales/supported.d/local", "w")
            for line in content:
                # Lines look like "<locale> <charset>"; keep everything except
                # the locale being removed.
                locale, charset = line.split(' ')
                if locale != name:
                    f.write(line)
        finally:
            f.close()
        # Purge locales and regenerate.
        # Please provide a patch if you know how to avoid regenerating the locales to keep!
        localeGenExitValue = call(["locale-gen", "--purge"])

    if localeGenExitValue != 0:
        raise EnvironmentError(localeGenExitValue, "locale.gen failed to execute, it returned " + str(localeGenExitValue))
def main():
    """Ansible module entry point: ensure a locale is generated or removed.

    Detects whether the host uses the common /etc/locale.gen mechanism or
    Ubuntu's /var/lib/locales/supported.d/ layout, compares current and
    desired state, applies the change (honouring check mode) and reports
    the result via exit_json/fail_json.
    """
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True),
            state=dict(type='str', default='present', choices=['absent', 'present']),
        ),
        supports_check_mode=True,
    )
    name = module.params['name']
    state = module.params['state']
    # Decide which locale management scheme this host uses.
    if not os.path.exists("/etc/locale.gen"):
        if os.path.exists("/var/lib/locales/supported.d/"):
            # Ubuntu created its own system to manage locales.
            ubuntuMode = True
        else:
            module.fail_json(msg="/etc/locale.gen and /var/lib/locales/supported.d/local are missing. Is the package \"locales\" installed?")
    else:
        # We found the common way to manage locales.
        ubuntuMode = False
    if not is_available(name, ubuntuMode):
        module.fail_json(msg="The locale you've entered is not available "
                             "on your system.")
    if is_present(name):
        prev_state = "present"
    else:
        prev_state = "absent"
    # Work is only needed when current and desired state differ.
    changed = (prev_state != state)
    if module.check_mode:
        module.exit_json(changed=changed)
    else:
        if changed:
            try:
                if ubuntuMode is False:
                    apply_change(state, name)
                else:
                    apply_change_ubuntu(state, name)
            except EnvironmentError as e:
                module.fail_json(msg=to_native(e), exitValue=e.errno)
        module.exit_json(name=name, changed=changed, msg="OK")
# Invoke the module entry point when executed directly by Ansible.
if __name__ == '__main__':
    main()

View file

@ -0,0 +1,292 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, Alexander Bulimov <lazywolf0@gmail.com>
# Based on lvol module by Jeroen Hoekx <jeroen.hoekx@dsquare.be>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
author:
- Alexander Bulimov (@abulimov)
module: lvg
short_description: Configure LVM volume groups
description:
- This module creates, removes or resizes volume groups.
options:
vg:
description:
- The name of the volume group.
type: str
required: true
pvs:
description:
- List of comma-separated devices to use as physical devices in this volume group.
- Required when creating or resizing volume group.
- The module will take care of running pvcreate if needed.
type: list
pesize:
description:
- "The size of the physical extent. I(pesize) must be a power of 2 of at least 1 sector
(where the sector size is the largest sector size of the PVs currently used in the VG),
or at least 128KiB."
- Since Ansible 2.6, pesize can be optionally suffixed by a UNIT (k/K/m/M/g/G), default unit is megabyte.
type: str
default: "4"
pv_options:
description:
- Additional options to pass to C(pvcreate) when creating the volume group.
type: str
vg_options:
description:
- Additional options to pass to C(vgcreate) when creating the volume group.
type: str
state:
description:
- Control if the volume group exists.
type: str
choices: [ absent, present ]
default: present
force:
description:
- If C(yes), allows removal of a volume group that still contains logical volumes.
type: bool
default: no
seealso:
- module: filesystem
- module: lvol
- module: parted
notes:
- This module does not modify PE size for already present volume group.
'''
EXAMPLES = r'''
- name: Create a volume group on top of /dev/sda1 with physical extent size = 32MB
lvg:
vg: vg.services
pvs: /dev/sda1
pesize: 32
- name: Create a volume group on top of /dev/sdb with physical extent size = 128KiB
lvg:
vg: vg.services
pvs: /dev/sdb
pesize: 128K
# If, for example, we already have VG vg.services on top of /dev/sdb1,
# this VG will be extended by /dev/sdc5. Or if vg.services was created on
# top of /dev/sda5, we first extend it with /dev/sdb1 and /dev/sdc5,
# and then reduce by /dev/sda5.
- name: Create or resize a volume group on top of /dev/sdb1 and /dev/sdc5.
lvg:
vg: vg.services
pvs: /dev/sdb1,/dev/sdc5
- name: Remove a volume group with name vg.services
lvg:
vg: vg.services
state: absent
'''
import itertools
import os
from ansible.module_utils.basic import AnsibleModule
def parse_vgs(data):
    """Parse ';'-separated `vgs` output into a list of dicts.

    Each entry carries the VG 'name' plus integer 'pv_count' and 'lv_count'.
    """
    return [
        {
            'name': fields[0],
            'pv_count': int(fields[1]),
            'lv_count': int(fields[2]),
        }
        for fields in (raw.strip().split(';') for raw in data.splitlines())
    ]
def find_mapper_device_name(module, dm_device):
    """Translate a /dev/dm-N device into its /dev/mapper/<name> path.

    Runs `dmsetup info` to look up the device-mapper name; fails the module
    when the command errors out.
    """
    dmsetup = module.get_bin_path('dmsetup', True)
    rc, dm_name, err = module.run_command("%s info -C --noheadings -o name %s" % (dmsetup, dm_device))
    if rc != 0:
        module.fail_json(msg="Failed executing dmsetup command.", rc=rc, err=err)
    return '/dev/mapper/' + dm_name.rstrip()
def parse_pvs(module, data):
    """Parse ';'-separated `pvs` output into [{'name', 'vg_name'}, ...].

    Kernel /dev/dm-N names are resolved to their stable /dev/mapper paths.
    """
    entries = []
    for raw in data.splitlines():
        fields = raw.strip().split(';')
        device = fields[0]
        if device.startswith('/dev/dm-'):
            # Resolve dm-N names so they compare equal to user-supplied paths.
            device = find_mapper_device_name(module, device)
        entries.append({
            'name': device,
            'vg_name': fields[1],
        })
    return entries
def main():
    """Ansible module entry point: create, remove or resize an LVM volume group.

    Validates the requested devices, inspects the current PV/VG layout via
    pvs/vgs, then creates, removes, extends or reduces the volume group as
    needed (honouring check mode) and reports via exit_json/fail_json.
    """
    module = AnsibleModule(
        argument_spec=dict(
            vg=dict(type='str', required=True),
            pvs=dict(type='list'),
            pesize=dict(type='str', default='4'),
            pv_options=dict(type='str', default=''),
            vg_options=dict(type='str', default=''),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            force=dict(type='bool', default=False),
        ),
        supports_check_mode=True,
    )

    vg = module.params['vg']
    state = module.params['state']
    force = module.boolean(module.params['force'])
    pesize = module.params['pesize']
    pvoptions = module.params['pv_options'].split()
    vgoptions = module.params['vg_options'].split()

    dev_list = []
    if module.params['pvs']:
        dev_list = list(module.params['pvs'])
    elif state == 'present':
        module.fail_json(msg="No physical volumes given.")

    # LVM always uses real paths not symlinks so replace symlinks with actual path
    for idx, dev in enumerate(dev_list):
        dev_list[idx] = os.path.realpath(dev)

    if state == 'present':
        # check given devices
        for test_dev in dev_list:
            if not os.path.exists(test_dev):
                module.fail_json(msg="Device %s not found." % test_dev)

        # get pv list
        pvs_cmd = module.get_bin_path('pvs', True)
        if dev_list:
            # Select PVs matching either the given devices (by resolved or
            # original name) or the target VG, so membership can be checked.
            pvs_filter_pv_name = ' || '.join(
                'pv_name = {0}'.format(x)
                for x in itertools.chain(dev_list, module.params['pvs'])
            )
            pvs_filter_vg_name = 'vg_name = {0}'.format(vg)
            pvs_filter = "--select '{0} || {1}' ".format(pvs_filter_pv_name, pvs_filter_vg_name)
        else:
            pvs_filter = ''
        rc, current_pvs, err = module.run_command("%s --noheadings -o pv_name,vg_name --separator ';' %s" % (pvs_cmd, pvs_filter))
        if rc != 0:
            module.fail_json(msg="Failed executing pvs command.", rc=rc, err=err)

        # check pv for devices
        pvs = parse_pvs(module, current_pvs)
        used_pvs = [pv for pv in pvs if pv['name'] in dev_list and pv['vg_name'] and pv['vg_name'] != vg]
        if used_pvs:
            module.fail_json(msg="Device %s is already in %s volume group." % (used_pvs[0]['name'], used_pvs[0]['vg_name']))

    vgs_cmd = module.get_bin_path('vgs', True)
    rc, current_vgs, err = module.run_command("%s --noheadings -o vg_name,pv_count,lv_count --separator ';'" % vgs_cmd)
    if rc != 0:
        module.fail_json(msg="Failed executing vgs command.", rc=rc, err=err)

    changed = False
    vgs = parse_vgs(current_vgs)
    for test_vg in vgs:
        if test_vg['name'] == vg:
            this_vg = test_vg
            break
    else:
        this_vg = None

    if this_vg is None:
        if state == 'present':
            # create VG
            if module.check_mode:
                changed = True
            else:
                # create PV
                pvcreate_cmd = module.get_bin_path('pvcreate', True)
                for current_dev in dev_list:
                    rc, _, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)])
                    if rc == 0:
                        changed = True
                    else:
                        module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err)
                # BUG FIX: pass required=True (as every other get_bin_path call
                # here does) so a missing vgcreate binary fails with a clear
                # message instead of yielding a None command.
                vgcreate_cmd = module.get_bin_path('vgcreate', True)
                rc, _, err = module.run_command([vgcreate_cmd] + vgoptions + ['-s', pesize, vg] + dev_list)
                if rc == 0:
                    changed = True
                else:
                    module.fail_json(msg="Creating volume group '%s' failed" % vg, rc=rc, err=err)
    else:
        if state == 'absent':
            if module.check_mode:
                module.exit_json(changed=True)
            else:
                if this_vg['lv_count'] == 0 or force:
                    # remove VG
                    vgremove_cmd = module.get_bin_path('vgremove', True)
                    rc, _, err = module.run_command("%s --force %s" % (vgremove_cmd, vg))
                    if rc == 0:
                        module.exit_json(changed=True)
                    else:
                        module.fail_json(msg="Failed to remove volume group %s" % (vg), rc=rc, err=err)
                else:
                    module.fail_json(msg="Refuse to remove non-empty volume group %s without force=yes" % (vg))

        # resize VG
        current_devs = [os.path.realpath(pv['name']) for pv in pvs if pv['vg_name'] == vg]
        devs_to_remove = list(set(current_devs) - set(dev_list))
        devs_to_add = list(set(dev_list) - set(current_devs))

        if devs_to_add or devs_to_remove:
            if module.check_mode:
                changed = True
            else:
                if devs_to_add:
                    devs_to_add_string = ' '.join(devs_to_add)
                    # create PV
                    pvcreate_cmd = module.get_bin_path('pvcreate', True)
                    for current_dev in devs_to_add:
                        rc, _, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)])
                        if rc == 0:
                            changed = True
                        else:
                            module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err)
                    # add PV to our VG
                    vgextend_cmd = module.get_bin_path('vgextend', True)
                    rc, _, err = module.run_command("%s %s %s" % (vgextend_cmd, vg, devs_to_add_string))
                    if rc == 0:
                        changed = True
                    else:
                        module.fail_json(msg="Unable to extend %s by %s." % (vg, devs_to_add_string), rc=rc, err=err)

                # remove some PV from our VG
                if devs_to_remove:
                    devs_to_remove_string = ' '.join(devs_to_remove)
                    vgreduce_cmd = module.get_bin_path('vgreduce', True)
                    rc, _, err = module.run_command("%s --force %s %s" % (vgreduce_cmd, vg, devs_to_remove_string))
                    if rc == 0:
                        changed = True
                    else:
                        module.fail_json(msg="Unable to reduce %s by %s." % (vg, devs_to_remove_string), rc=rc, err=err)

    module.exit_json(changed=changed)
# Invoke the module entry point when executed directly by Ansible.
if __name__ == '__main__':
    main()

View file

@ -0,0 +1,557 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, Jeroen Hoekx <jeroen.hoekx@dsquare.be>, Alexander Bulimov <lazywolf0@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
author:
- Jeroen Hoekx (@jhoekx)
- Alexander Bulimov (@abulimov)
module: lvol
short_description: Configure LVM logical volumes
description:
- This module creates, removes or resizes logical volumes.
options:
vg:
description:
- The volume group this logical volume is part of.
lv:
description:
- The name of the logical volume.
size:
description:
- The size of the logical volume, according to lvcreate(8) --size, by
default in megabytes or optionally with one of [bBsSkKmMgGtTpPeE] units; or
according to lvcreate(8) --extents as a percentage of [VG|PVS|FREE];
Float values must begin with a digit.
Resizing using percentage values was not supported prior to 2.1.
state:
description:
- Control if the logical volume exists. If C(present) and the
volume does not already exist then the C(size) option is required.
choices: [ absent, present ]
default: present
active:
description:
- Whether the volume is active and visible to the host.
type: bool
default: 'yes'
force:
description:
- Shrink or remove operations of volumes require this switch. Ensures that
  filesystems never get corrupted or destroyed by mistake.
type: bool
default: 'no'
opts:
description:
- Free-form options to be passed to the lvcreate command.
snapshot:
description:
- The name of the snapshot volume
pvs:
description:
- Comma separated list of physical volumes (e.g. /dev/sda,/dev/sdb).
thinpool:
description:
- The thin pool volume name. When you want to create a thin provisioned volume, specify a thin pool volume name.
shrink:
description:
- Shrink if current size is higher than size requested.
type: bool
default: 'yes'
resizefs:
description:
- Resize the underlying filesystem together with the logical volume.
type: bool
default: 'no'
notes:
- You must specify lv (when managing the state of logical volumes) or thinpool (when managing a thin provisioned volume).
'''
EXAMPLES = '''
- name: Create a logical volume of 512m
lvol:
vg: firefly
lv: test
size: 512
- name: Create a logical volume of 512m with disks /dev/sda and /dev/sdb
lvol:
vg: firefly
lv: test
size: 512
pvs: /dev/sda,/dev/sdb
- name: Create cache pool logical volume
lvol:
vg: firefly
lv: lvcache
size: 512m
opts: --type cache-pool
- name: Create a logical volume of 512g.
lvol:
vg: firefly
lv: test
size: 512g
- name: Create a logical volume the size of all remaining space in the volume group
lvol:
vg: firefly
lv: test
size: 100%FREE
- name: Create a logical volume with special options
lvol:
vg: firefly
lv: test
size: 512g
opts: -r 16
- name: Extend the logical volume to 1024m.
lvol:
vg: firefly
lv: test
size: 1024
- name: Extend the logical volume to consume all remaining space in the volume group
lvol:
vg: firefly
lv: test
size: +100%FREE
- name: Extend the logical volume to take all remaining space of the PVs and resize the underlying filesystem
lvol:
vg: firefly
lv: test
size: 100%PVS
resizefs: true
- name: Resize the logical volume to % of VG
lvol:
vg: firefly
lv: test
size: 80%VG
force: yes
- name: Reduce the logical volume to 512m
lvol:
vg: firefly
lv: test
size: 512
force: yes
- name: Set the logical volume to 512m and do not try to shrink if size is lower than current one
lvol:
vg: firefly
lv: test
size: 512
shrink: no
- name: Remove the logical volume.
lvol:
vg: firefly
lv: test
state: absent
force: yes
- name: Create a snapshot volume of the test logical volume.
lvol:
vg: firefly
lv: test
snapshot: snap1
size: 100m
- name: Deactivate a logical volume
lvol:
vg: firefly
lv: test
active: false
- name: Create a deactivated logical volume
lvol:
vg: firefly
lv: test
size: 512g
active: false
- name: Create a thin pool of 512g
lvol:
vg: firefly
thinpool: testpool
size: 512g
- name: Create a thin volume of 128g
lvol:
vg: firefly
lv: test
thinpool: testpool
size: 128g
'''
import re
from ansible.module_utils.basic import AnsibleModule
# Environment overrides applied to every run_command invocation in this
# module, so lvm tool output/error strings stay in English and parseable.
LVOL_ENV_VARS = dict(
    # make sure we use the C locale when running lvol-related commands
    LANG='C',
    LC_ALL='C',
    LC_MESSAGES='C',
    LC_CTYPE='C',
)
def mkversion(major, minor, patch):
    """Pack a dotted major.minor.patch version into one comparable integer."""
    return int(patch) + 1000 * (int(minor) + 1000 * int(major))
def parse_lvs(data):
    """Parse ';'-separated `lvs` output into a list of dicts.

    Each entry has 'name' (with lvs' hidden-LV brackets stripped), float
    'size', and booleans 'active', 'thinpool' and 'thinvol' decoded from
    the lv_attr field.
    """
    result = []
    for raw in data.splitlines():
        fields = raw.strip().split(';')
        attrs = fields[2]
        result.append({
            'name': fields[0].replace('[', '').replace(']', ''),
            'size': float(fields[1]),
            'active': attrs[4] == 'a',
            'thinpool': attrs[0] == 't',
            'thinvol': attrs[0] == 'V',
        })
    return result
def parse_vgs(data):
    """Parse ';'-separated `vgs` output into a list of dicts.

    Each entry has the VG 'name' plus float 'size', 'free' and 'ext_size'.
    """
    return [
        {
            'name': fields[0],
            'size': float(fields[1]),
            'free': float(fields[2]),
            'ext_size': float(fields[3]),
        }
        for fields in (raw.strip().split(';') for raw in data.splitlines())
    ]
def get_lvm_version(module):
    """Return the installed LVM version packed via mkversion, or None.

    None is returned when `lvm version` fails or its output cannot be parsed.
    """
    lvm_bin = module.get_bin_path("lvm", required=True)
    rc, out, err = module.run_command("%s version" % (lvm_bin))
    if rc != 0:
        return None
    match = re.search(r"LVM version:\s+(\d+)\.(\d+)\.(\d+).*(\d{4}-\d{2}-\d{2})", out)
    return mkversion(*match.groups()[:3]) if match else None
def main():
    """Ansible module entry point: create, remove or resize an LVM logical volume.

    Parses the size specification (absolute with unit, or percentage of
    VG/PVS/FREE), inspects the current VG/LV state via vgs/lvs, then creates,
    removes or resizes the LV — including snapshots and thin pools/volumes —
    and finally (de)activates it as requested.  Results are reported via
    exit_json/fail_json; check mode is honoured through lvm's --test flag.
    """
    module = AnsibleModule(
        argument_spec=dict(
            vg=dict(type='str', required=True),
            lv=dict(type='str'),
            size=dict(type='str'),
            opts=dict(type='str'),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            force=dict(type='bool', default=False),
            shrink=dict(type='bool', default=True),
            active=dict(type='bool', default=True),
            snapshot=dict(type='str'),
            pvs=dict(type='str'),
            resizefs=dict(type='bool', default=False),
            thinpool=dict(type='str'),
        ),
        supports_check_mode=True,
        required_one_of=(
            ['lv', 'thinpool'],
        ),
    )

    # Force the C locale so lvm output parsing below is reliable.
    module.run_command_environ_update = LVOL_ENV_VARS

    # Determine if the "--yes" option should be used
    version_found = get_lvm_version(module)
    if version_found is None:
        module.fail_json(msg="Failed to get LVM version number")
    version_yesopt = mkversion(2, 2, 99)  # First LVM with the "--yes" option
    if version_found >= version_yesopt:
        yesopt = "--yes"
    else:
        yesopt = ""

    vg = module.params['vg']
    lv = module.params['lv']
    size = module.params['size']
    opts = module.params['opts']
    state = module.params['state']
    force = module.boolean(module.params['force'])
    shrink = module.boolean(module.params['shrink'])
    active = module.boolean(module.params['active'])
    resizefs = module.boolean(module.params['resizefs'])
    thinpool = module.params['thinpool']
    size_opt = 'L'
    size_unit = 'm'
    snapshot = module.params['snapshot']
    pvs = module.params['pvs']

    if pvs is None:
        pvs = ""
    else:
        # lvcreate/lvextend take space-separated device lists.
        pvs = pvs.replace(",", " ")

    if opts is None:
        opts = ""

    # Add --test option when running in check-mode
    if module.check_mode:
        test_opt = ' --test'
    else:
        test_opt = ''

    if size:
        # LVCREATE(8) -l --extents option with percentage
        if '%' in size:
            size_parts = size.split('%', 1)
            size_percent = int(size_parts[0])
            if size_percent > 100:
                module.fail_json(msg="Size percentage cannot be larger than 100%")
            size_whole = size_parts[1]
            if size_whole == 'ORIGIN':
                module.fail_json(msg="Snapshot Volumes are not supported")
            elif size_whole not in ['VG', 'PVS', 'FREE']:
                module.fail_json(msg="Specify extents as a percentage of VG|PVS|FREE")
            size_opt = 'l'
            size_unit = ''

        if '%' not in size:
            # LVCREATE(8) -L --size option unit
            if size[-1].lower() in 'bskmgtpe':
                size_unit = size[-1].lower()
                size = size[0:-1]

            try:
                # Reject non-numeric sizes and leading signs/dots.
                float(size)
                if not size[0].isdigit():
                    raise ValueError()
            except ValueError:
                module.fail_json(msg="Bad size specification of '%s'" % size)

    # when no unit, megabytes by default
    if size_opt == 'l':
        unit = 'm'
    else:
        unit = size_unit

    # Get information on volume group requested
    vgs_cmd = module.get_bin_path("vgs", required=True)
    rc, current_vgs, err = module.run_command(
        "%s --noheadings --nosuffix -o vg_name,size,free,vg_extent_size --units %s --separator ';' %s" % (vgs_cmd, unit, vg))

    if rc != 0:
        if state == 'absent':
            module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg)
        else:
            module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err)

    vgs = parse_vgs(current_vgs)
    this_vg = vgs[0]

    # Get information on logical volume requested
    lvs_cmd = module.get_bin_path("lvs", required=True)
    rc, current_lvs, err = module.run_command(
        "%s -a --noheadings --nosuffix -o lv_name,size,lv_attr --units %s --separator ';' %s" % (lvs_cmd, unit, vg))

    if rc != 0:
        if state == 'absent':
            module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg)
        else:
            module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err)

    changed = False

    lvs = parse_lvs(current_lvs)

    # Work out which LV name to look for below (snapshot name, thin volume,
    # thin pool or plain LV), validating pre-conditions along the way.
    if snapshot:
        # Check snapshot pre-conditions
        for test_lv in lvs:
            if test_lv['name'] == lv or test_lv['name'] == thinpool:
                if not test_lv['thinpool'] and not thinpool:
                    break
                else:
                    module.fail_json(msg="Snapshots of thin pool LVs are not supported.")
        else:
            module.fail_json(msg="Snapshot origin LV %s does not exist in volume group %s." % (lv, vg))
        check_lv = snapshot
    elif thinpool:
        if lv:
            # Check thin volume pre-conditions
            for test_lv in lvs:
                if test_lv['name'] == thinpool:
                    break
            else:
                module.fail_json(msg="Thin pool LV %s does not exist in volume group %s." % (thinpool, vg))
            check_lv = lv
        else:
            check_lv = thinpool
    else:
        check_lv = lv

    for test_lv in lvs:
        if test_lv['name'] in (check_lv, check_lv.rsplit('/', 1)[-1]):
            this_lv = test_lv
            break
    else:
        this_lv = None

    msg = ''
    if this_lv is None:
        if state == 'present':
            # Require size argument except for snapshot of thin volumes
            if (lv or thinpool) and not size:
                for test_lv in lvs:
                    if test_lv['name'] == lv and test_lv['thinvol'] and snapshot:
                        break
                else:
                    module.fail_json(msg="No size given.")

            # create LV
            lvcreate_cmd = module.get_bin_path("lvcreate", required=True)
            if snapshot is not None:
                if size:
                    cmd = "%s %s %s -%s %s%s -s -n %s %s %s/%s" % (lvcreate_cmd, test_opt, yesopt, size_opt, size, size_unit, snapshot, opts, vg, lv)
                else:
                    cmd = "%s %s %s -s -n %s %s %s/%s" % (lvcreate_cmd, test_opt, yesopt, snapshot, opts, vg, lv)
            elif thinpool and lv:
                if size_opt == 'l':
                    module.fail_json(changed=False, msg="Thin volume sizing with percentage not supported.")
                size_opt = 'V'
                cmd = "%s %s -n %s -%s %s%s %s -T %s/%s" % (lvcreate_cmd, yesopt, lv, size_opt, size, size_unit, opts, vg, thinpool)
            elif thinpool and not lv:
                cmd = "%s %s -%s %s%s %s -T %s/%s" % (lvcreate_cmd, yesopt, size_opt, size, size_unit, opts, vg, thinpool)
            else:
                cmd = "%s %s %s -n %s -%s %s%s %s %s %s" % (lvcreate_cmd, test_opt, yesopt, lv, size_opt, size, size_unit, opts, vg, pvs)
            rc, _, err = module.run_command(cmd)
            if rc == 0:
                changed = True
            else:
                module.fail_json(msg="Creating logical volume '%s' failed" % lv, rc=rc, err=err)
    else:
        if state == 'absent':
            # remove LV
            if not force:
                module.fail_json(msg="Sorry, no removal of logical volume %s without force=yes." % (this_lv['name']))
            lvremove_cmd = module.get_bin_path("lvremove", required=True)
            rc, _, err = module.run_command("%s %s --force %s/%s" % (lvremove_cmd, test_opt, vg, this_lv['name']))
            if rc == 0:
                module.exit_json(changed=True)
            else:
                module.fail_json(msg="Failed to remove logical volume %s" % (lv), rc=rc, err=err)

        elif not size:
            # Nothing to resize; fall through to the activation handling below.
            pass

        elif size_opt == 'l':
            # Resize LV based on % value
            tool = None
            size_free = this_vg['free']
            if size_whole == 'VG' or size_whole == 'PVS':
                size_requested = size_percent * this_vg['size'] / 100
            else:  # size_whole == 'FREE':
                size_requested = size_percent * this_vg['free'] / 100

            if '+' in size:
                # A leading '+' means grow by the requested amount.
                size_requested += this_lv['size']

            if this_lv['size'] < size_requested:
                if (size_free > 0) and (('+' not in size) or (size_free >= (size_requested - this_lv['size']))):
                    tool = module.get_bin_path("lvextend", required=True)
                else:
                    module.fail_json(
                        msg="Logical Volume %s could not be extended. Not enough free space left (%s%s required / %s%s available)" %
                            (this_lv['name'], (size_requested - this_lv['size']), unit, size_free, unit)
                    )
            elif shrink and this_lv['size'] > size_requested + this_vg['ext_size']:  # more than an extent too large
                if size_requested == 0:
                    module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name']))
                elif not force:
                    module.fail_json(msg="Sorry, no shrinking of %s without force=yes" % (this_lv['name']))
                else:
                    tool = module.get_bin_path("lvreduce", required=True)
                    tool = '%s %s' % (tool, '--force')

            if tool:
                if resizefs:
                    tool = '%s %s' % (tool, '--resizefs')
                cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs)
                rc, out, err = module.run_command(cmd)
                if "Reached maximum COW size" in out:
                    module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out)
                elif rc == 0:
                    changed = True
                    msg = "Volume %s resized to %s%s" % (this_lv['name'], size_requested, unit)
                elif "matches existing size" in err:
                    module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
                elif "not larger than existing size" in err:
                    module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err)
                else:
                    module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)

        else:
            # resize LV based on absolute values
            tool = None
            if float(size) > this_lv['size']:
                tool = module.get_bin_path("lvextend", required=True)
            elif shrink and float(size) < this_lv['size']:
                if float(size) == 0:
                    module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name']))
                if not force:
                    module.fail_json(msg="Sorry, no shrinking of %s without force=yes." % (this_lv['name']))
                else:
                    tool = module.get_bin_path("lvreduce", required=True)
                    tool = '%s %s' % (tool, '--force')

            if tool:
                if resizefs:
                    tool = '%s %s' % (tool, '--resizefs')
                cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs)
                rc, out, err = module.run_command(cmd)
                if "Reached maximum COW size" in out:
                    module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out)
                elif rc == 0:
                    changed = True
                elif "matches existing size" in err:
                    module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
                elif "not larger than existing size" in err:
                    module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err)
                else:
                    module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)

    # Finally (de)activate the LV if it exists; these branches exit the module.
    if this_lv is not None:
        if active:
            lvchange_cmd = module.get_bin_path("lvchange", required=True)
            rc, _, err = module.run_command("%s -ay %s/%s" % (lvchange_cmd, vg, this_lv['name']))
            if rc == 0:
                module.exit_json(changed=((not this_lv['active']) or changed), vg=vg, lv=this_lv['name'], size=this_lv['size'])
            else:
                module.fail_json(msg="Failed to activate logical volume %s" % (lv), rc=rc, err=err)
        else:
            lvchange_cmd = module.get_bin_path("lvchange", required=True)
            rc, _, err = module.run_command("%s -an %s/%s" % (lvchange_cmd, vg, this_lv['name']))
            if rc == 0:
                module.exit_json(changed=(this_lv['active'] or changed), vg=vg, lv=this_lv['name'], size=this_lv['size'])
            else:
                module.fail_json(msg="Failed to deactivate logical volume %s" % (lv), rc=rc, err=err)

    module.exit_json(changed=changed, msg=msg)
# Invoke the module entry point when executed directly by Ansible.
if __name__ == '__main__':
    main()

View file

@ -0,0 +1,168 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Linus Unnebäck <linus@folkdatorn.se>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: make
short_description: Run targets in a Makefile
requirements:
- make
author: Linus Unnebäck (@LinusU) <linus@folkdatorn.se>
description:
- Run targets in a Makefile.
options:
target:
description:
- The target to run.
- Typically this would be something like C(install), C(test) or C(all).
type: str
params:
description:
- Any extra parameters to pass to make.
type: dict
chdir:
description:
- Change to this directory before running make.
type: path
required: true
file:
description:
- Use a custom Makefile.
type: path
'''
EXAMPLES = r'''
- name: Build the default target
make:
chdir: /home/ubuntu/cool-project
- name: Run 'install' target as root
make:
chdir: /home/ubuntu/cool-project
target: install
become: yes
- name: Build 'all' target with extra arguments
make:
chdir: /home/ubuntu/cool-project
target: all
params:
NUM_THREADS: 4
BACKEND: lapack
- name: Build 'all' target with a custom Makefile
make:
chdir: /home/ubuntu/cool-project
target: all
file: /some-project/Makefile
'''
RETURN = r'''# '''
from ansible.module_utils.six import iteritems
from ansible.module_utils.basic import AnsibleModule
def run_command(command, module, check_rc=True):
    """
    Run a command in the module's chdir directory and return the
    result code plus sanitized std{out,err} content.

    :param command: list of command arguments
    :param module: Ansible make module instance
    :return: return code, stdout content, stderr content
    """
    result = module.run_command(command, check_rc=check_rc, cwd=module.params['chdir'])
    rc = result[0]
    return rc, sanitize_output(result[1]), sanitize_output(result[2])
def sanitize_output(output):
    """
    Sanitize command output before it reaches module.fail_json:
    None becomes the empty string, otherwise trailing CR/LF
    characters are stripped.

    :param output: output to sanitize
    :return: sanitized output
    """
    return '' if output is None else output.rstrip("\r\n")
def main():
    """Ansible module entry point: run a make target and report change status.

    Uses `make --question` to detect whether the target is already up to
    date (also serving as the check-mode answer), and only runs the real
    build when it is not.
    """
    module = AnsibleModule(
        argument_spec=dict(
            target=dict(type='str'),
            params=dict(type='dict'),
            chdir=dict(type='path', required=True),
            file=dict(type='path'),
        ),
        supports_check_mode=True,
    )
    # Build up the invocation of `make` we are going to use
    # For non-Linux OSes, prefer gmake (GNU make) over make
    make_path = module.get_bin_path('gmake', required=False)
    if not make_path:
        # Fall back to system make
        make_path = module.get_bin_path('make', required=True)
    make_target = module.params['target']
    if module.params['params'] is not None:
        make_parameters = [k + '=' + str(v) for k, v in iteritems(module.params['params'])]
    else:
        make_parameters = []
    if module.params['file'] is not None:
        base_command = [make_path, "-f", module.params['file']]
    else:
        base_command = [make_path]
    # BUG FIX: only append the target when one was given. The original
    # appended it unconditionally, so omitting `target` (as the documented
    # "Build the default target" example does) put None into the argv list.
    if make_target is not None:
        base_command.append(make_target)
    base_command.extend(make_parameters)
    # Check if the target is already up to date
    rc, out, err = run_command(base_command + ['--question'], module, check_rc=False)
    if module.check_mode:
        # If we've been asked to do a dry run, we only need
        # to report whether or not the target is up to date
        changed = (rc != 0)
    else:
        if rc == 0:
            # The target is up to date, so we don't have to
            # do anything
            changed = False
        else:
            # The target isn't up to date, so we need to run it
            rc, out, err = run_command(base_command, module,
                                       check_rc=True)
            changed = True
    # We don't report the return code, as if this module failed
    # we would be calling fail_json from run_command, so even if
    # we had a non-zero return code, we did not fail. However, if
    # we report a non-zero return code here, we will be marked as
    # failed regardless of what we signal using the failed= kwarg.
    module.exit_json(
        changed=changed,
        failed=False,
        stdout=out,
        stderr=err,
        target=module.params['target'],
        params=module.params['params'],
        chdir=module.params['chdir'],
        file=module.params['file']
    )
# Invoke the module entry point when executed directly by Ansible.
if __name__ == '__main__':
    main()

View file

@ -0,0 +1,208 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Kairo Araujo <kairo@kairo.eti.br>
# GNU General Public License v3.0+ (see COPYING or
# https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
author: Kairo Araujo (@kairoaraujo)
module: mksysb
short_description: Generates AIX mksysb rootvg backups.
description:
- This module manages a basic AIX mksysb (image) of rootvg.
options:
backup_crypt_files:
description:
- Backup encrypted files.
type: bool
default: "yes"
backup_dmapi_fs:
description:
- Back up DMAPI filesystem files.
type: bool
default: "yes"
create_map_files:
description:
- Creates a new MAP files.
type: bool
default: "no"
exclude_files:
description:
- Excludes files using C(/etc/rootvg.exclude).
type: bool
default: "no"
exclude_wpar_files:
description:
- Excludes WPAR files.
type: bool
default: "no"
extended_attrs:
description:
- Backup extended attributes.
type: bool
default: "yes"
name:
description:
- Backup name
required: true
new_image_data:
description:
- Creates a new file data.
type: bool
default: "yes"
software_packing:
description:
- Exclude files from packing option listed in
C(/etc/exclude_packing.rootvg).
type: bool
default: "no"
storage_path:
description:
- Storage path where the mksysb backup will be stored.
required: true
use_snapshot:
description:
- Creates backup using snapshots.
type: bool
default: "no"
'''
EXAMPLES = '''
- name: Running a backup image mksysb
mksysb:
name: myserver
storage_path: /repository/images
exclude_files: yes
exclude_wpar_files: yes
'''
RETURN = '''
changed:
description: Return changed for mksysb actions as true or false.
returned: always
type: bool
version_added: 2.5
msg:
description: Return message regarding the action.
returned: always
type: str
version_added: 2.5
'''
from ansible.module_utils.basic import AnsibleModule
import os
def main():
    """Generate an AIX mksysb (rootvg image) backup.

    Translates each boolean module parameter into the matching mksysb
    command-line flag, then runs mksysb storing the image as
    <storage_path>/<name>.  Always reports changed=True on success
    (a new backup image is produced on every run).
    """
    module = AnsibleModule(
        argument_spec=dict(
            backup_crypt_files=dict(type='bool', default=True),
            backup_dmapi_fs=dict(type='bool', default=True),
            create_map_files=dict(type='bool', default=False),
            exclude_files=dict(type='bool', default=False),
            exclude_wpar_files=dict(type='bool', default=False),
            extended_attrs=dict(type='bool', default=True),
            name=dict(required=True),
            new_image_data=dict(type='bool', default=True),
            software_packing=dict(type='bool', default=False),
            storage_path=dict(required=True),
            use_snapshot=dict(type='bool', default=False)
        ),
        supports_check_mode=True,
    )
    # Command options.
    # Each dict maps a boolean parameter to the mksysb flag fragment it
    # contributes.  For software_packing, extended_attrs and
    # backup_crypt_files the flag is emitted when the parameter is False,
    # because those behaviours are on by default in mksysb.
    map_file_opt = {
        True: '-m',
        False: ''
    }
    use_snapshot_opt = {
        True: '-T',
        False: ''
    }
    exclude_files_opt = {
        True: '-e',
        False: ''
    }
    exclude_wpar_opt = {
        True: '-G',
        False: ''
    }
    new_image_data_opt = {
        True: '-i',
        False: ''
    }
    soft_packing_opt = {
        True: '',
        False: '-p'
    }
    extend_attr_opt = {
        True: '',
        False: '-a'
    }
    crypt_files_opt = {
        True: '',
        False: '-Z'
    }
    # NOTE(review): '-a' is used both to disable extended attributes and to
    # enable DMAPI filesystem backup — confirm against the mksysb man page.
    dmapi_fs_opt = {
        True: '-a',
        False: ''
    }
    # Resolve each parameter to its command-line fragment.
    backup_crypt_files = crypt_files_opt[module.params['backup_crypt_files']]
    backup_dmapi_fs = dmapi_fs_opt[module.params['backup_dmapi_fs']]
    create_map_files = map_file_opt[module.params['create_map_files']]
    exclude_files = exclude_files_opt[module.params['exclude_files']]
    exclude_wpar_files = exclude_wpar_opt[module.params['exclude_wpar_files']]
    extended_attrs = extend_attr_opt[module.params['extended_attrs']]
    name = module.params['name']
    new_image_data = new_image_data_opt[module.params['new_image_data']]
    software_packing = soft_packing_opt[module.params['software_packing']]
    storage_path = module.params['storage_path']
    use_snapshot = use_snapshot_opt[module.params['use_snapshot']]
    # Validate if storage_path is a valid directory.
    if os.path.isdir(storage_path):
        if not module.check_mode:
            # Generates the mksysb image backup.
            mksysb_cmd = module.get_bin_path('mksysb', True)
            rc, mksysb_output, err = module.run_command(
                "%s -X %s %s %s %s %s %s %s %s %s %s/%s" % (
                    mksysb_cmd, create_map_files, use_snapshot, exclude_files,
                    exclude_wpar_files, software_packing, extended_attrs,
                    backup_crypt_files, backup_dmapi_fs, new_image_data,
                    storage_path, name))
            if rc == 0:
                module.exit_json(changed=True, msg=mksysb_output)
            else:
                module.fail_json(msg="mksysb failed.", rc=rc, err=err)
        # Check mode: a backup would be generated, so predict a change.
        module.exit_json(changed=True)
    else:
        module.fail_json(msg="Storage path %s is not valid." % storage_path)


if __name__ == '__main__':
    main()

View file

@ -0,0 +1,128 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, David Stygstra <david.stygstra@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: modprobe
short_description: Load or unload kernel modules
author:
- David Stygstra (@stygstra)
- Julien Dauphant (@jdauphant)
- Matt Jeffery (@mattjeffery)
description:
- Load or unload kernel modules.
options:
name:
required: true
description:
- Name of kernel module to manage.
state:
description:
- Whether the module should be present or absent.
choices: [ absent, present ]
default: present
params:
description:
- Modules parameters.
default: ''
'''
EXAMPLES = '''
- name: Add the 802.1q module
modprobe:
name: 8021q
state: present
- name: Add the dummy module
modprobe:
name: dummy
state: present
params: 'numdummies=2'
'''
import os.path
import shlex
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
def main():
    """Load or unload a kernel module, reporting whether a change was made.

    Presence is determined from /proc/modules (loaded modules) with a
    fallback to the kernel's modules.builtin list, so a builtin module is
    correctly treated as already present.
    """
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            params=dict(type='str', default=''),
        ),
        supports_check_mode=True,
    )
    name = module.params['name']
    params = module.params['params']
    state = module.params['state']
    # FIXME: Adding all parameters as result values is useless
    result = dict(
        changed=False,
        name=name,
        params=params,
        state=state,
    )
    # Check if module is present
    try:
        present = False
        # /proc/modules lists loaded modules, with '-' normalized to '_'.
        with open('/proc/modules') as modules:
            module_name = name.replace('-', '_') + ' '
            for line in modules:
                if line.startswith(module_name):
                    present = True
                    break
        if not present:
            # Not loaded — it may still be built into the running kernel.
            command = [module.get_bin_path('uname', True), '-r']
            rc, uname_kernel_release, err = module.run_command(command)
            module_file = '/' + name + '.ko'
            builtin_path = os.path.join('/lib/modules/', uname_kernel_release.strip(),
                                        'modules.builtin')
            with open(builtin_path) as builtins:
                for line in builtins:
                    # rstrip() the trailing newline: file iteration keeps it,
                    # so endswith() could otherwise never match and builtin
                    # modules would always be reported absent.
                    if line.rstrip().endswith(module_file):
                        present = True
                        break
    except IOError as e:
        module.fail_json(msg=to_native(e), exception=traceback.format_exc(), **result)
    # Add/remove module as needed
    if state == 'present':
        if not present:
            if not module.check_mode:
                command = [module.get_bin_path('modprobe', True), name]
                command.extend(shlex.split(params))
                rc, out, err = module.run_command(command)
                if rc != 0:
                    module.fail_json(msg=err, rc=rc, stdout=out, stderr=err, **result)
            result['changed'] = True
    elif state == 'absent':
        if present:
            if not module.check_mode:
                rc, out, err = module.run_command([module.get_bin_path('modprobe', True), '-r', name])
                if rc != 0:
                    module.fail_json(msg=err, rc=rc, stdout=out, stderr=err, **result)
            result['changed'] = True
    module.exit_json(**result)


if __name__ == '__main__':
    main()

View file

@ -0,0 +1,505 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Thomas Caravia <taca@kadisius.eu>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nosh
author:
- "Thomas Caravia (@tacatac)"
short_description: Manage services with nosh
description:
- Control running and enabled state for system-wide or user services.
- BSD and Linux systems are supported.
options:
name:
required: true
description:
- Name of the service to manage.
state:
required: false
choices: [ started, stopped, reset, restarted, reloaded ]
description:
- C(started)/C(stopped) are idempotent actions that will not run
commands unless necessary.
C(restarted) will always bounce the service.
C(reloaded) will send a SIGHUP or start the service.
C(reset) will start or stop the service according to whether it is
enabled or not.
enabled:
required: false
type: bool
description:
- Enable or disable the service, independently of C(*.preset) file
preference or running state. Mutually exclusive with I(preset). Will take
effect prior to I(state=reset).
preset:
required: false
type: bool
description:
- Enable or disable the service according to local preferences in *.preset files.
Mutually exclusive with I(enabled). Only has an effect if set to true. Will take
effect prior to I(state=reset).
user:
required: false
default: 'no'
type: bool
description:
- Run system-control talking to the calling user's service manager, rather than
the system-wide service manager.
requirements:
- A system with an active nosh service manager, see Notes for further information.
notes:
- Information on the nosh utilities suite may be found at U(https://jdebp.eu/Softwares/nosh/).
'''
EXAMPLES = '''
- name: start dnscache if not running
nosh: name=dnscache state=started
- name: stop mpd, if running
nosh: name=mpd state=stopped
- name: restart unbound or start it if not already running
nosh:
name: unbound
state: restarted
- name: reload fail2ban or start it if not already running
nosh:
name: fail2ban
state: reloaded
- name: disable nsd
nosh: name=nsd enabled=no
- name: for package installers, set nginx running state according to local enable settings, preset and reset
nosh: name=nginx preset=True state=reset
- name: reboot the host if nosh is the system manager, would need a "wait_for*" task at least, not recommended as-is
nosh: name=reboot state=started
- name: using conditionals with the module facts
tasks:
- name: obtain information on tinydns service
nosh: name=tinydns
register: result
- name: fail if service not loaded
fail: msg="The {{ result.name }} service is not loaded"
when: not result.status
- name: fail if service is running
fail: msg="The {{ result.name }} service is running"
when: result.status and result.status['DaemontoolsEncoreState'] == "running"
'''
RETURN = '''
name:
description: name used to find the service
returned: success
type: str
sample: "sshd"
service_path:
description: resolved path for the service
returned: success
type: str
sample: "/var/sv/sshd"
enabled:
description: whether the service is enabled at system bootstrap
returned: success
type: bool
sample: True
preset:
description: whether the enabled status reflects the one set in the relevant C(*.preset) file
returned: success
type: bool
sample: 'False'
state:
description: service process run state, C(None) if the service is not loaded and will not be started
returned: if state option is used
type: str
sample: "reloaded"
status:
description: a dictionary with the key=value pairs returned by `system-control show-json` or C(None) if the service is not loaded
returned: success
type: complex
contains:
After:
returned: success
type: list
sample: ["/etc/service-bundles/targets/basic","../sshdgenkeys", "log"]
Before:
returned: success
type: list
sample: ["/etc/service-bundles/targets/shutdown"]
Conflicts:
returned: success
type: list
sample: '[]'
DaemontoolsEncoreState:
returned: success
type: str
sample: "running"
DaemontoolsState:
returned: success
type: str
sample: "up"
Enabled:
returned: success
type: bool
sample: True
LogService:
returned: success
type: str
sample: "../cyclog@sshd"
MainPID:
returned: success
type: int
sample: 661
Paused:
returned: success
type: bool
sample: 'False'
ReadyAfterRun:
returned: success
type: bool
sample: 'False'
RemainAfterExit:
returned: success
type: bool
sample: 'False'
Required-By:
returned: success
type: list
sample: '[]'
RestartExitStatusCode:
returned: success
type: int
sample: '0'
RestartExitStatusNumber:
returned: success
type: int
sample: '0'
RestartTimestamp:
returned: success
type: int
sample: 4611686019935648081
RestartUTCTimestamp:
returned: success
type: int
sample: 1508260140
RunExitStatusCode:
returned: success
type: int
sample: '0'
RunExitStatusNumber:
returned: success
type: int
sample: '0'
RunTimestamp:
returned: success
type: int
sample: 4611686019935648081
RunUTCTimestamp:
returned: success
type: int
sample: 1508260140
StartExitStatusCode:
returned: success
type: int
sample: 1
StartExitStatusNumber:
returned: success
type: int
sample: '0'
StartTimestamp:
returned: success
type: int
sample: 4611686019935648081
StartUTCTimestamp:
returned: success
type: int
sample: 1508260140
StopExitStatusCode:
returned: success
type: int
sample: '0'
StopExitStatusNumber:
returned: success
type: int
sample: '0'
StopTimestamp:
returned: success
type: int
sample: 4611686019935648081
StopUTCTimestamp:
returned: success
type: int
sample: 1508260140
Stopped-By:
returned: success
type: list
sample: ["/etc/service-bundles/targets/shutdown"]
Timestamp:
returned: success
type: int
sample: 4611686019935648081
UTCTimestamp:
returned: success
type: int
sample: 1508260140
Want:
returned: success
type: str
sample: "nothing"
Wanted-By:
returned: success
type: list
sample: ["/etc/service-bundles/targets/server","/etc/service-bundles/targets/sockets"]
Wants:
returned: success
type: list
sample: ["/etc/service-bundles/targets/basic","../sshdgenkeys"]
user:
description: whether the user-level service manager is called
returned: success
type: bool
sample: False
'''
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.service import fail_if_missing
from ansible.module_utils._text import to_native
def run_sys_ctl(module, args):
    """Run system-control with *args*, targeting the user-level service
    manager when the 'user' module parameter is set."""
    command = [module.get_bin_path('system-control', required=True)]
    if module.params['user']:
        command.append('--user')
    return module.run_command(command + args)
def get_service_path(module, service):
    """Resolve *service* to its full bundle path via `system-control find`.

    Fails the module (through fail_if_missing) when the service cannot be
    found on the host; otherwise returns the path stripped of whitespace.
    """
    (rc, out, err) = run_sys_ctl(module, ['find', service])
    # fail if service not found
    if rc != 0:
        fail_if_missing(module, False, service, msg='host')
    else:
        return to_native(out).strip()
def service_is_enabled(module, service_path):
    """Return True when the service is enabled at system bootstrap."""
    rc, dummy_out, dummy_err = run_sys_ctl(module, ['is-enabled', service_path])
    return rc == 0
def service_is_preset_enabled(module, service_path):
    """Return True when the local *.preset files would enable this service."""
    dummy_rc, out, dummy_err = run_sys_ctl(module, ['preset', '--dry-run', service_path])
    verdict = to_native(out).strip()
    return verdict.startswith("enable")
def service_is_loaded(module, service_path):
    """Return True when the service is loaded (has a supervise directory)."""
    rc, dummy_out, dummy_err = run_sys_ctl(module, ['is-loaded', service_path])
    return rc == 0
def get_service_status(module, service_path):
    """Return the service's status dict from `system-control show-json`.

    Fails the module if anything was written to stderr, which happens
    when the service is not loaded.
    """
    (rc, out, err) = run_sys_ctl(module, ['show-json', service_path])
    # will fail if the service is not loaded
    if err is not None and err:
        module.fail_json(msg=err)
    else:
        json_out = json.loads(to_native(out).strip())
        status = json_out[service_path]  # descend past service path header
        return status
def service_is_running(service_status):
    """Return True when the status dict reports an active run state."""
    return service_status['DaemontoolsEncoreState'] in ('starting', 'started', 'running')
def handle_enabled(module, result, service_path):
    """Enable or disable a service as needed.

    - 'preset' will set the enabled state according to available preset file settings.
    - 'enabled' will set the enabled state explicitly, independently of preset settings.

    These options are set to "mutually exclusive" but the explicit 'enabled' option will
    have priority if the check is bypassed.

    Mutates result['changed'], result['enabled'] and result['preset'] in place;
    the enabled/preset facts must have been computed before this is called.
    """
    # computed prior in control flow
    preset = result['preset']
    enabled = result['enabled']
    # preset, effect only if option set to true (no reverse preset)
    if module.params['preset']:
        action = 'preset'
        # run preset if needed
        if preset != module.params['preset']:
            result['changed'] = True
            if not module.check_mode:
                (rc, out, err) = run_sys_ctl(module, [action, service_path])
                if rc != 0:
                    module.fail_json(msg="Unable to %s service %s: %s" % (action, service_path, out + err))
            # flip both facts: preset application also toggles enablement
            result['preset'] = not preset
            result['enabled'] = not enabled
    # enabled/disabled state
    if module.params['enabled'] is not None:
        if module.params['enabled']:
            action = 'enable'
        else:
            action = 'disable'
        # change enable/disable if needed
        if enabled != module.params['enabled']:
            result['changed'] = True
            if not module.check_mode:
                (rc, out, err) = run_sys_ctl(module, [action, service_path])
                if rc != 0:
                    module.fail_json(msg="Unable to %s service %s: %s" % (action, service_path, out + err))
            result['enabled'] = not enabled
            result['preset'] = not preset
def handle_state(module, result, service_path):
    """Set service running state as needed.

    Takes into account the fact that a service may not be loaded (no supervise directory) in
    which case it is 'stopped' as far as the service manager is concerned. No status information
    can be obtained and the service can only be 'started'.

    Mutates result['state'] (and possibly result['status'] and
    result['changed']) in place.
    """
    # default to desired state, no action
    result['state'] = module.params['state']
    state = module.params['state']
    action = None
    # computed prior in control flow, possibly modified by handle_enabled()
    enabled = result['enabled']
    # service not loaded -> not started by manager, no status information
    if not service_is_loaded(module, service_path):
        if state in ['started', 'restarted', 'reloaded']:
            action = 'start'
            result['state'] = 'started'
        elif state == 'reset':
            if enabled:
                action = 'start'
                result['state'] = 'started'
            else:
                result['state'] = None
        else:
            # effectively already stopped; nothing to run
            result['state'] = None
    # service is loaded
    else:
        # get status information
        result['status'] = get_service_status(module, service_path)
        running = service_is_running(result['status'])
        if state == 'started':
            if not running:
                action = 'start'
        elif state == 'stopped':
            if running:
                action = 'stop'
        # reset = start/stop according to enabled status
        elif state == 'reset':
            if enabled is not running:
                if running:
                    action = 'stop'
                    result['state'] = 'stopped'
                else:
                    action = 'start'
                    result['state'] = 'started'
        # start if not running, 'service' module constraint
        elif state == 'restarted':
            if not running:
                action = 'start'
                result['state'] = 'started'
            else:
                action = 'condrestart'
        # start if not running, 'service' module constraint
        elif state == 'reloaded':
            if not running:
                action = 'start'
                result['state'] = 'started'
            else:
                action = 'hangup'
    # change state as needed
    if action:
        result['changed'] = True
        if not module.check_mode:
            (rc, out, err) = run_sys_ctl(module, [action, service_path])
            if rc != 0:
                module.fail_json(msg="Unable to %s service %s: %s" % (action, service_path, err))
# ===========================================
# Main control flow
def main():
    """Gather nosh service facts, then apply enabled/preset and state changes."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            state=dict(choices=['started', 'stopped', 'reset', 'restarted', 'reloaded'], type='str'),
            enabled=dict(type='bool'),
            preset=dict(type='bool'),
            user=dict(type='bool', default=False),
        ),
        supports_check_mode=True,
        mutually_exclusive=[['enabled', 'preset']],
    )
    service = module.params['name']
    rc = 0
    out = err = ''
    result = {
        'name': service,
        'changed': False,
        'status': None,
    }
    # check service can be found (or fail) and get path
    service_path = get_service_path(module, service)
    # get preliminary service facts
    result['service_path'] = service_path
    result['user'] = module.params['user']
    result['enabled'] = service_is_enabled(module, service_path)
    # 'preset' fact: True when the current enabled state matches what the
    # preset files would choose
    result['preset'] = result['enabled'] is service_is_preset_enabled(module, service_path)
    # set enabled state, service need not be loaded
    if module.params['enabled'] is not None or module.params['preset']:
        handle_enabled(module, result, service_path)
    # set service running state
    if module.params['state'] is not None:
        handle_state(module, result, service_path)
    # get final service status if possible
    if service_is_loaded(module, service_path):
        result['status'] = get_service_status(module, service_path)
    module.exit_json(**result)


if __name__ == '__main__':
    main()

View file

@ -0,0 +1,51 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ohai
short_description: Returns inventory data from I(Ohai)
description:
- Similar to the M(facter) module, this runs the I(Ohai) discovery program
(U(https://docs.chef.io/ohai.html)) on the remote host and
returns JSON inventory data.
I(Ohai) data is a bit more verbose and nested than I(facter).
options: {}
notes: []
requirements: [ "ohai" ]
author:
- "Ansible Core Team"
- "Michael DeHaan (@mpdehaan)"
'''
EXAMPLES = '''
# Retrieve (ohai) data from all Web servers and store in one-file per host
ansible webservers -m ohai --tree=/tmp/ohaidata
'''
import json
from ansible.module_utils.basic import AnsibleModule
def main():
    """Run ohai on the host and return its JSON output as module results."""
    module = AnsibleModule(argument_spec=dict())
    # check_rc=True makes run_command fail the module on a non-zero exit
    rc, out, err = module.run_command(["/usr/bin/env", "ohai"], check_rc=True)
    module.exit_json(**json.loads(out))


if __name__ == '__main__':
    main()

View file

@ -0,0 +1,365 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, Serge van Ginderachter <serge@vanginderachter.be>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: open_iscsi
author:
- Serge van Ginderachter (@srvg)
short_description: Manage iSCSI targets with Open-iSCSI
description:
- Discover targets on given portal, (dis)connect targets, mark targets to
manually or auto start, return device nodes of connected targets.
requirements:
- open_iscsi library and tools (iscsiadm)
options:
portal:
description:
- The IP address of the iSCSI target.
type: str
aliases: [ ip ]
port:
description:
- The port on which the iSCSI target process listens.
type: str
default: 3260
target:
description:
- The iSCSI target name.
type: str
aliases: [ name, targetname ]
login:
description:
- Whether the target node should be connected.
type: bool
aliases: [ state ]
node_auth:
description:
- The value for C(discovery.sendtargets.auth.authmethod).
type: str
default: CHAP
node_user:
description:
- The value for C(discovery.sendtargets.auth.username).
type: str
node_pass:
description:
- The value for C(discovery.sendtargets.auth.password).
type: str
auto_node_startup:
description:
- Whether the target node should be automatically connected at startup.
type: bool
aliases: [ automatic ]
discover:
description:
- Whether the list of target nodes on the portal should be
(re)discovered and added to the persistent iSCSI database.
- Keep in mind that C(iscsiadm) discovery resets configuration, like C(node.startup)
to manual, hence combined with C(auto_node_startup=yes) will always return
a changed state.
type: bool
show_nodes:
description:
- Whether the list of nodes in the persistent iSCSI database should be returned by the module.
type: bool
'''
EXAMPLES = r'''
- name: Perform a discovery on 10.1.2.3 and show available target nodes
open_iscsi:
show_nodes: yes
discover: yes
portal: 10.1.2.3
# NOTE: Only works if exactly one target is exported to the initiator
- name: Discover targets on portal and login to the one available
open_iscsi:
portal: '{{ iscsi_target }}'
login: yes
discover: yes
- name: Connect to the named target, after updating the local persistent database (cache)
open_iscsi:
login: yes
target: iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d
- name: Disconnect from the cached named target
open_iscsi:
login: no
target: iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d
'''
import glob
import os
import time
from ansible.module_utils.basic import AnsibleModule
ISCSIADM = 'iscsiadm'
def compare_nodelists(l1, l2):
    """Return True when both node lists contain the same targets, ignoring order.

    Compares sorted copies instead of sorting in place: the original
    in-place sort mutated the caller's lists, clobbering the discovery
    order of `nodes`, which main() may later report back to the user.
    """
    return sorted(l1) == sorted(l2)
def iscsi_get_cached_nodes(module, portal=None):
    """Return target names from the persistent iSCSI node database.

    When *portal* is given, only targets on that portal are returned.
    Returns an empty list when iscsiadm reports no records.
    """
    cmd = '%s --mode node' % iscsiadm_cmd
    (rc, out, err) = module.run_command(cmd)
    if rc == 0:
        lines = out.splitlines()
        nodes = []
        for line in lines:
            # line format is "ip:port,target_portal_group_tag targetname"
            parts = line.split()
            if len(parts) > 2:
                module.fail_json(msg='error parsing output', cmd=cmd)
            target = parts[1]
            # NOTE(review): splitting the portal on ':' assumes an IPv4
            # address — an IPv6 portal contains colons itself, so the
            # portal filter below would mis-compare; verify before use
            # with IPv6 targets.
            parts = parts[0].split(':')
            target_portal = parts[0]
            if portal is None or portal == target_portal:
                nodes.append(target)
    # older versions of iscsiadm don't have nice return codes
    # for newer versions see iscsiadm(8); also usr/iscsiadm.c for details
    # err can contain [N|n]o records...
    elif rc == 21 or (rc == 255 and "o records found" in err):
        nodes = []
    else:
        module.fail_json(cmd=cmd, rc=rc, msg=err)
    return nodes
def iscsi_discover(module, portal, port):
    """Run sendtargets discovery against portal:port, failing on error."""
    command = '%s --mode discovery --type sendtargets --portal %s:%s' % (iscsiadm_cmd, portal, port)
    rc, dummy_out, err = module.run_command(command)
    if rc > 0:
        module.fail_json(cmd=command, rc=rc, msg=err)
def target_loggedon(module, target):
    """Return True when an active session exists for *target*."""
    cmd = '%s --mode session' % iscsiadm_cmd
    (rc, out, err) = module.run_command(cmd)
    # rc 21 means no active sessions
    if rc == 21:
        return False
    if rc == 0:
        return target in out
    module.fail_json(cmd=cmd, rc=rc, msg=err)
def target_login(module, target, portal=None, port=None):
    """Log in to *target*, first pushing CHAP credentials when configured.

    When both *portal* and *port* are given the login is restricted to
    that portal; otherwise iscsiadm logs in on every known portal for
    the target.  Any iscsiadm failure fails the module.
    """
    node_auth = module.params['node_auth']
    node_user = module.params['node_user']
    node_pass = module.params['node_pass']
    if node_user:
        # write the auth settings into the node record before logging in
        params = [('node.session.auth.authmethod', node_auth),
                  ('node.session.auth.username', node_user),
                  ('node.session.auth.password', node_pass)]
        for (name, value) in params:
            cmd = '%s --mode node --targetname %s --op=update --name %s --value %s' % (iscsiadm_cmd, target, name, value)
            (rc, out, err) = module.run_command(cmd)
            if rc > 0:
                module.fail_json(cmd=cmd, rc=rc, msg=err)
    cmd = '%s --mode node --targetname %s --login' % (iscsiadm_cmd, target)
    if portal is not None and port is not None:
        cmd += ' --portal %s:%s' % (portal, port)
    (rc, out, err) = module.run_command(cmd)
    if rc > 0:
        module.fail_json(cmd=cmd, rc=rc, msg=err)
def target_logout(module, target):
    """Log out of *target*, failing the module on a non-zero exit."""
    logout_cmd = '%s --mode node --targetname %s --logout' % (iscsiadm_cmd, target)
    rc, dummy_out, err = module.run_command(logout_cmd)
    if rc > 0:
        module.fail_json(cmd=logout_cmd, rc=rc, msg=err)
def target_device_node(module, target):
    """Return the whole-disk device nodes backed by *target*.

    Resolves matching /dev/disk/by-path entries, skipping partition
    links and de-duplicating devices reached via multiple paths.
    """
    # if anyone knows a better way to find out which device nodes get
    # created for a given target...
    devdisks = []
    for dev in glob.glob('/dev/disk/by-path/*%s*' % target):
        # exclude partitions
        if "-part" in dev:
            continue
        devdisk = os.path.realpath(dev)
        # only add once (multi-path?)
        if devdisk not in devdisks:
            devdisks.append(devdisk)
    return devdisks
def target_isauto(module, target):
    """Return True when node.startup for *target* is set to automatic."""
    cmd = '%s --mode node --targetname %s' % (iscsiadm_cmd, target)
    (rc, out, err) = module.run_command(cmd)
    if rc != 0:
        module.fail_json(cmd=cmd, rc=rc, msg=err)
    for record_line in out.splitlines():
        if 'node.startup' in record_line:
            return 'automatic' in record_line
    return False
def target_setauto(module, target):
    """Set node.startup to 'automatic' for *target*."""
    cmd = ('%s --mode node --targetname %s --op=update'
           ' --name node.startup --value automatic' % (iscsiadm_cmd, target))
    rc, dummy_out, err = module.run_command(cmd)
    if rc > 0:
        module.fail_json(cmd=cmd, rc=rc, msg=err)
def target_setmanual(module, target):
    """Set node.startup to 'manual' for *target*."""
    cmd = ('%s --mode node --targetname %s --op=update'
           ' --name node.startup --value manual' % (iscsiadm_cmd, target))
    rc, dummy_out, err = module.run_command(cmd)
    if rc > 0:
        module.fail_json(cmd=cmd, rc=rc, msg=err)
def main():
    """Entry point: discovery, login/logout and startup-mode management."""
    # load ansible module object
    module = AnsibleModule(
        argument_spec=dict(
            # target
            portal=dict(type='str', aliases=['ip']),
            port=dict(type='str', default='3260'),
            target=dict(type='str', aliases=['name', 'targetname']),
            node_auth=dict(type='str', default='CHAP'),
            node_user=dict(type='str'),
            node_pass=dict(type='str', no_log=True),
            # actions
            login=dict(type='bool', aliases=['state']),
            auto_node_startup=dict(type='bool', aliases=['automatic']),
            discover=dict(type='bool', default=False),
            show_nodes=dict(type='bool', default=False),
        ),
        # NOTE(review): 'discover_user'/'discover_pass' are not declared in
        # argument_spec, so that required_together pair can never trigger —
        # confirm whether discovery CHAP options were meant to be added.
        required_together=[['discover_user', 'discover_pass'],
                           ['node_user', 'node_pass']],
        supports_check_mode=True,
    )
    # shared by all the iscsiadm helper functions above
    global iscsiadm_cmd
    iscsiadm_cmd = module.get_bin_path('iscsiadm', required=True)
    # parameters
    portal = module.params['portal']
    target = module.params['target']
    port = module.params['port']
    login = module.params['login']
    automatic = module.params['auto_node_startup']
    discover = module.params['discover']
    show_nodes = module.params['show_nodes']
    check = module.check_mode
    cached = iscsi_get_cached_nodes(module, portal)
    # return json dict
    result = {}
    result['changed'] = False
    if discover:
        # (re)discover targets and refresh the persistent node cache
        if portal is None:
            module.fail_json(msg="Need to specify at least the portal (ip) to discover")
        elif check:
            nodes = cached
        else:
            iscsi_discover(module, portal, port)
            nodes = iscsi_get_cached_nodes(module, portal)
        if not compare_nodelists(cached, nodes):
            result['changed'] |= True
            result['cache_updated'] = True
    else:
        nodes = cached
    if login is not None or automatic is not None:
        if target is None:
            # no explicit target: only valid when exactly one node is cached
            if len(nodes) > 1:
                module.fail_json(msg="Need to specify a target")
            else:
                target = nodes[0]
        else:
            # check given target is in cache
            check_target = False
            for node in nodes:
                if node == target:
                    check_target = True
                    break
            if not check_target:
                module.fail_json(msg="Specified target not found")
    if show_nodes:
        result['nodes'] = nodes
    if login is not None:
        loggedon = target_loggedon(module, target)
        if (login and loggedon) or (not login and not loggedon):
            # already in the desired connection state
            result['changed'] |= False
            if login:
                result['devicenodes'] = target_device_node(module, target)
        elif not check:
            if login:
                target_login(module, target, portal, port)
                # give udev some time
                time.sleep(1)
                result['devicenodes'] = target_device_node(module, target)
            else:
                target_logout(module, target)
            result['changed'] |= True
            result['connection_changed'] = True
        else:
            # check mode: report the connection change that would happen
            result['changed'] |= True
            result['connection_changed'] = True
    if automatic is not None:
        isauto = target_isauto(module, target)
        if (automatic and isauto) or (not automatic and not isauto):
            result['changed'] |= False
            result['automatic_changed'] = False
        elif not check:
            if automatic:
                target_setauto(module, target)
            else:
                target_setmanual(module, target)
            result['changed'] |= True
            result['automatic_changed'] = True
        else:
            # check mode: report the startup-mode change that would happen
            result['changed'] |= True
            result['automatic_changed'] = True
    module.exit_json(**result)


if __name__ == '__main__':
    main()

View file

@ -0,0 +1,199 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Andrew Gaffney <andrew@agaffney.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: openwrt_init
author:
- "Andrew Gaffney (@agaffney)"
short_description: Manage services on OpenWrt.
description:
- Controls OpenWrt services on remote hosts.
options:
name:
description:
- Name of the service.
required: true
aliases: ['service']
state:
description:
- C(started)/C(stopped) are idempotent actions that will not run commands unless necessary.
C(restarted) will always bounce the service. C(reloaded) will always reload.
choices: [ 'started', 'stopped', 'restarted', 'reloaded' ]
enabled:
description:
- Whether the service should start on boot. B(At least one of state and enabled are required.)
type: bool
pattern:
description:
- If the service does not respond to the 'running' command, name a
substring to look for as would be found in the output of the I(ps)
command as a stand-in for a 'running' result. If the string is found,
the service will be assumed to be running.
notes:
- One option other than name is required.
requirements:
- An OpenWrt system (with python)
'''
EXAMPLES = '''
# Example action to start service httpd, if not running
- openwrt_init:
state: started
name: httpd
# Example action to stop service cron, if running
- openwrt_init:
name: cron
state: stopped
# Example action to reload service httpd, in all cases
- openwrt_init:
name: httpd
state: reloaded
# Example action to enable service httpd
- openwrt_init:
name: httpd
enabled: yes
'''
RETURN = '''
'''
import os
import glob
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes, to_native
module = None
init_script = None
# ===============================
# Check if service is enabled
def is_enabled():
    """Return True when the init script reports the service as enabled."""
    rc, dummy_out, dummy_err = module.run_command("%s enabled" % init_script)
    return rc == 0
# ===========================================
# Main control flow
def main():
    """Entry point: enable/disable and/or start/stop/restart/reload an OpenWrt service.

    Drives the service's /etc/init.d script directly. At least one of the
    'state' and 'enabled' parameters must be supplied (enforced through
    required_one_of). Exits via module.exit_json/fail_json.
    """
    global module, init_script
    # init
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True, type='str', aliases=['service']),
            state=dict(choices=['started', 'stopped', 'restarted', 'reloaded'], type='str'),
            enabled=dict(type='bool'),
            pattern=dict(required=False, default=None),
        ),
        supports_check_mode=True,
        required_one_of=[['state', 'enabled']],
    )

    # initialize
    service = module.params['name']
    init_script = '/etc/init.d/' + service
    rc = 0
    out = err = ''
    result = {
        'name': service,
        'changed': False,
    }

    # check if service exists
    if not os.path.exists(init_script):
        module.fail_json(msg='service %s does not exist' % service)

    # Enable/disable service startup at boot if requested
    if module.params['enabled'] is not None:
        # do we need to enable the service?
        enabled = is_enabled()

        # default to current state
        result['enabled'] = enabled

        # Change enable/disable if needed
        if enabled != module.params['enabled']:
            result['changed'] = True
            if module.params['enabled']:
                action = 'enable'
            else:
                action = 'disable'

            if not module.check_mode:
                (rc, out, err) = module.run_command("%s %s" % (init_script, action))
                # openwrt init scripts can return a non-zero exit code on a successful 'enable'
                # command if the init script doesn't contain a STOP value, so we ignore the exit
                # code and explicitly check if the service is now in the desired state
                if is_enabled() != module.params['enabled']:
                    module.fail_json(msg="Unable to %s service %s: %s" % (action, service, err))

            # Report the new boot state; in check mode this is the predicted state.
            result['enabled'] = not enabled

    if module.params['state'] is not None:
        running = False

        # check if service is currently running
        if module.params['pattern']:
            # Find ps binary
            psbin = module.get_bin_path('ps', True)

            # this should be busybox ps, so we only want/need to the 'w' option
            (rc, psout, pserr) = module.run_command('%s w' % psbin)
            # If rc is 0, set running as appropriate
            if rc == 0:
                lines = psout.split("\n")
                for line in lines:
                    if module.params['pattern'] in line and "pattern=" not in line:
                        # so as to not confuse ./hacking/test-module.py
                        running = True
                        break
        else:
            # No pattern given: ask the init script itself ("running" action).
            (rc, out, err) = module.run_command("%s running" % init_script)
            if rc == 0:
                running = True

        # default to desired state
        result['state'] = module.params['state']

        # determine action, if any
        action = None
        if module.params['state'] == 'started':
            if not running:
                action = 'start'
                result['changed'] = True
        elif module.params['state'] == 'stopped':
            if running:
                action = 'stop'
                result['changed'] = True
        else:
            # restarted/reloaded always act, regardless of the running state
            action = module.params['state'][:-2]  # remove 'ed' from restarted/reloaded
            result['state'] = 'started'
            result['changed'] = True

        if action:
            if not module.check_mode:
                (rc, out, err) = module.run_command("%s %s" % (init_script, action))
                if rc != 0:
                    module.fail_json(msg="Unable to %s service %s: %s" % (action, service, err))

    module.exit_json(**result)
if __name__ == '__main__':
main()

View file

@ -0,0 +1,391 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, GeekChimp - Franck Nijhof <franck@geekchimp.com>
# Copyright: (c) 2019, Ansible project
# Copyright: (c) 2019, Abhijeet Kasurde <akasurde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: osx_defaults
author:
- Franck Nijhof (@frenck)
short_description: Manage macOS user defaults
description:
- osx_defaults allows users to read, write, and delete macOS user defaults from Ansible scripts.
- macOS applications and other programs use the defaults system to record user preferences and other
information that must be maintained when the applications are not running (such as default font for new
documents, or the position of an Info panel).
options:
domain:
description:
- The domain is a domain name of the form C(com.companyname.appname).
type: str
default: NSGlobalDomain
host:
description:
- The host on which the preference should apply.
- The special value C(currentHost) corresponds to the C(-currentHost) switch of the defaults commandline tool.
type: str
key:
description:
- The key of the user preference.
type: str
required: true
type:
description:
- The type of value to write.
type: str
choices: [ array, bool, boolean, date, float, int, integer, string ]
default: string
array_add:
description:
- Add new elements to the array for a key which has an array as its value.
type: bool
default: no
value:
description:
- The value to write.
- Only required when C(state=present).
type: raw
state:
description:
- The state of the user defaults.
    - If set to C(list), it will query the given parameter specified by C(key). Returns 'null' if nothing is found or the key is mis-spelled.
- C(list) added in version 2.8.
type: str
choices: [ absent, list, present ]
default: present
path:
description:
- The path in which to search for C(defaults).
type: str
default: /usr/bin:/usr/local/bin
notes:
- Apple Mac caches defaults. You may need to logout and login to apply the changes.
'''
EXAMPLES = r'''
- osx_defaults:
domain: com.apple.Safari
key: IncludeInternalDebugMenu
type: bool
value: true
state: present
- osx_defaults:
domain: NSGlobalDomain
key: AppleMeasurementUnits
type: string
value: Centimeters
state: present
- osx_defaults:
domain: /Library/Preferences/com.apple.SoftwareUpdate
key: AutomaticCheckEnabled
type: int
value: 1
become: yes
- osx_defaults:
domain: com.apple.screensaver
host: currentHost
key: showClock
type: int
value: 1
- osx_defaults:
key: AppleMeasurementUnits
type: string
value: Centimeters
- osx_defaults:
key: AppleLanguages
type: array
value:
- en
- nl
- osx_defaults:
domain: com.geekchimp.macable
key: ExampleKeyToRemove
state: absent
'''
from datetime import datetime
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import binary_type, text_type
# exceptions --------------------------------------------------------------- {{{
class OSXDefaultsException(Exception):
    """Raised for any error while locating, reading, writing or deleting a default.

    The message is kept on ``.message`` for backwards compatibility with
    callers that read that attribute (e.g. ``main()``'s ``fail_json`` call).
    """

    def __init__(self, msg):
        # Also pass the message to Exception so str(e) and tracebacks show it;
        # the previous implementation only set .message, leaving str(e) empty.
        super(OSXDefaultsException, self).__init__(msg)
        self.message = msg
# /exceptions -------------------------------------------------------------- }}}
# class MacDefaults -------------------------------------------------------- {{{
class OSXDefaults(object):
    """Manage a single macOS user default through the `defaults` CLI tool.

    Pulls its parameters from the AnsibleModule passed in, locates the
    `defaults` executable, and provides read/write/delete primitives plus a
    run() driver that returns whether anything changed.
    """

    # init ---------------------------------------------------------------- {{{
    def __init__(self, module):
        """ Initialize this module. Finds 'defaults' executable and preps the parameters """
        # Initial var for storing current defaults value
        self.current_value = None

        self.module = module
        self.domain = module.params['domain']
        self.host = module.params['host']
        self.key = module.params['key']
        self.type = module.params['type']
        self.array_add = module.params['array_add']
        self.value = module.params['value']
        self.state = module.params['state']
        self.path = module.params['path']

        # Try to find the defaults executable
        self.executable = self.module.get_bin_path(
            'defaults',
            required=False,
            opt_dirs=self.path.split(':'),
        )

        if not self.executable:
            raise OSXDefaultsException("Unable to locate defaults executable.")

        # Ensure the value is the correct type (not needed when deleting)
        if self.state != 'absent':
            self.value = self._convert_type(self.type, self.value)
    # /init --------------------------------------------------------------- }}}

    # tools --------------------------------------------------------------- {{{
    @staticmethod
    def _convert_type(data_type, value):
        """Convert *value* to the requested defaults *data_type*.

        Raises OSXDefaultsException when the value cannot be represented in
        the requested type or the type itself is unknown.
        """
        if data_type == "string":
            return str(value)
        elif data_type in ["bool", "boolean"]:
            if isinstance(value, (binary_type, text_type)):
                value = value.lower()
            if value in [True, 1, "true", "1", "yes"]:
                return True
            elif value in [False, 0, "false", "0", "no"]:
                return False
            raise OSXDefaultsException("Invalid boolean value: {0}".format(repr(value)))
        elif data_type == "date":
            try:
                # Anything after a '+' (timezone offset) is discarded.
                return datetime.strptime(value.split("+")[0].strip(), "%Y-%m-%d %H:%M:%S")
            except ValueError:
                # Fixed typo in the format hint: 'yyy' -> 'yyyy'.
                raise OSXDefaultsException(
                    "Invalid date value: {0}. Required format yyyy-mm-dd hh:mm:ss.".format(repr(value))
                )
        elif data_type in ["int", "integer"]:
            # NOTE(review): isdigit() rejects negative numbers, so only
            # non-negative integers are accepted here — confirm intended.
            if not str(value).isdigit():
                raise OSXDefaultsException("Invalid integer value: {0}".format(repr(value)))
            return int(value)
        elif data_type == "float":
            try:
                value = float(value)
            except ValueError:
                raise OSXDefaultsException("Invalid float value: {0}".format(repr(value)))
            return value
        elif data_type == "array":
            if not isinstance(value, list):
                raise OSXDefaultsException("Invalid value. Expected value to be an array")
            return value

        raise OSXDefaultsException('Type is not supported: {0}'.format(data_type))

    def _host_args(self):
        """ Returns a normalized list of commandline arguments based on the "host" attribute """
        if self.host is None:
            return []
        elif self.host == 'currentHost':
            return ['-currentHost']
        else:
            return ['-host', self.host]

    def _base_command(self):
        """ Returns a list containing the "defaults" executable and any common base arguments """
        return [self.executable] + self._host_args()

    @staticmethod
    def _convert_defaults_str_to_list(value):
        """ Converts array output from defaults to an list """
        # Split output of defaults. Every line contains a value
        value = value.splitlines()

        # Remove first and last item, those are not actual values
        value.pop(0)
        value.pop(-1)

        # Remove extra spaces and comma (,) at the end of values
        value = [re.sub(',$', '', x.strip(' ')) for x in value]

        return value
    # /tools -------------------------------------------------------------- }}}

    # commands ------------------------------------------------------------ {{{
    def read(self):
        """Read the current value of this domain & key into self.current_value.

        Leaves current_value as None when the key does not exist.
        """
        # First try to find out the type
        rc, out, err = self.module.run_command(self._base_command() + ["read-type", self.domain, self.key])

        # If RC is 1, the key does not exist
        if rc == 1:
            return None

        # If the RC is not 0, then terrible happened! Ooooh nooo!
        if rc != 0:
            raise OSXDefaultsException("An error occurred while reading key type from defaults: %s" % out)

        # Ok, lets parse the type from output
        data_type = out.strip().replace('Type is ', '')

        # Now get the current value
        rc, out, err = self.module.run_command(self._base_command() + ["read", self.domain, self.key])

        # Strip output
        out = out.strip()

        # An non zero RC at this point is kinda strange...
        if rc != 0:
            raise OSXDefaultsException("An error occurred while reading key value from defaults: %s" % out)

        # Convert string to list when type is array
        if data_type == "array":
            out = self._convert_defaults_str_to_list(out)

        # Store the current_value
        self.current_value = self._convert_type(data_type, out)

    def write(self):
        """Write self.value to this domain & key via `defaults write`."""
        # We need to convert some values so the defaults commandline understands it
        if isinstance(self.value, bool):
            if self.value:
                value = "TRUE"
            else:
                value = "FALSE"
        elif isinstance(self.value, (int, float)):
            value = str(self.value)
        elif self.array_add and self.current_value is not None:
            # Only pass the elements that are not already present
            value = list(set(self.value) - set(self.current_value))
        elif isinstance(self.value, datetime):
            value = self.value.strftime('%Y-%m-%d %H:%M:%S')
        else:
            value = self.value

        # When the type is array and array_add is enabled, morph the type :)
        if self.type == "array" and self.array_add:
            self.type = "array-add"

        # All values should be a list, for easy passing it to the command
        if not isinstance(value, list):
            value = [value]

        rc, out, err = self.module.run_command(self._base_command() + ['write', self.domain, self.key, '-' + self.type] + value)

        if rc != 0:
            raise OSXDefaultsException('An error occurred while writing value to defaults: %s' % out)

    def delete(self):
        """Delete this key from the domain via `defaults delete`."""
        rc, out, err = self.module.run_command(self._base_command() + ['delete', self.domain, self.key])
        if rc != 0:
            raise OSXDefaultsException("An error occurred while deleting key from defaults: %s" % out)
    # /commands ----------------------------------------------------------- }}}

    # run ----------------------------------------------------------------- {{{
    def run(self):
        """ Does the magic! :)

        Returns True when a change was made (or would be, in check mode) and
        False otherwise. Exits the module directly for state=list.
        """
        # Get the current value from defaults
        self.read()

        if self.state == 'list':
            self.module.exit_json(key=self.key, value=self.current_value)

        # Handle absent state
        if self.state == "absent":
            if self.current_value is None:
                return False
            if self.module.check_mode:
                return True
            self.delete()
            return True

        # There is a type mismatch! Given type does not match the type in defaults
        value_type = type(self.value)
        if self.current_value is not None and not isinstance(self.current_value, value_type):
            raise OSXDefaultsException("Type mismatch. Type in defaults: %s" % type(self.current_value).__name__)

        # Current value matches the given value. Nothing need to be done. Arrays need extra care
        if self.type == "array" and self.current_value is not None and not self.array_add and \
                set(self.current_value) == set(self.value):
            return False
        elif self.type == "array" and self.current_value is not None and self.array_add and len(list(set(self.value) - set(self.current_value))) == 0:
            return False
        elif self.current_value == self.value:
            return False

        if self.module.check_mode:
            return True

        # Change/Create/Set given key/value for domain in defaults
        self.write()
        return True
    # /run ---------------------------------------------------------------- }}}
# /class MacDefaults ------------------------------------------------------ }}}
# main -------------------------------------------------------------------- {{{
def main():
    """Module entry point: parse parameters and apply the requested state."""
    module = AnsibleModule(
        argument_spec=dict(
            domain=dict(type='str', default='NSGlobalDomain'),
            host=dict(type='str'),
            # DOCUMENTATION declares 'key' as required; enforce it here so a
            # missing key fails with a clean argument error instead of passing
            # None into the 'defaults' command line built by OSXDefaults.
            key=dict(type='str', required=True),
            type=dict(type='str', default='string', choices=['array', 'bool', 'boolean', 'date', 'float', 'int', 'integer', 'string']),
            array_add=dict(type='bool', default=False),
            value=dict(type='raw'),
            state=dict(type='str', default='present', choices=['absent', 'list', 'present']),
            path=dict(type='str', default='/usr/bin:/usr/local/bin'),
        ),
        supports_check_mode=True,
        # 'value' only makes sense (and is mandatory) when writing.
        required_if=(
            ('state', 'present', ['value']),
        ),
    )

    try:
        defaults = OSXDefaults(module=module)
        module.exit_json(changed=defaults.run())
    except OSXDefaultsException as e:
        module.fail_json(msg=e.message)
# /main ------------------------------------------------------------------- }}}
if __name__ == '__main__':
main()

View file

@ -0,0 +1,315 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Sebastien Rohaut <sebastien.rohaut@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: pam_limits
author:
- "Sebastien Rohaut (@usawa)"
short_description: Modify Linux PAM limits
description:
- The C(pam_limits) module modifies PAM limits. The default file is
C(/etc/security/limits.conf). For the full documentation, see C(man 5
limits.conf).
options:
domain:
description:
- A username, @groupname, wildcard, uid/gid range.
required: true
limit_type:
description:
- Limit type, see C(man 5 limits.conf) for an explanation
required: true
choices: [ "hard", "soft", "-" ]
limit_item:
description:
- The limit to be set
required: true
choices:
- "core"
- "data"
- "fsize"
- "memlock"
- "nofile"
- "rss"
- "stack"
- "cpu"
- "nproc"
- "as"
- "maxlogins"
- "maxsyslogins"
- "priority"
- "locks"
- "sigpending"
- "msgqueue"
- "nice"
- "rtprio"
- "chroot"
value:
description:
- The value of the limit.
required: true
backup:
description:
- Create a backup file including the timestamp information so you can get
the original file back if you somehow clobbered it incorrectly.
required: false
type: bool
default: "no"
use_min:
description:
- If set to C(yes), the minimal value will be used or conserved.
If the specified value is inferior to the value in the file, file content is replaced with the new value,
else content is not modified.
required: false
type: bool
default: "no"
use_max:
description:
- If set to C(yes), the maximal value will be used or conserved.
If the specified value is superior to the value in the file, file content is replaced with the new value,
else content is not modified.
required: false
type: bool
default: "no"
dest:
description:
- Modify the limits.conf path.
required: false
default: "/etc/security/limits.conf"
comment:
description:
- Comment associated with the limit.
required: false
default: ''
notes:
- If C(dest) file doesn't exist, it is created.
'''
EXAMPLES = '''
- name: Add or modify nofile soft limit for the user joe
pam_limits:
domain: joe
limit_type: soft
limit_item: nofile
value: 64000
- name: Add or modify fsize hard limit for the user smith. Keep or set the maximal value.
pam_limits:
domain: smith
limit_type: hard
limit_item: fsize
value: 1000000
use_max: yes
- name: Add or modify memlock, both soft and hard, limit for the user james with a comment.
pam_limits:
domain: james
limit_type: '-'
limit_item: memlock
value: unlimited
comment: unlimited memory lock for james
- name: Add or modify hard nofile limits for wildcard domain
pam_limits:
domain: '*'
limit_type: hard
limit_item: nofile
value: 39693561
'''
import os
import os.path
import tempfile
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
def main():
    """Ensure a single PAM limits entry (domain/type/item/value) in the dest file.

    Rewrites the limits.conf-style file line by line through a temporary file
    and atomically moves it into place, honouring use_min/use_max and an
    optional trailing comment. Creates the dest file when it is missing but
    its directory is writable.
    """
    pam_items = ['core', 'data', 'fsize', 'memlock', 'nofile', 'rss', 'stack', 'cpu', 'nproc', 'as', 'maxlogins', 'maxsyslogins', 'priority', 'locks',
                 'sigpending', 'msgqueue', 'nice', 'rtprio', 'chroot']

    pam_types = ['soft', 'hard', '-']

    limits_conf = '/etc/security/limits.conf'

    module = AnsibleModule(
        # not checking because of daisy chain to file module
        argument_spec=dict(
            domain=dict(required=True, type='str'),
            limit_type=dict(required=True, type='str', choices=pam_types),
            limit_item=dict(required=True, type='str', choices=pam_items),
            value=dict(required=True, type='str'),
            use_max=dict(default=False, type='bool'),
            use_min=dict(default=False, type='bool'),
            backup=dict(default=False, type='bool'),
            dest=dict(default=limits_conf, type='str'),
            comment=dict(required=False, default='', type='str')
        )
    )

    domain = module.params['domain']
    limit_type = module.params['limit_type']
    limit_item = module.params['limit_item']
    value = module.params['value']
    use_max = module.params['use_max']
    use_min = module.params['use_min']
    backup = module.params['backup']
    limits_conf = module.params['dest']
    new_comment = module.params['comment']

    changed = False

    if os.path.isfile(limits_conf):
        if not os.access(limits_conf, os.W_OK):
            module.fail_json(msg="%s is not writable. Use sudo" % limits_conf)
    else:
        # Dest file does not exist: create it empty if its directory allows it.
        limits_conf_dir = os.path.dirname(limits_conf)
        if os.path.isdir(limits_conf_dir) and os.access(limits_conf_dir, os.W_OK):
            open(limits_conf, 'a').close()
            changed = True
        else:
            module.fail_json(msg="directory %s is not writable (check presence, access rights, use sudo)" % limits_conf_dir)

    if use_max and use_min:
        module.fail_json(msg="Cannot use use_min and use_max at the same time.")

    if not (value in ['unlimited', 'infinity', '-1'] or value.isdigit()):
        module.fail_json(msg="Argument 'value' can be one of 'unlimited', 'infinity', '-1' or positive number. Refer to manual pages for more details.")

    # Backup
    if backup:
        backup_file = module.backup_local(limits_conf)

    space_pattern = re.compile(r'\s+')

    message = ''
    # Open in binary mode; each line is converted to text below.
    f = open(limits_conf, 'rb')
    # Tempfile
    nf = tempfile.NamedTemporaryFile(mode='w+')

    found = False
    new_value = value

    for line in f:
        line = to_native(line, errors='surrogate_or_strict')
        if line.startswith('#'):
            nf.write(line)
            continue

        newline = re.sub(space_pattern, ' ', line).strip()
        if not newline:
            nf.write(line)
            continue

        # Remove comment in line
        newline = newline.split('#', 1)[0]
        try:
            old_comment = line.split('#', 1)[1]
        except Exception:
            old_comment = ''

        newline = newline.rstrip()

        # With no explicit comment parameter, preserve the existing comment.
        if not new_comment:
            new_comment = old_comment

        line_fields = newline.split(' ')

        # Anything that is not "<domain> <type> <item> <value>" passes through.
        if len(line_fields) != 4:
            nf.write(line)
            continue

        line_domain = line_fields[0]
        line_type = line_fields[1]
        line_item = line_fields[2]
        actual_value = line_fields[3]

        if not (actual_value in ['unlimited', 'infinity', '-1'] or actual_value.isdigit()):
            module.fail_json(msg="Invalid configuration of '%s'. Current value of %s is unsupported." % (limits_conf, line_item))

        # Found the line
        if line_domain == domain and line_type == limit_type and line_item == limit_item:
            found = True
            if value == actual_value:
                message = line
                nf.write(line)
                continue

            actual_value_unlimited = actual_value in ['unlimited', 'infinity', '-1']
            value_unlimited = value in ['unlimited', 'infinity', '-1']

            if use_max:
                # Keep the larger of requested/current; 'unlimited' wins.
                if value.isdigit() and actual_value.isdigit():
                    new_value = str(max(int(value), int(actual_value)))
                elif actual_value_unlimited:
                    new_value = actual_value
                else:
                    new_value = value

            if use_min:
                # Keep the smaller of requested/current; a numeric value wins.
                if value.isdigit() and actual_value.isdigit():
                    new_value = str(min(int(value), int(actual_value)))
                elif value_unlimited:
                    new_value = actual_value
                else:
                    new_value = value

            # Change line only if value has changed
            if new_value != actual_value:
                changed = True
                # NOTE(review): new_comment is mutated in place here; if the file
                # contained several matching lines, later matches would prepend
                # "\t#" again — confirm the single-matching-line assumption.
                if new_comment:
                    new_comment = "\t#" + new_comment
                new_limit = domain + "\t" + limit_type + "\t" + limit_item + "\t" + new_value + new_comment + "\n"
                message = new_limit
                nf.write(new_limit)
            else:
                message = line
                nf.write(line)
        else:
            nf.write(line)

    if not found:
        changed = True
        if new_comment:
            new_comment = "\t#" + new_comment
        new_limit = domain + "\t" + limit_type + "\t" + limit_item + "\t" + new_value + new_comment + "\n"
        message = new_limit
        nf.write(new_limit)

    f.close()
    nf.flush()

    # Copy tempfile to newfile; f.name is the path of the dest file opened above.
    module.atomic_move(nf.name, f.name)

    # The NamedTemporaryFile was moved away, so closing (which tries to
    # delete it) may fail; that is expected and ignored.
    try:
        nf.close()
    except Exception:
        pass

    res_args = dict(
        changed=changed, msg=message
    )

    if backup:
        res_args['backup_file'] = backup_file

    module.exit_json(**res_args)
if __name__ == '__main__':
main()

View file

@ -0,0 +1,878 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Kenneth D. Evensen <kdevensen@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
module: pamd
author:
- Kenneth D. Evensen (@kevensen)
short_description: Manage PAM Modules
description:
- Edit PAM service's type, control, module path and module arguments.
- In order for a PAM rule to be modified, the type, control and
module_path must match an existing rule. See man(5) pam.d for details.
options:
name:
description:
- The name generally refers to the PAM service file to
change, for example system-auth.
type: str
required: true
type:
description:
- The type of the PAM rule being modified.
- The C(type), C(control) and C(module_path) all must match a rule to be modified.
type: str
required: true
choices: [ account, -account, auth, -auth, password, -password, session, -session ]
control:
description:
- The control of the PAM rule being modified.
- This may be a complicated control with brackets. If this is the case, be
sure to put "[bracketed controls]" in quotes.
- The C(type), C(control) and C(module_path) all must match a rule to be modified.
type: str
required: true
module_path:
description:
- The module path of the PAM rule being modified.
- The C(type), C(control) and C(module_path) all must match a rule to be modified.
type: str
required: true
new_type:
description:
- The new type to assign to the new rule.
type: str
choices: [ account, -account, auth, -auth, password, -password, session, -session ]
new_control:
description:
- The new control to assign to the new rule.
type: str
new_module_path:
description:
- The new module path to be assigned to the new rule.
type: str
module_arguments:
description:
- When state is C(updated), the module_arguments will replace existing module_arguments.
- When state is C(args_absent) args matching those listed in module_arguments will be removed.
- When state is C(args_present) any args listed in module_arguments are added if
missing from the existing rule.
- Furthermore, if the module argument takes a value denoted by C(=),
the value will be changed to that specified in module_arguments.
type: list
state:
description:
- The default of C(updated) will modify an existing rule if type,
control and module_path all match an existing rule.
- With C(before), the new rule will be inserted before a rule matching type,
control and module_path.
    - Similarly, with C(after), the new rule will be inserted after an existing rule matching type,
control and module_path.
- With either C(before) or C(after) new_type, new_control, and new_module_path must all be specified.
- If state is C(args_absent) or C(args_present), new_type, new_control, and new_module_path will be ignored.
- State C(absent) will remove the rule. The 'absent' state was added in Ansible 2.4.
type: str
choices: [ absent, before, after, args_absent, args_present, updated ]
default: updated
path:
description:
- This is the path to the PAM service files.
type: path
default: /etc/pam.d
backup:
description:
- Create a backup file including the timestamp information so you can
get the original file back if you somehow clobbered it incorrectly.
type: bool
default: no
'''
EXAMPLES = r'''
- name: Update pamd rule's control in /etc/pam.d/system-auth
pamd:
name: system-auth
type: auth
control: required
module_path: pam_faillock.so
new_control: sufficient
- name: Update pamd rule's complex control in /etc/pam.d/system-auth
pamd:
name: system-auth
type: session
control: '[success=1 default=ignore]'
module_path: pam_succeed_if.so
new_control: '[success=2 default=ignore]'
- name: Insert a new rule before an existing rule
pamd:
name: system-auth
type: auth
control: required
module_path: pam_faillock.so
new_type: auth
new_control: sufficient
new_module_path: pam_faillock.so
state: before
- name: Insert a new rule pam_wheel.so with argument 'use_uid' after an \
existing rule pam_rootok.so
pamd:
name: su
type: auth
control: sufficient
module_path: pam_rootok.so
new_type: auth
new_control: required
new_module_path: pam_wheel.so
module_arguments: 'use_uid'
state: after
- name: Remove module arguments from an existing rule
pamd:
name: system-auth
type: auth
control: required
module_path: pam_faillock.so
module_arguments: ''
state: updated
- name: Replace all module arguments in an existing rule
pamd:
name: system-auth
type: auth
control: required
module_path: pam_faillock.so
module_arguments: 'preauth
silent
deny=3
unlock_time=604800
fail_interval=900'
state: updated
- name: Remove specific arguments from a rule
pamd:
name: system-auth
type: session
control: '[success=1 default=ignore]'
module_path: pam_succeed_if.so
module_arguments: crond,quiet
state: args_absent
- name: Ensure specific arguments are present in a rule
pamd:
name: system-auth
type: session
control: '[success=1 default=ignore]'
module_path: pam_succeed_if.so
module_arguments: crond,quiet
state: args_present
- name: Ensure specific arguments are present in a rule (alternative)
pamd:
name: system-auth
type: session
control: '[success=1 default=ignore]'
module_path: pam_succeed_if.so
module_arguments:
- crond
- quiet
state: args_present
- name: Module arguments requiring commas must be listed as a Yaml list
pamd:
name: special-module
type: account
control: required
module_path: pam_access.so
module_arguments:
- listsep=,
state: args_present
- name: Update specific argument value in a rule
pamd:
name: system-auth
type: auth
control: required
module_path: pam_faillock.so
module_arguments: 'fail_interval=300'
state: args_present
- name: Add pam common-auth rule for duo
pamd:
name: common-auth
new_type: auth
new_control: '[success=1 default=ignore]'
new_module_path: '/lib64/security/pam_duo.so'
state: after
type: auth
module_path: pam_sss.so
control: 'requisite'
'''
RETURN = r'''
change_count:
description: How many rules were changed.
type: int
sample: 1
returned: success
version_added: 2.4
new_rule:
description: The changes to the rule. This was available in Ansible 2.4 and Ansible 2.5. It was removed in Ansible 2.6.
type: str
sample: None None None sha512 shadow try_first_pass use_authtok
returned: success
version_added: 2.4
updated_rule_(n):
description: The rule(s) that was/were changed. This is only available in
Ansible 2.4 and was removed in Ansible 2.5.
type: str
sample:
- password sufficient pam_unix.so sha512 shadow try_first_pass
use_authtok
returned: success
version_added: 2.4
action:
description:
- "That action that was taken and is one of: update_rule,
insert_before_rule, insert_after_rule, args_present, args_absent,
absent. This was available in Ansible 2.4 and removed in Ansible 2.8"
returned: always
type: str
sample: "update_rule"
version_added: 2.4
dest:
description:
- "Path to pam.d service that was changed. This is only available in
Ansible 2.3 and was removed in Ansible 2.4."
returned: success
type: str
sample: "/etc/pam.d/system-auth"
backupdest:
description:
- "The file name of the backup file, if created."
returned: success
type: str
version_added: 2.6
...
'''
from ansible.module_utils.basic import AnsibleModule
import os
import re
from tempfile import NamedTemporaryFile
from datetime import datetime
RULE_REGEX = re.compile(r"""(?P<rule_type>-?(?:auth|account|session|password))\s+
(?P<control>\[.*\]|\S*)\s+
(?P<path>\S*)\s*
(?P<args>.*)\s*""", re.X)
RULE_ARG_REGEX = re.compile(r"""(\[.*\]|\S*)""")
VALID_TYPES = ['account', '-account', 'auth', '-auth', 'password', '-password', 'session', '-session']
class PamdLine(object):
    """Base node for the doubly linked list of lines in a pam.d service file.

    Subclasses (comments, includes, rules) override ``is_valid`` and
    ``matches``; a bare PamdLine is only considered valid when empty.
    """

    def __init__(self, line):
        self.line = line
        self.prev = None
        self.next = None

    @property
    def is_valid(self):
        # Only an empty line is acceptable as a plain, untyped pamd line.
        return self.line == ''

    def validate(self):
        """Return a (valid, human-readable message) pair for this line."""
        if self.is_valid:
            return True, "Rule is valid " + self.line
        return False, "Rule is not valid " + self.line

    def matches(self, rule_type, rule_control, rule_path, rule_args=None):
        """Non-rule lines never match a (type, control, path) query."""
        return False

    def __str__(self):
        return str(self.line)
class PamdComment(PamdLine):
    """A pam.d line holding a comment; valid when it starts with '#'."""

    def __init__(self, line):
        super(PamdComment, self).__init__(line)

    @property
    def is_valid(self):
        return self.line.startswith('#')
class PamdInclude(PamdLine):
    """A pam.d '@include <service>' directive; valid when it starts with '@include'."""

    def __init__(self, line):
        super(PamdInclude, self).__init__(line)

    @property
    def is_valid(self):
        return self.line.startswith('@include')
class PamdRule(PamdLine):
    """A parsed pam.d rule line: type, control, module path and arguments.

    A complex (bracketed) control is stored internally as a list of
    'value=action' tokens; a simple control stays a plain string. The
    textual form of the line is always regenerated from the parts, so
    'line' is a computed property here.
    """

    # Valid single-word controls (see pam.conf(5)).
    valid_simple_controls = ['required', 'requisite', 'sufficient', 'optional', 'include', 'substack', 'definitive']
    # Valid return values usable inside a bracketed [value=action] control.
    valid_control_values = ['success', 'open_err', 'symbol_err', 'service_err', 'system_err', 'buf_err',
                            'perm_denied', 'auth_err', 'cred_insufficient', 'authinfo_unavail', 'user_unknown',
                            'maxtries', 'new_authtok_reqd', 'acct_expired', 'session_err', 'cred_unavail',
                            'cred_expired', 'cred_err', 'no_module_data', 'conv_err', 'authtok_err',
                            'authtok_recover_err', 'authtok_lock_busy', 'authtok_disable_aging', 'try_again',
                            'ignore', 'abort', 'authtok_expired', 'module_unknown', 'bad_item', 'conv_again',
                            'incomplete', 'default']
    # Valid named actions; a non-negative integer (jump count) is also allowed.
    valid_control_actions = ['ignore', 'bad', 'die', 'ok', 'done', 'reset']

    def __init__(self, rule_type, rule_control, rule_path, rule_args=None):
        self.prev = None
        self.next = None
        self._control = None
        self._args = None
        self.rule_type = rule_type
        self.rule_control = rule_control
        self.rule_path = rule_path
        self.rule_args = rule_args

    # Method to check if a rule matches the type, control and path.
    def matches(self, rule_type, rule_control, rule_path, rule_args=None):
        if (rule_type == self.rule_type and
                rule_control == self.rule_control and
                rule_path == self.rule_path):
            return True
        return False

    @classmethod
    def rule_from_string(cls, line):
        """Build a PamdRule from a raw service-file line via RULE_REGEX."""
        rule_match = RULE_REGEX.search(line)
        rule_args = parse_module_arguments(rule_match.group('args'))
        return cls(rule_match.group('rule_type'), rule_match.group('control'), rule_match.group('path'), rule_args)

    def __str__(self):
        # '{0: <11}' left-pads the type column to 11 characters.
        if self.rule_args:
            return '{0: <11}{1} {2} {3}'.format(self.rule_type, self.rule_control, self.rule_path, ' '.join(self.rule_args))
        return '{0: <11}{1} {2}'.format(self.rule_type, self.rule_control, self.rule_path)

    @property
    def rule_control(self):
        # Re-wrap a complex control in brackets when rendering.
        if isinstance(self._control, list):
            return '[' + ' '.join(self._control) + ']'
        return self._control

    @rule_control.setter
    def rule_control(self, control):
        if control.startswith('['):
            # Normalize ' = ' to '=' and store bracketed controls as a token list.
            control = control.replace(' = ', '=').replace('[', '').replace(']', '')
            self._control = control.split(' ')
        else:
            self._control = control

    @property
    def rule_args(self):
        if not self._args:
            return []
        return self._args

    @rule_args.setter
    def rule_args(self, args):
        self._args = parse_module_arguments(args)

    @property
    def line(self):
        # The textual line is always regenerated from the parsed fields.
        return str(self)

    @classmethod
    def is_action_unsigned_int(cls, string_num):
        """Return True when string_num parses as a non-negative integer."""
        number = 0
        try:
            number = int(string_num)
        except ValueError:
            return False

        if number >= 0:
            return True
        return False

    @property
    def is_valid(self):
        return self.validate()[0]

    def validate(self):
        """Return (valid, message) after checking type and control fields."""
        # Validate the rule type
        if self.rule_type not in VALID_TYPES:
            return False, "Rule type, " + self.rule_type + ", is not valid in rule " + self.line
        # Validate the rule control
        if isinstance(self._control, str) and self.rule_control not in PamdRule.valid_simple_controls:
            return False, "Rule control, " + self.rule_control + ", is not valid in rule " + self.line
        elif isinstance(self._control, list):
            for control in self._control:
                # NOTE(review): split("=") raises ValueError on a token without
                # '='; this assumes input parsed by RULE_REGEX — confirm.
                value, action = control.split("=")
                if value not in PamdRule.valid_control_values:
                    return False, "Rule control value, " + value + ", is not valid in rule " + self.line
                if action not in PamdRule.valid_control_actions and not PamdRule.is_action_unsigned_int(action):
                    return False, "Rule control action, " + action + ", is not valid in rule " + self.line

        # TODO: Validate path
        return True, "Rule is valid " + self.line
# PamdService encapsulates an entire service and contains one or more rules. It seems the best way is to do this
# as a doubly linked list.
class PamdService(object):
    """A pam.d service file parsed into a doubly linked list of PamdLine
    objects (plain lines, comments, '@include' directives and rules)."""

    def __init__(self, content):
        """Parse *content* (the full text of a pam.d file) line by line."""
        self._head = None
        self._tail = None
        for line in content.splitlines():
            if line.lstrip().startswith('#'):
                pamd_line = PamdComment(line)
            elif line.lstrip().startswith('@include'):
                pamd_line = PamdInclude(line)
            elif line == '':
                pamd_line = PamdLine(line)
            else:
                pamd_line = PamdRule.rule_from_string(line)
            self.append(pamd_line)

    def append(self, pamd_line):
        """Link *pamd_line* in at the tail of the list."""
        if self._head is None:
            self._head = self._tail = pamd_line
        else:
            pamd_line.prev = self._tail
            pamd_line.next = None
            self._tail.next = pamd_line
            self._tail = pamd_line

    def remove(self, rule_type, rule_control, rule_path):
        """Unlink every rule matching type/control/path; return the count.

        Bug fixes over the original: removing the only line no longer
        dereferences a None ``next`` pointer (AttributeError), and
        removing the tail now updates ``self._tail`` so a later
        append() does not attach to a removed node.
        """
        current_line = self._head
        changed = 0
        while current_line is not None:
            if current_line.matches(rule_type, rule_control, rule_path):
                if current_line.prev is not None:
                    current_line.prev.next = current_line.next
                else:
                    # Removing the head: its successor (possibly None) becomes head.
                    self._head = current_line.next
                if current_line.next is not None:
                    current_line.next.prev = current_line.prev
                else:
                    # Removing the tail: keep _tail consistent for append().
                    self._tail = current_line.prev
                changed += 1
            current_line = current_line.next
        return changed

    def get(self, rule_type, rule_control, rule_path):
        """Return the list of PamdRule lines matching type/control/path."""
        lines = []
        current_line = self._head
        while current_line is not None:
            if isinstance(current_line, PamdRule) and current_line.matches(rule_type, rule_control, rule_path):
                lines.append(current_line)
            current_line = current_line.next
        return lines

    def has_rule(self, rule_type, rule_control, rule_path):
        """Return True when at least one matching rule exists."""
        if self.get(rule_type, rule_control, rule_path):
            return True
        return False

    def update_rule(self, rule_type, rule_control, rule_path,
                    new_type=None, new_control=None, new_path=None, new_args=None):
        """Rewrite fields of every matching rule; return how many changed."""
        # Get a list of rules we want to change
        rules_to_find = self.get(rule_type, rule_control, rule_path)
        new_args = parse_module_arguments(new_args)
        changes = 0
        for current_rule in rules_to_find:
            rule_changed = False
            if new_type:
                if current_rule.rule_type != new_type:
                    rule_changed = True
                    current_rule.rule_type = new_type
            if new_control:
                if current_rule.rule_control != new_control:
                    rule_changed = True
                    current_rule.rule_control = new_control
            if new_path:
                if current_rule.rule_path != new_path:
                    rule_changed = True
                    current_rule.rule_path = new_path
            if new_args:
                if current_rule.rule_args != new_args:
                    rule_changed = True
                    current_rule.rule_args = new_args
            if rule_changed:
                changes += 1
        return changes

    def insert_before(self, rule_type, rule_control, rule_path,
                      new_type=None, new_control=None, new_path=None, new_args=None):
        """Insert a new rule before each matching rule (unless an identical
        rule already immediately precedes it); return how many were added."""
        # Get a list of rules we want to change
        rules_to_find = self.get(rule_type, rule_control, rule_path)
        changes = 0
        # There are two cases to consider.
        # 1. The new rule doesn't exist before the existing rule
        # 2. The new rule exists
        for current_rule in rules_to_find:
            # Create a new rule
            new_rule = PamdRule(new_type, new_control, new_path, new_args)
            # First we'll get the previous rule.
            previous_rule = current_rule.prev
            # Next we may have to loop backwards if the previous line is a comment. If it
            # is, we'll get the previous "rule's" previous.
            while previous_rule is not None and isinstance(previous_rule, PamdComment):
                previous_rule = previous_rule.prev
            # Next we'll see if the previous rule matches what we are trying to insert.
            if previous_rule is not None and not previous_rule.matches(new_type, new_control, new_path):
                # First set the original previous rule's next to the new_rule
                previous_rule.next = new_rule
                # Second, set the new_rule's previous to the original previous
                new_rule.prev = previous_rule
                # Third, set the new rule's next to the current rule
                new_rule.next = current_rule
                # Fourth, set the current rule's previous to the new_rule
                current_rule.prev = new_rule
                changes += 1
            # Handle the case where it is the first rule in the list.
            elif previous_rule is None:
                # This is the case where the current rule is not only the first rule
                # but the first line as well. So we set the head to the new rule
                if current_rule.prev is None:
                    self._head = new_rule
                # This case would occur if the previous line was a comment.
                else:
                    current_rule.prev.next = new_rule
                    new_rule.prev = current_rule.prev
                new_rule.next = current_rule
                current_rule.prev = new_rule
                changes += 1
        return changes

    def insert_after(self, rule_type, rule_control, rule_path,
                     new_type=None, new_control=None, new_path=None, new_args=None):
        """Insert a new rule after each matching rule (unless an identical
        rule already immediately follows it); return how many were added."""
        # Get a list of rules we want to change
        rules_to_find = self.get(rule_type, rule_control, rule_path)
        changes = 0
        # There are two cases to consider.
        # 1. The new rule doesn't exist after the existing rule
        # 2. The new rule exists
        for current_rule in rules_to_find:
            # First we'll get the next rule.
            next_rule = current_rule.next
            # Next we may have to loop forwards if the next line is a comment. If it
            # is, we'll get the next "rule's" next.
            while next_rule is not None and isinstance(next_rule, PamdComment):
                next_rule = next_rule.next
            # First we create a new rule
            new_rule = PamdRule(new_type, new_control, new_path, new_args)
            if next_rule is not None and not next_rule.matches(new_type, new_control, new_path):
                # If the previous rule doesn't match we'll insert our new rule.
                # Second set the original next rule's previous to the new_rule
                next_rule.prev = new_rule
                # Third, set the new_rule's next to the original next rule
                new_rule.next = next_rule
                # Fourth, set the new rule's previous to the current rule
                new_rule.prev = current_rule
                # Fifth, set the current rule's next to the new_rule
                current_rule.next = new_rule
                changes += 1
            # This is the case where the current_rule is the last in the list
            elif next_rule is None:
                new_rule.prev = self._tail
                new_rule.next = None
                self._tail.next = new_rule
                self._tail = new_rule
                current_rule.next = new_rule
                changes += 1
        return changes

    def add_module_arguments(self, rule_type, rule_control, rule_path, args_to_add):
        """Merge *args_to_add* into every matching rule's argument list.

        Simple flags and new key=value pairs are appended; existing keys
        with different values are updated in place.  Bracketed complex
        arguments are skipped (rejected earlier by main()).  Returns the
        number of rules whose arguments changed.
        """
        # Get a list of rules we want to change
        rules_to_find = self.get(rule_type, rule_control, rule_path)
        args_to_add = parse_module_arguments(args_to_add)
        changes = 0
        for current_rule in rules_to_find:
            rule_changed = False
            # create some structures to evaluate the situation
            simple_new_args = set()
            key_value_new_args = dict()
            for arg in args_to_add:
                if arg.startswith("["):
                    continue
                elif "=" in arg:
                    key, value = arg.split("=")
                    key_value_new_args[key] = value
                else:
                    simple_new_args.add(arg)
            key_value_new_args_set = set(key_value_new_args)
            simple_current_args = set()
            key_value_current_args = dict()
            for arg in current_rule.rule_args:
                if arg.startswith("["):
                    continue
                elif "=" in arg:
                    key, value = arg.split("=")
                    key_value_current_args[key] = value
                else:
                    simple_current_args.add(arg)
            key_value_current_args_set = set(key_value_current_args)
            new_args_to_add = list()
            # Handle new simple arguments
            if simple_new_args.difference(simple_current_args):
                for arg in simple_new_args.difference(simple_current_args):
                    new_args_to_add.append(arg)
            # Handle new key value arguments
            if key_value_new_args_set.difference(key_value_current_args_set):
                for key in key_value_new_args_set.difference(key_value_current_args_set):
                    new_args_to_add.append(key + '=' + key_value_new_args[key])
            if new_args_to_add:
                current_rule.rule_args += new_args_to_add
                rule_changed = True
            # Handle existing key value arguments when value is not equal
            if key_value_new_args_set.intersection(key_value_current_args_set):
                for key in key_value_new_args_set.intersection(key_value_current_args_set):
                    if key_value_current_args[key] != key_value_new_args[key]:
                        arg_index = current_rule.rule_args.index(key + '=' + key_value_current_args[key])
                        current_rule.rule_args[arg_index] = str(key + '=' + key_value_new_args[key])
                        rule_changed = True
            if rule_changed:
                changes += 1
        return changes

    def remove_module_arguments(self, rule_type, rule_control, rule_path, args_to_remove):
        """Strip *args_to_remove* from every matching rule's arguments;
        return the number of rules that actually lost an argument."""
        # Get a list of rules we want to change
        rules_to_find = self.get(rule_type, rule_control, rule_path)
        args_to_remove = parse_module_arguments(args_to_remove)
        changes = 0
        for current_rule in rules_to_find:
            if not args_to_remove:
                args_to_remove = []
            # Let's check to see if there are any args to remove by finding the intersection
            # of the rule's current args and the args_to_remove lists
            if not list(set(current_rule.rule_args) & set(args_to_remove)):
                continue
            # There are args to remove, so we create a list of new_args absent the args
            # to remove.
            current_rule.rule_args = [arg for arg in current_rule.rule_args if arg not in args_to_remove]
            changes += 1
        return changes

    def validate(self):
        """Validate every line; return (False, message) on the first
        invalid line, else (True, 'Module is valid')."""
        current_line = self._head
        while current_line is not None:
            if not current_line.validate()[0]:
                return current_line.validate()
            current_line = current_line.next
        return True, "Module is valid"

    def __str__(self):
        """Render the service back to text, refreshing (or adding) the
        '# Updated by Ansible' stamp on the second line."""
        lines = []
        current_line = self._head
        while current_line is not None:
            lines.append(str(current_line))
            current_line = current_line.next
        # Guard against content shorter than two lines: the original
        # indexed lines[1] unconditionally and raised IndexError.
        if len(lines) > 1 and lines[1].startswith("# Updated by Ansible"):
            lines.pop(1)
        lines.insert(1, "# Updated by Ansible - " + datetime.now().isoformat())
        return '\n'.join(lines) + '\n'
def parse_module_arguments(module_arguments):
    """Normalize module arguments into a flat list of strings.

    Accepts None, a single string or a list of strings and splits each
    with RULE_ARG_REGEX.  Bracketed complex arguments (e.g.
    ``[success=1 default=ignore]``) are kept verbatim; for all other
    arguments any whitespace around '=' is collapsed so that
    ``key = value`` becomes ``key=value``.  Returns [] when there is
    nothing to parse.
    """
    # Return empty list if we have no args to parse
    if not module_arguments:
        return []
    elif isinstance(module_arguments, list) and len(module_arguments) == 1 and not module_arguments[0]:
        return []
    if not isinstance(module_arguments, list):
        module_arguments = [module_arguments]
    parsed_args = list()
    for arg in module_arguments:
        for item in filter(None, RULE_ARG_REGEX.findall(arg)):
            if not item.startswith("["):
                # Bug fix: re.sub returns a new string rather than
                # mutating in place; the original discarded the result,
                # so 'key = value' was never normalized to 'key=value'.
                item = re.sub("\\s*=\\s*", "=", item)
            parsed_args.append(item)
    return parsed_args
def main():
    """Entry point for the pamd module.

    Parses the task parameters, applies the requested change
    (update/insert/remove rules or arguments) to the target pam.d
    service file, validates the result and — unless running in check
    mode — writes it back atomically, optionally after taking a backup.
    """
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True),
            type=dict(type='str', required=True, choices=VALID_TYPES),
            control=dict(type='str', required=True),
            module_path=dict(type='str', required=True),
            new_type=dict(type='str', choices=VALID_TYPES),
            new_control=dict(type='str'),
            new_module_path=dict(type='str'),
            module_arguments=dict(type='list'),
            state=dict(type='str', default='updated', choices=['absent', 'after', 'args_absent', 'args_present', 'before', 'updated']),
            path=dict(type='path', default='/etc/pam.d'),
            backup=dict(type='bool', default=False),
        ),
        supports_check_mode=True,
        required_if=[
            ("state", "args_present", ["module_arguments"]),
            ("state", "args_absent", ["module_arguments"]),
            ("state", "before", ["new_control"]),
            ("state", "before", ["new_type"]),
            ("state", "before", ["new_module_path"]),
            ("state", "after", ["new_control"]),
            ("state", "after", ["new_type"]),
            ("state", "after", ["new_module_path"]),
        ],
    )
    content = str()
    fname = os.path.join(module.params["path"], module.params["name"])
    # Open the file and read the content or fail
    try:
        with open(fname, 'r') as service_file_obj:
            content = service_file_obj.read()
    except IOError as e:
        # If unable to read the file, fail out
        module.fail_json(msg='Unable to open/read PAM module \
file %s with error %s.' %
                         (fname, str(e)))
    # Assuming we didn't fail, create the service
    service = PamdService(content)
    # Set the action
    action = module.params['state']
    changes = 0
    # Take action; each branch returns the number of rules it changed.
    if action == 'updated':
        changes = service.update_rule(module.params['type'], module.params['control'], module.params['module_path'],
                                      module.params['new_type'], module.params['new_control'], module.params['new_module_path'],
                                      module.params['module_arguments'])
    elif action == 'before':
        changes = service.insert_before(module.params['type'], module.params['control'], module.params['module_path'],
                                        module.params['new_type'], module.params['new_control'], module.params['new_module_path'],
                                        module.params['module_arguments'])
    elif action == 'after':
        changes = service.insert_after(module.params['type'], module.params['control'], module.params['module_path'],
                                       module.params['new_type'], module.params['new_control'], module.params['new_module_path'],
                                       module.params['module_arguments'])
    elif action == 'args_absent':
        changes = service.remove_module_arguments(module.params['type'], module.params['control'], module.params['module_path'],
                                                  module.params['module_arguments'])
    elif action == 'args_present':
        # Bracketed complex arguments cannot be merged piecemeal; the user
        # must replace the whole rule with state=updated instead.
        if [arg for arg in parse_module_arguments(module.params['module_arguments']) if arg.startswith("[")]:
            module.fail_json(msg="Unable to process bracketed '[' complex arguments with 'args_present'. Please use 'updated'.")
        changes = service.add_module_arguments(module.params['type'], module.params['control'], module.params['module_path'],
                                               module.params['module_arguments'])
    elif action == 'absent':
        changes = service.remove(module.params['type'], module.params['control'], module.params['module_path'])
    valid, msg = service.validate()
    # If the module is not valid (meaning one of the rules is invalid), we will fail
    if not valid:
        module.fail_json(msg=msg)
    result = dict(
        changed=(changes > 0),
        change_count=changes,
        backupdest='',
    )
    # If not check mode and something changed, backup the original if necessary then write out the file or fail
    if not module.check_mode and result['changed']:
        # First, create a backup if desired.
        if module.params['backup']:
            result['backupdest'] = module.backup_local(fname)
        try:
            # Write to a temp file first so the final move is atomic.
            temp_file = NamedTemporaryFile(mode='w', dir=module.tmpdir, delete=False)
            with open(temp_file.name, 'w') as fd:
                fd.write(str(service))
        except IOError:
            module.fail_json(msg='Unable to create temporary \
file %s' % temp_file)
        module.atomic_move(temp_file.name, os.path.realpath(fname))
    module.exit_json(**result)
if __name__ == '__main__':
main()

View file

@ -0,0 +1,694 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Fabrizio Colonna <colofabrix@tin.it>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
author:
- Fabrizio Colonna (@ColOfAbRiX)
module: parted
short_description: Configure block device partitions
description:
- This module allows configuring block device partition using the C(parted)
command line tool. For a full description of the fields and the options
check the GNU parted manual.
requirements:
- This module requires parted version 1.8.3 and above.
- If the version of parted is below 3.1, it requires a Linux version running
the sysfs file system C(/sys/).
options:
device:
description: The block device (disk) where to operate.
type: str
required: True
align:
description: Set alignment for newly created partitions.
type: str
choices: [ cylinder, minimal, none, optimal ]
default: optimal
number:
description:
- The number of the partition to work with or the number of the partition
that will be created.
- Required when performing any action on the disk, except fetching information.
type: int
unit:
description:
- Selects the current default unit that Parted will use to display
locations and capacities on the disk and to interpret those given by the
user if they are not suffixed by an unit.
- When fetching information about a disk, it is always recommended to specify a unit.
type: str
choices: [ s, B, KB, KiB, MB, MiB, GB, GiB, TB, TiB, '%', cyl, chs, compact ]
default: KiB
label:
description: Creates a new disk label.
type: str
choices: [ aix, amiga, bsd, dvh, gpt, loop, mac, msdos, pc98, sun ]
default: msdos
part_type:
description:
- May be specified only with 'msdos' or 'dvh' partition tables.
- A C(name) must be specified for a 'gpt' partition table.
- Neither C(part_type) nor C(name) may be used with a 'sun' partition table.
type: str
choices: [ extended, logical, primary ]
default: primary
part_start:
description:
- Where the partition will start as offset from the beginning of the disk,
that is, the "distance" from the start of the disk.
- The distance can be specified with all the units supported by parted
(except compat) and it is case sensitive, e.g. C(10GiB), C(15%).
type: str
default: 0%
part_end :
description:
- Where the partition will end as offset from the beginning of the disk,
that is, the "distance" from the start of the disk.
- The distance can be specified with all the units supported by parted
(except compat) and it is case sensitive, e.g. C(10GiB), C(15%).
type: str
default: 100%
name:
description:
- Sets the name for the partition number (GPT, Mac, MIPS and PC98 only).
type: str
flags:
description: A list of the flags that has to be set on the partition.
type: list
state:
description:
- Whether to create or delete a partition.
- If set to C(info) the module will only return the device information.
type: str
choices: [ absent, present, info ]
default: info
notes:
- When fetching information about a new disk and when the version of parted
installed on the system is before version 3.1, the module queries the kernel
through C(/sys/) to obtain disk information. In this case the units CHS and
CYL are not supported.
'''
RETURN = r'''
partition_info:
description: Current partition information
returned: success
type: complex
contains:
device:
description: Generic device information.
type: dict
partitions:
description: List of device partitions.
type: list
sample: {
"disk": {
"dev": "/dev/sdb",
"logical_block": 512,
"model": "VMware Virtual disk",
"physical_block": 512,
"size": 5.0,
"table": "msdos",
"unit": "gib"
},
"partitions": [{
"begin": 0.0,
"end": 1.0,
"flags": ["boot", "lvm"],
"fstype": "",
"name": "",
"num": 1,
"size": 1.0
}, {
"begin": 1.0,
"end": 5.0,
"flags": [],
"fstype": "",
"name": "",
"num": 2,
"size": 4.0
}]
}
'''
EXAMPLES = r'''
- name: Create a new primary partition
parted:
device: /dev/sdb
number: 1
state: present
- name: Remove partition number 1
parted:
device: /dev/sdb
number: 1
state: absent
- name: Create a new primary partition with a size of 1GiB
parted:
device: /dev/sdb
number: 1
state: present
part_end: 1GiB
- name: Create a new primary partition for LVM
parted:
device: /dev/sdb
number: 2
flags: [ lvm ]
state: present
part_start: 1GiB
# Example on how to read info and reuse it in subsequent task
- name: Read device information (always use unit when probing)
parted: device=/dev/sdb unit=MiB
register: sdb_info
- name: Remove all partitions from disk
parted:
device: /dev/sdb
number: '{{ item.num }}'
state: absent
loop: '{{ sdb_info.partitions }}'
'''
from ansible.module_utils.basic import AnsibleModule
import math
import re
import os
# Reference prefixes (International System of Units and IEC)
units_si = ['B', 'KB', 'MB', 'GB', 'TB']
units_iec = ['KiB', 'MiB', 'GiB', 'TiB']
parted_units = units_si + units_iec + ['s', '%', 'cyl', 'chs', 'compact']
def parse_unit(size_str, unit=''):
    """Split a parted size string into a numeric value and its unit.

    Handles both the plain "<number>[<unit>]" form and the CHS
    "<cylinder>,<head>,<sector>" form.  Returns a ``(size, unit)``
    tuple; for CHS input ``size`` is a dict and ``unit`` is 'chs'.
    Fails the module when the string matches neither format.
    """
    plain = re.search(r'^([\d.]+)([\w%]+)?$', size_str)
    if plain is not None:
        # "<number>[<unit>]": keep the caller-supplied default unit
        # unless the string carries its own suffix.
        if plain.group(2) is not None:
            unit = plain.group(2)
        return float(plain.group(1)), unit
    chs = re.search(r'^(\d+),(\d+),(\d+)$', size_str)
    if chs is None:
        # Neither format matched: parted emitted something we do not
        # understand, so abort via the module-level AnsibleModule.
        module.fail_json(
            msg="Error interpreting parted size output: '%s'" % size_str
        )
    cylinder, head, sector = (int(chs.group(i)) for i in (1, 2, 3))
    return {'cylinder': cylinder, 'head': head, 'sector': sector}, 'chs'
def parse_partition_info(parted_output, unit):
    """
    Parses the output of parted and transforms the data into
    a dictionary.
    Parted Machine Parseable Output:
    See: https://lists.alioth.debian.org/pipermail/parted-devel/2006-December/00
    0573.html
    - All lines end with a semicolon (;)
    - The first line indicates the units in which the output is expressed.
      CHS, CYL and BYT stands for CHS, Cylinder and Bytes respectively.
    - The second line is made of disk information in the following format:
      "path":"size":"transport-type":"logical-sector-size":"physical-sector-siz
      e":"partition-table-type":"model-name";
    - If the first line was either CYL or CHS, the next line will contain
      information on no. of cylinders, heads, sectors and cylinder size.
    - Partition information begins from the next line. This is of the format:
      (for BYT)
      "number":"begin":"end":"size":"filesystem-type":"partition-name":"flags-s
      et";
      (for CHS/CYL)
      "number":"begin":"end":"filesystem-type":"partition-name":"flags-set";

    Returns {'generic': <disk dict>, 'partitions': <list of dicts>}.
    """
    lines = [x for x in parted_output.split('\n') if x.strip() != '']
    # Generic device info
    generic_params = lines[1].rstrip(';').split(':')
    # The unit is read once, because parted always returns the same unit
    size, unit = parse_unit(generic_params[1], unit)
    generic = {
        'dev': generic_params[0],
        'size': size,
        'unit': unit.lower(),
        'table': generic_params[5],
        'model': generic_params[6],
        'logical_block': int(generic_params[3]),
        'physical_block': int(generic_params[4])
    }
    # CYL and CHS have an additional line in the output
    if unit in ['cyl', 'chs']:
        chs_info = lines[2].rstrip(';').split(':')
        cyl_size, cyl_unit = parse_unit(chs_info[3])
        generic['chs_info'] = {
            'cylinders': int(chs_info[0]),
            'heads': int(chs_info[1]),
            'sectors': int(chs_info[2]),
            'cyl_size': cyl_size,
            'cyl_size_unit': cyl_unit.lower()
        }
        # Drop the extra CHS/CYL line so partition lines always start at
        # lines[2] in the loop below.
        lines = lines[1:]
    parts = []
    for line in lines[2:]:
        part_params = line.rstrip(';').split(':')
        # CHS use a different format than BYT, but contrary to what stated by
        # the author, CYL is the same as BYT. I've tested this undocumented
        # behaviour down to parted version 1.8.3, which is the first version
        # that supports the machine parseable output.
        if unit != 'chs':
            size = parse_unit(part_params[3])[0]
            fstype = part_params[4]
            name = part_params[5]
            flags = part_params[6]
        else:
            # CHS lines carry no size column; leave it empty.
            size = ""
            fstype = part_params[3]
            name = part_params[4]
            flags = part_params[5]
        parts.append({
            'num': int(part_params[0]),
            'begin': parse_unit(part_params[1])[0],
            'end': parse_unit(part_params[2])[0],
            'size': size,
            'fstype': fstype,
            'name': name,
            'flags': [f.strip() for f in flags.split(', ') if f != ''],
            'unit': unit.lower(),
        })
    return {'generic': generic, 'partitions': parts}
def format_disk_size(size_bytes, unit):
    """Convert a byte count into *unit*, mimicking parted's rounding.

    CYL and CHS are not handled; they (and an empty unit) fall back to
    the 'compact' behaviour of picking a readable SI prefix.  Adapted
    from libparted's unit.c.  Returns a ``(value, unit)`` tuple with the
    unit lower-cased.
    """
    unit = unit.lower()
    # A zero-sized disk is always reported in bytes.
    if size_bytes == 0:
        return 0.0, 'b'
    # Cases where we default to 'compact': choose the SI prefix that
    # keeps the value in a readable range.
    if unit in ('', 'compact', 'cyl', 'chs'):
        index = max(0, int((math.log10(size_bytes) - 1.0) / 3.0))
        unit = units_si[index] if index < len(units_si) else 'b'
    # Find the appropriate multiplier for the chosen unit family.
    if unit in units_si:
        multiplier = 1000.0 ** units_si.index(unit)
    elif unit in units_iec:
        multiplier = 1024.0 ** units_iec.index(unit)
    else:
        multiplier = 1.0
    output = size_bytes // multiplier * (1 + 1E-16)
    # Corrections to round up as per IEEE754 standard: pick the decimal
    # precision from the magnitude of the (pre-rounded) value.
    if output < 10:
        bumped = output + 0.005
    elif output < 100:
        bumped = output + 0.05
    else:
        bumped = output + 0.5
    if bumped < 10:
        precision = 2
    elif bumped < 100:
        precision = 1
    else:
        precision = 0
    # Round and return
    return round(output, precision), unit
def get_unlabeled_device_info(device, unit):
    """
    Fetches device information directly from the kernel and it is used when
    parted cannot work because of a missing label. It always returns a 'unknown'
    label.
    """
    device_name = os.path.basename(device)
    base = "/sys/block/%s" % device_name
    # Missing sysfs entries fall back to the defaults given here.
    vendor = read_record(base + "/device/vendor", "Unknown")
    model = read_record(base + "/device/model", "model")
    logic_block = int(read_record(base + "/queue/logical_block_size", 0))
    phys_block = int(read_record(base + "/queue/physical_block_size", 0))
    # NOTE(review): /sys/block/<dev>/size is commonly expressed in 512-byte
    # sectors; this multiplies by the logical block size instead — confirm
    # behaviour on devices whose logical block size is not 512.
    size_bytes = int(read_record(base + "/size", 0)) * logic_block
    size, unit = format_disk_size(size_bytes, unit)
    return {
        'generic': {
            'dev': device,
            'table': "unknown",
            'size': size,
            'unit': unit,
            'logical_block': logic_block,
            'physical_block': phys_block,
            'model': "%s %s" % (vendor, model),
        },
        'partitions': []
    }
def get_device_info(device, unit):
    """
    Fetches information about a disk and its partitions and it returns a
    dictionary.
    """
    global module, parted_exec
    # If parted complains about missing labels, it means there are no partitions.
    # In this case only, use a custom function to fetch information and emulate
    # parted formats for the unit.
    label_needed = check_parted_label(device)
    if label_needed:
        return get_unlabeled_device_info(device, unit)
    command = "%s -s -m %s -- unit '%s' print" % (parted_exec, device, unit)
    rc, out, err = module.run_command(command)
    # An 'unrecognised disk label' error is tolerated here; any other
    # non-zero exit aborts the module.
    if rc != 0 and 'unrecognised disk label' not in err:
        module.fail_json(msg=(
            "Error while getting device information with parted "
            "script: '%s'" % command),
            rc=rc, out=out, err=err
        )
    return parse_partition_info(out, unit)
def check_parted_label(device):
    """
    Determines if parted needs a label to complete its duties. Versions prior
    to 3.1 don't return data when there is no label. For more information see:
    http://upstream.rosalinux.ru/changelogs/libparted/3.1/changelog.html

    Returns True only for parted < 3.1 on a device with no recognised label.
    """
    global parted_exec
    # Check the version
    parted_major, parted_minor, _ = parted_version()
    if (parted_major == 3 and parted_minor >= 1) or parted_major > 3:
        return False
    # Older parted versions return a message in the stdout and RC > 0.
    rc, out, err = module.run_command("%s -s -m %s print" % (parted_exec, device))
    if rc != 0 and 'unrecognised disk label' in out.lower():
        return True
    return False
def parted_version():
    """
    Returns the major and minor version of parted installed on the system.

    Returns a (major, minor, rev) tuple of ints; fails the module when
    the version string cannot be obtained or parsed.
    """
    global module, parted_exec
    rc, out, err = module.run_command("%s --version" % parted_exec)
    if rc != 0:
        module.fail_json(
            msg="Failed to get parted version.", rc=rc, out=out, err=err
        )
    lines = [x for x in out.split('\n') if x.strip() != '']
    if len(lines) == 0:
        module.fail_json(msg="Failed to get parted version.", rc=0, out=out)
    # The first line looks like 'parted (GNU parted) 3.2'; the revision
    # component is optional.
    matches = re.search(r'^parted.+(\d+)\.(\d+)(?:\.(\d+))?$', lines[0])
    if matches is None:
        module.fail_json(msg="Failed to get parted version.", rc=0, out=out)
    # Convert version to numbers
    major = int(matches.group(1))
    minor = int(matches.group(2))
    rev = 0
    if matches.group(3) is not None:
        rev = int(matches.group(3))
    return major, minor, rev
def parted(script, device, align):
    """
    Runs a parted script.

    No-op when the script is empty or when running in check mode; fails
    the module if parted exits non-zero.
    """
    global module, parted_exec
    if script and not module.check_mode:
        command = "%s -s -m -a %s %s -- %s" % (parted_exec, align, device, script)
        rc, out, err = module.run_command(command)
        if rc != 0:
            module.fail_json(
                msg="Error while running parted script: %s" % command.strip(),
                rc=rc, out=out, err=err
            )
def read_record(file_path, default=None):
    """Return the first line of *file_path*, stripped of whitespace.

    If the file cannot be opened or read, *default* is returned instead
    of raising.
    """
    try:
        # The context manager guarantees the handle is closed even if
        # readline() raises (replaces the original try/finally close).
        with open(file_path, 'r') as f:
            return f.readline().strip()
    except IOError:
        return default
def part_exists(partitions, attribute, number):
    """Return True when some partition's *attribute* equals *number*.

    Falsy attribute values (e.g. 0 or '') never match, mirroring the
    truthiness guard of the original comparison.
    """
    for part in partitions:
        if part[attribute] and part[attribute] == number:
            return True
    return False
def check_size_format(size_str):
    """Return True when *size_str* parses to a unit parted understands."""
    parsed_unit = parse_unit(size_str)[1]
    return parsed_unit in parted_units
def main():
    """Entry point for the parted module.

    Gathers parameters, inspects the target block device with parted and
    then creates, modifies, removes or simply reports partitions as
    requested, exiting with the device's final state.
    """
    global module, units_si, units_iec, parted_exec
    changed = False
    output_script = ""
    script = ""
    module = AnsibleModule(
        argument_spec=dict(
            device=dict(type='str', required=True),
            align=dict(type='str', default='optimal', choices=['cylinder', 'minimal', 'none', 'optimal']),
            number=dict(type='int'),
            # unit <unit> command
            unit=dict(type='str', default='KiB', choices=parted_units),
            # mklabel <label-type> command
            label=dict(type='str', default='msdos', choices=['aix', 'amiga', 'bsd', 'dvh', 'gpt', 'loop', 'mac', 'msdos', 'pc98', 'sun']),
            # mkpart <part-type> [<fs-type>] <start> <end> command
            part_type=dict(type='str', default='primary', choices=['extended', 'logical', 'primary']),
            part_start=dict(type='str', default='0%'),
            part_end=dict(type='str', default='100%'),
            # name <partition> <name> command
            name=dict(type='str'),
            # set <partition> <flag> <state> command
            flags=dict(type='list'),
            # rm/mkpart command
            state=dict(type='str', default='info', choices=['absent', 'info', 'present']),
        ),
        required_if=[
            ['state', 'present', ['number']],
            ['state', 'absent', ['number']],
        ],
        supports_check_mode=True,
    )
    # Force a stable C locale so parted's output is parseable.
    module.run_command_environ_update = {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C', 'LC_CTYPE': 'C'}
    # Data extraction
    device = module.params['device']
    align = module.params['align']
    number = module.params['number']
    unit = module.params['unit']
    label = module.params['label']
    part_type = module.params['part_type']
    part_start = module.params['part_start']
    part_end = module.params['part_end']
    name = module.params['name']
    state = module.params['state']
    flags = module.params['flags']
    # Parted executable
    parted_exec = module.get_bin_path('parted', True)
    # Conditioning
    if number is not None and number < 1:
        # Message fix: 'greater then' -> 'greater than'.
        module.fail_json(msg="The partition number must be greater than 0.")
    if not check_size_format(part_start):
        module.fail_json(
            # Message fix: added the missing space after the period (the
            # two literals used to concatenate into 'format.The size').
            msg="The argument 'part_start' doesn't respect required format. "
                "The size unit is case sensitive.",
            err=parse_unit(part_start)
        )
    if not check_size_format(part_end):
        module.fail_json(
            msg="The argument 'part_end' doesn't respect required format. "
                "The size unit is case sensitive.",
            err=parse_unit(part_end)
        )
    # Read the current disk information
    current_device = get_device_info(device, unit)
    current_parts = current_device['partitions']
    if state == 'present':
        # Assign label if required
        if current_device['generic'].get('table', None) != label:
            script += "mklabel %s " % label
        # Create partition if required
        if part_type and not part_exists(current_parts, 'num', number):
            script += "mkpart %s %s %s " % (
                part_type,
                part_start,
                part_end
            )
        # Set the unit of the run
        if unit and script:
            script = "unit %s %s" % (unit, script)
        # Execute the script and update the data structure.
        # This will create the partition for the next steps
        if script:
            output_script += script
            parted(script, device, align)
            changed = True
            script = ""
            current_parts = get_device_info(device, unit)['partitions']
        if part_exists(current_parts, 'num', number) or module.check_mode:
            partition = {'flags': []}   # Empty structure for the check-mode
            if not module.check_mode:
                partition = [p for p in current_parts if p['num'] == number][0]
            # Assign name to the partition
            if name is not None and partition.get('name', None) != name:
                # Wrap double quotes in single quotes so the shell doesn't strip
                # the double quotes as those need to be included in the arg
                # passed to parted
                script += 'name %s \'"%s"\' ' % (number, name)
            # Manage flags
            if flags:
                # Parted infers boot with esp, if you assign esp, boot is set
                # and if boot is unset, esp is also unset.
                if 'esp' in flags and 'boot' not in flags:
                    flags.append('boot')
                # Compute only the changes in flags status
                flags_off = list(set(partition['flags']) - set(flags))
                flags_on = list(set(flags) - set(partition['flags']))
                for f in flags_on:
                    script += "set %s %s on " % (number, f)
                for f in flags_off:
                    script += "set %s %s off " % (number, f)
            # Set the unit of the run
            if unit and script:
                script = "unit %s %s" % (unit, script)
            # Execute the script
            if script:
                output_script += script
                changed = True
                parted(script, device, align)
    elif state == 'absent':
        # Remove the partition
        if part_exists(current_parts, 'num', number) or module.check_mode:
            script = "rm %s " % number
            output_script += script
            changed = True
            parted(script, device, align)
    elif state == 'info':
        output_script = "unit '%s' print " % unit
    # Final status of the device
    final_device_status = get_device_info(device, unit)
    module.exit_json(
        changed=changed,
        disk=final_device_status['generic'],
        partitions=final_device_status['partitions'],
        script=output_script.strip()
    )
if __name__ == '__main__':
main()

View file

@ -0,0 +1,88 @@
#!/usr/bin/python
# Copyright: (c) 2019, Saranya Sridharan
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: pids
description: "Retrieves a list of PIDs of a given process name on Ansible controller/controlled machines. Returns an empty list if no process with that name exists."
short_description: "Retrieves process IDs list if the process is running otherwise return empty list"
author:
- Saranya Sridharan (@saranyasridharan)
requirements:
- psutil(python module)
options:
name:
description: the name of the process you want to get PID for.
required: true
type: str
'''
EXAMPLES = '''
# Pass the process name
- name: Getting process IDs of the process
pids:
name: python
register: pids_of_python
- name: Printing the process IDs obtained
debug:
msg: "PIDS of python:{{pids_of_python.pids|join(',')}}"
'''
RETURN = '''
pids:
description: Process IDs of the given process
returned: list of none, one, or more process IDs
type: list
sample: [100,200]
'''
from ansible.module_utils.basic import AnsibleModule
try:
import psutil
HAS_PSUTIL = True
except ImportError:
HAS_PSUTIL = False
def compare_lower(a, b):
    """Case-insensitively compare two strings, tolerating None operands."""
    if a is not None and b is not None:
        return a.lower() == b.lower()
    # At least one side is None: plain equality keeps (None, None) -> True
    # instead of surprising callers with an unconditional False.
    return a == b
def get_pid(name):
    """Return a list of PIDs whose process name (or argv[0]) matches *name*."""
    matched = []
    for process in psutil.process_iter(attrs=['name', 'cmdline']):
        info = process.info
        argv = info['cmdline']
        # Match either the reported process name or the first command-line word.
        if compare_lower(info['name'], name) or (argv and compare_lower(argv[0], name)):
            matched.append(process.pid)
    return matched
def main():
    """Module entry point: validate arguments and return matching PIDs."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True, type="str"),
        ),
        supports_check_mode=True,
    )
    if not HAS_PSUTIL:
        module.fail_json(msg="Missing required 'psutil' python module. Try installing it with: pip install psutil")
    process_name = module.params["name"]
    module.exit_json(pids=get_pid(process_name))


if __name__ == '__main__':
    main()

View file

@ -0,0 +1,330 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Hewlett-Packard Development Company, L.P.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: puppet
short_description: Runs puppet
description:
- Runs I(puppet) agent or apply in a reliable manner.
options:
timeout:
description:
- How long to wait for I(puppet) to finish.
type: str
default: 30m
puppetmaster:
description:
- The hostname of the puppetmaster to contact.
type: str
modulepath:
description:
- Path to an alternate location for puppet modules.
type: str
manifest:
description:
- Path to the manifest file to run puppet apply on.
type: str
noop:
description:
- Override puppet.conf noop mode.
- When C(yes), run Puppet agent with C(--noop) switch set.
- When C(no), run Puppet agent with C(--no-noop) switch set.
- When unset (default), use default or puppet.conf value if defined.
type: bool
facts:
description:
- A dict of values to pass in as persistent external facter facts.
type: dict
facter_basename:
description:
- Basename of the facter output file.
type: str
default: ansible
environment:
description:
- Puppet environment to be used.
type: str
logdest:
description:
- Where the puppet logs should go, if puppet apply is being used.
- C(all) will go to both C(stdout) and C(syslog).
type: str
choices: [ all, stdout, syslog ]
default: stdout
certname:
description:
- The name to use when handling certificates.
type: str
tags:
description:
- A list of puppet tags to be used.
type: list
execute:
description:
- Execute a specific piece of Puppet code.
- It has no effect with a puppetmaster.
type: str
use_srv_records:
description:
- Toggles use_srv_records flag
type: bool
summarize:
description:
- Whether to print a transaction summary.
type: bool
verbose:
description:
- Print extra information.
type: bool
debug:
description:
- Enable full debugging.
type: bool
requirements:
- puppet
author:
- Monty Taylor (@emonty)
'''
EXAMPLES = r'''
- name: Run puppet agent and fail if anything goes wrong
puppet:
- name: Run puppet and timeout in 5 minutes
puppet:
timeout: 5m
- name: Run puppet using a different environment
puppet:
environment: testing
- name: Run puppet using a specific certname
puppet:
certname: agent01.example.com
- name: Run puppet using a specific piece of Puppet code. Has no effect with a puppetmaster
puppet:
execute: include ::mymodule
- name: Run puppet using a specific tags
puppet:
tags:
- update
- nginx
- name: Run puppet agent in noop mode
puppet:
noop: yes
- name: Run a manifest with debug, log to both syslog and stdout, specify module path
puppet:
modulepath: /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules
logdest: all
manifest: /var/lib/example/puppet_step_config.pp
'''
import json
import os
import stat
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves import shlex_quote
def _get_facter_dir():
if os.getuid() == 0:
return '/etc/facter/facts.d'
else:
return os.path.expanduser('~/.facter/facts.d')
def _write_structured_data(basedir, basename, data):
if not os.path.exists(basedir):
os.makedirs(basedir)
file_path = os.path.join(basedir, "{0}.json".format(basename))
# This is more complex than you might normally expect because we want to
# open the file with only u+rw set. Also, we use the stat constants
# because ansible still supports python 2.4 and the octal syntax changed
out_file = os.fdopen(
os.open(
file_path, os.O_CREAT | os.O_WRONLY,
stat.S_IRUSR | stat.S_IWUSR), 'w')
out_file.write(json.dumps(data).encode('utf8'))
out_file.close()
def main():
    """Run ``puppet agent`` or ``puppet apply`` and map puppet's
    --detailed-exitcodes results onto Ansible's changed/failed semantics.

    Fixes over the previous revision:
    - ``noop`` handling: the old ``elif 'noop' in p:`` test was always true
      (the key always exists in params), which appended ``--no-noop`` even
      when the option was never given — contradicting the documented
      "when unset, use the puppet.conf default" behavior.  In the agent
      branch it was additionally chained to the ``use_srv_records`` check,
      so noop was silently ignored whenever that option was set, and check
      mode could emit both ``--noop`` and ``--no-noop``.
    - ``puppet apply`` option spacing: options are now always appended with
      a leading space; the old mix of leading/trailing spaces could glue two
      options together (e.g. ``--logdest stdout--modulepath=...`` or
      ``--certname='x'/path/manifest.pp``).
    """
    module = AnsibleModule(
        argument_spec=dict(
            timeout=dict(type='str', default='30m'),
            puppetmaster=dict(type='str'),
            modulepath=dict(type='str'),
            manifest=dict(type='str'),
            noop=dict(required=False, type='bool'),
            logdest=dict(type='str', default='stdout', choices=['all',
                                                                'stdout',
                                                                'syslog']),
            # internal code to work with --diff, do not use
            show_diff=dict(type='bool', default=False, aliases=['show-diff']),
            facts=dict(type='dict'),
            facter_basename=dict(type='str', default='ansible'),
            environment=dict(type='str'),
            certname=dict(type='str'),
            tags=dict(type='list'),
            execute=dict(type='str'),
            summarize=dict(type='bool', default=False),
            debug=dict(type='bool', default=False),
            verbose=dict(type='bool', default=False),
            use_srv_records=dict(type='bool'),
        ),
        supports_check_mode=True,
        mutually_exclusive=[
            # The triple already forbids every pairing of puppetmaster with
            # manifest/execute (and manifest with execute).
            ('puppetmaster', 'manifest', 'execute'),
            ('puppetmaster', 'modulepath'),
        ],
    )
    p = module.params

    global PUPPET_CMD
    PUPPET_CMD = module.get_bin_path("puppet", False, ['/opt/puppetlabs/bin'])

    if not PUPPET_CMD:
        module.fail_json(
            msg="Could not find puppet. Please ensure it is installed.")

    global TIMEOUT_CMD
    TIMEOUT_CMD = module.get_bin_path("timeout", False)

    if p['manifest']:
        if not os.path.exists(p['manifest']):
            module.fail_json(
                msg="Manifest file %(manifest)s not found." % dict(
                    manifest=p['manifest']))

    # Check whether the agent is administratively disabled before running it.
    if not p['manifest']:
        rc, stdout, stderr = module.run_command(
            PUPPET_CMD + " config print agent_disabled_lockfile")
        if os.path.exists(stdout.strip()):
            module.fail_json(
                msg="Puppet agent is administratively disabled.",
                disabled=True)
        elif rc != 0:
            module.fail_json(
                msg="Puppet agent state could not be determined.")

    # Persist external facter facts before the run (skipped in check mode).
    if p['facts'] and not module.check_mode:
        _write_structured_data(
            _get_facter_dir(),
            p['facter_basename'],
            p['facts'])

    # Wrap puppet in timeout(1) when available so a hung run is killed.
    if TIMEOUT_CMD:
        base_cmd = "%(timeout_cmd)s -s 9 %(timeout)s %(puppet_cmd)s" % dict(
            timeout_cmd=TIMEOUT_CMD,
            timeout=shlex_quote(p['timeout']),
            puppet_cmd=PUPPET_CMD)
    else:
        base_cmd = PUPPET_CMD

    if not p['manifest'] and not p['execute']:
        # Agent mode.
        cmd = ("%(base_cmd)s agent --onetime"
               " --no-daemonize --no-usecacheonfailure --no-splay"
               " --detailed-exitcodes --verbose --color 0") % dict(base_cmd=base_cmd)
        if p['puppetmaster']:
            cmd += " --server %s" % shlex_quote(p['puppetmaster'])
        if p['show_diff']:
            cmd += " --show_diff"
        if p['environment']:
            cmd += " --environment '%s'" % p['environment']
        if p['tags']:
            cmd += " --tags '%s'" % ','.join(p['tags'])
        if p['certname']:
            cmd += " --certname='%s'" % p['certname']
        if p['use_srv_records'] is not None:
            if not p['use_srv_records']:
                cmd += " --no-use_srv_records"
            else:
                cmd += " --use_srv_records"
        # Check mode always forces --noop; otherwise honour an explicit
        # noop=yes/no, and when 'noop' is unset (None) add neither switch so
        # the puppet.conf default applies.
        if module.check_mode:
            cmd += " --noop"
        elif p['noop'] is not None:
            if p['noop']:
                cmd += " --noop"
            else:
                cmd += " --no-noop"
    else:
        # Apply mode (local manifest or --execute snippet).
        cmd = "%s apply --detailed-exitcodes" % base_cmd
        if p['logdest'] == 'syslog':
            cmd += " --logdest syslog"
        if p['logdest'] == 'all':
            cmd += " --logdest syslog --logdest stdout"
        if p['modulepath']:
            cmd += " --modulepath='%s'" % p['modulepath']
        if p['environment']:
            cmd += " --environment '%s'" % p['environment']
        if p['certname']:
            cmd += " --certname='%s'" % p['certname']
        if p['tags']:
            cmd += " --tags '%s'" % ','.join(p['tags'])
        if module.check_mode:
            cmd += " --noop"
        elif p['noop'] is not None:
            if p['noop']:
                cmd += " --noop"
            else:
                cmd += " --no-noop"
        if p['execute']:
            cmd += " --execute '%s'" % p['execute']
        else:
            cmd += " %s" % shlex_quote(p['manifest'])
        if p['summarize']:
            cmd += " --summarize"
        if p['debug']:
            cmd += " --debug"
        if p['verbose']:
            cmd += " --verbose"
    rc, stdout, stderr = module.run_command(cmd)

    # puppet --detailed-exitcodes: 0 = no changes, 2 = changes applied,
    # 1 = failure (or agent disabled); 124 comes from the timeout(1) wrapper.
    if rc == 0:
        # success
        module.exit_json(rc=rc, changed=False, stdout=stdout, stderr=stderr)
    elif rc == 1:
        # rc==1 could be because it's disabled
        # rc==1 could also mean there was a compilation failure
        disabled = "administratively disabled" in stdout
        if disabled:
            msg = "puppet is disabled"
        else:
            msg = "puppet did not run"
        module.exit_json(
            rc=rc, disabled=disabled, msg=msg,
            error=True, stdout=stdout, stderr=stderr)
    elif rc == 2:
        # success with changes
        module.exit_json(rc=0, changed=True, stdout=stdout, stderr=stderr)
    elif rc == 124:
        # timeout
        module.exit_json(
            rc=rc, msg="%s timed out" % cmd, stdout=stdout, stderr=stderr)
    else:
        # failure
        module.fail_json(
            rc=rc, msg="%s failed with return code: %d" % (cmd, rc),
            stdout=stdout, stderr=stderr)


if __name__ == '__main__':
    main()

View file

@ -0,0 +1 @@
python_requirements_info.py

View file

@ -0,0 +1,174 @@
#!/usr/bin/python
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
module: python_requirements_info
short_description: Show python path and assert dependency versions
description:
- Get info about available Python requirements on the target host, including listing required libraries and gathering versions.
- This module was called C(python_requirements_facts) before Ansible 2.9. The usage did not change.
options:
dependencies:
description: >
A list of version-likes or module names to check for installation.
Supported operators: <, >, <=, >=, or ==. The bare module name like
I(ansible), the module with a specific version like I(boto3==1.6.1), or a
partial version like I(requests>2) are all valid specifications.
author:
- Will Thames (@willthames)
- Ryan Scott Brown (@ryansb)
'''
EXAMPLES = '''
- name: show python lib/site paths
python_requirements_info:
- name: check for modern boto3 and botocore versions
python_requirements_info:
dependencies:
- boto3>1.6
- botocore<2
'''
RETURN = '''
python:
description: path to python version used
returned: always
type: str
sample: /usr/local/opt/python@2/bin/python2.7
python_version:
description: version of python
returned: always
type: str
sample: "2.7.15 (default, May 1 2018, 16:44:08)\n[GCC 4.2.1 Compatible Apple LLVM 9.1.0 (clang-902.0.39.1)]"
python_system_path:
description: List of paths python is looking for modules in
returned: always
type: list
sample:
- /usr/local/opt/python@2/site-packages/
- /usr/lib/python/site-packages/
- /usr/lib/python/site-packages/
valid:
description: A dictionary of dependencies that matched their desired versions. If no version was specified, then I(desired) will be null
returned: always
type: dict
sample:
boto3:
desired: null
installed: 1.7.60
botocore:
desired: botocore<2
installed: 1.10.60
mismatched:
description: A dictionary of dependencies that did not satisfy the desired version
returned: always
type: dict
sample:
botocore:
desired: botocore>2
installed: 1.10.60
not_found:
description: A list of packages that could not be imported at all, and are not installed
returned: always
type: list
sample:
- boto4
- requests
'''
import re
import sys
import operator
HAS_DISTUTILS = False
try:
import pkg_resources
from distutils.version import LooseVersion
HAS_DISTUTILS = True
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
# Map the comparison-operator strings accepted in a requirement spec
# (e.g. "boto3>=1.6") onto their functional equivalents from `operator`.
operations = {
    '<=': operator.le,
    '>=': operator.ge,
    '<': operator.lt,
    '>': operator.gt,
    '==': operator.eq,
}
def main():
    """Inspect the running Python and check the requested dependency specs.

    Results are returned in three buckets, matching RETURN:
    - ``valid``: installed and matching the requested version (``desired``
      is None when no version was requested),
    - ``mismatched``: installed but failing the version constraint, keyed
      by package name,
    - ``not_found``: not importable/installed at all.
    """
    module = AnsibleModule(
        argument_spec=dict(
            dependencies=dict(type='list')
        ),
        supports_check_mode=True,
    )
    if module._name == 'python_requirements_facts':
        module.deprecate("The 'python_requirements_facts' module has been renamed to 'python_requirements_info'", version='2.13')
    if not HAS_DISTUTILS:
        module.fail_json(
            msg='Could not import "distutils" and "pkg_resources" libraries to introspect python environment.',
            python=sys.executable,
            python_version=sys.version,
            python_system_path=sys.path,
        )
    # package name, optional operator, optional version: e.g. "boto3>=1.6".
    pkg_dep_re = re.compile(r'(^[a-zA-Z][a-zA-Z0-9_-]+)(==|[><]=?)?([0-9.]+)?$')

    results = dict(
        not_found=[],
        mismatched={},
        valid={},
    )

    for dep in (module.params.get('dependencies') or []):
        match = pkg_dep_re.match(dep)
        if match is None:
            module.fail_json(msg="Failed to parse version requirement '{0}'. Must be formatted like 'ansible>2.6'".format(dep))
        pkg, op, version = match.groups()
        if op is not None and op not in operations:
            module.fail_json(msg="Failed to parse version requirement '{0}'. Operator must be one of >, <, <=, >=, or ==".format(dep))
        try:
            existing = pkg_resources.get_distribution(pkg).version
        except pkg_resources.DistributionNotFound:
            # not installed at all
            results['not_found'].append(pkg)
            continue
        if op is None and version is None:
            results['valid'][pkg] = {
                'installed': existing,
                'desired': None,
            }
        elif operations[op](LooseVersion(existing), LooseVersion(version)):
            results['valid'][pkg] = {
                'installed': existing,
                'desired': dep,
            }
        else:
            # BUGFIX: key the entry by package name.  The previous code
            # replaced the whole 'mismatched' dict each time, losing earlier
            # entries and the package name, while RETURN documents a
            # per-package dict.
            results['mismatched'][pkg] = {
                'installed': existing,
                'desired': dep,
            }

    module.exit_json(
        python=sys.executable,
        python_version=sys.version,
        python_system_path=sys.path,
        **results
    )


if __name__ == '__main__':
    main()

View file

@ -0,0 +1,284 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Brian Coca <bcoca@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
# This is a modification of @bcoca's `svc` module
DOCUMENTATION = r'''
---
module: runit
author:
- James Sumners (@jsumners)
short_description: Manage runit services
description:
- Controls runit services on remote hosts using the sv utility.
options:
name:
description:
- Name of the service to manage.
type: str
required: yes
state:
description:
- C(started)/C(stopped) are idempotent actions that will not run
commands unless necessary. C(restarted) will always bounce the
service (sv restart) and C(killed) will always bounce the service (sv force-stop).
C(reloaded) will send a HUP (sv reload).
C(once) will run a normally downed sv once (sv once), not really
an idempotent operation.
type: str
choices: [ killed, once, reloaded, restarted, started, stopped ]
enabled:
description:
- Whether the service is enabled or not, if disabled it also implies stopped.
type: bool
service_dir:
description:
- directory runsv watches for services
type: str
default: /var/service
service_src:
description:
- directory where services are defined, the source of symlinks to service_dir.
type: str
default: /etc/sv
'''
EXAMPLES = r'''
- name: Start sv dnscache, if not running
runit:
name: dnscache
state: started
- name: Stop sv dnscache, if running
runit:
name: dnscache
state: stopped
- name: Kill sv dnscache, in all cases
runit:
name: dnscache
state: killed
- name: Restart sv dnscache, in all cases
runit:
name: dnscache
state: restarted
- name: Reload sv dnscache, in all cases
runit:
name: dnscache
state: reloaded
- name: Use alternative sv directory location
runit:
name: dnscache
state: reloaded
service_dir: /run/service
'''
import os
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
def _load_dist_subclass(cls, *args, **kwargs):
'''
Used for derivative implementations
'''
subclass = None
distro = kwargs['module'].params['distro']
# get the most specific superclass for this platform
if distro is not None:
for sc in cls.__subclasses__():
if sc.distro is not None and sc.distro == distro:
subclass = sc
if subclass is None:
subclass = cls
return super(cls, subclass).__new__(subclass)
class Sv(object):
    """
    Main class that handles daemontools, can be subclassed and overridden in case
    we want to use a 'derivative' like encore, s6, etc
    """

    # def __new__(cls, *args, **kwargs):
    #     return _load_dist_subclass(cls, args, kwargs)

    def __init__(self, module):
        # Extra directories to search for the 'sv' binary (none by default).
        self.extra_paths = []
        # Attribute names included in the dict returned by report().
        self.report_vars = ['state', 'enabled', 'svc_full', 'src_full', 'pid', 'duration', 'full_state']

        self.module = module

        self.name = module.params['name']
        self.service_dir = module.params['service_dir']
        self.service_src = module.params['service_src']
        self.enabled = None
        self.full_state = None
        self.state = None
        self.pid = None
        self.duration = None

        # 'sv' is used both to control and to query services; it is only
        # required for the control path.
        self.svc_cmd = module.get_bin_path('sv', opt_dirs=self.extra_paths, required=True)
        self.svstat_cmd = module.get_bin_path('sv', opt_dirs=self.extra_paths)
        # <service_dir>/<name> is the supervised (symlinked) service;
        # <service_src>/<name> is its definition, the symlink source.
        self.svc_full = '/'.join([self.service_dir, self.name])
        self.src_full = '/'.join([self.service_src, self.name])

        # A service counts as enabled when the symlink exists in service_dir.
        self.enabled = os.path.lexists(self.svc_full)
        if self.enabled:
            self.get_status()
        else:
            self.state = 'stopped'

    def enable(self):
        """Enable the service by symlinking its definition into service_dir."""
        if os.path.exists(self.src_full):
            try:
                os.symlink(self.src_full, self.svc_full)
            except OSError as e:
                self.module.fail_json(path=self.src_full, msg='Error while linking: %s' % to_native(e))
        else:
            self.module.fail_json(msg="Could not find source for service to enable (%s)." % self.src_full)

    def disable(self):
        """Stop the service and remove its symlink from service_dir."""
        # NOTE(review): force-stop is issued against src_full here, while
        # kill() targets svc_full -- confirm this asymmetry is intentional.
        self.execute_command([self.svc_cmd, 'force-stop', self.src_full])
        try:
            os.unlink(self.svc_full)
        except OSError as e:
            self.module.fail_json(path=self.svc_full, msg='Error while unlinking: %s' % to_native(e))

    def get_status(self):
        """Run 'sv status' and parse state, pid and duration from its output."""
        (rc, out, err) = self.execute_command([self.svstat_cmd, 'status', self.svc_full])

        if err is not None and err:
            # Any stderr output is stored verbatim as the (unknown) state.
            self.full_state = self.state = err
        else:
            self.full_state = out
            # full_state *may* contain information about the logger:
            # "down: /etc/service/service-without-logger: 1s, normally up\n"
            # "down: /etc/service/updater: 127s, normally up; run: log: (pid 364) 263439s\n"
            full_state_no_logger = self.full_state.split("; ")[0]

            m = re.search(r'\(pid (\d+)\)', full_state_no_logger)
            if m:
                self.pid = m.group(1)

            m = re.search(r' (\d+)s', full_state_no_logger)
            if m:
                self.duration = m.group(1)

            if re.search(r'^run:', full_state_no_logger):
                self.state = 'started'
            elif re.search(r'^down:', full_state_no_logger):
                self.state = 'stopped'
            else:
                self.state = 'unknown'
            return

    def started(self):
        # Alias so main() can dispatch on the 'state' option value.
        return self.start()

    def start(self):
        return self.execute_command([self.svc_cmd, 'start', self.svc_full])

    def stopped(self):
        # Alias so main() can dispatch on the 'state' option value.
        return self.stop()

    def stop(self):
        return self.execute_command([self.svc_cmd, 'stop', self.svc_full])

    def once(self):
        return self.execute_command([self.svc_cmd, 'once', self.svc_full])

    def reloaded(self):
        # Alias so main() can dispatch on the 'state' option value.
        return self.reload()

    def reload(self):
        # 'sv reload' sends HUP to the service.
        return self.execute_command([self.svc_cmd, 'reload', self.svc_full])

    def restarted(self):
        # Alias so main() can dispatch on the 'state' option value.
        return self.restart()

    def restart(self):
        return self.execute_command([self.svc_cmd, 'restart', self.svc_full])

    def killed(self):
        # Alias so main() can dispatch on the 'state' option value.
        return self.kill()

    def kill(self):
        return self.execute_command([self.svc_cmd, 'force-stop', self.svc_full])

    def execute_command(self, cmd):
        """Run *cmd* (a list of words) via the module; fail the module on exceptions."""
        try:
            (rc, out, err) = self.module.run_command(' '.join(cmd))
        except Exception as e:
            self.module.fail_json(msg="failed to execute: %s" % to_native(e))
        return (rc, out, err)

    def report(self):
        """Refresh the status and return a dict of the report_vars attributes."""
        self.get_status()
        states = {}
        for k in self.report_vars:
            states[k] = self.__dict__[k]
        return states
def main():
    """Entry point for the runit service module."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True),
            state=dict(type='str', choices=['killed', 'once', 'reloaded', 'restarted', 'started', 'stopped']),
            enabled=dict(type='bool'),
            dist=dict(type='str', default='runit'),
            service_dir=dict(type='str', default='/var/service'),
            service_src=dict(type='str', default='/etc/sv'),
        ),
        supports_check_mode=True,
    )
    # Force the C locale so that 'sv' output is parsed reliably.
    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')

    desired_state = module.params['state']
    desired_enabled = module.params['enabled']

    sv = Sv(module)
    changed = False
    # Refresh the parsed status before comparing against the desired values.
    sv.report()

    # Reconcile the enabled/disabled symlink first.
    if desired_enabled is not None and desired_enabled != sv.enabled:
        changed = True
        if not module.check_mode:
            try:
                if desired_enabled:
                    sv.enable()
                else:
                    sv.disable()
            except (OSError, IOError) as e:
                module.fail_json(msg="Could not change service link: %s" % to_native(e))

    # Then reconcile the run state by dispatching to the matching Sv method.
    if desired_state is not None and desired_state != sv.state:
        changed = True
        if not module.check_mode:
            getattr(sv, desired_state)()

    module.exit_json(changed=changed, sv=sv.report())


if __name__ == '__main__':
    main()

View file

@ -0,0 +1,296 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Dag Wieers (@dagwieers) <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: sefcontext
short_description: Manages SELinux file context mapping definitions
description:
- Manages SELinux file context mapping definitions.
- Similar to the C(semanage fcontext) command.
options:
target:
description:
- Target path (expression).
type: str
required: yes
aliases: [ path ]
ftype:
description:
- The file type that should have SELinux contexts applied.
- "The following file type options are available:"
- C(a) for all files,
- C(b) for block devices,
- C(c) for character devices,
- C(d) for directories,
- C(f) for regular files,
- C(l) for symbolic links,
- C(p) for named pipes,
- C(s) for socket files.
type: str
choices: [ a, b, c, d, f, l, p, s ]
default: a
setype:
description:
- SELinux type for the specified target.
type: str
required: yes
seuser:
description:
- SELinux user for the specified target.
type: str
selevel:
description:
- SELinux range for the specified target.
type: str
aliases: [ serange ]
state:
description:
- Whether the SELinux file context must be C(absent) or C(present).
type: str
choices: [ absent, present ]
default: present
reload:
description:
- Reload SELinux policy after commit.
- Note that this does not apply SELinux file contexts to existing files.
type: bool
default: yes
ignore_selinux_state:
description:
- Useful for scenarios (chrooted environment) that you can't get the real SELinux state.
type: bool
default: no
notes:
- The changes are persistent across reboots.
- The M(sefcontext) module does not modify existing files to the new
SELinux context(s), so it is advisable to first create the SELinux
file contexts before creating files, or run C(restorecon) manually
for the existing files that require the new SELinux file contexts.
- Not applying SELinux fcontexts to existing files is a deliberate
decision as it would be unclear what reported changes would entail
to, and there's no guarantee that applying SELinux fcontext does
not pick up other unrelated prior changes.
requirements:
- libselinux-python
- policycoreutils-python
author:
- Dag Wieers (@dagwieers)
'''
EXAMPLES = r'''
- name: Allow apache to modify files in /srv/git_repos
sefcontext:
target: '/srv/git_repos(/.*)?'
setype: httpd_git_rw_content_t
state: present
- name: Apply new SELinux file context to filesystem
command: restorecon -irv /srv/git_repos
'''
RETURN = r'''
# Default return values
'''
import traceback
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
SELINUX_IMP_ERR = None
try:
import selinux
HAVE_SELINUX = True
except ImportError:
SELINUX_IMP_ERR = traceback.format_exc()
HAVE_SELINUX = False
SEOBJECT_IMP_ERR = None
try:
import seobject
HAVE_SEOBJECT = True
except ImportError:
SEOBJECT_IMP_ERR = traceback.format_exc()
HAVE_SEOBJECT = False
# Add missing entries (backward compatible): register the full single-letter
# file-type set on seobject, since older releases did not expose every key
# that the module's 'ftype' option accepts.
if HAVE_SEOBJECT:
    seobject.file_types.update(
        a=seobject.SEMANAGE_FCONTEXT_ALL,
        b=seobject.SEMANAGE_FCONTEXT_BLOCK,
        c=seobject.SEMANAGE_FCONTEXT_CHAR,
        d=seobject.SEMANAGE_FCONTEXT_DIR,
        f=seobject.SEMANAGE_FCONTEXT_REG,
        l=seobject.SEMANAGE_FCONTEXT_LINK,
        p=seobject.SEMANAGE_FCONTEXT_PIPE,
        s=seobject.SEMANAGE_FCONTEXT_SOCK,
    )

# Make backward compatible: map the single-letter 'ftype' option onto the
# human-readable string that seobject uses as part of its record keys.
option_to_file_type_str = dict(
    a='all files',
    b='block device',
    c='character device',
    d='directory',
    f='regular file',
    l='symbolic link',
    p='named pipe',
    s='socket',
)
def get_runtime_status(ignore_selinux_state=False):
    """Return True when SELinux is enabled, or unconditionally when told to ignore the real state."""
    if ignore_selinux_state is True:
        return True
    return selinux.is_selinux_enabled()
def semanage_fcontext_exists(sefcontext, target, ftype):
    ''' Get the SELinux file context mapping definition from policy. Return None if it does not exist. '''
    # Records are keyed by (target, human-readable file type string).
    key = (target, option_to_file_type_str[ftype])
    records = sefcontext.get_all()
    if key in records:
        return records[key]
    return None
def semanage_fcontext_modify(module, result, target, ftype, setype, do_reload, serange, seuser, sestore=''):
    ''' Add or modify SELinux file context mapping definition to the policy. '''
    changed = False
    prepared_diff = ''
    try:
        sefcontext = seobject.fcontextRecords(sestore)
        sefcontext.set_reload(do_reload)
        exists = semanage_fcontext_exists(sefcontext, target, ftype)
        if exists:
            # Modify existing entry
            orig_seuser, orig_serole, orig_setype, orig_serange = exists
            # Unspecified user/range fall back to the current record's values,
            # so only an explicit difference triggers a modification.
            if seuser is None:
                seuser = orig_seuser
            if serange is None:
                serange = orig_serange
            if setype != orig_setype or seuser != orig_seuser or serange != orig_serange:
                if not module.check_mode:
                    sefcontext.modify(target, setype, ftype, serange, seuser)
                changed = True
                if module._diff:
                    prepared_diff += '# Change to semanage file context mappings\n'
                    prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, orig_seuser, orig_serole, orig_setype, orig_serange)
                    prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, orig_serole, setype, serange)
        else:
            # Add missing entry
            # Defaults for a brand-new mapping when not specified by the user.
            if seuser is None:
                seuser = 'system_u'
            if serange is None:
                serange = 's0'
            if not module.check_mode:
                sefcontext.add(target, setype, ftype, serange, seuser)
            changed = True
            if module._diff:
                prepared_diff += '# Addition to semanage file context mappings\n'
                prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, 'object_r', setype, serange)
    except Exception as e:
        module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)))
    if module._diff and prepared_diff:
        result['diff'] = dict(prepared=prepared_diff)
    # Exits the module: reports the effective seuser/serange alongside result.
    module.exit_json(changed=changed, seuser=seuser, serange=serange, **result)
def semanage_fcontext_delete(module, result, target, ftype, do_reload, sestore=''):
    ''' Delete SELinux file context mapping definition from the policy. '''
    changed = False
    prepared_diff = ''
    try:
        records = seobject.fcontextRecords(sestore)
        records.set_reload(do_reload)
        existing = semanage_fcontext_exists(records, target, ftype)
        if existing:
            # Only remove an entry that is actually present in the policy.
            if not module.check_mode:
                records.delete(target, ftype)
            changed = True
            if module._diff:
                prepared_diff += '# Deletion to semanage file context mappings\n'
                prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, existing[0], existing[1], existing[2], existing[3])
    except Exception as e:
        module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)))
    if module._diff and prepared_diff:
        result['diff'] = dict(prepared=prepared_diff)
    module.exit_json(changed=changed, **result)
def main():
    """Entry point: validate the environment and dispatch on 'state'."""
    module = AnsibleModule(
        argument_spec=dict(
            ignore_selinux_state=dict(type='bool', default=False),
            target=dict(type='str', required=True, aliases=['path']),
            ftype=dict(type='str', default='a', choices=option_to_file_type_str.keys()),
            setype=dict(type='str', required=True),
            seuser=dict(type='str'),
            selevel=dict(type='str', aliases=['serange']),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            reload=dict(type='bool', default=True),
        ),
        supports_check_mode=True,
    )
    # Both python bindings are needed before anything SELinux-related runs.
    if not HAVE_SELINUX:
        module.fail_json(msg=missing_required_lib("libselinux-python"), exception=SELINUX_IMP_ERR)
    if not HAVE_SEOBJECT:
        module.fail_json(msg=missing_required_lib("policycoreutils-python"), exception=SEOBJECT_IMP_ERR)

    if not get_runtime_status(module.params['ignore_selinux_state']):
        module.fail_json(msg="SELinux is disabled on this host.")

    params = module.params
    result = dict(target=params['target'], ftype=params['ftype'],
                  setype=params['setype'], state=params['state'])

    if params['state'] == 'present':
        semanage_fcontext_modify(module, result, params['target'], params['ftype'],
                                 params['setype'], params['reload'],
                                 params['selevel'], params['seuser'])
    elif params['state'] == 'absent':
        semanage_fcontext_delete(module, result, params['target'], params['ftype'],
                                 params['reload'])
    else:
        module.fail_json(msg='Invalid value of argument "state": {0}'.format(params['state']))


if __name__ == '__main__':
    main()

View file

@ -0,0 +1,131 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Michael Scherer <misc@zarb.org>
# inspired by code of github.com/dandiker/
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: selinux_permissive
short_description: Change permissive domain in SELinux policy
description:
- Add and remove a domain from the list of permissive domains.
options:
domain:
description:
- The domain that will be added or removed from the list of permissive domains.
type: str
required: true
default: ''
aliases: [ name ]
permissive:
description:
- Indicate if the domain should or should not be set as permissive.
type: bool
required: true
no_reload:
description:
- Disable reloading of the SELinux policy after making change to a domain's permissive setting.
- The default is C(no), which causes policy to be reloaded when a domain changes state.
- Reloading the policy does not work on older versions of the C(policycoreutils-python) library, for example in EL 6.
type: bool
default: no
store:
description:
- Name of the SELinux policy store to use.
type: str
notes:
- Requires a recent version of SELinux and C(policycoreutils-python) (EL 6 or newer).
requirements: [ policycoreutils-python ]
author:
- Michael Scherer (@mscherer) <misc@zarb.org>
'''
EXAMPLES = r'''
- name: Change the httpd_t domain to permissive
selinux_permissive:
name: httpd_t
permissive: true
'''
import traceback
HAVE_SEOBJECT = False
SEOBJECT_IMP_ERR = None
try:
import seobject
HAVE_SEOBJECT = True
except ImportError:
SEOBJECT_IMP_ERR = traceback.format_exc()
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
def main():
    """Toggle the SELinux permissive flag for a single domain."""
    module = AnsibleModule(
        argument_spec=dict(
            domain=dict(type='str', required=True, aliases=['name']),
            store=dict(type='str', default=''),
            permissive=dict(type='bool', required=True),
            no_reload=dict(type='bool', default=False),
        ),
        supports_check_mode=True,
    )

    params = module.params
    store = params['store']
    permissive = params['permissive']
    domain = params['domain']
    no_reload = params['no_reload']

    if not HAVE_SEOBJECT:
        module.fail_json(changed=False, msg=missing_required_lib("policycoreutils-python"),
                         exception=SEOBJECT_IMP_ERR)

    try:
        records = seobject.permissiveRecords(store)
    except ValueError as exc:
        module.fail_json(domain=domain, msg=to_native(exc), exception=traceback.format_exc())

    # set_reload() does not exist on EL 6's policycoreutils; only call it when present.
    if 'set_reload' in dir(records):
        records.set_reload(not no_reload)

    try:
        current = records.get_all()
    except ValueError as exc:
        module.fail_json(domain=domain, msg=to_native(exc), exception=traceback.format_exc())

    changed = False
    listed = domain in current
    if permissive and not listed:
        # Domain must be added to the permissive list.
        if not module.check_mode:
            try:
                records.add(domain)
            except ValueError as exc:
                module.fail_json(domain=domain, msg=to_native(exc), exception=traceback.format_exc())
        changed = True
    elif not permissive and listed:
        # Domain must be removed from the permissive list.
        if not module.check_mode:
            try:
                records.delete(domain)
            except ValueError as exc:
                module.fail_json(domain=domain, msg=to_native(exc), exception=traceback.format_exc())
        changed = True

    module.exit_json(changed=changed, store=store,
                     permissive=permissive, domain=domain)
if __name__ == '__main__':
main()

View file

@ -0,0 +1,259 @@
#!/usr/bin/python
# (c) 2017, Petr Lautrbach <plautrba@redhat.com>
# Based on seport.py module (c) 2014, Dan Keder <dan.keder@gmail.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: selogin
short_description: Manages linux user to SELinux user mapping
description:
- Manages linux user to SELinux user mapping
options:
login:
description:
- a Linux user
required: true
seuser:
description:
- SELinux user name
required: true
selevel:
aliases: [ serange ]
description:
- MLS/MCS Security Range (MLS/MCS Systems only) SELinux Range for SELinux login mapping defaults to the SELinux user record range.
default: s0
state:
description:
- Desired mapping value.
required: true
default: present
choices: [ 'present', 'absent' ]
reload:
description:
- Reload SELinux policy after commit.
default: yes
ignore_selinux_state:
description:
- Run independent of selinux runtime state
type: bool
default: false
notes:
- The changes are persistent across reboots
- Not tested on any debian based system
requirements: [ 'libselinux', 'policycoreutils' ]
author:
- Dan Keder (@dankeder)
- Petr Lautrbach (@bachradsusi)
- James Cassell (@jamescassell)
'''
EXAMPLES = '''
# Modify the default user on the system to the guest_u user
- selogin:
login: __default__
seuser: guest_u
state: present
# Assign gijoe user on an MLS machine a range and to the staff_u user
- selogin:
login: gijoe
seuser: staff_u
serange: SystemLow-Secret
state: present
# Assign all users in the engineering group to the staff_u user
- selogin:
login: '%engineering'
seuser: staff_u
state: present
'''
RETURN = r'''
# Default return values
'''
import traceback
SELINUX_IMP_ERR = None
try:
import selinux
HAVE_SELINUX = True
except ImportError:
SELINUX_IMP_ERR = traceback.format_exc()
HAVE_SELINUX = False
SEOBJECT_IMP_ERR = None
try:
import seobject
HAVE_SEOBJECT = True
except ImportError:
SEOBJECT_IMP_ERR = traceback.format_exc()
HAVE_SEOBJECT = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
def semanage_login_add(module, login, seuser, do_reload, serange='s0', sestore=''):
    """Ensure a Linux login is mapped to the given SELinux user.

    :type module: AnsibleModule
    :param module: Ansible module

    :type login: str
    :param login: a Linux user, or a Linux group when prefixed with %

    :type seuser: str
    :param seuser: an SELinux user ('__default__', 'unconfined_u', 'staff_u', ...), see 'semanage login -l'

    :type do_reload: bool
    :param do_reload: whether to reload the SELinux policy after commit

    :type serange: str
    :param serange: SELinux MLS/MCS range (defaults to 's0')

    :type sestore: str
    :param sestore: SELinux store

    :rtype: bool
    :return: True if a mapping was created or modified, otherwise False
    """
    try:
        records = seobject.loginRecords(sestore)
        records.set_reload(do_reload)
        changed = False
        existing = records.get_all()

        if login in existing:
            # Mapping exists: modify it only when the user or range differ.
            if (existing[login][0], existing[login][1]) != (seuser, serange):
                changed = True
                if not module.check_mode:
                    records.modify(login, seuser, serange)
        else:
            changed = True
            if not module.check_mode:
                records.add(login, seuser, serange)
    except (ValueError, KeyError, OSError, RuntimeError) as e:
        module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc())

    return changed
def semanage_login_del(module, login, seuser, do_reload, sestore=''):
    """Remove the SELinux user mapping of a Linux login.

    :type module: AnsibleModule
    :param module: Ansible module

    :type login: str
    :param login: a Linux user, or a Linux group when prefixed with %

    :type seuser: str
    :param seuser: the SELinux user the login maps to (not needed for deletion)

    :type do_reload: bool
    :param do_reload: whether to reload the SELinux policy after commit

    :type sestore: str
    :param sestore: SELinux store

    :rtype: bool
    :return: True if a mapping was deleted, otherwise False
    """
    try:
        records = seobject.loginRecords(sestore)
        records.set_reload(do_reload)
        changed = False

        if login in records.get_all():
            changed = True
            if not module.check_mode:
                records.delete(login)
    except (ValueError, KeyError, OSError, RuntimeError) as e:
        module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc())

    return changed
def get_runtime_status(ignore_selinux_state=False):
    """Report the effective SELinux runtime status.

    When ignore_selinux_state is exactly True the libselinux check is
    skipped and True is returned; otherwise defer to libselinux.
    """
    if ignore_selinux_state is True:
        return True
    return selinux.is_selinux_enabled()
def main():
    """Entry point: manage a Linux login to SELinux user mapping."""
    module = AnsibleModule(
        argument_spec=dict(
            ignore_selinux_state=dict(type='bool', default=False),
            login=dict(type='str', required=True),
            seuser=dict(type='str'),
            selevel=dict(type='str', aliases=['serange'], default='s0'),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            reload=dict(type='bool', default=True),
        ),
        required_if=[
            ["state", "present", ["seuser"]]
        ],
        supports_check_mode=True
    )

    # Both python bindings are required before anything can be managed.
    if not HAVE_SELINUX:
        module.fail_json(msg=missing_required_lib("libselinux"), exception=SELINUX_IMP_ERR)
    if not HAVE_SEOBJECT:
        module.fail_json(msg=missing_required_lib("seobject from policycoreutils"), exception=SEOBJECT_IMP_ERR)

    if not get_runtime_status(module.params['ignore_selinux_state']):
        module.fail_json(msg="SELinux is disabled on this host.")

    login = module.params['login']
    seuser = module.params['seuser']
    serange = module.params['selevel']
    state = module.params['state']
    do_reload = module.params['reload']

    result = dict(login=login, seuser=seuser, serange=serange, state=state)

    if state == 'present':
        result['changed'] = semanage_login_add(module, login, seuser, do_reload, serange)
    elif state == 'absent':
        result['changed'] = semanage_login_del(module, login, seuser, do_reload)
    else:
        module.fail_json(msg='Invalid value of argument "state": {0}'.format(state))

    module.exit_json(**result)
if __name__ == '__main__':
main()

View file

@ -0,0 +1,309 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, Dan Keder <dan.keder@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: seport
short_description: Manages SELinux network port type definitions
description:
- Manages SELinux network port type definitions.
options:
ports:
description:
- Ports or port ranges.
- Can be a list (since 2.6) or comma separated string.
type: list
required: true
proto:
description:
- Protocol for the specified port.
type: str
required: true
choices: [ tcp, udp ]
setype:
description:
- SELinux type for the specified port.
type: str
required: true
state:
description:
- Desired boolean value.
type: str
choices: [ absent, present ]
default: present
reload:
description:
- Reload SELinux policy after commit.
type: bool
default: yes
ignore_selinux_state:
description:
- Run independent of selinux runtime state
type: bool
default: no
notes:
- The changes are persistent across reboots.
- Not tested on any debian based system.
requirements:
- libselinux-python
- policycoreutils-python
author:
- Dan Keder (@dankeder)
'''
EXAMPLES = r'''
- name: Allow Apache to listen on tcp port 8888
seport:
ports: 8888
proto: tcp
setype: http_port_t
state: present
- name: Allow sshd to listen on tcp port 8991
seport:
ports: 8991
proto: tcp
setype: ssh_port_t
state: present
- name: Allow memcached to listen on tcp ports 10000-10100 and 10112
seport:
ports: 10000-10100,10112
proto: tcp
setype: memcache_port_t
state: present
- name: Allow memcached to listen on tcp ports 10000-10100 and 10112
seport:
ports:
- 10000-10100
- 10112
proto: tcp
setype: memcache_port_t
state: present
'''
import traceback
SELINUX_IMP_ERR = None
try:
import selinux
HAVE_SELINUX = True
except ImportError:
SELINUX_IMP_ERR = traceback.format_exc()
HAVE_SELINUX = False
SEOBJECT_IMP_ERR = None
try:
import seobject
HAVE_SEOBJECT = True
except ImportError:
SEOBJECT_IMP_ERR = traceback.format_exc()
HAVE_SEOBJECT = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
def get_runtime_status(ignore_selinux_state=False):
    """Return the SELinux runtime status, or True when told to ignore it.

    Passing ignore_selinux_state=True bypasses libselinux completely.
    """
    if ignore_selinux_state is True:
        return True
    return selinux.is_selinux_enabled()
def semanage_port_get_ports(seport, setype, proto):
    """ Get the list of ports that have the specified type definition.

    :param seport: Instance of seobject.portRecords

    :type setype: str
    :param setype: SELinux type.

    :type proto: str
    :param proto: Protocol ('tcp' or 'udp')

    :rtype: list
    :return: List of ports that have the specified SELinux type.
    """
    # Records are keyed by (setype, proto); missing keys mean no ports.
    return seport.get_all_by_type().get((setype, proto), [])
def semanage_port_get_type(seport, port, proto):
    """ Get the SELinux type of the specified port.

    :param seport: Instance of seobject.portRecords

    :type port: str
    :param port: Port or port range (example: "8080", "8080-9090")

    :type proto: str
    :param proto: Protocol ('tcp' or 'udp')

    :rtype: tuple
    :return: Tuple containing the SELinux type and MLS/MCS level, or None if not found.
    """
    if isinstance(port, str):
        # "8080-9090" splits into its bounds; a lone port is its own range.
        low, sep, high = port.partition('-')
        if not sep:
            high = low
    else:
        low = high = port
    # Records are keyed by (low, high, proto) with integer bounds.
    return seport.get_all().get((int(low), int(high), proto))
def semanage_port_add(module, ports, proto, setype, do_reload, serange='s0', sestore=''):
    """ Add SELinux port type definition to the policy.

    :type module: AnsibleModule
    :param module: Ansible module

    :type ports: list
    :param ports: List of ports and port ranges to add (e.g. ["8080", "8080-9090"])

    :type proto: str
    :param proto: Protocol ('tcp' or 'udp')

    :type setype: str
    :param setype: SELinux type

    :type do_reload: bool
    :param do_reload: Whether to reload SELinux policy after commit

    :type serange: str
    :param serange: SELinux MLS/MCS range (defaults to 's0')

    :type sestore: str
    :param sestore: SELinux store

    :rtype: bool
    :return: True if the policy was changed, otherwise False
    """
    try:
        records = seobject.portRecords(sestore)
        records.set_reload(do_reload)
        changed = False
        already_typed = semanage_port_get_ports(records, setype, proto)

        for port in ports:
            if port in already_typed:
                continue
            changed = True
            existing = semanage_port_get_type(records, port, proto)
            if module.check_mode:
                continue
            # Modify when the port already carries another type, add otherwise.
            if existing is None:
                records.add(port, proto, serange, setype)
            else:
                records.modify(port, proto, serange, setype)
    except (ValueError, IOError, KeyError, OSError, RuntimeError) as e:
        module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc())

    return changed
def semanage_port_del(module, ports, proto, setype, do_reload, sestore=''):
    """ Delete SELinux port type definition from the policy.

    :type module: AnsibleModule
    :param module: Ansible module

    :type ports: list
    :param ports: List of ports and port ranges to delete (e.g. ["8080", "8080-9090"])

    :type proto: str
    :param proto: Protocol ('tcp' or 'udp')

    :type setype: str
    :param setype: SELinux type.

    :type do_reload: bool
    :param do_reload: Whether to reload SELinux policy after commit

    :type sestore: str
    :param sestore: SELinux store

    :rtype: bool
    :return: True if the policy was changed, otherwise False
    """
    try:
        records = seobject.portRecords(sestore)
        records.set_reload(do_reload)
        changed = False
        owned = semanage_port_get_ports(records, setype, proto)

        for port in ports:
            if port not in owned:
                continue
            changed = True
            if not module.check_mode:
                records.delete(port, proto)
    except (ValueError, IOError, KeyError, OSError, RuntimeError) as e:
        module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc())

    return changed
def main():
    """Entry point: manage SELinux network port type definitions."""
    module = AnsibleModule(
        argument_spec=dict(
            ignore_selinux_state=dict(type='bool', default=False),
            ports=dict(type='list', required=True),
            proto=dict(type='str', required=True, choices=['tcp', 'udp']),
            setype=dict(type='str', required=True),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            reload=dict(type='bool', default=True),
        ),
        supports_check_mode=True,
    )

    # Both python bindings are required before anything can be managed.
    if not HAVE_SELINUX:
        module.fail_json(msg=missing_required_lib("libselinux-python"), exception=SELINUX_IMP_ERR)
    if not HAVE_SEOBJECT:
        module.fail_json(msg=missing_required_lib("policycoreutils-python"), exception=SEOBJECT_IMP_ERR)

    if not get_runtime_status(module.params['ignore_selinux_state']):
        module.fail_json(msg="SELinux is disabled on this host.")

    ports = module.params['ports']
    proto = module.params['proto']
    setype = module.params['setype']
    state = module.params['state']
    do_reload = module.params['reload']

    result = dict(ports=ports, proto=proto, setype=setype, state=state)

    if state == 'present':
        result['changed'] = semanage_port_add(module, ports, proto, setype, do_reload)
    elif state == 'absent':
        result['changed'] = semanage_port_del(module, ports, proto, setype, do_reload)
    else:
        module.fail_json(msg='Invalid value of argument "state": {0}'.format(state))

    module.exit_json(**result)
if __name__ == '__main__':
main()

View file

@ -0,0 +1,490 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Paul Markham <pmarkham@netrefinery.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: solaris_zone
short_description: Manage Solaris zones
description:
- Create, start, stop and delete Solaris zones.
- This module does not currently allow changing of options for a zone that has already been created.
author:
- Paul Markham (@pmarkham)
requirements:
- Solaris 10 or 11
options:
state:
description:
- C(present), configure and install the zone.
- C(installed), synonym for C(present).
- C(running), if the zone already exists, boot it, otherwise, configure and install
the zone first, then boot it.
- C(started), synonym for C(running).
- C(stopped), shutdown a zone.
- C(absent), destroy the zone.
- C(configured), configure the zone so that it is ready to be attached.
- C(attached), attach a zone, but do not boot it.
- C(detached), shutdown and detach a zone
type: str
choices: [ absent, attached, configured, detached, installed, present, running, started, stopped ]
default: present
required: true
name:
description:
- Zone name.
- A zone name must be a unique name.
- A zone name must begin with an alpha-numeric character.
- The name can contain alpha-numeric characters, underbars I(_), hyphens I(-), and periods I(.).
- The name cannot be longer than 64 characters.
type: str
required: true
path:
description:
- The path where the zone will be created. This is required when the zone is created, but not
used otherwise.
type: str
sparse:
description:
- Whether to create a sparse (C(true)) or whole root (C(false)) zone.
type: bool
default: no
root_password:
description:
- The password hash for the root account. If not specified, the zone's root account
will not have a password.
type: str
config:
description:
- 'The zonecfg configuration commands for this zone. See zonecfg(1M) for the valid options
and syntax. Typically this is a list of options separated by semi-colons or new lines, e.g.
"set auto-boot=true;add net;set physical=bge0;set address=10.1.1.1;end"'
type: str
default: ''
create_options:
description:
- 'Extra options to the zonecfg(1M) create command.'
type: str
default: ''
install_options:
description:
- 'Extra options to the zoneadm(1M) install command. To automate Solaris 11 zone creation,
use this to specify the profile XML file, e.g. install_options="-c sc_profile.xml"'
type: str
default: ''
attach_options:
description:
- 'Extra options to the zoneadm attach command. For example, this can be used to specify
whether a minimum or full update of packages is required and if any packages need to
be deleted. For valid values, see zoneadm(1M)'
type: str
default: ''
timeout:
description:
- Timeout, in seconds, for zone to boot.
type: int
default: 600
'''
EXAMPLES = '''
- name: Create and install a zone, but don't boot it
solaris_zone:
name: zone1
state: present
path: /zones/zone1
sparse: True
root_password: Be9oX7OSwWoU.
config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'
- name: Create and install a zone and boot it
solaris_zone:
name: zone1
state: running
path: /zones/zone1
root_password: Be9oX7OSwWoU.
config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'
- name: Boot an already installed zone
solaris_zone:
name: zone1
state: running
- name: Stop a zone
solaris_zone:
name: zone1
state: stopped
- name: Destroy a zone
solaris_zone:
name: zone1
state: absent
- name: Detach a zone
solaris_zone:
name: zone1
state: detached
- name: Configure a zone, ready to be attached
solaris_zone:
name: zone1
state: configured
path: /zones/zone1
root_password: Be9oX7OSwWoU.
config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'
- name: Attach zone1
solaris_zone:
name: zone1
state: attached
attach_options: -u
'''
import os
import platform
import re
import tempfile
import time
from ansible.module_utils.basic import AnsibleModule
class Zone(object):
    """Driver for Solaris zone lifecycle operations via zonecfg(1M)/zoneadm(1M).

    All state-changing methods honour Ansible check mode, append progress
    messages to ``self.msg`` and record in ``self.changed`` whether anything
    was modified on the host.
    """

    def __init__(self, module):
        self.changed = False
        self.msg = []

        self.module = module
        self.path = self.module.params['path']
        self.name = self.module.params['name']
        self.sparse = self.module.params['sparse']
        self.root_password = self.module.params['root_password']
        self.timeout = self.module.params['timeout']
        self.config = self.module.params['config']
        self.create_options = self.module.params['create_options']
        self.install_options = self.module.params['install_options']
        self.attach_options = self.module.params['attach_options']

        self.zoneadm_cmd = self.module.get_bin_path('zoneadm', True)
        self.zonecfg_cmd = self.module.get_bin_path('zonecfg', True)
        self.ssh_keygen_cmd = self.module.get_bin_path('ssh-keygen', True)

        if self.module.check_mode:
            self.msg.append('Running in check mode')

        if platform.system() != 'SunOS':
            self.module.fail_json(msg='This module requires Solaris')

        (self.os_major, self.os_minor) = platform.release().split('.')
        if int(self.os_minor) < 10:
            self.module.fail_json(msg='This module requires Solaris 10 or later')

        match = re.match('^[a-zA-Z0-9][-_.a-zA-Z0-9]{0,62}$', self.name)
        if not match:
            self.module.fail_json(msg="Provided zone name is not a valid zone name. "
                                      "Please refer documentation for correct zone name specifications.")

    def configure(self):
        """Create the zone configuration with zonecfg(1M)."""
        if not self.path:
            self.module.fail_json(msg='Missing required argument: path')

        if not self.module.check_mode:
            # zonecfg reads its commands from a file, so stage them in a
            # temporary file. mode='w' is required: NamedTemporaryFile defaults
            # to binary mode, which rejects the str commands written below on
            # Python 3 (TypeError).
            t = tempfile.NamedTemporaryFile(mode='w', delete=False)

            if self.sparse:
                t.write('create %s\n' % self.create_options)
                self.msg.append('creating sparse-root zone')
            else:
                t.write('create -b %s\n' % self.create_options)
                self.msg.append('creating whole-root zone')

            t.write('set zonepath=%s\n' % self.path)
            t.write('%s\n' % self.config)
            t.close()

            cmd = '%s -z %s -f %s' % (self.zonecfg_cmd, self.name, t.name)
            (rc, out, err) = self.module.run_command(cmd)
            if rc != 0:
                self.module.fail_json(msg='Failed to create zone. %s' % (out + err))
            os.unlink(t.name)

        self.changed = True
        self.msg.append('zone configured')

    def install(self):
        """Install the configured zone; on Solaris 10 also pre-seed sysid data."""
        if not self.module.check_mode:
            cmd = '%s -z %s install %s' % (self.zoneadm_cmd, self.name, self.install_options)
            (rc, out, err) = self.module.run_command(cmd)
            if rc != 0:
                self.module.fail_json(msg='Failed to install zone. %s' % (out + err))
            if int(self.os_minor) == 10:
                # Solaris 10 zones prompt via sysidtool on first boot unless
                # the answers are written in advance.
                self.configure_sysid()
                self.configure_password()
                self.configure_ssh_keys()

        self.changed = True
        self.msg.append('zone installed')

    def uninstall(self):
        """Uninstall the zone when installed; no-op otherwise."""
        if self.is_installed():
            if not self.module.check_mode:
                cmd = '%s -z %s uninstall -F' % (self.zoneadm_cmd, self.name)
                (rc, out, err) = self.module.run_command(cmd)
                if rc != 0:
                    self.module.fail_json(msg='Failed to uninstall zone. %s' % (out + err))
            self.changed = True
            self.msg.append('zone uninstalled')

    def configure_sysid(self):
        """Write sysidtool answer files so a Solaris 10 zone boots unattended."""
        if os.path.isfile('%s/root/etc/.UNCONFIGURED' % self.path):
            os.unlink('%s/root/etc/.UNCONFIGURED' % self.path)

        # An empty noautoshutdown marker file; created, then immediately closed.
        open('%s/root/noautoshutdown' % self.path, 'w').close()

        with open('%s/root/etc/nodename' % self.path, 'w') as node:
            node.write(self.name)

        # Pre-answer every sysIDtool question (1 = answered/skip the prompt).
        with open('%s/root/etc/.sysIDtool.state' % self.path, 'w') as state_file:
            state_file.write('1 # System previously configured?\n')
            state_file.write('1 # Bootparams succeeded?\n')
            state_file.write('1 # System is on a network?\n')
            state_file.write('1 # Extended network information gathered?\n')
            state_file.write('0 # Autobinder succeeded?\n')
            state_file.write('1 # Network has subnets?\n')
            state_file.write('1 # root password prompted for?\n')
            state_file.write('1 # locale and term prompted for?\n')
            state_file.write('1 # security policy in place\n')
            state_file.write('1 # NFSv4 domain configured\n')
            state_file.write('0 # Auto Registration Configured\n')
            state_file.write('vt100')

    def configure_ssh_keys(self):
        """Generate host SSH keys inside the zone if they do not exist yet."""
        rsa_key_file = '%s/root/etc/ssh/ssh_host_rsa_key' % self.path
        dsa_key_file = '%s/root/etc/ssh/ssh_host_dsa_key' % self.path

        if not os.path.isfile(rsa_key_file):
            cmd = '%s -f %s -t rsa -N ""' % (self.ssh_keygen_cmd, rsa_key_file)
            (rc, out, err) = self.module.run_command(cmd)
            if rc != 0:
                self.module.fail_json(msg='Failed to create rsa key. %s' % (out + err))

        if not os.path.isfile(dsa_key_file):
            cmd = '%s -f %s -t dsa -N ""' % (self.ssh_keygen_cmd, dsa_key_file)
            (rc, out, err) = self.module.run_command(cmd)
            if rc != 0:
                self.module.fail_json(msg='Failed to create dsa key. %s' % (out + err))

    def configure_password(self):
        """Install the supplied root password hash into the zone's shadow file."""
        shadow = '%s/root/etc/shadow' % self.path
        if self.root_password:
            with open(shadow, 'r') as f:
                lines = f.readlines()

            # shadow(4) format: fields are colon-separated, field 1 is the hash.
            for i in range(0, len(lines)):
                fields = lines[i].split(':')
                if fields[0] == 'root':
                    fields[1] = self.root_password
                    lines[i] = ':'.join(fields)

            with open(shadow, 'w') as f:
                for line in lines:
                    f.write(line)

    def boot(self):
        """Boot the zone and wait until its console login service is running."""
        if not self.module.check_mode:
            cmd = '%s -z %s boot' % (self.zoneadm_cmd, self.name)
            (rc, out, err) = self.module.run_command(cmd)
            if rc != 0:
                self.module.fail_json(msg='Failed to boot zone. %s' % (out + err))

            # The boot command can return before the zone has fully booted. This
            # is especially true on the first boot when the zone initializes the
            # SMF services. Unless the zone has fully booted, subsequent tasks in
            # the playbook may fail as services aren't running yet. Wait until
            # the zone's console login is running; once that's running, consider
            # the zone booted.
            elapsed = 0
            while True:
                if elapsed > self.timeout:
                    self.module.fail_json(msg='timed out waiting for zone to boot')
                rc = os.system('ps -z %s -o args|grep "ttymon.*-d /dev/console" > /dev/null 2>/dev/null' % self.name)
                if rc == 0:
                    break
                time.sleep(10)
                elapsed += 10

        self.changed = True
        self.msg.append('zone booted')

    def destroy(self):
        """Halt, uninstall and delete the zone configuration."""
        if self.is_running():
            self.stop()
        if self.is_installed():
            self.uninstall()

        if not self.module.check_mode:
            cmd = '%s -z %s delete -F' % (self.zonecfg_cmd, self.name)
            (rc, out, err) = self.module.run_command(cmd)
            if rc != 0:
                self.module.fail_json(msg='Failed to delete zone. %s' % (out + err))

        self.changed = True
        self.msg.append('zone deleted')

    def stop(self):
        """Halt the zone."""
        if not self.module.check_mode:
            cmd = '%s -z %s halt' % (self.zoneadm_cmd, self.name)
            (rc, out, err) = self.module.run_command(cmd)
            if rc != 0:
                self.module.fail_json(msg='Failed to stop zone. %s' % (out + err))

        self.changed = True
        self.msg.append('zone stopped')

    def detach(self):
        """Detach the zone from the system."""
        if not self.module.check_mode:
            cmd = '%s -z %s detach' % (self.zoneadm_cmd, self.name)
            (rc, out, err) = self.module.run_command(cmd)
            if rc != 0:
                self.module.fail_json(msg='Failed to detach zone. %s' % (out + err))

        self.changed = True
        self.msg.append('zone detached')

    def attach(self):
        """Attach the zone, passing through any user-supplied attach options."""
        if not self.module.check_mode:
            cmd = '%s -z %s attach %s' % (self.zoneadm_cmd, self.name, self.attach_options)
            (rc, out, err) = self.module.run_command(cmd)
            if rc != 0:
                self.module.fail_json(msg='Failed to attach zone. %s' % (out + err))

        self.changed = True
        self.msg.append('zone attached')

    def exists(self):
        """Return True when the zone is known to zoneadm."""
        cmd = '%s -z %s list' % (self.zoneadm_cmd, self.name)
        (rc, out, err) = self.module.run_command(cmd)
        return rc == 0

    def is_running(self):
        """Return True when the zone state is 'running'."""
        return self.status() == 'running'

    def is_installed(self):
        """Return True when the zone state is 'installed'."""
        return self.status() == 'installed'

    def is_configured(self):
        """Return True when the zone state is 'configured'."""
        return self.status() == 'configured'

    def status(self):
        """Return the zone state from 'zoneadm list -p', or 'undefined'."""
        cmd = '%s -z %s list -p' % (self.zoneadm_cmd, self.name)
        (rc, out, err) = self.module.run_command(cmd)
        if rc == 0:
            # Parseable output is colon-separated; field 2 is the state.
            return out.split(':')[2]
        return 'undefined'

    def state_present(self):
        """Ensure the zone is configured and installed."""
        if self.exists():
            self.msg.append('zone already exists')
        else:
            self.configure()
            self.install()

    def state_running(self):
        """Ensure the zone exists and is booted."""
        self.state_present()
        if self.is_running():
            self.msg.append('zone already running')
        else:
            self.boot()

    def state_stopped(self):
        """Halt an existing zone; fail if it does not exist."""
        if self.exists():
            self.stop()
        else:
            self.module.fail_json(msg='zone does not exist')

    def state_absent(self):
        """Remove the zone entirely (halting it first if needed)."""
        if self.exists():
            if self.is_running():
                self.stop()
            self.destroy()
        else:
            self.msg.append('zone does not exist')

    def state_configured(self):
        """Ensure the zone configuration exists without installing it."""
        if self.exists():
            self.msg.append('zone already exists')
        else:
            self.configure()

    def state_detached(self):
        """Halt and detach an existing zone; fail if it does not exist."""
        if not self.exists():
            self.module.fail_json(msg='zone does not exist')
        if self.is_configured():
            self.msg.append('zone already detached')
        else:
            self.stop()
            self.detach()

    def state_attached(self):
        """Attach a zone that is in the 'configured' state."""
        if not self.exists():
            # NOTE(review): unlike state_detached() this only records a message
            # and falls through; is_configured() then reports 'undefined' so the
            # 'zone already attached' branch runs — confirm whether a hard
            # failure was intended here.
            self.msg.append('zone does not exist')
        if self.is_configured():
            self.attach()
        else:
            self.msg.append('zone already attached')
def main():
    """Entry point: dispatch the requested zone state to a Zone handler."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True),
            state=dict(type='str', default='present',
                       choices=['absent', 'attached', 'configured', 'detached', 'installed', 'present', 'running', 'started', 'stopped']),
            path=dict(type='str'),
            sparse=dict(type='bool', default=False),
            root_password=dict(type='str', no_log=True),
            timeout=dict(type='int', default=600),
            config=dict(type='str', default=''),
            create_options=dict(type='str', default=''),
            install_options=dict(type='str', default=''),
            attach_options=dict(type='str', default=''),
        ),
        supports_check_mode=True,
    )

    zone = Zone(module)
    state = module.params['state']

    # Map each accepted state (including its synonyms) onto a Zone handler.
    handlers = {
        'running': zone.state_running,
        'started': zone.state_running,
        'present': zone.state_present,
        'installed': zone.state_present,
        'stopped': zone.state_stopped,
        'absent': zone.state_absent,
        'configured': zone.state_configured,
        'detached': zone.state_detached,
        'attached': zone.state_attached,
    }
    handler = handlers.get(state)
    if handler is None:
        module.fail_json(msg='Invalid state: %s' % state)
    handler()

    module.exit_json(changed=zone.changed, msg=', '.join(zone.msg))
if __name__ == '__main__':
main()

View file

@ -0,0 +1,302 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2015, Brian Coca <bcoca@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: svc
author:
- Brian Coca (@bcoca)
short_description: Manage daemontools services
description:
- Controls daemontools services on remote hosts using the svc utility.
options:
name:
description:
- Name of the service to manage.
type: str
required: true
state:
description:
- C(Started)/C(stopped) are idempotent actions that will not run
commands unless necessary. C(restarted) will always bounce the
svc (svc -t) and C(killed) will always bounce the svc (svc -k).
C(reloaded) will send a sigusr1 (svc -1).
C(once) will run a normally downed svc once (svc -o), not really
an idempotent operation.
type: str
choices: [ killed, once, reloaded, restarted, started, stopped ]
downed:
description:
- Should a 'down' file exist or not, if it exists it disables auto startup.
Defaults to no. Downed does not imply stopped.
type: bool
default: no
enabled:
description:
- Whether the service is enabled or not, if disabled it also implies stopped.
Take note that a service can be enabled and downed (no auto restart).
type: bool
service_dir:
description:
- Directory svscan watches for services
type: str
default: /service
service_src:
description:
- Directory where services are defined, the source of symlinks to service_dir.
type: str
default: /etc/service
'''
EXAMPLES = '''
- name: Start svc dnscache, if not running
svc:
name: dnscache
state: started
- name: Stop svc dnscache, if running
svc:
name: dnscache
state: stopped
- name: Kill svc dnscache, in all cases
svc:
name: dnscache
state: killed
- name: Restart svc dnscache, in all cases
svc:
name: dnscache
state: restarted
- name: Reload svc dnscache, in all cases
svc:
name: dnscache
state: reloaded
- name: Using alternative svc directory location
svc:
name: dnscache
state: reloaded
service_dir: /var/service
'''
import os
import re
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
def _load_dist_subclass(cls, *args, **kwargs):
'''
Used for derivative implementations
'''
subclass = None
distro = kwargs['module'].params['distro']
# get the most specific superclass for this platform
if distro is not None:
for sc in cls.__subclasses__():
if sc.distro is not None and sc.distro == distro:
subclass = sc
if subclass is None:
subclass = cls
return super(cls, subclass).__new__(subclass)
class Svc(object):
    """
    Main class that handles daemontools, can be subclassed and overridden in case
    we want to use a 'derivative' like encore, s6, etc
    """
    # def __new__(cls, *args, **kwargs):
    #     return _load_dist_subclass(cls, args, kwargs)
    def __init__(self, module):
        # Extra directories searched for the svc/svstat binaries
        # (daemontools conventionally installs under /command).
        self.extra_paths = ['/command', '/usr/local/bin']
        # Attributes exposed to the caller via report().
        self.report_vars = ['state', 'enabled', 'downed', 'svc_full', 'src_full', 'pid', 'duration', 'full_state']
        self.module = module
        self.name = module.params['name']
        self.service_dir = module.params['service_dir']
        self.service_src = module.params['service_src']
        self.enabled = None
        self.downed = None
        self.full_state = None
        self.state = None
        self.pid = None
        self.duration = None
        self.svc_cmd = module.get_bin_path('svc', opt_dirs=self.extra_paths)
        self.svstat_cmd = module.get_bin_path('svstat', opt_dirs=self.extra_paths)
        # svc_full is the entry svscan watches; src_full is the service
        # definition directory that entry links to.
        self.svc_full = '/'.join([self.service_dir, self.name])
        self.src_full = '/'.join([self.service_src, self.name])
        # lexists: the service counts as enabled even if the link is broken.
        self.enabled = os.path.lexists(self.svc_full)
        if self.enabled:
            self.downed = os.path.lexists('%s/down' % self.svc_full)
            self.get_status()
        else:
            self.downed = os.path.lexists('%s/down' % self.src_full)
            self.state = 'stopped'
    def enable(self):
        # Enable the service by symlinking its source dir into service_dir.
        if os.path.exists(self.src_full):
            try:
                os.symlink(self.src_full, self.svc_full)
            except OSError as e:
                self.module.fail_json(path=self.src_full, msg='Error while linking: %s' % to_native(e))
        else:
            self.module.fail_json(msg="Could not find source for service to enable (%s)." % self.src_full)
    def disable(self):
        # Remove the symlink, then bring the service (and its logger, if
        # present) down and tell supervise to exit (-dx).
        try:
            os.unlink(self.svc_full)
        except OSError as e:
            self.module.fail_json(path=self.svc_full, msg='Error while unlinking: %s' % to_native(e))
        # NOTE(review): -dx is issued against the source directory rather
        # than the (just removed) service_dir entry — confirm this is the
        # intended target for supervise shutdown.
        self.execute_command([self.svc_cmd, '-dx', self.src_full])
        src_log = '%s/log' % self.src_full
        if os.path.exists(src_log):
            self.execute_command([self.svc_cmd, '-dx', src_log])
    def get_status(self):
        # Parse `svstat` output into state / pid / duration / full_state.
        (rc, out, err) = self.execute_command([self.svstat_cmd, self.svc_full])
        if err is not None and err:
            self.full_state = self.state = err
        else:
            self.full_state = out
            m = re.search(r'\(pid (\d+)\)', out)
            if m:
                self.pid = m.group(1)
            m = re.search(r'(\d+) seconds', out)
            if m:
                self.duration = m.group(1)
            # Build a 'start'/'stopp' stem; an 'ing'/'ed' suffix is added below.
            if re.search(' up ', out):
                self.state = 'start'
            elif re.search(' down ', out):
                self.state = 'stopp'
            else:
                self.state = 'unknown'
                return
            # ' want ' in svstat output means the service is still
            # transitioning toward the desired state.
            if re.search(' want ', out):
                self.state += 'ing'
            else:
                self.state += 'ed'
    def start(self):
        return self.execute_command([self.svc_cmd, '-u', self.svc_full])
    def stopp(self):
        # Alias kept because callers may derive the method name from the
        # state string ('stopped' -> 'stopp').
        return self.stop()
    def stop(self):
        return self.execute_command([self.svc_cmd, '-d', self.svc_full])
    def once(self):
        return self.execute_command([self.svc_cmd, '-o', self.svc_full])
    def reload(self):
        # svc -1 sends SIGUSR1 to the service.
        return self.execute_command([self.svc_cmd, '-1', self.svc_full])
    def restart(self):
        return self.execute_command([self.svc_cmd, '-t', self.svc_full])
    def kill(self):
        return self.execute_command([self.svc_cmd, '-k', self.svc_full])
    def execute_command(self, cmd):
        # Run a command, failing the module on any execution error.
        try:
            (rc, out, err) = self.module.run_command(' '.join(cmd))
        except Exception as e:
            self.module.fail_json(msg="failed to execute: %s" % to_native(e), exception=traceback.format_exc())
        return (rc, out, err)
    def report(self):
        # Refresh status, then return the reportable attributes as a dict.
        self.get_status()
        states = {}
        for k in self.report_vars:
            states[k] = self.__dict__[k]
        return states
# ===========================================
# Main control flow
def main():
    """Module entry point: reconcile enabled/downed/state of a daemontools service."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True),
            state=dict(type='str', choices=['killed', 'once', 'reloaded', 'restarted', 'started', 'stopped']),
            enabled=dict(type='bool'),
            downed=dict(type='bool'),
            service_dir=dict(type='str', default='/service'),
            service_src=dict(type='str', default='/etc/service'),
        ),
        supports_check_mode=True,
    )
    # Force a C locale so svstat output parsing is stable.
    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
    state = module.params['state']
    enabled = module.params['enabled']
    downed = module.params['downed']
    svc = Svc(module)
    changed = False
    # NOTE: report() calls get_status() as a side effect, so this also
    # refreshes svc.state before the comparisons below. Keep it even though
    # orig_state itself is unused.
    orig_state = svc.report()
    if enabled is not None and enabled != svc.enabled:
        changed = True
        if not module.check_mode:
            try:
                if enabled:
                    svc.enable()
                else:
                    svc.disable()
            except (OSError, IOError) as e:
                module.fail_json(msg="Could not change service link: %s" % to_native(e))
    if state is not None and state != svc.state:
        changed = True
        if not module.check_mode:
            # Map the requested state to the Svc method implementing it.
            # The historical `state[:-2]` trick breaks for 'once'
            # ('once'[:-2] == 'on' -> AttributeError), so use an explicit
            # dispatch table instead.
            actions = {
                'killed': 'kill',
                'once': 'once',
                'reloaded': 'reload',
                'restarted': 'restart',
                'started': 'start',
                'stopped': 'stop',
            }
            getattr(svc, actions[state])()
    if downed is not None and downed != svc.downed:
        changed = True
        if not module.check_mode:
            d_file = "%s/down" % svc.svc_full
            try:
                if downed:
                    # An empty 'down' file disables auto-start of the service.
                    open(d_file, "a").close()
                else:
                    os.unlink(d_file)
            except (OSError, IOError) as e:
                module.fail_json(msg="Could not change downed file: %s " % (to_native(e)))
    module.exit_json(changed=changed, svc=svc.report())
if __name__ == '__main__':
main()

View file

@ -0,0 +1,180 @@
#!/usr/bin/python
# Copyright: (c) 2019, Andrew Klaus <andrewklaus@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: syspatch
short_description: Manage OpenBSD system patches
description:
- "Manage OpenBSD system patches using syspatch"
options:
apply:
description:
- Apply all available system patches
default: False
required: false
revert:
description:
- Revert system patches
required: false
type: str
choices: [ all, one ]
author:
- Andrew Klaus (@precurse)
'''
EXAMPLES = '''
- name: Apply all available system patches
syspatch:
apply: true
- name: Revert last patch
syspatch:
revert: one
- name: Revert all patches
syspatch:
revert: all
# NOTE: You can reboot automatically if a patch requires it:
- name: Apply all patches and store result
syspatch:
apply: true
register: syspatch
- name: Reboot if patch requires it
reboot:
when: syspatch.reboot_needed
'''
RETURN = r'''
rc:
description: The command return code (0 means success)
returned: always
type: int
stdout:
description: syspatch standard output
returned: always
type: str
sample: "001_rip6cksum"
stderr:
description: syspatch standard error
returned: always
type: str
sample: "syspatch: need root privileges"
reboot_needed:
description: Whether or not a reboot is required after an update
returned: always
type: bool
sample: True
'''
from ansible.module_utils.basic import AnsibleModule
def run_module():
    """Build the AnsibleModule, run syspatch, and exit with its result."""
    # Arguments a user can pass to the module; at least one of
    # apply/revert must be supplied.
    module = AnsibleModule(
        argument_spec=dict(
            apply=dict(type='bool'),
            revert=dict(type='str', choices=['all', 'one']),
        ),
        supports_check_mode=True,
        required_one_of=[['apply', 'revert']],
    )
    module.exit_json(**syspatch_run(module))
def syspatch_run(module):
    """Check for and apply or revert OpenBSD system patches via syspatch.

    Runs a read-only check first (-c for pending patches, -l for installed
    ones when reverting); only runs the mutating command when the check
    reports work to do and check mode is off.

    Returns a result dict: changed, reboot_needed, rc, stdout, stderr, warnings.
    """
    cmd = module.get_bin_path('syspatch', True)
    changed = False
    reboot_needed = False
    warnings = []
    # Set safe defaults for run_flag and check_flag
    run_flag = ['-c']
    check_flag = ['-c']
    if module.params['revert']:
        # -l lists installed patches; non-empty output means there is
        # something that can be reverted.
        check_flag = ['-l']
        if module.params['revert'] == 'all':
            run_flag = ['-R']
        else:
            run_flag = ['-r']
    elif module.params['apply']:
        # -c lists uninstalled (pending) patches.
        check_flag = ['-c']
        run_flag = []
    # Run check command
    rc, out, err = module.run_command([cmd] + check_flag)
    if rc != 0:
        module.fail_json(msg="Command %s failed rc=%d, out=%s, err=%s" % (cmd, rc, out, err))
    # Non-empty check output means there is something to apply/revert.
    change_pending = len(out) > 0
    if module.check_mode:
        changed = change_pending
    elif change_pending:
        rc, out, err = module.run_command([cmd] + run_flag)
        # Workaround syspatch ln bug:
        # http://openbsd-archive.7691.n7.nabble.com/Warning-applying-latest-syspatch-td354250.html
        if rc != 0 and err != 'ln: /usr/X11R6/bin/X: No such file or directory\n':
            module.fail_json(msg="Command %s failed rc=%d, out=%s, err=%s" % (cmd, rc, out, err))
        elif 'create unique kernel' in out.lower():
            # Kernel update applied. (Fix: the old `find(...) > 0` test
            # silently missed a match at offset 0.)
            reboot_needed = True
        elif 'syspatch updated itself' in out.lower():
            warnings.append('Syspatch was updated. Please run syspatch again.')
        # If no stdout, then warn user
        if len(out) == 0:
            warnings.append('syspatch had suggested changes, but stdout was empty.')
        changed = True
    else:
        changed = False
    return dict(
        changed=changed,
        reboot_needed=reboot_needed,
        rc=rc,
        stderr=err,
        stdout=out,
        warnings=warnings
    )
def main():
    """Module entry point."""
    run_module()
if __name__ == '__main__':
main()

View file

@ -0,0 +1,908 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Shinichi TAMURA (@tmshn)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: timezone
short_description: Configure timezone setting
description:
- This module configures the timezone setting, both of the system clock and of the hardware clock. If you want to set up the NTP, use M(service) module.
- It is recommended to restart C(crond) after changing the timezone, otherwise the jobs may run at the wrong time.
- Several different tools are used depending on the OS/Distribution involved.
For Linux it can use C(timedatectl) or edit C(/etc/sysconfig/clock) or C(/etc/timezone) and C(hwclock).
On SmartOS, C(sm-set-timezone), for macOS, C(systemsetup), for BSD, C(/etc/localtime) is modified.
On AIX, C(chtz) is used.
- As of Ansible 2.3 support was added for SmartOS and BSDs.
- As of Ansible 2.4 support was added for macOS.
- As of Ansible 2.9 support was added for AIX 6.1+
- Windows and HPUX are not supported, please let us know if you find any other OS/distro in which this fails.
options:
name:
description:
- Name of the timezone for the system clock.
- Default is to keep current setting.
- B(At least one of name and hwclock are required.)
type: str
hwclock:
description:
- Whether the hardware clock is in UTC or in local timezone.
- Default is to keep current setting.
- Note that this option is recommended not to change and may fail
to configure, especially on virtual environments such as AWS.
- B(At least one of name and hwclock are required.)
- I(Only used on Linux.)
type: str
aliases: [ rtc ]
choices: [ local, UTC ]
notes:
- On SmartOS the C(sm-set-timezone) utility (part of the smtools package) is required to set the zone timezone
- On AIX only Olson/tz database timezones are useable (POSIX is not supported).
- An OS reboot is also required on AIX for the new timezone setting to take effect.
author:
- Shinichi TAMURA (@tmshn)
- Jasper Lievisse Adriaanse (@jasperla)
- Indrajit Raychaudhuri (@indrajitr)
'''
RETURN = r'''
diff:
description: The differences about the given arguments.
returned: success
type: complex
contains:
before:
description: The values before change
type: dict
after:
description: The values after change
type: dict
'''
EXAMPLES = r'''
- name: Set timezone to Asia/Tokyo
timezone:
name: Asia/Tokyo
'''
import errno
import os
import platform
import random
import re
import string
import filecmp
from ansible.module_utils.basic import AnsibleModule, get_distribution
from ansible.module_utils.six import iteritems
class Timezone(object):
    """This is a generic Timezone manipulation class that is subclassed based on platform.
    A subclass may wish to override the following action methods:
    - get(key, phase) ... get the value from the system at `phase`
    - set(key, value) ... set the value to the current system
    """
    def __new__(cls, module):
        """Return the platform-specific subclass.
        It does not use load_platform_subclass() because it needs to judge based
        on whether the `timedatectl` command exists and is available.
        Args:
            module: The AnsibleModule.
        """
        if platform.system() == 'Linux':
            timedatectl = module.get_bin_path('timedatectl')
            if timedatectl is not None:
                # timedatectl may exist but be unusable (e.g. no D-Bus/systemd
                # at runtime), so probe it before committing to it.
                rc, stdout, stderr = module.run_command(timedatectl)
                if rc == 0:
                    return super(Timezone, SystemdTimezone).__new__(SystemdTimezone)
                else:
                    module.warn('timedatectl command was found but not usable: %s. using other method.' % stderr)
                    return super(Timezone, NosystemdTimezone).__new__(NosystemdTimezone)
            else:
                return super(Timezone, NosystemdTimezone).__new__(NosystemdTimezone)
        elif re.match('^joyent_.*Z', platform.version()):
            # platform.system() returns SunOS, which is too broad. So look at the
            # platform version instead. However we have to ensure that we're not
            # running in the global zone where changing the timezone has no effect.
            zonename_cmd = module.get_bin_path('zonename')
            if zonename_cmd is not None:
                (rc, stdout, _) = module.run_command(zonename_cmd)
                if rc == 0 and stdout.strip() == 'global':
                    module.fail_json(msg='Adjusting timezone is not supported in Global Zone')
            return super(Timezone, SmartOSTimezone).__new__(SmartOSTimezone)
        elif re.match('^Darwin', platform.platform()):
            return super(Timezone, DarwinTimezone).__new__(DarwinTimezone)
        elif re.match('^(Free|Net|Open)BSD', platform.platform()):
            return super(Timezone, BSDTimezone).__new__(BSDTimezone)
        elif platform.system() == 'AIX':
            # e.g. version '7' + release '1' -> 71
            AIXoslevel = int(platform.version() + platform.release())
            if AIXoslevel >= 61:
                return super(Timezone, AIXTimezone).__new__(AIXTimezone)
            else:
                module.fail_json(msg='AIX os level must be >= 61 for timezone module (Target: %s).' % AIXoslevel)
        else:
            # Not supported yet
            return super(Timezone, Timezone).__new__(Timezone)
    def __init__(self, module):
        """Initialize of the class.
        Args:
            module: The AnsibleModule.
        """
        super(Timezone, self).__init__()
        self.msg = []
        # `self.value` holds the values for each params on each phases.
        # Initially there's only info of "planned" phase, but the
        # `self.check()` function will fill out it.
        self.value = dict()
        for key in module.argument_spec:
            value = module.params[key]
            if value is not None:
                self.value[key] = dict(planned=value)
        self.module = module
    def abort(self, msg):
        """Abort the process with error message.
        This is just the wrapper of module.fail_json().
        Args:
            msg: The error message.
        """
        error_msg = ['Error message:', msg]
        if len(self.msg) > 0:
            error_msg.append('Other message(s):')
            error_msg.extend(self.msg)
        self.module.fail_json(msg='\n'.join(error_msg))
    def execute(self, *commands, **kwargs):
        """Execute the shell command.
        This is just the wrapper of module.run_command().
        Args:
            *commands: The command to execute.
                It will be concatenated with single space.
            **kwargs: Only 'log' key is checked.
                If kwargs['log'] is true, record the command to self.msg.
        Returns:
            stdout: Standard output of the command.
        """
        command = ' '.join(commands)
        (rc, stdout, stderr) = self.module.run_command(command, check_rc=True)
        if kwargs.get('log', False):
            self.msg.append('executed `%s`' % command)
        return stdout
    def diff(self, phase1='before', phase2='after'):
        """Calculate the difference between given 2 phases.
        Args:
            phase1, phase2: The names of phase to compare.
        Returns:
            diff: The difference of value between phase1 and phase2.
                This is in the format which can be used with the
                `--diff` option of ansible-playbook.
        """
        diff = {phase1: {}, phase2: {}}
        for key, value in iteritems(self.value):
            diff[phase1][key] = value[phase1]
            diff[phase2][key] = value[phase2]
        return diff
    def check(self, phase):
        """Check the state in given phase and set it to `self.value`.
        Args:
            phase: The name of the phase to check.
        Returns:
            NO RETURN VALUE
        """
        if phase == 'planned':
            return
        for key, value in iteritems(self.value):
            value[phase] = self.get(key, phase)
    def change(self):
        """Make the changes effect based on `self.value`."""
        for key, value in iteritems(self.value):
            if value['before'] != value['planned']:
                self.set(key, value['planned'])
    # ===========================================
    # Platform specific methods (must be replaced by subclass).
    def get(self, key, phase):
        """Get the value for the key at the given phase.
        Called from self.check().
        Args:
            key: The key to get the value
            phase: The phase to get the value
        Return:
            value: The value for the key at the given phase.
        """
        self.abort('get(key, phase) is not implemented on target platform')
    def set(self, key, value):
        """Set the value for the key (of course, for the phase 'after').
        Called from self.change().
        Args:
            key: Key to set the value
            value: Value to set
        """
        self.abort('set(key, value) is not implemented on target platform')
    def _verify_timezone(self):
        # Ensure the planned zone exists in the system tz database;
        # aborts the module otherwise. Returns the zoneinfo file path.
        tz = self.value['name']['planned']
        tzfile = '/usr/share/zoneinfo/%s' % tz
        if not os.path.isfile(tzfile):
            self.abort('given timezone "%s" is not available' % tz)
        return tzfile
class SystemdTimezone(Timezone):
    """This is a Timezone manipulation class for systemd-powered Linux.
    It uses the `timedatectl` command to check/set all arguments.
    """
    # Patterns extracting values from `timedatectl status` output.
    regexps = dict(
        hwclock=re.compile(r'^\s*RTC in local TZ\s*:\s*([^\s]+)', re.MULTILINE),
        name=re.compile(r'^\s*Time ?zone\s*:\s*([^\s]+)', re.MULTILINE)
    )
    # timedatectl subcommand used to set each key.
    subcmds = dict(
        hwclock='set-local-rtc',
        name='set-timezone'
    )
    def __init__(self, module):
        super(SystemdTimezone, self).__init__(module)
        self.timedatectl = module.get_bin_path('timedatectl', required=True)
        # Cache of `timedatectl status` output, keyed by phase.
        self.status = dict()
        # Validate given timezone
        if 'name' in self.value:
            self._verify_timezone()
    def _get_status(self, phase):
        # Run `timedatectl status` at most once per phase and cache it.
        if phase not in self.status:
            self.status[phase] = self.execute(self.timedatectl, 'status')
        return self.status[phase]
    def get(self, key, phase):
        status = self._get_status(phase)
        value = self.regexps[key].search(status).group(1)
        if key == 'hwclock':
            # For key='hwclock'; convert yes/no -> local/UTC
            if self.module.boolean(value):
                value = 'local'
            else:
                value = 'UTC'
        return value
    def set(self, key, value):
        # For key='hwclock'; convert UTC/local -> yes/no
        if key == 'hwclock':
            if value == 'local':
                value = 'yes'
            else:
                value = 'no'
        self.execute(self.timedatectl, self.subcmds[key], value, log=True)
class NosystemdTimezone(Timezone):
    """This is a Timezone manipulation class for non systemd-powered Linux.
    For timezone setting, it edits the following file and reflect changes:
    - /etc/sysconfig/clock ... RHEL/CentOS
    - /etc/timezone ... Debian/Ubuntu
    For hwclock setting, it executes `hwclock --systohc` command with the
    '--utc' or '--localtime' option.
    """
    conf_files = dict(
        name=None,  # To be set in __init__
        hwclock=None,  # To be set in __init__
        adjtime='/etc/adjtime'
    )
    # It's fine if all three config files don't exist
    allow_no_file = dict(
        name=True,
        hwclock=True,
        adjtime=True
    )
    regexps = dict(
        name=None,  # To be set in __init__
        hwclock=re.compile(r'^UTC\s*=\s*([^\s]+)', re.MULTILINE),
        adjtime=re.compile(r'^(UTC|LOCAL)$', re.MULTILINE)
    )
    # Distribution-specific key for the timezone line in the clock config.
    dist_regexps = dict(
        SuSE=re.compile(r'^TIMEZONE\s*=\s*"?([^"\s]+)"?', re.MULTILINE),
        redhat=re.compile(r'^ZONE\s*=\s*"?([^"\s]+)"?', re.MULTILINE)
    )
    dist_tzline_format = dict(
        SuSE='TIMEZONE="%s"\n',
        redhat='ZONE="%s"\n'
    )
    def __init__(self, module):
        super(NosystemdTimezone, self).__init__(module)
        # Validate given timezone
        if 'name' in self.value:
            tzfile = self._verify_timezone()
            # `--remove-destination` is needed if /etc/localtime is a symlink so
            # that it overwrites it instead of following it.
            self.update_timezone = ['%s --remove-destination %s /etc/localtime' % (self.module.get_bin_path('cp', required=True), tzfile)]
        self.update_hwclock = self.module.get_bin_path('hwclock', required=True)
        # Distribution-specific configurations
        if self.module.get_bin_path('dpkg-reconfigure') is not None:
            # Debian/Ubuntu
            if 'name' in self.value:
                self.update_timezone = ['%s -sf %s /etc/localtime' % (self.module.get_bin_path('ln', required=True), tzfile),
                                        '%s --frontend noninteractive tzdata' % self.module.get_bin_path('dpkg-reconfigure', required=True)]
            self.conf_files['name'] = '/etc/timezone'
            self.conf_files['hwclock'] = '/etc/default/rcS'
            self.regexps['name'] = re.compile(r'^([^\s]+)', re.MULTILINE)
            self.tzline_format = '%s\n'
        else:
            # RHEL/CentOS/SUSE
            if self.module.get_bin_path('tzdata-update') is not None:
                # tzdata-update cannot update the timezone if /etc/localtime is
                # a symlink so we have to use cp to update the time zone which
                # was set above.
                if not os.path.islink('/etc/localtime'):
                    self.update_timezone = [self.module.get_bin_path('tzdata-update', required=True)]
                # else:
                #     self.update_timezone = 'cp --remove-destination ...' <- configured above
            self.conf_files['name'] = '/etc/sysconfig/clock'
            self.conf_files['hwclock'] = '/etc/sysconfig/clock'
            try:
                f = open(self.conf_files['name'], 'r')
            except IOError as err:
                if self._allow_ioerror(err, 'name'):
                    # If the config file doesn't exist detect the distribution and set regexps.
                    distribution = get_distribution()
                    if distribution == 'SuSE':
                        # For SUSE
                        self.regexps['name'] = self.dist_regexps['SuSE']
                        self.tzline_format = self.dist_tzline_format['SuSE']
                    else:
                        # For RHEL/CentOS
                        self.regexps['name'] = self.dist_regexps['redhat']
                        self.tzline_format = self.dist_tzline_format['redhat']
                else:
                    self.abort('could not read configuration file "%s"' % self.conf_files['name'])
            else:
                # The key for timezone might be `ZONE` or `TIMEZONE`
                # (the former is used in RHEL/CentOS and the latter is used in SUSE linux).
                # So check the content of /etc/sysconfig/clock and decide which key to use.
                sysconfig_clock = f.read()
                f.close()
                if re.search(r'^TIMEZONE\s*=', sysconfig_clock, re.MULTILINE):
                    # For SUSE
                    self.regexps['name'] = self.dist_regexps['SuSE']
                    self.tzline_format = self.dist_tzline_format['SuSE']
                else:
                    # For RHEL/CentOS
                    self.regexps['name'] = self.dist_regexps['redhat']
                    self.tzline_format = self.dist_tzline_format['redhat']
    def _allow_ioerror(self, err, key):
        # In some cases, even if the target file does not exist,
        # simply creating it may solve the problem.
        # In such cases, we should continue the configuration rather than aborting.
        if err.errno != errno.ENOENT:
            # If the error is not ENOENT ("No such file or directory"),
            # (e.g., permission error, etc), we should abort.
            return False
        return self.allow_no_file.get(key, False)
    def _edit_file(self, filename, regexp, value, key):
        """Replace the first matched line with given `value`.
        If `regexp` matched more than once, other than the first line will be deleted.
        Args:
            filename: The name of the file to edit.
            regexp: The regular expression to search with.
            value: The line which will be inserted.
            key: For what key the file is being editted.
        """
        # Read the file
        try:
            file = open(filename, 'r')
        except IOError as err:
            if self._allow_ioerror(err, key):
                lines = []
            else:
                self.abort('tried to configure %s using a file "%s", but could not read it' % (key, filename))
        else:
            lines = file.readlines()
            file.close()
        # Find the all matched lines
        matched_indices = []
        for i, line in enumerate(lines):
            if regexp.search(line):
                matched_indices.append(i)
        if len(matched_indices) > 0:
            insert_line = matched_indices[0]
        else:
            insert_line = 0
        # Remove all matched lines
        for i in matched_indices[::-1]:
            del lines[i]
        # ...and insert the value
        lines.insert(insert_line, value)
        # Write the changes
        try:
            file = open(filename, 'w')
        except IOError:
            self.abort('tried to configure %s using a file "%s", but could not write to it' % (key, filename))
        else:
            file.writelines(lines)
            file.close()
        self.msg.append('Added 1 line and deleted %s line(s) on %s' % (len(matched_indices), filename))
    def _get_value_from_config(self, key, phase):
        # Read the configured value for `key` from its config file, mapping
        # missing files/values to 'n/a' or the documented defaults.
        filename = self.conf_files[key]
        try:
            file = open(filename, mode='r')
        except IOError as err:
            if self._allow_ioerror(err, key):
                if key == 'hwclock':
                    return 'n/a'
                elif key == 'adjtime':
                    return 'UTC'
                elif key == 'name':
                    return 'n/a'
            else:
                self.abort('tried to configure %s using a file "%s", but could not read it' % (key, filename))
        else:
            status = file.read()
            file.close()
            try:
                value = self.regexps[key].search(status).group(1)
            except AttributeError:
                if key == 'hwclock':
                    # If we cannot find UTC in the config that's fine.
                    return 'n/a'
                elif key == 'adjtime':
                    # If we cannot find UTC/LOCAL in /etc/adjtime that means UTC
                    # will be used by default.
                    return 'UTC'
                elif key == 'name':
                    if phase == 'before':
                        # In 'before' phase UTC/LOCAL doesn't need to be set in
                        # the timezone config file, so we ignore this error.
                        return 'n/a'
                    else:
                        self.abort('tried to configure %s using a file "%s", but could not find a valid value in it' % (key, filename))
            else:
                if key == 'hwclock':
                    # convert yes/no -> UTC/local
                    if self.module.boolean(value):
                        value = 'UTC'
                    else:
                        value = 'local'
                elif key == 'adjtime':
                    # convert LOCAL -> local
                    if value != 'UTC':
                        value = value.lower()
        return value
    def get(self, key, phase):
        planned = self.value[key]['planned']
        if key == 'hwclock':
            value = self._get_value_from_config(key, phase)
            if value == planned:
                # If the value in the config file is the same as the 'planned'
                # value, we need to check /etc/adjtime.
                value = self._get_value_from_config('adjtime', phase)
        elif key == 'name':
            value = self._get_value_from_config(key, phase)
            if value == planned:
                # If the planned values is the same as the one in the config file
                # we need to check if /etc/localtime is also set to the 'planned' zone.
                if os.path.islink('/etc/localtime'):
                    # If /etc/localtime is a symlink and is not set to the TZ we 'planned'
                    # to set, we need to return the TZ which the symlink points to.
                    if os.path.exists('/etc/localtime'):
                        # We use readlink() because on some distros zone files are symlinks
                        # to other zone files, so it's hard to get which TZ is actually set
                        # if we follow the symlink.
                        path = os.readlink('/etc/localtime')
                        linktz = re.search(r'/usr/share/zoneinfo/(.*)', path, re.MULTILINE)
                        if linktz:
                            valuelink = linktz.group(1)
                            if valuelink != planned:
                                value = valuelink
                        else:
                            # Set current TZ to 'n/a' if the symlink points to a path
                            # which isn't a zone file.
                            value = 'n/a'
                    else:
                        # Set current TZ to 'n/a' if the symlink to the zone file is broken.
                        value = 'n/a'
                else:
                    # If /etc/localtime is not a symlink best we can do is compare it with
                    # the 'planned' zone info file and return 'n/a' if they are different.
                    try:
                        if not filecmp.cmp('/etc/localtime', '/usr/share/zoneinfo/' + planned):
                            return 'n/a'
                    except Exception:
                        return 'n/a'
        else:
            self.abort('unknown parameter "%s"' % key)
        return value
    def set_timezone(self, value):
        # Rewrite the distro config file, then run the configured update
        # command(s) to refresh /etc/localtime.
        self._edit_file(filename=self.conf_files['name'],
                        regexp=self.regexps['name'],
                        value=self.tzline_format % value,
                        key='name')
        for cmd in self.update_timezone:
            self.execute(cmd)
    def set_hwclock(self, value):
        # Sync the hardware clock from the system clock with the requested
        # UTC/local interpretation.
        if value == 'local':
            option = '--localtime'
            utc = 'no'
        else:
            option = '--utc'
            utc = 'yes'
        if self.conf_files['hwclock'] is not None:
            self._edit_file(filename=self.conf_files['hwclock'],
                            regexp=self.regexps['hwclock'],
                            value='UTC=%s\n' % utc,
                            key='hwclock')
        self.execute(self.update_hwclock, '--systohc', option, log=True)
    def set(self, key, value):
        # Dispatch to the key-specific setter.
        if key == 'name':
            self.set_timezone(value)
        elif key == 'hwclock':
            self.set_hwclock(value)
        else:
            self.abort('unknown parameter "%s"' % key)
class SmartOSTimezone(Timezone):
    """This is a Timezone manipulation class for SmartOS instances.
    It uses the C(sm-set-timezone) utility to set the timezone, and
    inspects C(/etc/default/init) to determine the current timezone.
    NB: A zone needs to be rebooted in order for the change to be
    activated.
    """
    def __init__(self, module):
        super(SmartOSTimezone, self).__init__(module)
        self.settimezone = self.module.get_bin_path('sm-set-timezone', required=False)
        if not self.settimezone:
            module.fail_json(msg='sm-set-timezone not found. Make sure the smtools package is installed.')
    def get(self, key, phase):
        """Lookup the current timezone name in `/etc/default/init`. If anything else
        is requested, or if the TZ field is not set we fail.
        """
        if key == 'name':
            try:
                f = open('/etc/default/init', 'r')
                for line in f:
                    m = re.match('^TZ=(.*)$', line.strip())
                    if m:
                        return m.groups()[0]
            except Exception:
                self.module.fail_json(msg='Failed to read /etc/default/init')
            # NOTE(review): if no TZ= line exists this falls through and
            # returns None rather than failing as the docstring implies.
        else:
            self.module.fail_json(msg='%s is not a supported option on target platform' % key)
    def set(self, key, value):
        """Set the requested timezone through sm-set-timezone, an invalid timezone name
        will be rejected and we have no further input validation to perform.
        """
        if key == 'name':
            cmd = 'sm-set-timezone %s' % value
            (rc, stdout, stderr) = self.module.run_command(cmd)
            if rc != 0:
                self.module.fail_json(msg=stderr)
            # sm-set-timezone knows no state and will always set the timezone.
            # XXX: https://github.com/joyent/smtools/pull/2
            m = re.match(r'^\* Changed (to)? timezone (to)? (%s).*' % value, stdout.splitlines()[1])
            if not (m and m.groups()[-1] == value):
                self.module.fail_json(msg='Failed to set timezone')
        else:
            self.module.fail_json(msg='%s is not a supported option on target platform' % key)
class DarwinTimezone(Timezone):
    """This is the timezone implementation for Darwin which, unlike other *BSD
    implementations, uses the `systemsetup` command on Darwin to check/set
    the timezone.
    """
    # Pattern extracting the zone name from `systemsetup -gettimezone` output.
    regexps = dict(
        name=re.compile(r'^\s*Time ?Zone\s*:\s*([^\s]+)', re.MULTILINE)
    )
    def __init__(self, module):
        super(DarwinTimezone, self).__init__(module)
        self.systemsetup = module.get_bin_path('systemsetup', required=True)
        # Cache of `systemsetup -gettimezone` output, keyed by phase.
        self.status = dict()
        # Validate given timezone
        if 'name' in self.value:
            self._verify_timezone()
    def _get_current_timezone(self, phase):
        """Lookup the current timezone via `systemsetup -gettimezone`."""
        if phase not in self.status:
            self.status[phase] = self.execute(self.systemsetup, '-gettimezone')
        return self.status[phase]
    def _verify_timezone(self):
        # Overrides the base implementation: on Darwin the authoritative
        # zone list comes from systemsetup, not the zoneinfo files.
        tz = self.value['name']['planned']
        # Lookup the list of supported timezones via `systemsetup -listtimezones`.
        # Note: Skip the first line that contains the label 'Time Zones:'
        out = self.execute(self.systemsetup, '-listtimezones').splitlines()[1:]
        tz_list = list(map(lambda x: x.strip(), out))
        if tz not in tz_list:
            self.abort('given timezone "%s" is not available' % tz)
        return tz
    def get(self, key, phase):
        if key == 'name':
            status = self._get_current_timezone(phase)
            value = self.regexps[key].search(status).group(1)
            return value
        else:
            self.module.fail_json(msg='%s is not a supported option on target platform' % key)
    def set(self, key, value):
        if key == 'name':
            self.execute(self.systemsetup, '-settimezone', value, log=True)
        else:
            self.module.fail_json(msg='%s is not a supported option on target platform' % key)
class BSDTimezone(Timezone):
    """This is the timezone implementation for *BSD which works simply through
    updating the `/etc/localtime` symlink to point to a valid timezone name under
    `/usr/share/zoneinfo`.
    """
    def __init__(self, module):
        super(BSDTimezone, self).__init__(module)
    def __get_timezone(self):
        # Determine the current zone name using four strategies, from
        # cheapest to most exhaustive.
        zoneinfo_dir = '/usr/share/zoneinfo/'
        localtime_file = '/etc/localtime'
        # Strategy 1:
        # If /etc/localtime does not exist, assume the timezone is UTC.
        if not os.path.exists(localtime_file):
            self.module.warn('Could not read /etc/localtime. Assuming UTC.')
            return 'UTC'
        # Strategy 2:
        # Follow symlink of /etc/localtime
        zoneinfo_file = localtime_file
        while not zoneinfo_file.startswith(zoneinfo_dir):
            try:
                zoneinfo_file = os.readlink(localtime_file)
            except OSError:
                # OSError means "end of symlink chain" or broken link.
                break
        else:
            return zoneinfo_file.replace(zoneinfo_dir, '')
        # Strategy 3:
        # (If /etc/localtime is not symlinked)
        # Check all files in /usr/share/zoneinfo and return first non-link match.
        for dname, _, fnames in sorted(os.walk(zoneinfo_dir)):
            for fname in sorted(fnames):
                zoneinfo_file = os.path.join(dname, fname)
                if not os.path.islink(zoneinfo_file) and filecmp.cmp(zoneinfo_file, localtime_file):
                    return zoneinfo_file.replace(zoneinfo_dir, '')
        # Strategy 4:
        # As a fall-back, return 'UTC' as default assumption.
        self.module.warn('Could not identify timezone name from /etc/localtime. Assuming UTC.')
        return 'UTC'
    def get(self, key, phase):
        """Lookup the current timezone by resolving `/etc/localtime`."""
        if key == 'name':
            return self.__get_timezone()
        else:
            self.module.fail_json(msg='%s is not a supported option on target platform' % key)
    def set(self, key, value):
        if key == 'name':
            # First determine if the requested timezone is valid by looking in
            # the zoneinfo directory.
            zonefile = '/usr/share/zoneinfo/' + value
            try:
                if not os.path.isfile(zonefile):
                    self.module.fail_json(msg='%s is not a recognized timezone' % value)
            except Exception:
                self.module.fail_json(msg='Failed to stat %s' % zonefile)
            # Now (somewhat) atomically update the symlink by creating a new
            # symlink and move it into place. Otherwise we have to remove the
            # original symlink and create the new symlink, however that would
            # create a race condition in case another process tries to read
            # /etc/localtime between removal and creation.
            suffix = "".join([random.choice(string.ascii_letters + string.digits) for x in range(0, 10)])
            new_localtime = '/etc/localtime.' + suffix
            try:
                os.symlink(zonefile, new_localtime)
                os.rename(new_localtime, '/etc/localtime')
            except Exception:
                # Best-effort cleanup of the temporary link before failing.
                os.remove(new_localtime)
                self.module.fail_json(msg='Could not update /etc/localtime')
        else:
            self.module.fail_json(msg='%s is not a supported option on target platform' % key)
class AIXTimezone(Timezone):
    """This is a Timezone manipulation class for AIX instances.

    It uses the C(chtz) utility to set the timezone, and
    inspects C(/etc/environment) to determine the current timezone.

    While AIX time zones can be set using two formats (POSIX and
    Olson) the preferred method is Olson.
    See the following article for more information:
    https://developer.ibm.com/articles/au-aix-posix/

    NB: AIX needs to be rebooted in order for the change to be
    activated.
    """

    def __init__(self, module):
        super(AIXTimezone, self).__init__(module)
        self.settimezone = self.module.get_bin_path('chtz', required=True)

    def __get_timezone(self):
        """ Return the current value of TZ= in /etc/environment """
        try:
            # Context manager guarantees the file is closed even if read() fails.
            with open('/etc/environment', 'r') as f:
                etcenvironment = f.read()
        except Exception:
            self.module.fail_json(msg='Issue reading contents of /etc/environment')

        match = re.search(r'^TZ=(.*)$', etcenvironment, re.MULTILINE)
        if match:
            return match.group(1)
        else:
            return None

    def get(self, key, phase):
        """Lookup the current timezone name in `/etc/environment`. If anything else
        is requested, or if the TZ field is not set we fail.
        """
        if key == 'name':
            return self.__get_timezone()
        else:
            self.module.fail_json(msg='%s is not a supported option on target platform' % key)

    def set(self, key, value):
        """Set the requested timezone through chtz, an invalid timezone name
        will be rejected and we have no further input validation to perform.
        """
        if key == 'name':
            # chtz seems to always return 0 on AIX 7.2, even for invalid timezone values.
            # It will only return non-zero if the chtz command itself fails, it does not check for
            # valid timezones. We need to perform a basic check to confirm that the timezone
            # definition exists in /usr/share/lib/zoneinfo
            # This does mean that we can only support Olson for now. The below commented out regex
            # detects Olson date formats, so in the future we could detect Posix or Olson and
            # act accordingly.

            # regex_olson = re.compile('^([a-z0-9_\-\+]+\/?)+$', re.IGNORECASE)
            # if not regex_olson.match(value):
            #     msg = 'Supplied timezone (%s) does not appear to a be valid Olson string' % value
            #     self.module.fail_json(msg=msg)

            # First determine if the requested timezone is valid by looking in the zoneinfo
            # directory.
            zonefile = '/usr/share/lib/zoneinfo/' + value
            try:
                if not os.path.isfile(zonefile):
                    self.module.fail_json(msg='%s is not a recognized timezone.' % value)
            except Exception:
                self.module.fail_json(msg='Failed to check %s.' % zonefile)

            # Now set the TZ using chtz
            cmd = 'chtz %s' % value
            (rc, stdout, stderr) = self.module.run_command(cmd)

            if rc != 0:
                self.module.fail_json(msg=stderr)

            # The best condition check we can do is to check the value of TZ after making the
            # change.
            TZ = self.__get_timezone()
            if TZ != value:
                msg = 'TZ value does not match post-change (Actual: %s, Expected: %s).' % (TZ, value)
                self.module.fail_json(msg=msg)

        else:
            self.module.fail_json(msg='%s is not a supported option on target platform' % key)
def main():
    """Entry point: parse module arguments, apply the requested timezone
    settings and report the resulting diff via exit_json."""
    module = AnsibleModule(
        argument_spec=dict(
            hwclock=dict(type='str', choices=['local', 'UTC'], aliases=['rtc']),
            name=dict(type='str'),
        ),
        required_one_of=[
            ['hwclock', 'name']
        ],
        supports_check_mode=True,
    )
    tz = Timezone(module)

    # Capture the state prior to any modification.
    tz.check(phase='before')

    if module.check_mode:
        state_diff = tz.diff('before', 'planned')
        # In check mode, 'planned' state is treated as 'after' state
        state_diff['after'] = state_diff.pop('planned')
    else:
        # Apply the change, then re-read the state.
        tz.change()
        tz.check(phase='after')
        # Examine if the current state matches planned state
        verification = tz.diff('after', 'planned')
        if verification['after'] != verification['planned']:
            tz.abort('still not desired state, though changes have made - '
                     'planned: %s, after: %s' % (str(verification['planned']), str(verification['after'])))
        state_diff = tz.diff('before', 'after')

    changed = state_diff['before'] != state_diff['after']

    if tz.msg:
        module.exit_json(changed=changed, diff=state_diff, msg='\n'.join(tz.msg))
    else:
        module.exit_json(changed=changed, diff=state_diff)
if __name__ == '__main__':
main()

View file

@ -0,0 +1,593 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, Ahti Kitsik <ak@ahtik.com>
# Copyright: (c) 2014, Jarno Keskikangas <jarno.keskikangas@gmail.com>
# Copyright: (c) 2013, Aleksey Ovcharenko <aleksey.ovcharenko@gmail.com>
# Copyright: (c) 2013, James Martin <jmartin@basho.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: ufw
short_description: Manage firewall with UFW
description:
- Manage firewall with UFW.
author:
- Aleksey Ovcharenko (@ovcharenko)
- Jarno Keskikangas (@pyykkis)
- Ahti Kitsik (@ahtik)
notes:
- See C(man ufw) for more examples.
requirements:
- C(ufw) package
options:
state:
description:
- C(enabled) reloads firewall and enables firewall on boot.
- C(disabled) unloads firewall and disables firewall on boot.
- C(reloaded) reloads firewall.
- C(reset) disables and resets firewall to installation defaults.
type: str
choices: [ disabled, enabled, reloaded, reset ]
default:
description:
- Change the default policy for incoming or outgoing traffic.
type: str
choices: [ allow, deny, reject ]
aliases: [ policy ]
direction:
description:
- Select direction for a rule or default policy command. Mutually
exclusive with I(interface_in) and I(interface_out).
type: str
choices: [ in, incoming, out, outgoing, routed ]
logging:
description:
- Toggles logging. Logged packets use the LOG_KERN syslog facility.
type: str
choices: [ 'on', 'off', low, medium, high, full ]
insert:
description:
- Insert the corresponding rule as rule number NUM.
- Note that ufw numbers rules starting with 1.
type: int
insert_relative_to:
description:
- Allows to interpret the index in I(insert) relative to a position.
- C(zero) interprets the rule number as an absolute index (i.e. 1 is
the first rule).
- C(first-ipv4) interprets the rule number relative to the index of the
first IPv4 rule, or relative to the position where the first IPv4 rule
would be if there is currently none.
- C(last-ipv4) interprets the rule number relative to the index of the
last IPv4 rule, or relative to the position where the last IPv4 rule
would be if there is currently none.
- C(first-ipv6) interprets the rule number relative to the index of the
first IPv6 rule, or relative to the position where the first IPv6 rule
would be if there is currently none.
- C(last-ipv6) interprets the rule number relative to the index of the
last IPv6 rule, or relative to the position where the last IPv6 rule
would be if there is currently none.
type: str
choices: [ first-ipv4, first-ipv6, last-ipv4, last-ipv6, zero ]
default: zero
rule:
description:
- Add firewall rule
type: str
choices: [ allow, deny, limit, reject ]
log:
description:
- Log new connections matched to this rule
type: bool
from_ip:
description:
- Source IP address.
type: str
default: any
aliases: [ from, src ]
from_port:
description:
- Source port.
type: str
to_ip:
description:
- Destination IP address.
type: str
default: any
aliases: [ dest, to]
to_port:
description:
- Destination port.
type: str
aliases: [ port ]
proto:
description:
- TCP/IP protocol.
type: str
choices: [ any, tcp, udp, ipv6, esp, ah, gre, igmp ]
aliases: [ protocol ]
name:
description:
- Use profile located in C(/etc/ufw/applications.d).
type: str
aliases: [ app ]
delete:
description:
- Delete rule.
type: bool
interface:
description:
- Specify interface for the rule. The direction (in or out) used
for the interface depends on the value of I(direction). See
I(interface_in) and I(interface_out) for routed rules that needs
to supply both an input and output interface. Mutually
exclusive with I(interface_in) and I(interface_out).
type: str
aliases: [ if ]
interface_in:
description:
- Specify input interface for the rule. This is mutually
exclusive with I(direction) and I(interface). However, it is
compatible with I(interface_out) for routed rules.
type: str
aliases: [ if_in ]
interface_out:
description:
- Specify output interface for the rule. This is mutually
exclusive with I(direction) and I(interface). However, it is
compatible with I(interface_in) for routed rules.
type: str
aliases: [ if_out ]
route:
description:
- Apply the rule to routed/forwarded packets.
type: bool
comment:
description:
- Add a comment to the rule. Requires UFW version >=0.35.
type: str
'''
EXAMPLES = r'''
- name: Allow everything and enable UFW
ufw:
state: enabled
policy: allow
- name: Set logging
ufw:
logging: 'on'
# Sometimes it is desirable to let the sender know when traffic is
# being denied, rather than simply ignoring it. In these cases, use
# reject instead of deny. In addition, log rejected connections:
- ufw:
rule: reject
port: auth
log: yes
# ufw supports connection rate limiting, which is useful for protecting
# against brute-force login attacks. ufw will deny connections if an IP
# address has attempted to initiate 6 or more connections in the last
# 30 seconds. See http://www.debian-administration.org/articles/187
# for details. Typical usage is:
- ufw:
rule: limit
port: ssh
proto: tcp
# Allow OpenSSH. (Note that as ufw manages its own state, simply removing
# a rule=allow task can leave those ports exposed. Either use delete=yes
# or a separate state=reset task)
- ufw:
rule: allow
name: OpenSSH
- name: Delete OpenSSH rule
ufw:
rule: allow
name: OpenSSH
delete: yes
- name: Deny all access to port 53
ufw:
rule: deny
port: '53'
- name: Allow port range 60000-61000
ufw:
rule: allow
port: 60000:61000
proto: tcp
- name: Allow all access to tcp port 80
ufw:
rule: allow
port: '80'
proto: tcp
- name: Allow all access from RFC1918 networks to this host
ufw:
rule: allow
src: '{{ item }}'
loop:
- 10.0.0.0/8
- 172.16.0.0/12
- 192.168.0.0/16
- name: Deny access to udp port 514 from host 1.2.3.4 and include a comment
ufw:
rule: deny
proto: udp
src: 1.2.3.4
port: '514'
comment: Block syslog
- name: Allow incoming access to eth0 from 1.2.3.5 port 5469 to 1.2.3.4 port 5469
ufw:
rule: allow
interface: eth0
direction: in
proto: udp
src: 1.2.3.5
from_port: '5469'
dest: 1.2.3.4
to_port: '5469'
# Note that IPv6 must be enabled in /etc/default/ufw for IPv6 firewalling to work.
- name: Deny all traffic from the IPv6 2001:db8::/32 to tcp port 25 on this host
ufw:
rule: deny
proto: tcp
src: 2001:db8::/32
port: '25'
- name: Deny all IPv6 traffic to tcp port 20 on this host
# this should be the first IPv6 rule
ufw:
rule: deny
proto: tcp
port: '20'
to_ip: "::"
insert: 0
insert_relative_to: first-ipv6
- name: Deny all IPv4 traffic to tcp port 20 on this host
# This should be the third to last IPv4 rule
# (insert: -1 addresses the second to last IPv4 rule;
# so the new rule will be inserted before the second
# to last IPv4 rule, and will be come the third to last
# IPv4 rule.)
ufw:
rule: deny
proto: tcp
port: '20'
to_ip: "::"
insert: -1
insert_relative_to: last-ipv4
# Can be used to further restrict a global FORWARD policy set to allow
- name: Deny forwarded/routed traffic from subnet 1.2.3.0/24 to subnet 4.5.6.0/24
ufw:
rule: deny
route: yes
src: 1.2.3.0/24
dest: 4.5.6.0/24
'''
import re
from operator import itemgetter
from ansible.module_utils.basic import AnsibleModule
def compile_ipv4_regexp():
r = r"((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}"
r += r"(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])"
return re.compile(r)
def compile_ipv6_regexp():
    """
    validation pattern provided by :
    https://stackoverflow.com/questions/53497/regular-expression-that-matches-
    valid-ipv6-addresses#answer-17871737
    """
    # The pattern is built up in chunks purely for source readability; the
    # concatenated result is one large alternation covering full-form
    # addresses, "::"-compressed forms, link-local addresses with a
    # %zone suffix, and IPv4-mapped/embedded notations.
    r = r"(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:"
    r += r"|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}"
    r += r"(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4})"
    r += r"{1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]"
    r += r"{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]"
    r += r"{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4})"
    r += r"{0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]"
    r += r"|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}"
    r += r"[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}"
    r += r"[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))"
    return re.compile(r)
def main():
    """Entry point for the ufw module.

    Parses arguments, snapshots the current ufw state/rules, applies the
    requested command(s) (state, default policy, logging, or a rule), and
    reports whether anything changed. In check mode the ufw '--dry-run'
    flag is used and the dry-run output is compared against the current
    rules instead of applying anything.
    """
    command_keys = ['state', 'default', 'rule', 'logging']

    module = AnsibleModule(
        argument_spec=dict(
            state=dict(type='str', choices=['enabled', 'disabled', 'reloaded', 'reset']),
            default=dict(type='str', aliases=['policy'], choices=['allow', 'deny', 'reject']),
            logging=dict(type='str', choices=['full', 'high', 'low', 'medium', 'off', 'on']),
            direction=dict(type='str', choices=['in', 'incoming', 'out', 'outgoing', 'routed']),
            delete=dict(type='bool', default=False),
            route=dict(type='bool', default=False),
            insert=dict(type='int'),
            insert_relative_to=dict(choices=['zero', 'first-ipv4', 'last-ipv4', 'first-ipv6', 'last-ipv6'], default='zero'),
            rule=dict(type='str', choices=['allow', 'deny', 'limit', 'reject']),
            interface=dict(type='str', aliases=['if']),
            interface_in=dict(type='str', aliases=['if_in']),
            interface_out=dict(type='str', aliases=['if_out']),
            log=dict(type='bool', default=False),
            from_ip=dict(type='str', default='any', aliases=['from', 'src']),
            from_port=dict(type='str'),
            to_ip=dict(type='str', default='any', aliases=['dest', 'to']),
            to_port=dict(type='str', aliases=['port']),
            proto=dict(type='str', aliases=['protocol'], choices=['ah', 'any', 'esp', 'ipv6', 'tcp', 'udp', 'gre', 'igmp']),
            name=dict(type='str', aliases=['app']),
            comment=dict(type='str'),
        ),
        supports_check_mode=True,
        mutually_exclusive=[
            ['name', 'proto', 'logging'],
            # Mutual exclusivity with `interface` implied by `required_by`.
            ['direction', 'interface_in'],
            ['direction', 'interface_out'],
        ],
        required_one_of=([command_keys]),
        required_by=dict(
            interface=('direction', ),
        ),
    )

    # Every ufw invocation is recorded here and returned to the caller.
    cmds = []

    ipv4_regexp = compile_ipv4_regexp()
    ipv6_regexp = compile_ipv6_regexp()

    # NOTE on the helpers below: despite its name,
    # filter_line_that_not_start_with() KEEPS the lines that start with the
    # pattern (it filters out the others).
    def filter_line_that_not_start_with(pattern, content):
        return ''.join([line for line in content.splitlines(True) if line.startswith(pattern)])

    def filter_line_that_contains(pattern, content):
        return [line for line in content.splitlines(True) if pattern in line]

    def filter_line_that_not_contains(pattern, content):
        # BUG FIX: str has no .contains() method — the original
        # 'line.contains(pattern)' raised AttributeError when called.
        return ''.join([line for line in content.splitlines(True) if pattern not in line])

    def filter_line_that_match_func(match_func, content):
        return ''.join([line for line in content.splitlines(True) if match_func(line) is not None])

    def filter_line_that_contains_ipv4(content):
        return filter_line_that_match_func(ipv4_regexp.search, content)

    def filter_line_that_contains_ipv6(content):
        return filter_line_that_match_func(ipv6_regexp.search, content)

    def is_starting_by_ipv4(ip):
        return ipv4_regexp.match(ip) is not None

    def is_starting_by_ipv6(ip):
        return ipv6_regexp.match(ip) is not None

    def execute(cmd, ignore_error=False):
        # 'cmd' is a list of [condition, text] pairs; keep the text of the
        # truthy pairs and join them into the final command line.
        cmd = ' '.join(map(itemgetter(-1), filter(itemgetter(0), cmd)))

        cmds.append(cmd)
        (rc, out, err) = module.run_command(cmd, environ_update={"LANG": "C"})

        if rc != 0 and not ignore_error:
            module.fail_json(msg=err or out, commands=cmds)

        return out

    def get_current_rules():
        # Paths differ between distributions, so grep them all (-h, errors
        # ignored) for the '### tuple' rule markers.
        user_rules_files = ["/lib/ufw/user.rules",
                            "/lib/ufw/user6.rules",
                            "/etc/ufw/user.rules",
                            "/etc/ufw/user6.rules",
                            "/var/lib/ufw/user.rules",
                            "/var/lib/ufw/user6.rules"]

        cmd = [[grep_bin], ["-h"], ["'^### tuple'"]]

        cmd.extend([[f] for f in user_rules_files])
        return execute(cmd, ignore_error=True)

    def ufw_version():
        """
        Returns the major and minor version of ufw installed on the system.
        """
        out = execute([[ufw_bin], ["--version"]])

        lines = [x for x in out.split('\n') if x.strip() != '']
        if len(lines) == 0:
            module.fail_json(msg="Failed to get ufw version.", rc=0, out=out)

        matches = re.search(r'^ufw.+(\d+)\.(\d+)(?:\.(\d+))?.*$', lines[0])
        if matches is None:
            module.fail_json(msg="Failed to get ufw version.", rc=0, out=out)

        # Convert version to numbers
        major = int(matches.group(1))
        minor = int(matches.group(2))
        rev = 0
        if matches.group(3) is not None:
            rev = int(matches.group(3))

        return major, minor, rev

    params = module.params

    commands = dict((key, params[key]) for key in command_keys if params[key])

    # Ensure ufw is available
    ufw_bin = module.get_bin_path('ufw', True)
    grep_bin = module.get_bin_path('grep', True)

    # Save the pre state and rules in order to recognize changes
    pre_state = execute([[ufw_bin], ['status verbose']])
    pre_rules = get_current_rules()

    changed = False

    # Execute filter
    for (command, value) in commands.items():

        cmd = [[ufw_bin], [module.check_mode, '--dry-run']]

        if command == 'state':
            states = {'enabled': 'enable', 'disabled': 'disable',
                      'reloaded': 'reload', 'reset': 'reset'}

            if value in ['reloaded', 'reset']:
                changed = True

            if module.check_mode:
                # "active" would also match "inactive", hence the space
                ufw_enabled = pre_state.find(" active") != -1
                if (value == 'disabled' and ufw_enabled) or (value == 'enabled' and not ufw_enabled):
                    changed = True
            else:
                execute(cmd + [['-f'], [states[value]]])

        elif command == 'logging':
            extract = re.search(r'Logging: (on|off)(?: \(([a-z]+)\))?', pre_state)
            if extract:
                current_level = extract.group(2)
                current_on_off_value = extract.group(1)
                if value != "off":
                    if current_on_off_value == "off":
                        changed = True
                    elif value != "on" and value != current_level:
                        changed = True
                elif current_on_off_value != "off":
                    changed = True
            else:
                changed = True

            if not module.check_mode:
                execute(cmd + [[command], [value]])

        elif command == 'default':
            if params['direction'] not in ['outgoing', 'incoming', 'routed', None]:
                module.fail_json(msg='For default, direction must be one of "outgoing", "incoming" and "routed", or direction must not be specified.')

            if module.check_mode:
                regexp = r'Default: (deny|allow|reject) \(incoming\), (deny|allow|reject) \(outgoing\), (deny|allow|reject|disabled) \(routed\)'
                extract = re.search(regexp, pre_state)
                if extract is not None:
                    current_default_values = {}
                    current_default_values["incoming"] = extract.group(1)
                    current_default_values["outgoing"] = extract.group(2)
                    current_default_values["routed"] = extract.group(3)
                    v = current_default_values[params['direction'] or 'incoming']
                    if v not in (value, 'disabled'):
                        changed = True
                else:
                    changed = True
            else:
                execute(cmd + [[command], [value], [params['direction']]])

        elif command == 'rule':
            if params['direction'] not in ['in', 'out', None]:
                module.fail_json(msg='For rules, direction must be one of "in" and "out", or direction must not be specified.')
            if not params['route'] and params['interface_in'] and params['interface_out']:
                module.fail_json(msg='Only route rules can combine '
                                     'interface_in and interface_out')
            # Rules are constructed according to the long format
            #
            # ufw [--dry-run] [route] [delete] [insert NUM] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \
            #     [from ADDRESS [port PORT]] [to ADDRESS [port PORT]] \
            #     [proto protocol] [app application] [comment COMMENT]
            cmd.append([module.boolean(params['route']), 'route'])
            cmd.append([module.boolean(params['delete']), 'delete'])

            if params['insert'] is not None:
                relative_to_cmd = params['insert_relative_to']
                if relative_to_cmd == 'zero':
                    insert_to = params['insert']
                else:
                    # Translate a relative index into an absolute rule number
                    # by parsing 'ufw status numbered' output.
                    (dummy, numbered_state, dummy) = module.run_command([ufw_bin, 'status', 'numbered'])
                    numbered_line_re = re.compile(r'^\[ *([0-9]+)\] ')
                    lines = [(numbered_line_re.match(line), '(v6)' in line) for line in numbered_state.splitlines()]
                    lines = [(int(matcher.group(1)), ipv6) for (matcher, ipv6) in lines if matcher]
                    last_number = max([no for (no, ipv6) in lines]) if lines else 0
                    has_ipv4 = any([not ipv6 for (no, ipv6) in lines])
                    has_ipv6 = any([ipv6 for (no, ipv6) in lines])
                    if relative_to_cmd == 'first-ipv4':
                        relative_to = 1
                    elif relative_to_cmd == 'last-ipv4':
                        relative_to = max([no for (no, ipv6) in lines if not ipv6]) if has_ipv4 else 1
                    elif relative_to_cmd == 'first-ipv6':
                        relative_to = max([no for (no, ipv6) in lines if not ipv6]) + 1 if has_ipv4 else 1
                    elif relative_to_cmd == 'last-ipv6':
                        relative_to = last_number if has_ipv6 else last_number + 1
                    insert_to = params['insert'] + relative_to
                    if insert_to > last_number:
                        # ufw does not like it when the insert number is larger than the
                        # maximal rule number for IPv4/IPv6.
                        insert_to = None
                cmd.append([insert_to is not None, "insert %s" % insert_to])

            cmd.append([value])
            cmd.append([params['direction'], "%s" % params['direction']])
            cmd.append([params['interface'], "on %s" % params['interface']])
            cmd.append([params['interface_in'], "in on %s" % params['interface_in']])
            cmd.append([params['interface_out'], "out on %s" % params['interface_out']])
            cmd.append([module.boolean(params['log']), 'log'])

            # NOTE: 'value' is deliberately reused as the loop variable here;
            # the rule action it previously held has already been appended.
            for (key, template) in [('from_ip', "from %s"), ('from_port', "port %s"),
                                    ('to_ip', "to %s"), ('to_port', "port %s"),
                                    ('proto', "proto %s"), ('name', "app '%s'")]:
                value = params[key]
                cmd.append([value, template % (value)])

            ufw_major, ufw_minor, dummy = ufw_version()
            # comment is supported only in ufw version after 0.35
            if (ufw_major == 0 and ufw_minor >= 35) or ufw_major > 0:
                cmd.append([params['comment'], "comment '%s'" % params['comment']])

            rules_dry = execute(cmd)

            if module.check_mode:

                nb_skipping_line = len(filter_line_that_contains("Skipping", rules_dry))

                if not (nb_skipping_line > 0 and nb_skipping_line == len(rules_dry.splitlines(True))):

                    rules_dry = filter_line_that_not_start_with("### tuple", rules_dry)
                    # ufw dry-run doesn't send all rules so have to compare ipv4 or ipv6 rules
                    if is_starting_by_ipv4(params['from_ip']) or is_starting_by_ipv4(params['to_ip']):
                        if filter_line_that_contains_ipv4(pre_rules) != filter_line_that_contains_ipv4(rules_dry):
                            changed = True
                    elif is_starting_by_ipv6(params['from_ip']) or is_starting_by_ipv6(params['to_ip']):
                        if filter_line_that_contains_ipv6(pre_rules) != filter_line_that_contains_ipv6(rules_dry):
                            changed = True
                    elif pre_rules != rules_dry:
                        changed = True

    # Get the new state
    if module.check_mode:
        return module.exit_json(changed=changed, commands=cmds)
    else:
        post_state = execute([[ufw_bin], ['status'], ['verbose']])
        if not changed:
            post_rules = get_current_rules()
            changed = (pre_state != post_state) or (pre_rules != post_rules)
        return module.exit_json(changed=changed, commands=cmds, msg=post_state.rstrip())
if __name__ == '__main__':
main()

View file

@ -0,0 +1,872 @@
#!/usr/bin/python
# Copyright: (c) 2018, Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
author:
- Bryan Gurney (@bgurney-rh)
module: vdo
short_description: Module to control VDO
description:
- This module controls the VDO dedupe and compression device.
- VDO, or Virtual Data Optimizer, is a device-mapper target that
provides inline block-level deduplication, compression, and
thin provisioning capabilities to primary storage.
options:
name:
description:
- The name of the VDO volume.
type: str
required: true
state:
description:
- Whether this VDO volume should be "present" or "absent".
If a "present" VDO volume does not exist, it will be
created. If a "present" VDO volume already exists, it
will be modified, by updating the configuration, which
will take effect when the VDO volume is restarted.
Not all parameters of an existing VDO volume can be
modified; the "statusparamkeys" list contains the
parameters that can be modified after creation. If an
"absent" VDO volume does not exist, it will not be
removed.
type: str
required: true
choices: [ absent, present ]
default: present
activated:
description:
- The "activate" status for a VDO volume. If this is set
to "no", the VDO volume cannot be started, and it will
not start on system startup. However, on initial
creation, a VDO volume with "activated" set to "off"
will be running, until stopped. This is the default
behavior of the "vdo create" command; it provides the
user an opportunity to write a base amount of metadata
(filesystem, LVM headers, etc.) to the VDO volume prior
to stopping the volume, and leaving it deactivated
until ready to use.
type: bool
running:
description:
- Whether this VDO volume is running.
- A VDO volume must be activated in order to be started.
type: bool
device:
description:
- The full path of the device to use for VDO storage.
- This is required if "state" is "present".
type: str
logicalsize:
description:
- The logical size of the VDO volume (in megabytes, or
LVM suffix format). If not specified for a new volume,
this defaults to the same size as the underlying storage
device, which is specified in the 'device' parameter.
Existing volumes will maintain their size if the
logicalsize parameter is not specified, or is smaller
than or identical to the current size. If the specified
size is larger than the current size, a growlogical
operation will be performed.
type: str
deduplication:
description:
- Configures whether deduplication is enabled. The
default for a created volume is 'enabled'. Existing
volumes will maintain their previously configured
setting unless a different value is specified in the
playbook.
type: str
choices: [ disabled, enabled ]
compression:
description:
- Configures whether compression is enabled. The default
for a created volume is 'enabled'. Existing volumes
will maintain their previously configured setting unless
a different value is specified in the playbook.
type: str
choices: [ disabled, enabled ]
blockmapcachesize:
description:
- The amount of memory allocated for caching block map
pages, in megabytes (or may be issued with an LVM-style
suffix of K, M, G, or T). The default (and minimum)
value is 128M. The value specifies the size of the
cache; there is a 15% memory usage overhead. Each 1.25G
of block map covers 1T of logical blocks, therefore a
small amount of block map cache memory can cache a
significantly large amount of block map data. Existing
volumes will maintain their previously configured
setting unless a different value is specified in the
playbook.
type: str
readcache:
description:
- Enables or disables the read cache. The default is
'disabled'. Choosing 'enabled' enables a read cache
which may improve performance for workloads of high
deduplication, read workloads with a high level of
compression, or on hard disk storage. Existing
volumes will maintain their previously configured
setting unless a different value is specified in the
playbook.
- The read cache feature is available in VDO 6.1 and older.
type: str
choices: [ disabled, enabled ]
readcachesize:
description:
- Specifies the extra VDO device read cache size in
megabytes. This is in addition to a system-defined
minimum. Using a value with a suffix of K, M, G, or T
is optional. The default value is 0. 1.125 MB of
memory per bio thread will be used per 1 MB of read
cache specified (for example, a VDO volume configured
with 4 bio threads will have a read cache memory usage
overhead of 4.5 MB per 1 MB of read cache specified).
Existing volumes will maintain their previously
configured setting unless a different value is specified
in the playbook.
- The read cache feature is available in VDO 6.1 and older.
type: str
emulate512:
description:
- Enables 512-byte emulation mode, allowing drivers or
filesystems to access the VDO volume at 512-byte
granularity, instead of the default 4096-byte granularity.
Default is 'disabled'; only recommended when a driver
or filesystem requires 512-byte sector level access to
a device. This option is only available when creating
a new volume, and cannot be changed for an existing
volume.
type: bool
growphysical:
description:
- Specifies whether to attempt to execute a growphysical
operation, if there is enough unused space on the
device. A growphysical operation will be executed if
there is at least 64 GB of free space, relative to the
previous physical size of the affected VDO volume.
type: bool
default: false
slabsize:
description:
- The size of the increment by which the physical size of
a VDO volume is grown, in megabytes (or may be issued
with an LVM-style suffix of K, M, G, or T). Must be a
power of two between 128M and 32G. The default is 2G,
which supports volumes having a physical size up to 16T.
The maximum, 32G, supports a physical size of up to 256T.
This option is only available when creating a new
volume, and cannot be changed for an existing volume.
type: str
writepolicy:
description:
- Specifies the write policy of the VDO volume. The
'sync' mode acknowledges writes only after data is on
stable storage. The 'async' mode acknowledges writes
when data has been cached for writing to stable
storage. The default (and highly recommended) 'auto'
mode checks the storage device to determine whether it
supports flushes. Devices that support flushes will
result in a VDO volume in 'async' mode, while devices
that do not support flushes will run in sync mode.
Existing volumes will maintain their previously
configured setting unless a different value is
specified in the playbook.
type: str
choices: [ async, auto, sync ]
indexmem:
description:
- Specifies the amount of index memory in gigabytes. The
default is 0.25. The special decimal values 0.25, 0.5,
and 0.75 can be used, as can any positive integer.
This option is only available when creating a new
volume, and cannot be changed for an existing volume.
type: str
indexmode:
description:
- Specifies the index mode of the Albireo index. The
default is 'dense', which has a deduplication window of
1 GB of index memory per 1 TB of incoming data,
requiring 10 GB of index data on persistent storage.
The 'sparse' mode has a deduplication window of 1 GB of
index memory per 10 TB of incoming data, but requires
100 GB of index data on persistent storage. This option
is only available when creating a new volume, and cannot
be changed for an existing volume.
type: str
choices: [ dense, sparse ]
ackthreads:
description:
- Specifies the number of threads to use for
acknowledging completion of requested VDO I/O operations.
Valid values are integer values from 1 to 100 (lower
numbers are preferable due to overhead). The default is
1. Existing volumes will maintain their previously
configured setting unless a different value is specified
in the playbook.
type: str
biothreads:
description:
- Specifies the number of threads to use for submitting I/O
operations to the storage device. Valid values are
integer values from 1 to 100 (lower numbers are
preferable due to overhead). The default is 4.
Existing volumes will maintain their previously
configured setting unless a different value is specified
in the playbook.
type: str
cputhreads:
description:
- Specifies the number of threads to use for CPU-intensive
work such as hashing or compression. Valid values are
integer values from 1 to 100 (lower numbers are
preferable due to overhead). The default is 2.
Existing volumes will maintain their previously
configured setting unless a different value is specified
in the playbook.
type: str
logicalthreads:
description:
- Specifies the number of threads across which to
subdivide parts of the VDO processing based on logical
block addresses. Valid values are integer values from
1 to 100 (lower numbers are preferable due to overhead).
The default is 1. Existing volumes will maintain their
previously configured setting unless a different value
is specified in the playbook.
type: str
physicalthreads:
description:
- Specifies the number of threads across which to
subdivide parts of the VDO processing based on physical
block addresses. Valid values are integer values from
1 to 16 (lower numbers are preferable due to overhead).
The physical space used by the VDO volume must be
larger than (slabsize * physicalthreads). The default
is 1. Existing volumes will maintain their previously
configured setting unless a different value is specified
in the playbook.
type: str
notes:
- In general, the default thread configuration should be used.
requirements:
- PyYAML
- kmod-kvdo
- vdo
'''
EXAMPLES = r'''
- name: Create 2 TB VDO volume vdo1 on device /dev/md0
vdo:
name: vdo1
state: present
device: /dev/md0
logicalsize: 2T
- name: Remove VDO volume vdo1
vdo:
name: vdo1
state: absent
'''
RETURN = r'''# '''
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
import re
import traceback
YAML_IMP_ERR = None
try:
import yaml
HAS_YAML = True
except ImportError:
YAML_IMP_ERR = traceback.format_exc()
HAS_YAML = False
# Generate a list of VDO volumes, whether they are running or stopped.
#
# @param module The AnsibleModule object.
# @param vdocmd The path of the 'vdo' command.
#
# @return vdolist A list of currently created VDO volumes.
def inventory_vdos(module, vdocmd):
    """Return a list of all VDO volume names (running or not).

    @param module  The AnsibleModule object.
    @param vdocmd  The path of the 'vdo' command.
    @return        List of currently created VDO volume names.
    """
    rc, vdostatusout, err = module.run_command("%s status" % (vdocmd))
    vdolist = []
    if (rc == 2 and
            re.findall(r"vdoconf.yml does not exist", err, re.MULTILINE)):
        # If there is no /etc/vdoconf.yml file, assume there are no
        # VDO volumes. Return an empty list of VDO volumes.
        return vdolist
    if rc != 0:
        module.fail_json(msg="Inventorying VDOs failed: %s"
                         % vdostatusout, rc=rc, err=err)
    # safe_load: the status output is plain data; yaml.load() without an
    # explicit Loader is deprecated and unsafe in PyYAML >= 5.1.
    vdostatusyaml = yaml.safe_load(vdostatusout)
    if vdostatusyaml is None:
        return vdolist
    vdoyamls = vdostatusyaml['VDOs']
    if vdoyamls is not None:
        # Materialize a real list (keys() is a view under Python 3).
        vdolist = list(vdoyamls.keys())
    return vdolist
def list_running_vdos(module, vdocmd):
    """Return a list of the names of currently *running* VDO volumes.

    @param module  The AnsibleModule object.
    @param vdocmd  The path of the 'vdo' command.
    @return        List of running VDO volume names.
    """
    rc, vdolistout, err = module.run_command("%s list" % (vdocmd))
    # Return a real list, not filter(): under Python 3 filter() yields a
    # one-shot iterator, and run_module() tests membership against this
    # result more than once -- the second test would silently see nothing.
    return [line for line in vdolistout.split('\n') if line]
# Generate a string containing options to pass to the 'VDO' command.
# Note that a 'create' operation will pass more options than a
# 'modify' operation.
#
# @param params A dictionary of parameters, and their values
# (values of 'None' and/or nonexistent values are ignored).
#
# @return vdocmdoptions A string to be used in a 'vdo <action>' command.
def start_vdo(module, vdoname, vdocmd):
    """Start the named VDO volume; return the 'vdo start' exit code."""
    rc = module.run_command("%s start --name=%s" % (vdocmd, vdoname))[0]
    if rc == 0:
        module.log("started VDO volume %s" % vdoname)
    return rc
def stop_vdo(module, vdoname, vdocmd):
    """Stop the named VDO volume; return the 'vdo stop' exit code."""
    rc = module.run_command("%s stop --name=%s" % (vdocmd, vdoname))[0]
    if rc == 0:
        module.log("stopped VDO volume %s" % vdoname)
    return rc
def activate_vdo(module, vdoname, vdocmd):
    """Activate the named VDO volume; return the 'vdo activate' exit code."""
    rc = module.run_command("%s activate --name=%s"
                            % (vdocmd, vdoname))[0]
    if rc == 0:
        module.log("activated VDO volume %s" % vdoname)
    return rc
def deactivate_vdo(module, vdoname, vdocmd):
    """Deactivate the named VDO volume; return the 'vdo deactivate' exit code."""
    rc = module.run_command("%s deactivate --name=%s"
                            % (vdocmd, vdoname))[0]
    if rc == 0:
        module.log("deactivated VDO volume %s" % vdoname)
    return rc
def add_vdooptions(params):
    """Build the option string for a 'vdo create'/'vdo modify' command.

    Only parameters present with meaningful values contribute options, so
    a 'create' operation will typically pass more options than a 'modify'.

    @param params  Dictionary of parameters and their values (values of
                   None and/or nonexistent keys are ignored).
    @return        A string to be used in a 'vdo <action>' command.
    """
    options = []
    # dict.get() covers both "key absent" and "value is None" in one check,
    # replacing the previous repetitive "'k' in params and params['k']" pairs.
    if params.get('logicalsize') is not None:
        options.append("--vdoLogicalSize=" + params['logicalsize'])
    if params.get('blockmapcachesize') is not None:
        options.append("--blockMapCacheSize=" + params['blockmapcachesize'])
    if params.get('readcache') == 'enabled':
        options.append("--readCache=enabled")
    if params.get('readcachesize') is not None:
        options.append("--readCacheSize=" + params['readcachesize'])
    if params.get('slabsize') is not None:
        options.append("--vdoSlabSize=" + params['slabsize'])
    if params.get('emulate512'):
        options.append("--emulate512=enabled")
    if params.get('indexmem') is not None:
        options.append("--indexMem=" + params['indexmem'])
    if params.get('indexmode') == 'sparse':
        options.append("--sparseIndex=enabled")
    # Entering an invalid thread config results in a cryptic
    # 'Could not set up device mapper for %s' error from the 'vdo'
    # command execution. The dmsetup module on the system will
    # output a more helpful message, but one would have to log
    # onto that system to read the error. For now, heed the thread
    # limit warnings in the DOCUMENTATION section above.
    if params.get('ackthreads') is not None:
        options.append("--vdoAckThreads=" + params['ackthreads'])
    if params.get('biothreads') is not None:
        options.append("--vdoBioThreads=" + params['biothreads'])
    if params.get('cputhreads') is not None:
        options.append("--vdoCpuThreads=" + params['cputhreads'])
    if params.get('logicalthreads') is not None:
        options.append("--vdoLogicalThreads=" + params['logicalthreads'])
    if params.get('physicalthreads') is not None:
        options.append("--vdoPhysicalThreads=" + params['physicalthreads'])
    return ' '.join(options)
def run_module():
    """Ansible entry point: create, modify or remove a VDO volume.

    Drives the 'vdo' command line tool according to the module parameters
    and reports changed/failed status back to Ansible via exit_json /
    fail_json.
    """
    # Define the available arguments/parameters that a user can pass to
    # the module.
    # Defaults for VDO parameters are None, in order to facilitate
    # the detection of parameters passed from the playbook.
    # Creation param defaults are determined by the creation section.
    module_args = dict(
        name=dict(type='str', required=True),
        state=dict(type='str', default='present', choices=['absent', 'present']),
        # NOTE(review): 'activated' and 'running' are declared type='bool',
        # yet the code below compares them against the strings 'yes'/'no';
        # those comparisons can never be true for real booleans -- confirm.
        activated=dict(type='bool'),
        running=dict(type='bool'),
        growphysical=dict(type='bool', default=False),
        device=dict(type='str'),
        logicalsize=dict(type='str'),
        deduplication=dict(type='str', choices=['disabled', 'enabled']),
        compression=dict(type='str', choices=['disabled', 'enabled']),
        blockmapcachesize=dict(type='str'),
        readcache=dict(type='str', choices=['disabled', 'enabled']),
        readcachesize=dict(type='str'),
        emulate512=dict(type='bool', default=False),
        slabsize=dict(type='str'),
        writepolicy=dict(type='str', choices=['async', 'auto', 'sync']),
        indexmem=dict(type='str'),
        indexmode=dict(type='str', choices=['dense', 'sparse']),
        ackthreads=dict(type='str'),
        biothreads=dict(type='str'),
        cputhreads=dict(type='str'),
        logicalthreads=dict(type='str'),
        physicalthreads=dict(type='str')
    )
    # Seed the result dictionary in the object. There will be an
    # 'invocation' dictionary added with 'module_args' (arguments
    # given).
    result = dict(
        changed=False,
    )
    # the AnsibleModule object will be our abstraction working with Ansible
    # this includes instantiation, a couple of common attr would be the
    # args/params passed to the execution, as well as if the module
    # supports check mode
    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=False,
    )
    if not HAS_YAML:
        module.fail_json(msg=missing_required_lib('PyYAML'), exception=YAML_IMP_ERR)
    vdocmd = module.get_bin_path("vdo", required=True)
    if not vdocmd:
        # Defensive only: get_bin_path(required=True) already fails the
        # module when the binary is missing.
        module.fail_json(msg='VDO is not installed.', **result)
    # Print a pre-run list of VDO volumes in the result object.
    vdolist = inventory_vdos(module, vdocmd)
    runningvdolist = list_running_vdos(module, vdocmd)
    # Collect the name of the desired VDO volume, and its state. These will
    # determine what to do.
    desiredvdo = module.params['name']
    state = module.params['state']
    # Create a desired VDO volume that doesn't exist yet.
    if (desiredvdo not in vdolist) and (state == 'present'):
        device = module.params['device']
        if device is None:
            module.fail_json(msg="Creating a VDO volume requires specifying "
                             "a 'device' in the playbook.")
        # Create a dictionary of the options from the AnsibleModule
        # parameters, compile the vdo command options, and run "vdo create"
        # with those options.
        # Since this is a creation of a new VDO volume, it will contain all
        # all of the parameters given by the playbook; the rest will
        # assume default values.
        options = module.params
        vdocmdoptions = add_vdooptions(options)
        rc, out, err = module.run_command("%s create --name=%s --device=%s %s"
                                          % (vdocmd, desiredvdo, device,
                                             vdocmdoptions))
        if rc == 0:
            result['changed'] = True
        else:
            module.fail_json(msg="Creating VDO %s failed."
                             % desiredvdo, rc=rc, err=err)
        # Compression/deduplication default to enabled at create time; only
        # explicit 'disabled' needs a follow-up command.
        if (module.params['compression'] == 'disabled'):
            rc, out, err = module.run_command("%s disableCompression --name=%s"
                                              % (vdocmd, desiredvdo))
        if ((module.params['deduplication'] is not None) and
                module.params['deduplication'] == 'disabled'):
            rc, out, err = module.run_command("%s disableDeduplication "
                                              "--name=%s"
                                              % (vdocmd, desiredvdo))
        # NOTE(review): 'activated'/'running' are booleans (see argument
        # spec) so these string comparisons never match -- confirm.
        if module.params['activated'] == 'no':
            deactivate_vdo(module, desiredvdo, vdocmd)
        if module.params['running'] == 'no':
            stop_vdo(module, desiredvdo, vdocmd)
        # Print a post-run list of VDO volumes in the result object.
        vdolist = inventory_vdos(module, vdocmd)
        module.log("created VDO volume %s" % desiredvdo)
        module.exit_json(**result)
    # Modify the current parameters of a VDO that exists.
    if (desiredvdo in vdolist) and (state == 'present'):
        rc, vdostatusoutput, err = module.run_command("%s status" % (vdocmd))
        # NOTE(review): yaml.load() without an explicit Loader is
        # deprecated/unsafe in PyYAML >= 5.1; safe_load would suffice here.
        vdostatusyaml = yaml.load(vdostatusoutput)
        # An empty dictionary to contain dictionaries of VDO statistics
        processedvdos = {}
        vdoyamls = vdostatusyaml['VDOs']
        if vdoyamls is not None:
            processedvdos = vdoyamls
        # The 'vdo status' keys that are currently modifiable.
        statusparamkeys = ['Acknowledgement threads',
                           'Bio submission threads',
                           'Block map cache size',
                           'CPU-work threads',
                           'Logical threads',
                           'Physical threads',
                           'Read cache',
                           'Read cache size',
                           'Configured write policy',
                           'Compression',
                           'Deduplication']
        # A key translation table from 'vdo status' output to Ansible
        # module parameters. This covers all of the 'vdo status'
        # parameter keys that could be modified with the 'vdo'
        # command.
        vdokeytrans = {
            'Logical size': 'logicalsize',
            'Compression': 'compression',
            'Deduplication': 'deduplication',
            'Block map cache size': 'blockmapcachesize',
            'Read cache': 'readcache',
            'Read cache size': 'readcachesize',
            'Configured write policy': 'writepolicy',
            'Acknowledgement threads': 'ackthreads',
            'Bio submission threads': 'biothreads',
            'CPU-work threads': 'cputhreads',
            'Logical threads': 'logicalthreads',
            'Physical threads': 'physicalthreads'
        }
        # Build a dictionary of the current VDO status parameters, with
        # the keys used by VDO. (These keys will be converted later.)
        currentvdoparams = {}
        # Build a "lookup table" dictionary containing a translation table
        # of the parameters that can be modified
        modtrans = {}
        for statfield in statusparamkeys:
            if statfield in processedvdos[desiredvdo]:
                currentvdoparams[statfield] = processedvdos[desiredvdo][statfield]
                modtrans[statfield] = vdokeytrans[statfield]
        # Build a dictionary of current parameters formatted with the
        # same keys as the AnsibleModule parameters.
        currentparams = {}
        for paramkey in modtrans.keys():
            # NOTE(review): this stores the translated parameter *name* as
            # the value rather than the current setting
            # (currentvdoparams[paramkey]); as written, every non-None
            # playbook parameter will compare as "different" below and be
            # re-applied on each run -- confirm intent.
            currentparams[modtrans[paramkey]] = modtrans[paramkey]
        diffparams = {}
        # Check for differences between the playbook parameters and the
        # current parameters. This will need a comparison function;
        # since AnsibleModule params are all strings, compare them as
        # strings (but if it's None; skip).
        for key in currentparams.keys():
            if module.params[key] is not None:
                if str(currentparams[key]) != module.params[key]:
                    diffparams[key] = module.params[key]
        if diffparams:
            vdocmdoptions = add_vdooptions(diffparams)
            if vdocmdoptions:
                rc, out, err = module.run_command("%s modify --name=%s %s"
                                                  % (vdocmd,
                                                     desiredvdo,
                                                     vdocmdoptions))
                if rc == 0:
                    result['changed'] = True
                else:
                    module.fail_json(msg="Modifying VDO %s failed."
                                     % desiredvdo, rc=rc, err=err)
            # Deduplication/compression/write policy are toggled with
            # dedicated subcommands rather than 'vdo modify' options.
            if 'deduplication' in diffparams.keys():
                dedupemod = diffparams['deduplication']
                if dedupemod == 'disabled':
                    rc, out, err = module.run_command("%s "
                                                      "disableDeduplication "
                                                      "--name=%s"
                                                      % (vdocmd, desiredvdo))
                    if rc == 0:
                        result['changed'] = True
                    else:
                        module.fail_json(msg="Changing deduplication on "
                                             "VDO volume %s failed."
                                         % desiredvdo, rc=rc, err=err)
                if dedupemod == 'enabled':
                    rc, out, err = module.run_command("%s "
                                                      "enableDeduplication "
                                                      "--name=%s"
                                                      % (vdocmd, desiredvdo))
                    if rc == 0:
                        result['changed'] = True
                    else:
                        module.fail_json(msg="Changing deduplication on "
                                             "VDO volume %s failed."
                                         % desiredvdo, rc=rc, err=err)
            if 'compression' in diffparams.keys():
                compressmod = diffparams['compression']
                if compressmod == 'disabled':
                    rc, out, err = module.run_command("%s disableCompression "
                                                      "--name=%s"
                                                      % (vdocmd, desiredvdo))
                    if rc == 0:
                        result['changed'] = True
                    else:
                        module.fail_json(msg="Changing compression on "
                                             "VDO volume %s failed."
                                         % desiredvdo, rc=rc, err=err)
                if compressmod == 'enabled':
                    rc, out, err = module.run_command("%s enableCompression "
                                                      "--name=%s"
                                                      % (vdocmd, desiredvdo))
                    if rc == 0:
                        result['changed'] = True
                    else:
                        module.fail_json(msg="Changing compression on "
                                             "VDO volume %s failed."
                                         % desiredvdo, rc=rc, err=err)
            if 'writepolicy' in diffparams.keys():
                writepolmod = diffparams['writepolicy']
                if writepolmod == 'auto':
                    rc, out, err = module.run_command("%s "
                                                      "changeWritePolicy "
                                                      "--name=%s "
                                                      "--writePolicy=%s"
                                                      % (vdocmd,
                                                         desiredvdo,
                                                         writepolmod))
                    if rc == 0:
                        result['changed'] = True
                    else:
                        module.fail_json(msg="Changing write policy on "
                                             "VDO volume %s failed."
                                         % desiredvdo, rc=rc, err=err)
                if writepolmod == 'sync':
                    rc, out, err = module.run_command("%s "
                                                      "changeWritePolicy "
                                                      "--name=%s "
                                                      "--writePolicy=%s"
                                                      % (vdocmd,
                                                         desiredvdo,
                                                         writepolmod))
                    if rc == 0:
                        result['changed'] = True
                    else:
                        module.fail_json(msg="Changing write policy on "
                                             "VDO volume %s failed."
                                         % desiredvdo, rc=rc, err=err)
                if writepolmod == 'async':
                    rc, out, err = module.run_command("%s "
                                                      "changeWritePolicy "
                                                      "--name=%s "
                                                      "--writePolicy=%s"
                                                      % (vdocmd,
                                                         desiredvdo,
                                                         writepolmod))
                    if rc == 0:
                        result['changed'] = True
                    else:
                        module.fail_json(msg="Changing write policy on "
                                             "VDO volume %s failed."
                                         % desiredvdo, rc=rc, err=err)
        # Process the size parameters, to determine of a growPhysical or
        # growLogical operation needs to occur.
        sizeparamkeys = ['Logical size', ]
        currentsizeparams = {}
        sizetrans = {}
        for statfield in sizeparamkeys:
            currentsizeparams[statfield] = processedvdos[desiredvdo][statfield]
            sizetrans[statfield] = vdokeytrans[statfield]
        sizeparams = {}
        for paramkey in currentsizeparams.keys():
            sizeparams[sizetrans[paramkey]] = currentsizeparams[paramkey]
        diffsizeparams = {}
        for key in sizeparams.keys():
            if module.params[key] is not None:
                if str(sizeparams[key]) != module.params[key]:
                    diffsizeparams[key] = module.params[key]
        if module.params['growphysical']:
            physdevice = module.params['device']
            # blockdev reports 512-byte sectors; VDO counts 4 KB blocks.
            rc, devsectors, err = module.run_command("blockdev --getsz %s"
                                                     % (physdevice))
            devblocks = (int(devsectors) / 8)
            dmvdoname = ('/dev/mapper/' + desiredvdo)
            currentvdostats = (processedvdos[desiredvdo]
                               ['VDO statistics']
                               [dmvdoname])
            currentphysblocks = currentvdostats['physical blocks']
            # Set a growPhysical threshold to grow only when there is
            # guaranteed to be more than 2 slabs worth of unallocated
            # space on the device to use. For now, set to device
            # size + 64 GB, since 32 GB is the largest possible
            # slab size.
            growthresh = devblocks + 16777216
            if currentphysblocks > growthresh:
                result['changed'] = True
                rc, out, err = module.run_command("%s growPhysical --name=%s"
                                                  % (vdocmd, desiredvdo))
        if 'logicalsize' in diffsizeparams.keys():
            result['changed'] = True
            vdocmdoptions = ("--vdoLogicalSize=" +
                             diffsizeparams['logicalsize'])
            rc, out, err = module.run_command("%s growLogical --name=%s %s"
                                              % (vdocmd,
                                                 desiredvdo,
                                                 vdocmdoptions))
        vdoactivatestatus = processedvdos[desiredvdo]['Activate']
        # NOTE(review): the 'yes'/'no' string comparisons below can never
        # match, since 'activated' and 'running' are type='bool' -- confirm.
        if ((module.params['activated'] == 'no') and
                (vdoactivatestatus == 'enabled')):
            deactivate_vdo(module, desiredvdo, vdocmd)
            if not result['changed']:
                result['changed'] = True
        if ((module.params['activated'] == 'yes') and
                (vdoactivatestatus == 'disabled')):
            activate_vdo(module, desiredvdo, vdocmd)
            if not result['changed']:
                result['changed'] = True
        if ((module.params['running'] == 'no') and
                (desiredvdo in runningvdolist)):
            stop_vdo(module, desiredvdo, vdocmd)
            if not result['changed']:
                result['changed'] = True
        # Note that a disabled VDO volume cannot be started by the
        # 'vdo start' command, by design. To accurately track changed
        # status, don't try to start a disabled VDO volume.
        # If the playbook contains 'activated: yes', assume that
        # the activate_vdo() operation succeeded, as 'vdoactivatestatus'
        # will have the activated status prior to the activate_vdo()
        # call.
        if (((vdoactivatestatus == 'enabled') or
                (module.params['activated'] == 'yes')) and
                (module.params['running'] == 'yes') and
                (desiredvdo not in runningvdolist)):
            start_vdo(module, desiredvdo, vdocmd)
            if not result['changed']:
                result['changed'] = True
        # Print a post-run list of VDO volumes in the result object.
        vdolist = inventory_vdos(module, vdocmd)
        if diffparams:
            module.log("modified parameters of VDO volume %s" % desiredvdo)
        module.exit_json(**result)
    # Remove a desired VDO that currently exists.
    if (desiredvdo in vdolist) and (state == 'absent'):
        rc, out, err = module.run_command("%s remove --name=%s"
                                          % (vdocmd, desiredvdo))
        if rc == 0:
            result['changed'] = True
        else:
            module.fail_json(msg="Removing VDO %s failed."
                             % desiredvdo, rc=rc, err=err)
        # Print a post-run list of VDO volumes in the result object.
        vdolist = inventory_vdos(module, vdocmd)
        module.log("removed VDO volume %s" % desiredvdo)
        module.exit_json(**result)
    # fall through
    # The state for the desired VDO volume was absent, and it does
    # not exist. Print a post-run list of VDO volumes in the result
    # object.
    vdolist = inventory_vdos(module, vdocmd)
    module.log("received request to remove non-existent VDO volume %s"
               % desiredvdo)
    module.exit_json(**result)
def main():
    # Thin wrapper so importing this file has no side effects.
    run_module()
# Run the module entry point only when executed directly.
if __name__ == '__main__':
    main()

View file

@ -0,0 +1,214 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Joseph Benden <joe@benden.us>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: xfconf
author:
- "Joseph Benden (@jbenden)"
short_description: Edit XFCE4 Configurations
description:
- This module allows for the manipulation of Xfce 4 Configuration via
xfconf-query. Please see the xfconf-query(1) man pages for more details.
options:
channel:
description:
- A Xfconf preference channel is a top-level tree key, inside of the
Xfconf repository that corresponds to the location for which all
application properties/keys are stored. See man xfconf-query(1)
required: yes
property:
description:
- A Xfce preference key is an element in the Xfconf repository
that corresponds to an application preference. See man xfconf-query(1)
required: yes
value:
description:
- Preference properties typically have simple values such as strings,
integers, or lists of strings and integers. This is ignored if the state
is "get". See man xfconf-query(1)
value_type:
description:
- The type of value being set. This is ignored if the state is "get".
choices: [ int, bool, float, string ]
state:
description:
- The action to take upon the property/value.
choices: [ get, present, absent ]
default: "present"
'''
EXAMPLES = """
- name: Change the DPI to "192"
xfconf:
channel: "xsettings"
property: "/Xft/DPI"
value_type: "int"
value: "192"
become: True
become_user: johnsmith
"""
RETURN = '''
channel:
description: The channel specified in the module parameters
returned: success
type: str
sample: "xsettings"
property:
description: The property specified in the module parameters
returned: success
type: str
sample: "/Xft/DPI"
value_type:
description: The type of the value that was changed
returned: success
type: str
sample: "int"
value:
description: The value of the preference key after executing the module
returned: success
type: str
sample: "192"
...
'''
import sys
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves import shlex_quote
class XfconfPreference(object):
    """Thin wrapper around the xfconf-query(1) command-line tool for a
    single (channel, property) pair."""

    def __init__(self, module, channel, property, value_type, value):
        self.module = module
        self.channel = channel
        self.property = property
        self.value_type = value_type
        self.value = value

    def call(self, call_type, fail_onerr=True):
        """Run xfconf-query for a 'get', 'set' or 'unset' operation.

        Returns a (changed, output) tuple; any failure fails the module
        unless fail_onerr is False.
        """
        changed = False
        out = ''
        # Base invocation: binary plus channel/property selection.
        cmd = "{0} --channel {1} --property {2}".format(self.module.get_bin_path('xfconf-query', True),
                                                        shlex_quote(self.channel),
                                                        shlex_quote(self.property))
        try:
            if call_type == 'set':
                cmd += " --type {0} --create --set {1}".format(shlex_quote(self.value_type),
                                                               shlex_quote(self.value))
            elif call_type == 'unset':
                cmd += " --reset"
            # Start external command
            rc, out, err = self.module.run_command(cmd, check_rc=False)
            if rc == 0 and not err:
                changed = True
            elif fail_onerr:
                self.module.fail_json(msg='xfconf-query failed with error: %s' % (str(err)))
        except OSError as exception:
            self.module.fail_json(msg='xfconf-query failed with exception: %s' % exception)
        return changed, out.rstrip()
def main():
    """Entry point: get, set or reset a single Xfconf property.

    Reports the previous/new value in the 'xfconf' ansible fact and sets
    'changed' when a write (or would-be write, in check mode) occurred.
    """
    # Setup the Ansible module
    module = AnsibleModule(
        argument_spec=dict(
            channel=dict(required=True, type='str'),
            property=dict(required=True, type='str'),
            value_type=dict(required=False,
                            choices=['int', 'bool', 'float', 'string'],
                            type='str'),
            value=dict(required=False, default=None, type='str'),
            state=dict(default='present',
                       choices=['present', 'get', 'absent'],
                       type='str')
        ),
        supports_check_mode=True
    )

    # Map the module 'state' to the corresponding xfconf-query operation.
    state_values = {"present": "set", "absent": "unset", "get": "get"}

    # Assign module values to dictionary values
    channel = module.params['channel']
    property = module.params['property']
    value_type = module.params['value_type']

    # Normalize boolean-like values to the lowercase spelling xfconf-query
    # expects.  'value' may legitimately be None (it is optional, e.g. for
    # state=get), so guard before calling .lower(); the previous code
    # raised AttributeError on None and lowercased "true" but not "false".
    value = module.params['value']
    if value is not None:
        if value.lower() == "true":
            value = "true"
        elif value.lower() == "false":
            value = "false"

    state = state_values[module.params['state']]

    # Initialize some variables for later
    change = False
    new_value = ''

    # 'set'/'unset' require a value (and, for 'set', a type); 'get' does not.
    if state != "get":
        if value is None or value == "":
            module.fail_json(msg='State %s requires "value" to be set'
                             % str(state))
        elif value_type is None or value_type == "":
            module.fail_json(msg='State %s requires "value_type" to be set'
                             % str(state))

    # Create a Xfconf preference
    xfconf = XfconfPreference(module,
                              channel,
                              property,
                              value_type,
                              value)
    # Now we get the current value, if not found don't fail
    dummy, current_value = xfconf.call("get", fail_onerr=False)

    # Check if the current value equals the value we want to set. If not, make
    # a change
    if current_value != value:
        # If check mode, we know a change would have occurred.
        if module.check_mode:
            # So we will set the change to True
            change = True
            # And set the new_value to the value that would have been set
            new_value = value
        # If not check mode make the change.
        else:
            change, new_value = xfconf.call(state)
    # If the value we want to set is the same as the current_value, we will
    # set the new_value to the current_value for reporting
    else:
        new_value = current_value

    # Expose everything useful as an ansible fact.
    facts = dict(xfconf={'changed': change,
                         'channel': channel,
                         'property': property,
                         'value_type': value_type,
                         'new_value': new_value,
                         'previous_value': current_value,
                         'playbook_value': module.params['value']})

    module.exit_json(changed=change, ansible_facts=facts)
# Run the module entry point only when executed directly.
if __name__ == '__main__':
    main()

View file

@ -0,0 +1,432 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Emmanouil Kampitakis <info@kampitakis.de>
# Copyright: (c) 2018, William Leemans <willie@elaba.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: xfs_quota
short_description: Manage quotas on XFS filesystems
description:
- Configure quotas on XFS filesystems.
  - Before using this module, /etc/projects and /etc/projid need to be configured.
author:
- William Leemans (@bushvin)
options:
type:
description:
- The XFS quota type.
type: str
required: true
choices:
- user
- group
- project
name:
description:
- The name of the user, group or project to apply the quota to, if other than default.
type: str
mountpoint:
description:
- The mount point on which to apply the quotas.
type: str
required: true
bhard:
description:
- Hard blocks quota limit.
- This argument supports human readable sizes.
type: str
bsoft:
description:
- Soft blocks quota limit.
- This argument supports human readable sizes.
type: str
ihard:
description:
- Hard inodes quota limit.
type: int
isoft:
description:
- Soft inodes quota limit.
type: int
rtbhard:
description:
- Hard realtime blocks quota limit.
- This argument supports human readable sizes.
type: str
rtbsoft:
description:
- Soft realtime blocks quota limit.
- This argument supports human readable sizes.
type: str
state:
description:
- Whether to apply the limits or remove them.
    - When removing limits, they are set to 0, and not quite removed.
type: str
default: present
choices:
- present
- absent
requirements:
- xfsprogs
'''
EXAMPLES = r'''
- name: Set default project soft and hard limit on /opt of 1g
xfs_quota:
type: project
mountpoint: /opt
bsoft: 1g
bhard: 1g
state: present
- name: Remove the default limits on /opt
xfs_quota:
type: project
mountpoint: /opt
state: absent
- name: Set default soft user inode limits on /home of 1024 inodes and hard of 2048
xfs_quota:
type: user
mountpoint: /home
isoft: 1024
ihard: 2048
'''
RETURN = r'''
bhard:
description: the current bhard setting in bytes
returned: always
type: int
sample: 1024
bsoft:
description: the current bsoft setting in bytes
returned: always
type: int
sample: 1024
ihard:
description: the current ihard setting in bytes
returned: always
type: int
sample: 100
isoft:
description: the current isoft setting in bytes
returned: always
type: int
sample: 100
rtbhard:
description: the current rtbhard setting in bytes
returned: always
type: int
sample: 1024
rtbsoft:
description: the current rtbsoft setting in bytes
returned: always
type: int
sample: 1024
'''
import grp
import os
import pwd
from ansible.module_utils.basic import AnsibleModule, human_to_bytes
def main():
    """Ansible entry point: apply or clear XFS quota limits for a user,
    group or project on a given xfs mount point."""
    module = AnsibleModule(
        argument_spec=dict(
            bhard=dict(type='str'),
            bsoft=dict(type='str'),
            ihard=dict(type='int'),
            isoft=dict(type='int'),
            mountpoint=dict(type='str', required=True),
            name=dict(type='str'),
            rtbhard=dict(type='str'),
            rtbsoft=dict(type='str'),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            type=dict(type='str', required=True, choices=['group', 'project', 'user'])
        ),
        supports_check_mode=True,
    )
    quota_type = module.params['type']
    name = module.params['name']
    mountpoint = module.params['mountpoint']
    bhard = module.params['bhard']
    bsoft = module.params['bsoft']
    ihard = module.params['ihard']
    isoft = module.params['isoft']
    rtbhard = module.params['rtbhard']
    rtbsoft = module.params['rtbsoft']
    state = module.params['state']
    # Block limits accept human-readable sizes; convert them to bytes.
    if bhard is not None:
        bhard = human_to_bytes(bhard)
    if bsoft is not None:
        bsoft = human_to_bytes(bsoft)
    if rtbhard is not None:
        rtbhard = human_to_bytes(rtbhard)
    if rtbsoft is not None:
        rtbsoft = human_to_bytes(rtbsoft)
    result = dict(
        changed=False,
    )
    if not os.path.ismount(mountpoint):
        module.fail_json(msg="Path '%s' is not a mount point" % mountpoint, **result)
    mp = get_fs_by_mountpoint(mountpoint)
    if mp is None:
        module.fail_json(msg="Path '%s' is not a mount point or not located on an xfs file system." % mountpoint, **result)
    # Per-type validation: pick the xfs_quota type flag, the default
    # target name, and verify the filesystem was mounted with the
    # matching quota option and the target actually exists.
    if quota_type == 'user':
        type_arg = '-u'
        quota_default = 'root'
        if name is None:
            name = quota_default
        if 'uquota' not in mp['mntopts'] and 'usrquota' not in mp['mntopts'] and 'quota' not in mp['mntopts'] and 'uqnoenforce' not in mp['mntopts'] and \
                'qnoenforce' not in mp['mntopts']:
            module.fail_json(
                msg="Path '%s' is not mounted with the uquota/usrquota/quota/uqnoenforce/qnoenforce option." % mountpoint, **result
            )
        try:
            pwd.getpwnam(name)
        except KeyError as e:
            module.fail_json(msg="User '%s' does not exist." % name, **result)
    elif quota_type == 'group':
        type_arg = '-g'
        quota_default = 'root'
        if name is None:
            name = quota_default
        if 'gquota' not in mp['mntopts'] and 'grpquota' not in mp['mntopts'] and 'gqnoenforce' not in mp['mntopts']:
            module.fail_json(
                msg="Path '%s' is not mounted with the gquota/grpquota/gqnoenforce option. (current options: %s)" % (mountpoint, mp['mntopts']), **result
            )
        try:
            grp.getgrnam(name)
        except KeyError as e:
            # NOTE(review): this message says "User" although it reports a
            # missing *group* -- confirm and reword upstream.
            module.fail_json(msg="User '%s' does not exist." % name, **result)
    elif quota_type == 'project':
        type_arg = '-p'
        quota_default = '#0'
        if name is None:
            name = quota_default
        if 'pquota' not in mp['mntopts'] and 'prjquota' not in mp['mntopts'] and 'pqnoenforce' not in mp['mntopts']:
            module.fail_json(msg="Path '%s' is not mounted with the pquota/prjquota/pqnoenforce option." % mountpoint, **result)
        if name != quota_default and not os.path.isfile('/etc/projects'):
            module.fail_json(msg="Path '/etc/projects' does not exist.", **result)
        if name != quota_default and not os.path.isfile('/etc/projid'):
            module.fail_json(msg="Path '/etc/projid' does not exist.", **result)
        if name != quota_default and name is not None and get_project_id(name) is None:
            module.fail_json(msg="Entry '%s' has not been defined in /etc/projid." % name, **result)
        prj_set = True
        if name != quota_default:
            cmd = 'project %s' % name
            rc, stdout, stderr = exec_quota(module, cmd, mountpoint)
            if rc != 0:
                result['cmd'] = cmd
                result['rc'] = rc
                result['stdout'] = stdout
                result['stderr'] = stderr
                module.fail_json(msg='Could not get project state.', **result)
            else:
                for line in stdout.split('\n'):
                    # NOTE(review): the '%s' below is never interpolated
                    # with the project name, so this literal can never match
                    # a real output line and prj_set stays True -- confirm.
                    if "Project Id '%s' - is not set." in line:
                        prj_set = False
                        break
            # Initialize the project directory tree when it is not set yet.
            if not prj_set and not module.check_mode:
                cmd = 'project -s'
                rc, stdout, stderr = exec_quota(module, cmd, mountpoint)
                if rc != 0:
                    result['cmd'] = cmd
                    result['rc'] = rc
                    result['stdout'] = stdout
                    result['stderr'] = stderr
                    # NOTE(review): the message mentions the realtime block
                    # report but this ran 'project -s' -- confirm wording.
                    module.fail_json(msg='Could not get quota realtime block report.', **result)
                result['changed'] = True
            elif not prj_set and module.check_mode:
                result['changed'] = True
    # Set limits
    if state == 'absent':
        # "Removing" limits means setting every limit to 0.
        bhard = 0
        bsoft = 0
        ihard = 0
        isoft = 0
        rtbhard = 0
        rtbsoft = 0
    current_bsoft, current_bhard = quota_report(module, mountpoint, name, quota_type, 'b')
    current_isoft, current_ihard = quota_report(module, mountpoint, name, quota_type, 'i')
    current_rtbsoft, current_rtbhard = quota_report(module, mountpoint, name, quota_type, 'rtb')
    result['xfs_quota'] = dict(
        bsoft=current_bsoft,
        bhard=current_bhard,
        isoft=current_isoft,
        ihard=current_ihard,
        rtbsoft=current_rtbsoft,
        rtbhard=current_rtbhard
    )
    # Build the list of limits that actually differ from the current state.
    limit = []
    if bsoft is not None and int(bsoft) != current_bsoft:
        limit.append('bsoft=%s' % bsoft)
        result['bsoft'] = int(bsoft)
    if bhard is not None and int(bhard) != current_bhard:
        limit.append('bhard=%s' % bhard)
        result['bhard'] = int(bhard)
    if isoft is not None and isoft != current_isoft:
        limit.append('isoft=%s' % isoft)
        result['isoft'] = isoft
    if ihard is not None and ihard != current_ihard:
        limit.append('ihard=%s' % ihard)
        result['ihard'] = ihard
    if rtbsoft is not None and int(rtbsoft) != current_rtbsoft:
        limit.append('rtbsoft=%s' % rtbsoft)
        result['rtbsoft'] = int(rtbsoft)
    if rtbhard is not None and int(rtbhard) != current_rtbhard:
        limit.append('rtbhard=%s' % rtbhard)
        result['rtbhard'] = int(rtbhard)
    if len(limit) > 0 and not module.check_mode:
        if name == quota_default:
            cmd = 'limit %s -d %s' % (type_arg, ' '.join(limit))
        else:
            cmd = 'limit %s %s %s' % (type_arg, ' '.join(limit), name)
        rc, stdout, stderr = exec_quota(module, cmd, mountpoint)
        if rc != 0:
            result['cmd'] = cmd
            result['rc'] = rc
            result['stdout'] = stdout
            result['stderr'] = stderr
            module.fail_json(msg='Could not set limits.', **result)
        result['changed'] = True
    elif len(limit) > 0 and module.check_mode:
        result['changed'] = True
    module.exit_json(**result)
def quota_report(module, mountpoint, name, quota_type, used_type):
    """Return the (soft, hard) limits currently reported by
    'xfs_quota report' for *name*, scaled to bytes where applicable.

    Either value is None when *name* does not appear in the report.
    """
    soft, hard = None, None

    if quota_type == 'project':
        type_arg = '-p'
    elif quota_type == 'user':
        type_arg = '-u'
    elif quota_type == 'group':
        type_arg = '-g'

    # The report prints blocks in KiB units; inodes are plain counts.
    if used_type == 'b':
        used_arg, used_name, factor = '-b', 'blocks', 1024
    elif used_type == 'i':
        used_arg, used_name, factor = '-i', 'inodes', 1
    elif used_type == 'rtb':
        used_arg, used_name, factor = '-r', 'realtime blocks', 1024

    rc, stdout, stderr = exec_quota(module, 'report %s %s' % (type_arg, used_arg), mountpoint)
    if rc != 0:
        module.fail_json(msg='Could not get quota report for %s.' % used_name,
                         changed=False, rc=rc, stdout=stdout, stderr=stderr)

    # Report rows look like: <name> <used> <soft> <hard> ...; scan for ours.
    for row in stdout.split('\n'):
        fields = row.strip().split()
        if len(fields) > 3 and fields[0] == name:
            soft = int(fields[2]) * factor
            hard = int(fields[3]) * factor
            break

    return soft, hard
def exec_quota(module, cmd, mountpoint):
    """Run an xfs_quota expert-mode (-x) command against *mountpoint* and
    return its (rc, stdout, stderr) triple, failing fast on permission
    errors."""
    full_cmd = ['xfs_quota', '-x', '-c'] + [cmd, mountpoint]
    rc, stdout, stderr = module.run_command(full_cmd, use_unsafe_shell=True)
    err_lines = stderr.split('\n')
    if ("XFS_GETQUOTA: Operation not permitted" in err_lines or
            (rc == 1 and 'xfs_quota: cannot set limits: Operation not permitted' in err_lines)):
        module.fail_json(msg='You need to be root or have CAP_SYS_ADMIN capability to perform this operation')
    return rc, stdout, stderr
def get_fs_by_mountpoint(mountpoint):
    """Return the /proc/mounts entry for *mountpoint* as a dict (with
    'mntopts' split into a list), or None when it is not an xfs mount."""
    fields = ['spec', 'file', 'vfstype', 'mntopts', 'freq', 'passno']
    with open('/proc/mounts', 'r') as s:
        for line in s.readlines():
            parts = line.strip().split()
            if len(parts) == 6 and parts[1] == mountpoint and parts[2] == 'xfs':
                entry = dict(zip(fields, parts))
                entry['mntopts'] = entry['mntopts'].split(',')
                return entry
    return None
def get_project_id(name):
    """Look up *name* in /etc/projid and return its project id as a
    string, or None when the entry is absent."""
    with open('/etc/projid', 'r') as s:
        for raw in s.readlines():
            key, dummy, projid = raw.strip().partition(':')
            if key == name:
                return projid
    return None
# Run the module entry point only when executed directly.
if __name__ == '__main__':
    main()