Initial commit

Ansible Core Team 2020-03-09 09:11:07 +00:00
commit aebc1b03fd
4861 changed files with 812621 additions and 0 deletions

@@ -0,0 +1,176 @@
#!/usr/bin/python
#
# Copyright (c) 2018, Luca 'remix_tj' Lorenzetto <lorenzetto.luca@gmail.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: emc_vnx_sg_member
short_description: Manage storage group member on EMC VNX
description:
- "This module manages the members of an existing storage group."
extends_documentation_fragment:
- community.general.emc.emc_vnx
options:
name:
description:
- Name of the Storage group to manage.
required: true
lunid:
description:
- Lun id to be added.
required: true
state:
description:
- Indicates the desired lunid state.
- C(present) ensures the specified lunid is present in the Storage Group.
- C(absent) ensures the specified lunid is absent from the Storage Group.
default: present
choices: [ "present", "absent" ]
author:
- Luca 'remix_tj' Lorenzetto (@remixtj)
'''
EXAMPLES = '''
- name: Add lun to storage group
emc_vnx_sg_member:
name: sg01
sp_address: sp1a.fqdn
sp_user: sysadmin
sp_password: sysadmin
lunid: 100
state: present
- name: Remove lun from storage group
emc_vnx_sg_member:
name: sg01
sp_address: sp1a.fqdn
sp_user: sysadmin
sp_password: sysadmin
lunid: 100
state: absent
'''
RETURN = '''
hluid:
description: LUNID that hosts attached to the storage group will see.
type: int
returned: success
'''
import traceback
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
from ansible_collections.community.general.plugins.module_utils.storage.emc.emc_vnx import emc_vnx_argument_spec
LIB_IMP_ERR = None
try:
from storops import VNXSystem
from storops.exception import VNXCredentialError, VNXStorageGroupError, \
VNXAluAlreadyAttachedError, VNXAttachAluError, VNXDetachAluNotFoundError
HAS_LIB = True
except Exception:
LIB_IMP_ERR = traceback.format_exc()
HAS_LIB = False
def run_module():
module_args = dict(
name=dict(type='str', required=True),
lunid=dict(type='int', required=True),
state=dict(default='present', choices=['present', 'absent']),
)
module_args.update(emc_vnx_argument_spec)
result = dict(
changed=False,
hluid=None
)
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True
)
if not HAS_LIB:
module.fail_json(msg=missing_required_lib('storops >= 0.5.10'),
exception=LIB_IMP_ERR)
sp_user = module.params['sp_user']
sp_address = module.params['sp_address']
sp_password = module.params['sp_password']
alu = module.params['lunid']
# if the user is working with this module in only check mode we do not
# want to make any changes to the environment, just return the current
# state with no modifications
if module.check_mode:
module.exit_json(**result)
try:
vnx = VNXSystem(sp_address, sp_user, sp_password)
sg = vnx.get_sg(module.params['name'])
if sg.existed:
if module.params['state'] == 'present':
if not sg.has_alu(alu):
try:
result['hluid'] = sg.attach_alu(alu)
result['changed'] = True
except VNXAluAlreadyAttachedError:
result['hluid'] = sg.get_hlu(alu)
except (VNXAttachAluError, VNXStorageGroupError) as e:
module.fail_json(msg='Error attaching {0}: '
'{1} '.format(alu, to_native(e)),
**result)
else:
result['hluid'] = sg.get_hlu(alu)
if module.params['state'] == 'absent' and sg.has_alu(alu):
try:
sg.detach_alu(alu)
result['changed'] = True
except VNXDetachAluNotFoundError:
# being not attached when using absent is OK
pass
except VNXStorageGroupError as e:
module.fail_json(msg='Error detaching alu {0}: '
'{1} '.format(alu, to_native(e)),
**result)
else:
module.fail_json(msg='No such storage group named '
'{0}'.format(module.params['name']),
**result)
except VNXCredentialError as e:
module.fail_json(msg='{0}'.format(to_native(e)), **result)
module.exit_json(**result)
def main():
run_module()
if __name__ == '__main__':
main()
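
For reference, a minimal sketch of the storops calls this module wraps, outside of Ansible. The SP address, credentials and LUN id are the placeholder values from the EXAMPLES block above, not a real system.

from storops import VNXSystem

# Placeholder connection details taken from the EXAMPLES block above.
vnx = VNXSystem('sp1a.fqdn', 'sysadmin', 'sysadmin')
sg = vnx.get_sg('sg01')
if sg.existed and not sg.has_alu(100):
    # attach_alu() returns the HLU id that hosts in the storage group will see
    hluid = sg.attach_alu(100)
    print('lun 100 attached as hlu %s' % hluid)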

@@ -0,0 +1 @@
gluster_heal_info.py

@@ -0,0 +1,203 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2016, Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gluster_heal_info
short_description: Gather information on self-heal or rebalance status
author: "Devyani Kota (@devyanikota)"
description:
- Gather information about either self-heal or rebalance status.
- This module was called C(gluster_heal_facts) before Ansible 2.9, returning C(ansible_facts).
Note that the M(gluster_heal_info) module no longer returns C(ansible_facts)!
options:
name:
description:
- The volume name.
required: true
aliases: ['volume']
status_filter:
default: "self-heal"
choices: ["self-heal", "rebalance"]
description:
- Determines which information is to be returned.
- If C(status_filter) is C(self-heal), the self-heal status, along with the number of entries still to be healed, is returned.
- If C(status_filter) is C(rebalance), the rebalance status is returned.
requirements:
- GlusterFS > 3.2
'''
EXAMPLES = '''
- name: Gather self-heal facts about all gluster hosts in the cluster
gluster_heal_info:
name: test_volume
status_filter: self-heal
register: self_heal_status
- debug:
var: self_heal_status
- name: Gather rebalance facts about all gluster hosts in the cluster
gluster_heal_info:
name: test_volume
status_filter: rebalance
register: rebalance_status
- debug:
var: rebalance_status
'''
RETURN = '''
name:
description: GlusterFS volume name
returned: always
type: str
status_filter:
description: Whether self-heal or rebalance status is to be returned
returned: always
type: str
heal_info:
description: Self-heal status of each brick, including the number of entries still to be healed
returned: On success
type: list
rebalance_status:
description: Status of rebalance operation
returned: On success
type: list
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from distutils.version import LooseVersion
glusterbin = ''
def run_gluster(gargs, **kwargs):
global glusterbin
global module
args = [glusterbin, '--mode=script']
args.extend(gargs)
try:
rc, out, err = module.run_command(args, **kwargs)
if rc != 0:
module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' %
(' '.join(args), rc, out or err), exception=traceback.format_exc())
except Exception as e:
module.fail_json(msg='error running gluster (%s) command: %s' % (' '.join(args),
to_native(e)), exception=traceback.format_exc())
return out
def get_self_heal_status(name):
out = run_gluster(['volume', 'heal', name, 'info'], environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'))
raw_out = out.split("\n")
heal_info = []
# return files that still need healing.
for line in raw_out:
if 'Brick' in line:
br_dict = {}
br_dict['brick'] = line.strip().strip("Brick")
elif 'Status' in line:
br_dict['status'] = line.split(":")[1].strip()
elif 'Number' in line:
br_dict['no_of_entries'] = line.split(":")[1].strip()
elif line.startswith('/') or line.startswith('<') or '\n' in line:
continue
else:
br_dict and heal_info.append(br_dict)
br_dict = {}
return heal_info
def get_rebalance_status(name):
out = run_gluster(['volume', 'rebalance', name, 'status'], environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'))
raw_out = out.split("\n")
rebalance_status = []
# return the files that are either still 'in progress' state or 'completed'.
for line in raw_out:
line = " ".join(line.split())
line_vals = line.split(" ")
if line_vals[0].startswith('-') or line_vals[0].startswith('Node'):
continue
node_dict = {}
if len(line_vals) == 1 or len(line_vals) == 4:
continue
node_dict['node'] = line_vals[0]
node_dict['rebalanced_files'] = line_vals[1]
node_dict['failures'] = line_vals[4]
if 'in progress' in line:
node_dict['status'] = line_vals[5] + line_vals[6]
rebalance_status.append(node_dict)
elif 'completed' in line:
node_dict['status'] = line_vals[5]
rebalance_status.append(node_dict)
return rebalance_status
def is_invalid_gluster_version(module, required_version):
cmd = module.get_bin_path('gluster', True) + ' --version'
result = module.run_command(cmd)
ver_line = result[1].split('\n')[0]
version = ver_line.split(' ')[1]
# If the installed version is less than 3.2, it is an invalid version
# return True
return LooseVersion(version) < LooseVersion(required_version)
def main():
global module
global glusterbin
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True, aliases=['volume']),
status_filter=dict(type='str', default='self-heal', choices=['self-heal', 'rebalance']),
),
)
is_old_facts = module._name == 'gluster_heal_facts'
if is_old_facts:
module.deprecate("The 'gluster_heal_facts' module has been renamed to 'gluster_heal_info', "
"and the renamed one no longer returns ansible_facts", version='2.13')
glusterbin = module.get_bin_path('gluster', True)
required_version = "3.2"
status_filter = module.params['status_filter']
volume_name = module.params['name']
heal_info = ''
rebalance_status = ''
# Verify if required GlusterFS version is installed
if is_invalid_gluster_version(module, required_version):
module.fail_json(msg="GlusterFS version > %s is required" %
required_version)
try:
if status_filter == "self-heal":
heal_info = get_self_heal_status(volume_name)
elif status_filter == "rebalance":
rebalance_status = get_rebalance_status(volume_name)
except Exception as e:
module.fail_json(msg='Error retrieving status: %s' % e, exception=traceback.format_exc())
facts = {}
facts['glusterfs'] = {'volume': volume_name, 'status_filter': status_filter, 'heal_info': heal_info, 'rebalance': rebalance_status}
if is_old_facts:
module.exit_json(ansible_facts=facts)
else:
module.exit_json(**facts)
if __name__ == '__main__':
main()
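
To illustrate what get_self_heal_status() produces, here is a self-contained rework of its parsing loop fed with an assumed sample of `gluster volume heal <volume> info` output; the exact CLI output format below is an assumption, not taken from this commit.

# Assumed sample output of `gluster volume heal test_volume info`.
sample = """Brick node1:/bricks/brick1/g1
/dir/file.txt
Status: Connected
Number of entries: 1

Brick node2:/bricks/brick1/g1
Status: Connected
Number of entries: 0
"""

heal_info, br_dict = [], {}
for line in sample.split("\n"):
    if 'Brick' in line:
        br_dict = {'brick': line.strip().strip("Brick")}
    elif 'Status' in line:
        br_dict['status'] = line.split(":")[1].strip()
    elif 'Number' in line:
        br_dict['no_of_entries'] = line.split(":")[1].strip()
    elif line.startswith('/') or line.startswith('<'):
        continue  # individual entries are listed but not collected
    else:
        # a non-matching (blank) line flushes the current brick record
        br_dict and heal_info.append(br_dict)
        br_dict = {}
print(heal_info)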

@@ -0,0 +1,175 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 Nandaja Varma <nvarma@redhat.com>
# Copyright 2018 Red Hat, Inc.
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gluster_peer
short_description: Attach/Detach peers to/from the cluster
description:
- Create or diminish a GlusterFS trusted storage pool. A set of nodes can be
added into an existing trusted storage pool or a new storage pool can be
formed. Or, nodes can be removed from an existing trusted storage pool.
author: Sachidananda Urs (@sac)
options:
state:
choices: ["present", "absent"]
default: "present"
description:
- Determines whether the nodes should be attached to the pool or
removed from the pool. If the state is present, nodes will be
attached to the pool. If state is absent, nodes will be detached
from the pool.
required: true
nodes:
description:
- List of nodes that have to be probed into the pool.
required: true
force:
type: bool
default: "false"
description:
- Applicable only while removing nodes from the pool. gluster
will refuse to detach a node from the pool if any one of the nodes
is down; in such cases, force can be used.
requirements:
- GlusterFS > 3.2
notes:
- This module does not support check mode.
'''
EXAMPLES = '''
- name: Create a trusted storage pool
gluster_peer:
state: present
nodes:
- 10.0.1.5
- 10.0.1.10
- name: Delete a node from the trusted storage pool
gluster_peer:
state: absent
nodes:
- 10.0.1.10
- name: Delete a node from the trusted storage pool by force
gluster_peer:
state: absent
nodes:
- 10.0.0.1
force: true
'''
RETURN = '''
'''
from ansible.module_utils.basic import AnsibleModule
from distutils.version import LooseVersion
class Peer(object):
def __init__(self, module):
self.module = module
self.state = self.module.params['state']
self.nodes = self.module.params['nodes']
self.glustercmd = self.module.get_bin_path('gluster', True)
self.lang = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
self.action = ''
self.force = ''
def gluster_peer_ops(self):
if not self.nodes:
self.module.fail_json(msg="nodes list cannot be empty")
self.force = 'force' if self.module.params.get('force') else ''
if self.state == 'present':
self.nodes = self.get_to_be_probed_hosts(self.nodes)
self.action = 'probe'
# In case of peer probe, we do not need `force'
self.force = ''
else:
self.action = 'detach'
self.call_peer_commands()
def get_to_be_probed_hosts(self, hosts):
peercmd = [self.glustercmd, 'pool', 'list', '--mode=script']
rc, output, err = self.module.run_command(peercmd,
environ_update=self.lang)
peers_in_cluster = [line.split('\t')[1].strip() for
line in filter(None, output.split('\n')[1:])]
try:
peers_in_cluster.remove('localhost')
except ValueError:
# It is ok not to have localhost in list
pass
hosts_to_be_probed = [host for host in hosts if host not in
peers_in_cluster]
return hosts_to_be_probed
def call_peer_commands(self):
result = {}
result['msg'] = ''
result['changed'] = False
for node in self.nodes:
peercmd = [self.glustercmd, 'peer', self.action, node, '--mode=script']
if self.force:
peercmd.append(self.force)
rc, out, err = self.module.run_command(peercmd,
environ_update=self.lang)
if rc:
result['rc'] = rc
result['msg'] = err
# Fail early, do not wait for the loop to finish
self.module.fail_json(**result)
else:
if 'already in peer' in out or \
'localhost not needed' in out:
result['changed'] |= False
else:
result['changed'] = True
self.module.exit_json(**result)
def main():
module = AnsibleModule(
argument_spec=dict(
force=dict(type='bool', required=False),
nodes=dict(type='list', required=True),
state=dict(type='str', choices=['absent', 'present'],
default='present'),
),
supports_check_mode=False
)
pops = Peer(module)
required_version = "3.2"
# Verify if required GlusterFS version is installed
if is_invalid_gluster_version(module, required_version):
module.fail_json(msg="GlusterFS version > %s is required" %
required_version)
pops.gluster_peer_ops()
def is_invalid_gluster_version(module, required_version):
cmd = module.get_bin_path('gluster', True) + ' --version'
result = module.run_command(cmd)
ver_line = result[1].split('\n')[0]
version = ver_line.split(' ')[1]
# If the installed version is less than 3.2, it is an invalid version
# return True
return LooseVersion(version) < LooseVersion(required_version)
if __name__ == "__main__":
main()
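
A standalone sketch of what get_to_be_probed_hosts() does: filter the requested nodes against the peers already in the pool. The `gluster pool list` output below is an assumed sample; the node addresses come from the EXAMPLES block.

# Assumed sample output of `gluster pool list` (tab-separated columns).
sample = ("UUID\tHostname\tState\n"
          "2f7e0c2b-0000-0000-0000-000000000001\t10.0.1.5\tConnected\n"
          "2f7e0c2b-0000-0000-0000-000000000002\tlocalhost\tConnected\n")

peers_in_cluster = [line.split('\t')[1].strip()
                    for line in filter(None, sample.split('\n')[1:])]
if 'localhost' in peers_in_cluster:
    peers_in_cluster.remove('localhost')  # the local node never needs probing

nodes = ['10.0.1.5', '10.0.1.10']
hosts_to_be_probed = [host for host in nodes if host not in peers_in_cluster]
print(hosts_to_be_probed)  # ['10.0.1.10'] -- only this node still gets peer probed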

@@ -0,0 +1,607 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, Taneli Leppä <taneli@crasman.fi>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: gluster_volume
short_description: Manage GlusterFS volumes
description:
- Create, remove, start, stop and tune GlusterFS volumes
options:
name:
description:
- The volume name.
required: true
aliases: ['volume']
state:
description:
- Use present/absent to ensure whether a volume exists or not.
Use started/stopped to control its availability.
required: true
choices: ['absent', 'present', 'started', 'stopped']
cluster:
description:
- List of hosts to use for probing and brick setup.
host:
description:
- Override local hostname (for peer probing purposes).
replicas:
description:
- Replica count for volume.
arbiters:
description:
- Arbiter count for volume.
stripes:
description:
- Stripe count for volume.
disperses:
description:
- Disperse count for volume.
redundancies:
description:
- Redundancy count for volume.
transport:
description:
- Transport type for volume.
default: tcp
choices: [ tcp, rdma, 'tcp,rdma' ]
bricks:
description:
- Brick paths on servers. Multiple brick paths can be separated by commas.
aliases: [ brick ]
start_on_create:
description:
- Controls whether the volume is started after creation or not.
type: bool
default: 'yes'
rebalance:
description:
- Controls whether the cluster is rebalanced after changes.
type: bool
default: 'no'
directory:
description:
- Directory for limit-usage.
options:
description:
- A dictionary/hash with options/settings for the volume.
quota:
description:
- Quota value for limit-usage (be sure to use 10.0MB instead of 10MB, see quota list).
force:
description:
- If bricks are being created in the root partition, the module will fail.
Set force to true to override this behaviour.
type: bool
notes:
- Requires cli tools for GlusterFS on servers.
- Will add new bricks, but not remove them.
author:
- Taneli Leppä (@rosmo)
'''
EXAMPLES = """
- name: create gluster volume
gluster_volume:
state: present
name: test1
bricks: /bricks/brick1/g1
rebalance: yes
cluster:
- 192.0.2.10
- 192.0.2.11
run_once: true
- name: tune
gluster_volume:
state: present
name: test1
options:
performance.cache-size: 256MB
- name: Set multiple options on GlusterFS volume
gluster_volume:
state: present
name: test1
options:
{ performance.cache-size: 128MB,
write-behind: 'off',
quick-read: 'on'
}
- name: start gluster volume
gluster_volume:
state: started
name: test1
- name: limit usage
gluster_volume:
state: present
name: test1
directory: /foo
quota: 20.0MB
- name: stop gluster volume
gluster_volume:
state: stopped
name: test1
- name: remove gluster volume
gluster_volume:
state: absent
name: test1
- name: create gluster volume with multiple bricks
gluster_volume:
state: present
name: test2
bricks: /bricks/brick1/g2,/bricks/brick2/g2
cluster:
- 192.0.2.10
- 192.0.2.11
run_once: true
- name: Remove the bricks from gluster volume
gluster_volume:
state: present
name: testvol
bricks: /bricks/brick1/b1,/bricks/brick2/b2
cluster:
- 10.70.42.85
force: true
run_once: true
- name: Reduce cluster configuration
gluster_volume:
state: present
name: testvol
bricks: /bricks/brick3/b1,/bricks/brick4/b2
replicas: 2
cluster:
- 10.70.42.85
force: true
run_once: true
"""
import re
import socket
import time
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
glusterbin = ''
def run_gluster(gargs, **kwargs):
global glusterbin
global module
args = [glusterbin, '--mode=script']
args.extend(gargs)
try:
rc, out, err = module.run_command(args, **kwargs)
if rc != 0:
module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' %
(' '.join(args), rc, out or err), exception=traceback.format_exc())
except Exception as e:
module.fail_json(msg='error running gluster (%s) command: %s' % (' '.join(args),
to_native(e)), exception=traceback.format_exc())
return out
def run_gluster_nofail(gargs, **kwargs):
global glusterbin
global module
args = [glusterbin]
args.extend(gargs)
rc, out, err = module.run_command(args, **kwargs)
if rc != 0:
return None
return out
def get_peers():
out = run_gluster(['peer', 'status'])
peers = {}
hostname = None
uuid = None
state = None
shortNames = False
for row in out.split('\n'):
if ': ' in row:
key, value = row.split(': ')
if key.lower() == 'hostname':
hostname = value
shortNames = False
if key.lower() == 'uuid':
uuid = value
if key.lower() == 'state':
state = value
peers[hostname] = [uuid, state]
elif row.lower() == 'other names:':
shortNames = True
elif row != '' and shortNames is True:
peers[row] = [uuid, state]
elif row == '':
shortNames = False
return peers
def get_volumes():
out = run_gluster(['volume', 'info'])
volumes = {}
volume = {}
for row in out.split('\n'):
if ': ' in row:
key, value = row.split(': ')
if key.lower() == 'volume name':
volume['name'] = value
volume['options'] = {}
volume['quota'] = False
if key.lower() == 'volume id':
volume['id'] = value
if key.lower() == 'status':
volume['status'] = value
if key.lower() == 'transport-type':
volume['transport'] = value
if value.lower().endswith(' (arbiter)'):
if 'arbiters' not in volume:
volume['arbiters'] = []
value = value[:-10]
volume['arbiters'].append(value)
elif key.lower() == 'number of bricks':
volume['replicas'] = value[-1:]
if key.lower() != 'bricks' and key.lower()[:5] == 'brick':
if 'bricks' not in volume:
volume['bricks'] = []
volume['bricks'].append(value)
# Volume options
if '.' in key:
if 'options' not in volume:
volume['options'] = {}
volume['options'][key] = value
if key == 'features.quota' and value == 'on':
volume['quota'] = True
else:
if row.lower() != 'bricks:' and row.lower() != 'options reconfigured:':
if len(volume) > 0:
volumes[volume['name']] = volume
volume = {}
return volumes
def get_quotas(name, nofail):
quotas = {}
if nofail:
out = run_gluster_nofail(['volume', 'quota', name, 'list'])
if not out:
return quotas
else:
out = run_gluster(['volume', 'quota', name, 'list'])
for row in out.split('\n'):
if row[:1] == '/':
q = re.split(r'\s+', row)
quotas[q[0]] = q[1]
return quotas
def wait_for_peer(host):
for x in range(0, 4):
peers = get_peers()
if host in peers and peers[host][1].lower().find('peer in cluster') != -1:
return True
time.sleep(1)
return False
def probe(host, myhostname):
global module
out = run_gluster(['peer', 'probe', host])
if out.find('localhost') == -1 and not wait_for_peer(host):
module.fail_json(msg='failed to probe peer %s on %s' % (host, myhostname))
def probe_all_peers(hosts, peers, myhostname):
for host in hosts:
host = host.strip() # Clean up any extra space for exact comparison
if host not in peers:
probe(host, myhostname)
def create_volume(name, stripe, replica, arbiter, disperse, redundancy, transport, hosts, bricks, force):
args = ['volume', 'create']
args.append(name)
if stripe:
args.append('stripe')
args.append(str(stripe))
if replica:
args.append('replica')
args.append(str(replica))
if arbiter:
args.append('arbiter')
args.append(str(arbiter))
if disperse:
args.append('disperse')
args.append(str(disperse))
if redundancy:
args.append('redundancy')
args.append(str(redundancy))
args.append('transport')
args.append(transport)
for brick in bricks:
for host in hosts:
args.append(('%s:%s' % (host, brick)))
if force:
args.append('force')
run_gluster(args)
def start_volume(name):
run_gluster(['volume', 'start', name])
def stop_volume(name):
run_gluster(['volume', 'stop', name])
def set_volume_option(name, option, parameter):
run_gluster(['volume', 'set', name, option, parameter])
def add_bricks(name, new_bricks, stripe, replica, force):
args = ['volume', 'add-brick', name]
if stripe:
args.append('stripe')
args.append(str(stripe))
if replica:
args.append('replica')
args.append(str(replica))
args.extend(new_bricks)
if force:
args.append('force')
run_gluster(args)
def remove_bricks(name, removed_bricks, force):
# max-tries=12 with default_interval=10 secs
max_tries = 12
retries = 0
success = False
args = ['volume', 'remove-brick', name]
args.extend(removed_bricks)
# create a copy of args to use for commit operation
args_c = args[:]
args.append('start')
run_gluster(args)
# remove-brick operation needs to be followed by commit operation.
if not force:
module.fail_json(msg="Force option is mandatory.")
else:
while retries < max_tries:
last_brick = removed_bricks[-1]
out = run_gluster(['volume', 'remove-brick', name, last_brick, 'status'])
for row in out.split('\n')[1:]:
if 'completed' in row:
# remove-brick successful, call commit operation.
args_c.append('commit')
out = run_gluster(args_c)
success = True
break
else:
time.sleep(10)
if success:
break
retries += 1
if not success:
# remove-brick still in process, needs to be committed after completion.
module.fail_json(msg="Exceeded number of tries, check remove-brick status.\n"
"Commit operation needs to be followed.")
def reduce_config(name, removed_bricks, replicas, force):
out = run_gluster(['volume', 'heal', name, 'info'])
summary = out.split("\n")
for line in summary:
if 'Number' in line and int(line.split(":")[1].strip()) != 0:
module.fail_json(msg="Operation aborted, self-heal in progress.")
args = ['volume', 'remove-brick', name, 'replica', replicas]
args.extend(removed_bricks)
if force:
args.append('force')
else:
module.fail_json(msg="Force option is mandatory")
run_gluster(args)
def do_rebalance(name):
run_gluster(['volume', 'rebalance', name, 'start'])
def enable_quota(name):
run_gluster(['volume', 'quota', name, 'enable'])
def set_quota(name, directory, value):
run_gluster(['volume', 'quota', name, 'limit-usage', directory, value])
def main():
# MAIN
global module
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True, aliases=['volume']),
state=dict(type='str', required=True, choices=['absent', 'started', 'stopped', 'present']),
cluster=dict(type='list'),
host=dict(type='str'),
stripes=dict(type='int'),
replicas=dict(type='int'),
arbiters=dict(type='int'),
disperses=dict(type='int'),
redundancies=dict(type='int'),
transport=dict(type='str', default='tcp', choices=['tcp', 'rdma', 'tcp,rdma']),
bricks=dict(type='str', aliases=['brick']),
start_on_create=dict(type='bool', default=True),
rebalance=dict(type='bool', default=False),
options=dict(type='dict', default={}),
quota=dict(type='str'),
directory=dict(type='str'),
force=dict(type='bool', default=False),
),
)
global glusterbin
glusterbin = module.get_bin_path('gluster', True)
changed = False
action = module.params['state']
volume_name = module.params['name']
cluster = module.params['cluster']
brick_paths = module.params['bricks']
stripes = module.params['stripes']
replicas = module.params['replicas']
arbiters = module.params['arbiters']
disperses = module.params['disperses']
redundancies = module.params['redundancies']
transport = module.params['transport']
myhostname = module.params['host']
start_on_create = module.boolean(module.params['start_on_create'])
rebalance = module.boolean(module.params['rebalance'])
force = module.boolean(module.params['force'])
if not myhostname:
myhostname = socket.gethostname()
# Clean up if last element is empty. Consider that yml can look like this:
# cluster="{% for host in groups['glusterfs'] %}{{ hostvars[host]['private_ip'] }},{% endfor %}"
if cluster is not None and len(cluster) > 1 and cluster[-1] == '':
cluster = cluster[0:-1]
if cluster is None:
cluster = []
if brick_paths is not None and "," in brick_paths:
brick_paths = brick_paths.split(",")
else:
brick_paths = [brick_paths]
options = module.params['options']
quota = module.params['quota']
directory = module.params['directory']
# get current state info
peers = get_peers()
volumes = get_volumes()
quotas = {}
if volume_name in volumes and volumes[volume_name]['quota'] and volumes[volume_name]['status'].lower() == 'started':
quotas = get_quotas(volume_name, True)
# do the work!
if action == 'absent':
if volume_name in volumes:
if volumes[volume_name]['status'].lower() != 'stopped':
stop_volume(volume_name)
run_gluster(['volume', 'delete', volume_name])
changed = True
if action == 'present':
probe_all_peers(cluster, peers, myhostname)
# create if it doesn't exist
if volume_name not in volumes:
create_volume(volume_name, stripes, replicas, arbiters, disperses, redundancies, transport, cluster, brick_paths, force)
volumes = get_volumes()
changed = True
if volume_name in volumes:
if volumes[volume_name]['status'].lower() != 'started' and start_on_create:
start_volume(volume_name)
changed = True
# switch bricks
new_bricks = []
removed_bricks = []
all_bricks = []
bricks_in_volume = volumes[volume_name]['bricks']
for node in cluster:
for brick_path in brick_paths:
brick = '%s:%s' % (node, brick_path)
all_bricks.append(brick)
if brick not in bricks_in_volume:
new_bricks.append(brick)
if not new_bricks and len(all_bricks) > 0 and \
len(all_bricks) < len(bricks_in_volume):
for brick in bricks_in_volume:
if brick not in all_bricks:
removed_bricks.append(brick)
if new_bricks:
add_bricks(volume_name, new_bricks, stripes, replicas, force)
changed = True
if removed_bricks:
if replicas and int(replicas) < int(volumes[volume_name]['replicas']):
reduce_config(volume_name, removed_bricks, str(replicas), force)
else:
remove_bricks(volume_name, removed_bricks, force)
changed = True
# handle quotas
if quota:
if not volumes[volume_name]['quota']:
enable_quota(volume_name)
quotas = get_quotas(volume_name, False)
if directory not in quotas or quotas[directory] != quota:
set_quota(volume_name, directory, quota)
changed = True
# set options
for option in options.keys():
if option not in volumes[volume_name]['options'] or volumes[volume_name]['options'][option] != options[option]:
set_volume_option(volume_name, option, options[option])
changed = True
else:
module.fail_json(msg='failed to create volume %s' % volume_name)
if action != 'absent' and volume_name not in volumes:
module.fail_json(msg='volume not found %s' % volume_name)
if action == 'started':
if volumes[volume_name]['status'].lower() != 'started':
start_volume(volume_name)
changed = True
if action == 'stopped':
if volumes[volume_name]['status'].lower() != 'stopped':
stop_volume(volume_name)
changed = True
if changed:
volumes = get_volumes()
if rebalance:
do_rebalance(volume_name)
facts = {}
facts['glusterfs'] = {'peers': peers, 'volumes': volumes, 'quotas': quotas}
module.exit_json(changed=changed, ansible_facts=facts)
if __name__ == '__main__':
main()
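
As a quick illustration of the command line that create_volume() assembles, the fragment below rebuilds the argument list for the two-brick EXAMPLES entry (no replica/stripe/arbiter counts, default tcp transport) and prints the resulting gluster invocation.

hosts = ['192.0.2.10', '192.0.2.11']
bricks = ['/bricks/brick1/g2', '/bricks/brick2/g2']

args = ['volume', 'create', 'test2', 'transport', 'tcp']
for brick in bricks:
    for host in hosts:
        args.append('%s:%s' % (host, brick))

print('gluster --mode=script ' + ' '.join(args))
# gluster --mode=script volume create test2 transport tcp
#     192.0.2.10:/bricks/brick1/g2 192.0.2.11:/bricks/brick1/g2
#     192.0.2.10:/bricks/brick2/g2 192.0.2.11:/bricks/brick2/g2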

@@ -0,0 +1,300 @@
#!/usr/bin/python
# Copyright: (c) 2018, Hewlett Packard Enterprise Development LP
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
short_description: Manage HPE StoreServ 3PAR CPG
author:
- Farhan Nomani (@farhan7500)
- Gautham P Hegde (@gautamphegde)
description:
- Create and delete CPG on HPE 3PAR.
module: ss_3par_cpg
options:
cpg_name:
description:
- Name of the CPG.
type: str
required: true
disk_type:
choices:
- FC
- NL
- SSD
description:
- Specifies that physical disks must have the specified device type.
type: str
domain:
description:
- Specifies the name of the domain in which the object will reside.
type: str
growth_increment:
description:
- Specifies the growth increment (in MiB, GiB or TiB), the amount of logical disk storage
created on each auto-grow operation.
type: str
growth_limit:
description:
- Specifies that the autogrow operation is limited to the specified
storage amount that sets the growth limit (in MiB, GiB or TiB).
type: str
growth_warning:
description:
- Specifies the threshold (in MiB, GiB or TiB) of used logical disk space that, when exceeded,
results in a warning alert.
type: str
high_availability:
choices:
- PORT
- CAGE
- MAG
description:
- Specifies that the layout must support the failure of one port pair,
one cage, or one magazine.
type: str
raid_type:
choices:
- R0
- R1
- R5
- R6
description:
- Specifies the RAID type for the logical disk.
type: str
set_size:
description:
- Specifies the set size in the number of chunklets.
type: int
state:
choices:
- present
- absent
description:
- Whether the specified CPG should exist or not.
required: true
type: str
secure:
description:
- Specifies whether the certificate needs to be validated while communicating.
type: bool
default: no
extends_documentation_fragment:
- community.general.hpe3par
'''
EXAMPLES = r'''
- name: Create CPG sample_cpg
ss_3par_cpg:
storage_system_ip: 10.10.10.1
storage_system_username: username
storage_system_password: password
state: present
cpg_name: sample_cpg
domain: sample_domain
growth_increment: 32000 MiB
growth_limit: 64000 MiB
growth_warning: 48000 MiB
raid_type: R6
set_size: 8
high_availability: MAG
disk_type: FC
secure: no
- name: Delete CPG sample_cpg
ss_3par_cpg:
storage_system_ip: 10.10.10.1
storage_system_username: username
storage_system_password: password
state: absent
cpg_name: sample_cpg
secure: no
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.storage.hpe3par import hpe3par
try:
from hpe3par_sdk import client
from hpe3parclient import exceptions
HAS_3PARCLIENT = True
except ImportError:
HAS_3PARCLIENT = False
def validate_set_size(raid_type, set_size):
if raid_type:
set_size_array = client.HPE3ParClient.RAID_MAP[raid_type]['set_sizes']
if set_size in set_size_array:
return True
return False
def cpg_ldlayout_map(ldlayout_dict):
if ldlayout_dict['RAIDType'] is not None and ldlayout_dict['RAIDType']:
ldlayout_dict['RAIDType'] = client.HPE3ParClient.RAID_MAP[
ldlayout_dict['RAIDType']]['raid_value']
if ldlayout_dict['HA'] is not None and ldlayout_dict['HA']:
ldlayout_dict['HA'] = getattr(
client.HPE3ParClient, ldlayout_dict['HA'])
return ldlayout_dict
def create_cpg(
client_obj,
cpg_name,
domain,
growth_increment,
growth_limit,
growth_warning,
raid_type,
set_size,
high_availability,
disk_type):
try:
if not validate_set_size(raid_type, set_size):
return (False, False, "Set size %s not part of RAID set %s" % (set_size, raid_type))
if not client_obj.cpgExists(cpg_name):
disk_patterns = []
if disk_type:
disk_type = getattr(client.HPE3ParClient, disk_type)
disk_patterns = [{'diskType': disk_type}]
ld_layout = {
'RAIDType': raid_type,
'setSize': set_size,
'HA': high_availability,
'diskPatterns': disk_patterns}
ld_layout = cpg_ldlayout_map(ld_layout)
if growth_increment is not None:
growth_increment = hpe3par.convert_to_binary_multiple(
growth_increment)
if growth_limit is not None:
growth_limit = hpe3par.convert_to_binary_multiple(
growth_limit)
if growth_warning is not None:
growth_warning = hpe3par.convert_to_binary_multiple(
growth_warning)
optional = {
'domain': domain,
'growthIncrementMiB': growth_increment,
'growthLimitMiB': growth_limit,
'usedLDWarningAlertMiB': growth_warning,
'LDLayout': ld_layout}
client_obj.createCPG(cpg_name, optional)
else:
return (True, False, "CPG already present")
except exceptions.ClientException as e:
return (False, False, "CPG creation failed | %s" % (e))
return (True, True, "Created CPG %s successfully." % cpg_name)
def delete_cpg(
client_obj,
cpg_name):
try:
if client_obj.cpgExists(cpg_name):
client_obj.deleteCPG(cpg_name)
else:
return (True, False, "CPG does not exist")
except exceptions.ClientException as e:
return (False, False, "CPG delete failed | %s" % e)
return (True, True, "Deleted CPG %s successfully." % cpg_name)
def main():
module = AnsibleModule(argument_spec=hpe3par.cpg_argument_spec(),
required_together=[['raid_type', 'set_size']])
if not HAS_3PARCLIENT:
module.fail_json(msg='the python hpe3par_sdk library is required (https://pypi.org/project/hpe3par_sdk)')
if len(module.params["cpg_name"]) < 1 or len(module.params["cpg_name"]) > 31:
module.fail_json(msg="CPG name must be at least 1 character and not more than 31 characters")
storage_system_ip = module.params["storage_system_ip"]
storage_system_username = module.params["storage_system_username"]
storage_system_password = module.params["storage_system_password"]
cpg_name = module.params["cpg_name"]
domain = module.params["domain"]
growth_increment = module.params["growth_increment"]
growth_limit = module.params["growth_limit"]
growth_warning = module.params["growth_warning"]
raid_type = module.params["raid_type"]
set_size = module.params["set_size"]
high_availability = module.params["high_availability"]
disk_type = module.params["disk_type"]
secure = module.params["secure"]
wsapi_url = 'https://%s:8080/api/v1' % storage_system_ip
try:
client_obj = client.HPE3ParClient(wsapi_url, secure)
except exceptions.SSLCertFailed:
module.fail_json(msg="SSL Certificate Failed")
except exceptions.ConnectionError:
module.fail_json(msg="Connection Error")
except exceptions.UnsupportedVersion:
module.fail_json(msg="Unsupported WSAPI version")
except Exception as e:
module.fail_json(msg="Initializing client failed. %s" % e)
if storage_system_username is None or storage_system_password is None:
module.fail_json(msg="Storage system username or password is None")
if cpg_name is None:
module.fail_json(msg="CPG Name is None")
# States
if module.params["state"] == "present":
try:
client_obj.login(storage_system_username, storage_system_password)
return_status, changed, msg = create_cpg(
client_obj,
cpg_name,
domain,
growth_increment,
growth_limit,
growth_warning,
raid_type,
set_size,
high_availability,
disk_type
)
except Exception as e:
module.fail_json(msg="CPG create failed | %s" % e)
finally:
client_obj.logout()
elif module.params["state"] == "absent":
try:
client_obj.login(storage_system_username, storage_system_password)
return_status, changed, msg = delete_cpg(
client_obj,
cpg_name
)
except Exception as e:
module.fail_json(msg="CPG create failed | %s" % e)
finally:
client_obj.logout()
if return_status:
module.exit_json(changed=changed, msg=msg)
else:
module.fail_json(msg=msg)
if __name__ == '__main__':
main()
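
A minimal sketch of the hpe3par_sdk calls this module drives, assuming the array address and credentials from the EXAMPLES block; growth and RAID layout options are omitted for brevity.

from hpe3par_sdk import client

# Placeholder WSAPI endpoint and credentials from the EXAMPLES block above.
cl = client.HPE3ParClient('https://10.10.10.1:8080/api/v1', False)
cl.login('username', 'password')
try:
    if not cl.cpgExists('sample_cpg'):
        cl.createCPG('sample_cpg', {'domain': 'sample_domain'})
finally:
    cl.logout()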

@@ -0,0 +1,161 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, IBM CORPORATION
# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
#
# GNU General Public License v3.0+ (see COPYING or
# https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: ibm_sa_domain
short_description: Manages domains on IBM Spectrum Accelerate Family storage systems
description:
- "This module can be used to add domains to or removes them from IBM Spectrum Accelerate Family storage systems."
options:
domain:
description:
- Name of the domain to be managed.
required: true
state:
description:
- The desired state of the domain.
required: true
default: "present"
choices: [ "present", "absent" ]
ldap_id:
description:
- ldap id to add to the domain.
required: false
size:
description:
- Size of the domain.
required: false
hard_capacity:
description:
- Hard capacity of the domain.
required: false
soft_capacity:
description:
- Soft capacity of the domain.
required: false
max_cgs:
description:
- Number of max cgs.
required: false
max_dms:
description:
- Number of max dms.
required: false
max_mirrors:
description:
- Number of max_mirrors.
required: false
max_pools:
description:
- Number of max_pools.
required: false
max_volumes:
description:
- Number of max_volumes.
required: false
perf_class:
description:
- Add the domain to a performance class.
required: false
extends_documentation_fragment:
- community.general.ibm_storage
author:
- Tzur Eliyahu (@tzure)
'''
EXAMPLES = '''
- name: Define new domain.
ibm_sa_domain:
domain: domain_name
size: domain_size
state: present
username: admin
password: secret
endpoints: hostdev-system
- name: Delete domain.
ibm_sa_domain:
domain: domain_name
state: absent
username: admin
password: secret
endpoints: hostdev-system
'''
RETURN = '''
msg:
description: module return status.
returned: as needed
type: str
sample: "domain 'domain_name' created successfully."
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \
connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed
def main():
argument_spec = spectrum_accelerate_spec()
argument_spec.update(
dict(
state=dict(default='present', choices=['present', 'absent']),
domain=dict(required=True),
size=dict(),
max_dms=dict(),
max_cgs=dict(),
ldap_id=dict(),
max_mirrors=dict(),
max_pools=dict(),
max_volumes=dict(),
perf_class=dict(),
hard_capacity=dict(),
soft_capacity=dict()
)
)
module = AnsibleModule(argument_spec)
is_pyxcli_installed(module)
xcli_client = connect_ssl(module)
domain = xcli_client.cmd.domain_list(
domain=module.params['domain']).as_single_element
state = module.params['state']
state_changed = False
msg = 'Domain \'{0}\''.format(module.params['domain'])
if state == 'present' and not domain:
state_changed = execute_pyxcli_command(
module, 'domain_create', xcli_client)
msg += " created successfully."
elif state == 'absent' and domain:
state_changed = execute_pyxcli_command(
module, 'domain_delete', xcli_client)
msg += " deleted successfully."
else:
msg += " state unchanged."
module.exit_json(changed=state_changed, msg=msg)
if __name__ == '__main__':
main()
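
The ibm_sa_* modules in this commit (domain, host, host_ports, pool, vol, vol_map) all share the same idempotency skeleton; a hedged sketch of that pattern is below. ensure_state() is an illustrative helper, not part of the collection, and it assumes the module and xcli_client objects each module already builds.

from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command

def ensure_state(module, xcli_client, existing, create_cmd, delete_cmd):
    """Run the create/delete xcli command only when the current state requires it."""
    state = module.params['state']
    if state == 'present' and not existing:
        return execute_pyxcli_command(module, create_cmd, xcli_client)
    if state == 'absent' and existing:
        return execute_pyxcli_command(module, delete_cmd, xcli_client)
    return False

# e.g. for ibm_sa_pool: changed = ensure_state(module, xcli_client, pool, 'pool_create', 'pool_delete')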

@@ -0,0 +1,123 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2018 IBM CORPORATION
# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: ibm_sa_host
short_description: Adds hosts to or removes them from IBM Spectrum Accelerate Family storage systems.
description:
- "This module adds hosts to or removes them from IBM Spectrum Accelerate Family storage systems."
options:
host:
description:
- Host name.
required: true
state:
description:
- Host state.
required: true
default: "present"
choices: [ "present", "absent" ]
cluster:
description:
- The name of the cluster to include the host.
required: false
domain:
description:
- The domains the cluster will be attached to.
To include more than one domain,
separate domain names with commas.
To include all existing domains, use an asterisk ("*").
required: false
iscsi_chap_name:
description:
- The host's CHAP name identifier.
required: false
iscsi_chap_secret:
description:
- The password of the initiator used to
authenticate to the system when CHAP is enabled.
required: false
extends_documentation_fragment:
- community.general.ibm_storage
author:
- Tzur Eliyahu (@tzure)
'''
EXAMPLES = '''
- name: Define new host.
ibm_sa_host:
host: host_name
state: present
username: admin
password: secret
endpoints: hostdev-system
- name: Delete host.
ibm_sa_host:
host: host_name
state: absent
username: admin
password: secret
endpoints: hostdev-system
'''
RETURN = '''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \
connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed
def main():
argument_spec = spectrum_accelerate_spec()
argument_spec.update(
dict(
state=dict(default='present', choices=['present', 'absent']),
host=dict(required=True),
cluster=dict(),
domain=dict(),
iscsi_chap_name=dict(),
iscsi_chap_secret=dict()
)
)
module = AnsibleModule(argument_spec)
is_pyxcli_installed(module)
xcli_client = connect_ssl(module)
host = xcli_client.cmd.host_list(
host=module.params['host']).as_single_element
state = module.params['state']
state_changed = False
if state == 'present' and not host:
state_changed = execute_pyxcli_command(
module, 'host_define', xcli_client)
elif state == 'absent' and host:
state_changed = execute_pyxcli_command(
module, 'host_delete', xcli_client)
module.exit_json(changed=state_changed)
if __name__ == '__main__':
main()

@@ -0,0 +1,132 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2018 IBM CORPORATION
# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: ibm_sa_host_ports
short_description: Add host ports on IBM Spectrum Accelerate Family storage systems.
description:
- "This module adds ports to or removes them from the hosts
on IBM Spectrum Accelerate Family storage systems."
options:
host:
description:
- Host name.
required: true
state:
description:
- Host ports state.
required: true
default: "present"
choices: [ "present", "absent" ]
iscsi_name:
description:
- iSCSI initiator name.
required: false
fcaddress:
description:
- Fiber channel address.
required: false
num_of_visible_targets:
description:
- Number of visible targets.
required: false
extends_documentation_fragment:
- community.general.ibm_storage
author:
- Tzur Eliyahu (@tzure)
'''
EXAMPLES = '''
- name: Add ports for host.
ibm_sa_host_ports:
host: test_host
iscsi_name: iqn.1994-05.com***
username: admin
password: secret
endpoints: hostdev-system
state: present
- name: Remove ports for host.
ibm_sa_host_ports:
host: test_host
iscsi_name: iqn.1994-05.com***
username: admin
password: secret
endpoints: hostdev-system
state: absent
'''
RETURN = '''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import (execute_pyxcli_command, connect_ssl,
spectrum_accelerate_spec, is_pyxcli_installed)
def main():
argument_spec = spectrum_accelerate_spec()
argument_spec.update(
dict(
state=dict(default='present', choices=['present', 'absent']),
host=dict(required=True),
iscsi_name=dict(),
fcaddress=dict(),
num_of_visible_targets=dict()
)
)
module = AnsibleModule(argument_spec)
is_pyxcli_installed(module)
xcli_client = connect_ssl(module)
# required args
ports = []
try:
ports = xcli_client.cmd.host_list_ports(
host=module.params.get('host')).as_list
except Exception:
pass
state = module.params['state']
port_exists = False
ports = [port.get('port_name') for port in ports]
fc_ports = (module.params.get('fcaddress')
if module.params.get('fcaddress') else [])
iscsi_ports = (module.params.get('iscsi_name')
if module.params.get('iscsi_name') else [])
for port in ports:
if port in iscsi_ports or port in fc_ports:
port_exists = True
break
state_changed = False
if state == 'present' and not port_exists:
state_changed = execute_pyxcli_command(
module, 'host_add_port', xcli_client)
if state == 'absent' and port_exists:
state_changed = execute_pyxcli_command(
module, 'host_remove_port', xcli_client)
module.exit_json(changed=state_changed)
if __name__ == '__main__':
main()
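
The port-existence test above works on plain strings and lists; a self-contained illustration with assumed values is below (the returned port entries are taken to be dicts with a port_name key, as the module's code implies).

# Assumed data: one iSCSI port already defined on the host.
listed_ports = [{'port_name': 'iqn.1994-05.com.example:client01'}]
iscsi_ports = 'iqn.1994-05.com.example:client01'  # module.params['iscsi_name']
fc_ports = []                                      # no fcaddress supplied

port_names = [p.get('port_name') for p in listed_ports]
port_exists = any(p in iscsi_ports or p in fc_ports for p in port_names)
print(port_exists)  # True, so state=present would report no change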

@@ -0,0 +1,120 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2018 IBM CORPORATION
# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: ibm_sa_pool
short_description: Handles pools on IBM Spectrum Accelerate Family storage systems.
description:
- "This module creates or deletes pools to be used on IBM Spectrum Accelerate Family storage systems"
options:
pool:
description:
- Pool name.
required: true
state:
description:
- Pool state.
required: true
default: "present"
choices: [ "present", "absent" ]
size:
description:
- Pool size in GB
required: false
snapshot_size:
description:
- Pool snapshot size in GB
required: false
domain:
description:
- Adds the pool to the specified domain.
required: false
perf_class:
description:
- Assigns a perf_class to the pool.
required: false
extends_documentation_fragment:
- community.general.ibm_storage
author:
- Tzur Eliyahu (@tzure)
'''
EXAMPLES = '''
- name: Create new pool.
ibm_sa_pool:
name: pool_name
size: 300
state: present
username: admin
password: secret
endpoints: hostdev-system
- name: Delete pool.
ibm_sa_pool:
name: pool_name
state: absent
username: admin
password: secret
endpoints: hostdev-system
'''
RETURN = '''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \
connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed
def main():
argument_spec = spectrum_accelerate_spec()
argument_spec.update(
dict(
state=dict(default='present', choices=['present', 'absent']),
pool=dict(required=True),
size=dict(),
snapshot_size=dict(),
domain=dict(),
perf_class=dict()
)
)
module = AnsibleModule(argument_spec)
is_pyxcli_installed(module)
xcli_client = connect_ssl(module)
pool = xcli_client.cmd.pool_list(
pool=module.params['pool']).as_single_element
state = module.params['state']
state_changed = False
if state == 'present' and not pool:
state_changed = execute_pyxcli_command(
module, 'pool_create', xcli_client)
if state == 'absent' and pool:
state_changed = execute_pyxcli_command(
module, 'pool_delete', xcli_client)
module.exit_json(changed=state_changed)
if __name__ == '__main__':
main()

@@ -0,0 +1,112 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2018 IBM CORPORATION
# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: ibm_sa_vol
short_description: Handle volumes on IBM Spectrum Accelerate Family storage systems.
description:
- "This module creates or deletes volumes to be used on IBM Spectrum Accelerate Family storage systems."
options:
vol:
description:
- Volume name.
required: true
pool:
description:
- Volume pool.
required: false
state:
description:
- Volume state.
required: true
default: "present"
choices: [ "present", "absent" ]
size:
description:
- Volume size.
required: false
extends_documentation_fragment:
- community.general.ibm_storage
author:
- Tzur Eliyahu (@tzure)
'''
EXAMPLES = '''
- name: Create a new volume.
ibm_sa_vol:
vol: volume_name
pool: pool_name
size: 17
state: present
username: admin
password: secret
endpoints: hostdev-system
- name: Delete an existing volume.
ibm_sa_vol:
vol: volume_name
state: absent
username: admin
password: secret
endpoints: hostdev-system
'''
RETURN = '''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \
connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed
def main():
argument_spec = spectrum_accelerate_spec()
argument_spec.update(
dict(
state=dict(default='present', choices=['present', 'absent']),
vol=dict(required=True),
pool=dict(),
size=dict()
)
)
module = AnsibleModule(argument_spec)
is_pyxcli_installed(module)
xcli_client = connect_ssl(module)
# required args
volume = xcli_client.cmd.vol_list(
vol=module.params.get('vol')).as_single_element
state = module.params['state']
state_changed = False
if state == 'present' and not volume:
state_changed = execute_pyxcli_command(
module, 'vol_create', xcli_client)
elif state == 'absent' and volume:
state_changed = execute_pyxcli_command(
module, 'vol_delete', xcli_client)
module.exit_json(changed=state_changed)
if __name__ == '__main__':
main()

@@ -0,0 +1,140 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2018 IBM CORPORATION
# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
#
# GNU General Public License v3.0+ (see COPYING or
# https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: ibm_sa_vol_map
short_description: Handles volume mapping on IBM Spectrum Accelerate Family storage systems.
description:
- "This module maps volumes to or unmaps them from the hosts on
IBM Spectrum Accelerate Family storage systems."
options:
vol:
description:
- Volume name.
required: true
state:
default: "present"
choices: [ "present", "absent" ]
description:
- When the state is present, the volume is mapped.
When the state is absent, the volume is unmapped.
required: true
cluster:
description:
- Maps the volume to a cluster.
required: false
host:
description:
- Maps the volume to a host.
required: false
lun:
description:
- The LUN identifier.
required: false
override:
description:
- Overrides the existing volume mapping.
required: false
extends_documentation_fragment:
- community.general.ibm_storage
author:
- Tzur Eliyahu (@tzure)
'''
EXAMPLES = '''
- name: Map volume to host.
ibm_sa_vol_map:
vol: volume_name
lun: 1
host: host_name
username: admin
password: secret
endpoints: hostdev-system
state: present
- name: Map volume to cluster.
ibm_sa_vol_map:
vol: volume_name
lun: 1
cluster: cluster_name
username: admin
password: secret
endpoints: hostdev-system
state: present
- name: Unmap volume.
ibm_sa_vol_map:
host: host_name
username: admin
password: secret
endpoints: hostdev-system
state: absent
'''
RETURN = '''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import (execute_pyxcli_command,
connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed)
def main():
argument_spec = spectrum_accelerate_spec()
argument_spec.update(
dict(
state=dict(default='present', choices=['present', 'absent']),
vol=dict(required=True),
lun=dict(),
cluster=dict(),
host=dict(),
override=dict()
)
)
module = AnsibleModule(argument_spec)
is_pyxcli_installed(module)
xcli_client = connect_ssl(module)
# required args
mapping = False
try:
mapped_hosts = xcli_client.cmd.vol_mapping_list(
vol=module.params.get('vol')).as_list
for host in mapped_hosts:
if host['host'] == module.params.get("host", ""):
mapping = True
except Exception:
pass
state = module.params['state']
state_changed = False
if state == 'present' and not mapping:
state_changed = execute_pyxcli_command(module, 'map_vol', xcli_client)
if state == 'absent' and mapping:
state_changed = execute_pyxcli_command(
module, 'unmap_vol', xcli_client)
module.exit_json(changed=state_changed)
if __name__ == '__main__':
main()

@@ -0,0 +1,196 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Gregory Shulov (gregory.shulov@gmail.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: infini_export
short_description: Create, Delete or Modify NFS Exports on Infinibox
description:
- This module creates, deletes or modifies NFS exports on Infinibox.
author: Gregory Shulov (@GR360RY)
options:
name:
description:
- Export name. Should always start with C(/). (ex. name=/data)
aliases: ['export', 'path']
required: true
state:
description:
- Creates/Modifies export when present and removes when absent.
required: false
default: "present"
choices: [ "present", "absent" ]
inner_path:
description:
- Internal path of the export.
default: "/"
client_list:
description:
- List of dictionaries with client entries. See examples.
Check infini_export_client module to modify individual NFS client entries for export.
default: "All Hosts(*), RW, no_root_squash: True"
required: false
filesystem:
description:
- Name of exported file system.
required: true
extends_documentation_fragment:
- community.general.infinibox
requirements:
- munch
'''
EXAMPLES = '''
- name: Export bar filesystem under foo pool as /data
infini_export:
name: /data01
filesystem: foo
user: admin
password: secret
system: ibox001
- name: Export and specify client list explicitly
infini_export:
name: /data02
filesystem: foo
client_list:
- client: 192.168.0.2
access: RW
no_root_squash: True
- client: 192.168.0.100
access: RO
no_root_squash: False
- client: 192.168.0.10-192.168.0.20
access: RO
no_root_squash: False
system: ibox001
user: admin
password: secret
'''
RETURN = '''
'''
import traceback
MUNCH_IMP_ERR = None
try:
from munch import unmunchify
HAS_MUNCH = True
except ImportError:
MUNCH_IMP_ERR = traceback.format_exc()
HAS_MUNCH = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible_collections.community.general.plugins.module_utils.infinibox import HAS_INFINISDK, api_wrapper, get_system, infinibox_argument_spec
def transform(d):
return frozenset(d.items())
@api_wrapper
def get_filesystem(module, system):
"""Return Filesystem or None"""
try:
return system.filesystems.get(name=module.params['filesystem'])
except Exception:
return None
@api_wrapper
def get_export(module, filesystem, system):
"""Return export if found. When not found return None"""
export = None
exports_to_list = system.exports.to_list()
for e in exports_to_list:
if e.get_export_path() == module.params['name']:
export = e
break
return export
@api_wrapper
def update_export(module, export, filesystem, system):
""" Create new filesystem or update existing one"""
changed = False
name = module.params['name']
client_list = module.params['client_list']
if export is None:
if not module.check_mode:
export = system.exports.create(export_path=name, filesystem=filesystem)
if client_list:
export.update_permissions(client_list)
changed = True
else:
if client_list:
if set(map(transform, unmunchify(export.get_permissions()))) != set(map(transform, client_list)):
if not module.check_mode:
export.update_permissions(client_list)
changed = True
module.exit_json(changed=changed)
@api_wrapper
def delete_export(module, export):
""" Delete file system"""
if not module.check_mode:
export.delete()
module.exit_json(changed=True)
def main():
argument_spec = infinibox_argument_spec()
argument_spec.update(
dict(
name=dict(required=True),
state=dict(default='present', choices=['present', 'absent']),
filesystem=dict(required=True),
client_list=dict(type='list')
)
)
module = AnsibleModule(argument_spec, supports_check_mode=True)
if not HAS_INFINISDK:
module.fail_json(msg=missing_required_lib('infinisdk'))
if not HAS_MUNCH:
module.fail_json(msg=missing_required_lib('munch'), exception=MUNCH_IMP_ERR)
state = module.params['state']
system = get_system(module)
filesystem = get_filesystem(module, system)
export = get_export(module, filesystem, system)
if filesystem is None:
module.fail_json(msg='Filesystem {0} not found'.format(module.params['filesystem']))
if state == 'present':
update_export(module, export, filesystem, system)
elif export and state == 'absent':
delete_export(module, export)
elif export is None and state == 'absent':
module.exit_json(changed=False)
if __name__ == '__main__':
main()

View file

@@ -0,0 +1,206 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Gregory Shulov (gregory.shulov@gmail.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: infini_export_client
short_description: Create, Delete or Modify NFS Client(s) for existing exports on Infinibox
description:
- This module creates, deletes or modifies NFS client(s) for existing exports on Infinibox.
author: Gregory Shulov (@GR360RY)
options:
client:
description:
- Client IP or Range. Ranges can be defined as follows:
192.168.0.1-192.168.0.254.
aliases: ['name']
required: true
state:
description:
- Creates/Modifies client when present and removes when absent.
required: false
default: "present"
choices: [ "present", "absent" ]
access_mode:
description:
- Read Write or Read Only Access.
choices: [ "RW", "RO" ]
default: RW
required: false
no_root_squash:
description:
- Don't squash root user to anonymous. Will be set to "no" on creation if not specified explicitly.
type: bool
default: no
required: false
export:
description:
- Name of the export.
required: true
extends_documentation_fragment:
- community.general.infinibox
requirements:
- munch
'''
EXAMPLES = '''
- name: Make sure nfs client 10.0.0.1 is configured for export. Allow root access
infini_export_client:
client: 10.0.0.1
access_mode: RW
no_root_squash: yes
export: /data
user: admin
password: secret
system: ibox001
- name: Add multiple clients with RO access. Squash root privileges
infini_export_client:
client: "{{ item }}"
access_mode: RO
no_root_squash: no
export: /data
user: admin
password: secret
system: ibox001
with_items:
- 10.0.0.2
- 10.0.0.3
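# Illustrative removal example (placeholder values); state=absent removes the client entry from the export
- name: Remove client 10.0.0.1 from export /data
  infini_export_client:
    client: 10.0.0.1
    state: absent
    export: /data
    user: admin
    password: secret
    system: ibox001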
'''
RETURN = '''
'''
import traceback
MUNCH_IMP_ERR = None
try:
from munch import Munch, unmunchify
HAS_MUNCH = True
except ImportError:
MUNCH_IMP_ERR = traceback.format_exc()
HAS_MUNCH = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible_collections.community.general.plugins.module_utils.infinibox import HAS_INFINISDK, api_wrapper, get_system, infinibox_argument_spec
def transform(d):
return frozenset(d.items())
@api_wrapper
def get_export(module, system):
"""Return export if found. Fail module if not found"""
try:
export = system.exports.get(export_path=module.params['export'])
except Exception:
module.fail_json(msg="Export with export path {0} not found".format(module.params['export']))
return export
@api_wrapper
def update_client(module, export):
"""Update export client list"""
changed = False
client = module.params['client']
access_mode = module.params['access_mode']
no_root_squash = module.params['no_root_squash']
client_list = export.get_permissions()
client_not_in_list = True
for index, item in enumerate(client_list):
if item.client == client:
client_not_in_list = False
if item.access != access_mode:
item.access = access_mode
changed = True
if item.no_root_squash is not no_root_squash:
item.no_root_squash = no_root_squash
changed = True
# If access_mode and/or no_root_squash not passed as arguments to the module,
# use access_mode with RW value and set no_root_squash to False
if client_not_in_list:
changed = True
client_list.append(Munch(client=client, access=access_mode, no_root_squash=no_root_squash))
if changed:
for index, item in enumerate(client_list):
client_list[index] = unmunchify(item)
if not module.check_mode:
export.update_permissions(client_list)
module.exit_json(changed=changed)
@api_wrapper
def delete_client(module, export):
"""Update export client list"""
changed = False
client = module.params['client']
client_list = export.get_permissions()
for index, item in enumerate(client_list):
if item.client == client:
changed = True
del client_list[index]
break
if changed:
for index, item in enumerate(client_list):
client_list[index] = unmunchify(item)
if not module.check_mode:
export.update_permissions(client_list)
module.exit_json(changed=changed)
def main():
argument_spec = infinibox_argument_spec()
argument_spec.update(
dict(
client=dict(required=True),
access_mode=dict(choices=['RO', 'RW'], default='RW'),
no_root_squash=dict(type='bool', default='no'),
state=dict(default='present', choices=['present', 'absent']),
export=dict(required=True)
)
)
module = AnsibleModule(argument_spec, supports_check_mode=True)
if not HAS_INFINISDK:
module.fail_json(msg=missing_required_lib('infinisdk'))
if not HAS_MUNCH:
module.fail_json(msg=missing_required_lib('munch'), exception=MUNCH_IMP_ERR)
system = get_system(module)
export = get_export(module, system)
if module.params['state'] == 'present':
update_client(module, export)
else:
delete_client(module, export)
if __name__ == '__main__':
main()

View file

@@ -0,0 +1,171 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Gregory Shulov (gregory.shulov@gmail.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: infini_fs
short_description: Create, Delete or Modify filesystems on Infinibox
description:
- This module creates, deletes or modifies filesystems on Infinibox.
author: Gregory Shulov (@GR360RY)
options:
name:
description:
- File system name.
required: true
state:
description:
- Creates/Modifies file system when present or removes when absent.
required: false
default: present
choices: [ "present", "absent" ]
size:
description:
- File system size in MB, GB or TB units. See examples.
required: false
pool:
description:
- Pool that will host file system.
required: true
extends_documentation_fragment:
- community.general.infinibox
requirements:
- capacity
'''
EXAMPLES = '''
- name: Create new file system named foo under pool named bar
infini_fs:
name: foo
size: 1TB
pool: bar
state: present
user: admin
password: secret
system: ibox001
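# Illustrative removal example (placeholder values); pool is still required by the module arguments
- name: Remove file system foo from pool bar
  infini_fs:
    name: foo
    pool: bar
    state: absent
    user: admin
    password: secret
    system: ibox001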
'''
RETURN = '''
'''
import traceback
CAPACITY_IMP_ERR = None
try:
from capacity import KiB, Capacity
HAS_CAPACITY = True
except ImportError:
CAPACITY_IMP_ERR = traceback.format_exc()
HAS_CAPACITY = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible_collections.community.general.plugins.module_utils.infinibox import HAS_INFINISDK, api_wrapper, get_system, infinibox_argument_spec
@api_wrapper
def get_pool(module, system):
"""Return Pool or None"""
try:
return system.pools.get(name=module.params['pool'])
except Exception:
return None
@api_wrapper
def get_filesystem(module, system):
"""Return Filesystem or None"""
try:
return system.filesystems.get(name=module.params['name'])
except Exception:
return None
@api_wrapper
def create_filesystem(module, system):
"""Create Filesystem"""
if not module.check_mode:
filesystem = system.filesystems.create(name=module.params['name'], pool=get_pool(module, system))
if module.params['size']:
size = Capacity(module.params['size']).roundup(64 * KiB)
filesystem.update_size(size)
module.exit_json(changed=True)
@api_wrapper
def update_filesystem(module, filesystem):
"""Update Filesystem"""
changed = False
if module.params['size']:
size = Capacity(module.params['size']).roundup(64 * KiB)
if filesystem.get_size() != size:
if not module.check_mode:
filesystem.update_size(size)
changed = True
module.exit_json(changed=changed)
@api_wrapper
def delete_filesystem(module, filesystem):
""" Delete Filesystem"""
if not module.check_mode:
filesystem.delete()
module.exit_json(changed=True)
def main():
argument_spec = infinibox_argument_spec()
argument_spec.update(
dict(
name=dict(required=True),
state=dict(default='present', choices=['present', 'absent']),
pool=dict(required=True),
size=dict()
)
)
module = AnsibleModule(argument_spec, supports_check_mode=True)
if not HAS_INFINISDK:
module.fail_json(msg=missing_required_lib('infinisdk'))
if not HAS_CAPACITY:
module.fail_json(msg=missing_required_lib('capacity'), exception=CAPACITY_IMP_ERR)
if module.params['size']:
try:
Capacity(module.params['size'])
except Exception:
module.fail_json(msg='size (Physical Capacity) should be defined in MB, GB, TB or PB units')
state = module.params['state']
system = get_system(module)
pool = get_pool(module, system)
filesystem = get_filesystem(module, system)
if pool is None:
module.fail_json(msg='Pool {0} not found'.format(module.params['pool']))
if state == 'present' and not filesystem:
create_filesystem(module, system)
elif state == 'present' and filesystem:
update_filesystem(module, filesystem)
elif state == 'absent' and filesystem:
delete_filesystem(module, filesystem)
elif state == 'absent' and not filesystem:
module.exit_json(changed=False)
if __name__ == '__main__':
main()

View file

@@ -0,0 +1,160 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Gregory Shulov (gregory.shulov@gmail.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: infini_host
short_description: Create, Delete and Modify Hosts on Infinibox
description:
- This module creates, deletes or modifies hosts on Infinibox.
author: Gregory Shulov (@GR360RY)
options:
name:
description:
- Host Name
required: true
state:
description:
- Creates/Modifies Host when present or removes when absent
required: false
default: present
choices: [ "present", "absent" ]
wwns:
description:
- List of wwns of the host
required: false
volume:
description:
- Volume name to map to the host
required: false
extends_documentation_fragment:
- community.general.infinibox
'''
EXAMPLES = '''
- name: Create new host
infini_host:
name: foo.example.com
user: admin
password: secret
system: ibox001
- name: Make sure host bar is available with wwn ports
infini_host:
name: bar.example.com
wwns:
- "00:00:00:00:00:00:00"
- "11:11:11:11:11:11:11"
system: ibox01
user: admin
password: secret
- name: Map host foo.example.com to volume bar
infini_host:
name: foo.example.com
volume: bar
system: ibox01
user: admin
password: secret
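# Illustrative removal example (placeholder values); state=absent deletes the host object
- name: Remove host foo.example.com
  infini_host:
    name: foo.example.com
    state: absent
    system: ibox01
    user: admin
    password: secret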
'''
RETURN = '''
'''
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible_collections.community.general.plugins.module_utils.infinibox import HAS_INFINISDK, api_wrapper, get_system, infinibox_argument_spec
@api_wrapper
def get_host(module, system):
host = None
for h in system.hosts.to_list():
if h.get_name() == module.params['name']:
host = h
break
return host
@api_wrapper
def create_host(module, system):
changed = True
if not module.check_mode:
host = system.hosts.create(name=module.params['name'])
if module.params['wwns']:
for p in module.params['wwns']:
host.add_fc_port(p)
if module.params['volume']:
host.map_volume(system.volumes.get(name=module.params['volume']))
module.exit_json(changed=changed)
@api_wrapper
def update_host(module, host):
changed = False
module.exit_json(changed=changed)
@api_wrapper
def delete_host(module, host):
changed = True
if not module.check_mode:
host.delete()
module.exit_json(changed=changed)
def main():
argument_spec = infinibox_argument_spec()
argument_spec.update(
dict(
name=dict(required=True),
state=dict(default='present', choices=['present', 'absent']),
wwns=dict(type='list'),
volume=dict()
)
)
module = AnsibleModule(argument_spec, supports_check_mode=True)
if not HAS_INFINISDK:
module.fail_json(msg=missing_required_lib('infinisdk'))
state = module.params['state']
system = get_system(module)
host = get_host(module, system)
if module.params['volume']:
try:
system.volumes.get(name=module.params['volume'])
except Exception:
module.fail_json(msg='Volume {0} not found'.format(module.params['volume']))
if host and state == 'present':
update_host(module, host)
elif host and state == 'absent':
delete_host(module, host)
elif host is None and state == 'absent':
module.exit_json(changed=False)
else:
create_host(module, system)
if __name__ == '__main__':
main()

View file

@@ -0,0 +1,215 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Gregory Shulov (gregory.shulov@gmail.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: infini_pool
short_description: Create, Delete and Modify Pools on Infinibox
description:
- This module creates, deletes or modifies pools on Infinibox.
author: Gregory Shulov (@GR360RY)
options:
name:
description:
- Pool Name
required: true
state:
description:
- Creates/Modifies Pool when present or removes when absent
required: false
default: present
choices: [ "present", "absent" ]
size:
description:
- Pool Physical Capacity in MB, GB or TB units.
If pool size is not set on pool creation, size will be equal to 1TB.
See examples.
required: false
vsize:
description:
- Pool Virtual Capacity in MB, GB or TB units.
If pool vsize is not set on pool creation, Virtual Capacity will be equal to Physical Capacity.
See examples.
required: false
ssd_cache:
description:
- Enable/Disable SSD Cache on Pool
required: false
default: yes
type: bool
notes:
- Infinibox Admin level access is required for pool modifications
extends_documentation_fragment:
- community.general.infinibox
requirements:
- capacity
'''
EXAMPLES = '''
- name: Make sure pool foo exists. Set pool physical capacity to 10TB
infini_pool:
name: foo
size: 10TB
vsize: 10TB
user: admin
password: secret
system: ibox001
- name: Disable SSD Cache on pool
infini_pool:
name: foo
ssd_cache: no
user: admin
password: secret
system: ibox001
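# Illustrative removal example (placeholder values); state=absent deletes the pool
- name: Remove pool foo
  infini_pool:
    name: foo
    state: absent
    user: admin
    password: secret
    system: ibox001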
'''
RETURN = '''
'''
import traceback
CAPACITY_IMP_ERR = None
try:
from capacity import KiB, Capacity
HAS_CAPACITY = True
except ImportError:
CAPACITY_IMP_ERR = traceback.format_exc()
HAS_CAPACITY = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible_collections.community.general.plugins.module_utils.infinibox import HAS_INFINISDK, api_wrapper, get_system, infinibox_argument_spec
@api_wrapper
def get_pool(module, system):
"""Return Pool on None"""
try:
return system.pools.get(name=module.params['name'])
except Exception:
return None
@api_wrapper
def create_pool(module, system):
"""Create Pool"""
name = module.params['name']
size = module.params['size']
vsize = module.params['vsize']
ssd_cache = module.params['ssd_cache']
if not module.check_mode:
if not size and not vsize:
pool = system.pools.create(name=name, physical_capacity=Capacity('1TB'), virtual_capacity=Capacity('1TB'))
elif size and not vsize:
pool = system.pools.create(name=name, physical_capacity=Capacity(size), virtual_capacity=Capacity(size))
elif not size and vsize:
pool = system.pools.create(name=name, physical_capacity=Capacity('1TB'), virtual_capacity=Capacity(vsize))
else:
pool = system.pools.create(name=name, physical_capacity=Capacity(size), virtual_capacity=Capacity(vsize))
# Default value of ssd_cache is True. Disable ssd caching if False
if not ssd_cache:
pool.update_ssd_enabled(ssd_cache)
module.exit_json(changed=True)
@api_wrapper
def update_pool(module, system, pool):
"""Update Pool"""
changed = False
size = module.params['size']
vsize = module.params['vsize']
ssd_cache = module.params['ssd_cache']
# Roundup the capacity to mimic Infinibox behaviour
if size:
physical_capacity = Capacity(size).roundup(6 * 64 * KiB)
if pool.get_physical_capacity() != physical_capacity:
if not module.check_mode:
pool.update_physical_capacity(physical_capacity)
changed = True
if vsize:
virtual_capacity = Capacity(vsize).roundup(6 * 64 * KiB)
if pool.get_virtual_capacity() != virtual_capacity:
if not module.check_mode:
pool.update_virtual_capacity(virtual_capacity)
changed = True
if pool.get_ssd_enabled() != ssd_cache:
if not module.check_mode:
pool.update_ssd_enabled(ssd_cache)
changed = True
module.exit_json(changed=changed)
@api_wrapper
def delete_pool(module, pool):
"""Delete Pool"""
if not module.check_mode:
pool.delete()
module.exit_json(changed=True)
def main():
argument_spec = infinibox_argument_spec()
argument_spec.update(
dict(
name=dict(required=True),
state=dict(default='present', choices=['present', 'absent']),
size=dict(),
vsize=dict(),
ssd_cache=dict(type='bool', default=True)
)
)
module = AnsibleModule(argument_spec, supports_check_mode=True)
if not HAS_INFINISDK:
module.fail_json(msg=missing_required_lib('infinisdk'))
if not HAS_CAPACITY:
module.fail_json(msg=missing_required_lib('capacity'), exception=CAPACITY_IMP_ERR)
if module.params['size']:
try:
Capacity(module.params['size'])
except Exception:
module.fail_json(msg='size (Physical Capacity) should be defined in MB, GB, TB or PB units')
if module.params['vsize']:
try:
Capacity(module.params['vsize'])
except Exception:
module.fail_json(msg='vsize (Virtual Capacity) should be defined in MB, GB, TB or PB units')
state = module.params['state']
system = get_system(module)
pool = get_pool(module, system)
if state == 'present' and not pool:
create_pool(module, system)
elif state == 'present' and pool:
update_pool(module, system, pool)
elif state == 'absent' and pool:
delete_pool(module, pool)
elif state == 'absent' and not pool:
module.exit_json(changed=False)
if __name__ == '__main__':
main()

View file

@@ -0,0 +1,166 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Gregory Shulov (gregory.shulov@gmail.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: infini_vol
short_description: Create, Delete or Modify volumes on Infinibox
description:
- This module creates, deletes or modifies volume on Infinibox.
author: Gregory Shulov (@GR360RY)
options:
name:
description:
- Volume Name
required: true
state:
description:
- Creates/Modifies volume when present or removes when absent
required: false
default: present
choices: [ "present", "absent" ]
size:
description:
- Volume size in MB, GB or TB units. See examples.
required: false
pool:
description:
- Pool that volume will reside on
required: true
extends_documentation_fragment:
- community.general.infinibox
requirements:
- capacity
'''
EXAMPLES = '''
- name: Create new volume named foo under pool named bar
infini_vol:
name: foo
size: 1TB
pool: bar
state: present
user: admin
password: secret
system: ibox001
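# Illustrative removal example (placeholder values); pool is still required by the module arguments
- name: Remove volume foo from pool bar
  infini_vol:
    name: foo
    pool: bar
    state: absent
    user: admin
    password: secret
    system: ibox001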
'''
RETURN = '''
'''
try:
from capacity import KiB, Capacity
HAS_CAPACITY = True
except ImportError:
HAS_CAPACITY = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible_collections.community.general.plugins.module_utils.infinibox import HAS_INFINISDK, api_wrapper, get_system, infinibox_argument_spec
@api_wrapper
def get_pool(module, system):
"""Return Pool or None"""
try:
return system.pools.get(name=module.params['pool'])
except Exception:
return None
@api_wrapper
def get_volume(module, system):
"""Return Volume or None"""
try:
return system.volumes.get(name=module.params['name'])
except Exception:
return None
@api_wrapper
def create_volume(module, system):
"""Create Volume"""
if not module.check_mode:
volume = system.volumes.create(name=module.params['name'], pool=get_pool(module, system))
if module.params['size']:
size = Capacity(module.params['size']).roundup(64 * KiB)
volume.update_size(size)
module.exit_json(changed=True)
@api_wrapper
def update_volume(module, volume):
"""Update Volume"""
changed = False
if module.params['size']:
size = Capacity(module.params['size']).roundup(64 * KiB)
if volume.get_size() != size:
if not module.check_mode:
volume.update_size(size)
changed = True
module.exit_json(changed=changed)
@api_wrapper
def delete_volume(module, volume):
""" Delete Volume"""
if not module.check_mode:
volume.delete()
module.exit_json(changed=True)
def main():
argument_spec = infinibox_argument_spec()
argument_spec.update(
dict(
name=dict(required=True),
state=dict(default='present', choices=['present', 'absent']),
pool=dict(required=True),
size=dict()
)
)
module = AnsibleModule(argument_spec, supports_check_mode=True)
if not HAS_INFINISDK:
module.fail_json(msg=missing_required_lib('infinisdk'))
if not HAS_CAPACITY:
module.fail_json(msg=missing_required_lib('capacity'))
if module.params['size']:
try:
Capacity(module.params['size'])
except Exception:
module.fail_json(msg='size (Physical Capacity) should be defined in MB, GB, TB or PB units')
state = module.params['state']
system = get_system(module)
pool = get_pool(module, system)
volume = get_volume(module, system)
if pool is None:
module.fail_json(msg='Pool {0} not found'.format(module.params['pool']))
if state == 'present' and not volume:
create_volume(module, system)
elif state == 'present' and volume:
update_volume(module, volume)
elif state == 'absent' and volume:
delete_volume(module, volume)
elif state == 'absent' and not volume:
module.exit_json(changed=False)
if __name__ == '__main__':
main()

View file

@@ -0,0 +1,233 @@
#!/usr/bin/python
# (c) 2017, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: na_cdot_aggregate
short_description: Manage NetApp cDOT aggregates.
extends_documentation_fragment:
- netapp.ontap.netapp.ontap
author: Sumit Kumar (@timuster) <sumit4@netapp.com>
deprecated:
removed_in: '2.11'
why: Updated modules released with increased functionality
alternative: Use M(na_ontap_aggregate) instead.
description:
- Create or destroy aggregates on NetApp cDOT.
options:
state:
required: true
description:
- Whether the specified aggregate should exist or not.
choices: ['present', 'absent']
name:
required: true
description:
- The name of the aggregate to manage.
disk_count:
description:
- Number of disks to place into the aggregate, including parity disks.
- The disks in this newly-created aggregate come from the spare disk pool.
- The smallest disks in this pool join the aggregate first, unless the C(disk-size) argument is provided.
- Either C(disk-count) or C(disks) must be supplied. Range [0..2^31-1].
- Required when C(state=present).
'''
EXAMPLES = """
- name: Manage Aggregates
na_cdot_aggregate:
state: present
name: ansibleAggr
disk_count: 1
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
- name: Manage Aggregates
na_cdot_aggregate:
state: present
name: ansibleAggr
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppCDOTAggregate(object):
def __init__(self):
self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
name=dict(required=True, type='str'),
disk_count=dict(required=False, type='int'),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
required_if=[
('state', 'present', ['disk_count'])
],
supports_check_mode=True
)
p = self.module.params
# set up state variables
self.state = p['state']
self.name = p['name']
self.disk_count = p['disk_count']
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_ontap_zapi(module=self.module)
def get_aggr(self):
"""
Checks if aggregate exists.
:return:
True if aggregate found
False if aggregate is not found
:rtype: bool
"""
aggr_get_iter = netapp_utils.zapi.NaElement('aggr-get-iter')
query_details = netapp_utils.zapi.NaElement.create_node_with_children(
'aggr-attributes', **{'aggregate-name': self.name})
query = netapp_utils.zapi.NaElement('query')
query.add_child_elem(query_details)
aggr_get_iter.add_child_elem(query)
try:
result = self.server.invoke_successfully(aggr_get_iter,
enable_tunneling=False)
except netapp_utils.zapi.NaApiError as e:
# Error 13040 denotes an aggregate not being found.
if to_native(e.code) == "13040":
return False
else:
self.module.fail_json(msg=to_native(e), exception=traceback.format_exc())
if (result.get_child_by_name('num-records') and
int(result.get_child_content('num-records')) >= 1):
return True
else:
return False
def create_aggr(self):
aggr_create = netapp_utils.zapi.NaElement.create_node_with_children(
'aggr-create', **{'aggregate': self.name,
'disk-count': str(self.disk_count)})
try:
self.server.invoke_successfully(aggr_create,
enable_tunneling=False)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg="Error provisioning aggregate %s: %s" % (self.name, to_native(e)),
exception=traceback.format_exc())
def delete_aggr(self):
aggr_destroy = netapp_utils.zapi.NaElement.create_node_with_children(
'aggr-destroy', **{'aggregate': self.name})
try:
self.server.invoke_successfully(aggr_destroy,
enable_tunneling=False)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg="Error removing aggregate %s: %s" % (self.name, to_native(e)),
exception=traceback.format_exc())
def rename_aggregate(self):
aggr_rename = netapp_utils.zapi.NaElement.create_node_with_children(
'aggr-rename', **{'aggregate': self.name,
'new-aggregate-name':
self.name})
try:
self.server.invoke_successfully(aggr_rename,
enable_tunneling=False)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg="Error renaming aggregate %s: %s" % (self.name, to_native(e)),
exception=traceback.format_exc())
def apply(self):
changed = False
aggregate_exists = self.get_aggr()
rename_aggregate = False
# check if anything needs to be changed (add/delete/update)
if aggregate_exists:
if self.state == 'absent':
changed = True
elif self.state == 'present':
if self.name is not None and not self.name == self.name:
rename_aggregate = True
changed = True
else:
if self.state == 'present':
# Aggregate does not exist, but requested state is present.
changed = True
if changed:
if self.module.check_mode:
pass
else:
if self.state == 'present':
if not aggregate_exists:
self.create_aggr()
else:
if rename_aggregate:
self.rename_aggregate()
elif self.state == 'absent':
self.delete_aggr()
self.module.exit_json(changed=changed)
def main():
v = NetAppCDOTAggregate()
v.apply()
if __name__ == '__main__':
main()

View file

@@ -0,0 +1,299 @@
#!/usr/bin/python
# (c) 2017, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: na_cdot_license
short_description: Manage NetApp cDOT protocol and feature licenses
extends_documentation_fragment:
- netapp.ontap.netapp.ontap
author: Sumit Kumar (@timuster) <sumit4@netapp.com>
deprecated:
removed_in: '2.11'
why: Updated modules released with increased functionality
alternative: Use M(na_ontap_license) instead.
description:
- Add or remove licenses on NetApp ONTAP.
options:
remove_unused:
description:
- Remove licenses that have no controller affiliation in the cluster.
type: bool
remove_expired:
description:
- Remove licenses that have expired in the cluster.
type: bool
serial_number:
description:
- Serial number of the node associated with the license.
- This parameter is used primarily when removing a license for a specific service.
- If this parameter is not provided, the cluster serial number is used by default.
licenses:
description:
- List of licenses to add or remove.
- Please note that trying to remove a non-existent license will throw an error.
suboptions:
base:
description:
- Cluster Base License
nfs:
description:
- NFS License
cifs:
description:
- CIFS License
iscsi:
description:
- iSCSI License
fcp:
description:
- FCP License
cdmi:
description:
- CDMI License
snaprestore:
description:
- SnapRestore License
snapmirror:
description:
- SnapMirror License
flexclone:
description:
- FlexClone License
snapvault:
description:
- SnapVault License
snaplock:
description:
- SnapLock License
snapmanagersuite:
description:
- SnapManagerSuite License
snapprotectapps:
description:
- SnapProtectApp License
v_storageattach:
description:
- Virtual Attached Storage License
'''
EXAMPLES = """
- name: Add licenses
na_cdot_license:
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
serial_number: #################
licenses:
nfs: #################
cifs: #################
iscsi: #################
fcp: #################
snaprestore: #################
flexclone: #################
- name: Remove licenses
na_cdot_license:
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
remove_unused: false
remove_expired: true
serial_number: #################
licenses:
nfs: remove
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppCDOTLicense(object):
def __init__(self):
self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
self.argument_spec.update(dict(
serial_number=dict(required=False, type='str', default=None),
remove_unused=dict(default=False, type='bool'),
remove_expired=dict(default=False, type='bool'),
licenses=dict(default=False, type='dict'),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=False
)
p = self.module.params
# set up state variables
self.serial_number = p['serial_number']
self.remove_unused = p['remove_unused']
self.remove_expired = p['remove_expired']
self.licenses = p['licenses']
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_ontap_zapi(module=self.module)
def get_licensing_status(self):
"""
Check licensing status
:return: package (key) and licensing status (value)
:rtype: dict
"""
license_status = netapp_utils.zapi.NaElement('license-v2-status-list-info')
result = None
try:
result = self.server.invoke_successfully(license_status,
enable_tunneling=False)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg="Error checking license status: %s" %
to_native(e), exception=traceback.format_exc())
return_dictionary = {}
license_v2_status = result.get_child_by_name('license-v2-status')
if license_v2_status:
for license_v2_status_info in license_v2_status.get_children():
package = license_v2_status_info.get_child_content('package')
status = license_v2_status_info.get_child_content('method')
return_dictionary[package] = status
return return_dictionary
def remove_licenses(self, remove_list):
"""
Remove requested licenses
:param:
remove_list : List of packages to remove
"""
license_delete = netapp_utils.zapi.NaElement('license-v2-delete')
for package in remove_list:
license_delete.add_new_child('package', package)
if self.serial_number is not None:
license_delete.add_new_child('serial-number', self.serial_number)
try:
self.server.invoke_successfully(license_delete,
enable_tunneling=False)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg="Error removing license %s" %
to_native(e), exception=traceback.format_exc())
def remove_unused_licenses(self):
"""
Remove unused licenses
"""
remove_unused = netapp_utils.zapi.NaElement('license-v2-delete-unused')
try:
self.server.invoke_successfully(remove_unused,
enable_tunneling=False)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg="Error removing unused licenses: %s" %
to_native(e), exception=traceback.format_exc())
def remove_expired_licenses(self):
"""
Remove expired licenses
"""
remove_expired = netapp_utils.zapi.NaElement('license-v2-delete-expired')
try:
self.server.invoke_successfully(remove_expired,
enable_tunneling=False)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg="Error removing expired licenses: %s" %
to_native(e), exception=traceback.format_exc())
def update_licenses(self):
"""
Update licenses
"""
# Remove unused and expired licenses, if requested.
if self.remove_unused:
self.remove_unused_licenses()
if self.remove_expired:
self.remove_expired_licenses()
# Next, add/remove specific requested licenses.
license_add = netapp_utils.zapi.NaElement('license-v2-add')
codes = netapp_utils.zapi.NaElement('codes')
remove_list = []
for key, value in self.licenses.items():
str_value = str(value)
# Make sure license is not an empty string.
if str_value and str_value.strip():
if str_value.lower() == 'remove':
remove_list.append(str(key).lower())
else:
codes.add_new_child('license-code-v2', str_value)
# Remove requested licenses.
if len(remove_list) != 0:
self.remove_licenses(remove_list)
# Add requested licenses
if len(codes.get_children()) != 0:
license_add.add_child_elem(codes)
try:
self.server.invoke_successfully(license_add,
enable_tunneling=False)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg="Error adding licenses: %s" %
to_native(e), exception=traceback.format_exc())
def apply(self):
changed = False
# Add / Update licenses.
license_status = self.get_licensing_status()
self.update_licenses()
new_license_status = self.get_licensing_status()
if license_status != new_license_status:
changed = True
self.module.exit_json(changed=changed)
def main():
v = NetAppCDOTLicense()
v.apply()
if __name__ == '__main__':
main()

View file

@@ -0,0 +1,378 @@
#!/usr/bin/python
# (c) 2017, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: na_cdot_lun
short_description: Manage NetApp cDOT luns
extends_documentation_fragment:
- netapp.ontap.netapp.ontap
author: Sumit Kumar (@timuster) <sumit4@netapp.com>
deprecated:
removed_in: '2.11'
why: Updated modules released with increased functionality
alternative: Use M(na_ontap_lun) instead.
description:
- Create, destroy, resize luns on NetApp cDOT.
options:
state:
description:
- Whether the specified lun should exist or not.
required: true
choices: ['present', 'absent']
name:
description:
- The name of the lun to manage.
required: true
flexvol_name:
description:
- The name of the FlexVol the lun should exist on.
- Required when C(state=present).
size:
description:
- The size of the lun in C(size_unit).
- Required when C(state=present).
size_unit:
description:
- The unit used to interpret the size parameter.
choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
default: 'gb'
force_resize:
description:
- Forcibly reduce the size. This must be set when reducing the size of a LUN, as a safeguard against shrinking it accidentally.
default: false
force_remove:
description:
- If "true", override checks that prevent a LUN from being destroyed if it is online and mapped.
- If "false", destroying an online and mapped LUN will fail.
default: false
force_remove_fenced:
description:
- If "true", override checks that prevent a LUN from being destroyed while it is fenced.
- If "false", attempting to destroy a fenced LUN will fail.
- The default if not specified is "false". This field is available in Data ONTAP 8.2 and later.
default: false
vserver:
required: true
description:
- The name of the vserver to use.
'''
EXAMPLES = """
- name: Create LUN
na_cdot_lun:
state: present
name: ansibleLUN
flexvol_name: ansibleVolume
vserver: ansibleVServer
size: 5
size_unit: mb
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
- name: Resize Lun
na_cdot_lun:
state: present
name: ansibleLUN
force_resize: True
flexvol_name: ansibleVolume
vserver: ansibleVServer
size: 5
size_unit: gb
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppCDOTLUN(object):
def __init__(self):
self._size_unit_map = dict(
bytes=1,
b=1,
kb=1024,
mb=1024 ** 2,
gb=1024 ** 3,
tb=1024 ** 4,
pb=1024 ** 5,
eb=1024 ** 6,
zb=1024 ** 7,
yb=1024 ** 8
)
self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
name=dict(required=True, type='str'),
size=dict(type='int'),
size_unit=dict(default='gb',
choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb',
'pb', 'eb', 'zb', 'yb'], type='str'),
force_resize=dict(default=False, type='bool'),
force_remove=dict(default=False, type='bool'),
force_remove_fenced=dict(default=False, type='bool'),
flexvol_name=dict(type='str'),
vserver=dict(required=True, type='str'),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
required_if=[
('state', 'present', ['flexvol_name', 'size'])
],
supports_check_mode=True
)
p = self.module.params
# set up state variables
self.state = p['state']
self.name = p['name']
self.size_unit = p['size_unit']
if p['size'] is not None:
self.size = p['size'] * self._size_unit_map[self.size_unit]
else:
self.size = None
self.force_resize = p['force_resize']
self.force_remove = p['force_remove']
self.force_remove_fenced = p['force_remove_fenced']
self.flexvol_name = p['flexvol_name']
self.vserver = p['vserver']
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_ontap_zapi(module=self.module, vserver=self.vserver)
def get_lun(self):
"""
Return details about the LUN
:return: Details about the lun
:rtype: dict
"""
luns = []
tag = None
while True:
lun_info = netapp_utils.zapi.NaElement('lun-get-iter')
if tag:
lun_info.add_new_child('tag', tag, True)
query_details = netapp_utils.zapi.NaElement('lun-info')
query_details.add_new_child('vserver', self.vserver)
query_details.add_new_child('volume', self.flexvol_name)
query = netapp_utils.zapi.NaElement('query')
query.add_child_elem(query_details)
lun_info.add_child_elem(query)
result = self.server.invoke_successfully(lun_info, True)
if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
attr_list = result.get_child_by_name('attributes-list')
luns.extend(attr_list.get_children())
tag = result.get_child_content('next-tag')
if tag is None:
break
# The LUNs have been extracted.
# Find the specified lun and extract details.
return_value = None
for lun in luns:
path = lun.get_child_content('path')
_rest, _splitter, found_name = path.rpartition('/')
if found_name == self.name:
size = lun.get_child_content('size')
# Find out if the lun is attached
attached_to = None
lun_id = None
if lun.get_child_content('mapped') == 'true':
lun_map_list = netapp_utils.zapi.NaElement.create_node_with_children(
'lun-map-list-info', **{'path': path})
result = self.server.invoke_successfully(
lun_map_list, enable_tunneling=True)
igroups = result.get_child_by_name('initiator-groups')
if igroups:
for igroup_info in igroups.get_children():
igroup = igroup_info.get_child_content(
'initiator-group-name')
attached_to = igroup
lun_id = igroup_info.get_child_content('lun-id')
return_value = {
'name': found_name,
'size': size,
'attached_to': attached_to,
'lun_id': lun_id
}
else:
continue
return return_value
def create_lun(self):
"""
Create LUN with requested name and size
"""
path = '/vol/%s/%s' % (self.flexvol_name, self.name)
lun_create = netapp_utils.zapi.NaElement.create_node_with_children(
'lun-create-by-size', **{'path': path,
'size': str(self.size),
'ostype': 'linux'})
try:
self.server.invoke_successfully(lun_create, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg="Error provisioning lun %s of size %s: %s" % (self.name, self.size, to_native(e)),
exception=traceback.format_exc())
def delete_lun(self):
"""
Delete requested LUN
"""
path = '/vol/%s/%s' % (self.flexvol_name, self.name)
lun_delete = netapp_utils.zapi.NaElement.create_node_with_children(
'lun-destroy', **{'path': path,
'force': str(self.force_remove),
'destroy-fenced-lun':
str(self.force_remove_fenced)})
try:
self.server.invoke_successfully(lun_delete, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg="Error deleting lun %s: %s" % (path, to_native(e)),
exception=traceback.format_exc())
def resize_lun(self):
"""
Resize requested LUN.
:return: True if LUN was actually re-sized, false otherwise.
:rtype: bool
"""
path = '/vol/%s/%s' % (self.flexvol_name, self.name)
lun_resize = netapp_utils.zapi.NaElement.create_node_with_children(
'lun-resize', **{'path': path,
'size': str(self.size),
'force': str(self.force_resize)})
try:
self.server.invoke_successfully(lun_resize, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as e:
if to_native(e.code) == "9042":
# Error 9042 denotes the new LUN size being the same as the
# old LUN size. This happens when there's barely any difference
# in the two sizes. For example, from 8388608 bytes to
# 8194304 bytes. This should go away if/when the default size
# requested/reported to/from the controller is changed to a
# larger unit (MB/GB/TB).
return False
else:
self.module.fail_json(msg="Error resizing lun %s: %s" % (path, to_native(e)),
exception=traceback.format_exc())
return True
def apply(self):
property_changed = False
multiple_properties_changed = False
size_changed = False
lun_exists = False
lun_detail = self.get_lun()
if lun_detail:
lun_exists = True
current_size = lun_detail['size']
if self.state == 'absent':
property_changed = True
elif self.state == 'present':
if not int(current_size) == self.size:
size_changed = True
property_changed = True
else:
if self.state == 'present':
property_changed = True
if property_changed:
if self.module.check_mode:
pass
else:
if self.state == 'present':
if not lun_exists:
self.create_lun()
else:
if size_changed:
# Ensure that size was actually changed. Please
# read notes in 'resize_lun' function for details.
size_changed = self.resize_lun()
if not size_changed and not \
multiple_properties_changed:
property_changed = False
elif self.state == 'absent':
self.delete_lun()
changed = property_changed or size_changed
# TODO: include other details about the lun (size, etc.)
self.module.exit_json(changed=changed)
def main():
v = NetAppCDOTLUN()
v.apply()
if __name__ == '__main__':
main()

View file

@@ -0,0 +1,239 @@
#!/usr/bin/python
# (c) 2017, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: na_cdot_qtree
short_description: Manage qtrees
extends_documentation_fragment:
- netapp.ontap.netapp.ontap
author: Sumit Kumar (@timuster) <sumit4@netapp.com>
deprecated:
removed_in: '2.11'
why: Updated modules released with increased functionality
alternative: Use M(na_ontap_qtree) instead.
description:
- Create or destroy Qtrees.
options:
state:
description:
- Whether the specified Qtree should exist or not.
required: true
choices: ['present', 'absent']
name:
description:
- The name of the Qtree to manage.
required: true
flexvol_name:
description:
- The name of the FlexVol the Qtree should exist on. Required when C(state=present).
vserver:
description:
- The name of the vserver to use.
required: true
'''
EXAMPLES = """
- name: Create QTree
na_cdot_qtree:
state: present
name: ansibleQTree
flexvol_name: ansibleVolume
vserver: ansibleVServer
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
- name: Rename QTree
na_cdot_qtree:
state: present
name: ansibleQTree
flexvol_name: ansibleVolume
vserver: ansibleVServer
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppCDOTQTree(object):
def __init__(self):
self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
name=dict(required=True, type='str'),
flexvol_name=dict(type='str'),
vserver=dict(required=True, type='str'),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
required_if=[
('state', 'present', ['flexvol_name'])
],
supports_check_mode=True
)
p = self.module.params
# set up state variables
self.state = p['state']
self.name = p['name']
self.flexvol_name = p['flexvol_name']
self.vserver = p['vserver']
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_ontap_zapi(module=self.module, vserver=self.vserver)
def get_qtree(self):
"""
Checks if the qtree exists.
:return:
True if qtree found
False if qtree is not found
:rtype: bool
"""
qtree_list_iter = netapp_utils.zapi.NaElement('qtree-list-iter')
query_details = netapp_utils.zapi.NaElement.create_node_with_children(
'qtree-info', **{'vserver': self.vserver,
'volume': self.flexvol_name,
'qtree': self.name})
query = netapp_utils.zapi.NaElement('query')
query.add_child_elem(query_details)
qtree_list_iter.add_child_elem(query)
result = self.server.invoke_successfully(qtree_list_iter,
enable_tunneling=True)
if (result.get_child_by_name('num-records') and
int(result.get_child_content('num-records')) >= 1):
return True
else:
return False
def create_qtree(self):
qtree_create = netapp_utils.zapi.NaElement.create_node_with_children(
'qtree-create', **{'volume': self.flexvol_name,
'qtree': self.name})
try:
self.server.invoke_successfully(qtree_create,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg="Error provisioning qtree %s: %s" % (self.name, to_native(e)),
exception=traceback.format_exc())
def delete_qtree(self):
path = '/vol/%s/%s' % (self.flexvol_name, self.name)
qtree_delete = netapp_utils.zapi.NaElement.create_node_with_children(
'qtree-delete', **{'qtree': path})
try:
self.server.invoke_successfully(qtree_delete,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg="Error deleting qtree %s: %s" % (path, to_native(e)),
exception=traceback.format_exc())
def rename_qtree(self):
path = '/vol/%s/%s' % (self.flexvol_name, self.name)
new_path = '/vol/%s/%s' % (self.flexvol_name, self.name)
qtree_rename = netapp_utils.zapi.NaElement.create_node_with_children(
'qtree-rename', **{'qtree': path,
'new-qtree-name': new_path})
try:
self.server.invoke_successfully(qtree_rename,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg="Error renaming qtree %s: %s" % (self.name, to_native(e)),
exception=traceback.format_exc())
def apply(self):
changed = False
qtree_exists = False
rename_qtree = False
qtree_detail = self.get_qtree()
if qtree_detail:
qtree_exists = True
if self.state == 'absent':
# Qtree exists, but requested state is 'absent'.
changed = True
elif self.state == 'present':
if self.name is not None and not self.name == \
self.name:
changed = True
rename_qtree = True
else:
if self.state == 'present':
# Qtree does not exist, but requested state is 'present'.
changed = True
if changed:
if self.module.check_mode:
pass
else:
if self.state == 'present':
if not qtree_exists:
self.create_qtree()
else:
if rename_qtree:
self.rename_qtree()
elif self.state == 'absent':
self.delete_qtree()
self.module.exit_json(changed=changed)
def main():
v = NetAppCDOTQTree()
v.apply()
if __name__ == '__main__':
main()

View file

@@ -0,0 +1,251 @@
#!/usr/bin/python
# (c) 2017, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: na_cdot_svm
short_description: Manage NetApp cDOT svm
extends_documentation_fragment:
- netapp.ontap.netapp.ontap
author: Sumit Kumar (@timuster) <sumit4@netapp.com>
deprecated:
removed_in: '2.11'
why: Updated modules released with increased functionality
alternative: Use M(na_ontap_svm) instead.
description:
- Create or destroy svm on NetApp cDOT
options:
state:
description:
- Whether the specified SVM should exist or not.
required: true
choices: ['present', 'absent']
name:
description:
- The name of the SVM to manage.
required: true
root_volume:
description:
- Root volume of the SVM. Required when C(state=present).
root_volume_aggregate:
description:
- The aggregate on which the root volume will be created.
- Required when C(state=present).
root_volume_security_style:
description:
- Security Style of the root volume.
- When specified as part of the vserver-create, this field represents the security style for the Vserver root volume.
- When specified as part of vserver-get-iter call, this will return the list of matching Vservers.
- Possible values are 'unix', 'ntfs', 'mixed'.
- The 'unified' security style, which applies only to Infinite Volumes, cannot be applied to a Vserver's root volume.
- Valid options are "unix" for NFS, "ntfs" for CIFS, "mixed" for Mixed, "unified" for Unified.
- Required when C(state=present)
choices: ['unix', 'ntfs', 'mixed', 'unified']
'''
EXAMPLES = """
- name: Create SVM
na_cdot_svm:
state: present
name: ansibleVServer
root_volume: vol1
root_volume_aggregate: aggr1
root_volume_security_style: mixed
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppCDOTSVM(object):
def __init__(self):
self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
name=dict(required=True, type='str'),
root_volume=dict(type='str'),
root_volume_aggregate=dict(type='str'),
root_volume_security_style=dict(type='str', choices=['unix',
'ntfs',
'mixed',
'unified'
]),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
required_if=[
('state', 'present', ['root_volume',
'root_volume_aggregate',
'root_volume_security_style'])
],
supports_check_mode=True
)
p = self.module.params
# set up state variables
self.state = p['state']
self.name = p['name']
self.root_volume = p['root_volume']
self.root_volume_aggregate = p['root_volume_aggregate']
self.root_volume_security_style = p['root_volume_security_style']
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_ontap_zapi(module=self.module)
def get_vserver(self):
"""
Checks if vserver exists.
:return:
True if vserver found
False if vserver is not found
:rtype: bool
"""
vserver_info = netapp_utils.zapi.NaElement('vserver-get-iter')
query_details = netapp_utils.zapi.NaElement.create_node_with_children(
'vserver-info', **{'vserver-name': self.name})
query = netapp_utils.zapi.NaElement('query')
query.add_child_elem(query_details)
vserver_info.add_child_elem(query)
result = self.server.invoke_successfully(vserver_info,
enable_tunneling=False)
if (result.get_child_by_name('num-records') and
int(result.get_child_content('num-records')) >= 1):
"""
TODO:
Return more relevant parameters about vserver that can
be updated by the playbook.
"""
return True
else:
return False
def create_vserver(self):
vserver_create = netapp_utils.zapi.NaElement.create_node_with_children(
'vserver-create', **{'vserver-name': self.name,
'root-volume': self.root_volume,
'root-volume-aggregate':
self.root_volume_aggregate,
'root-volume-security-style':
self.root_volume_security_style
})
try:
self.server.invoke_successfully(vserver_create,
enable_tunneling=False)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error provisioning SVM %s with root volume %s on aggregate %s: %s'
% (self.name, self.root_volume, self.root_volume_aggregate, to_native(e)),
exception=traceback.format_exc())
def delete_vserver(self):
vserver_delete = netapp_utils.zapi.NaElement.create_node_with_children(
'vserver-destroy', **{'vserver-name': self.name})
try:
self.server.invoke_successfully(vserver_delete,
enable_tunneling=False)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error deleting SVM %s with root volume %s on aggregate %s: %s'
% (self.name, self.root_volume, self.root_volume_aggregate, to_native(e)),
exception=traceback.format_exc())
def rename_vserver(self):
vserver_rename = netapp_utils.zapi.NaElement.create_node_with_children(
'vserver-rename', **{'vserver-name': self.name,
'new-name': self.name})
try:
self.server.invoke_successfully(vserver_rename,
enable_tunneling=False)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error renaming SVM %s: %s' % (self.name, to_native(e)),
exception=traceback.format_exc())
def apply(self):
changed = False
vserver_exists = self.get_vserver()
rename_vserver = False
if vserver_exists:
if self.state == 'absent':
changed = True
elif self.state == 'present':
# Update properties
pass
else:
if self.state == 'present':
changed = True
if changed:
if self.module.check_mode:
pass
else:
if self.state == 'present':
if not vserver_exists:
self.create_vserver()
else:
if rename_vserver:
self.rename_vserver()
elif self.state == 'absent':
self.delete_vserver()
self.module.exit_json(changed=changed)
def main():
v = NetAppCDOTSVM()
v.apply()
if __name__ == '__main__':
main()

View file

@@ -0,0 +1,306 @@
#!/usr/bin/python
# (c) 2017, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: na_cdot_user
short_description: useradmin configuration and management
extends_documentation_fragment:
- netapp.ontap.netapp.ontap
author: Sumit Kumar (@timuster) <sumit4@netapp.com>
deprecated:
removed_in: '2.11'
why: Updated modules released with increased functionality
alternative: Use M(na_ontap_user) instead.
description:
- Create or destroy users.
options:
state:
description:
- Whether the specified user should exist or not.
required: true
choices: ['present', 'absent']
name:
description:
- The name of the user to manage.
required: true
application:
description:
- Applications to grant access to.
required: true
choices: ['console', 'http','ontapi','rsh','snmp','sp','ssh','telnet']
authentication_method:
description:
- Authentication method for the application.
- Not all authentication methods are valid for an application.
- Valid authentication methods for each application are as denoted in I(authentication_choices_description).
- password for console application
- password, domain, nsswitch, cert for http application.
- password, domain, nsswitch, cert for ontapi application.
- community for snmp application (when creating SNMPv1 and SNMPv2 users).
- usm and community for snmp application (when creating SNMPv3 users).
- password for sp application.
- password for rsh application.
- password for telnet application.
- password, publickey, domain, nsswitch for ssh application.
required: true
choices: ['community', 'password', 'publickey', 'domain', 'nsswitch', 'usm']
set_password:
description:
- Password for the user account.
- It is ignored for creating snmp users, but is required for creating non-snmp users.
- For an existing user, this value will be used as the new password.
role_name:
description:
- The name of the role. Required when C(state=present)
vserver:
description:
- The name of the vserver to use.
required: true
'''
EXAMPLES = """
- name: Create User
na_cdot_user:
state: present
name: SampleUser
application: ssh
authentication_method: password
set_password: apn1242183u1298u41
role_name: vsadmin
vserver: ansibleVServer
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppCDOTUser(object):
"""
Common operations to manage users and roles.
"""
def __init__(self):
self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
name=dict(required=True, type='str'),
application=dict(required=True, type='str', choices=[
'console', 'http', 'ontapi', 'rsh',
'snmp', 'sp', 'ssh', 'telnet']),
authentication_method=dict(required=True, type='str',
choices=['community', 'password',
'publickey', 'domain',
'nsswitch', 'usm']),
set_password=dict(required=False, type='str', default=None),
role_name=dict(required=False, type='str'),
vserver=dict(required=True, type='str'),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
required_if=[
('state', 'present', ['role_name'])
],
supports_check_mode=True
)
p = self.module.params
# set up state variables
self.state = p['state']
self.name = p['name']
self.application = p['application']
self.authentication_method = p['authentication_method']
self.set_password = p['set_password']
self.role_name = p['role_name']
self.vserver = p['vserver']
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_ontap_zapi(module=self.module)
def get_user(self):
"""
Checks if the user exists.
:return:
True if user found
False if user is not found
:rtype: bool
"""
security_login_get_iter = netapp_utils.zapi.NaElement('security-login-get-iter')
query_details = netapp_utils.zapi.NaElement.create_node_with_children(
'security-login-account-info', **{'vserver': self.vserver,
'user-name': self.name,
'application': self.application,
'authentication-method':
self.authentication_method})
query = netapp_utils.zapi.NaElement('query')
query.add_child_elem(query_details)
security_login_get_iter.add_child_elem(query)
try:
result = self.server.invoke_successfully(security_login_get_iter,
enable_tunneling=False)
if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
return True
else:
return False
except netapp_utils.zapi.NaApiError as e:
# Error 16034 denotes a user not being found.
if to_native(e.code) == "16034":
return False
else:
self.module.fail_json(msg='Error getting user %s: %s' % (self.name, to_native(e)),
exception=traceback.format_exc())
def create_user(self):
user_create = netapp_utils.zapi.NaElement.create_node_with_children(
'security-login-create', **{'vserver': self.vserver,
'user-name': self.name,
'application': self.application,
'authentication-method':
self.authentication_method,
'role-name': self.role_name})
if self.set_password is not None:
user_create.add_new_child('password', self.set_password)
try:
self.server.invoke_successfully(user_create,
enable_tunneling=False)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error creating user %s: %s' % (self.name, to_native(e)),
exception=traceback.format_exc())
def delete_user(self):
user_delete = netapp_utils.zapi.NaElement.create_node_with_children(
'security-login-delete', **{'vserver': self.vserver,
'user-name': self.name,
'application': self.application,
'authentication-method':
self.authentication_method})
try:
self.server.invoke_successfully(user_delete,
enable_tunneling=False)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error removing user %s: %s' % (self.name, to_native(e)),
exception=traceback.format_exc())
def change_password(self):
"""
Changes the password
:return:
True if password updated
False if password is not updated
:rtype: bool
"""
self.server.set_vserver(self.vserver)
modify_password = netapp_utils.zapi.NaElement.create_node_with_children(
'security-login-modify-password', **{
'new-password': str(self.set_password),
'user-name': self.name})
try:
self.server.invoke_successfully(modify_password,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as e:
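            # This ZAPI error code is treated as "password not updated" rather
            # than a failure; any other error aborts the module.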
if to_native(e.code) == '13114':
return False
else:
self.module.fail_json(msg='Error setting password for user %s: %s' % (self.name, to_native(e)),
exception=traceback.format_exc())
self.server.set_vserver(None)
return True
def apply(self):
property_changed = False
password_changed = False
user_exists = self.get_user()
if user_exists:
if self.state == 'absent':
property_changed = True
elif self.state == 'present':
if self.set_password is not None:
password_changed = self.change_password()
else:
if self.state == 'present':
# Check if anything needs to be updated
property_changed = True
if property_changed:
if self.module.check_mode:
pass
else:
if self.state == 'present':
if not user_exists:
self.create_user()
# Add ability to update parameters.
elif self.state == 'absent':
self.delete_user()
changed = property_changed or password_changed
self.module.exit_json(changed=changed)
def main():
v = NetAppCDOTUser()
v.apply()
if __name__ == '__main__':
main()

View file

@ -0,0 +1,232 @@
#!/usr/bin/python
# (c) 2017, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: na_cdot_user_role
short_description: useradmin configuration and management
extends_documentation_fragment:
- netapp.ontap.netapp.ontap
author: Sumit Kumar (@timuster) <sumit4@netapp.com>
deprecated:
removed_in: '2.11'
why: Updated modules released with increased functionality
alternative: Use M(na_ontap_user_role) instead.
description:
- Create or destroy user roles
options:
state:
description:
    - Whether the specified user role should exist or not.
required: true
choices: ['present', 'absent']
name:
description:
- The name of the role to manage.
required: true
command_directory_name:
description:
- The command or command directory to which the role has an access.
required: true
access_level:
description:
    - The access level of the role.
choices: ['none', 'readonly', 'all']
default: 'all'
vserver:
description:
- The name of the vserver to use.
required: true
'''
EXAMPLES = """
- name: Create User Role
na_cdot_user_role:
state: present
name: ansibleRole
command_directory_name: DEFAULT
access_level: none
vserver: ansibleVServer
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppCDOTUserRole(object):
def __init__(self):
self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
name=dict(required=True, type='str'),
command_directory_name=dict(required=True, type='str'),
access_level=dict(required=False, type='str', default='all',
choices=['none', 'readonly', 'all']),
vserver=dict(required=True, type='str'),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
p = self.module.params
# set up state variables
self.state = p['state']
self.name = p['name']
self.command_directory_name = p['command_directory_name']
self.access_level = p['access_level']
self.vserver = p['vserver']
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_ontap_zapi(module=self.module)
def get_role(self):
"""
Checks if the role exists for specific command-directory-name.
:return:
True if role found
False if role is not found
:rtype: bool
"""
security_login_role_get_iter = netapp_utils.zapi.NaElement(
'security-login-role-get-iter')
query_details = netapp_utils.zapi.NaElement.create_node_with_children(
'security-login-role-info', **{'vserver': self.vserver,
'role-name': self.name,
'command-directory-name':
self.command_directory_name})
query = netapp_utils.zapi.NaElement('query')
query.add_child_elem(query_details)
security_login_role_get_iter.add_child_elem(query)
try:
result = self.server.invoke_successfully(
security_login_role_get_iter, enable_tunneling=False)
except netapp_utils.zapi.NaApiError as e:
# Error 16031 denotes a role not being found.
if to_native(e.code) == "16031":
return False
else:
self.module.fail_json(msg='Error getting role %s: %s' % (self.name, to_native(e)),
exception=traceback.format_exc())
if (result.get_child_by_name('num-records') and
int(result.get_child_content('num-records')) >= 1):
return True
else:
return False
def create_role(self):
role_create = netapp_utils.zapi.NaElement.create_node_with_children(
'security-login-role-create', **{'vserver': self.vserver,
'role-name': self.name,
'command-directory-name':
self.command_directory_name,
'access-level':
self.access_level})
try:
self.server.invoke_successfully(role_create,
enable_tunneling=False)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error creating role %s: %s' % (self.name, to_native(e)),
exception=traceback.format_exc())
def delete_role(self):
role_delete = netapp_utils.zapi.NaElement.create_node_with_children(
'security-login-role-delete', **{'vserver': self.vserver,
'role-name': self.name,
'command-directory-name':
self.command_directory_name})
try:
self.server.invoke_successfully(role_delete,
enable_tunneling=False)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error removing role %s: %s' % (self.name, to_native(e)),
exception=traceback.format_exc())
def apply(self):
changed = False
role_exists = self.get_role()
if role_exists:
if self.state == 'absent':
changed = True
# Check if properties need to be updated
else:
if self.state == 'present':
changed = True
if changed:
if self.module.check_mode:
pass
else:
if self.state == 'present':
if not role_exists:
self.create_role()
# Update properties
elif self.state == 'absent':
self.delete_role()
self.module.exit_json(changed=changed)
def main():
v = NetAppCDOTUserRole()
v.apply()
if __name__ == '__main__':
main()

View file

@ -0,0 +1,442 @@
#!/usr/bin/python
# (c) 2017, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: na_cdot_volume
short_description: Manage NetApp cDOT volumes
extends_documentation_fragment:
- netapp.ontap.netapp.ontap
author: Sumit Kumar (@timuster) <sumit4@netapp.com>
deprecated:
removed_in: '2.11'
why: Updated modules released with increased functionality
alternative: Use M(na_ontap_volume) instead.
description:
- Create or destroy volumes on NetApp cDOT
options:
state:
description:
- Whether the specified volume should exist or not.
required: true
choices: ['present', 'absent']
name:
description:
- The name of the volume to manage.
required: true
infinite:
description:
- Set True if the volume is an Infinite Volume.
type: bool
default: 'no'
online:
description:
- Whether the specified volume is online, or not.
type: bool
default: 'yes'
aggregate_name:
description:
- The name of the aggregate the flexvol should exist on. Required when C(state=present).
size:
description:
- The size of the volume in (size_unit). Required when C(state=present).
size_unit:
description:
- The unit used to interpret the size parameter.
choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
default: 'gb'
vserver:
description:
- Name of the vserver to use.
required: true
junction_path:
description:
    - Junction path at which to mount the volume.
required: false
export_policy:
description:
- Export policy to set for the specified junction path.
required: false
default: default
snapshot_policy:
description:
- Snapshot policy to set for the specified volume.
required: false
default: default
'''
EXAMPLES = """
- name: Create FlexVol
na_cdot_volume:
state: present
name: ansibleVolume
infinite: False
aggregate_name: aggr1
size: 20
size_unit: mb
vserver: ansibleVServer
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
junction_path: /ansibleVolume
export_policy: all_nfs_networks
snapshot_policy: daily
- name: Make FlexVol offline
na_cdot_volume:
state: present
name: ansibleVolume
infinite: False
online: False
vserver: ansibleVServer
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppCDOTVolume(object):
def __init__(self):
self._size_unit_map = dict(
bytes=1,
b=1,
kb=1024,
mb=1024 ** 2,
gb=1024 ** 3,
tb=1024 ** 4,
pb=1024 ** 5,
eb=1024 ** 6,
zb=1024 ** 7,
yb=1024 ** 8
)
self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
name=dict(required=True, type='str'),
is_infinite=dict(required=False, type='bool', default=False, aliases=['infinite']),
is_online=dict(required=False, type='bool', default=True, aliases=['online']),
size=dict(type='int'),
size_unit=dict(default='gb',
choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb',
'pb', 'eb', 'zb', 'yb'], type='str'),
aggregate_name=dict(type='str'),
vserver=dict(required=True, type='str', default=None),
junction_path=dict(required=False, type='str', default=None),
export_policy=dict(required=False, type='str', default='default'),
snapshot_policy=dict(required=False, type='str', default='default'),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
required_if=[
('state', 'present', ['aggregate_name', 'size'])
],
supports_check_mode=True
)
p = self.module.params
# set up state variables
self.state = p['state']
self.name = p['name']
self.is_infinite = p['is_infinite']
self.is_online = p['is_online']
self.size_unit = p['size_unit']
self.vserver = p['vserver']
self.junction_path = p['junction_path']
self.export_policy = p['export_policy']
self.snapshot_policy = p['snapshot_policy']
if p['size'] is not None:
self.size = p['size'] * self._size_unit_map[self.size_unit]
else:
self.size = None
self.aggregate_name = p['aggregate_name']
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_ontap_zapi(module=self.module, vserver=self.vserver)
def get_volume(self):
"""
Return details about the volume
:param:
name : Name of the volume
:return: Details about the volume. None if not found.
:rtype: dict
"""
volume_info = netapp_utils.zapi.NaElement('volume-get-iter')
volume_attributes = netapp_utils.zapi.NaElement('volume-attributes')
volume_id_attributes = netapp_utils.zapi.NaElement('volume-id-attributes')
volume_id_attributes.add_new_child('name', self.name)
volume_attributes.add_child_elem(volume_id_attributes)
query = netapp_utils.zapi.NaElement('query')
query.add_child_elem(volume_attributes)
volume_info.add_child_elem(query)
result = self.server.invoke_successfully(volume_info, True)
return_value = None
if result.get_child_by_name('num-records') and \
int(result.get_child_content('num-records')) >= 1:
volume_attributes = result.get_child_by_name(
'attributes-list').get_child_by_name(
'volume-attributes')
# Get volume's current size
volume_space_attributes = volume_attributes.get_child_by_name(
'volume-space-attributes')
current_size = volume_space_attributes.get_child_content('size')
# Get volume's state (online/offline)
volume_state_attributes = volume_attributes.get_child_by_name(
'volume-state-attributes')
current_state = volume_state_attributes.get_child_content('state')
is_online = None
if current_state == "online":
is_online = True
elif current_state == "offline":
is_online = False
return_value = {
'name': self.name,
'size': current_size,
'is_online': is_online,
}
return return_value
def create_volume(self):
create_parameters = {'volume': self.name,
'containing-aggr-name': self.aggregate_name,
'size': str(self.size),
}
if self.junction_path:
create_parameters['junction-path'] = str(self.junction_path)
if self.export_policy != 'default':
create_parameters['export-policy'] = str(self.export_policy)
if self.snapshot_policy != 'default':
create_parameters['snapshot-policy'] = str(self.snapshot_policy)
volume_create = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-create', **create_parameters)
try:
self.server.invoke_successfully(volume_create,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error provisioning volume %s of size %s: %s' % (self.name, self.size, to_native(e)),
exception=traceback.format_exc())
def delete_volume(self):
if self.is_infinite:
volume_delete = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-destroy-async', **{'volume-name': self.name})
else:
volume_delete = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-destroy', **{'name': self.name, 'unmount-and-offline':
'true'})
try:
self.server.invoke_successfully(volume_delete,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error deleting volume %s: %s' % (self.name, to_native(e)),
exception=traceback.format_exc())
def rename_volume(self):
"""
Rename the volume.
Note: 'is_infinite' needs to be set to True in order to rename an
Infinite Volume.
"""
if self.is_infinite:
volume_rename = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-rename-async',
**{'volume-name': self.name, 'new-volume-name': str(
self.name)})
else:
volume_rename = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-rename', **{'volume': self.name, 'new-volume-name': str(
self.name)})
try:
self.server.invoke_successfully(volume_rename,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error renaming volume %s: %s' % (self.name, to_native(e)),
exception=traceback.format_exc())
def resize_volume(self):
"""
Re-size the volume.
Note: 'is_infinite' needs to be set to True in order to rename an
Infinite Volume.
"""
if self.is_infinite:
volume_resize = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-size-async',
**{'volume-name': self.name, 'new-size': str(
self.size)})
else:
volume_resize = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-size', **{'volume': self.name, 'new-size': str(
self.size)})
try:
self.server.invoke_successfully(volume_resize,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error re-sizing volume %s: %s' % (self.name, to_native(e)),
exception=traceback.format_exc())
def change_volume_state(self):
"""
Change volume's state (offline/online).
Note: 'is_infinite' needs to be set to True in order to change the
state of an Infinite Volume.
"""
state_requested = None
if self.is_online:
# Requested state is 'online'.
state_requested = "online"
if self.is_infinite:
volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-online-async',
**{'volume-name': self.name})
else:
volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-online',
**{'name': self.name})
else:
# Requested state is 'offline'.
state_requested = "offline"
if self.is_infinite:
volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-offline-async',
**{'volume-name': self.name})
else:
volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-offline',
**{'name': self.name})
try:
self.server.invoke_successfully(volume_change_state,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error changing the state of volume %s to %s: %s' %
(self.name, state_requested, to_native(e)),
exception=traceback.format_exc())
def apply(self):
changed = False
volume_exists = False
rename_volume = False
resize_volume = False
volume_detail = self.get_volume()
if volume_detail:
volume_exists = True
if self.state == 'absent':
changed = True
elif self.state == 'present':
if str(volume_detail['size']) != str(self.size):
resize_volume = True
changed = True
if (volume_detail['is_online'] is not None) and (volume_detail['is_online'] != self.is_online):
changed = True
if self.is_online is False:
# Volume is online, but requested state is offline
pass
else:
# Volume is offline but requested state is online
pass
else:
if self.state == 'present':
changed = True
if changed:
if self.module.check_mode:
pass
else:
if self.state == 'present':
if not volume_exists:
self.create_volume()
else:
if resize_volume:
self.resize_volume()
if volume_detail['is_online'] is not \
None and volume_detail['is_online'] != \
self.is_online:
self.change_volume_state()
# Ensure re-naming is the last change made.
if rename_volume:
self.rename_volume()
elif self.state == 'absent':
self.delete_volume()
self.module.exit_json(changed=changed)
def main():
v = NetAppCDOTVolume()
v.apply()
if __name__ == '__main__':
main()

View file

@ -0,0 +1,614 @@
#!/usr/bin/python
# (c) 2018 Piotr Olczak <piotr.olczak@redhat.com>
# (c) 2018-2019, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'certified'}
DOCUMENTATION = '''
module: na_ontap_gather_facts
deprecated:
removed_in: '2.13'
why: Deprecated in favour of C(_info) module.
alternative: Use M(na_ontap_info) instead.
author: Piotr Olczak (@dprts) <polczak@redhat.com>
extends_documentation_fragment:
- netapp.ontap.netapp.na_ontap
short_description: NetApp information gatherer
description:
- This module allows you to gather various information about ONTAP configuration
requirements:
- netapp_lib
options:
state:
description:
- Returns "info"
default: "info"
choices: ['info']
gather_subset:
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
"aggregate_info", "cluster_node_info", "igroup_info", "lun_info", "net_dns_info",
"net_ifgrp_info",
"net_interface_info", "net_port_info", "nvme_info", "nvme_interface_info",
"nvme_namespace_info", "nvme_subsystem_info", "ontap_version",
"qos_adaptive_policy_info", "qos_policy_info", "security_key_manager_key_info",
"security_login_account_info", "storage_failover_info", "volume_info",
"vserver_info", "vserver_login_banner_info", "vserver_motd_info", "vserver_nfs_info"
Can specify a list of values to include a larger subset. Values can also be used
with an initial C(M(!)) to specify that a specific subset should
not be collected.
- nvme is supported with ONTAP 9.4 onwards.
- use "help" to get a list of supported facts for your system.
default: "all"
'''
EXAMPLES = '''
- name: Get NetApp info (Password Authentication)
na_ontap_gather_facts:
state: info
hostname: "na-vsim"
username: "admin"
password: "admins_password"
- debug:
var: ontap_facts
- name: Limit Fact Gathering to Aggregate Information
na_ontap_gather_facts:
state: info
hostname: "na-vsim"
username: "admin"
password: "admins_password"
gather_subset: "aggregate_info"
- name: Limit Fact Gathering to Volume and Lun Information
na_ontap_gather_facts:
state: info
hostname: "na-vsim"
username: "admin"
password: "admins_password"
gather_subset:
- volume_info
- lun_info
- name: Gather all facts except for volume and lun information
na_ontap_gather_facts:
state: info
hostname: "na-vsim"
username: "admin"
password: "admins_password"
gather_subset:
- "!volume_info"
- "!lun_info"
'''
RETURN = '''
ontap_facts:
description: Returns various information about NetApp cluster configuration
returned: always
type: dict
sample: '{
"ontap_facts": {
"aggregate_info": {...},
"cluster_node_info": {...},
"net_dns_info": {...},
"net_ifgrp_info": {...},
"net_interface_info": {...},
"net_port_info": {...},
"security_key_manager_key_info": {...},
"security_login_account_info": {...},
"volume_info": {...},
"lun_info": {...},
"storage_failover_info": {...},
"vserver_login_banner_info": {...},
"vserver_motd_info": {...},
"vserver_info": {...},
"vserver_nfs_info": {...},
"ontap_version": {...},
"igroup_info": {...},
"qos_policy_info": {...},
"qos_adaptive_policy_info": {...}
}'
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
try:
import xmltodict
HAS_XMLTODICT = True
except ImportError:
HAS_XMLTODICT = False
try:
import json
HAS_JSON = True
except ImportError:
HAS_JSON = False
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppONTAPGatherFacts(object):
'''Class with gather facts methods'''
def __init__(self, module):
self.module = module
self.netapp_info = dict()
# thanks to coreywan (https://github.com/ansible/ansible/pull/47016)
# for starting this
# min_version identifies the ontapi version which supports this ZAPI
# use 0 if it is supported since 9.1
self.fact_subsets = {
'net_dns_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'net-dns-get-iter',
'attribute': 'net-dns-info',
'field': 'vserver-name',
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'net_interface_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'net-interface-get-iter',
'attribute': 'net-interface-info',
'field': 'interface-name',
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'net_port_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'net-port-get-iter',
'attribute': 'net-port-info',
'field': ('node', 'port'),
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'cluster_node_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'cluster-node-get-iter',
'attribute': 'cluster-node-info',
'field': 'node-name',
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'security_login_account_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'security-login-get-iter',
'attribute': 'security-login-account-info',
'field': ('vserver', 'user-name', 'application', 'authentication-method'),
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'aggregate_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'aggr-get-iter',
'attribute': 'aggr-attributes',
'field': 'aggregate-name',
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'volume_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'volume-get-iter',
'attribute': 'volume-attributes',
'field': ('name', 'owning-vserver-name'),
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'lun_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'lun-get-iter',
'attribute': 'lun-info',
'field': 'path',
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'storage_failover_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'cf-get-iter',
'attribute': 'storage-failover-info',
'field': 'node',
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'vserver_motd_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'vserver-motd-get-iter',
'attribute': 'vserver-motd-info',
'field': 'vserver',
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'vserver_login_banner_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'vserver-login-banner-get-iter',
'attribute': 'vserver-login-banner-info',
'field': 'vserver',
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'security_key_manager_key_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'security-key-manager-key-get-iter',
'attribute': 'security-key-manager-key-info',
'field': ('node', 'key-id'),
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'vserver_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'vserver-get-iter',
'attribute': 'vserver-info',
'field': 'vserver-name',
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'vserver_nfs_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'nfs-service-get-iter',
'attribute': 'nfs-info',
'field': 'vserver',
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'net_ifgrp_info': {
'method': self.get_ifgrp_info,
'kwargs': {},
'min_version': '0',
},
'ontap_version': {
'method': self.ontapi,
'kwargs': {},
'min_version': '0',
},
'system_node_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'system-node-get-iter',
'attribute': 'node-details-info',
'field': 'node',
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'igroup_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'igroup-get-iter',
'attribute': 'initiator-group-info',
'field': ('vserver', 'initiator-group-name'),
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'qos_policy_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'qos-policy-group-get-iter',
'attribute': 'qos-policy-group-info',
'field': 'policy-group',
'query': {'max-records': '1024'},
},
'min_version': '0',
},
# supported in ONTAP 9.3 and onwards
'qos_adaptive_policy_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'qos-adaptive-policy-group-get-iter',
'attribute': 'qos-adaptive-policy-group-info',
'field': 'policy-group',
'query': {'max-records': '1024'},
},
'min_version': '130',
},
# supported in ONTAP 9.4 and onwards
'nvme_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'nvme-get-iter',
'attribute': 'nvme-target-service-info',
'field': 'vserver',
'query': {'max-records': '1024'},
},
'min_version': '140',
},
'nvme_interface_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'nvme-interface-get-iter',
'attribute': 'nvme-interface-info',
'field': 'vserver',
'query': {'max-records': '1024'},
},
'min_version': '140',
},
'nvme_subsystem_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'nvme-subsystem-get-iter',
'attribute': 'nvme-subsystem-info',
'field': 'subsystem',
'query': {'max-records': '1024'},
},
'min_version': '140',
},
'nvme_namespace_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'nvme-namespace-get-iter',
'attribute': 'nvme-namespace-info',
'field': 'path',
'query': {'max-records': '1024'},
},
'min_version': '140',
},
}
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
def ontapi(self):
'''Method to get ontapi version'''
api = 'system-get-ontapi-version'
api_call = netapp_utils.zapi.NaElement(api)
try:
results = self.server.invoke_successfully(api_call, enable_tunneling=False)
ontapi_version = results.get_child_content('minor-version')
return ontapi_version if ontapi_version is not None else '0'
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg="Error calling API %s: %s" %
(api, to_native(error)), exception=traceback.format_exc())
def call_api(self, call, query=None):
'''Main method to run an API call'''
api_call = netapp_utils.zapi.NaElement(call)
result = None
if query:
for key, val in query.items():
# Can val be nested?
api_call.add_new_child(key, val)
try:
result = self.server.invoke_successfully(api_call, enable_tunneling=False)
return result
except netapp_utils.zapi.NaApiError as error:
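            # Some calls are allowed to fail quietly; the caller then receives
            # None instead of the module failing outright.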
if call in ['security-key-manager-key-get-iter']:
return result
else:
self.module.fail_json(msg="Error calling API %s: %s"
% (call, to_native(error)), exception=traceback.format_exc())
def get_ifgrp_info(self):
'''Method to get network port ifgroups info'''
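        # Reuse the already-gathered net_port_info when available, otherwise
        # collect it now; then query each port of type 'if_group' individually.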
try:
net_port_info = self.netapp_info['net_port_info']
except KeyError:
net_port_info_calls = self.fact_subsets['net_port_info']
net_port_info = net_port_info_calls['method'](**net_port_info_calls['kwargs'])
interfaces = net_port_info.keys()
ifgrps = []
for ifn in interfaces:
if net_port_info[ifn]['port_type'] == 'if_group':
ifgrps.append(ifn)
net_ifgrp_info = dict()
for ifgrp in ifgrps:
query = dict()
query['node'], query['ifgrp-name'] = ifgrp.split(':')
tmp = self.get_generic_get_iter('net-port-ifgrp-get', field=('node', 'ifgrp-name'),
attribute='net-ifgrp-info', query=query)
net_ifgrp_info = net_ifgrp_info.copy()
net_ifgrp_info.update(tmp)
return net_ifgrp_info
def get_generic_get_iter(self, call, attribute=None, field=None, query=None):
'''Method to run a generic get-iter call'''
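        # When 'field' is given, results are returned as a dict keyed on that
        # field (tuple fields are joined with ':'); otherwise a plain list is built.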
generic_call = self.call_api(call, query)
if call == 'net-port-ifgrp-get':
children = 'attributes'
else:
children = 'attributes-list'
if generic_call is None:
return None
if field is None:
out = []
else:
out = {}
attributes_list = generic_call.get_child_by_name(children)
if attributes_list is None:
return None
for child in attributes_list.get_children():
dic = xmltodict.parse(child.to_string(), xml_attribs=False)
if attribute is not None:
dic = dic[attribute]
if isinstance(field, str):
unique_key = _finditem(dic, field)
out = out.copy()
out.update({unique_key: convert_keys(json.loads(json.dumps(dic)))})
elif isinstance(field, tuple):
unique_key = ':'.join([_finditem(dic, el) for el in field])
out = out.copy()
out.update({unique_key: convert_keys(json.loads(json.dumps(dic)))})
else:
out.append(convert_keys(json.loads(json.dumps(dic))))
return out
def get_all(self, gather_subset):
'''Method to get all subsets'''
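        # Log an EMS event against the cluster vserver, record the ONTAPI
        # version, then run the collector for every requested (and supported) subset.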
results = netapp_utils.get_cserver(self.server)
cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
netapp_utils.ems_log_event("na_ontap_gather_facts", cserver)
self.netapp_info['ontap_version'] = self.ontapi()
run_subset = self.get_subset(gather_subset, self.netapp_info['ontap_version'])
if 'help' in gather_subset:
self.netapp_info['help'] = sorted(run_subset)
else:
for subset in run_subset:
call = self.fact_subsets[subset]
self.netapp_info[subset] = call['method'](**call['kwargs'])
return self.netapp_info
def get_subset(self, gather_subset, version):
'''Method to get a single subset'''
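        # 'all' selects every subset supported by the detected ONTAPI version,
        # a leading '!' excludes a subset, and 'help' lists the supported subsets.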
runable_subsets = set()
exclude_subsets = set()
usable_subsets = [key for key in self.fact_subsets.keys() if version >= self.fact_subsets[key]['min_version']]
if 'help' in gather_subset:
return usable_subsets
for subset in gather_subset:
if subset == 'all':
runable_subsets.update(usable_subsets)
return runable_subsets
if subset.startswith('!'):
subset = subset[1:]
if subset == 'all':
return set()
exclude = True
else:
exclude = False
if subset not in usable_subsets:
if subset not in self.fact_subsets.keys():
self.module.fail_json(msg='Bad subset: %s' % subset)
self.module.fail_json(msg='Remote system at version %s does not support %s' %
(version, subset))
if exclude:
exclude_subsets.add(subset)
else:
runable_subsets.add(subset)
if not runable_subsets:
runable_subsets.update(usable_subsets)
runable_subsets.difference_update(exclude_subsets)
return runable_subsets
# https://stackoverflow.com/questions/14962485/finding-a-key-recursively-in-a-dictionary
def __finditem(obj, key):
if key in obj:
return obj[key]
for dummy, val in obj.items():
if isinstance(val, dict):
item = __finditem(val, key)
if item is not None:
return item
return None
def _finditem(obj, key):
value = __finditem(obj, key)
if value is not None:
return value
raise KeyError(key)
def convert_keys(d_param):
'''Method to convert hyphen to underscore'''
out = {}
if isinstance(d_param, dict):
for key, val in d_param.items():
val = convert_keys(val)
out[key.replace('-', '_')] = val
else:
return d_param
return out
def main():
'''Execute action'''
argument_spec = netapp_utils.na_ontap_host_argument_spec()
argument_spec.update(dict(
state=dict(default='info', choices=['info']),
gather_subset=dict(default=['all'], type='list'),
))
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
if not HAS_XMLTODICT:
module.fail_json(msg="xmltodict missing")
if not HAS_JSON:
module.fail_json(msg="json missing")
state = module.params['state']
gather_subset = module.params['gather_subset']
if gather_subset is None:
gather_subset = ['all']
gf_obj = NetAppONTAPGatherFacts(module)
gf_all = gf_obj.get_all(gather_subset)
result = {'state': state, 'changed': False}
module.exit_json(ansible_facts={'ontap_facts': gf_all}, **result)
if __name__ == '__main__':
main()

View file

@ -0,0 +1,280 @@
#!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_alerts
short_description: NetApp E-Series manage email notification settings
description:
- Certain E-Series systems have the capability to send email notifications on potentially critical events.
- This module will allow the owner of the system to specify email recipients for these messages.
author: Michael Price (@lmprice)
extends_documentation_fragment:
- netapp.ontap.netapp.eseries
options:
state:
description:
- Enable/disable the sending of email-based alerts.
default: enabled
required: false
choices:
- enabled
- disabled
server:
description:
- A fully qualified domain name, IPv4 address, or IPv6 address of a mail server.
- To use a fully qualified domain name, you must configure a DNS server on both controllers using
M(netapp_e_mgmt_interface).
- Required when I(state=enabled).
required: no
sender:
description:
- This is the sender that the recipient will see. It doesn't necessarily need to be a valid email account.
- Required when I(state=enabled).
required: no
contact:
description:
- Allows the owner to specify some free-form contact information to be included in the emails.
- This is typically utilized to provide a contact phone number.
required: no
recipients:
description:
- The email addresses that will receive the email notifications.
- Required when I(state=enabled).
required: no
test:
description:
- When a change is detected in the configuration, a test email will be sent.
- This may take a few minutes to process.
- Only applicable if I(state=enabled).
default: no
type: bool
log_path:
description:
- Path to a file on the Ansible control node to be used for debug logging
required: no
notes:
- Check mode is supported.
- Alertable messages are a subset of messages shown by the Major Event Log (MEL), of the storage-system. Examples
of alertable messages include drive failures, failed controllers, loss of redundancy, and other warning/critical
events.
- This API is currently only supported with the Embedded Web Services API v2.0 and higher.
'''
EXAMPLES = """
- name: Enable email-based alerting
netapp_e_alerts:
state: enabled
sender: noreply@example.com
server: mail@example.com
contact: "Phone: 1-555-555-5555"
recipients:
- name1@example.com
- name2@example.com
api_url: "10.1.1.1:8443"
api_username: "admin"
api_password: "myPass"
- name: Disable alerting
netapp_e_alerts:
state: disabled
api_url: "10.1.1.1:8443"
api_username: "admin"
api_password: "myPass"
"""
RETURN = """
msg:
description: Success message
returned: on success
type: str
sample: The settings have been updated.
"""
import json
import logging
from pprint import pformat
import re
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import request, eseries_host_argument_spec
from ansible.module_utils._text import to_native
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
class Alerts(object):
def __init__(self):
argument_spec = eseries_host_argument_spec()
argument_spec.update(dict(
state=dict(type='str', required=False, default='enabled',
choices=['enabled', 'disabled']),
server=dict(type='str', required=False, ),
sender=dict(type='str', required=False, ),
contact=dict(type='str', required=False, ),
recipients=dict(type='list', required=False, ),
test=dict(type='bool', required=False, default=False, ),
log_path=dict(type='str', required=False),
))
required_if = [
['state', 'enabled', ['server', 'sender', 'recipients']]
]
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if)
args = self.module.params
self.alerts = args['state'] == 'enabled'
self.server = args['server']
self.sender = args['sender']
self.contact = args['contact']
self.recipients = args['recipients']
self.test = args['test']
self.ssid = args['ssid']
self.url = args['api_url']
self.creds = dict(url_password=args['api_password'],
validate_certs=args['validate_certs'],
url_username=args['api_username'], )
self.check_mode = self.module.check_mode
log_path = args['log_path']
# logging setup
self._logger = logging.getLogger(self.__class__.__name__)
if log_path:
logging.basicConfig(
level=logging.DEBUG, filename=log_path, filemode='w',
format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
if not self.url.endswith('/'):
self.url += '/'
# Very basic validation on email addresses: xx@yy.zz
email = re.compile(r"[^@]+@[^@]+\.[^@]+")
if self.sender and not email.match(self.sender):
self.module.fail_json(msg="The sender (%s) provided is not a valid email address." % self.sender)
if self.recipients is not None:
for recipient in self.recipients:
if not email.match(recipient):
self.module.fail_json(msg="The recipient (%s) provided is not a valid email address." % recipient)
if len(self.recipients) < 1:
self.module.fail_json(msg="At least one recipient address must be specified.")
def get_configuration(self):
try:
(rc, result) = request(self.url + 'storage-systems/%s/device-alerts' % self.ssid, headers=HEADERS,
**self.creds)
self._logger.info("Current config: %s", pformat(result))
return result
except Exception as err:
self.module.fail_json(msg="Failed to retrieve the alerts configuration! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
def update_configuration(self):
config = self.get_configuration()
update = False
body = dict()
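        # Compare each desired setting with the current configuration and only
        # flag an update (and send a POST) when something actually differs.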
if self.alerts:
body = dict(alertingEnabled=True)
if not config['alertingEnabled']:
update = True
body.update(emailServerAddress=self.server)
if config['emailServerAddress'] != self.server:
update = True
body.update(additionalContactInformation=self.contact, sendAdditionalContactInformation=True)
if self.contact and (self.contact != config['additionalContactInformation']
or not config['sendAdditionalContactInformation']):
update = True
body.update(emailSenderAddress=self.sender)
if config['emailSenderAddress'] != self.sender:
update = True
self.recipients.sort()
if config['recipientEmailAddresses']:
config['recipientEmailAddresses'].sort()
body.update(recipientEmailAddresses=self.recipients)
if config['recipientEmailAddresses'] != self.recipients:
update = True
elif config['alertingEnabled']:
body = dict(alertingEnabled=False)
update = True
self._logger.debug(pformat(body))
if update and not self.check_mode:
try:
(rc, result) = request(self.url + 'storage-systems/%s/device-alerts' % self.ssid, method='POST',
data=json.dumps(body), headers=HEADERS, **self.creds)
# This is going to catch cases like a connection failure
except Exception as err:
self.module.fail_json(msg="We failed to set the storage-system name! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
return update
def send_test_email(self):
"""Send a test email to verify that the provided configuration is valid and functional."""
if not self.check_mode:
try:
(rc, result) = request(self.url + 'storage-systems/%s/device-alerts/alert-email-test' % self.ssid,
timeout=300, method='POST', headers=HEADERS, **self.creds)
if result['response'] != 'emailSentOK':
self.module.fail_json(msg="The test email failed with status=[%s]! Array Id [%s]."
% (result['response'], self.ssid))
# This is going to catch cases like a connection failure
except Exception as err:
self.module.fail_json(msg="We failed to send the test email! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
def update(self):
update = self.update_configuration()
if self.test and update:
self._logger.info("An update was detected and test=True, running a test.")
self.send_test_email()
if self.alerts:
msg = 'Alerting has been enabled using server=%s, sender=%s.' % (self.server, self.sender)
else:
msg = 'Alerting has been disabled.'
self.module.exit_json(msg=msg, changed=update, )
def __call__(self, *args, **kwargs):
self.update()
def main():
alerts = Alerts()
alerts()
if __name__ == '__main__':
main()

View file

@ -0,0 +1,255 @@
#!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_amg
short_description: NetApp E-Series create, remove, and update asynchronous mirror groups
description:
- Allows for the creation, removal and updating of Asynchronous Mirror Groups for NetApp E-series storage arrays
author: Kevin Hulquest (@hulquest)
extends_documentation_fragment:
- netapp.ontap.netapp.eseries
options:
name:
description:
- The name of the async array you wish to target, or create.
- If C(state) is present and the name isn't found, it will attempt to create.
required: yes
secondaryArrayId:
description:
- The ID of the secondary array to be used in mirroring process
required: yes
syncIntervalMinutes:
description:
- The synchronization interval in minutes
default: 10
manualSync:
description:
- Setting this to true will cause other synchronization values to be ignored
type: bool
default: 'no'
recoveryWarnThresholdMinutes:
description:
            - Recovery point warning threshold (minutes). The user will be warned when the age of the last good recovery point exceeds this value.
default: 20
repoUtilizationWarnThreshold:
description:
            - Repository utilization warning threshold (percent).
default: 80
interfaceType:
description:
- The intended protocol to use if both Fibre and iSCSI are available.
choices:
- iscsi
- fibre
syncWarnThresholdMinutes:
description:
- The threshold (in minutes) for notifying the user that periodic synchronization has taken too long to complete.
default: 10
state:
description:
- A C(state) of present will either create or update the async mirror group.
- A C(state) of absent will remove the async mirror group.
choices: [ absent, present ]
required: yes
'''
EXAMPLES = """
- name: AMG removal
      netapp_e_amg:
state: absent
ssid: "{{ ssid }}"
secondaryArrayId: "{{amg_secondaryArrayId}}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
new_name: "{{amg_array_name}}"
name: "{{amg_name}}"
when: amg_create
- name: AMG create
netapp_e_amg:
state: present
ssid: "{{ ssid }}"
secondaryArrayId: "{{amg_secondaryArrayId}}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
new_name: "{{amg_array_name}}"
name: "{{amg_name}}"
when: amg_create
"""
RETURN = """
msg:
description: Successful creation
returned: success
type: str
sample: '{"changed": true, "connectionType": "fc", "groupRef": "3700000060080E5000299C24000006E857AC7EEC", "groupState": "optimal", "id": "3700000060080E5000299C24000006E857AC7EEC", "label": "amg_made_by_ansible", "localRole": "primary", "mirrorChannelRemoteTarget": "9000000060080E5000299C24005B06E557AC7EEC", "orphanGroup": false, "recoveryPointAgeAlertThresholdMinutes": 20, "remoteRole": "secondary", "remoteTarget": {"nodeName": {"ioInterfaceType": "fc", "iscsiNodeName": null, "remoteNodeWWN": "20040080E5299F1C"}, "remoteRef": "9000000060080E5000299C24005B06E557AC7EEC", "scsiinitiatorTargetBaseProperties": {"ioInterfaceType": "fc", "iscsiinitiatorTargetBaseParameters": null}}, "remoteTargetId": "ansible2", "remoteTargetName": "Ansible2", "remoteTargetWwn": "60080E5000299F880000000056A25D56", "repositoryUtilizationWarnThreshold": 80, "roleChangeProgress": "none", "syncActivity": "idle", "syncCompletionTimeAlertThresholdMinutes": 10, "syncIntervalMinutes": 10, "worldWideName": "60080E5000299C24000006E857AC7EEC"}'
""" # NOQA
import json
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import request, eseries_host_argument_spec
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
def has_match(module, ssid, api_url, api_pwd, api_usr, body):
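    # Look for an existing async mirror group whose label matches the requested
    # name and check whether its sync/threshold settings already match the spec.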
compare_keys = ['syncIntervalMinutes', 'syncWarnThresholdMinutes',
'recoveryWarnThresholdMinutes', 'repoUtilizationWarnThreshold']
desired_state = dict((x, (body.get(x))) for x in compare_keys)
label_exists = False
matches_spec = False
current_state = None
async_id = None
api_data = None
desired_name = body.get('name')
endpoint = 'storage-systems/%s/async-mirrors' % ssid
url = api_url + endpoint
try:
rc, data = request(url, url_username=api_usr, url_password=api_pwd, headers=HEADERS)
except Exception as e:
module.exit_json(msg="Error finding a match. Message: %s" % to_native(e), exception=traceback.format_exc())
for async_group in data:
if async_group['label'] == desired_name:
label_exists = True
api_data = async_group
async_id = async_group['groupRef']
current_state = dict(
syncIntervalMinutes=async_group['syncIntervalMinutes'],
syncWarnThresholdMinutes=async_group['syncCompletionTimeAlertThresholdMinutes'],
recoveryWarnThresholdMinutes=async_group['recoveryPointAgeAlertThresholdMinutes'],
repoUtilizationWarnThreshold=async_group['repositoryUtilizationWarnThreshold'],
)
if current_state == desired_state:
matches_spec = True
return label_exists, matches_spec, api_data, async_id
def create_async(module, ssid, api_url, api_pwd, api_usr, body):
endpoint = 'storage-systems/%s/async-mirrors' % ssid
url = api_url + endpoint
post_data = json.dumps(body)
try:
rc, data = request(url, data=post_data, method='POST', url_username=api_usr, url_password=api_pwd,
headers=HEADERS)
except Exception as e:
module.exit_json(msg="Exception while creating aysnc mirror group. Message: %s" % to_native(e),
exception=traceback.format_exc())
return data
def update_async(module, ssid, api_url, pwd, user, body, new_name, async_id):
endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, async_id)
url = api_url + endpoint
compare_keys = ['syncIntervalMinutes', 'syncWarnThresholdMinutes',
'recoveryWarnThresholdMinutes', 'repoUtilizationWarnThreshold']
desired_state = dict((x, (body.get(x))) for x in compare_keys)
if new_name:
desired_state['new_name'] = new_name
post_data = json.dumps(desired_state)
try:
rc, data = request(url, data=post_data, method='POST', headers=HEADERS,
url_username=user, url_password=pwd)
except Exception as e:
module.exit_json(msg="Exception while updating async mirror group. Message: %s" % to_native(e),
exception=traceback.format_exc())
return data
def remove_amg(module, ssid, api_url, pwd, user, async_id):
endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, async_id)
url = api_url + endpoint
try:
rc, data = request(url, method='DELETE', url_username=user, url_password=pwd,
headers=HEADERS)
except Exception as e:
module.exit_json(msg="Exception while removing async mirror group. Message: %s" % to_native(e),
exception=traceback.format_exc())
return
def main():
argument_spec = eseries_host_argument_spec()
argument_spec.update(dict(
name=dict(required=True, type='str'),
new_name=dict(required=False, type='str'),
secondaryArrayId=dict(required=True, type='str'),
syncIntervalMinutes=dict(required=False, default=10, type='int'),
manualSync=dict(required=False, default=False, type='bool'),
recoveryWarnThresholdMinutes=dict(required=False, default=20, type='int'),
repoUtilizationWarnThreshold=dict(required=False, default=80, type='int'),
interfaceType=dict(required=False, choices=['fibre', 'iscsi'], type='str'),
state=dict(required=True, choices=['present', 'absent']),
syncWarnThresholdMinutes=dict(required=False, default=10, type='int')
))
module = AnsibleModule(argument_spec=argument_spec)
p = module.params
ssid = p.pop('ssid')
api_url = p.pop('api_url')
user = p.pop('api_username')
pwd = p.pop('api_password')
new_name = p.pop('new_name')
state = p.pop('state')
if not api_url.endswith('/'):
api_url += '/'
name_exists, spec_matches, api_data, async_id = has_match(module, ssid, api_url, pwd, user, p)
if state == 'present':
if name_exists and spec_matches:
module.exit_json(changed=False, msg="Desired state met", **api_data)
elif name_exists and not spec_matches:
results = update_async(module, ssid, api_url, pwd, user,
p, new_name, async_id)
module.exit_json(changed=True,
msg="Async mirror group updated", async_id=async_id,
**results)
elif not name_exists:
            results = create_async(module, ssid, api_url, pwd, user, p)
module.exit_json(changed=True, **results)
elif state == 'absent':
if name_exists:
remove_amg(module, ssid, api_url, pwd, user, async_id)
module.exit_json(changed=True, msg="Async mirror group removed.",
async_id=async_id)
else:
module.exit_json(changed=False,
msg="Async Mirror group: %s already absent" % p['name'])
if __name__ == '__main__':
main()

View file

@ -0,0 +1,233 @@
#!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_amg_role
short_description: NetApp E-Series update the role of a storage array within an Asynchronous Mirror Group (AMG).
description:
- Update a storage array to become the primary or secondary instance in an asynchronous mirror group
author: Kevin Hulquest (@hulquest)
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
type: bool
ssid:
description:
- The ID of the primary storage array for the async mirror action
required: yes
role:
description:
- Whether the array should be the primary or secondary array for the AMG
required: yes
choices: ['primary', 'secondary']
noSync:
description:
- Whether to avoid synchronization prior to role reversal
required: no
default: no
type: bool
force:
description:
- Whether to force the role reversal regardless of the online-state of the primary
required: no
default: no
type: bool
'''
EXAMPLES = """
- name: Update the role of a storage array
netapp_e_amg_role:
name: updating amg role
role: primary
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
"""
RETURN = """
msg:
description: Failure message
returned: failure
type: str
sample: "No Async Mirror Group with the name."
"""
import json
import traceback
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import open_url
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
def request(url, data=None, headers=None, method='GET', use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
try:
r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth)
except HTTPError as e:
r = e.fp
try:
raw_data = r.read()
if raw_data:
data = json.loads(raw_data)
else:
raw_data = None
except Exception:
if ignore_errors:
pass
else:
raise Exception(raw_data)
resp_code = r.getcode()
if resp_code >= 400 and not ignore_errors:
raise Exception(resp_code, data)
else:
return resp_code, data
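# Illustrative sketch (not part of the original module): how the request() helper above is
# typically driven against the SANtricity proxy. The endpoint shape mirrors has_match() below;
# the url, credentials and ssid arguments are placeholder assumptions.
def _example_list_amgs(api_url, api_usr, api_pwd, ssid):
    """Return the async mirror groups visible for one storage system."""
    rc, amgs = request(api_url + 'storage-systems/%s/async-mirrors' % ssid,
                       url_username=api_usr, url_password=api_pwd, headers=HEADERS)
    return amgs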
def has_match(module, ssid, api_url, api_pwd, api_usr, body, name):
amg_exists = False
has_desired_role = False
amg_id = None
amg_data = None
get_amgs = 'storage-systems/%s/async-mirrors' % ssid
url = api_url + get_amgs
try:
amg_rc, amgs = request(url, url_username=api_usr, url_password=api_pwd,
headers=HEADERS)
except Exception:
module.fail_json(msg="Failed to find AMGs on storage array. Id [%s]" % (ssid))
for amg in amgs:
if amg['label'] == name:
amg_exists = True
amg_id = amg['id']
amg_data = amg
if amg['localRole'] == body.get('role'):
has_desired_role = True
return amg_exists, has_desired_role, amg_id, amg_data
def update_amg(module, ssid, api_url, api_usr, api_pwd, body, amg_id):
endpoint = 'storage-systems/%s/async-mirrors/%s/role' % (ssid, amg_id)
url = api_url + endpoint
post_data = json.dumps(body)
try:
request(url, data=post_data, method='POST', url_username=api_usr,
url_password=api_pwd, headers=HEADERS)
except Exception as e:
module.fail_json(
msg="Failed to change role of AMG. Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, to_native(e)),
exception=traceback.format_exc())
status_endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, amg_id)
status_url = api_url + status_endpoint
try:
rc, status = request(status_url, method='GET', url_username=api_usr,
url_password=api_pwd, headers=HEADERS)
except Exception as e:
module.fail_json(
msg="Failed to check status of AMG after role reversal. "
"Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, to_native(e)),
exception=traceback.format_exc())
# Here we wait for the role reversal to complete
if 'roleChangeProgress' in status:
while status['roleChangeProgress'] != "none":
try:
rc, status = request(status_url, method='GET',
url_username=api_usr, url_password=api_pwd, headers=HEADERS)
except Exception as e:
module.fail_json(
msg="Failed to check status of AMG after role reversal. "
"Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, to_native(e)),
exception=traceback.format_exc())
return status
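# Illustrative sketch (an assumption, not part of the original module): the polling loop in
# update_amg() re-queries the AMG as fast as the proxy will answer; a bounded variant could
# pause between checks and give up after a timeout, for example:
def _example_wait_for_role_change(status_url, api_usr, api_pwd, timeout_sec=600):
    """Poll an AMG until roleChangeProgress reports 'none' or the timeout expires."""
    import time
    deadline = time.time() + timeout_sec
    while time.time() < deadline:
        dummy, status = request(status_url, method='GET', url_username=api_usr,
                                url_password=api_pwd, headers=HEADERS)
        if status.get('roleChangeProgress', 'none') == 'none':
            return status
        time.sleep(5)
    raise Exception("Timed out waiting for the AMG role change to complete")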
def main():
argument_spec = basic_auth_argument_spec()
argument_spec.update(dict(
name=dict(required=True, type='str'),
role=dict(required=True, choices=['primary', 'secondary']),
noSync=dict(required=False, type='bool', default=False),
force=dict(required=False, type='bool', default=False),
ssid=dict(required=True, type='str'),
api_url=dict(required=True),
api_username=dict(required=False),
api_password=dict(required=False, no_log=True),
))
module = AnsibleModule(argument_spec=argument_spec)
p = module.params
ssid = p.pop('ssid')
api_url = p.pop('api_url')
user = p.pop('api_username')
pwd = p.pop('api_password')
name = p.pop('name')
if not api_url.endswith('/'):
api_url += '/'
    amg_exists, has_desired_role, async_id, amg_data = has_match(module, ssid, api_url, pwd, user, p, name)
    if not amg_exists:
module.fail_json(msg="No Async Mirror Group with the name: '%s' was found" % name)
elif has_desired_role:
module.exit_json(changed=False, **amg_data)
else:
amg_data = update_amg(module, ssid, api_url, user, pwd, p, async_id)
if amg_data:
module.exit_json(changed=True, **amg_data)
else:
module.exit_json(changed=True, msg="AMG role changed.")
if __name__ == '__main__':
main()


@ -0,0 +1,260 @@
#!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_amg_sync
short_description: NetApp E-Series conduct synchronization actions on asynchronous mirror groups.
description:
- Allows for the initialization, suspension and resumption of an asynchronous mirror group's synchronization for NetApp E-series storage arrays.
author: Kevin Hulquest (@hulquest)
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
type: bool
ssid:
description:
- The ID of the storage array containing the AMG you wish to target
name:
description:
- The name of the async mirror group you wish to target
required: yes
state:
description:
- The synchronization action you'd like to take.
- If C(running) then it will begin syncing if there is no active sync or will resume a suspended sync. If there is already a sync in
progress, it will return with an OK status.
- If C(suspended) it will suspend any ongoing sync action, but return OK if there is no active sync or if the sync is already suspended
choices:
- running
- suspended
required: yes
delete_recovery_point:
description:
- Indicates whether the failures point can be deleted on the secondary if necessary to achieve the synchronization.
- If true, and if the amount of unsynchronized data exceeds the CoW repository capacity on the secondary for any member volume, the last
failures point will be deleted and synchronization will continue.
- If false, the synchronization will be suspended if the amount of unsynchronized data exceeds the CoW Repository capacity on the secondary
and the failures point will be preserved.
- "NOTE: This only has impact for newly launched syncs."
type: bool
default: no
'''
EXAMPLES = """
- name: start AMG async
netapp_e_amg_sync:
name: "{{ amg_sync_name }}"
state: running
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
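    # A second task (illustrative sketch, not from the original examples) showing how an
    # in-progress sync might be suspended; the variables are the same placeholders as above.
    - name: suspend AMG async
      netapp_e_amg_sync:
        name: "{{ amg_sync_name }}"
        state: suspended
        ssid: "{{ ssid }}"
        api_url: "{{ netapp_api_url }}"
        api_username: "{{ netapp_api_username }}"
        api_password: "{{ netapp_api_password }}"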
"""
RETURN = """
json:
description: The object attributes of the AMG.
returned: success
type: str
    sample:
{
"changed": false,
"connectionType": "fc",
"groupRef": "3700000060080E5000299C24000006EF57ACAC70",
"groupState": "optimal",
"id": "3700000060080E5000299C24000006EF57ACAC70",
"label": "made_with_ansible",
"localRole": "primary",
"mirrorChannelRemoteTarget": "9000000060080E5000299C24005B06E557AC7EEC",
"orphanGroup": false,
"recoveryPointAgeAlertThresholdMinutes": 20,
"remoteRole": "secondary",
"remoteTarget": {
"nodeName": {
"ioInterfaceType": "fc",
"iscsiNodeName": null,
"remoteNodeWWN": "20040080E5299F1C"
},
"remoteRef": "9000000060080E5000299C24005B06E557AC7EEC",
"scsiinitiatorTargetBaseProperties": {
"ioInterfaceType": "fc",
"iscsiinitiatorTargetBaseParameters": null
}
},
"remoteTargetId": "ansible2",
"remoteTargetName": "Ansible2",
"remoteTargetWwn": "60080E5000299F880000000056A25D56",
"repositoryUtilizationWarnThreshold": 80,
"roleChangeProgress": "none",
"syncActivity": "idle",
"syncCompletionTimeAlertThresholdMinutes": 10,
"syncIntervalMinutes": 10,
"worldWideName": "60080E5000299C24000006EF57ACAC70"
}
"""
import json
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.module_utils.urls import open_url
def request(url, data=None, headers=None, method='GET', use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
try:
r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth)
except HTTPError as e:
r = e.fp
try:
raw_data = r.read()
if raw_data:
data = json.loads(raw_data)
else:
raw_data = None
except Exception:
if ignore_errors:
pass
else:
raise Exception(raw_data)
resp_code = r.getcode()
if resp_code >= 400 and not ignore_errors:
raise Exception(resp_code, data)
else:
return resp_code, data
class AMGsync(object):
def __init__(self):
argument_spec = basic_auth_argument_spec()
argument_spec.update(dict(
api_username=dict(type='str', required=True),
api_password=dict(type='str', required=True, no_log=True),
api_url=dict(type='str', required=True),
name=dict(required=True, type='str'),
ssid=dict(required=True, type='str'),
state=dict(required=True, type='str', choices=['running', 'suspended']),
delete_recovery_point=dict(required=False, type='bool', default=False)
))
self.module = AnsibleModule(argument_spec=argument_spec)
args = self.module.params
self.name = args['name']
self.ssid = args['ssid']
self.state = args['state']
self.delete_recovery_point = args['delete_recovery_point']
try:
self.user = args['api_username']
self.pwd = args['api_password']
self.url = args['api_url']
except KeyError:
self.module.fail_json(msg="You must pass in api_username"
"and api_password and api_url to the module.")
self.certs = args['validate_certs']
self.post_headers = {
"Accept": "application/json",
"Content-Type": "application/json"
}
self.amg_id, self.amg_obj = self.get_amg()
def get_amg(self):
endpoint = self.url + '/storage-systems/%s/async-mirrors' % self.ssid
(rc, amg_objs) = request(endpoint, url_username=self.user, url_password=self.pwd, validate_certs=self.certs,
headers=self.post_headers)
        # filter() returns an iterator on Python 3, so collect the matches in a list first
        matching_amgs = [amg for amg in amg_objs if amg['label'] == self.name]
        try:
            amg_id = matching_amgs[0]['id']
            amg_obj = matching_amgs[0]
except IndexError:
self.module.fail_json(
msg="There is no async mirror group %s associated with storage array %s" % (self.name, self.ssid))
return amg_id, amg_obj
@property
def current_state(self):
amg_id, amg_obj = self.get_amg()
return amg_obj['syncActivity']
def run_sync_action(self):
# If we get to this point we know that the states differ, and there is no 'err' state,
# so no need to revalidate
post_body = dict()
if self.state == 'running':
if self.current_state == 'idle':
if self.delete_recovery_point:
post_body.update(dict(deleteRecoveryPointIfNecessary=self.delete_recovery_point))
suffix = 'sync'
else:
# In a suspended state
suffix = 'resume'
else:
suffix = 'suspend'
endpoint = self.url + "/storage-systems/%s/async-mirrors/%s/%s" % (self.ssid, self.amg_id, suffix)
(rc, resp) = request(endpoint, method='POST', url_username=self.user, url_password=self.pwd,
validate_certs=self.certs, data=json.dumps(post_body), headers=self.post_headers,
ignore_errors=True)
if not str(rc).startswith('2'):
self.module.fail_json(msg=str(resp['errorMessage']))
return resp
def apply(self):
state_map = dict(
running=['active'],
suspended=['userSuspended', 'internallySuspended', 'paused'],
err=['unkown', '_UNDEFINED'])
        changed = False
        if self.current_state not in state_map[self.state]:
            if self.current_state in state_map['err']:
                self.module.fail_json(
                    msg="The sync is in a state of '%s', this requires manual intervention. "
                        "Please investigate and try again" % self.current_state)
            else:
                self.amg_obj = self.run_sync_action()
                changed = True
        (ret, amg) = self.get_amg()
        self.module.exit_json(changed=changed, **amg)
def main():
sync = AMGsync()
sync.apply()
if __name__ == '__main__':
main()


@ -0,0 +1,309 @@
#!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_asup
short_description: NetApp E-Series manage auto-support settings
description:
- Allow the auto-support settings to be configured for an individual E-Series storage-system
author: Michael Price (@lmprice)
extends_documentation_fragment:
- netapp.ontap.netapp.eseries
options:
state:
description:
- Enable/disable the E-Series auto-support configuration.
- When this option is enabled, configuration, logs, and other support-related information will be relayed
to NetApp to help better support your system. No personally identifiable information, passwords, etc, will
be collected.
default: enabled
choices:
- enabled
- disabled
aliases:
- asup
- auto_support
- autosupport
active:
description:
- Enable active/proactive monitoring for ASUP. When a problem is detected by our monitoring systems, it's
possible that the bundle did not contain all of the required information at the time of the event.
Enabling this option allows NetApp support personnel to manually request transmission or re-transmission
              of support data in order to resolve the problem.
- Only applicable if I(state=enabled).
default: yes
type: bool
start:
description:
- A start hour may be specified in a range from 0 to 23 hours.
- ASUP bundles will be sent daily between the provided start and end time (UTC).
- I(start) must be less than I(end).
aliases:
- start_time
default: 0
end:
description:
- An end hour may be specified in a range from 1 to 24 hours.
- ASUP bundles will be sent daily between the provided start and end time (UTC).
- I(start) must be less than I(end).
aliases:
- end_time
default: 24
days:
description:
- A list of days of the week that ASUP bundles will be sent. A larger, weekly bundle will be sent on one
of the provided days.
choices:
- monday
- tuesday
- wednesday
- thursday
- friday
- saturday
- sunday
required: no
aliases:
- days_of_week
- schedule_days
verbose:
description:
- Provide the full ASUP configuration in the return.
default: no
required: no
type: bool
log_path:
description:
- A local path to a file to be used for debug logging
required: no
notes:
- Check mode is supported.
- Enabling ASUP will allow our support teams to monitor the logs of the storage-system in order to proactively
respond to issues with the system. It is recommended that all ASUP-related options be enabled, but they may be
disabled if desired.
- This API is currently only supported with the Embedded Web Services API v2.0 and higher.
'''
EXAMPLES = """
- name: Enable ASUP and allow pro-active retrieval of bundles
netapp_e_asup:
state: enabled
active: yes
api_url: "10.1.1.1:8443"
api_username: "admin"
api_password: "myPass"
- name: Set the ASUP schedule to only send bundles from 12 AM CST to 3 AM CST.
netapp_e_asup:
start: 17
end: 20
api_url: "10.1.1.1:8443"
api_username: "admin"
api_password: "myPass"
"""
RETURN = """
msg:
description: Success message
returned: on success
type: str
sample: The settings have been updated.
asup:
description:
- True if ASUP is enabled.
returned: on success
sample: True
type: bool
active:
description:
- True if the active option has been enabled.
returned: on success
sample: True
type: bool
cfg:
description:
- Provide the full ASUP configuration.
returned: on success when I(verbose=true).
type: complex
contains:
asupEnabled:
description:
- True if ASUP has been enabled.
type: bool
onDemandEnabled:
description:
- True if ASUP active monitoring has been enabled.
type: bool
daysOfWeek:
description:
- The days of the week that ASUP bundles will be sent.
type: list
"""
import json
import logging
from pprint import pformat
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import request, eseries_host_argument_spec
from ansible.module_utils._text import to_native
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
class Asup(object):
DAYS_OPTIONS = ['sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday']
def __init__(self):
argument_spec = eseries_host_argument_spec()
argument_spec.update(dict(
state=dict(type='str', required=False, default='enabled', aliases=['asup', 'auto_support', 'autosupport'],
choices=['enabled', 'disabled']),
active=dict(type='bool', required=False, default=True, ),
days=dict(type='list', required=False, aliases=['schedule_days', 'days_of_week'],
choices=self.DAYS_OPTIONS),
start=dict(type='int', required=False, default=0, aliases=['start_time']),
end=dict(type='int', required=False, default=24, aliases=['end_time']),
verbose=dict(type='bool', required=False, default=False),
log_path=dict(type='str', required=False),
))
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, )
args = self.module.params
self.asup = args['state'] == 'enabled'
self.active = args['active']
self.days = args['days']
self.start = args['start']
self.end = args['end']
self.verbose = args['verbose']
self.ssid = args['ssid']
self.url = args['api_url']
self.creds = dict(url_password=args['api_password'],
validate_certs=args['validate_certs'],
url_username=args['api_username'], )
self.check_mode = self.module.check_mode
log_path = args['log_path']
# logging setup
self._logger = logging.getLogger(self.__class__.__name__)
if log_path:
logging.basicConfig(
level=logging.DEBUG, filename=log_path, filemode='w',
format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
if not self.url.endswith('/'):
self.url += '/'
if self.start >= self.end:
self.module.fail_json(msg="The value provided for the start time is invalid."
" It must be less than the end time.")
if self.start < 0 or self.start > 23:
self.module.fail_json(msg="The value provided for the start time is invalid. It must be between 0 and 23.")
else:
self.start = self.start * 60
if self.end < 1 or self.end > 24:
self.module.fail_json(msg="The value provided for the end time is invalid. It must be between 1 and 24.")
else:
self.end = min(self.end * 60, 1439)
if not self.days:
self.days = self.DAYS_OPTIONS
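        # Worked example (illustrative, values assumed): the hour-based start/end options above are
        # converted to a minutes-since-midnight window before being sent to the API, e.g.
        #   start=17, end=20 -> dailyMinTime=1020, dailyMaxTime=1200
        #   start=0,  end=24 -> dailyMinTime=0,    dailyMaxTime=1439 (capped just below midnight)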
def get_configuration(self):
try:
(rc, result) = request(self.url + 'device-asup', headers=HEADERS, **self.creds)
if not (result['asupCapable'] and result['onDemandCapable']):
self.module.fail_json(msg="ASUP is not supported on this device. Array Id [%s]." % (self.ssid))
return result
except Exception as err:
self.module.fail_json(msg="Failed to retrieve ASUP configuration! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
def update_configuration(self):
config = self.get_configuration()
update = False
body = dict()
if self.asup:
body = dict(asupEnabled=True)
if not config['asupEnabled']:
update = True
if (config['onDemandEnabled'] and config['remoteDiagsEnabled']) != self.active:
update = True
body.update(dict(onDemandEnabled=self.active,
remoteDiagsEnabled=self.active))
self.days.sort()
config['schedule']['daysOfWeek'].sort()
body['schedule'] = dict(daysOfWeek=self.days,
dailyMinTime=self.start,
dailyMaxTime=self.end,
weeklyMinTime=self.start,
weeklyMaxTime=self.end)
if self.days != config['schedule']['daysOfWeek']:
update = True
if self.start != config['schedule']['dailyMinTime'] or self.start != config['schedule']['weeklyMinTime']:
update = True
elif self.end != config['schedule']['dailyMaxTime'] or self.end != config['schedule']['weeklyMaxTime']:
update = True
elif config['asupEnabled']:
body = dict(asupEnabled=False)
update = True
self._logger.info(pformat(body))
if update and not self.check_mode:
try:
(rc, result) = request(self.url + 'device-asup', method='POST',
data=json.dumps(body), headers=HEADERS, **self.creds)
# This is going to catch cases like a connection failure
except Exception as err:
self.module.fail_json(msg="We failed to set the storage-system name! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
return update
def update(self):
update = self.update_configuration()
cfg = self.get_configuration()
if self.verbose:
self.module.exit_json(msg="The ASUP settings have been updated.", changed=update,
asup=cfg['asupEnabled'], active=cfg['onDemandEnabled'], cfg=cfg)
else:
self.module.exit_json(msg="The ASUP settings have been updated.", changed=update,
asup=cfg['asupEnabled'], active=cfg['onDemandEnabled'])
def __call__(self, *args, **kwargs):
self.update()
def main():
settings = Asup()
settings()
if __name__ == '__main__':
main()


@ -0,0 +1,281 @@
#!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_auditlog
short_description: NetApp E-Series manage audit-log configuration
description:
- This module allows an e-series storage system owner to set audit-log configuration parameters.
author: Nathan Swartz (@ndswartz)
extends_documentation_fragment:
- netapp.ontap.netapp.eseries
options:
max_records:
description:
            - The maximum number of log messages audit-log will retain.
            - Max records must be between 100 and 50000, inclusive.
default: 50000
log_level:
description: Filters the log messages according to the specified log level selection.
choices:
- all
- writeOnly
default: writeOnly
full_policy:
description: Specifies what audit-log should do once the number of entries approach the record limit.
choices:
- overWrite
- preventSystemAccess
default: overWrite
threshold:
description:
            - This is the memory-full percentage threshold at which audit-log will start issuing warning messages.
            - The percentage must be between 60 and 90, inclusive.
default: 90
force:
description:
            - Forces the audit-log configuration to delete the log history when the volume of log messages would
              immediately cause a warning or full condition.
- Warning! This will cause any existing audit-log messages to be deleted.
- This is only applicable for I(full_policy=preventSystemAccess).
type: bool
default: no
log_path:
description: A local path to a file to be used for debug logging.
required: no
notes:
- Check mode is supported.
- This module is currently only supported with the Embedded Web Services API v3.0 and higher.
'''
EXAMPLES = """
- name: Define audit-log to prevent system access if records exceed 50000 with warnings occurring at 60% capacity.
netapp_e_auditlog:
api_url: "https://{{ netapp_e_api_host }}/devmgr/v2"
api_username: "{{ netapp_e_api_username }}"
api_password: "{{ netapp_e_api_password }}"
ssid: "{{ netapp_e_ssid }}"
validate_certs: no
max_records: 50000
log_level: all
full_policy: preventSystemAccess
threshold: 60
log_path: /path/to/log_file.log
- name: Define audit-log utilize the default values.
netapp_e_auditlog:
api_url: "https://{{ netapp_e_api_host }}/devmgr/v2"
api_username: "{{ netapp_e_api_username }}"
api_password: "{{ netapp_e_api_password }}"
ssid: "{{ netapp_e_ssid }}"
- name: Force audit-log configuration when full or warning conditions occur while enacting preventSystemAccess policy.
netapp_e_auditlog:
api_url: "https://{{ netapp_e_api_host }}/devmgr/v2"
api_username: "{{ netapp_e_api_username }}"
api_password: "{{ netapp_e_api_password }}"
ssid: "{{ netapp_e_ssid }}"
max_records: 5000
log_level: all
full_policy: preventSystemAccess
threshold: 60
force: yes
"""
RETURN = """
msg:
description: Success message
returned: on success
type: str
sample: The settings have been updated.
"""
import json
import logging
from pprint import pformat
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import request, eseries_host_argument_spec
from ansible.module_utils._text import to_native
try:
from urlparse import urlparse, urlunparse
except Exception:
from urllib.parse import urlparse, urlunparse
class AuditLog(object):
"""Audit-log module configuration class."""
MAX_RECORDS = 50000
HEADERS = {"Content-Type": "application/json",
"Accept": "application/json"}
def __init__(self):
argument_spec = eseries_host_argument_spec()
argument_spec.update(dict(
max_records=dict(type="int", default=50000),
log_level=dict(type="str", default="writeOnly", choices=["all", "writeOnly"]),
full_policy=dict(type="str", default="overWrite", choices=["overWrite", "preventSystemAccess"]),
threshold=dict(type="int", default=90),
force=dict(type="bool", default=False),
log_path=dict(type='str', required=False)))
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
args = self.module.params
self.max_records = args["max_records"]
if self.max_records < 100 or self.max_records > self.MAX_RECORDS:
self.module.fail_json(msg="Audit-log max_records count must be between 100 and 50000: [%s]"
% self.max_records)
self.threshold = args["threshold"]
if self.threshold < 60 or self.threshold > 90:
self.module.fail_json(msg="Audit-log percent threshold must be between 60 and 90: [%s]" % self.threshold)
self.log_level = args["log_level"]
self.full_policy = args["full_policy"]
self.force = args["force"]
self.ssid = args['ssid']
self.url = args['api_url']
if not self.url.endswith('/'):
self.url += '/'
self.creds = dict(url_password=args['api_password'],
validate_certs=args['validate_certs'],
url_username=args['api_username'], )
# logging setup
log_path = args['log_path']
self._logger = logging.getLogger(self.__class__.__name__)
if log_path:
logging.basicConfig(
level=logging.DEBUG, filename=log_path, filemode='w',
format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
self.proxy_used = self.is_proxy()
self._logger.info(self.proxy_used)
self.check_mode = self.module.check_mode
def is_proxy(self):
"""Determine whether the API is embedded or proxy."""
try:
# replace http url path with devmgr/utils/about
about_url = list(urlparse(self.url))
about_url[2] = "devmgr/utils/about"
about_url = urlunparse(about_url)
rc, data = request(about_url, timeout=300, headers=self.HEADERS, **self.creds)
return data["runningAsProxy"]
except Exception as err:
self.module.fail_json(msg="Failed to retrieve the webservices about information! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
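    # Worked example (illustrative assumption): for an embedded url such as
    # "https://192.168.1.100:8443/devmgr/v2/", urlparse() yields a six-element list whose index 2
    # is the path; replacing it gives "https://192.168.1.100:8443/devmgr/utils/about", whose
    # response carries the runningAsProxy flag used above.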
def get_configuration(self):
"""Retrieve the existing audit-log configurations.
:returns: dictionary containing current audit-log configuration
"""
try:
if self.proxy_used:
rc, data = request(self.url + "audit-log/config", timeout=300, headers=self.HEADERS, **self.creds)
else:
rc, data = request(self.url + "storage-systems/%s/audit-log/config" % self.ssid,
timeout=300, headers=self.HEADERS, **self.creds)
return data
except Exception as err:
self.module.fail_json(msg="Failed to retrieve the audit-log configuration! "
"Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
def build_configuration(self):
"""Build audit-log expected configuration.
:returns: Tuple containing update boolean value and dictionary of audit-log configuration
"""
config = self.get_configuration()
current = dict(auditLogMaxRecords=config["auditLogMaxRecords"],
auditLogLevel=config["auditLogLevel"],
auditLogFullPolicy=config["auditLogFullPolicy"],
auditLogWarningThresholdPct=config["auditLogWarningThresholdPct"])
body = dict(auditLogMaxRecords=self.max_records,
auditLogLevel=self.log_level,
auditLogFullPolicy=self.full_policy,
auditLogWarningThresholdPct=self.threshold)
update = current != body
self._logger.info(pformat(update))
self._logger.info(pformat(body))
return update, body
def delete_log_messages(self):
"""Delete all audit-log messages."""
self._logger.info("Deleting audit-log messages...")
try:
if self.proxy_used:
rc, result = request(self.url + "audit-log?clearAll=True", timeout=300,
method="DELETE", headers=self.HEADERS, **self.creds)
else:
rc, result = request(self.url + "storage-systems/%s/audit-log?clearAll=True" % self.ssid, timeout=300,
method="DELETE", headers=self.HEADERS, **self.creds)
except Exception as err:
self.module.fail_json(msg="Failed to delete audit-log messages! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
def update_configuration(self, update=None, body=None, attempt_recovery=True):
"""Update audit-log configuration."""
if update is None or body is None:
update, body = self.build_configuration()
if update and not self.check_mode:
try:
if self.proxy_used:
rc, result = request(self.url + "storage-systems/audit-log/config", timeout=300,
data=json.dumps(body), method='POST', headers=self.HEADERS,
ignore_errors=True, **self.creds)
else:
rc, result = request(self.url + "storage-systems/%s/audit-log/config" % self.ssid, timeout=300,
data=json.dumps(body), method='POST', headers=self.HEADERS,
ignore_errors=True, **self.creds)
if rc == 422:
if self.force and attempt_recovery:
self.delete_log_messages()
update = self.update_configuration(update, body, False)
else:
self.module.fail_json(msg="Failed to update audit-log configuration! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(rc, result)))
except Exception as error:
self.module.fail_json(msg="Failed to update audit-log configuration! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(error)))
return update
def update(self):
"""Update the audit-log configuration."""
update = self.update_configuration()
self.module.exit_json(msg="Audit-log update complete", changed=update)
def __call__(self):
self.update()
def main():
auditlog = AuditLog()
auditlog()
if __name__ == "__main__":
main()


@ -0,0 +1,275 @@
#!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_auth
short_description: NetApp E-Series set or update the password for a storage array.
description:
- Sets or updates the password for a storage array. When the password is updated on the storage array, it must be updated on the SANtricity Web
      Services proxy. Note that not all storage arrays have a Monitor or RO role.
author: Kevin Hulquest (@hulquest)
options:
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
type: bool
name:
description:
- The name of the storage array. Note that if more than one storage array with this name is detected, the task will fail and you'll have to use
the ID instead.
required: False
ssid:
description:
- the identifier of the storage array in the Web Services Proxy.
required: False
set_admin:
description:
- Boolean value on whether to update the admin password. If set to false then the RO account is updated.
type: bool
default: False
current_password:
description:
- The current admin password. This is not required if the password hasn't been set before.
required: False
new_password:
description:
- The password you would like to set. Cannot be more than 30 characters.
required: True
api_url:
description:
- The full API url.
- "Example: http://ENDPOINT:8080/devmgr/v2"
- This can optionally be set via an environment variable, API_URL
required: False
api_username:
description:
- The username used to authenticate against the API
- This can optionally be set via an environment variable, API_USERNAME
required: False
api_password:
description:
- The password used to authenticate against the API
- This can optionally be set via an environment variable, API_PASSWORD
required: False
'''
EXAMPLES = '''
- name: Test module
netapp_e_auth:
name: trex
current_password: OldPasswd
new_password: NewPasswd
set_admin: yes
api_url: '{{ netapp_api_url }}'
api_username: '{{ netapp_api_username }}'
api_password: '{{ netapp_api_password }}'
'''
RETURN = '''
msg:
description: Success message
returned: success
type: str
sample: "Password Updated Successfully"
'''
import json
import traceback
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import open_url
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
"x-netapp-password-validate-method": "none"
}
def request(url, data=None, headers=None, method='GET', use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
try:
r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth)
except HTTPError as e:
r = e.fp
try:
raw_data = r.read()
if raw_data:
data = json.loads(raw_data)
else:
raw_data = None
except Exception:
if ignore_errors:
pass
else:
raise Exception(raw_data)
resp_code = r.getcode()
if resp_code >= 400 and not ignore_errors:
raise Exception(resp_code, data)
else:
return resp_code, data
def get_ssid(module, name, api_url, user, pwd):
count = 0
all_systems = 'storage-systems'
systems_url = api_url + all_systems
rc, data = request(systems_url, headers=HEADERS, url_username=user, url_password=pwd,
validate_certs=module.validate_certs)
for system in data:
if system['name'] == name:
count += 1
if count > 1:
module.fail_json(
msg="You supplied a name for the Storage Array but more than 1 array was found with that name. " +
"Use the id instead")
else:
ssid = system['id']
else:
continue
if count == 0:
module.fail_json(msg="No storage array with the name %s was found" % name)
else:
return ssid
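# Illustrative sketch (not part of the original module): the same name-to-id lookup expressed as a
# list comprehension over the systems returned by the proxy; the error texts mirror those above.
def _example_find_ssid(systems, name):
    """Return the id of the single array whose name matches, or raise ValueError."""
    matches = [system['id'] for system in systems if system['name'] == name]
    if not matches:
        raise ValueError("No storage array with the name %s was found" % name)
    if len(matches) > 1:
        raise ValueError("More than one array is named %s, use the id instead" % name)
    return matches[0]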
def get_pwd_status(module, ssid, api_url, user, pwd):
pwd_status = "storage-systems/%s/passwords" % ssid
url = api_url + pwd_status
try:
rc, data = request(url, headers=HEADERS, url_username=user, url_password=pwd,
validate_certs=module.validate_certs)
return data['readOnlyPasswordSet'], data['adminPasswordSet']
except HTTPError as e:
module.fail_json(msg="There was an issue with connecting, please check that your "
"endpoint is properly defined and your credentials are correct: %s" % to_native(e))
def update_storage_system_pwd(module, ssid, pwd, api_url, api_usr, api_pwd):
"""Update the stored storage-system password"""
update_pwd = 'storage-systems/%s' % ssid
url = api_url + update_pwd
post_body = json.dumps(dict(storedPassword=pwd))
try:
rc, data = request(url, data=post_body, method='POST', headers=HEADERS, url_username=api_usr,
url_password=api_pwd, validate_certs=module.validate_certs)
return rc, data
except Exception as e:
module.fail_json(msg="Failed to update system password. Id [%s]. Error [%s]" % (ssid, to_native(e)))
def set_password(module, ssid, api_url, user, pwd, current_password=None, new_password=None, set_admin=False):
"""Set the storage-system password"""
set_pass = "storage-systems/%s/passwords" % ssid
url = api_url + set_pass
if not current_password:
current_password = ""
post_body = json.dumps(
dict(currentAdminPassword=current_password, adminPassword=set_admin, newPassword=new_password))
try:
rc, data = request(url, method='POST', data=post_body, headers=HEADERS, url_username=user, url_password=pwd,
ignore_errors=True, validate_certs=module.validate_certs)
except Exception as e:
module.fail_json(msg="Failed to set system password. Id [%s]. Error [%s]" % (ssid, to_native(e)),
exception=traceback.format_exc())
if rc == 422:
post_body = json.dumps(dict(currentAdminPassword='', adminPassword=set_admin, newPassword=new_password))
try:
rc, data = request(url, method='POST', data=post_body, headers=HEADERS, url_username=user, url_password=pwd,
validate_certs=module.validate_certs)
except Exception:
# TODO(lorenp): Resolve ignored rc, data
module.fail_json(msg="Wrong or no admin password supplied. Please update your playbook and try again")
if int(rc) >= 300:
module.fail_json(msg="Failed to set system password. Id [%s] Code [%s]. Error [%s]" % (ssid, rc, data))
rc, update_data = update_storage_system_pwd(module, ssid, new_password, api_url, user, pwd)
if int(rc) < 300:
return update_data
else:
module.fail_json(msg="%s:%s" % (rc, update_data))
def main():
argument_spec = basic_auth_argument_spec()
argument_spec.update(dict(
name=dict(required=False, type='str'),
ssid=dict(required=False, type='str'),
current_password=dict(required=False, no_log=True),
new_password=dict(required=True, no_log=True),
set_admin=dict(required=True, type='bool'),
api_url=dict(required=True),
api_username=dict(required=False),
api_password=dict(required=False, no_log=True)
)
)
module = AnsibleModule(argument_spec=argument_spec, mutually_exclusive=[['name', 'ssid']],
required_one_of=[['name', 'ssid']])
name = module.params['name']
ssid = module.params['ssid']
current_password = module.params['current_password']
new_password = module.params['new_password']
set_admin = module.params['set_admin']
user = module.params['api_username']
pwd = module.params['api_password']
api_url = module.params['api_url']
module.validate_certs = module.params['validate_certs']
if not api_url.endswith('/'):
api_url += '/'
if name:
ssid = get_ssid(module, name, api_url, user, pwd)
ro_pwd, admin_pwd = get_pwd_status(module, ssid, api_url, user, pwd)
if admin_pwd and not current_password:
module.fail_json(
msg="Admin account has a password set. " +
"You must supply current_password in order to update the RO or Admin passwords")
if len(new_password) > 30:
module.fail_json(msg="Passwords must not be greater than 30 characters in length")
result = set_password(module, ssid, api_url, user, pwd, current_password=current_password,
new_password=new_password, set_admin=set_admin)
module.exit_json(changed=True, msg="Password Updated Successfully",
password_set=result['passwordSet'],
password_status=result['passwordStatus'])
if __name__ == '__main__':
main()


@ -0,0 +1,215 @@
#!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_drive_firmware
short_description: NetApp E-Series manage drive firmware
description:
- Ensure drive firmware version is activated on specified drive model.
author:
- Nathan Swartz (@ndswartz)
extends_documentation_fragment:
- netapp.ontap.netapp.eseries
options:
firmware:
description:
- list of drive firmware file paths.
- NetApp E-Series drives require special firmware which can be downloaded from https://mysupport.netapp.com/NOW/download/tools/diskfw_eseries/
type: list
required: True
wait_for_completion:
description:
            - This flag will cause the module to wait for any upgrade actions to complete.
type: bool
default: false
ignore_inaccessible_drives:
description:
- This flag will determine whether drive firmware upgrade should fail if any affected drives are inaccessible.
type: bool
default: false
upgrade_drives_online:
description:
            - This flag will determine whether drive firmware can be upgraded while drives are accepting I/O.
            - When I(upgrade_drives_online=False), stop all I/O before running the task.
type: bool
default: true
'''
EXAMPLES = """
- name: Ensure correct firmware versions
  netapp_e_drive_firmware:
ssid: "1"
api_url: "https://192.168.1.100:8443/devmgr/v2"
api_username: "admin"
api_password: "adminpass"
validate_certs: true
firmware: "path/to/drive_firmware"
wait_for_completion: true
ignore_inaccessible_drives: false
"""
RETURN = """
msg:
description: Whether any drive firmware was upgraded and whether it is in progress.
type: str
returned: always
sample:
{ changed: True, upgrade_in_process: True }
"""
import os
import re
from time import sleep
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import NetAppESeriesModule, create_multipart_formdata
from ansible.module_utils._text import to_native, to_text, to_bytes
class NetAppESeriesDriveFirmware(NetAppESeriesModule):
WAIT_TIMEOUT_SEC = 60 * 15
def __init__(self):
ansible_options = dict(
firmware=dict(type="list", required=True),
wait_for_completion=dict(type="bool", default=False),
ignore_inaccessible_drives=dict(type="bool", default=False),
upgrade_drives_online=dict(type="bool", default=True))
super(NetAppESeriesDriveFirmware, self).__init__(ansible_options=ansible_options,
web_services_version="02.00.0000.0000",
supports_check_mode=True)
args = self.module.params
self.firmware_list = args["firmware"]
self.wait_for_completion = args["wait_for_completion"]
self.ignore_inaccessible_drives = args["ignore_inaccessible_drives"]
self.upgrade_drives_online = args["upgrade_drives_online"]
self.upgrade_list_cache = None
self.upgrade_required_cache = None
self.upgrade_in_progress = False
self.drive_info_cache = None
def upload_firmware(self):
"""Ensure firmware has been upload prior to uploaded."""
for firmware in self.firmware_list:
firmware_name = os.path.basename(firmware)
files = [("file", firmware_name, firmware)]
headers, data = create_multipart_formdata(files)
try:
rc, response = self.request("/files/drive", method="POST", headers=headers, data=data)
except Exception as error:
self.module.fail_json(msg="Failed to upload drive firmware [%s]. Array [%s]. Error [%s]." % (firmware_name, self.ssid, to_native(error)))
def upgrade_list(self):
"""Determine whether firmware is compatible with the specified drives."""
if self.upgrade_list_cache is None:
self.upgrade_list_cache = list()
try:
rc, response = self.request("storage-systems/%s/firmware/drives" % self.ssid)
# Create upgrade list, this ensures only the firmware uploaded is applied
for firmware in self.firmware_list:
filename = os.path.basename(firmware)
for uploaded_firmware in response["compatibilities"]:
if uploaded_firmware["filename"] == filename:
# Determine whether upgrade is required
drive_reference_list = []
for drive in uploaded_firmware["compatibleDrives"]:
try:
rc, drive_info = self.request("storage-systems/%s/drives/%s" % (self.ssid, drive["driveRef"]))
# Add drive references that are supported and differ from current firmware
if (drive_info["firmwareVersion"] != uploaded_firmware["firmwareVersion"] and
uploaded_firmware["firmwareVersion"] in uploaded_firmware["supportedFirmwareVersions"]):
if self.ignore_inaccessible_drives or (not drive_info["offline"] and drive_info["available"]):
drive_reference_list.append(drive["driveRef"])
if not drive["onlineUpgradeCapable"] and self.upgrade_drives_online:
self.module.fail_json(msg="Drive is not capable of online upgrade. Array [%s]. Drive [%s]."
% (self.ssid, drive["driveRef"]))
except Exception as error:
self.module.fail_json(msg="Failed to retrieve drive information. Array [%s]. Drive [%s]. Error [%s]."
% (self.ssid, drive["driveRef"], to_native(error)))
if drive_reference_list:
self.upgrade_list_cache.extend([{"filename": filename, "driveRefList": drive_reference_list}])
except Exception as error:
self.module.fail_json(msg="Failed to complete compatibility and health check. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
return self.upgrade_list_cache
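    # Illustrative note (values are assumptions): upgrade_list() returns the request body later
    # posted to firmware/drives/initiate-upgrade, shaped like
    #   [{"filename": "drive_firmware.dlp",
    #     "driveRefList": ["01000000500003960C8B67880000000000000000"]}]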
def wait_for_upgrade_completion(self):
"""Wait for drive firmware upgrade to complete."""
drive_references = [reference for drive in self.upgrade_list() for reference in drive["driveRefList"]]
last_status = None
for attempt in range(int(self.WAIT_TIMEOUT_SEC / 5)):
try:
rc, response = self.request("storage-systems/%s/firmware/drives/state" % self.ssid)
# Check drive status
for status in response["driveStatus"]:
last_status = status
if status["driveRef"] in drive_references:
if status["status"] == "okay":
continue
elif status["status"] in ["inProgress", "inProgressRecon", "pending", "notAttempted"]:
break
else:
self.module.fail_json(msg="Drive firmware upgrade failed. Array [%s]. Drive [%s]. Status [%s]."
% (self.ssid, status["driveRef"], status["status"]))
else:
self.upgrade_in_progress = False
break
except Exception as error:
self.module.fail_json(msg="Failed to retrieve drive status. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
sleep(5)
else:
self.module.fail_json(msg="Timed out waiting for drive firmware upgrade. Array [%s]. Status [%s]." % (self.ssid, last_status))
def upgrade(self):
"""Apply firmware to applicable drives."""
try:
rc, response = self.request("storage-systems/%s/firmware/drives/initiate-upgrade?onlineUpdate=%s"
% (self.ssid, "true" if self.upgrade_drives_online else "false"), method="POST", data=self.upgrade_list())
self.upgrade_in_progress = True
except Exception as error:
self.module.fail_json(msg="Failed to upgrade drive firmware. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
if self.wait_for_completion:
self.wait_for_upgrade_completion()
def apply(self):
"""Apply firmware policy has been enforced on E-Series storage system."""
self.upload_firmware()
if self.upgrade_list() and not self.module.check_mode:
self.upgrade()
self.module.exit_json(changed=True if self.upgrade_list() else False,
upgrade_in_process=self.upgrade_in_progress)
def main():
drive_firmware = NetAppESeriesDriveFirmware()
drive_firmware.apply()
if __name__ == '__main__':
main()


@ -0,0 +1,530 @@
#!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: netapp_e_facts
short_description: NetApp E-Series retrieve facts about NetApp E-Series storage arrays
description:
- The netapp_e_facts module returns a collection of facts regarding NetApp E-Series storage arrays.
author:
- Kevin Hulquest (@hulquest)
- Nathan Swartz (@ndswartz)
extends_documentation_fragment:
- netapp.ontap.netapp.eseries
'''
EXAMPLES = """
---
- name: Get array facts
netapp_e_facts:
ssid: "1"
api_url: "https://192.168.1.100:8443/devmgr/v2"
api_username: "admin"
api_password: "adminpass"
validate_certs: true
"""
RETURN = """
msg:
description: Success message
returned: on success
type: str
sample:
- Gathered facts for storage array. Array ID [1].
- Gathered facts for web services proxy.
storage_array_facts:
description: provides details about the array, controllers, management interfaces, hostside interfaces,
driveside interfaces, disks, storage pools, volumes, snapshots, and features.
    returned: on successful inquiry from the embedded web services rest api
type: complex
contains:
netapp_controllers:
description: storage array controller list that contains basic controller identification and status
type: complex
sample:
- [{"name": "A", "serial": "021632007299", "status": "optimal"},
{"name": "B", "serial": "021632007300", "status": "failed"}]
netapp_disks:
description: drive list that contains identification, type, and status information for each drive
type: complex
sample:
- [{"available": false,
"firmware_version": "MS02",
"id": "01000000500003960C8B67880000000000000000",
"media_type": "ssd",
"product_id": "PX02SMU080 ",
"serial_number": "15R0A08LT2BA",
"status": "optimal",
"tray_ref": "0E00000000000000000000000000000000000000",
"usable_bytes": "799629205504" }]
netapp_driveside_interfaces:
description: drive side interface list that contains identification, type, and speed for each interface
type: complex
sample:
- [{ "controller": "A", "interface_speed": "12g", "interface_type": "sas" }]
- [{ "controller": "B", "interface_speed": "10g", "interface_type": "iscsi" }]
netapp_enabled_features:
description: specifies the enabled features on the storage array.
returned: on success
type: complex
sample:
- [ "flashReadCache", "performanceTier", "protectionInformation", "secureVolume" ]
netapp_host_groups:
description: specifies the host groups on the storage arrays.
returned: on success
type: complex
sample:
- [{ "id": "85000000600A098000A4B28D003610705C40B964", "name": "group1" }]
netapp_hosts:
description: specifies the hosts on the storage arrays.
returned: on success
type: complex
sample:
- [{ "id": "8203800000000000000000000000000000000000",
"name": "host1",
"group_id": "85000000600A098000A4B28D003610705C40B964",
"host_type_index": 28,
"ports": [{ "type": "fc", "address": "1000FF7CFFFFFF01", "label": "FC_1" },
{ "type": "fc", "address": "1000FF7CFFFFFF00", "label": "FC_2" }]}]
netapp_host_types:
description: lists the available host types on the storage array.
returned: on success
type: complex
sample:
- [{ "index": 0, "type": "FactoryDefault" },
{ "index": 1, "type": "W2KNETNCL"},
{ "index": 2, "type": "SOL" },
{ "index": 5, "type": "AVT_4M" },
{ "index": 6, "type": "LNX" },
{ "index": 7, "type": "LnxALUA" },
{ "index": 8, "type": "W2KNETCL" },
{ "index": 9, "type": "AIX MPIO" },
{ "index": 10, "type": "VmwTPGSALUA" },
{ "index": 15, "type": "HPXTPGS" },
{ "index": 17, "type": "SolTPGSALUA" },
{ "index": 18, "type": "SVC" },
{ "index": 22, "type": "MacTPGSALUA" },
{ "index": 23, "type": "WinTPGSALUA" },
{ "index": 24, "type": "LnxTPGSALUA" },
{ "index": 25, "type": "LnxTPGSALUA_PM" },
{ "index": 26, "type": "ONTAP_ALUA" },
{ "index": 27, "type": "LnxTPGSALUA_SF" },
{ "index": 28, "type": "LnxDHALUA" },
{ "index": 29, "type": "ATTOClusterAllOS" }]
netapp_hostside_interfaces:
description: host side interface list that contains identification, configuration, type, speed, and
status information for each interface
type: complex
sample:
- [{"iscsi":
[{ "controller": "A",
"current_interface_speed": "10g",
"ipv4_address": "10.10.10.1",
"ipv4_enabled": true,
"ipv4_gateway": "10.10.10.1",
"ipv4_subnet_mask": "255.255.255.0",
"ipv6_enabled": false,
"iqn": "iqn.1996-03.com.netapp:2806.600a098000a81b6d0000000059d60c76",
"link_status": "up",
"mtu": 9000,
"supported_interface_speeds": [ "10g" ] }]}]
netapp_management_interfaces:
description: management interface list that contains identification, configuration, and status for
each interface
type: complex
sample:
- [{"alias": "ict-2800-A",
"channel": 1,
"controller": "A",
"dns_config_method": "dhcp",
"dns_servers": [],
"ipv4_address": "10.1.1.1",
"ipv4_address_config_method": "static",
"ipv4_enabled": true,
"ipv4_gateway": "10.113.1.1",
"ipv4_subnet_mask": "255.255.255.0",
"ipv6_enabled": false,
"link_status": "up",
"mac_address": "00A098A81B5D",
"name": "wan0",
"ntp_config_method": "disabled",
"ntp_servers": [],
"remote_ssh_access": false }]
netapp_storage_array:
description: provides storage array identification, firmware version, and available capabilities
type: dict
sample:
- {"chassis_serial": "021540006043",
"firmware": "08.40.00.01",
"name": "ict-2800-11_40",
"wwn": "600A098000A81B5D0000000059D60C76",
"cacheBlockSizes": [4096,
8192,
16384,
32768],
"supportedSegSizes": [8192,
16384,
32768,
65536,
131072,
262144,
524288]}
netapp_storage_pools:
description: storage pool list that contains identification and capacity information for each pool
type: complex
sample:
- [{"available_capacity": "3490353782784",
"id": "04000000600A098000A81B5D000002B45A953A61",
"name": "Raid6",
"total_capacity": "5399466745856",
"used_capacity": "1909112963072" }]
netapp_volumes:
description: storage volume list that contains identification and capacity information for each volume
type: complex
sample:
- [{"capacity": "5368709120",
"id": "02000000600A098000AAC0C3000002C45A952BAA",
"is_thin_provisioned": false,
"name": "5G",
"parent_storage_pool_id": "04000000600A098000A81B5D000002B45A953A61" }]
netapp_workload_tags:
description: workload tag list
type: complex
sample:
- [{"id": "87e19568-43fb-4d8d-99ea-2811daaa2b38",
"name": "ftp_server",
"workloadAttributes": [{"key": "use",
"value": "general"}]}]
netapp_volumes_by_initiators:
description: list of available volumes keyed by the mapped initiators.
type: complex
sample:
- {"192_168_1_1": [{"id": "02000000600A098000A4B9D1000015FD5C8F7F9E",
"meta_data": {"filetype": "xfs", "public": true},
"name": "some_volume",
"workload_name": "test2_volumes",
"wwn": "600A098000A4B9D1000015FD5C8F7F9E"}]}
snapshot_images:
description: snapshot image list that contains identification, capacity, and status information for each
snapshot image
type: complex
sample:
- [{"active_cow": true,
"creation_method": "user",
"id": "34000000600A098000A81B5D00630A965B0535AC",
"pit_capacity": "5368709120",
"reposity_cap_utilization": "0",
"rollback_source": false,
"status": "optimal" }]
"""
from re import match
from pprint import pformat
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import NetAppESeriesModule
class Facts(NetAppESeriesModule):
def __init__(self):
web_services_version = "02.00.0000.0000"
super(Facts, self).__init__(ansible_options={},
web_services_version=web_services_version,
supports_check_mode=True)
def get_controllers(self):
"""Retrieve a mapping of controller references to their labels."""
controllers = list()
try:
rc, controllers = self.request('storage-systems/%s/graph/xpath-filter?query=/controller/id' % self.ssid)
except Exception as err:
self.module.fail_json(
msg="Failed to retrieve controller list! Array Id [%s]. Error [%s]."
% (self.ssid, str(err)))
controllers.sort()
controllers_dict = {}
i = ord('A')
for controller in controllers:
label = chr(i)
controllers_dict[controller] = label
i += 1
return controllers_dict
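    # Worked example (illustrative): two controller references returned by the xpath-filter query
    # would be sorted and labelled in order, e.g.
    #   {"070000000000000000000001": "A", "070000000000000000000002": "B"}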
def get_array_facts(self):
"""Extract particular facts from the storage array graph"""
facts = dict(facts_from_proxy=(not self.is_embedded()), ssid=self.ssid)
controller_reference_label = self.get_controllers()
array_facts = None
# Get the storage array graph
try:
rc, array_facts = self.request("storage-systems/%s/graph" % self.ssid)
except Exception as error:
self.module.fail_json(msg="Failed to obtain facts from storage array with id [%s]. Error [%s]" % (self.ssid, str(error)))
facts['netapp_storage_array'] = dict(
name=array_facts['sa']['saData']['storageArrayLabel'],
chassis_serial=array_facts['sa']['saData']['chassisSerialNumber'],
firmware=array_facts['sa']['saData']['fwVersion'],
wwn=array_facts['sa']['saData']['saId']['worldWideName'],
segment_sizes=array_facts['sa']['featureParameters']['supportedSegSizes'],
cache_block_sizes=array_facts['sa']['featureParameters']['cacheBlockSizes'])
facts['netapp_controllers'] = [
dict(
name=controller_reference_label[controller['controllerRef']],
serial=controller['serialNumber'].strip(),
status=controller['status'],
) for controller in array_facts['controller']]
facts['netapp_host_groups'] = [
dict(
id=group['id'],
name=group['name']
) for group in array_facts['storagePoolBundle']['cluster']]
facts['netapp_hosts'] = [
dict(
group_id=host['clusterRef'],
hosts_reference=host['hostRef'],
id=host['id'],
name=host['name'],
host_type_index=host['hostTypeIndex'],
                ports=host['hostSidePorts']
) for host in array_facts['storagePoolBundle']['host']]
facts['netapp_host_types'] = [
dict(
type=host_type['hostType'],
index=host_type['index']
) for host_type in array_facts['sa']['hostSpecificVals']
if 'hostType' in host_type.keys() and host_type['hostType']
# This conditional ignores zero-length strings which indicates that the associated host-specific NVSRAM region has been cleared.
]
facts['snapshot_images'] = [
dict(
id=snapshot['id'],
status=snapshot['status'],
pit_capacity=snapshot['pitCapacity'],
creation_method=snapshot['creationMethod'],
reposity_cap_utilization=snapshot['repositoryCapacityUtilization'],
active_cow=snapshot['activeCOW'],
rollback_source=snapshot['isRollbackSource']
) for snapshot in array_facts['highLevelVolBundle']['pit']]
facts['netapp_disks'] = [
dict(
id=disk['id'],
available=disk['available'],
media_type=disk['driveMediaType'],
status=disk['status'],
usable_bytes=disk['usableCapacity'],
tray_ref=disk['physicalLocation']['trayRef'],
product_id=disk['productID'],
firmware_version=disk['firmwareVersion'],
serial_number=disk['serialNumber'].lstrip()
) for disk in array_facts['drive']]
facts['netapp_management_interfaces'] = [
dict(controller=controller_reference_label[controller['controllerRef']],
name=iface['ethernet']['interfaceName'],
alias=iface['ethernet']['alias'],
channel=iface['ethernet']['channel'],
mac_address=iface['ethernet']['macAddr'],
remote_ssh_access=iface['ethernet']['rloginEnabled'],
link_status=iface['ethernet']['linkStatus'],
ipv4_enabled=iface['ethernet']['ipv4Enabled'],
ipv4_address_config_method=iface['ethernet']['ipv4AddressConfigMethod'].lower().replace("config", ""),
ipv4_address=iface['ethernet']['ipv4Address'],
ipv4_subnet_mask=iface['ethernet']['ipv4SubnetMask'],
ipv4_gateway=iface['ethernet']['ipv4GatewayAddress'],
ipv6_enabled=iface['ethernet']['ipv6Enabled'],
dns_config_method=iface['ethernet']['dnsProperties']['acquisitionProperties']['dnsAcquisitionType'],
dns_servers=(iface['ethernet']['dnsProperties']['acquisitionProperties']['dnsServers']
if iface['ethernet']['dnsProperties']['acquisitionProperties']['dnsServers'] else []),
ntp_config_method=iface['ethernet']['ntpProperties']['acquisitionProperties']['ntpAcquisitionType'],
ntp_servers=(iface['ethernet']['ntpProperties']['acquisitionProperties']['ntpServers']
if iface['ethernet']['ntpProperties']['acquisitionProperties']['ntpServers'] else [])
) for controller in array_facts['controller'] for iface in controller['netInterfaces']]
facts['netapp_hostside_interfaces'] = [
dict(
fc=[dict(controller=controller_reference_label[controller['controllerRef']],
channel=iface['fibre']['channel'],
link_status=iface['fibre']['linkStatus'],
current_interface_speed=strip_interface_speed(iface['fibre']['currentInterfaceSpeed']),
maximum_interface_speed=strip_interface_speed(iface['fibre']['maximumInterfaceSpeed']))
for controller in array_facts['controller']
for iface in controller['hostInterfaces']
if iface['interfaceType'] == 'fc'],
ib=[dict(controller=controller_reference_label[controller['controllerRef']],
channel=iface['ib']['channel'],
link_status=iface['ib']['linkState'],
mtu=iface['ib']['maximumTransmissionUnit'],
current_interface_speed=strip_interface_speed(iface['ib']['currentSpeed']),
maximum_interface_speed=strip_interface_speed(iface['ib']['supportedSpeed']))
for controller in array_facts['controller']
for iface in controller['hostInterfaces']
if iface['interfaceType'] == 'ib'],
iscsi=[dict(controller=controller_reference_label[controller['controllerRef']],
iqn=iface['iscsi']['iqn'],
link_status=iface['iscsi']['interfaceData']['ethernetData']['linkStatus'],
ipv4_enabled=iface['iscsi']['ipv4Enabled'],
ipv4_address=iface['iscsi']['ipv4Data']['ipv4AddressData']['ipv4Address'],
ipv4_subnet_mask=iface['iscsi']['ipv4Data']['ipv4AddressData']['ipv4SubnetMask'],
ipv4_gateway=iface['iscsi']['ipv4Data']['ipv4AddressData']['ipv4GatewayAddress'],
ipv6_enabled=iface['iscsi']['ipv6Enabled'],
mtu=iface['iscsi']['interfaceData']['ethernetData']['maximumFramePayloadSize'],
current_interface_speed=strip_interface_speed(iface['iscsi']['interfaceData']
['ethernetData']['currentInterfaceSpeed']),
supported_interface_speeds=strip_interface_speed(iface['iscsi']['interfaceData']
['ethernetData']
['supportedInterfaceSpeeds']))
for controller in array_facts['controller']
for iface in controller['hostInterfaces']
if iface['interfaceType'] == 'iscsi'],
sas=[dict(controller=controller_reference_label[controller['controllerRef']],
channel=iface['sas']['channel'],
current_interface_speed=strip_interface_speed(iface['sas']['currentInterfaceSpeed']),
maximum_interface_speed=strip_interface_speed(iface['sas']['maximumInterfaceSpeed']),
link_status=iface['sas']['iocPort']['state'])
for controller in array_facts['controller']
for iface in controller['hostInterfaces']
if iface['interfaceType'] == 'sas'])]
facts['netapp_driveside_interfaces'] = [
dict(
controller=controller_reference_label[controller['controllerRef']],
interface_type=interface['interfaceType'],
interface_speed=strip_interface_speed(
interface[interface['interfaceType']]['maximumInterfaceSpeed']
if (interface['interfaceType'] == 'sata' or
interface['interfaceType'] == 'sas' or
interface['interfaceType'] == 'fibre')
else (
interface[interface['interfaceType']]['currentSpeed']
if interface['interfaceType'] == 'ib'
else (
interface[interface['interfaceType']]['interfaceData']['maximumInterfaceSpeed']
if interface['interfaceType'] == 'iscsi' else 'unknown'
))),
)
for controller in array_facts['controller']
for interface in controller['driveInterfaces']]
facts['netapp_storage_pools'] = [
dict(
id=storage_pool['id'],
name=storage_pool['name'],
available_capacity=storage_pool['freeSpace'],
total_capacity=storage_pool['totalRaidedSpace'],
used_capacity=storage_pool['usedSpace']
) for storage_pool in array_facts['volumeGroup']]
all_volumes = list(array_facts['volume'])
facts['netapp_volumes'] = [
dict(
id=v['id'],
name=v['name'],
parent_storage_pool_id=v['volumeGroupRef'],
capacity=v['capacity'],
is_thin_provisioned=v['thinProvisioned'],
workload=v['metadata'],
) for v in all_volumes]
workload_tags = None
try:
rc, workload_tags = self.request("storage-systems/%s/workloads" % self.ssid)
except Exception as error:
self.module.fail_json(msg="Failed to retrieve workload tags. Array [%s]." % self.ssid)
facts['netapp_workload_tags'] = [
dict(
id=workload_tag['id'],
name=workload_tag['name'],
attributes=workload_tag['workloadAttributes']
) for workload_tag in workload_tags]
# Create a dictionary of volume lists keyed by host names
facts['netapp_volumes_by_initiators'] = dict()
for mapping in array_facts['storagePoolBundle']['lunMapping']:
for host in facts['netapp_hosts']:
if mapping['mapRef'] == host['hosts_reference'] or mapping['mapRef'] == host['group_id']:
if host['name'] not in facts['netapp_volumes_by_initiators'].keys():
facts['netapp_volumes_by_initiators'].update({host['name']: []})
for volume in all_volumes:
if mapping['id'] in [volume_mapping['id'] for volume_mapping in volume['listOfMappings']]:
# Determine workload name if there is one
workload_name = ""
metadata = dict()
for volume_tag in volume['metadata']:
if volume_tag['key'] == 'workloadId':
for workload_tag in facts['netapp_workload_tags']:
if volume_tag['value'] == workload_tag['id']:
workload_name = workload_tag['name']
metadata = dict((entry['key'], entry['value'])
for entry in workload_tag['attributes']
if entry['key'] != 'profileId')
facts['netapp_volumes_by_initiators'][host['name']].append(
dict(name=volume['name'],
id=volume['id'],
wwn=volume['wwn'],
workload_name=workload_name,
meta_data=metadata))
features = [feature for feature in array_facts['sa']['capabilities']]
features.extend([feature['capability'] for feature in array_facts['sa']['premiumFeatures']
if feature['isEnabled']])
features = list(set(features)) # ensure unique
features.sort()
facts['netapp_enabled_features'] = features
return facts
def get_facts(self):
"""Get the embedded or web services proxy information."""
facts = self.get_array_facts()
self.module.log("isEmbedded: %s" % self.is_embedded())
self.module.log(pformat(facts))
self.module.exit_json(msg="Gathered facts for storage array. Array ID: [%s]." % self.ssid,
storage_array_facts=facts)
def strip_interface_speed(speed):
"""Converts symbol interface speeds to a more common notation. Example: 'speed10gig' -> '10g'"""
if isinstance(speed, list):
result = [match(r"speed[0-9]{1,3}[gm]", sp) for sp in speed]
result = [sp.group().replace("speed", "") if sp else "unknown" for sp in result]
result = ["auto" if match(r"auto", sp) else sp for sp in result]
else:
result = match(r"speed[0-9]{1,3}[gm]", speed)
result = result.group().replace("speed", "") if result else "unknown"
result = "auto" if match(r"auto", result.lower()) else result
return result
def main():
facts = Facts()
facts.get_facts()
if __name__ == "__main__":
main()

View file

@ -0,0 +1,488 @@
#!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_firmware
short_description: NetApp E-Series manage firmware.
description:
- Ensure specific firmware versions are activated on E-Series storage system.
author:
- Nathan Swartz (@ndswartz)
extends_documentation_fragment:
- netapp.ontap.netapp.eseries
options:
nvsram:
description:
- Path to the NVSRAM file.
type: str
required: true
firmware:
description:
- Path to the firmware file.
type: str
required: true
wait_for_completion:
description:
- This flag will cause the module to wait for any upgrade actions to complete.
type: bool
default: false
ignore_health_check:
description:
- This flag will force firmware to be activated in spite of the health check.
- Use at your own risk. Certain non-optimal states could result in data loss.
type: bool
default: false
'''
EXAMPLES = """
- name: Ensure correct firmware versions
netapp_e_firmware:
ssid: "1"
api_url: "https://192.168.1.100:8443/devmgr/v2"
api_username: "admin"
api_password: "adminpass"
validate_certs: true
nvsram: "path/to/nvsram"
firmware: "path/to/bundle"
wait_for_completion: true
- name: Ensure correct firmware versions
netapp_e_firmware:
ssid: "1"
api_url: "https://192.168.1.100:8443/devmgr/v2"
api_username: "admin"
api_password: "adminpass"
validate_certs: true
nvsram: "path/to/nvsram"
firmware: "path/to/firmware"
"""
RETURN = """
msg:
description: Status and version of firmware and NVSRAM.
type: str
returned: always
sample:
"""
import os
from time import sleep
from ansible.module_utils import six
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import NetAppESeriesModule, create_multipart_formdata, request
from ansible.module_utils._text import to_native, to_text, to_bytes
class NetAppESeriesFirmware(NetAppESeriesModule):
HEALTH_CHECK_TIMEOUT_MS = 120000
REBOOT_TIMEOUT_SEC = 15 * 60
FIRMWARE_COMPATIBILITY_CHECK_TIMEOUT_SEC = 60
DEFAULT_TIMEOUT = 60 * 15 # This will override the NetAppESeriesModule request method timeout.
def __init__(self):
ansible_options = dict(
nvsram=dict(type="str", required=True),
firmware=dict(type="str", required=True),
wait_for_completion=dict(type="bool", default=False),
ignore_health_check=dict(type="bool", default=False))
super(NetAppESeriesFirmware, self).__init__(ansible_options=ansible_options,
web_services_version="02.00.0000.0000",
supports_check_mode=True)
args = self.module.params
self.nvsram = args["nvsram"]
self.firmware = args["firmware"]
self.wait_for_completion = args["wait_for_completion"]
self.ignore_health_check = args["ignore_health_check"]
self.nvsram_name = None
self.firmware_name = None
self.is_bundle_cache = None
self.firmware_version_cache = None
self.nvsram_version_cache = None
self.upgrade_required = False
self.upgrade_in_progress = False
self.module_info = dict()
self.nvsram_name = os.path.basename(self.nvsram)
self.firmware_name = os.path.basename(self.firmware)
def is_firmware_bundled(self):
"""Determine whether supplied firmware is bundle."""
if self.is_bundle_cache is None:
with open(self.firmware, "rb") as fh:
signature = fh.read(16).lower()
if b"firmware" in signature:
self.is_bundle_cache = False
elif b"combined_content" in signature:
self.is_bundle_cache = True
else:
self.module.fail_json(msg="Firmware file is invalid. File [%s]. Array [%s]" % (self.firmware, self.ssid))
return self.is_bundle_cache
def firmware_version(self):
"""Retrieve firmware version of the firmware file. Return: bytes string"""
if self.firmware_version_cache is None:
# Search firmware file for bundle or firmware version
with open(self.firmware, "rb") as fh:
line = fh.readline()
while line:
if self.is_firmware_bundled():
if b'displayableAttributeList=' in line:
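# The displayableAttributeList line is assumed to hold comma-separated 'KEY|value' pairs (for example 'VERSION|08.42.30.05'); only the VERSION entry is used.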
for item in line[25:].split(b','):
key, value = item.split(b"|")
if key == b'VERSION':
self.firmware_version_cache = value.strip(b"\n")
break
elif b"Version:" in line:
self.firmware_version_cache = line.split()[-1].strip(b"\n")
break
line = fh.readline()
else:
self.module.fail_json(msg="Failed to determine firmware version. File [%s]. Array [%s]." % (self.firmware, self.ssid))
return self.firmware_version_cache
def nvsram_version(self):
"""Retrieve NVSRAM version of the NVSRAM file. Return: byte string"""
if self.nvsram_version_cache is None:
with open(self.nvsram, "rb") as fh:
line = fh.readline()
while line:
if b".NVSRAM Configuration Number" in line:
self.nvsram_version_cache = line.split(b'"')[-2]
break
line = fh.readline()
else:
self.module.fail_json(msg="Failed to determine NVSRAM file version. File [%s]. Array [%s]." % (self.nvsram, self.ssid))
return self.nvsram_version_cache
def check_system_health(self):
"""Ensure E-Series storage system is healthy. Works for both embedded and proxy web services."""
try:
rc, request_id = self.request("health-check", method="POST", data={"onlineOnly": True, "storageDeviceIds": [self.ssid]})
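# Poll the health-check request until it reports completion, failing if it runs past HEALTH_CHECK_TIMEOUT_MS.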
while True:
sleep(1)
try:
rc, response = self.request("health-check?requestId=%s" % request_id["requestId"])
if not response["healthCheckRunning"]:
return response["results"][0]["successful"]
elif int(response["results"][0]["processingTimeMS"]) > self.HEALTH_CHECK_TIMEOUT_MS:
self.module.fail_json(msg="Health check failed to complete. Array Id [%s]." % self.ssid)
except Exception as error:
self.module.fail_json(msg="Failed to retrieve health check status. Array Id [%s]. Error[%s]." % (self.ssid, to_native(error)))
except Exception as error:
self.module.fail_json(msg="Failed to initiate health check. Array Id [%s]. Error[%s]." % (self.ssid, to_native(error)))
self.module.fail_json(msg="Failed to retrieve health check status. Array Id [%s]. Error[%s]." % self.ssid)
def embedded_check_compatibility(self):
"""Verify files are compatible with E-Series storage system."""
self.embedded_check_nvsram_compatibility()
self.embedded_check_bundle_compatibility()
def embedded_check_nvsram_compatibility(self):
"""Verify the provided NVSRAM is compatible with E-Series storage system."""
# Check nvsram compatibility
try:
files = [("nvsramimage", self.nvsram_name, self.nvsram)]
headers, data = create_multipart_formdata(files=files)
rc, nvsram_compatible = self.request("firmware/embedded-firmware/%s/nvsram-compatibility-check" % self.ssid,
method="POST", data=data, headers=headers)
if not nvsram_compatible["signatureTestingPassed"]:
self.module.fail_json(msg="Invalid NVSRAM file. File [%s]." % self.nvsram)
if not nvsram_compatible["fileCompatible"]:
self.module.fail_json(msg="Incompatible NVSRAM file. File [%s]." % self.nvsram)
# Determine whether nvsram is required
for module in nvsram_compatible["versionContents"]:
if module["bundledVersion"] != module["onboardVersion"]:
self.upgrade_required = True
# Update bundle info
self.module_info.update({module["module"]: {"onboard_version": module["onboardVersion"], "bundled_version": module["bundledVersion"]}})
except Exception as error:
self.module.fail_json(msg="Failed to retrieve NVSRAM compatibility results. Array Id [%s]. Error[%s]." % (self.ssid, to_native(error)))
def embedded_check_bundle_compatibility(self):
"""Verify the provided firmware bundle is compatible with E-Series storage system."""
try:
files = [("files[]", "blob", self.firmware)]
headers, data = create_multipart_formdata(files=files, send_8kb=True)
rc, bundle_compatible = self.request("firmware/embedded-firmware/%s/bundle-compatibility-check" % self.ssid,
method="POST", data=data, headers=headers)
# Determine whether valid and compatible firmware
if not bundle_compatible["signatureTestingPassed"]:
self.module.fail_json(msg="Invalid firmware bundle file. File [%s]." % self.firmware)
if not bundle_compatible["fileCompatible"]:
self.module.fail_json(msg="Incompatible firmware bundle file. File [%s]." % self.firmware)
# Determine whether upgrade is required
for module in bundle_compatible["versionContents"]:
bundle_module_version = module["bundledVersion"].split(".")
onboard_module_version = module["onboardVersion"].split(".")
version_minimum_length = min(len(bundle_module_version), len(onboard_module_version))
if bundle_module_version[:version_minimum_length] != onboard_module_version[:version_minimum_length]:
self.upgrade_required = True
# Check whether downgrade is being attempted
bundle_version = module["bundledVersion"].split(".")[:2]
onboard_version = module["onboardVersion"].split(".")[:2]
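# Illustrative example: onboard '08.52.00.00' with bundled '08.42.30.05' is rejected here as a downgrade, since only the first two version components are compared.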
if bundle_version[0] < onboard_version[0] or (bundle_version[0] == onboard_version[0] and bundle_version[1] < onboard_version[1]):
self.module.fail_json(msg="Downgrades are not permitted. onboard [%s] > bundled[%s]."
% (module["onboardVersion"], module["bundledVersion"]))
# Update bundle info
self.module_info.update({module["module"]: {"onboard_version": module["onboardVersion"], "bundled_version": module["bundledVersion"]}})
except Exception as error:
self.module.fail_json(msg="Failed to retrieve bundle compatibility results. Array Id [%s]. Error[%s]." % (self.ssid, to_native(error)))
def embedded_wait_for_upgrade(self):
"""Wait for SANtricity Web Services Embedded to be available after reboot."""
for count in range(0, self.REBOOT_TIMEOUT_SEC):
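# Poll roughly once per second; if the reported firmware and NVSRAM versions never match the uploaded files, the for/else below fails with a timeout.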
try:
rc, response = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData" % self.ssid)
bundle_display = [m["versionString"] for m in response[0]["extendedSAData"]["codeVersions"] if m["codeModule"] == "bundleDisplay"][0]
if rc == 200 and six.b(bundle_display) == self.firmware_version() and six.b(response[0]["nvsramVersion"]) == self.nvsram_version():
self.upgrade_in_progress = False
break
except Exception as error:
pass
sleep(1)
else:
self.module.fail_json(msg="Timeout waiting for Santricity Web Services Embedded. Array [%s]" % self.ssid)
def embedded_upgrade(self):
"""Upload and activate both firmware and NVSRAM."""
files = [("nvsramfile", self.nvsram_name, self.nvsram),
("dlpfile", self.firmware_name, self.firmware)]
headers, data = create_multipart_formdata(files=files)
try:
rc, response = self.request("firmware/embedded-firmware?staged=false&nvsram=true", method="POST", data=data, headers=headers)
self.upgrade_in_progress = True
except Exception as error:
self.module.fail_json(msg="Failed to upload and activate firmware. Array Id [%s]. Error[%s]." % (self.ssid, to_native(error)))
if self.wait_for_completion:
self.embedded_wait_for_upgrade()
def proxy_check_nvsram_compatibility(self):
"""Verify nvsram is compatible with E-Series storage system."""
data = {"storageDeviceIds": [self.ssid]}
try:
rc, check = self.request("firmware/compatibility-check", method="POST", data=data)
for count in range(0, int((self.FIRMWARE_COMPATIBILITY_CHECK_TIMEOUT_SEC / 5))):
sleep(5)
try:
rc, response = self.request("firmware/compatibility-check?requestId=%s" % check["requestId"])
if not response["checkRunning"]:
for result in response["results"][0]["nvsramFiles"]:
if result["filename"] == self.nvsram_name:
return
self.module.fail_json(msg="NVSRAM is not compatible. NVSRAM [%s]. Array [%s]." % (self.nvsram_name, self.ssid))
except Exception as error:
self.module.fail_json(msg="Failed to retrieve NVSRAM status update from proxy. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
except Exception as error:
self.module.fail_json(msg="Failed to receive NVSRAM compatibility information. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
def proxy_check_firmware_compatibility(self):
"""Verify firmware is compatible with E-Series storage system."""
data = {"storageDeviceIds": [self.ssid]}
try:
rc, check = self.request("firmware/compatibility-check", method="POST", data=data)
for count in range(0, int((self.FIRMWARE_COMPATIBILITY_CHECK_TIMEOUT_SEC / 5))):
sleep(5)
try:
rc, response = self.request("firmware/compatibility-check?requestId=%s" % check["requestId"])
if not response["checkRunning"]:
for result in response["results"][0]["cfwFiles"]:
if result["filename"] == self.firmware_name:
return
self.module.fail_json(msg="Firmware bundle is not compatible. firmware [%s]. Array [%s]." % (self.firmware_name, self.ssid))
except Exception as error:
self.module.fail_json(msg="Failed to retrieve firmware status update from proxy. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
except Exception as error:
self.module.fail_json(msg="Failed to receive firmware compatibility information. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
def proxy_upload_and_check_compatibility(self):
"""Ensure firmware is uploaded and verify compatibility."""
try:
rc, cfw_files = self.request("firmware/cfw-files")
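# Each for/else below uploads a file only when the loop finds no matching filename already on the proxy (the 'else' runs when the loop does not break).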
for file in cfw_files:
if file["filename"] == self.nvsram_name:
break
else:
fields = [("validate", "true")]
files = [("firmwareFile", self.nvsram_name, self.nvsram)]
headers, data = create_multipart_formdata(files=files, fields=fields)
try:
rc, response = self.request("firmware/upload", method="POST", data=data, headers=headers)
except Exception as error:
self.module.fail_json(msg="Failed to upload NVSRAM file. File [%s]. Array [%s]. Error [%s]."
% (self.nvsram_name, self.ssid, to_native(error)))
self.proxy_check_nvsram_compatibility()
for file in cfw_files:
if file["filename"] == self.firmware_name:
break
else:
fields = [("validate", "true")]
files = [("firmwareFile", self.firmware_name, self.firmware)]
headers, data = create_multipart_formdata(files=files, fields=fields)
try:
rc, response = self.request("firmware/upload", method="POST", data=data, headers=headers)
except Exception as error:
self.module.fail_json(msg="Failed to upload firmware bundle file. File [%s]. Array [%s]. Error [%s]."
% (self.firmware_name, self.ssid, to_native(error)))
self.proxy_check_firmware_compatibility()
except Exception as error:
self.module.fail_json(msg="Failed to retrieve existing existing firmware files. Error [%s]" % to_native(error))
def proxy_check_upgrade_required(self):
"""Staging is required to collect firmware information from the web services proxy."""
# Verify controller consistency and get firmware versions
try:
# Retrieve current bundle version
if self.is_firmware_bundled():
rc, response = self.request("storage-systems/%s/graph/xpath-filter?query=/controller/codeVersions[codeModule='bundleDisplay']" % self.ssid)
current_firmware_version = six.b(response[0]["versionString"])
else:
rc, response = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData/fwVersion" % self.ssid)
current_firmware_version = six.b(response[0])
# Determine whether upgrade is required
if current_firmware_version != self.firmware_version():
current = current_firmware_version.split(b".")[:2]
upgrade = self.firmware_version().split(b".")[:2]
if current[0] < upgrade[0] or (current[0] == upgrade[0] and current[1] <= upgrade[1]):
self.upgrade_required = True
else:
self.module.fail_json(msg="Downgrades are not permitted. Firmware [%s]. Array [%s]." % (self.firmware, self.ssid))
except Exception as error:
self.module.fail_json(msg="Failed to retrieve controller firmware information. Array [%s]. Error [%s]" % (self.ssid, to_native(error)))
# Determine current NVSRAM version and whether change is required
try:
rc, response = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData/nvsramVersion" % self.ssid)
if six.b(response[0]) != self.nvsram_version():
self.upgrade_required = True
except Exception as error:
self.module.fail_json(msg="Failed to retrieve storage system's NVSRAM version. Array [%s]. Error [%s]" % (self.ssid, to_native(error)))
def proxy_wait_for_upgrade(self, request_id):
"""Wait for SANtricity Web Services Proxy to report upgrade complete"""
if self.is_firmware_bundled():
while True:
try:
sleep(5)
rc, response = self.request("batch/cfw-upgrade/%s" % request_id)
if response["status"] == "complete":
self.upgrade_in_progress = False
break
elif response["status"] in ["failed", "cancelled"]:
self.module.fail_json(msg="Firmware upgrade failed to complete. Array [%s]." % self.ssid)
except Exception as error:
self.module.fail_json(msg="Failed to retrieve firmware upgrade status. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
else:
for count in range(0, int(self.REBOOT_TIMEOUT_SEC / 5)):
try:
sleep(5)
rc_firmware, firmware = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData/fwVersion" % self.ssid)
rc_nvsram, nvsram = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData/nvsramVersion" % self.ssid)
if six.b(firmware[0]) == self.firmware_version() and six.b(nvsram[0]) == self.nvsram_version():
self.upgrade_in_progress = False
break
except Exception as error:
pass
else:
self.module.fail_json(msg="Timed out waiting for firmware upgrade to complete. Array [%s]." % self.ssid)
def proxy_upgrade(self):
"""Activate previously uploaded firmware related files."""
request_id = None
if self.is_firmware_bundled():
data = {"activate": True,
"firmwareFile": self.firmware_name,
"nvsramFile": self.nvsram_name,
"systemInfos": [{"systemId": self.ssid,
"allowNonOptimalActivation": self.ignore_health_check}]}
try:
rc, response = self.request("batch/cfw-upgrade", method="POST", data=data)
request_id = response["requestId"]
except Exception as error:
self.module.fail_json(msg="Failed to initiate firmware upgrade. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
else:
data = {"stageFirmware": False,
"skipMelCheck": self.ignore_health_check,
"cfwFile": self.firmware_name,
"nvsramFile": self.nvsram_name}
try:
rc, response = self.request("storage-systems/%s/cfw-upgrade" % self.ssid, method="POST", data=data)
request_id = response["requestId"]
except Exception as error:
self.module.fail_json(msg="Failed to initiate firmware upgrade. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
self.upgrade_in_progress = True
if self.wait_for_completion:
self.proxy_wait_for_upgrade(request_id)
def apply(self):
"""Upgrade controller firmware."""
self.check_system_health()
# Verify firmware compatibility and whether changes are required
if self.is_embedded():
self.embedded_check_compatibility()
else:
self.proxy_check_upgrade_required()
# This will upload the firmware files to the web services proxy but not to the controller
if self.upgrade_required:
self.proxy_upload_and_check_compatibility()
# Perform upgrade
if self.upgrade_required and not self.module.check_mode:
if self.is_embedded():
self.embedded_upgrade()
else:
self.proxy_upgrade()
self.module.exit_json(changed=self.upgrade_required, upgrade_in_process=self.upgrade_in_progress, status=self.module_info)
def main():
firmware = NetAppESeriesFirmware()
firmware.apply()
if __name__ == '__main__':
main()

View file

@ -0,0 +1,414 @@
#!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: netapp_e_flashcache
author: Kevin Hulquest (@hulquest)
short_description: NetApp E-Series manage SSD caches
description:
- Create or remove SSD caches on a NetApp E-Series storage array.
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
type: bool
ssid:
required: true
description:
- The ID of the array to manage (as configured on the web services proxy).
state:
required: true
description:
- Whether the specified SSD cache should exist or not.
choices: ['present', 'absent']
default: present
name:
required: true
description:
- The name of the SSD cache to manage
io_type:
description:
- The type of workload to optimize the cache for.
choices: ['filesystem','database','media']
default: filesystem
disk_count:
description:
- The minimum number of disks to use for building the cache. The cache will be expanded if this number exceeds the number of disks already in place
size_unit:
description:
- The unit to be applied to size arguments
choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
default: gb
cache_size_min:
description:
- The minimum size (in size_units) of the ssd cache. The cache will be expanded if this exceeds the current size of the cache.
'''
EXAMPLES = """
- name: Flash Cache
netapp_e_flashcache:
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
name: SSDCacheBuiltByAnsible
"""
RETURN = """
msg:
description: Success message
returned: success
type: str
sample: json for newly created flash cache
"""
import json
import logging
import sys
import traceback
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves import reduce
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import open_url
def request(url, data=None, headers=None, method='GET', use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
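# Thin wrapper around open_url: returns (status_code, decoded JSON body) and raises on HTTP errors (status >= 400) unless ignore_errors is set.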
try:
r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth)
except HTTPError as err:
r = err.fp
try:
raw_data = r.read()
if raw_data:
data = json.loads(raw_data)
else:
raw_data = None
except Exception:
if ignore_errors:
pass
else:
raise Exception(raw_data)
resp_code = r.getcode()
if resp_code >= 400 and not ignore_errors:
raise Exception(resp_code, data)
else:
return resp_code, data
class NetAppESeriesFlashCache(object):
def __init__(self):
self.name = None
self.log_mode = None
self.log_path = None
self.api_url = None
self.api_username = None
self.api_password = None
self.ssid = None
self.validate_certs = None
self.disk_count = None
self.size_unit = None
self.cache_size_min = None
self.io_type = None
self.driveRefs = None
self.state = None
self._size_unit_map = dict(
bytes=1,
b=1,
kb=1024,
mb=1024 ** 2,
gb=1024 ** 3,
tb=1024 ** 4,
pb=1024 ** 5,
eb=1024 ** 6,
zb=1024 ** 7,
yb=1024 ** 8
)
argument_spec = basic_auth_argument_spec()
argument_spec.update(dict(
api_username=dict(type='str', required=True),
api_password=dict(type='str', required=True, no_log=True),
api_url=dict(type='str', required=True),
state=dict(default='present', choices=['present', 'absent'], type='str'),
ssid=dict(required=True, type='str'),
name=dict(required=True, type='str'),
disk_count=dict(type='int'),
disk_refs=dict(type='list'),
cache_size_min=dict(type='int'),
io_type=dict(default='filesystem', choices=['filesystem', 'database', 'media']),
size_unit=dict(default='gb', choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'],
type='str'),
criteria_disk_phy_type=dict(choices=['sas', 'sas4k', 'fibre', 'fibre520b', 'scsi', 'sata', 'pata'],
type='str'),
log_mode=dict(type='str'),
log_path=dict(type='str'),
))
self.module = AnsibleModule(
argument_spec=argument_spec,
required_if=[
],
mutually_exclusive=[
],
# TODO: update validation for various selection criteria
supports_check_mode=True
)
self.__dict__.update(self.module.params)
# logging setup
self._logger = logging.getLogger(self.__class__.__name__)
self.debug = self._logger.debug
if self.log_mode == 'file' and self.log_path:
logging.basicConfig(level=logging.DEBUG, filename=self.log_path)
elif self.log_mode == 'stderr':
logging.basicConfig(level=logging.DEBUG, stream=sys.stderr)
self.post_headers = dict(Accept="application/json")
self.post_headers['Content-Type'] = 'application/json'
def get_candidate_disks(self, disk_count, size_unit='gb', capacity=None):
self.debug("getting candidate disks...")
drives_req = dict(
driveCount=disk_count,
sizeUnit=size_unit,
driveType='ssd',
)
if capacity:
drives_req['targetUsableCapacity'] = capacity
(rc, drives_resp) = request(self.api_url + "/storage-systems/%s/drives" % (self.ssid),
data=json.dumps(drives_req), headers=self.post_headers, method='POST',
url_username=self.api_username, url_password=self.api_password,
validate_certs=self.validate_certs)
if rc == 204:
self.module.fail_json(msg='Cannot find disks to match requested criteria for ssd cache')
disk_ids = [d['id'] for d in drives_resp]
bytes = reduce(lambda s, d: s + int(d['usableCapacity']), drives_resp, 0)
return (disk_ids, bytes)
def create_cache(self):
(disk_ids, bytes) = self.get_candidate_disks(disk_count=self.disk_count, size_unit=self.size_unit,
capacity=self.cache_size_min)
self.debug("creating ssd cache...")
create_fc_req = dict(
driveRefs=disk_ids,
name=self.name
)
(rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache" % (self.ssid),
data=json.dumps(create_fc_req), headers=self.post_headers, method='POST',
url_username=self.api_username, url_password=self.api_password,
validate_certs=self.validate_certs)
def update_cache(self):
self.debug('updating flash cache config...')
update_fc_req = dict(
name=self.name,
configType=self.io_type
)
(rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache/configure" % (self.ssid),
data=json.dumps(update_fc_req), headers=self.post_headers, method='POST',
url_username=self.api_username, url_password=self.api_password,
validate_certs=self.validate_certs)
def delete_cache(self):
self.debug('deleting flash cache...')
(rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache" % (self.ssid), method='DELETE',
url_username=self.api_username, url_password=self.api_password,
validate_certs=self.validate_certs, ignore_errors=True)
@property
def needs_more_disks(self):
if len(self.cache_detail['driveRefs']) < self.disk_count:
self.debug("needs resize: current disk count %s < requested requested count %s",
len(self.cache_detail['driveRefs']), self.disk_count)
return True
@property
def needs_less_disks(self):
if len(self.cache_detail['driveRefs']) > self.disk_count:
self.debug("needs resize: current disk count %s < requested requested count %s",
len(self.cache_detail['driveRefs']), self.disk_count)
return True
@property
def current_size_bytes(self):
return int(self.cache_detail['fcDriveInfo']['fcWithDrives']['usedCapacity'])
@property
def requested_size_bytes(self):
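# For example, cache_size_min=100 with size_unit='gb' translates to 100 * 1024 ** 3 bytes.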
if self.cache_size_min:
return self.cache_size_min * self._size_unit_map[self.size_unit]
else:
return 0
@property
def needs_more_capacity(self):
if self.current_size_bytes < self.requested_size_bytes:
self.debug("needs resize: current capacity %sb is less than requested minimum %sb",
self.current_size_bytes, self.requested_size_bytes)
return True
@property
def needs_resize(self):
return self.needs_more_disks or self.needs_more_capacity or self.needs_less_disks
def resize_cache(self):
# increase up to disk count first, then iteratively add disks until we meet requested capacity
# TODO: perform this calculation in check mode
current_disk_count = len(self.cache_detail['driveRefs'])
proposed_new_disks = 0
proposed_additional_bytes = 0
proposed_disk_ids = []
if self.needs_more_disks:
proposed_disk_count = self.disk_count - current_disk_count
(disk_ids, bytes) = self.get_candidate_disks(disk_count=proposed_disk_count)
proposed_additional_bytes = bytes
proposed_disk_ids = disk_ids
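# Grow the candidate disk set one disk at a time until the projected capacity (current + candidates) satisfies the requested minimum.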
while self.current_size_bytes + proposed_additional_bytes < self.requested_size_bytes:
proposed_new_disks += 1
(disk_ids, bytes) = self.get_candidate_disks(disk_count=proposed_new_disks)
proposed_disk_ids = disk_ids
proposed_additional_bytes = bytes
add_drives_req = dict(
driveRef=proposed_disk_ids
)
self.debug("adding drives to flash-cache...")
(rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache/addDrives" % (self.ssid),
data=json.dumps(add_drives_req), headers=self.post_headers, method='POST',
url_username=self.api_username, url_password=self.api_password,
validate_certs=self.validate_certs)
elif self.needs_less_disks and self.driveRefs:
rm_drives = dict(driveRef=self.driveRefs)
(rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache/removeDrives" % (self.ssid),
data=json.dumps(rm_drives), headers=self.post_headers, method='POST',
url_username=self.api_username, url_password=self.api_password,
validate_certs=self.validate_certs)
def apply(self):
result = dict(changed=False)
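# Look up the current flash cache; a 404 simply means no cache exists yet, while any other unexpected status is raised.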
(rc, cache_resp) = request(self.api_url + "/storage-systems/%s/flash-cache" % (self.ssid),
url_username=self.api_username, url_password=self.api_password,
validate_certs=self.validate_certs, ignore_errors=True)
if rc == 200:
self.cache_detail = cache_resp
else:
self.cache_detail = None
if rc not in [200, 404]:
raise Exception(
"Unexpected error code %s fetching flash cache detail. Response data was %s" % (rc, cache_resp))
if self.state == 'present':
if self.cache_detail:
# TODO: verify parameters against detail for changes
if self.cache_detail['name'] != self.name:
self.debug("CHANGED: name differs")
result['changed'] = True
if self.cache_detail['flashCacheBase']['configType'] != self.io_type:
self.debug("CHANGED: io_type differs")
result['changed'] = True
if self.needs_resize:
self.debug("CHANGED: resize required")
result['changed'] = True
else:
self.debug("CHANGED: requested state is 'present' but cache does not exist")
result['changed'] = True
else: # requested state is absent
if self.cache_detail:
self.debug("CHANGED: requested state is 'absent' but cache exists")
result['changed'] = True
if not result['changed']:
self.debug("no changes, exiting...")
self.module.exit_json(**result)
if self.module.check_mode:
self.debug("changes pending in check mode, exiting early...")
self.module.exit_json(**result)
if self.state == 'present':
if not self.cache_detail:
self.create_cache()
else:
if self.needs_resize:
self.resize_cache()
# run update here as well, since io_type can't be set on creation
self.update_cache()
elif self.state == 'absent':
self.delete_cache()
# TODO: include other details about the storage pool (size, type, id, etc)
self.module.exit_json(changed=result['changed'], **self.resp)
def main():
sp = NetAppESeriesFlashCache()
try:
sp.apply()
except Exception as e:
sp.debug("Exception in apply(): \n%s", to_native(e))
sp.module.fail_json(msg="Failed to create flash cache. Error[%s]" % to_native(e),
exception=traceback.format_exc())
if __name__ == '__main__':
main()

View file

@ -0,0 +1,157 @@
#!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_global
short_description: NetApp E-Series manage global settings configuration
description:
- Allow the user to configure several of the global settings associated with an E-Series storage-system
author: Michael Price (@lmprice)
extends_documentation_fragment:
- netapp.ontap.netapp.eseries
options:
name:
description:
- Set the name of the E-Series storage-system
- This label/name doesn't have to be unique.
- May be up to 30 characters in length.
aliases:
- label
log_path:
description:
- A local path to a file to be used for debug logging
required: no
notes:
- Check mode is supported.
- This module requires Web Services API v1.3 or newer.
'''
EXAMPLES = """
- name: Set the storage-system name
netapp_e_global:
name: myArrayName
api_url: "10.1.1.1:8443"
api_username: "admin"
api_password: "myPass"
"""
RETURN = """
msg:
description: Success message
returned: on success
type: str
sample: The settings have been updated.
name:
description:
- The current name/label of the storage-system.
returned: on success
sample: myArrayName
type: str
"""
import json
import logging
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import request, eseries_host_argument_spec
from ansible.module_utils._text import to_native
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
class GlobalSettings(object):
def __init__(self):
argument_spec = eseries_host_argument_spec()
argument_spec.update(dict(
name=dict(type='str', required=False, aliases=['label']),
log_path=dict(type='str', required=False),
))
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, )
args = self.module.params
self.name = args['name']
self.ssid = args['ssid']
self.url = args['api_url']
self.creds = dict(url_password=args['api_password'],
validate_certs=args['validate_certs'],
url_username=args['api_username'], )
self.check_mode = self.module.check_mode
log_path = args['log_path']
# logging setup
self._logger = logging.getLogger(self.__class__.__name__)
if log_path:
logging.basicConfig(
level=logging.DEBUG, filename=log_path, filemode='w',
format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
if not self.url.endswith('/'):
self.url += '/'
if self.name and len(self.name) > 30:
self.module.fail_json(msg="The provided name is invalid, it must be < 30 characters in length.")
def get_name(self):
try:
(rc, result) = request(self.url + 'storage-systems/%s' % self.ssid, headers=HEADERS, **self.creds)
if result['status'] in ['offline', 'neverContacted']:
self.module.fail_json(msg="This storage-system is offline! Array Id [%s]." % (self.ssid))
return result['name']
except Exception as err:
self.module.fail_json(msg="Connection failure! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
def update_name(self):
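# Only issue the configuration update when the requested name differs from the current one (and we are not in check mode).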
name = self.get_name()
update = False
if self.name != name:
update = True
body = dict(name=self.name)
if update and not self.check_mode:
try:
(rc, result) = request(self.url + 'storage-systems/%s/configuration' % self.ssid, method='POST',
data=json.dumps(body), headers=HEADERS, **self.creds)
self._logger.info("Set name to %s.", result['name'])
# This is going to catch cases like a connection failure
except Exception as err:
self.module.fail_json(
msg="We failed to set the storage-system name! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
return update
def update(self):
update = self.update_name()
name = self.get_name()
self.module.exit_json(msg="The requested settings have been updated.", changed=update, name=name)
def __call__(self, *args, **kwargs):
self.update()
def main():
settings = GlobalSettings()
settings()
if __name__ == '__main__':
main()

View file

@ -0,0 +1,536 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2018, NetApp Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_host
short_description: NetApp E-Series manage eseries hosts
description: Create, update, remove hosts on NetApp E-series storage arrays
author:
- Kevin Hulquest (@hulquest)
- Nathan Swartz (@ndswartz)
extends_documentation_fragment:
- netapp.ontap.netapp.eseries
options:
name:
description:
- If the host doesn't yet exist, the label/name to assign at creation time.
- If the hosts already exists, this will be used to uniquely identify the host to make any required changes
required: True
aliases:
- label
state:
description:
- Set to absent to remove an existing host
- Set to present to modify or create a new host definition
choices:
- absent
- present
default: present
host_type:
description:
- This is the type of host to be mapped
- Required when C(state=present)
- Either one of the following names can be specified (Linux DM-MP, VMWare, Windows, Windows Clustered), or a
host type index which can be found in M(netapp_e_facts)
type: str
aliases:
- host_type_index
ports:
description:
- A list of host ports you wish to associate with the host.
- Host ports are uniquely identified by their WWN or IQN. Their assignments to a particular host are
uniquely identified by a label and these must be unique.
required: False
suboptions:
type:
description:
- The interface type of the port to define.
- Acceptable choices depend on the capabilities of the target hardware/software platform.
required: true
choices:
- iscsi
- sas
- fc
- ib
- nvmeof
- ethernet
label:
description:
- A unique label to assign to this port assignment.
required: true
port:
description:
- The WWN or IQN of the hostPort to assign to this port definition.
required: true
force_port:
description:
- Allow ports that are already assigned to be re-assigned to your current host
required: false
type: bool
group:
description:
- The unique identifier of the host-group you want the host to be a member of; this is used for clustering.
required: False
aliases:
- cluster
log_path:
description:
- A local path to a file to be used for debug logging
required: False
'''
EXAMPLES = """
- name: Define or update an existing host named 'Host1'
netapp_e_host:
ssid: "1"
api_url: "10.113.1.101:8443"
api_username: admin
api_password: myPassword
name: "Host1"
state: present
host_type_index: Linux DM-MP
ports:
- type: 'iscsi'
label: 'PORT_1'
port: 'iqn.1996-04.de.suse:01:56f86f9bd1fe'
- type: 'fc'
label: 'FC_1'
port: '10:00:FF:7C:FF:FF:FF:01'
- type: 'fc'
label: 'FC_2'
port: '10:00:FF:7C:FF:FF:FF:00'
- name: Ensure a host named 'Host2' doesn't exist
netapp_e_host:
ssid: "1"
api_url: "10.113.1.101:8443"
api_username: admin
api_password: myPassword
name: "Host2"
state: absent
"""
RETURN = """
msg:
description:
- A user-readable description of the actions performed.
returned: on success
type: str
sample: The host has been created.
id:
description:
- the unique identifier of the host on the E-Series storage-system
returned: on success when state=present
type: str
sample: 00000000600A098000AAC0C3003004700AD86A52
version_added: "2.6"
ssid:
description:
- the unique identifier of the E-Series storage-system with the current api
returned: on success
type: str
sample: 1
version_added: "2.6"
api_url:
description:
- the url of the API that this request was processed by
returned: on success
type: str
sample: https://webservices.example.com:8443
version_added: "2.6"
"""
import json
import logging
import re
from pprint import pformat
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import request, eseries_host_argument_spec
from ansible.module_utils._text import to_native
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
class Host(object):
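# Friendly names for a few host types mapped to the indexes this module assumes for them; any other host type can be given as a numeric index (see netapp_e_facts).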
HOST_TYPE_INDEXES = {"linux dm-mp": 28, "vmware": 10, "windows": 1, "windows clustered": 8}
def __init__(self):
argument_spec = eseries_host_argument_spec()
argument_spec.update(dict(
state=dict(type='str', default='present', choices=['absent', 'present']),
group=dict(type='str', required=False, aliases=['cluster']),
ports=dict(type='list', required=False),
force_port=dict(type='bool', default=False),
name=dict(type='str', required=True, aliases=['label']),
host_type_index=dict(type='str', aliases=['host_type']),
log_path=dict(type='str', required=False),
))
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
self.check_mode = self.module.check_mode
args = self.module.params
self.group = args['group']
self.ports = args['ports']
self.force_port = args['force_port']
self.name = args['name']
self.state = args['state']
self.ssid = args['ssid']
self.url = args['api_url']
self.user = args['api_username']
self.pwd = args['api_password']
self.certs = args['validate_certs']
self.post_body = dict()
self.all_hosts = list()
self.host_obj = dict()
self.newPorts = list()
self.portsForUpdate = list()
self.portsForRemoval = list()
# Update host type with the corresponding index
host_type = args['host_type_index']
if host_type:
host_type = host_type.lower()
if host_type in [key.lower() for key in list(self.HOST_TYPE_INDEXES.keys())]:
self.host_type_index = self.HOST_TYPE_INDEXES[host_type]
elif host_type.isdigit():
self.host_type_index = int(args['host_type_index'])
else:
self.module.fail_json(msg="host_type must be either a host type name or host type index found integer"
" the documentation.")
# logging setup
self._logger = logging.getLogger(self.__class__.__name__)
if args['log_path']:
logging.basicConfig(
level=logging.DEBUG, filename=args['log_path'], filemode='w',
format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
if not self.url.endswith('/'):
self.url += '/'
# Ensure when state==present then host_type_index is defined
if self.state == "present" and self.host_type_index is None:
self.module.fail_json(msg="Host_type_index is required when state=='present'. Array Id: [%s]" % self.ssid)
# Fix port representation if they are provided with colons
if self.ports is not None:
for port in self.ports:
port['label'] = port['label'].lower()
port['type'] = port['type'].lower()
port['port'] = port['port'].lower()
# Determine whether the address is a 16-byte WWPN and, if so, strip the colon separators and any '0x' prefix
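# e.g. '10:00:ff:7c:ff:ff:ff:00' becomes '1000ff7cffffff00' (illustrative address).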
if re.match(r'^(0x)?[0-9a-f]{16}$', port['port'].replace(':', '')):
port['port'] = port['port'].replace(':', '').replace('0x', '')
def valid_host_type(self):
host_types = None
try:
(rc, host_types) = request(self.url + 'storage-systems/%s/host-types' % self.ssid, url_password=self.pwd,
url_username=self.user, validate_certs=self.certs, headers=HEADERS)
except Exception as err:
self.module.fail_json(
msg="Failed to get host types. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
try:
match = list(filter(lambda host_type: host_type['index'] == self.host_type_index, host_types))[0]
return True
except IndexError:
self.module.fail_json(msg="There is no host type with index %s" % self.host_type_index)
def assigned_host_ports(self, apply_unassigning=False):
"""Determine if the hostPorts requested have already been assigned and return list of required used ports."""
used_host_ports = {}
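# Collect requested ports that are currently assigned to other hosts (or assigned to this host under a mismatched label/address), keyed by the owning hostRef, so they can be unassigned when force_port is set.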
for host in self.all_hosts:
if host['label'] != self.name:
for host_port in host['hostSidePorts']:
for port in self.ports:
if port['port'] == host_port["address"] or port['label'] == host_port['label']:
if not self.force_port:
self.module.fail_json(msg="There are no host ports available OR there are not enough"
" unassigned host ports")
else:
# Determine port reference
port_ref = [port["hostPortRef"] for port in host["ports"]
if port["hostPortName"] == host_port["address"]]
port_ref.extend([port["initiatorRef"] for port in host["initiators"]
if port["nodeName"]["iscsiNodeName"] == host_port["address"]])
# Create dictionary of hosts containing list of port references
if host["hostRef"] not in used_host_ports.keys():
used_host_ports.update({host["hostRef"]: port_ref})
else:
used_host_ports[host["hostRef"]].extend(port_ref)
else:
for host_port in host['hostSidePorts']:
for port in self.ports:
if ((host_port['label'] == port['label'] and host_port['address'] != port['port']) or
(host_port['label'] != port['label'] and host_port['address'] == port['port'])):
if not self.force_port:
self.module.fail_json(msg="There are no host ports available OR there are not enough"
" unassigned host ports")
else:
# Determine port reference
port_ref = [port["hostPortRef"] for port in host["ports"]
if port["hostPortName"] == host_port["address"]]
port_ref.extend([port["initiatorRef"] for port in host["initiators"]
if port["nodeName"]["iscsiNodeName"] == host_port["address"]])
# Create dictionary of hosts containing list of port references
if host["hostRef"] not in used_host_ports.keys():
used_host_ports.update({host["hostRef"]: port_ref})
else:
used_host_ports[host["hostRef"]].extend(port_ref)
# Unassign assigned ports
if apply_unassigning:
for host_ref in used_host_ports.keys():
try:
rc, resp = request(self.url + 'storage-systems/%s/hosts/%s' % (self.ssid, host_ref),
url_username=self.user, url_password=self.pwd, headers=HEADERS,
validate_certs=self.certs, method='POST',
data=json.dumps({"portsToRemove": used_host_ports[host_ref]}))
except Exception as err:
self.module.fail_json(msg="Failed to unassign host port. Host Id [%s]. Array Id [%s]. Ports [%s]."
" Error [%s]." % (self.host_obj['id'], self.ssid,
used_host_ports[host_ref], to_native(err)))
return used_host_ports
def group_id(self):
if self.group:
try:
(rc, all_groups) = request(self.url + 'storage-systems/%s/host-groups' % self.ssid,
url_password=self.pwd,
url_username=self.user, validate_certs=self.certs, headers=HEADERS)
except Exception as err:
self.module.fail_json(
msg="Failed to get host groups. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
try:
group_obj = list(filter(lambda group: group['name'] == self.group, all_groups))[0]
return group_obj['id']
except IndexError:
self.module.fail_json(msg="No group with the name: %s exists" % self.group)
else:
# Return the value equivalent of no group
return "0000000000000000000000000000000000000000"
def host_exists(self):
"""Determine if the requested host exists
As a side effect, set the full list of defined hosts in 'all_hosts', and the target host in 'host_obj'.
"""
match = False
all_hosts = list()
try:
(rc, all_hosts) = request(self.url + 'storage-systems/%s/hosts' % self.ssid, url_password=self.pwd,
url_username=self.user, validate_certs=self.certs, headers=HEADERS)
except Exception as err:
self.module.fail_json(
msg="Failed to determine host existence. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
# Augment the host objects
for host in all_hosts:
for port in host['hostSidePorts']:
port['type'] = port['type'].lower()
port['address'] = port['address'].lower()
port['label'] = port['label'].lower()
# Augment hostSidePorts with their ID (this is an omission in the API)
ports = dict((port['label'], port['id']) for port in host['ports'])
ports.update((port['label'], port['id']) for port in host['initiators'])
for host_side_port in host['hostSidePorts']:
if host_side_port['label'] in ports:
host_side_port['id'] = ports[host_side_port['label']]
if host['label'] == self.name:
self.host_obj = host
match = True
self.all_hosts = all_hosts
return match
def needs_update(self):
"""Determine whether we need to update the Host object
As a side effect, we will set the ports that we need to update (portsForUpdate), and the ports we need to add
(newPorts), on self.
"""
changed = False
if (self.host_obj["clusterRef"].lower() != self.group_id().lower() or
self.host_obj["hostTypeIndex"] != self.host_type_index):
self._logger.info("Either hostType or the clusterRef doesn't match, an update is required.")
changed = True
current_host_ports = dict((port["id"], {"type": port["type"], "port": port["address"], "label": port["label"]})
for port in self.host_obj["hostSidePorts"])
if self.ports:
for port in self.ports:
for current_host_port_id in current_host_ports.keys():
if port == current_host_ports[current_host_port_id]:
current_host_ports.pop(current_host_port_id)
break
elif port["port"] == current_host_ports[current_host_port_id]["port"]:
if self.port_on_diff_host(port) and not self.force_port:
self.module.fail_json(msg="The port you specified [%s] is associated with a different host."
" Specify force_port as True or try a different port spec" % port)
if (port["label"] != current_host_ports[current_host_port_id]["label"] or
port["type"] != current_host_ports[current_host_port_id]["type"]):
current_host_ports.pop(current_host_port_id)
self.portsForUpdate.append({"portRef": current_host_port_id, "port": port["port"],
"label": port["label"], "hostRef": self.host_obj["hostRef"]})
break
else:
self.newPorts.append(port)
self.portsForRemoval = list(current_host_ports.keys())
changed = any([self.newPorts, self.portsForUpdate, self.portsForRemoval, changed])
return changed
def port_on_diff_host(self, arg_port):
""" Checks to see if a passed in port arg is present on a different host """
for host in self.all_hosts:
# Only check 'other' hosts
if host['name'] != self.name:
for port in host['hostSidePorts']:
# Check if the port label is found in the port dict list of each host
if arg_port['label'] == port['label'] or arg_port['port'] == port['address']:
self.other_host = host
return True
return False
def update_host(self):
self._logger.info("Beginning the update for host=%s.", self.name)
if self.ports:
# Remove ports that need reassigning from their current host.
self.assigned_host_ports(apply_unassigning=True)
self.post_body["portsToUpdate"] = self.portsForUpdate
self.post_body["ports"] = self.newPorts
self._logger.info("Requested ports: %s", pformat(self.ports))
else:
self._logger.info("No host ports were defined.")
if self.group:
self.post_body['groupId'] = self.group_id()
self.post_body['hostType'] = dict(index=self.host_type_index)
api = self.url + 'storage-systems/%s/hosts/%s' % (self.ssid, self.host_obj['id'])
self._logger.info("POST => url=%s, body=%s.", api, pformat(self.post_body))
if not self.check_mode:
try:
(rc, self.host_obj) = request(api, url_username=self.user, url_password=self.pwd, headers=HEADERS,
validate_certs=self.certs, method='POST', data=json.dumps(self.post_body))
except Exception as err:
self.module.fail_json(
msg="Failed to update host. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
payload = self.build_success_payload(self.host_obj)
self.module.exit_json(changed=True, **payload)
def create_host(self):
self._logger.info("Creating host definition.")
# Remove ports that need reassigning from their current host.
self.assigned_host_ports(apply_unassigning=True)
# needs_reassignment = False
post_body = dict(
name=self.name,
hostType=dict(index=self.host_type_index),
groupId=self.group_id(),
)
if self.ports:
post_body.update(ports=self.ports)
api = self.url + "storage-systems/%s/hosts" % self.ssid
self._logger.info('POST => url=%s, body=%s', api, pformat(post_body))
if not self.check_mode:
if not self.host_exists():
try:
(rc, self.host_obj) = request(api, method='POST', url_username=self.user, url_password=self.pwd, validate_certs=self.certs,
data=json.dumps(post_body), headers=HEADERS)
except Exception as err:
self.module.fail_json(
msg="Failed to create host. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
else:
payload = self.build_success_payload(self.host_obj)
self.module.exit_json(changed=False, msg="Host already exists. Id [%s]. Host [%s]." % (self.ssid, self.name), **payload)
payload = self.build_success_payload(self.host_obj)
self.module.exit_json(changed=True, msg='Host created.', **payload)
def remove_host(self):
try:
(rc, resp) = request(self.url + "storage-systems/%s/hosts/%s" % (self.ssid, self.host_obj['id']),
method='DELETE',
url_username=self.user, url_password=self.pwd, validate_certs=self.certs)
except Exception as err:
self.module.fail_json(
msg="Failed to remove host. Host[%s]. Array Id [%s]. Error [%s]." % (self.host_obj['id'],
self.ssid,
to_native(err)))
def build_success_payload(self, host=None):
keys = ['id']
if host is not None:
result = dict((key, host[key]) for key in keys)
else:
result = dict()
result['ssid'] = self.ssid
result['api_url'] = self.url
return result
def apply(self):
if self.state == 'present':
if self.host_exists():
if self.needs_update() and self.valid_host_type():
self.update_host()
else:
payload = self.build_success_payload(self.host_obj)
self.module.exit_json(changed=False, msg="Host already present; no changes required.", **payload)
elif self.valid_host_type():
self.create_host()
else:
payload = self.build_success_payload()
if self.host_exists():
self.remove_host()
self.module.exit_json(changed=True, msg="Host removed.", **payload)
else:
self.module.exit_json(changed=False, msg="Host already absent.", **payload)
def main():
host = Host()
host.apply()
if __name__ == '__main__':
main()

View file

@ -0,0 +1,302 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community"}
DOCUMENTATION = '''
---
module: netapp_e_hostgroup
short_description: NetApp E-Series manage array host groups
author:
- Kevin Hulquest (@hulquest)
- Nathan Swartz (@ndswartz)
description: Create, update or destroy host groups on a NetApp E-Series storage array.
extends_documentation_fragment:
- netapp.ontap.netapp.eseries
options:
state:
required: true
description:
- Whether the specified host group should exist or not.
choices: ["present", "absent"]
name:
required: false
description:
- Name of the host group to manage
- This option is mutually exclusive with I(id).
new_name:
required: false
description:
- Specify this when you need to update the name of a host group
id:
required: false
description:
- Host reference identifier for the host group to manage.
- This option is mutually exclusive with I(name).
hosts:
required: false
description:
- List of host names/labels to add to the group
'''
EXAMPLES = """
- name: Configure Hostgroup
netapp_e_hostgroup:
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
state: present
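    # A hedged sketch of a fuller task, using hypothetical group and host names, that also
    # sets the name and hosts options documented above:
    #
    # - name: Create a host group with two member hosts
    #   netapp_e_hostgroup:
    #     ssid: "{{ ssid }}"
    #     api_url: "{{ netapp_api_url }}"
    #     api_username: "{{ netapp_api_username }}"
    #     api_password: "{{ netapp_api_password }}"
    #     validate_certs: "{{ netapp_api_validate_certs }}"
    #     state: present
    #     name: backup_group
    #     hosts:
    #       - host01
    #       - host02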
"""
RETURN = """
clusterRef:
description: The unique identification value for this object. Other objects may use this reference value to refer to the cluster.
returned: always except when state is absent
type: str
sample: "3233343536373839303132333100000000000000"
confirmLUNMappingCreation:
description: If true, indicates that creation of LUN-to-volume mappings should require careful confirmation from the end-user, since such a mapping
will alter the volume access rights of other clusters, in addition to this one.
returned: always
type: bool
sample: false
hosts:
description: A list of the hosts that are part of the host group after all operations.
returned: always except when state is absent
type: list
sample: ["HostA","HostB"]
id:
description: The id number of the hostgroup
returned: always except when state is absent
type: str
sample: "3233343536373839303132333100000000000000"
isSAControlled:
description: If true, indicates that I/O accesses from this cluster are subject to the storage array's default LUN-to-volume mappings. If false,
indicates that I/O accesses from the cluster are subject to cluster-specific LUN-to-volume mappings.
returned: always except when state is absent
type: bool
sample: false
label:
description: The user-assigned, descriptive label string for the cluster.
returned: always
type: str
sample: "MyHostGroup"
name:
    description: Same as label.
returned: always except when state is absent
type: str
sample: "MyHostGroup"
protectionInformationCapableAccessMethod:
description: This field is true if the host has a PI capable access method.
returned: always except when state is absent
type: bool
sample: true
"""
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import NetAppESeriesModule
from ansible.module_utils._text import to_native
class NetAppESeriesHostGroup(NetAppESeriesModule):
EXPANSION_TIMEOUT_SEC = 10
DEFAULT_DISK_POOL_MINIMUM_DISK_COUNT = 11
def __init__(self):
version = "02.00.0000.0000"
ansible_options = dict(
state=dict(required=True, choices=["present", "absent"], type="str"),
name=dict(required=False, type="str"),
new_name=dict(required=False, type="str"),
id=dict(required=False, type="str"),
hosts=dict(required=False, type="list"))
mutually_exclusive = [["name", "id"]]
super(NetAppESeriesHostGroup, self).__init__(ansible_options=ansible_options,
web_services_version=version,
supports_check_mode=True,
mutually_exclusive=mutually_exclusive)
args = self.module.params
self.state = args["state"]
self.name = args["name"]
self.new_name = args["new_name"]
self.id = args["id"]
self.hosts_list = args["hosts"]
self.current_host_group = None
@property
def hosts(self):
"""Retrieve a list of host reference identifiers should be associated with the host group."""
host_list = []
existing_hosts = []
if self.hosts_list:
try:
rc, existing_hosts = self.request("storage-systems/%s/hosts" % self.ssid)
except Exception as error:
self.module.fail_json(msg="Failed to retrieve hosts information. Array id [%s]. Error[%s]."
% (self.ssid, to_native(error)))
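            # Resolve each requested entry (host id or host name) to its host reference id; the
            # for/else below fails the task when an entry matches no existing host on the array.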
for host in self.hosts_list:
for existing_host in existing_hosts:
if host in existing_host["id"] or host in existing_host["name"]:
host_list.append(existing_host["id"])
break
else:
self.module.fail_json(msg="Expected host does not exist. Array id [%s]. Host [%s]."
% (self.ssid, host))
return host_list
@property
def host_groups(self):
"""Retrieve a list of existing host groups."""
host_groups = []
hosts = []
try:
rc, host_groups = self.request("storage-systems/%s/host-groups" % self.ssid)
rc, hosts = self.request("storage-systems/%s/hosts" % self.ssid)
except Exception as error:
self.module.fail_json(msg="Failed to retrieve host group information. Array id [%s]. Error[%s]."
% (self.ssid, to_native(error)))
host_groups = [{"id": group["clusterRef"], "name": group["name"]} for group in host_groups]
for group in host_groups:
hosts_ids = []
for host in hosts:
if group["id"] == host["clusterRef"]:
hosts_ids.append(host["hostRef"])
group.update({"hosts": hosts_ids})
return host_groups
@property
def current_hosts_in_host_group(self):
"""Retrieve the current hosts associated with the current hostgroup."""
current_hosts = []
for group in self.host_groups:
if (self.name and group["name"] == self.name) or (self.id and group["id"] == self.id):
current_hosts = group["hosts"]
return current_hosts
def unassign_hosts(self, host_list=None):
"""Unassign hosts from host group."""
if host_list is None:
host_list = self.current_host_group["hosts"]
for host_id in host_list:
try:
rc, resp = self.request("storage-systems/%s/hosts/%s/move" % (self.ssid, host_id),
method="POST", data={"group": "0000000000000000000000000000000000000000"})
except Exception as error:
self.module.fail_json(msg="Failed to unassign hosts from host group. Array id [%s]. Host id [%s]."
" Error[%s]." % (self.ssid, host_id, to_native(error)))
def delete_host_group(self, unassign_hosts=True):
"""Delete host group"""
if unassign_hosts:
self.unassign_hosts()
try:
rc, resp = self.request("storage-systems/%s/host-groups/%s" % (self.ssid, self.current_host_group["id"]),
method="DELETE")
except Exception as error:
self.module.fail_json(msg="Failed to delete host group. Array id [%s]. Error[%s]."
% (self.ssid, to_native(error)))
def create_host_group(self):
"""Create host group."""
data = {"name": self.name, "hosts": self.hosts}
response = None
try:
rc, response = self.request("storage-systems/%s/host-groups" % self.ssid, method="POST", data=data)
except Exception as error:
self.module.fail_json(msg="Failed to create host group. Array id [%s]. Error[%s]."
% (self.ssid, to_native(error)))
return response
def update_host_group(self):
"""Update host group."""
data = {"name": self.new_name if self.new_name else self.name,
"hosts": self.hosts}
# unassign hosts that should not be part of the hostgroup
desired_host_ids = self.hosts
for host in self.current_hosts_in_host_group:
if host not in desired_host_ids:
self.unassign_hosts([host])
update_response = None
try:
rc, update_response = self.request("storage-systems/%s/host-groups/%s"
% (self.ssid, self.current_host_group["id"]), method="POST", data=data)
except Exception as error:
self.module.fail_json(msg="Failed to create host group. Array id [%s]. Error[%s]."
% (self.ssid, to_native(error)))
return update_response
def apply(self):
"""Apply desired host group state to the storage array."""
changes_required = False
# Search for existing host group match
for group in self.host_groups:
if (self.id and group["id"] == self.id) or (self.name and group["name"] == self.name):
self.current_host_group = group
# Determine whether changes are required
if self.state == "present":
if self.current_host_group:
if (self.new_name and self.new_name != self.name) or self.hosts != self.current_host_group["hosts"]:
changes_required = True
else:
if not self.name:
self.module.fail_json(msg="The option name must be supplied when creating a new host group."
" Array id [%s]." % self.ssid)
changes_required = True
elif self.current_host_group:
changes_required = True
# Apply any necessary changes
msg = ""
if changes_required and not self.module.check_mode:
msg = "No changes required."
if self.state == "present":
if self.current_host_group:
if ((self.new_name and self.new_name != self.name) or
(self.hosts != self.current_host_group["hosts"])):
msg = self.update_host_group()
else:
msg = self.create_host_group()
elif self.current_host_group:
self.delete_host_group()
msg = "Host group deleted. Array Id [%s]. Host Name [%s]. Host Id [%s]."\
% (self.ssid, self.current_host_group["name"], self.current_host_group["id"])
self.module.exit_json(msg=msg, changed=changes_required)
def main():
hostgroup = NetAppESeriesHostGroup()
hostgroup.apply()
if __name__ == "__main__":
main()

View file

@ -0,0 +1,398 @@
#!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_iscsi_interface
short_description: NetApp E-Series manage iSCSI interface configuration
description:
- Configure settings of an E-Series iSCSI interface
author: Michael Price (@lmprice)
extends_documentation_fragment:
- netapp.ontap.netapp.eseries
options:
controller:
description:
- The controller that owns the port you want to configure.
- Controller names are presented alphabetically, with the first controller as A,
the second as B, and so on.
- Current hardware models have either 1 or 2 available controllers, but that is not a guaranteed hard
limitation and could change in the future.
required: yes
choices:
- A
- B
name:
description:
- The channel of the port to modify the configuration of.
- The list of choices is not necessarily comprehensive. It depends on the number of ports
that are available in the system.
- The numerical value represents the number of the channel (typically from left to right on the HIC),
beginning with a value of 1.
required: yes
aliases:
- channel
state:
description:
- When enabled, the provided configuration will be utilized.
- When disabled, the IPv4 configuration will be cleared and IPv4 connectivity disabled.
choices:
- enabled
- disabled
default: enabled
address:
description:
- The IPv4 address to assign to the interface.
- Should be specified in xx.xx.xx.xx form.
- Mutually exclusive with I(config_method=dhcp)
subnet_mask:
description:
- The subnet mask to utilize for the interface.
- Should be specified in xx.xx.xx.xx form.
- Mutually exclusive with I(config_method=dhcp)
gateway:
description:
- The IPv4 gateway address to utilize for the interface.
- Should be specified in xx.xx.xx.xx form.
- Mutually exclusive with I(config_method=dhcp)
config_method:
description:
- The configuration method type to use for this interface.
- dhcp is mutually exclusive with I(address), I(subnet_mask), and I(gateway).
choices:
- dhcp
- static
default: dhcp
mtu:
description:
- The maximum transmission units (MTU), in bytes.
- This allows you to configure a larger value for the MTU, in order to enable jumbo frames
(any value > 1500).
- Generally, it is necessary to have your host, switches, and other components not only support jumbo
              frames, but also have them configured properly. Therefore, unless you know what you're doing, it's best to
leave this at the default.
default: 1500
aliases:
- max_frame_size
log_path:
description:
- A local path to a file to be used for debug logging
required: no
notes:
- Check mode is supported.
- The interface settings are applied synchronously, but changes to the interface itself (receiving a new IP address
      via dhcp, etc.) can take seconds or minutes longer to take effect.
- This module will not be useful/usable on an E-Series system without any iSCSI interfaces.
- This module requires a Web Services API version of >= 1.3.
'''
EXAMPLES = """
- name: Configure the first port on the A controller with a static IPv4 address
netapp_e_iscsi_interface:
name: "1"
controller: "A"
config_method: static
address: "192.168.1.100"
subnet_mask: "255.255.255.0"
gateway: "192.168.1.1"
ssid: "1"
api_url: "10.1.1.1:8443"
api_username: "admin"
api_password: "myPass"
- name: Disable ipv4 connectivity for the second port on the B controller
netapp_e_iscsi_interface:
name: "2"
controller: "B"
state: disabled
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
- name: Enable jumbo frames for the first 4 ports on controller A
netapp_e_iscsi_interface:
name: "{{ item | int }}"
controller: "A"
state: enabled
mtu: 9000
config_method: dhcp
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
loop:
- 1
- 2
- 3
- 4
"""
RETURN = """
msg:
description: Success message
returned: on success
type: str
sample: The interface settings have been updated.
enabled:
description:
- Indicates whether IPv4 connectivity has been enabled or disabled.
- This does not necessarily indicate connectivity. If dhcp was enabled without a dhcp server, for instance,
it is unlikely that the configuration will actually be valid.
returned: on success
sample: True
type: bool
"""
import json
import logging
from pprint import pformat
import re
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import request, eseries_host_argument_spec
from ansible.module_utils._text import to_native
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
class IscsiInterface(object):
def __init__(self):
argument_spec = eseries_host_argument_spec()
argument_spec.update(dict(
controller=dict(type='str', required=True, choices=['A', 'B']),
name=dict(type='int', aliases=['channel']),
state=dict(type='str', required=False, default='enabled', choices=['enabled', 'disabled']),
address=dict(type='str', required=False),
subnet_mask=dict(type='str', required=False),
gateway=dict(type='str', required=False),
config_method=dict(type='str', required=False, default='dhcp', choices=['dhcp', 'static']),
mtu=dict(type='int', default=1500, required=False, aliases=['max_frame_size']),
log_path=dict(type='str', required=False),
))
required_if = [
["config_method", "static", ["address", "subnet_mask"]],
]
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if, )
args = self.module.params
self.controller = args['controller']
self.name = args['name']
self.mtu = args['mtu']
self.state = args['state']
self.address = args['address']
self.subnet_mask = args['subnet_mask']
self.gateway = args['gateway']
self.config_method = args['config_method']
self.ssid = args['ssid']
self.url = args['api_url']
self.creds = dict(url_password=args['api_password'],
validate_certs=args['validate_certs'],
url_username=args['api_username'], )
self.check_mode = self.module.check_mode
self.post_body = dict()
self.controllers = list()
log_path = args['log_path']
# logging setup
self._logger = logging.getLogger(self.__class__.__name__)
if log_path:
logging.basicConfig(
level=logging.DEBUG, filename=log_path, filemode='w',
format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
if not self.url.endswith('/'):
self.url += '/'
if self.mtu < 1500 or self.mtu > 9000:
self.module.fail_json(msg="The provided mtu is invalid, it must be > 1500 and < 9000 bytes.")
if self.config_method == 'dhcp' and any([self.address, self.subnet_mask, self.gateway]):
self.module.fail_json(msg='A config_method of dhcp is mutually exclusive with the address,'
' subnet_mask, and gateway options.')
# A relatively primitive regex to validate that the input is formatted like a valid ip address
address_regex = re.compile(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}')
if self.address and not address_regex.match(self.address):
self.module.fail_json(msg="An invalid ip address was provided for address.")
if self.subnet_mask and not address_regex.match(self.subnet_mask):
self.module.fail_json(msg="An invalid ip address was provided for subnet_mask.")
if self.gateway and not address_regex.match(self.gateway):
self.module.fail_json(msg="An invalid ip address was provided for gateway.")
@property
def interfaces(self):
ifaces = list()
try:
(rc, ifaces) = request(self.url + 'storage-systems/%s/graph/xpath-filter?query=/controller/hostInterfaces'
% self.ssid, headers=HEADERS, **self.creds)
except Exception as err:
self.module.fail_json(
msg="Failed to retrieve defined host interfaces. Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
# Filter out non-iSCSI interfaces
ifaces = [iface['iscsi'] for iface in ifaces if iface['interfaceType'] == 'iscsi']
return ifaces
def get_controllers(self):
"""Retrieve a mapping of controller labels to their references
{
'A': '070000000000000000000001',
'B': '070000000000000000000002',
}
:return: the controllers defined on the system
"""
controllers = list()
try:
(rc, controllers) = request(self.url + 'storage-systems/%s/graph/xpath-filter?query=/controller/id'
% self.ssid, headers=HEADERS, **self.creds)
except Exception as err:
self.module.fail_json(
msg="Failed to retrieve controller list! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
controllers.sort()
controllers_dict = {}
i = ord('A')
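        # Assign labels in sorted order: the first controller reference becomes 'A', the next 'B', and so on.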
for controller in controllers:
label = chr(i)
controllers_dict[label] = controller
i += 1
return controllers_dict
def fetch_target_interface(self):
interfaces = self.interfaces
for iface in interfaces:
if iface['channel'] == self.name and self.controllers[self.controller] == iface['controllerId']:
return iface
channels = sorted(set((str(iface['channel'])) for iface in interfaces
if self.controllers[self.controller] == iface['controllerId']))
self.module.fail_json(msg="The requested channel of %s is not valid. Valid channels include: %s."
% (self.name, ", ".join(channels)))
def make_update_body(self, target_iface):
body = dict(iscsiInterface=target_iface['id'])
update_required = False
self._logger.info("Requested state=%s.", self.state)
self._logger.info("config_method: current=%s, requested=%s",
target_iface['ipv4Data']['ipv4AddressConfigMethod'], self.config_method)
if self.state == 'enabled':
settings = dict()
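            # Collect only the settings that differ from the current interface configuration; each
            # value is supplied as a single-element list, matching the request body built below.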
if not target_iface['ipv4Enabled']:
update_required = True
settings['ipv4Enabled'] = [True]
if self.mtu != target_iface['interfaceData']['ethernetData']['maximumFramePayloadSize']:
update_required = True
settings['maximumFramePayloadSize'] = [self.mtu]
if self.config_method == 'static':
ipv4Data = target_iface['ipv4Data']['ipv4AddressData']
if ipv4Data['ipv4Address'] != self.address:
update_required = True
settings['ipv4Address'] = [self.address]
if ipv4Data['ipv4SubnetMask'] != self.subnet_mask:
update_required = True
settings['ipv4SubnetMask'] = [self.subnet_mask]
if self.gateway is not None and ipv4Data['ipv4GatewayAddress'] != self.gateway:
update_required = True
settings['ipv4GatewayAddress'] = [self.gateway]
if target_iface['ipv4Data']['ipv4AddressConfigMethod'] != 'configStatic':
update_required = True
settings['ipv4AddressConfigMethod'] = ['configStatic']
elif (target_iface['ipv4Data']['ipv4AddressConfigMethod'] != 'configDhcp'):
update_required = True
settings.update(dict(ipv4Enabled=[True],
ipv4AddressConfigMethod=['configDhcp']))
body['settings'] = settings
else:
if target_iface['ipv4Enabled']:
update_required = True
body['settings'] = dict(ipv4Enabled=[False])
self._logger.info("Update required ?=%s", update_required)
self._logger.info("Update body: %s", pformat(body))
return update_required, body
def update(self):
self.controllers = self.get_controllers()
if self.controller not in self.controllers:
self.module.fail_json(msg="The provided controller name is invalid. Valid controllers: %s."
% ", ".join(self.controllers.keys()))
iface_before = self.fetch_target_interface()
update_required, body = self.make_update_body(iface_before)
if update_required and not self.check_mode:
try:
url = (self.url +
'storage-systems/%s/symbol/setIscsiInterfaceProperties' % self.ssid)
(rc, result) = request(url, method='POST', data=json.dumps(body), headers=HEADERS, timeout=300,
ignore_errors=True, **self.creds)
# We could potentially retry this a few times, but it's probably a rare enough case (unless a playbook
# is cancelled mid-flight), that it isn't worth the complexity.
if rc == 422 and result['retcode'] in ['busy', '3']:
self.module.fail_json(
msg="The interface is currently busy (probably processing a previously requested modification"
" request). This operation cannot currently be completed. Array Id [%s]. Error [%s]."
% (self.ssid, result))
# Handle authentication issues, etc.
elif rc != 200:
self.module.fail_json(
msg="Failed to modify the interface! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(result)))
self._logger.debug("Update request completed successfully.")
# This is going to catch cases like a connection failure
except Exception as err:
self.module.fail_json(
msg="Connection failure: we failed to modify the interface! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
iface_after = self.fetch_target_interface()
self.module.exit_json(msg="The interface settings have been updated.", changed=update_required,
enabled=iface_after['ipv4Enabled'])
def __call__(self, *args, **kwargs):
self.update()
def main():
iface = IscsiInterface()
iface()
if __name__ == '__main__':
main()

View file

@ -0,0 +1,294 @@
#!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_iscsi_target
short_description: NetApp E-Series manage iSCSI target configuration
description:
- Configure the settings of an E-Series iSCSI target
author: Michael Price (@lmprice)
extends_documentation_fragment:
- netapp.ontap.netapp.eseries
options:
name:
description:
- The name/alias to assign to the iSCSI target.
- This alias is often used by the initiator software in order to make an iSCSI target easier to identify.
aliases:
- alias
ping:
description:
- Enable ICMP ping responses from the configured iSCSI ports.
type: bool
default: yes
chap_secret:
description:
- Enable Challenge-Handshake Authentication Protocol (CHAP), utilizing this value as the password.
- When this value is specified, we will always trigger an update (changed=True). We have no way of verifying
whether or not the password has changed.
- The chap secret may only use ascii characters with values between 32 and 126 decimal.
- The chap secret must be no less than 12 characters, but no greater than 57 characters in length.
- The chap secret is cleared when not specified or an empty string.
aliases:
- chap
- password
unnamed_discovery:
description:
            - When an initiator initiates a discovery session to a target port, it is considered an unnamed
discovery session if the iSCSI target iqn is not specified in the request.
- This option may be disabled to increase security if desired.
type: bool
default: yes
log_path:
description:
- A local path (on the Ansible controller), to a file to be used for debug logging.
required: no
notes:
- Check mode is supported.
- Some of the settings are dependent on the settings applied to the iSCSI interfaces. These can be configured using
M(netapp_e_iscsi_interface).
- This module requires a Web Services API version of >= 1.3.
'''
EXAMPLES = """
- name: Enable ping responses and unnamed discovery sessions for all iSCSI ports
netapp_e_iscsi_target:
api_url: "https://localhost:8443/devmgr/v2"
api_username: admin
api_password: myPassword
ssid: "1"
validate_certs: no
name: myTarget
ping: yes
unnamed_discovery: yes
- name: Set the target alias and the CHAP secret
netapp_e_iscsi_target:
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
name: myTarget
chap: password1234
"""
RETURN = """
msg:
description: Success message
returned: on success
type: str
sample: The iSCSI target settings have been updated.
alias:
description:
- The alias assigned to the iSCSI target.
returned: on success
sample: myArray
type: str
iqn:
description:
- The iqn (iSCSI Qualified Name), assigned to the iSCSI target.
returned: on success
sample: iqn.1992-08.com.netapp:2800.000a132000b006d2000000005a0e8f45
type: str
"""
import json
import logging
from pprint import pformat
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import request, eseries_host_argument_spec
from ansible.module_utils._text import to_native
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
class IscsiTarget(object):
def __init__(self):
argument_spec = eseries_host_argument_spec()
argument_spec.update(dict(
name=dict(type='str', required=False, aliases=['alias']),
ping=dict(type='bool', required=False, default=True),
chap_secret=dict(type='str', required=False, aliases=['chap', 'password'], no_log=True),
unnamed_discovery=dict(type='bool', required=False, default=True),
log_path=dict(type='str', required=False),
))
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, )
args = self.module.params
self.name = args['name']
self.ping = args['ping']
self.chap_secret = args['chap_secret']
self.unnamed_discovery = args['unnamed_discovery']
self.ssid = args['ssid']
self.url = args['api_url']
self.creds = dict(url_password=args['api_password'],
validate_certs=args['validate_certs'],
url_username=args['api_username'], )
self.check_mode = self.module.check_mode
self.post_body = dict()
self.controllers = list()
log_path = args['log_path']
# logging setup
self._logger = logging.getLogger(self.__class__.__name__)
if log_path:
logging.basicConfig(
level=logging.DEBUG, filename=log_path, filemode='w',
format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
if not self.url.endswith('/'):
self.url += '/'
if self.chap_secret:
if len(self.chap_secret) < 12 or len(self.chap_secret) > 57:
self.module.fail_json(msg="The provided CHAP secret is not valid, it must be between 12 and 57"
" characters in length.")
for c in self.chap_secret:
ordinal = ord(c)
if ordinal < 32 or ordinal > 126:
self.module.fail_json(msg="The provided CHAP secret is not valid, it may only utilize ascii"
" characters with decimal values between 32 and 126.")
@property
def target(self):
"""Provide information on the iSCSI Target configuration
Sample:
{
'alias': 'myCustomName',
'ping': True,
'unnamed_discovery': True,
'chap': False,
'iqn': 'iqn.1992-08.com.netapp:2800.000a132000b006d2000000005a0e8f45',
}
"""
target = dict()
try:
(rc, data) = request(self.url + 'storage-systems/%s/graph/xpath-filter?query=/storagePoolBundle/target'
% self.ssid, headers=HEADERS, **self.creds)
# This likely isn't an iSCSI-enabled system
if not data:
self.module.fail_json(
msg="This storage-system doesn't appear to have iSCSI interfaces. Array Id [%s]." % (self.ssid))
data = data[0]
chap = any(
[auth for auth in data['configuredAuthMethods']['authMethodData'] if auth['authMethod'] == 'chap'])
target.update(dict(alias=data['alias']['iscsiAlias'],
iqn=data['nodeName']['iscsiNodeName'],
chap=chap))
(rc, data) = request(self.url + 'storage-systems/%s/graph/xpath-filter?query=/sa/iscsiEntityData'
% self.ssid, headers=HEADERS, **self.creds)
data = data[0]
target.update(dict(ping=data['icmpPingResponseEnabled'],
unnamed_discovery=data['unnamedDiscoverySessionsEnabled']))
except Exception as err:
self.module.fail_json(
msg="Failed to retrieve the iSCSI target information. Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
return target
def apply_iscsi_settings(self):
"""Update the iSCSI target alias and CHAP settings"""
update = False
target = self.target
body = dict()
if self.name is not None and self.name != target['alias']:
update = True
body['alias'] = self.name
# If the CHAP secret was provided, we trigger an update.
if self.chap_secret:
update = True
body.update(dict(enableChapAuthentication=True,
chapSecret=self.chap_secret))
# If no secret was provided, then we disable chap
elif target['chap']:
update = True
body.update(dict(enableChapAuthentication=False))
if update and not self.check_mode:
try:
request(self.url + 'storage-systems/%s/iscsi/target-settings' % self.ssid, method='POST',
data=json.dumps(body), headers=HEADERS, **self.creds)
except Exception as err:
self.module.fail_json(
msg="Failed to update the iSCSI target settings. Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
return update
def apply_target_changes(self):
update = False
target = self.target
body = dict()
if self.ping != target['ping']:
update = True
body['icmpPingResponseEnabled'] = self.ping
if self.unnamed_discovery != target['unnamed_discovery']:
update = True
body['unnamedDiscoverySessionsEnabled'] = self.unnamed_discovery
self._logger.info(pformat(body))
if update and not self.check_mode:
try:
request(self.url + 'storage-systems/%s/iscsi/entity' % self.ssid, method='POST',
data=json.dumps(body), timeout=60, headers=HEADERS, **self.creds)
except Exception as err:
self.module.fail_json(
msg="Failed to update the iSCSI target settings. Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
return update
def update(self):
update = self.apply_iscsi_settings()
update = self.apply_target_changes() or update
target = self.target
data = dict((key, target[key]) for key in target if key in ['iqn', 'alias'])
self.module.exit_json(msg="The interface settings have been updated.", changed=update, **data)
def __call__(self, *args, **kwargs):
self.update()
def main():
iface = IscsiTarget()
iface()
if __name__ == '__main__':
main()

View file

@ -0,0 +1,390 @@
#!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_ldap
short_description: NetApp E-Series manage LDAP integration to use for authentication
description:
- Configure an E-Series system to allow authentication via an LDAP server
author: Michael Price (@lmprice)
extends_documentation_fragment:
- netapp.ontap.netapp.eseries
options:
state:
description:
- Enable/disable LDAP support on the system. Disabling will clear out any existing defined domains.
choices:
- present
- absent
default: present
identifier:
description:
- This is a unique identifier for the configuration (for cases where there are multiple domains configured).
- If this is not specified, but I(state=present), we will utilize a default value of 'default'.
username:
description:
- This is the user account that will be used for querying the LDAP server.
- "Example: CN=MyBindAcct,OU=ServiceAccounts,DC=example,DC=com"
required: yes
aliases:
- bind_username
password:
description:
- This is the password for the bind user account.
required: yes
aliases:
- bind_password
attributes:
description:
- The user attributes that should be considered for the group to role mapping.
- Typically this is used with something like 'memberOf', and a user's access is tested against group
membership or lack thereof.
default: memberOf
server:
description:
- This is the LDAP server url.
            - The connection string should be specified using the ldap or ldaps protocol along with the port
information.
aliases:
- server_url
required: yes
name:
description:
- The domain name[s] that will be utilized when authenticating to identify which domain to utilize.
            - Defaults to the DNS name of the I(server).
- The only requirement is that the name[s] be resolvable.
- "Example: user@example.com"
required: no
search_base:
description:
- The search base is used to find group memberships of the user.
- "Example: ou=users,dc=example,dc=com"
required: yes
role_mappings:
description:
- This is where you specify which groups should have access to what permissions for the
storage-system.
- For example, all users in group A will be assigned all 4 available roles, which will allow access
to all the management functionality of the system (super-user). Those in group B only have the
storage.monitor role, which will allow only read-only access.
- This is specified as a mapping of regular expressions to a list of roles. See the examples.
            - The roles that will be assigned to the group/groups matching the provided regex.
- storage.admin allows users full read/write access to storage objects and operations.
- storage.monitor allows users read-only access to storage objects and operations.
- support.admin allows users access to hardware, diagnostic information, the Major Event
Log, and other critical support-related functionality, but not the storage configuration.
- security.admin allows users access to authentication/authorization configuration, as well
as the audit log configuration, and certification management.
required: yes
user_attribute:
description:
- This is the attribute we will use to match the provided username when a user attempts to
authenticate.
default: sAMAccountName
log_path:
description:
- A local path to a file to be used for debug logging
required: no
notes:
- Check mode is supported.
- This module allows you to define one or more LDAP domains identified uniquely by I(identifier) to use for
authentication. Authorization is determined by I(role_mappings), in that different groups of users may be given
      different (or no) access to certain aspects of the system and API.
- The local user accounts will still be available if the LDAP server becomes unavailable/inaccessible.
- Generally, you'll need to get the details of your organization's LDAP server before you'll be able to configure
the system for using LDAP authentication; every implementation is likely to be very different.
- This API is currently only supported with the Embedded Web Services API v2.0 and higher, or the Web Services Proxy
v3.0 and higher.
'''
EXAMPLES = '''
- name: Disable LDAP authentication
netapp_e_ldap:
api_url: "10.1.1.1:8443"
api_username: "admin"
api_password: "myPass"
ssid: "1"
state: absent
- name: Remove the 'default' LDAP domain configuration
netapp_e_ldap:
state: absent
identifier: default
- name: Define a new LDAP domain, utilizing defaults where possible
netapp_e_ldap:
state: present
bind_username: "CN=MyBindAccount,OU=ServiceAccounts,DC=example,DC=com"
bind_password: "mySecretPass"
server: "ldap://example.com:389"
search_base: 'OU=Users,DC=example,DC=com'
role_mappings:
".*dist-dev-storage.*":
- storage.admin
- security.admin
- support.admin
- storage.monitor
'''
RETURN = """
msg:
description: Success message
returned: on success
type: str
sample: The ldap settings have been updated.
"""
import json
import logging
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import request, eseries_host_argument_spec
from ansible.module_utils._text import to_native
class Ldap(object):
NO_CHANGE_MSG = "No changes were necessary."
def __init__(self):
argument_spec = eseries_host_argument_spec()
argument_spec.update(dict(
state=dict(type='str', required=False, default='present',
choices=['present', 'absent']),
identifier=dict(type='str', required=False, ),
username=dict(type='str', required=False, aliases=['bind_username']),
password=dict(type='str', required=False, aliases=['bind_password'], no_log=True),
name=dict(type='list', required=False, ),
server=dict(type='str', required=False, aliases=['server_url']),
search_base=dict(type='str', required=False, ),
role_mappings=dict(type='dict', required=False, ),
user_attribute=dict(type='str', required=False, default='sAMAccountName'),
attributes=dict(type='list', default=['memberOf'], required=False, ),
log_path=dict(type='str', required=False),
))
required_if = [
["state", "present", ["username", "password", "server", "search_base", "role_mappings", ]]
]
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if)
args = self.module.params
self.ldap = args['state'] == 'present'
self.identifier = args['identifier']
self.username = args['username']
self.password = args['password']
self.names = args['name']
self.server = args['server']
self.search_base = args['search_base']
self.role_mappings = args['role_mappings']
self.user_attribute = args['user_attribute']
self.attributes = args['attributes']
self.ssid = args['ssid']
self.url = args['api_url']
self.creds = dict(url_password=args['api_password'],
validate_certs=args['validate_certs'],
url_username=args['api_username'],
timeout=60)
self.check_mode = self.module.check_mode
log_path = args['log_path']
# logging setup
self._logger = logging.getLogger(self.__class__.__name__)
if log_path:
logging.basicConfig(
level=logging.DEBUG, filename=log_path, filemode='w',
format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
if not self.url.endswith('/'):
self.url += '/'
self.embedded = None
self.base_path = None
def make_configuration(self):
if not self.identifier:
self.identifier = 'default'
if not self.names:
parts = urlparse.urlparse(self.server)
netloc = parts.netloc
if ':' in netloc:
netloc = netloc.split(':')[0]
self.names = [netloc]
roles = list()
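        # Flatten the role_mappings option ({regex: [role, ...]}) into the list of
        # {groupRegex, ignoreCase, name} entries used for the domain's roleMapCollection below.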
for regex in self.role_mappings:
for role in self.role_mappings[regex]:
roles.append(dict(groupRegex=regex,
ignoreCase=True,
name=role))
domain = dict(id=self.identifier,
ldapUrl=self.server,
bindLookupUser=dict(user=self.username, password=self.password),
roleMapCollection=roles,
groupAttributes=self.attributes,
names=self.names,
searchBase=self.search_base,
userAttribute=self.user_attribute,
)
return domain
def is_embedded(self):
"""Determine whether or not we're using the embedded or proxy implementation of Web Services"""
if self.embedded is None:
url = self.url
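            # Query the version-independent 'about' endpoint under /devmgr/utils/; the embedded
            # Web Services reports runningAsProxy as false, while the proxy reports true.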
try:
parts = urlparse.urlparse(url)
parts = parts._replace(path='/devmgr/utils/')
url = urlparse.urlunparse(parts)
(rc, result) = request(url + 'about', **self.creds)
self.embedded = not result['runningAsProxy']
except Exception as err:
self._logger.exception("Failed to retrieve the About information.")
self.module.fail_json(msg="Failed to determine the Web Services implementation type!"
" Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
return self.embedded
def get_full_configuration(self):
try:
(rc, result) = request(self.url + self.base_path, **self.creds)
return result
except Exception as err:
self._logger.exception("Failed to retrieve the LDAP configuration.")
self.module.fail_json(msg="Failed to retrieve LDAP configuration! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
def get_configuration(self, identifier):
try:
(rc, result) = request(self.url + self.base_path + '%s' % (identifier), ignore_errors=True, **self.creds)
if rc == 200:
return result
elif rc == 404:
return None
else:
self.module.fail_json(msg="Failed to retrieve LDAP configuration! Array Id [%s]. Error [%s]."
% (self.ssid, result))
except Exception as err:
self._logger.exception("Failed to retrieve the LDAP configuration.")
self.module.fail_json(msg="Failed to retrieve LDAP configuration! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
def update_configuration(self):
# Define a new domain based on the user input
domain = self.make_configuration()
        # This is the domain configuration currently defined for the given identifier, if any
current = self.get_configuration(self.identifier)
update = current != domain
msg = "No changes were necessary for [%s]." % self.identifier
self._logger.info("Is updated: %s", update)
if update and not self.check_mode:
msg = "The configuration changes were made for [%s]." % self.identifier
try:
if current is None:
api = self.base_path + 'addDomain'
else:
api = self.base_path + '%s' % (domain['id'])
(rc, result) = request(self.url + api, method='POST', data=json.dumps(domain), **self.creds)
except Exception as err:
self._logger.exception("Failed to modify the LDAP configuration.")
self.module.fail_json(msg="Failed to modify LDAP configuration! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
return msg, update
def clear_single_configuration(self, identifier=None):
if identifier is None:
identifier = self.identifier
configuration = self.get_configuration(identifier)
updated = False
msg = self.NO_CHANGE_MSG
if configuration:
updated = True
msg = "The LDAP domain configuration for [%s] was cleared." % identifier
if not self.check_mode:
try:
(rc, result) = request(self.url + self.base_path + '%s' % identifier, method='DELETE', **self.creds)
except Exception as err:
self.module.fail_json(msg="Failed to remove LDAP configuration! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
return msg, updated
def clear_configuration(self):
configuration = self.get_full_configuration()
updated = False
msg = self.NO_CHANGE_MSG
if configuration['ldapDomains']:
updated = True
msg = "The LDAP configuration for all domains was cleared."
if not self.check_mode:
try:
(rc, result) = request(self.url + self.base_path, method='DELETE', ignore_errors=True, **self.creds)
                    # Older versions of the NetApp E-Series REST API do not provide a call to remove all existing configs
if rc == 405:
for config in configuration['ldapDomains']:
self.clear_single_configuration(config['id'])
except Exception as err:
self.module.fail_json(msg="Failed to clear LDAP configuration! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
return msg, updated
def get_base_path(self):
embedded = self.is_embedded()
if embedded:
return 'storage-systems/%s/ldap/' % self.ssid
else:
return '/ldap/'
def update(self):
self.base_path = self.get_base_path()
if self.ldap:
msg, update = self.update_configuration()
elif self.identifier:
msg, update = self.clear_single_configuration()
else:
msg, update = self.clear_configuration()
self.module.exit_json(msg=msg, changed=update, )
def __call__(self, *args, **kwargs):
self.update()
def main():
settings = Ldap()
settings()
if __name__ == '__main__':
main()

View file

@ -0,0 +1,284 @@
#!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_lun_mapping
author:
- Kevin Hulquest (@hulquest)
- Nathan Swartz (@ndswartz)
short_description: NetApp E-Series create, delete, or modify lun mappings
description:
- Create, delete, or modify mappings between a volume and a targeted host/host+ group.
extends_documentation_fragment:
- netapp.ontap.netapp.eseries
options:
state:
description:
- Present will ensure the mapping exists, absent will remove the mapping.
required: True
choices: ["present", "absent"]
target:
description:
            - The name of the host or hostgroup you wish to assign to the mapping.
- If omitted, the default hostgroup is used.
- If the supplied I(volume_name) is associated with a different target, it will be updated to what is supplied here.
required: False
volume_name:
description:
- The name of the volume you wish to include in the mapping.
required: True
aliases:
- volume
lun:
description:
- The LUN value you wish to give the mapping.
- If the supplied I(volume_name) is associated with a different LUN, it will be updated to what is supplied here.
            - The LUN value will be determined by the storage-system when not specified.
required: no
target_type:
description:
            - This option specifies whether the target should be a host or a group of hosts.
            - Only necessary when the target name is used for both a host and a group of hosts.
choices:
- host
- group
required: no
'''
EXAMPLES = '''
---
- name: Map volume1 to the host target host1
netapp_e_lun_mapping:
ssid: 1
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: no
state: present
target: host1
volume: volume1
- name: Delete the lun mapping between volume1 and host1
netapp_e_lun_mapping:
ssid: 1
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: yes
state: absent
target: host1
volume: volume1
'''
RETURN = '''
msg:
description: success of the module
returned: always
type: str
sample: Lun mapping is complete
'''
import json
import logging
from pprint import pformat
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import request, eseries_host_argument_spec
from ansible.module_utils._text import to_native
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json"
}
class LunMapping(object):
def __init__(self):
argument_spec = eseries_host_argument_spec()
argument_spec.update(dict(
state=dict(required=True, choices=["present", "absent"]),
target=dict(required=False, default=None),
volume_name=dict(required=True, aliases=["volume"]),
lun=dict(type="int", required=False),
target_type=dict(required=False, choices=["host", "group"])))
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
args = self.module.params
self.state = args["state"] in ["present"]
self.target = args["target"]
self.volume = args["volume_name"]
self.lun = args["lun"]
self.target_type = args["target_type"]
self.ssid = args["ssid"]
self.url = args["api_url"]
self.check_mode = self.module.check_mode
self.creds = dict(url_username=args["api_username"],
url_password=args["api_password"],
validate_certs=args["validate_certs"])
self.mapping_info = None
if not self.url.endswith('/'):
self.url += '/'
def update_mapping_info(self):
"""Collect the current state of the storage array."""
response = None
try:
rc, response = request(self.url + "storage-systems/%s/graph" % self.ssid,
method="GET", headers=HEADERS, **self.creds)
except Exception as error:
self.module.fail_json(
msg="Failed to retrieve storage array graph. Id [%s]. Error [%s]" % (self.ssid, to_native(error)))
# Create dictionary containing host/cluster references mapped to their names
target_reference = {}
target_name = {}
target_type = {}
if self.target_type is None or self.target_type == "host":
for host in response["storagePoolBundle"]["host"]:
target_reference.update({host["hostRef"]: host["name"]})
target_name.update({host["name"]: host["hostRef"]})
target_type.update({host["name"]: "host"})
if self.target_type is None or self.target_type == "group":
for cluster in response["storagePoolBundle"]["cluster"]:
                # Verify there is no ambiguity in the target's type (i.e. a host and a group share the same name)
if self.target and self.target_type is None and cluster["name"] == self.target and \
self.target in target_name.keys():
self.module.fail_json(msg="Ambiguous target type: target name is used for both host and group"
" targets! Id [%s]" % self.ssid)
target_reference.update({cluster["clusterRef"]: cluster["name"]})
target_name.update({cluster["name"]: cluster["clusterRef"]})
target_type.update({cluster["name"]: "group"})
volume_reference = {}
volume_name = {}
lun_name = {}
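        # Index both standard and thin volumes by reference and by name; only the first mapping's
        # lun value is recorded for each mapped volume.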
for volume in response["volume"]:
volume_reference.update({volume["volumeRef"]: volume["name"]})
volume_name.update({volume["name"]: volume["volumeRef"]})
if volume["listOfMappings"]:
lun_name.update({volume["name"]: volume["listOfMappings"][0]["lun"]})
for volume in response["highLevelVolBundle"]["thinVolume"]:
volume_reference.update({volume["volumeRef"]: volume["name"]})
volume_name.update({volume["name"]: volume["volumeRef"]})
if volume["listOfMappings"]:
lun_name.update({volume["name"]: volume["listOfMappings"][0]["lun"]})
# Build current mapping object
self.mapping_info = dict(lun_mapping=[dict(volume_reference=mapping["volumeRef"],
map_reference=mapping["mapRef"],
lun_mapping_reference=mapping["lunMappingRef"],
lun=mapping["lun"]
) for mapping in response["storagePoolBundle"]["lunMapping"]],
volume_by_reference=volume_reference,
volume_by_name=volume_name,
lun_by_name=lun_name,
target_by_reference=target_reference,
target_by_name=target_name,
target_type_by_name=target_type)
def get_lun_mapping(self):
"""Find the matching lun mapping reference.
        Returns: tuple(bool, str, int): target match, lun mapping reference, and the mapped lun
"""
target_match = False
reference = None
lun = None
self.update_mapping_info()
        # Verify that when a lun is specified it does not match an existing lun value unless it is associated with
        # the specified volume (i.e. for an update)
if self.lun and any((self.lun == lun_mapping["lun"] and
self.target == self.mapping_info["target_by_reference"][lun_mapping["map_reference"]] and
self.volume != self.mapping_info["volume_by_reference"][lun_mapping["volume_reference"]]
) for lun_mapping in self.mapping_info["lun_mapping"]):
self.module.fail_json(msg="Option lun value is already in use for target! Array Id [%s]." % self.ssid)
        # Verify that when target_type is specified it matches the target's actual type
if self.target and self.target_type and self.target in self.mapping_info["target_type_by_name"].keys() and \
self.mapping_info["target_type_by_name"][self.target] != self.target_type:
self.module.fail_json(
msg="Option target does not match the specified target_type! Id [%s]." % self.ssid)
# Verify volume and target exist if needed for expected state.
if self.state:
if self.volume not in self.mapping_info["volume_by_name"].keys():
self.module.fail_json(msg="Volume does not exist. Id [%s]." % self.ssid)
if self.target and self.target not in self.mapping_info["target_by_name"].keys():
self.module.fail_json(msg="Target does not exist. Id [%s'." % self.ssid)
for lun_mapping in self.mapping_info["lun_mapping"]:
# Find matching volume reference
if lun_mapping["volume_reference"] == self.mapping_info["volume_by_name"][self.volume]:
reference = lun_mapping["lun_mapping_reference"]
lun = lun_mapping["lun"]
                # Determine whether the lun mapping is attached to the specified target (and lun, when one is given)
if (lun_mapping["map_reference"] in self.mapping_info["target_by_reference"].keys() and
self.mapping_info["target_by_reference"][lun_mapping["map_reference"]] == self.target and
(self.lun is None or lun == self.lun)):
target_match = True
return target_match, reference, lun
def update(self):
"""Execute the changes the require changes on the storage array."""
target_match, lun_reference, lun = self.get_lun_mapping()
update = (self.state and not target_match) or (not self.state and target_match)
if update and not self.check_mode:
try:
if self.state:
body = dict()
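                    # When a mapping already exists for the volume it is moved to the requested
                    # target/lun via the volume-mappings move call; otherwise a new mapping is created.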
target = None if not self.target else self.mapping_info["target_by_name"][self.target]
if target:
body.update(dict(targetId=target))
if self.lun is not None:
body.update(dict(lun=self.lun))
if lun_reference:
rc, response = request(self.url + "storage-systems/%s/volume-mappings/%s/move"
% (self.ssid, lun_reference), method="POST", data=json.dumps(body),
headers=HEADERS, **self.creds)
else:
body.update(dict(mappableObjectId=self.mapping_info["volume_by_name"][self.volume]))
rc, response = request(self.url + "storage-systems/%s/volume-mappings" % self.ssid,
method="POST", data=json.dumps(body), headers=HEADERS, **self.creds)
else: # Remove existing lun mapping for volume and target
rc, response = request(self.url + "storage-systems/%s/volume-mappings/%s"
% (self.ssid, lun_reference),
method="DELETE", headers=HEADERS, **self.creds)
except Exception as error:
self.module.fail_json(
msg="Failed to update storage array lun mapping. Id [%s]. Error [%s]"
% (self.ssid, to_native(error)))
self.module.exit_json(msg="Lun mapping is complete.", changed=update)
def main():
lun_mapping = LunMapping()
lun_mapping.update()
if __name__ == '__main__':
main()

View file

@ -0,0 +1,708 @@
#!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_mgmt_interface
short_description: NetApp E-Series management interface configuration
description:
- Configure the E-Series management interfaces
author:
- Michael Price (@lmprice)
- Nathan Swartz (@ndswartz)
extends_documentation_fragment:
- netapp.ontap.netapp.eseries
options:
state:
description:
- Enable or disable IPv4 network interface configuration.
- Either IPv4 or IPv6 must be enabled, otherwise an error will occur.
- Only required when enabling or disabling the IPv4 network interface.
choices:
- enable
- disable
required: no
aliases:
- enable_interface
controller:
description:
- The controller that owns the port you want to configure.
- Controller names are represented alphabetically, with the first controller as A,
the second as B, and so on.
- Current hardware models have either 1 or 2 available controllers, but that is not a guaranteed hard
limitation and could change in the future.
required: yes
choices:
- A
- B
name:
description:
- The port to modify the configuration for.
- The list of choices is not necessarily comprehensive. It depends on the number of ports
that are present in the system.
- The name represents the port number (typically from left to right on the controller),
beginning with a value of 1.
- Mutually exclusive with I(channel).
aliases:
- port
- iface
channel:
description:
- The port to modify the configuration for.
- The channel represents the port number (typically from left to right on the controller),
beginning with a value of 1.
- Mutually exclusive with I(name).
address:
description:
- The IPv4 address to assign to the interface.
- Should be specified in xx.xx.xx.xx form.
- Mutually exclusive with I(config_method=dhcp)
required: no
subnet_mask:
description:
- The subnet mask to utilize for the interface.
- Should be specified in xx.xx.xx.xx form.
- Mutually exclusive with I(config_method=dhcp)
required: no
gateway:
description:
- The IPv4 gateway address to utilize for the interface.
- Should be specified in xx.xx.xx.xx form.
- Mutually exclusive with I(config_method=dhcp)
required: no
config_method:
description:
- The configuration method type to use for network interface ports.
- dhcp is mutually exclusive with I(address), I(subnet_mask), and I(gateway).
choices:
- dhcp
- static
required: no
dns_config_method:
description:
- The configuration method type to use for DNS services.
- dhcp is mutually exclusive with I(dns_address), and I(dns_address_backup).
choices:
- dhcp
- static
required: no
dns_address:
description:
- Primary IPv4 DNS server address
required: no
dns_address_backup:
description:
- Backup IPv4 DNS server address
- Queried when primary DNS server fails
required: no
ntp_config_method:
description:
- The configuration method type to use for NTP services.
- disable is mutually exclusive with I(ntp_address) and I(ntp_address_backup).
- dhcp is mutually exclusive with I(ntp_address) and I(ntp_address_backup).
choices:
- disable
- dhcp
- static
required: no
ntp_address:
description:
- Primary IPv4 NTP server address
required: no
ntp_address_backup:
description:
- Backup IPv4 NTP server address
- Queried when primary NTP server fails
required: no
ssh:
type: bool
description:
- Enable ssh access to the controller for debug purposes.
- This is a controller-level setting.
- rlogin/telnet will be enabled for ancient equipment where ssh is not available.
required: no
log_path:
description:
- A local path to a file to be used for debug logging
required: no
notes:
- Check mode is supported.
- The interface settings are applied synchronously, but changes to the interface itself (receiving a new IP address
via dhcp, etc), can take seconds or minutes longer to take effect.
- "Known issue: Changes specifically to down ports will result in a failure. However, this may not be the case in up
coming NetApp E-Series firmware releases (released after firmware version 11.40.2)."
'''
EXAMPLES = """
- name: Configure the first port on the A controller with a static IPv4 address
netapp_e_mgmt_interface:
name: "1"
controller: "A"
config_method: static
address: "192.168.1.100"
subnet_mask: "255.255.255.0"
gateway: "192.168.1.1"
ssid: "1"
api_url: "10.1.1.1:8443"
api_username: "admin"
api_password: "myPass"
- name: Disable ipv4 connectivity for the second port on the B controller
netapp_e_mgmt_interface:
name: "2"
controller: "B"
enable_interface: no
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
- name: Enable ssh access for ports one and two on controller A
netapp_e_mgmt_interface:
name: "{{ item }}"
controller: "A"
ssh: yes
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
loop:
- 1
- 2
- name: Configure static DNS settings for the first port on controller A
netapp_e_mgmt_interface:
name: "1"
controller: "A"
dns_config_method: static
dns_address: "192.168.1.100"
dns_address_backup: "192.168.1.1"
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
- name: Configure static NTP settings for ports one and two on controller B
netapp_e_mgmt_interface:
name: "{{ item }}"
controller: "B"
ntp_config_method: static
ntp_address: "129.100.1.100"
ntp_address_backup: "127.100.1.1"
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
loop:
- 1
- 2
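# Additional sketch (not part of the original examples): switch a port to DHCP
# addressing. Assumes the same inventory variables (ssid, netapp_api_*) used above.
- name: Configure the first port on controller A to use DHCP
  netapp_e_mgmt_interface:
    name: "1"
    controller: "A"
    state: enable
    config_method: dhcp
    ssid: "{{ ssid }}"
    api_url: "{{ netapp_api_url }}"
    api_username: "{{ netapp_api_username }}"
    api_password: "{{ netapp_api_password }}"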
"""
RETURN = """
msg:
description: Success message
returned: on success
type: str
sample: The interface settings have been updated.
enabled:
description:
- Indicates whether IPv4 connectivity has been enabled or disabled.
- This does not necessarily indicate connectivity. If dhcp was enabled absent a dhcp server, for instance,
it is unlikely that the configuration will actually be valid.
returned: on success
sample: True
type: bool
"""
import json
import logging
from pprint import pformat, pprint
import time
import socket
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import request, eseries_host_argument_spec
from ansible.module_utils._text import to_native
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
class MgmtInterface(object):
MAX_RETRIES = 15
def __init__(self):
argument_spec = eseries_host_argument_spec()
argument_spec.update(dict(
state=dict(type="str", choices=["enable", "disable"],
aliases=["enable_interface"], required=False),
controller=dict(type="str", required=True, choices=["A", "B"]),
name=dict(type="str", aliases=["port", "iface"]),
channel=dict(type="int"),
address=dict(type="str", required=False),
subnet_mask=dict(type="str", required=False),
gateway=dict(type="str", required=False),
config_method=dict(type="str", required=False, choices=["dhcp", "static"]),
dns_config_method=dict(type="str", required=False, choices=["dhcp", "static"]),
dns_address=dict(type="str", required=False),
dns_address_backup=dict(type="str", required=False),
ntp_config_method=dict(type="str", required=False, choices=["disable", "dhcp", "static"]),
ntp_address=dict(type="str", required=False),
ntp_address_backup=dict(type="str", required=False),
ssh=dict(type="bool", required=False),
log_path=dict(type="str", required=False),
))
required_if = [
["state", "enable", ["config_method"]],
["config_method", "static", ["address", "subnet_mask"]],
["dns_config_method", "static", ["dns_address"]],
["ntp_config_method", "static", ["ntp_address"]],
]
mutually_exclusive = [
["name", "channel"],
]
self.module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
required_if=required_if,
mutually_exclusive=mutually_exclusive)
args = self.module.params
self.controller = args["controller"]
self.name = args["name"]
self.channel = args["channel"]
self.config_method = args["config_method"]
self.address = args["address"]
self.subnet_mask = args["subnet_mask"]
self.gateway = args["gateway"]
self.enable_interface = None if args["state"] is None else args["state"] == "enable"
self.dns_config_method = args["dns_config_method"]
self.dns_address = args["dns_address"]
self.dns_address_backup = args["dns_address_backup"]
self.ntp_config_method = args["ntp_config_method"]
self.ntp_address = args["ntp_address"]
self.ntp_address_backup = args["ntp_address_backup"]
self.ssh = args["ssh"]
self.ssid = args["ssid"]
self.url = args["api_url"]
self.creds = dict(url_password=args["api_password"],
validate_certs=args["validate_certs"],
url_username=args["api_username"], )
self.retries = 0
self.check_mode = self.module.check_mode
self.post_body = dict()
log_path = args["log_path"]
# logging setup
self._logger = logging.getLogger(self.__class__.__name__)
if log_path:
logging.basicConfig(
level=logging.DEBUG, filename=log_path, filemode='w',
format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
if not self.url.endswith('/'):
self.url += '/'
@property
def controllers(self):
"""Retrieve a mapping of controller labels to their references
{
'A': '070000000000000000000001',
'B': '070000000000000000000002',
}
:return: the controllers defined on the system
"""
try:
(rc, controllers) = request(self.url + 'storage-systems/%s/controllers'
% self.ssid, headers=HEADERS, **self.creds)
except Exception as err:
controllers = list()
self.module.fail_json(
msg="Failed to retrieve the controller settings. Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
controllers.sort(key=lambda c: c['physicalLocation']['slot'])
controllers_dict = dict()
i = ord('A')
for controller in controllers:
label = chr(i)
settings = dict(controllerSlot=controller['physicalLocation']['slot'],
controllerRef=controller['controllerRef'],
ssh=controller['networkSettings']['remoteAccessEnabled'])
controllers_dict[label] = settings
i += 1
return controllers_dict
@property
def interface(self):
net_interfaces = list()
try:
(rc, net_interfaces) = request(self.url + 'storage-systems/%s/configuration/ethernet-interfaces'
% self.ssid, headers=HEADERS, **self.creds)
except Exception as err:
self.module.fail_json(
msg="Failed to retrieve defined management interfaces. Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
controllers = self.controllers
controller = controllers[self.controller]
net_interfaces = [iface for iface in net_interfaces if iface["controllerRef"] == controller["controllerRef"]]
# Find the correct interface
iface = None
for net in net_interfaces:
if self.name:
if net["alias"] == self.name or net["interfaceName"] == self.name:
iface = net
break
elif self.channel:
if net["channel"] == self.channel:
iface = net
break
if iface is None:
identifier = self.name if self.name is not None else self.channel
self.module.fail_json(msg="We could not find an interface matching [%s] on Array=[%s]."
% (identifier, self.ssid))
return dict(alias=iface["alias"],
channel=iface["channel"],
link_status=iface["linkStatus"],
enabled=iface["ipv4Enabled"],
address=iface["ipv4Address"],
gateway=iface["ipv4GatewayAddress"],
subnet_mask=iface["ipv4SubnetMask"],
dns_config_method=iface["dnsProperties"]["acquisitionProperties"]["dnsAcquisitionType"],
dns_servers=iface["dnsProperties"]["acquisitionProperties"]["dnsServers"],
ntp_config_method=iface["ntpProperties"]["acquisitionProperties"]["ntpAcquisitionType"],
ntp_servers=iface["ntpProperties"]["acquisitionProperties"]["ntpServers"],
config_method=iface["ipv4AddressConfigMethod"],
controllerRef=iface["controllerRef"],
controllerSlot=iface["controllerSlot"],
ipv6Enabled=iface["ipv6Enabled"],
id=iface["interfaceRef"], )
def get_enable_interface_settings(self, iface, expected_iface, update, body):
"""Enable or disable the IPv4 network interface."""
if self.enable_interface:
if not iface["enabled"]:
update = True
body["ipv4Enabled"] = True
else:
if iface["enabled"]:
update = True
body["ipv4Enabled"] = False
expected_iface["enabled"] = body["ipv4Enabled"]
return update, expected_iface, body
def get_interface_settings(self, iface, expected_iface, update, body):
"""Update network interface settings."""
if self.config_method == "dhcp":
if iface["config_method"] != "configDhcp":
update = True
body["ipv4AddressConfigMethod"] = "configDhcp"
else:
if iface["config_method"] != "configStatic":
update = True
body["ipv4AddressConfigMethod"] = "configStatic"
if iface["address"] != self.address:
update = True
body["ipv4Address"] = self.address
if iface["subnet_mask"] != self.subnet_mask:
update = True
body["ipv4SubnetMask"] = self.subnet_mask
if self.gateway and iface["gateway"] != self.gateway:
update = True
body["ipv4GatewayAddress"] = self.gateway
expected_iface["address"] = body["ipv4Address"]
expected_iface["subnet_mask"] = body["ipv4SubnetMask"]
expected_iface["gateway"] = body["ipv4GatewayAddress"]
expected_iface["config_method"] = body["ipv4AddressConfigMethod"]
return update, expected_iface, body
def get_dns_server_settings(self, iface, expected_iface, update, body):
"""Add DNS server information to the request body."""
if self.dns_config_method == "dhcp":
if iface["dns_config_method"] != "dhcp":
update = True
body["dnsAcquisitionDescriptor"] = dict(dnsAcquisitionType="dhcp")
elif self.dns_config_method == "static":
dns_servers = [dict(addressType="ipv4", ipv4Address=self.dns_address)]
if self.dns_address_backup:
dns_servers.append(dict(addressType="ipv4", ipv4Address=self.dns_address_backup))
body["dnsAcquisitionDescriptor"] = dict(dnsAcquisitionType="stat", dnsServers=dns_servers)
if (iface["dns_config_method"] != "stat" or
len(iface["dns_servers"]) != len(dns_servers) or
(len(iface["dns_servers"]) == 2 and
(iface["dns_servers"][0]["ipv4Address"] != self.dns_address or
iface["dns_servers"][1]["ipv4Address"] != self.dns_address_backup)) or
(len(iface["dns_servers"]) == 1 and
iface["dns_servers"][0]["ipv4Address"] != self.dns_address)):
update = True
expected_iface["dns_servers"] = dns_servers
expected_iface["dns_config_method"] = body["dnsAcquisitionDescriptor"]["dnsAcquisitionType"]
return update, expected_iface, body
def get_ntp_server_settings(self, iface, expected_iface, update, body):
"""Add NTP server information to the request body."""
if self.ntp_config_method == "disable":
if iface["ntp_config_method"] != "disabled":
update = True
body["ntpAcquisitionDescriptor"] = dict(ntpAcquisitionType="disabled")
elif self.ntp_config_method == "dhcp":
if iface["ntp_config_method"] != "dhcp":
update = True
body["ntpAcquisitionDescriptor"] = dict(ntpAcquisitionType="dhcp")
elif self.ntp_config_method == "static":
ntp_servers = [dict(addrType="ipvx", ipvxAddress=dict(addressType="ipv4", ipv4Address=self.ntp_address))]
if self.ntp_address_backup:
ntp_servers.append(dict(addrType="ipvx",
ipvxAddress=dict(addressType="ipv4", ipv4Address=self.ntp_address_backup)))
body["ntpAcquisitionDescriptor"] = dict(ntpAcquisitionType="stat", ntpServers=ntp_servers)
if (iface["ntp_config_method"] != "stat" or
len(iface["ntp_servers"]) != len(ntp_servers) or
((len(iface["ntp_servers"]) == 2 and
(iface["ntp_servers"][0]["ipvxAddress"]["ipv4Address"] != self.ntp_address or
iface["ntp_servers"][1]["ipvxAddress"]["ipv4Address"] != self.ntp_address_backup)) or
(len(iface["ntp_servers"]) == 1 and
iface["ntp_servers"][0]["ipvxAddress"]["ipv4Address"] != self.ntp_address))):
update = True
expected_iface["ntp_servers"] = ntp_servers
expected_iface["ntp_config_method"] = body["ntpAcquisitionDescriptor"]["ntpAcquisitionType"]
return update, expected_iface, body
def get_remote_ssh_settings(self, settings, update, body):
"""Configure network interface ports for remote ssh access."""
if self.ssh != settings["ssh"]:
update = True
body["enableRemoteAccess"] = self.ssh
return update, body
def update_array(self, settings, iface):
"""Update controller with new interface, dns service, ntp service and/or remote ssh access information.
:returns: whether information passed will modify the controller's current state
:rtype: bool
"""
update = False
body = dict(controllerRef=settings['controllerRef'],
interfaceRef=iface['id'])
expected_iface = iface.copy()
# Check whether the api url points at the management interface being modified, since changing it affects our own connection
update_used_matching_address = False
if self.enable_interface and self.config_method:
netloc = list(urlparse.urlparse(self.url))[1]
address = netloc.split(":")[0]
address_info = socket.getaddrinfo(address, 8443)
url_address_info = socket.getaddrinfo(iface["address"], 8443)
update_used_matching_address = any(info in url_address_info for info in address_info)
self._logger.info("update_used_matching_address: %s", update_used_matching_address)
# Populate the body of the request and check for changes
if self.enable_interface is not None:
update, expected_iface, body = self.get_enable_interface_settings(iface, expected_iface, update, body)
if self.config_method is not None:
update, expected_iface, body = self.get_interface_settings(iface, expected_iface, update, body)
if self.dns_config_method is not None:
update, expected_iface, body = self.get_dns_server_settings(iface, expected_iface, update, body)
if self.ntp_config_method is not None:
update, expected_iface, body = self.get_ntp_server_settings(iface, expected_iface, update, body)
if self.ssh is not None:
update, body = self.get_remote_ssh_settings(settings, update, body)
iface["ssh"] = self.ssh
expected_iface["ssh"] = self.ssh
# debug information
self._logger.info(pformat(body))
self._logger.info(pformat(iface))
self._logger.info(pformat(expected_iface))
if self.check_mode:
return update
if update and not self.check_mode:
if not update_used_matching_address:
try:
(rc, data) = request(self.url + 'storage-systems/%s/configuration/ethernet-interfaces'
% self.ssid, method='POST', data=json.dumps(body), headers=HEADERS,
timeout=300, ignore_errors=True, **self.creds)
if rc == 422:
if data['retcode'] == "4" or data['retcode'] == "illegalParam":
if not (body['ipv4Enabled'] or iface['ipv6Enabled']):
self.module.fail_json(msg="This storage-system already has IPv6 connectivity disabled. "
"DHCP configuration for IPv4 is required at a minimum."
" Array Id [%s] Message [%s]."
% (self.ssid, data['errorMessage']))
else:
self.module.fail_json(msg="We failed to configure the management interface. Array Id "
"[%s] Message [%s]." % (self.ssid, data))
elif rc >= 300:
self.module.fail_json(
msg="We failed to configure the management interface. Array Id [%s] Message [%s]." %
(self.ssid, data))
# This is going to catch cases like a connection failure
except Exception as err:
self.module.fail_json(
msg="Connection failure: we failed to modify the network settings! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
else:
self.update_api_address_interface_match(body)
return self.validate_changes(expected_iface) if update and iface["link_status"] != "up" else update
def update_api_address_interface_match(self, body):
"""Change network interface address which matches the api_address"""
try:
try:
(rc, data) = request(self.url + 'storage-systems/%s/configuration/ethernet-interfaces' % self.ssid,
use_proxy=False, force=True, ignore_errors=True, method='POST',
data=json.dumps(body), headers=HEADERS, timeout=10, **self.creds)
except Exception:
url_parts = list(urlparse.urlparse(self.url))
domain = url_parts[1].split(":")
domain[0] = self.address
url_parts[1] = ":".join(domain)
expected_url = urlparse.urlunparse(url_parts)
self._logger.info(pformat(expected_url))
(rc, data) = request(expected_url + 'storage-systems/%s/configuration/ethernet-interfaces' % self.ssid,
headers=HEADERS, timeout=300, **self.creds)
return
except Exception as err:
self._logger.info(type(err))
self.module.fail_json(
msg="Connection failure: we failed to modify the network settings! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
def validate_changes(self, expected_iface, retry=6):
"""Validate interface changes were applied to the controller interface port. 30 second timeout"""
if self.interface != expected_iface:
time.sleep(5)
if retry:
return self.validate_changes(expected_iface, retry - 1)
self.module.fail_json(msg="Update failure: we failed to verify the necessary state change.")
return True
def check_health(self):
"""It's possible, due to a previous operation, for the API to report a 424 (offline) status for the
storage-system. Therefore, we run a manual check with retries to attempt to contact the system before we
continue.
"""
try:
(rc, data) = request(self.url + 'storage-systems/%s/controllers'
% self.ssid, headers=HEADERS,
ignore_errors=True, **self.creds)
# We've probably recently changed the interface settings and it's still coming back up: retry.
if rc == 424:
if self.retries < self.MAX_RETRIES:
self.retries += 1
self._logger.info("We hit a 424, retrying in 5s.")
time.sleep(5)
self.check_health()
else:
self.module.fail_json(
msg="We failed to pull storage-system information. Array Id [%s] Message [%s]." %
(self.ssid, data))
elif rc >= 300:
self.module.fail_json(
msg="We failed to pull storage-system information. Array Id [%s] Message [%s]." %
(self.ssid, data))
# This is going to catch cases like a connection failure
except Exception as err:
if self.retries < self.MAX_RETRIES:
self._logger.info("We hit a connection failure, retrying in 5s.")
self.retries += 1
time.sleep(5)
self.check_health()
else:
self.module.fail_json(
msg="Connection failure: we failed to modify the network settings! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
def update(self):
"""Update storage system with necessary changes."""
# Check if the storage array can be contacted
self.check_health()
# make the necessary changes to the storage system
settings = self.controllers[self.controller]
iface = self.interface
self._logger.info(pformat(settings))
self._logger.info(pformat(iface))
update = self.update_array(settings, iface)
self.module.exit_json(msg="The interface settings have been updated.", changed=update)
def __call__(self, *args, **kwargs):
self.update()
def main():
iface = MgmtInterface()
iface()
if __name__ == '__main__':
main()

View file

@ -0,0 +1,369 @@
#!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_snapshot_group
short_description: NetApp E-Series manage snapshot groups
description:
- Create, update, delete snapshot groups for NetApp E-series storage arrays
author: Kevin Hulquest (@hulquest)
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
type: bool
state:
description:
- Whether to ensure the group is present or absent.
required: True
choices:
- present
- absent
name:
description:
- The name to give the snapshot group
required: True
base_volume_name:
description:
- The name of the base volume or thin volume to use as the base for the new snapshot group.
- If a snapshot group with an identical C(name) already exists but with a different base volume
an error will be returned.
required: True
repo_pct:
description:
- The size of the repository in relation to the size of the base volume
required: False
default: 20
warning_threshold:
description:
- The repository utilization warning threshold, as a percentage of the repository volume capacity.
required: False
default: 80
delete_limit:
description:
- The automatic deletion indicator.
- If non-zero, the oldest snapshot image will be automatically deleted when creating a new snapshot image to keep the total number of
snapshot images limited to the number specified.
- This value is overridden by the consistency group setting if this snapshot group is associated with a consistency group.
required: False
default: 30
full_policy:
description:
- The behavior on when the data repository becomes full.
- This value is overridden by consistency group setting if this snapshot group is associated with a consistency group
required: False
default: purgepit
choices:
- purgepit
- unknown
- failbasewrites
- __UNDEFINED
storage_pool_name:
required: True
description:
- The name of the storage pool on which to allocate the repository volume.
rollback_priority:
required: False
description:
- The importance of the rollback operation.
- This value is overridden by consistency group setting if this snapshot group is associated with a consistency group
choices:
- highest
- high
- medium
- low
- lowest
- __UNDEFINED
default: medium
'''
EXAMPLES = """
- name: Configure Snapshot group
netapp_e_snapshot_group:
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
base_volume_name: SSGroup_test
name: OOSS_Group
state: present
repo_pct: 20
warning_threshold: 85
delete_limit: 30
full_policy: purgepit
storage_pool_name: Disk_Pool_1
rollback_priority: medium
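# Additional sketch (not part of the original examples): remove the same snapshot
# group. Assumes the inventory variables used above.
- name: Remove Snapshot group
  netapp_e_snapshot_group:
    ssid: "{{ ssid }}"
    api_url: "{{ netapp_api_url }}"
    api_username: "{{ netapp_api_username }}"
    api_password: "{{ netapp_api_password }}"
    validate_certs: "{{ netapp_api_validate_certs }}"
    state: absent
    name: OOSS_Group
    base_volume_name: SSGroup_test
    storage_pool_name: Disk_Pool_1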
"""
RETURN = """
msg:
description: Success message
returned: success
type: str
sample: json facts for newly created snapshot group.
"""
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
import json
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError
def request(url, data=None, headers=None, method='GET', use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
try:
r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth)
except HTTPError as err:
r = err.fp
try:
raw_data = r.read()
if raw_data:
data = json.loads(raw_data)
else:
raw_data = None
except Exception:
if ignore_errors:
pass
else:
raise Exception(raw_data)
resp_code = r.getcode()
if resp_code >= 400 and not ignore_errors:
raise Exception(resp_code, data)
else:
return resp_code, data
class SnapshotGroup(object):
def __init__(self):
argument_spec = basic_auth_argument_spec()
argument_spec.update(
api_username=dict(type='str', required=True),
api_password=dict(type='str', required=True, no_log=True),
api_url=dict(type='str', required=True),
state=dict(required=True, choices=['present', 'absent']),
base_volume_name=dict(required=True),
name=dict(required=True),
repo_pct=dict(default=20, type='int'),
warning_threshold=dict(default=80, type='int'),
delete_limit=dict(default=30, type='int'),
full_policy=dict(default='purgepit', choices=['unknown', 'failbasewrites', 'purgepit']),
rollback_priority=dict(default='medium', choices=['highest', 'high', 'medium', 'low', 'lowest']),
storage_pool_name=dict(type='str'),
ssid=dict(required=True),
)
self.module = AnsibleModule(argument_spec=argument_spec)
self.post_data = dict()
self.warning_threshold = self.module.params['warning_threshold']
self.base_volume_name = self.module.params['base_volume_name']
self.name = self.module.params['name']
self.repo_pct = self.module.params['repo_pct']
self.delete_limit = self.module.params['delete_limit']
self.full_policy = self.module.params['full_policy']
self.rollback_priority = self.module.params['rollback_priority']
self.storage_pool_name = self.module.params['storage_pool_name']
self.state = self.module.params['state']
self.url = self.module.params['api_url']
self.user = self.module.params['api_username']
self.pwd = self.module.params['api_password']
self.certs = self.module.params['validate_certs']
self.ssid = self.module.params['ssid']
if not self.url.endswith('/'):
self.url += '/'
self.changed = False
@property
def pool_id(self):
pools = 'storage-systems/%s/storage-pools' % self.ssid
url = self.url + pools
try:
(rc, data) = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd)
except Exception as err:
self.module.fail_json(msg="Snapshot group module - Failed to fetch storage pools. " +
"Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
for pool in data:
if pool['name'] == self.storage_pool_name:
self.pool_data = pool
return pool['id']
self.module.fail_json(msg="No storage pool with the name: '%s' was found" % self.name)
@property
def volume_id(self):
volumes = 'storage-systems/%s/volumes' % self.ssid
url = self.url + volumes
try:
rc, data = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd,
validate_certs=self.certs)
except Exception as err:
self.module.fail_json(msg="Snapshot group module - Failed to fetch volumes. " +
"Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
qty = 0
for volume in data:
if volume['name'] == self.base_volume_name:
qty += 1
if qty > 1:
self.module.fail_json(msg="More than one volume with the name: %s was found, "
"please ensure your volume has a unique name" % self.base_volume_name)
else:
Id = volume['id']
self.volume = volume
try:
return Id
except NameError:
self.module.fail_json(msg="No volume with the name: %s, was found" % self.base_volume_name)
@property
def snapshot_group_id(self):
url = self.url + 'storage-systems/%s/snapshot-groups' % self.ssid
try:
rc, data = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd,
validate_certs=self.certs)
except Exception as err:
self.module.fail_json(msg="Failed to fetch snapshot groups. " +
"Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
for ssg in data:
if ssg['name'] == self.name:
self.ssg_data = ssg
return ssg['id']
return None
@property
def ssg_needs_update(self):
if self.ssg_data['fullWarnThreshold'] != self.warning_threshold or \
self.ssg_data['autoDeleteLimit'] != self.delete_limit or \
self.ssg_data['repFullPolicy'] != self.full_policy or \
self.ssg_data['rollbackPriority'] != self.rollback_priority:
return True
else:
return False
def create_snapshot_group(self):
self.post_data = dict(
baseMappableObjectId=self.volume_id,
name=self.name,
repositoryPercentage=self.repo_pct,
warningThreshold=self.warning_threshold,
autoDeleteLimit=self.delete_limit,
fullPolicy=self.full_policy,
storagePoolId=self.pool_id,
)
snapshot = 'storage-systems/%s/snapshot-groups' % self.ssid
url = self.url + snapshot
try:
rc, self.ssg_data = request(url, data=json.dumps(self.post_data), method='POST', headers=HEADERS,
url_username=self.user, url_password=self.pwd, validate_certs=self.certs)
except Exception as err:
self.module.fail_json(msg="Failed to create snapshot group. " +
"Snapshot group [%s]. Id [%s]. Error [%s]." % (self.name,
self.ssid,
to_native(err)))
if not self.snapshot_group_id:
self.snapshot_group_id = self.ssg_data['id']
if self.ssg_needs_update:
self.update_ssg()
else:
self.module.exit_json(changed=True, **self.ssg_data)
def update_ssg(self):
self.post_data = dict(
warningThreshold=self.warning_threshold,
autoDeleteLimit=self.delete_limit,
fullPolicy=self.full_policy,
rollbackPriority=self.rollback_priority
)
url = self.url + "storage-systems/%s/snapshot-groups/%s" % (self.ssid, self.snapshot_group_id)
try:
rc, self.ssg_data = request(url, data=json.dumps(self.post_data), method='POST', headers=HEADERS,
url_username=self.user, url_password=self.pwd, validate_certs=self.certs)
except Exception as err:
self.module.fail_json(msg="Failed to update snapshot group. " +
"Snapshot group [%s]. Id [%s]. Error [%s]." % (self.name,
self.ssid,
to_native(err)))
def apply(self):
if self.state == 'absent':
if self.snapshot_group_id:
try:
rc, resp = request(
self.url + 'storage-systems/%s/snapshot-groups/%s' % (self.ssid, self.snapshot_group_id),
method='DELETE', headers=HEADERS, url_password=self.pwd, url_username=self.user,
validate_certs=self.certs)
except Exception as err:
self.module.fail_json(msg="Failed to delete snapshot group. " +
"Snapshot group [%s]. Id [%s]. Error [%s]." % (self.name,
self.ssid,
to_native(err)))
self.module.exit_json(changed=True, msg="Snapshot group removed", **self.ssg_data)
else:
self.module.exit_json(changed=False, msg="Snapshot group absent")
elif self.snapshot_group_id:
if self.ssg_needs_update:
self.update_ssg()
self.module.exit_json(changed=True, **self.ssg_data)
else:
self.module.exit_json(changed=False, **self.ssg_data)
else:
self.create_snapshot_group()
def main():
vg = SnapshotGroup()
vg.apply()
if __name__ == '__main__':
main()

View file

@ -0,0 +1,246 @@
#!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_snapshot_images
short_description: NetApp E-Series create and delete snapshot images
description:
- Create and delete snapshots images on snapshot groups for NetApp E-series storage arrays.
- Only the oldest snapshot image can be deleted so consistency is preserved.
- "Related: Snapshot volumes are created from snapshot images."
author: Kevin Hulquest (@hulquest)
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
snapshot_group:
description:
- The name of the snapshot group in which you want to create a snapshot image.
required: True
state:
description:
- Whether a new snapshot image should be created or oldest be deleted.
required: True
choices: ['create', 'remove']
'''
EXAMPLES = """
- name: Create Snapshot
netapp_e_snapshot_images:
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ validate_certs }}"
snapshot_group: "3300000060080E5000299C24000005B656D9F394"
state: 'create'
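# Additional sketch (not part of the original examples): delete the oldest image in
# the same snapshot group, using the same variables as above.
- name: Remove oldest snapshot image
  netapp_e_snapshot_images:
    ssid: "{{ ssid }}"
    api_url: "{{ netapp_api_url }}"
    api_username: "{{ netapp_api_username }}"
    api_password: "{{ netapp_api_password }}"
    validate_certs: "{{ validate_certs }}"
    snapshot_group: "3300000060080E5000299C24000005B656D9F394"
    state: 'remove'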
"""
RETURN = """
---
msg:
description: State of operation
type: str
returned: always
sample: "Created snapshot image"
image_id:
description: ID of snapshot image
type: str
returned: state == created
sample: "3400000060080E5000299B640063074057BC5C5E "
"""
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
import json
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError
def request(url, data=None, headers=None, method='GET', use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
try:
r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth)
except HTTPError as err:
r = err.fp
try:
raw_data = r.read()
if raw_data:
data = json.loads(raw_data)
else:
raw_data = None
except Exception:
if ignore_errors:
pass
else:
raise Exception(raw_data)
resp_code = r.getcode()
if resp_code >= 400 and not ignore_errors:
raise Exception(resp_code, data)
else:
return resp_code, data
def snapshot_group_from_name(module, ssid, api_url, api_pwd, api_usr, name):
snap_groups = 'storage-systems/%s/snapshot-groups' % ssid
snap_groups_url = api_url + snap_groups
(ret, snapshot_groups) = request(snap_groups_url, url_username=api_usr, url_password=api_pwd, headers=HEADERS,
validate_certs=module.params['validate_certs'])
snapshot_group_id = None
for snapshot_group in snapshot_groups:
if name == snapshot_group['label']:
snapshot_group_id = snapshot_group['pitGroupRef']
break
if snapshot_group_id is None:
module.fail_json(msg="Failed to lookup snapshot group. Group [%s]. Id [%s]." % (name, ssid))
return snapshot_group
def oldest_image(module, ssid, api_url, api_pwd, api_usr, name):
get_status = 'storage-systems/%s/snapshot-images' % ssid
url = api_url + get_status
try:
(ret, images) = request(url, url_username=api_usr, url_password=api_pwd, headers=HEADERS,
validate_certs=module.params['validate_certs'])
except Exception as err:
module.fail_json(msg="Failed to get snapshot images for group. Group [%s]. Id [%s]. Error [%s]" %
(name, ssid, to_native(err)))
if not images:
module.exit_json(msg="There are no snapshot images to remove. Group [%s]. Id [%s]." % (name, ssid))
oldest = min(images, key=lambda x: x['pitSequenceNumber'])
if oldest is None or "pitRef" not in oldest:
module.fail_json(msg="Failed to lookup oldest snapshot group. Group [%s]. Id [%s]." % (name, ssid))
return oldest
def create_image(module, ssid, api_url, pwd, user, p, snapshot_group):
snapshot_group_obj = snapshot_group_from_name(module, ssid, api_url, pwd, user, snapshot_group)
snapshot_group_id = snapshot_group_obj['pitGroupRef']
endpoint = 'storage-systems/%s/snapshot-images' % ssid
url = api_url + endpoint
post_data = json.dumps({'groupId': snapshot_group_id})
image_data = request(url, data=post_data, method='POST', url_username=user, url_password=pwd, headers=HEADERS,
validate_certs=module.params['validate_certs'])
if image_data[1]['status'] == 'optimal':
status = True
id = image_data[1]['id']
else:
status = False
id = ''
return status, id
def delete_image(module, ssid, api_url, pwd, user, snapshot_group):
image = oldest_image(module, ssid, api_url, pwd, user, snapshot_group)
image_id = image['pitRef']
endpoint = 'storage-systems/%s/snapshot-images/%s' % (ssid, image_id)
url = api_url + endpoint
try:
(ret, image_data) = request(url, method='DELETE', url_username=user, url_password=pwd, headers=HEADERS,
validate_certs=module.params['validate_certs'])
except Exception as e:
# request() raises Exception(resp_code, data) on HTTP errors; unpack both values so
# the status check below still works (exceptions are not indexable on Python 3).
ret, image_data = e.args[0], e.args[1]
if ret == 204:
deleted_status = True
error_message = ''
else:
deleted_status = False
error_message = image_data[1]['errorMessage']
return deleted_status, error_message
def main():
argument_spec = basic_auth_argument_spec()
argument_spec.update(dict(
snapshot_group=dict(required=True, type='str'),
ssid=dict(required=True, type='str'),
api_url=dict(required=True),
api_username=dict(required=False),
api_password=dict(required=False, no_log=True),
validate_certs=dict(required=False, type='bool', default=True),
state=dict(required=True, choices=['create', 'remove'], type='str'),
))
module = AnsibleModule(argument_spec)
p = module.params
ssid = p.pop('ssid')
api_url = p.pop('api_url')
user = p.pop('api_username')
pwd = p.pop('api_password')
snapshot_group = p.pop('snapshot_group')
desired_state = p.pop('state')
if not api_url.endswith('/'):
api_url += '/'
if desired_state == 'create':
created_status, snapshot_id = create_image(module, ssid, api_url, pwd, user, p, snapshot_group)
if created_status:
module.exit_json(changed=True, msg='Created snapshot image', image_id=snapshot_id)
else:
module.fail_json(
msg="Could not create snapshot image on system %s, in snapshot group %s" % (ssid, snapshot_group))
else:
deleted, error_msg = delete_image(module, ssid, api_url, pwd, user, snapshot_group)
if deleted:
module.exit_json(changed=True, msg='Deleted snapshot image for snapshot group [%s]' % (snapshot_group))
else:
module.fail_json(
msg="Could not create snapshot image on system %s, in snapshot group %s --- %s" % (
ssid, snapshot_group, error_msg))
if __name__ == '__main__':
main()

View file

@ -0,0 +1,280 @@
#!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_snapshot_volume
short_description: NetApp E-Series manage snapshot volumes.
description:
- Create, update, remove snapshot volumes for NetApp E/EF-Series storage arrays.
author: Kevin Hulquest (@hulquest)
notes:
- Only I(full_threshold) is supported for update operations. If the snapshot volume already exists and the threshold matches, then an C(ok) status
will be returned, no other changes can be made to a pre-existing snapshot volume.
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
type: bool
ssid:
description:
- storage array ID
required: True
snapshot_image_id:
required: True
description:
- The identifier of the snapshot image used to create the new snapshot volume.
- "Note: You'll likely want to use the M(netapp_e_facts) module to find the ID of the image you want."
full_threshold:
description:
- The repository utilization warning threshold percentage
default: 85
name:
required: True
description:
- The name you wish to give the snapshot volume
view_mode:
required: True
description:
- The snapshot volume access mode
choices:
- modeUnknown
- readWrite
- readOnly
- __UNDEFINED
repo_percentage:
description:
- The size of the view in relation to the size of the base volume
default: 20
storage_pool_name:
description:
- Name of the storage pool on which to allocate the repository volume.
required: True
state:
description:
- Whether to create or remove the snapshot volume
required: True
choices:
- absent
- present
'''
EXAMPLES = """
- name: Snapshot volume
netapp_e_snapshot_volume:
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}/"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
state: present
storage_pool_name: "{{ snapshot_volume_storage_pool_name }}"
snapshot_image_id: "{{ snapshot_volume_image_id }}"
name: "{{ snapshot_volume_name }}"
"""
RETURN = """
msg:
description: Success message
returned: success
type: str
sample: Json facts for the volume that was created.
"""
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
import json
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError
def request(url, data=None, headers=None, method='GET', use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
try:
r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth)
except HTTPError as err:
r = err.fp
try:
raw_data = r.read()
if raw_data:
data = json.loads(raw_data)
else:
raw_data = None
except Exception:
if ignore_errors:
pass
else:
raise Exception(raw_data)
resp_code = r.getcode()
if resp_code >= 400 and not ignore_errors:
raise Exception(resp_code, data)
else:
return resp_code, data
class SnapshotVolume(object):
def __init__(self):
argument_spec = basic_auth_argument_spec()
argument_spec.update(dict(
api_username=dict(type='str', required=True),
api_password=dict(type='str', required=True, no_log=True),
api_url=dict(type='str', required=True),
ssid=dict(type='str', required=True),
snapshot_image_id=dict(type='str', required=True),
full_threshold=dict(type='int', default=85),
name=dict(type='str', required=True),
view_mode=dict(type='str', default='readOnly',
choices=['readOnly', 'readWrite', 'modeUnknown', '__Undefined']),
repo_percentage=dict(type='int', default=20),
storage_pool_name=dict(type='str', required=True),
state=dict(type='str', required=True, choices=['absent', 'present'])
))
self.module = AnsibleModule(argument_spec=argument_spec)
args = self.module.params
self.state = args['state']
self.ssid = args['ssid']
self.snapshot_image_id = args['snapshot_image_id']
self.full_threshold = args['full_threshold']
self.name = args['name']
self.view_mode = args['view_mode']
self.repo_percentage = args['repo_percentage']
self.storage_pool_name = args['storage_pool_name']
self.url = args['api_url']
self.user = args['api_username']
self.pwd = args['api_password']
self.certs = args['validate_certs']
if not self.url.endswith('/'):
self.url += '/'
@property
def pool_id(self):
pools = 'storage-systems/%s/storage-pools' % self.ssid
url = self.url + pools
(rc, data) = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd,
validate_certs=self.certs)
for pool in data:
if pool['name'] == self.storage_pool_name:
self.pool_data = pool
return pool['id']
self.module.fail_json(msg="No storage pool with the name: '%s' was found" % self.name)
@property
def ss_vol_exists(self):
rc, ss_vols = request(self.url + 'storage-systems/%s/snapshot-volumes' % self.ssid, headers=HEADERS,
url_username=self.user, url_password=self.pwd, validate_certs=self.certs)
if ss_vols:
for ss_vol in ss_vols:
if ss_vol['name'] == self.name:
self.ss_vol = ss_vol
return True
else:
return False
return False
@property
def ss_vol_needs_update(self):
if self.ss_vol['fullWarnThreshold'] != self.full_threshold:
return True
else:
return False
def create_ss_vol(self):
post_data = dict(
snapshotImageId=self.snapshot_image_id,
fullThreshold=self.full_threshold,
name=self.name,
viewMode=self.view_mode,
repositoryPercentage=self.repo_percentage,
repositoryPoolId=self.pool_id
)
rc, create_resp = request(self.url + 'storage-systems/%s/snapshot-volumes' % self.ssid,
data=json.dumps(post_data), headers=HEADERS, url_username=self.user,
url_password=self.pwd, validate_certs=self.certs, method='POST')
self.ss_vol = create_resp
# Doing a check after creation because the creation call fails to set the specified warning threshold
if self.ss_vol_needs_update:
self.update_ss_vol()
else:
self.module.exit_json(changed=True, **create_resp)
def update_ss_vol(self):
post_data = dict(
fullThreshold=self.full_threshold,
)
rc, resp = request(self.url + 'storage-systems/%s/snapshot-volumes/%s' % (self.ssid, self.ss_vol['id']),
data=json.dumps(post_data), headers=HEADERS, url_username=self.user, url_password=self.pwd,
method='POST', validate_certs=self.certs)
self.module.exit_json(changed=True, **resp)
def remove_ss_vol(self):
rc, resp = request(self.url + 'storage-systems/%s/snapshot-volumes/%s' % (self.ssid, self.ss_vol['id']),
headers=HEADERS, url_username=self.user, url_password=self.pwd, validate_certs=self.certs,
method='DELETE')
self.module.exit_json(changed=True, msg="Volume successfully deleted")
def apply(self):
if self.state == 'present':
if self.ss_vol_exists:
if self.ss_vol_needs_update:
self.update_ss_vol()
else:
self.module.exit_json(changed=False, **self.ss_vol)
else:
self.create_ss_vol()
else:
if self.ss_vol_exists:
self.remove_ss_vol()
else:
self.module.exit_json(changed=False, msg="Volume already absent")
def main():
sv = SnapshotVolume()
sv.apply()
if __name__ == '__main__':
main()

View file

@ -0,0 +1,295 @@
#!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: netapp_e_storage_system
short_description: NetApp E-Series Web Services Proxy manage storage arrays
description:
- Manage the arrays accessible via a NetApp Web Services Proxy for NetApp E-series storage arrays.
options:
api_username:
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
required: true
api_password:
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
required: true
api_url:
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
required: true
validate_certs:
description:
- Should https certificates be validated?
type: bool
default: 'yes'
ssid:
description:
- The ID of the array to manage. This value must be unique for each array.
required: true
state:
description:
- Whether the specified array should be configured on the Web Services Proxy or not.
required: true
choices: ['present', 'absent']
controller_addresses:
description:
- The list of addresses for the out-of-band management adapter or the agent host. Mutually exclusive with the array_wwn parameter.
required: true
array_wwn:
description:
- The WWN of the array to manage. Only necessary if in-band managing multiple arrays on the same agent host. Mutually exclusive of
controller_addresses parameter.
array_password:
description:
- The management password of the array to manage, if set.
enable_trace:
description:
- Enable trace logging for SYMbol calls to the storage system.
type: bool
default: 'no'
meta_tags:
description:
- Optional meta tags to associate to this storage system
author: Kevin Hulquest (@hulquest)
'''
EXAMPLES = '''
---
- name: Presence of storage system
netapp_e_storage_system:
ssid: "{{ item.key }}"
state: present
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
controller_addresses:
- "{{ item.value.address1 }}"
- "{{ item.value.address2 }}"
with_dict: "{{ storage_systems }}"
when: check_storage_system
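# Additional sketch (not part of the original examples): de-register the same systems
# from the Web Services Proxy, reusing the storage_systems dictionary above.
- name: Absence of storage system
  netapp_e_storage_system:
    ssid: "{{ item.key }}"
    state: absent
    api_url: "{{ netapp_api_url }}"
    api_username: "{{ netapp_api_username }}"
    api_password: "{{ netapp_api_password }}"
    validate_certs: "{{ netapp_api_validate_certs }}"
  with_dict: "{{ storage_systems }}"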
'''
RETURN = '''
msg:
description: State of request
type: str
returned: always
sample: 'Storage system removed.'
'''
import json
from datetime import datetime as dt, timedelta
from time import sleep
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError
def request(url, data=None, headers=None, method='GET', use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
try:
r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth)
except HTTPError as err:
r = err.fp
try:
raw_data = r.read()
if raw_data:
data = json.loads(raw_data)
else:
raw_data = None
except Exception:
if ignore_errors:
pass
else:
raise Exception(raw_data)
resp_code = r.getcode()
if resp_code >= 400 and not ignore_errors:
raise Exception(resp_code, data)
else:
return resp_code, data
def do_post(ssid, api_url, post_headers, api_usr, api_pwd, validate_certs, request_body, timeout):
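# Register the array with the proxy; a 201 response means it was newly added, so poll the
# storage-system resource until its status leaves 'neverContacted' or the timeout expires.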
(rc, resp) = request(api_url + "/storage-systems", data=request_body, headers=post_headers,
method='POST', url_username=api_usr, url_password=api_pwd,
validate_certs=validate_certs)
status = None
return_resp = resp
if 'status' in resp:
status = resp['status']
if rc == 201:
status = 'neverContacted'
fail_after_time = dt.utcnow() + timedelta(seconds=timeout)
while status == 'neverContacted':
if dt.utcnow() > fail_after_time:
raise Exception("web proxy timed out waiting for array status")
sleep(1)
(rc, system_resp) = request(api_url + "/storage-systems/%s" % ssid,
headers=dict(Accept="application/json"), url_username=api_usr,
url_password=api_pwd, validate_certs=validate_certs,
ignore_errors=True)
status = system_resp['status']
return_resp = system_resp
return status, return_resp
def main():
argument_spec = basic_auth_argument_spec()
argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
ssid=dict(required=True, type='str'),
controller_addresses=dict(type='list'),
array_wwn=dict(required=False, type='str'),
array_password=dict(required=False, type='str', no_log=True),
array_status_timeout_sec=dict(default=60, type='int'),
enable_trace=dict(default=False, type='bool'),
meta_tags=dict(type='list')
))
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[['controller_addresses', 'array_wwn']],
required_if=[('state', 'present', ['controller_addresses'])]
)
p = module.params
state = p['state']
ssid = p['ssid']
controller_addresses = p['controller_addresses']
array_wwn = p['array_wwn']
array_password = p['array_password']
array_status_timeout_sec = p['array_status_timeout_sec']
validate_certs = p['validate_certs']
meta_tags = p['meta_tags']
enable_trace = p['enable_trace']
api_usr = p['api_username']
api_pwd = p['api_password']
api_url = p['api_url']
changed = False
array_exists = False
try:
(rc, resp) = request(api_url + "/storage-systems/%s" % ssid, headers=dict(Accept="application/json"),
url_username=api_usr, url_password=api_pwd, validate_certs=validate_certs,
ignore_errors=True)
except Exception as err:
module.fail_json(msg="Error accessing storage-system with id [%s]. Error [%s]" % (ssid, to_native(err)))
array_exists = True
array_detail = resp
if rc == 200:
if state == 'absent':
changed = True
array_exists = False
elif state == 'present':
current_addresses = frozenset(i for i in (array_detail['ip1'], array_detail['ip2']) if i)
if set(controller_addresses) != current_addresses:
changed = True
if array_detail['wwn'] != array_wwn and array_wwn is not None:
module.fail_json(
msg='It seems you may have specified a bad WWN. The storage system ID you specified, %s, currently has the WWN of %s' %
(ssid, array_detail['wwn'])
)
elif rc == 404:
if state == 'present':
changed = True
array_exists = False
else:
changed = False
module.exit_json(changed=changed, msg="Storage system was not present.")
if changed and not module.check_mode:
if state == 'present':
if not array_exists:
# add the array
array_add_req = dict(
id=ssid,
controllerAddresses=controller_addresses,
metaTags=meta_tags,
enableTrace=enable_trace
)
if array_wwn:
array_add_req['wwn'] = array_wwn
if array_password:
array_add_req['password'] = array_password
post_headers = dict(Accept="application/json")
post_headers['Content-Type'] = 'application/json'
request_data = json.dumps(array_add_req)
try:
(rc, resp) = do_post(ssid, api_url, post_headers, api_usr, api_pwd, validate_certs, request_data,
array_status_timeout_sec)
except Exception as err:
module.fail_json(msg="Failed to add storage system. Id[%s]. Request body [%s]. Error[%s]." %
(ssid, request_data, to_native(err)))
else: # array exists, modify...
post_headers = dict(Accept="application/json")
post_headers['Content-Type'] = 'application/json'
post_body = dict(
controllerAddresses=controller_addresses,
removeAllTags=True,
enableTrace=enable_trace,
metaTags=meta_tags
)
try:
(rc, resp) = do_post(ssid, api_url, post_headers, api_usr, api_pwd, validate_certs, post_body,
array_status_timeout_sec)
except Exception as err:
module.fail_json(msg="Failed to update storage system. Id[%s]. Request body [%s]. Error[%s]." %
(ssid, post_body, to_native(err)))
elif state == 'absent':
# delete the array
try:
(rc, resp) = request(api_url + "/storage-systems/%s" % ssid, method='DELETE',
url_username=api_usr,
url_password=api_pwd, validate_certs=validate_certs)
except Exception as err:
module.fail_json(msg="Failed to remove storage array. Id[%s]. Error[%s]." % (ssid, to_native(err)))
if rc == 422:
module.exit_json(changed=changed, msg="Storage system was not presented.")
if rc == 204:
module.exit_json(changed=changed, msg="Storage system removed.")
module.exit_json(changed=changed, **resp)
if __name__ == '__main__':
main()

View file

@@ -0,0 +1,935 @@
#!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community"}
DOCUMENTATION = '''
---
module: netapp_e_storagepool
short_description: NetApp E-Series manage volume groups and disk pools
description: Create or remove volume groups and disk pools for NetApp E-series storage arrays.
author:
- Kevin Hulquest (@hulquest)
- Nathan Swartz (@ndswartz)
extends_documentation_fragment:
- netapp.ontap.netapp.eseries
options:
state:
description:
- Whether the specified storage pool should exist or not.
- Note that removing a storage pool currently requires the removal of all defined volumes first.
required: true
choices: ["present", "absent"]
name:
description:
- The name of the storage pool to manage
required: true
criteria_drive_count:
description:
- The number of disks to use for building the storage pool.
- When I(state=="present") then I(criteria_drive_count) or I(criteria_min_usable_capacity) must be specified.
- The pool will be expanded if this number exceeds the number of disks already in place (See expansion note below)
required: false
type: int
criteria_min_usable_capacity:
description:
- The minimum size of the storage pool (in size_unit).
- When I(state=="present") then I(criteria_drive_count) or I(criteria_min_usable_capacity) must be specified.
- The pool will be expanded if this value exceeds its current size. (See expansion note below)
required: false
type: float
criteria_drive_type:
description:
- The type of disk (hdd or ssd) to use when searching for candidates to use.
- When not specified each drive type will be evaluated until successful drive candidates are found starting with
the most prevalent drive type.
required: false
choices: ["hdd","ssd"]
criteria_size_unit:
description:
- The unit used to interpret size parameters
choices: ["bytes", "b", "kb", "mb", "gb", "tb", "pb", "eb", "zb", "yb"]
default: "gb"
criteria_drive_min_size:
description:
- The minimum individual drive size (in size_unit) to consider when choosing drives for the storage pool.
criteria_drive_interface_type:
description:
- The interface type to use when selecting drives for the storage pool
- If not provided then all interface types will be considered.
choices: ["sas", "sas4k", "fibre", "fibre520b", "scsi", "sata", "pata"]
required: false
criteria_drive_require_da:
description:
- Ensures the storage pool will be created with only data assurance (DA) capable drives.
- Only available for new storage pools; existing storage pools cannot be converted.
default: false
type: bool
criteria_drive_require_fde:
description:
- Whether full disk encryption ability is required for drives to be added to the storage pool
default: false
type: bool
raid_level:
description:
- The RAID level of the storage pool to be created.
- Required only when I(state=="present").
- When I(raid_level=="raidDiskPool") then I(criteria_drive_count >= 10 or criteria_drive_count >= 11) is required
depending on the storage array specifications.
- When I(raid_level=="raid0") then I(1<=criteria_drive_count) is required.
- When I(raid_level=="raid1") then I(2<=criteria_drive_count) is required.
- When I(raid_level=="raid3") then I(3<=criteria_drive_count<=30) is required.
- When I(raid_level=="raid5") then I(3<=criteria_drive_count<=30) is required.
- When I(raid_level=="raid6") then I(5<=criteria_drive_count<=30) is required.
- Note that raidAll will be treated as raidDiskPool and raid3 as raid5.
required: false
choices: ["raidAll", "raid0", "raid1", "raid3", "raid5", "raid6", "raidDiskPool"]
default: "raidDiskPool"
secure_pool:
description:
- Enables security at rest feature on the storage pool.
- Will only work if all drives in the pool are security capable (FDE, FIPS, or mix)
- Warning, once security is enabled it is impossible to disable without erasing the drives.
required: false
type: bool
reserve_drive_count:
description:
- Set the number of drives reserved by the storage pool for reconstruction operations.
- Only valid on raid disk pools.
required: false
remove_volumes:
description:
- Prior to removing a storage pool, delete all volumes in the pool.
default: true
erase_secured_drives:
description:
- If I(state=="absent") then all storage pool drives will be erase
- If I(state=="present") then delete all available storage array drives that have security enabled.
default: true
type: bool
notes:
    - The expansion operations are non-blocking due to the time-consuming nature of expanding volume groups.
    - Traditional volume group expansions (raid0, raid1, raid5, raid6) are performed in steps dictated by the storage
      array. Each required step will be attempted until a request fails, which is likely because of the time the
      expansion requires to complete.
- raidUnsupported will be treated as raid0, raidAll as raidDiskPool and raid3 as raid5.
- Tray loss protection and drawer loss protection will be chosen if at all possible.
'''
EXAMPLES = """
- name: No disk groups
netapp_e_storagepool:
ssid: "{{ ssid }}"
name: "{{ item }}"
state: absent
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
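# Additional illustrative example (not from the original module documentation); parameter values are placeholders.
- name: Ensure a dynamic disk pool exists with at least 2048 gb of usable capacity
  netapp_e_storagepool:
    ssid: "{{ ssid }}"
    name: example_disk_pool
    state: present
    raid_level: raidDiskPool
    criteria_min_usable_capacity: 2048
    criteria_size_unit: gb
    api_url: "{{ netapp_api_url }}"
    api_username: "{{ netapp_api_username }}"
    api_password: "{{ netapp_api_password }}"
    validate_certs: "{{ netapp_api_validate_certs }}"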
"""
RETURN = """
msg:
description: Success message
returned: success
type: str
sample: Json facts for the pool that was created.
"""
import functools
from itertools import groupby
from time import sleep
from pprint import pformat
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import NetAppESeriesModule
from ansible.module_utils._text import to_native
def get_most_common_elements(iterator):
"""Returns a generator containing a descending list of most common elements."""
if not isinstance(iterator, list):
raise TypeError("iterator must be a list.")
grouped = [(key, len(list(group))) for key, group in groupby(sorted(iterator))]
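    # For example, ["hdd", "ssd", "hdd"] groups to [("hdd", 2), ("ssd", 1)] after sorting by count.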
return sorted(grouped, key=lambda x: x[1], reverse=True)
def memoize(func):
"""Generic memoizer for any function with any number of arguments including zero."""
@functools.wraps(func)
def wrapper(*args, **kwargs):
class MemoizeFuncArgs(dict):
def __missing__(self, _key):
self[_key] = func(*args, **kwargs)
return self[_key]
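        # A keyed cache entry is only built when both positional and keyword arguments are supplied;
        # otherwise every call shares the single "no_argument_response" key.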
key = str((args, kwargs)) if args and kwargs else "no_argument_response"
return MemoizeFuncArgs().__getitem__(key)
return wrapper
class NetAppESeriesStoragePool(NetAppESeriesModule):
EXPANSION_TIMEOUT_SEC = 10
DEFAULT_DISK_POOL_MINIMUM_DISK_COUNT = 11
def __init__(self):
version = "02.00.0000.0000"
ansible_options = dict(
state=dict(required=True, choices=["present", "absent"], type="str"),
name=dict(required=True, type="str"),
criteria_size_unit=dict(choices=["bytes", "b", "kb", "mb", "gb", "tb", "pb", "eb", "zb", "yb"],
default="gb", type="str"),
criteria_drive_count=dict(type="int"),
criteria_drive_interface_type=dict(choices=["sas", "sas4k", "fibre", "fibre520b", "scsi", "sata", "pata"],
type="str"),
criteria_drive_type=dict(choices=["ssd", "hdd"], type="str", required=False),
criteria_drive_min_size=dict(type="float"),
criteria_drive_require_da=dict(type="bool", required=False),
criteria_drive_require_fde=dict(type="bool", required=False),
criteria_min_usable_capacity=dict(type="float"),
raid_level=dict(choices=["raidAll", "raid0", "raid1", "raid3", "raid5", "raid6", "raidDiskPool"],
default="raidDiskPool"),
erase_secured_drives=dict(type="bool", default=True),
secure_pool=dict(type="bool", default=False),
reserve_drive_count=dict(type="int"),
remove_volumes=dict(type="bool", default=True))
required_if = [["state", "present", ["raid_level"]]]
super(NetAppESeriesStoragePool, self).__init__(ansible_options=ansible_options,
web_services_version=version,
supports_check_mode=True,
required_if=required_if)
args = self.module.params
self.state = args["state"]
self.ssid = args["ssid"]
self.name = args["name"]
self.criteria_drive_count = args["criteria_drive_count"]
self.criteria_min_usable_capacity = args["criteria_min_usable_capacity"]
self.criteria_size_unit = args["criteria_size_unit"]
self.criteria_drive_min_size = args["criteria_drive_min_size"]
self.criteria_drive_type = args["criteria_drive_type"]
self.criteria_drive_interface_type = args["criteria_drive_interface_type"]
self.criteria_drive_require_fde = args["criteria_drive_require_fde"]
self.criteria_drive_require_da = args["criteria_drive_require_da"]
self.raid_level = args["raid_level"]
self.erase_secured_drives = args["erase_secured_drives"]
self.secure_pool = args["secure_pool"]
self.reserve_drive_count = args["reserve_drive_count"]
self.remove_volumes = args["remove_volumes"]
self.pool_detail = None
# Change all sizes to be measured in bytes
if self.criteria_min_usable_capacity:
self.criteria_min_usable_capacity = int(self.criteria_min_usable_capacity *
self.SIZE_UNIT_MAP[self.criteria_size_unit])
if self.criteria_drive_min_size:
self.criteria_drive_min_size = int(self.criteria_drive_min_size *
self.SIZE_UNIT_MAP[self.criteria_size_unit])
self.criteria_size_unit = "bytes"
# Adjust unused raid level option to reflect documentation
if self.raid_level == "raidAll":
self.raid_level = "raidDiskPool"
if self.raid_level == "raid3":
self.raid_level = "raid5"
@property
@memoize
def available_drives(self):
"""Determine the list of available drives"""
return [drive["id"] for drive in self.drives if drive["available"] and drive["status"] == "optimal"]
@property
@memoize
def available_drive_types(self):
"""Determine the types of available drives sorted by the most common first."""
types = [drive["driveMediaType"] for drive in self.drives]
return [entry[0] for entry in get_most_common_elements(types)]
@property
@memoize
def available_drive_interface_types(self):
"""Determine the types of available drives."""
interfaces = [drive["phyDriveType"] for drive in self.drives]
return [entry[0] for entry in get_most_common_elements(interfaces)]
@property
def storage_pool_drives(self, exclude_hotspares=True):
"""Retrieve list of drives found in storage pool."""
if exclude_hotspares:
return [drive for drive in self.drives
if drive["currentVolumeGroupRef"] == self.pool_detail["id"] and not drive["hotSpare"]]
return [drive for drive in self.drives if drive["currentVolumeGroupRef"] == self.pool_detail["id"]]
@property
def expandable_drive_count(self):
"""Maximum number of drives that a storage pool can be expanded at a given time."""
capabilities = None
if self.raid_level == "raidDiskPool":
return len(self.available_drives)
try:
rc, capabilities = self.request("storage-systems/%s/capabilities" % self.ssid)
except Exception as error:
self.module.fail_json(msg="Failed to fetch maximum expandable drive count. Array id [%s]. Error[%s]."
% (self.ssid, to_native(error)))
return capabilities["featureParameters"]["maxDCEDrives"]
@property
def disk_pool_drive_minimum(self):
"""Provide the storage array's minimum disk pool drive count."""
rc, attr = self.request("storage-systems/%s/symbol/getSystemAttributeDefaults" % self.ssid, ignore_errors=True)
        # Standard minimum is 11 drives but some arrays allow 10 drives. 11 will be used as the default.
if (rc != 200 or "minimumDriveCount" not in attr["defaults"]["diskPoolDefaultAttributes"].keys() or
attr["defaults"]["diskPoolDefaultAttributes"]["minimumDriveCount"] == 0):
return self.DEFAULT_DISK_POOL_MINIMUM_DISK_COUNT
return attr["defaults"]["diskPoolDefaultAttributes"]["minimumDriveCount"]
def get_available_drive_capacities(self, drive_id_list=None):
"""Determine the list of available drive capacities."""
if drive_id_list:
available_drive_capacities = set([int(drive["usableCapacity"]) for drive in self.drives
if drive["id"] in drive_id_list and drive["available"] and
drive["status"] == "optimal"])
else:
available_drive_capacities = set([int(drive["usableCapacity"]) for drive in self.drives
if drive["available"] and drive["status"] == "optimal"])
self.module.log("available drive capacities: %s" % available_drive_capacities)
return list(available_drive_capacities)
@property
def drives(self):
"""Retrieve list of drives found in storage pool."""
drives = None
try:
rc, drives = self.request("storage-systems/%s/drives" % self.ssid)
except Exception as error:
self.module.fail_json(msg="Failed to fetch disk drives. Array id [%s]. Error[%s]."
% (self.ssid, to_native(error)))
return drives
def is_drive_count_valid(self, drive_count):
"""Validate drive count criteria is met."""
if self.criteria_drive_count and drive_count < self.criteria_drive_count:
return False
if self.raid_level == "raidDiskPool":
return drive_count >= self.disk_pool_drive_minimum
if self.raid_level == "raid0":
return drive_count > 0
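        # raid1 is built from mirrored pairs, so an even drive count of at least two is required.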
if self.raid_level == "raid1":
return drive_count >= 2 and (drive_count % 2) == 0
if self.raid_level in ["raid3", "raid5"]:
return 3 <= drive_count <= 30
if self.raid_level == "raid6":
return 5 <= drive_count <= 30
return False
@property
def storage_pool(self):
"""Retrieve storage pool information."""
storage_pools_resp = None
try:
rc, storage_pools_resp = self.request("storage-systems/%s/storage-pools" % self.ssid)
except Exception as err:
self.module.fail_json(msg="Failed to get storage pools. Array id [%s]. Error[%s]. State[%s]."
% (self.ssid, to_native(err), self.state))
pool_detail = [pool for pool in storage_pools_resp if pool["name"] == self.name]
return pool_detail[0] if pool_detail else dict()
@property
def storage_pool_volumes(self):
"""Retrieve list of volumes associated with storage pool."""
volumes_resp = None
try:
rc, volumes_resp = self.request("storage-systems/%s/volumes" % self.ssid)
except Exception as err:
self.module.fail_json(msg="Failed to get storage pools. Array id [%s]. Error[%s]. State[%s]."
% (self.ssid, to_native(err), self.state))
group_ref = self.storage_pool["volumeGroupRef"]
storage_pool_volume_list = [volume["id"] for volume in volumes_resp if volume["volumeGroupRef"] == group_ref]
return storage_pool_volume_list
def get_ddp_capacity(self, expansion_drive_list):
"""Return the total usable capacity based on the additional drives."""
def get_ddp_error_percent(_drive_count, _extent_count):
"""Determine the space reserved for reconstruction"""
if _drive_count <= 36:
if _extent_count <= 600:
return 0.40
elif _extent_count <= 1400:
return 0.35
elif _extent_count <= 6200:
return 0.20
elif _extent_count <= 50000:
return 0.15
elif _drive_count <= 64:
if _extent_count <= 600:
return 0.20
elif _extent_count <= 1400:
return 0.15
elif _extent_count <= 6200:
return 0.10
elif _extent_count <= 50000:
return 0.05
elif _drive_count <= 480:
if _extent_count <= 600:
return 0.20
elif _extent_count <= 1400:
return 0.15
elif _extent_count <= 6200:
return 0.10
elif _extent_count <= 50000:
return 0.05
self.module.fail_json(msg="Drive count exceeded the error percent table. Array[%s]" % self.ssid)
def get_ddp_reserved_drive_count(_disk_count):
"""Determine the number of reserved drive."""
reserve_count = 0
if self.reserve_drive_count:
reserve_count = self.reserve_drive_count
elif _disk_count >= 256:
reserve_count = 8
elif _disk_count >= 192:
reserve_count = 7
elif _disk_count >= 128:
reserve_count = 6
elif _disk_count >= 64:
reserve_count = 4
elif _disk_count >= 32:
reserve_count = 3
elif _disk_count >= 12:
reserve_count = 2
elif _disk_count == 11:
reserve_count = 1
return reserve_count
if self.pool_detail:
drive_count = len(self.storage_pool_drives) + len(expansion_drive_list)
else:
drive_count = len(expansion_drive_list)
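        # The constants below appear to be (an inference from the surrounding math, not documented here):
        # 8053063680 bytes (exactly 7.5 GiB) of fixed per-drive overhead, 536870912 bytes (512 MiB) per
        # data extent, and 4294967296 bytes (4 GiB) per returned capacity unit.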
drive_usable_capacity = min(min(self.get_available_drive_capacities()),
min(self.get_available_drive_capacities(expansion_drive_list)))
drive_data_extents = ((drive_usable_capacity - 8053063680) / 536870912)
maximum_stripe_count = (drive_count * drive_data_extents) / 10
error_percent = get_ddp_error_percent(drive_count, drive_data_extents)
error_overhead = (drive_count * drive_data_extents / 10 * error_percent + 10) / 10
total_stripe_count = maximum_stripe_count - error_overhead
stripe_count_per_drive = total_stripe_count / drive_count
reserved_stripe_count = get_ddp_reserved_drive_count(drive_count) * stripe_count_per_drive
available_stripe_count = total_stripe_count - reserved_stripe_count
return available_stripe_count * 4294967296
@memoize
def get_candidate_drives(self):
"""Retrieve set of drives candidates for creating a new storage pool."""
def get_candidate_drive_request():
"""Perform request for new volume creation."""
candidates_list = list()
drive_types = [self.criteria_drive_type] if self.criteria_drive_type else self.available_drive_types
interface_types = [self.criteria_drive_interface_type] \
if self.criteria_drive_interface_type else self.available_drive_interface_types
for interface_type in interface_types:
for drive_type in drive_types:
candidates = None
volume_candidate_request_data = dict(
type="diskPool" if self.raid_level == "raidDiskPool" else "traditional",
diskPoolVolumeCandidateRequestData=dict(
reconstructionReservedDriveCount=65535))
candidate_selection_type = dict(
candidateSelectionType="count",
driveRefList=dict(driveRef=self.available_drives))
criteria = dict(raidLevel=self.raid_level,
phyDriveType=interface_type,
dssPreallocEnabled=False,
securityType="capable" if self.criteria_drive_require_fde else "none",
driveMediaType=drive_type,
onlyProtectionInformationCapable=True if self.criteria_drive_require_da else False,
volumeCandidateRequestData=volume_candidate_request_data,
allocateReserveSpace=False,
securityLevel="fde" if self.criteria_drive_require_fde else "none",
candidateSelectionType=candidate_selection_type)
try:
rc, candidates = self.request("storage-systems/%s/symbol/getVolumeCandidates?verboseError"
"Response=true" % self.ssid, data=criteria, method="POST")
except Exception as error:
self.module.fail_json(msg="Failed to retrieve volume candidates. Array [%s]. Error [%s]."
% (self.ssid, to_native(error)))
if candidates:
candidates_list.extend(candidates["volumeCandidate"])
# Sort output based on tray and then drawer protection first
tray_drawer_protection = list()
tray_protection = list()
drawer_protection = list()
no_protection = list()
sorted_candidates = list()
for item in candidates_list:
if item["trayLossProtection"]:
if item["drawerLossProtection"]:
tray_drawer_protection.append(item)
else:
tray_protection.append(item)
elif item["drawerLossProtection"]:
drawer_protection.append(item)
else:
no_protection.append(item)
if tray_drawer_protection:
sorted_candidates.extend(tray_drawer_protection)
if tray_protection:
sorted_candidates.extend(tray_protection)
if drawer_protection:
sorted_candidates.extend(drawer_protection)
if no_protection:
sorted_candidates.extend(no_protection)
return sorted_candidates
# Determine the appropriate candidate list
for candidate in get_candidate_drive_request():
# Evaluate candidates for required drive count, collective drive usable capacity and minimum drive size
if self.criteria_drive_count:
if self.criteria_drive_count != int(candidate["driveCount"]):
continue
if self.criteria_min_usable_capacity:
if ((self.raid_level == "raidDiskPool" and self.criteria_min_usable_capacity >
self.get_ddp_capacity(candidate["driveRefList"]["driveRef"])) or
self.criteria_min_usable_capacity > int(candidate["usableSize"])):
continue
if self.criteria_drive_min_size:
if self.criteria_drive_min_size > min(self.get_available_drive_capacities(candidate["driveRefList"]["driveRef"])):
continue
return candidate
self.module.fail_json(msg="Not enough drives to meet the specified criteria. Array [%s]." % self.ssid)
@memoize
def get_expansion_candidate_drives(self):
"""Retrieve required expansion drive list.
        Note: To satisfy the expansion criteria each item in the candidate list must be added to the specified group
        since there is a potential limitation on how many drives can be incorporated at a time.
            * Traditional raid volume groups may only be expanded by a maximum of two drives at a time; there is no
              limit for raid disk pools.
:return list(candidate): list of candidate structures from the getVolumeGroupExpansionCandidates symbol endpoint
"""
def get_expansion_candidate_drive_request():
"""Perform the request for expanding existing volume groups or disk pools.
            Note: the list of candidate structures does not necessarily produce candidates that meet all criteria.
"""
candidates_list = None
url = "storage-systems/%s/symbol/getVolumeGroupExpansionCandidates?verboseErrorResponse=true" % self.ssid
if self.raid_level == "raidDiskPool":
url = "storage-systems/%s/symbol/getDiskPoolExpansionCandidates?verboseErrorResponse=true" % self.ssid
try:
rc, candidates_list = self.request(url, method="POST", data=self.pool_detail["id"])
except Exception as error:
self.module.fail_json(msg="Failed to retrieve volume candidates. Array [%s]. Error [%s]."
% (self.ssid, to_native(error)))
return candidates_list["candidates"]
required_candidate_list = list()
required_additional_drives = 0
required_additional_capacity = 0
total_required_capacity = 0
        # determine whether and how much expansion is needed to satisfy the specified criteria
if self.criteria_min_usable_capacity:
total_required_capacity = self.criteria_min_usable_capacity
required_additional_capacity = self.criteria_min_usable_capacity - int(self.pool_detail["totalRaidedSpace"])
if self.criteria_drive_count:
required_additional_drives = self.criteria_drive_count - len(self.storage_pool_drives)
# Determine the appropriate expansion candidate list
if required_additional_drives > 0 or required_additional_capacity > 0:
for candidate in get_expansion_candidate_drive_request():
if self.criteria_drive_min_size:
if self.criteria_drive_min_size > min(self.get_available_drive_capacities(candidate["drives"])):
continue
if self.raid_level == "raidDiskPool":
if (len(candidate["drives"]) >= required_additional_drives and
self.get_ddp_capacity(candidate["drives"]) >= total_required_capacity):
required_candidate_list.append(candidate)
break
else:
required_additional_drives -= len(candidate["drives"])
required_additional_capacity -= int(candidate["usableCapacity"])
required_candidate_list.append(candidate)
# Determine if required drives and capacities are satisfied
if required_additional_drives <= 0 and required_additional_capacity <= 0:
break
else:
self.module.fail_json(msg="Not enough drives to meet the specified criteria. Array [%s]." % self.ssid)
return required_candidate_list
def get_reserve_drive_count(self):
"""Retrieve the current number of reserve drives for raidDiskPool (Only for raidDiskPool)."""
if not self.pool_detail:
self.module.fail_json(msg="The storage pool must exist. Array [%s]." % self.ssid)
if self.raid_level != "raidDiskPool":
self.module.fail_json(msg="The storage pool must be a raidDiskPool. Pool [%s]. Array [%s]."
% (self.pool_detail["id"], self.ssid))
return self.pool_detail["volumeGroupData"]["diskPoolData"]["reconstructionReservedDriveCount"]
def get_maximum_reserve_drive_count(self):
"""Retrieve the maximum number of reserve drives for storage pool (Only for raidDiskPool)."""
if self.raid_level != "raidDiskPool":
self.module.fail_json(msg="The storage pool must be a raidDiskPool. Pool [%s]. Array [%s]."
% (self.pool_detail["id"], self.ssid))
drives_ids = list()
if self.pool_detail:
drives_ids.extend(self.storage_pool_drives)
for candidate in self.get_expansion_candidate_drives():
drives_ids.extend((candidate["drives"]))
else:
candidate = self.get_candidate_drives()
drives_ids.extend(candidate["driveRefList"]["driveRef"])
drive_count = len(drives_ids)
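        # Reserve roughly 20% of the drives plus one, but never so many that fewer than 10 drives
        # remain in the pool, and never more than 10 reserve drives in total.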
maximum_reserve_drive_count = min(int(drive_count * 0.2 + 1), drive_count - 10)
if maximum_reserve_drive_count > 10:
maximum_reserve_drive_count = 10
return maximum_reserve_drive_count
def set_reserve_drive_count(self, check_mode=False):
"""Set the reserve drive count for raidDiskPool."""
changed = False
if self.raid_level == "raidDiskPool" and self.reserve_drive_count:
maximum_count = self.get_maximum_reserve_drive_count()
if self.reserve_drive_count < 0 or self.reserve_drive_count > maximum_count:
self.module.fail_json(msg="Supplied reserve drive count is invalid or exceeds the maximum allowed. "
"Note that it may be necessary to wait for expansion operations to complete "
"before the adjusting the reserve drive count. Maximum [%s]. Array [%s]."
% (maximum_count, self.ssid))
if self.reserve_drive_count != self.get_reserve_drive_count():
changed = True
if not check_mode:
try:
rc, resp = self.request("storage-systems/%s/symbol/setDiskPoolReservedDriveCount" % self.ssid,
method="POST", data=dict(volumeGroupRef=self.pool_detail["id"],
newDriveCount=self.reserve_drive_count))
except Exception as error:
self.module.fail_json(msg="Failed to set reserve drive count for disk pool. Disk Pool [%s]."
" Array [%s]." % (self.pool_detail["id"], self.ssid))
return changed
def erase_all_available_secured_drives(self, check_mode=False):
"""Erase all available drives that have encryption at rest feature enabled."""
changed = False
drives_list = list()
for drive in self.drives:
if drive["available"] and drive["fdeEnabled"]:
changed = True
drives_list.append(drive["id"])
if drives_list and not check_mode:
try:
rc, resp = self.request("storage-systems/%s/symbol/reprovisionDrive?verboseErrorResponse=true"
% self.ssid, method="POST", data=dict(driveRef=drives_list))
except Exception as error:
self.module.fail_json(msg="Failed to erase all secured drives. Array [%s]" % self.ssid)
return changed
def create_storage_pool(self):
"""Create new storage pool."""
url = "storage-systems/%s/symbol/createVolumeGroup?verboseErrorResponse=true" % self.ssid
request_body = dict(label=self.name,
candidate=self.get_candidate_drives())
if self.raid_level == "raidDiskPool":
url = "storage-systems/%s/symbol/createDiskPool?verboseErrorResponse=true" % self.ssid
request_body.update(
dict(backgroundOperationPriority="useDefault",
criticalReconstructPriority="useDefault",
degradedReconstructPriority="useDefault",
poolUtilizationCriticalThreshold=65535,
poolUtilizationWarningThreshold=0))
if self.reserve_drive_count:
request_body.update(dict(volumeCandidateData=dict(
diskPoolVolumeCandidateData=dict(reconstructionReservedDriveCount=self.reserve_drive_count))))
try:
rc, resp = self.request(url, method="POST", data=request_body)
except Exception as error:
self.module.fail_json(msg="Failed to create storage pool. Array id [%s]. Error[%s]."
% (self.ssid, to_native(error)))
# Update drive and storage pool information
self.pool_detail = self.storage_pool
def delete_storage_pool(self):
"""Delete storage pool."""
storage_pool_drives = [drive["id"] for drive in self.storage_pool_drives if drive["fdeEnabled"]]
try:
delete_volumes_parameter = "?delete-volumes=true" if self.remove_volumes else ""
rc, resp = self.request("storage-systems/%s/storage-pools/%s%s"
% (self.ssid, self.pool_detail["id"], delete_volumes_parameter), method="DELETE")
except Exception as error:
self.module.fail_json(msg="Failed to delete storage pool. Pool id [%s]. Array id [%s]. Error[%s]."
% (self.pool_detail["id"], self.ssid, to_native(error)))
if storage_pool_drives and self.erase_secured_drives:
try:
rc, resp = self.request("storage-systems/%s/symbol/reprovisionDrive?verboseErrorResponse=true"
% self.ssid, method="POST", data=dict(driveRef=storage_pool_drives))
except Exception as error:
self.module.fail_json(msg="Failed to erase drives prior to creating new storage pool. Array [%s]."
" Error [%s]." % (self.ssid, to_native(error)))
def secure_storage_pool(self, check_mode=False):
"""Enable security on an existing storage pool"""
self.pool_detail = self.storage_pool
needs_secure_pool = False
if not self.secure_pool and self.pool_detail["securityType"] == "enabled":
self.module.fail_json(msg="It is not possible to disable storage pool security! See array documentation.")
if self.secure_pool and self.pool_detail["securityType"] != "enabled":
needs_secure_pool = True
if needs_secure_pool and not check_mode:
try:
rc, resp = self.request("storage-systems/%s/storage-pools/%s" % (self.ssid, self.pool_detail["id"]),
data=dict(securePool=True), method="POST")
except Exception as error:
self.module.fail_json(msg="Failed to secure storage pool. Pool id [%s]. Array [%s]. Error"
" [%s]." % (self.pool_detail["id"], self.ssid, to_native(error)))
self.pool_detail = self.storage_pool
return needs_secure_pool
def migrate_raid_level(self, check_mode=False):
"""Request storage pool raid level migration."""
needs_migration = self.raid_level != self.pool_detail["raidLevel"]
if needs_migration and self.pool_detail["raidLevel"] == "raidDiskPool":
self.module.fail_json(msg="Raid level cannot be changed for disk pools")
if needs_migration and not check_mode:
sp_raid_migrate_req = dict(raidLevel=self.raid_level)
try:
rc, resp = self.request("storage-systems/%s/storage-pools/%s/raid-type-migration"
% (self.ssid, self.name), data=sp_raid_migrate_req, method="POST")
except Exception as error:
self.module.fail_json(msg="Failed to change the raid level of storage pool. Array id [%s]."
" Error[%s]." % (self.ssid, to_native(error)))
self.pool_detail = self.storage_pool
return needs_migration
def expand_storage_pool(self, check_mode=False):
"""Add drives to existing storage pool.
        :return tuple(bool, float): whether drives were required to satisfy the specified criteria and the estimated completion time."""
expansion_candidate_list = self.get_expansion_candidate_drives()
changed_required = bool(expansion_candidate_list)
estimated_completion_time = 0.0
# build expandable groupings of traditional raid candidate
required_expansion_candidate_list = list()
while expansion_candidate_list:
subset = list()
while expansion_candidate_list and len(subset) < self.expandable_drive_count:
subset.extend(expansion_candidate_list.pop()["drives"])
required_expansion_candidate_list.append(subset)
if required_expansion_candidate_list and not check_mode:
url = "storage-systems/%s/symbol/startVolumeGroupExpansion?verboseErrorResponse=true" % self.ssid
if self.raid_level == "raidDiskPool":
url = "storage-systems/%s/symbol/startDiskPoolExpansion?verboseErrorResponse=true" % self.ssid
while required_expansion_candidate_list:
candidate_drives_list = required_expansion_candidate_list.pop()
request_body = dict(volumeGroupRef=self.pool_detail["volumeGroupRef"],
driveRef=candidate_drives_list)
try:
rc, resp = self.request(url, method="POST", data=request_body)
except Exception as error:
rc, actions_resp = self.request("storage-systems/%s/storage-pools/%s/action-progress"
% (self.ssid, self.pool_detail["id"]), ignore_errors=True)
if rc == 200 and actions_resp:
actions = [action["currentAction"] for action in actions_resp
if action["volumeRef"] in self.storage_pool_volumes]
self.module.fail_json(msg="Failed to add drives to the storage pool possibly because of actions"
" in progress. Actions [%s]. Pool id [%s]. Array id [%s]. Error[%s]."
% (", ".join(actions), self.pool_detail["id"], self.ssid,
to_native(error)))
self.module.fail_json(msg="Failed to add drives to storage pool. Pool id [%s]. Array id [%s]."
" Error[%s]." % (self.pool_detail["id"], self.ssid, to_native(error)))
# Wait for expansion completion unless it is the last request in the candidate list
if required_expansion_candidate_list:
for dummy in range(self.EXPANSION_TIMEOUT_SEC):
rc, actions_resp = self.request("storage-systems/%s/storage-pools/%s/action-progress"
% (self.ssid, self.pool_detail["id"]), ignore_errors=True)
if rc == 200:
for action in actions_resp:
if (action["volumeRef"] in self.storage_pool_volumes and
action["currentAction"] == "remappingDce"):
sleep(1)
estimated_completion_time = action["estimatedTimeToCompletion"]
break
else:
estimated_completion_time = 0.0
break
return changed_required, estimated_completion_time
def apply(self):
"""Apply requested state to storage array."""
changed = False
if self.state == "present":
if self.criteria_drive_count is None and self.criteria_min_usable_capacity is None:
self.module.fail_json(msg="One of criteria_min_usable_capacity or criteria_drive_count must be"
" specified.")
if self.criteria_drive_count and not self.is_drive_count_valid(self.criteria_drive_count):
self.module.fail_json(msg="criteria_drive_count must be valid for the specified raid level.")
self.pool_detail = self.storage_pool
self.module.log(pformat(self.pool_detail))
if self.state == "present" and self.erase_secured_drives:
self.erase_all_available_secured_drives(check_mode=True)
# Determine whether changes need to be applied to the storage array
if self.pool_detail:
if self.state == "absent":
changed = True
elif self.state == "present":
if self.criteria_drive_count and self.criteria_drive_count < len(self.storage_pool_drives):
self.module.fail_json(msg="Failed to reduce the size of the storage pool. Array [%s]. Pool [%s]."
% (self.ssid, self.pool_detail["id"]))
if self.criteria_drive_type and self.criteria_drive_type != self.pool_detail["driveMediaType"]:
self.module.fail_json(msg="Failed! It is not possible to modify storage pool media type."
" Array [%s]. Pool [%s]." % (self.ssid, self.pool_detail["id"]))
if (self.criteria_drive_require_da is not None and self.criteria_drive_require_da !=
self.pool_detail["protectionInformationCapabilities"]["protectionInformationCapable"]):
self.module.fail_json(msg="Failed! It is not possible to modify DA-capability. Array [%s]."
" Pool [%s]." % (self.ssid, self.pool_detail["id"]))
# Evaluate current storage pool for required change.
needs_expansion, estimated_completion_time = self.expand_storage_pool(check_mode=True)
if needs_expansion:
changed = True
if self.migrate_raid_level(check_mode=True):
changed = True
if self.secure_storage_pool(check_mode=True):
changed = True
if self.set_reserve_drive_count(check_mode=True):
changed = True
elif self.state == "present":
changed = True
# Apply changes to storage array
msg = "No changes were required for the storage pool [%s]."
if changed and not self.module.check_mode:
if self.state == "present":
if self.erase_secured_drives:
self.erase_all_available_secured_drives()
if self.pool_detail:
change_list = list()
# Expansion needs to occur before raid level migration to account for any sizing needs.
expanded, estimated_completion_time = self.expand_storage_pool()
if expanded:
change_list.append("expanded")
if self.migrate_raid_level():
change_list.append("raid migration")
if self.secure_storage_pool():
change_list.append("secured")
if self.set_reserve_drive_count():
change_list.append("adjusted reserve drive count")
if change_list:
msg = "Following changes have been applied to the storage pool [%s]: " + ", ".join(change_list)
if expanded:
msg += "\nThe expansion operation will complete in an estimated %s minutes."\
% estimated_completion_time
else:
self.create_storage_pool()
msg = "Storage pool [%s] was created."
if self.secure_storage_pool():
msg = "Storage pool [%s] was created and secured."
if self.set_reserve_drive_count():
msg += " Adjusted reserve drive count."
elif self.pool_detail:
self.delete_storage_pool()
msg = "Storage pool [%s] removed."
self.pool_detail = self.storage_pool
self.module.log(pformat(self.pool_detail))
self.module.log(msg % self.name)
self.module.exit_json(msg=msg % self.name, changed=changed, **self.pool_detail)
def main():
storage_pool = NetAppESeriesStoragePool()
storage_pool.apply()
if __name__ == "__main__":
main()

View file

@@ -0,0 +1,280 @@
#!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_syslog
short_description: NetApp E-Series manage syslog settings
description:
- Allow the syslog settings to be configured for an individual E-Series storage-system
author: Nathan Swartz (@ndswartz)
extends_documentation_fragment:
- netapp.ontap.netapp.eseries
options:
state:
description:
- Add or remove the syslog server configuration for E-Series storage array.
- Existing syslog server configuration will be removed or updated when its address matches I(address).
            - A fully qualified hostname that resolves to an IPv4 address matching I(address) will not be
treated as a match.
choices:
- present
- absent
default: present
address:
description:
- The syslog server's IPv4 address or a fully qualified hostname.
- All existing syslog configurations will be removed when I(state=absent) and I(address=None).
port:
description:
- This is the port the syslog server is using.
default: 514
protocol:
description:
            - This is the transmission protocol the syslog server is using to receive syslog messages.
choices:
- udp
- tcp
- tls
default: udp
components:
description:
- The e-series logging components define the specific logs to transfer to the syslog server.
- At the time of writing, 'auditLog' is the only logging component but more may become available.
default: ["auditLog"]
test:
description:
- This forces a test syslog message to be sent to the stated syslog server.
- Only attempts transmission when I(state=present).
type: bool
default: no
log_path:
description:
- This argument specifies a local path for logging purposes.
required: no
notes:
- Check mode is supported.
- This API is currently only supported with the Embedded Web Services API v2.12 (bundled with
SANtricity OS 11.40.2) and higher.
'''
EXAMPLES = """
- name: Add two syslog server configurations to NetApp E-Series storage array.
netapp_e_syslog:
state: present
address: "{{ item }}"
port: 514
protocol: tcp
        components: ["auditLog"]
api_url: "10.1.1.1:8443"
api_username: "admin"
api_password: "myPass"
loop:
- "192.168.1.1"
- "192.168.1.100"
"""
RETURN = """
msg:
description: Success message
returned: on success
type: str
sample: The settings have been updated.
syslog:
description:
- True if syslog server configuration has been added to e-series storage array.
returned: on success
sample: True
type: bool
"""
import json
import logging
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import request, eseries_host_argument_spec
from ansible.module_utils._text import to_native
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
class Syslog(object):
def __init__(self):
argument_spec = eseries_host_argument_spec()
argument_spec.update(dict(
state=dict(choices=["present", "absent"], required=False, default="present"),
address=dict(type="str", required=False),
port=dict(type="int", default=514, required=False),
protocol=dict(choices=["tcp", "tls", "udp"], default="udp", required=False),
components=dict(type="list", required=False, default=["auditLog"]),
test=dict(type="bool", default=False, required=False),
log_path=dict(type="str", required=False),
))
required_if = [
["state", "present", ["address", "port", "protocol", "components"]],
]
mutually_exclusive = [
["test", "absent"],
]
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if,
mutually_exclusive=mutually_exclusive)
args = self.module.params
self.syslog = args["state"] in ["present"]
self.address = args["address"]
self.port = args["port"]
self.protocol = args["protocol"]
self.components = args["components"]
self.test = args["test"]
self.ssid = args["ssid"]
self.url = args["api_url"]
self.creds = dict(url_password=args["api_password"],
validate_certs=args["validate_certs"],
url_username=args["api_username"], )
self.components.sort()
self.check_mode = self.module.check_mode
# logging setup
log_path = args["log_path"]
self._logger = logging.getLogger(self.__class__.__name__)
if log_path:
logging.basicConfig(
level=logging.DEBUG, filename=log_path, filemode='w',
format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
if not self.url.endswith('/'):
self.url += '/'
def get_configuration(self):
"""Retrieve existing syslog configuration."""
try:
(rc, result) = request(self.url + "storage-systems/{0}/syslog".format(self.ssid),
headers=HEADERS, **self.creds)
return result
except Exception as err:
self.module.fail_json(msg="Failed to retrieve syslog configuration! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
def test_configuration(self, body):
"""Send test syslog message to the storage array.
        Allows a fixed number of retries to occur before a failure is issued, giving the storage array time to
        create the new syslog server record.
"""
try:
(rc, result) = request(self.url + "storage-systems/{0}/syslog/{1}/test".format(self.ssid, body["id"]),
method='POST', headers=HEADERS, **self.creds)
except Exception as err:
self.module.fail_json(
msg="We failed to send test message! Array Id [{0}]. Error [{1}].".format(self.ssid, to_native(err)))
def update_configuration(self):
"""Post the syslog request to array."""
config_match = None
perfect_match = None
update = False
body = dict()
# search existing configuration for syslog server entry match
configs = self.get_configuration()
if self.address:
for config in configs:
if config["serverAddress"] == self.address:
config_match = config
if (config["port"] == self.port and config["protocol"] == self.protocol and
len(config["components"]) == len(self.components) and
all([component["type"] in self.components for component in config["components"]])):
perfect_match = config_match
break
# generate body for the http request
if self.syslog:
if not perfect_match:
update = True
if config_match:
body.update(dict(id=config_match["id"]))
components = [dict(type=component_type) for component_type in self.components]
body.update(dict(serverAddress=self.address, port=self.port,
protocol=self.protocol, components=components))
self._logger.info(body)
self.make_configuration_request(body)
# remove specific syslog server configuration
        elif self.address and config_match:
update = True
body.update(dict(id=config_match["id"]))
self._logger.info(body)
self.make_configuration_request(body)
# if no address is specified, remove all syslog server configurations
elif configs:
update = True
for config in configs:
body.update(dict(id=config["id"]))
self._logger.info(body)
self.make_configuration_request(body)
return update
def make_configuration_request(self, body):
# make http request(s)
if not self.check_mode:
try:
if self.syslog:
if "id" in body:
(rc, result) = request(
self.url + "storage-systems/{0}/syslog/{1}".format(self.ssid, body["id"]),
method='POST', data=json.dumps(body), headers=HEADERS, **self.creds)
else:
(rc, result) = request(self.url + "storage-systems/{0}/syslog".format(self.ssid),
method='POST', data=json.dumps(body), headers=HEADERS, **self.creds)
body.update(result)
# send syslog test message
if self.test:
self.test_configuration(body)
elif "id" in body:
(rc, result) = request(self.url + "storage-systems/{0}/syslog/{1}".format(self.ssid, body["id"]),
method='DELETE', headers=HEADERS, **self.creds)
# This is going to catch cases like a connection failure
except Exception as err:
self.module.fail_json(msg="We failed to modify syslog configuration! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
def update(self):
"""Update configuration and respond to ansible."""
update = self.update_configuration()
self.module.exit_json(msg="The syslog settings have been updated.", changed=update)
def __call__(self, *args, **kwargs):
self.update()
def main():
settings = Syslog()
settings()
if __name__ == "__main__":
main()

View file

@@ -0,0 +1,845 @@
#!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_volume
short_description: NetApp E-Series manage storage volumes (standard and thin)
description:
- Create or remove volumes (standard and thin) for NetApp E/EF-series storage arrays.
author:
- Kevin Hulquest (@hulquest)
- Nathan Swartz (@ndswartz)
extends_documentation_fragment:
- netapp.ontap.netapp.eseries
options:
state:
description:
- Whether the specified volume should exist
required: true
choices: ['present', 'absent']
name:
description:
- The name of the volume to manage.
required: true
storage_pool_name:
description:
            - Required only when I(state=='present').
- Name of the storage pool wherein the volume should reside.
required: false
size_unit:
description:
- The unit used to interpret the size parameter
choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
default: 'gb'
size:
description:
- Required only when I(state=='present').
- Size of the volume in I(size_unit).
- Size of the virtual volume in the case of a thin volume in I(size_unit).
- Maximum virtual volume size of a thin provisioned volume is 256tb; however other OS-level restrictions may
exist.
required: true
segment_size_kb:
description:
- Segment size of the volume
- All values are in kibibytes.
- Some common choices include '8', '16', '32', '64', '128', '256', and '512' but options are system
dependent.
- Retrieve the definitive system list from M(netapp_e_facts) under segment_sizes.
- When the storage pool is a raidDiskPool then the segment size must be 128kb.
- Segment size migrations are not allowed in this module
default: '128'
thin_provision:
description:
- Whether the volume should be thin provisioned.
- Thin volumes can only be created when I(raid_level=="raidDiskPool").
- Generally, use of thin-provisioning is not recommended due to performance impacts.
type: bool
default: false
thin_volume_repo_size:
description:
- This value (in size_unit) sets the allocated space for the thin provisioned repository.
            - Initial value must be between or equal to 4gb and 256gb in increments of 4gb.
- During expansion operations the increase must be between or equal to 4gb and 256gb in increments of 4gb.
- This option has no effect during expansion if I(thin_volume_expansion_policy=="automatic").
            - Generally speaking you should almost always use I(thin_volume_expansion_policy=="automatic").
required: false
thin_volume_max_repo_size:
description:
- This is the maximum amount the thin volume repository will be allowed to grow.
- Only has significance when I(thin_volume_expansion_policy=="automatic").
- When the percentage I(thin_volume_repo_size) of I(thin_volume_max_repo_size) exceeds
I(thin_volume_growth_alert_threshold) then a warning will be issued and the storage array will execute
the I(thin_volume_expansion_policy) policy.
- Expansion operations when I(thin_volume_expansion_policy=="automatic") will increase the maximum
repository size.
default: same as size (in size_unit)
thin_volume_expansion_policy:
description:
- This is the thin volume expansion policy.
            - When I(thin_volume_expansion_policy=="automatic") and I(thin_volume_growth_alert_threshold) is exceeded, the
I(thin_volume_max_repo_size) will be automatically expanded.
- When I(thin_volume_expansion_policy=="manual") and I(thin_volume_growth_alert_threshold) is exceeded the
storage system will wait for manual intervention.
            - The thin volume expansion policy cannot be modified on existing thin volumes in this module.
            - Generally speaking you should almost always use I(thin_volume_expansion_policy=="automatic").
choices: ["automatic", "manual"]
default: "automatic"
thin_volume_growth_alert_threshold:
description:
- This is the thin provision repository utilization threshold (in percent).
            - When the percentage of used storage of the maximum repository size exceeds this value then an alert will
be issued and the I(thin_volume_expansion_policy) will be executed.
- Values must be between or equal to 10 and 99.
default: 95
owning_controller:
description:
- Specifies which controller will be the primary owner of the volume
- Not specifying will allow the controller to choose ownership.
required: false
choices: ["A", "B"]
ssd_cache_enabled:
description:
- Whether an existing SSD cache should be enabled on the volume (fails if no SSD cache defined)
- The default value is to ignore existing SSD cache setting.
type: bool
default: false
data_assurance_enabled:
description:
- Determines whether data assurance (DA) should be enabled for the volume
- Only available when creating a new volume and on a storage pool with drives supporting the DA capability.
type: bool
default: false
read_cache_enable:
description:
- Indicates whether read caching should be enabled for the volume.
type: bool
default: true
read_ahead_enable:
description:
- Indicates whether or not automatic cache read-ahead is enabled.
- This option has no effect on thinly provisioned volumes since the architecture for thin volumes cannot
benefit from read ahead caching.
type: bool
default: true
write_cache_enable:
description:
- Indicates whether write-back caching should be enabled for the volume.
type: bool
default: true
cache_without_batteries:
description:
- Indicates whether caching should be used without battery backup.
            - Warning, when I(cache_without_batteries==true) and the storage system loses power with no battery
              backup, data will be lost!
type: bool
default: false
workload_name:
description:
- Label for the workload defined by the metadata.
- When I(workload_name) and I(metadata) are specified then the defined workload will be added to the storage
array.
- When I(workload_name) exists on the storage array but the metadata is different then the workload
definition will be updated. (Changes will update all associated volumes!)
- Existing workloads can be retrieved using M(netapp_e_facts).
required: false
metadata:
description:
- Dictionary containing meta data for the use, user, location, etc of the volume (dictionary is arbitrarily
defined for whatever the user deems useful)
- When I(workload_name) exists on the storage array but the metadata is different then the workload
definition will be updated. (Changes will update all associated volumes!)
- I(workload_name) must be specified when I(metadata) are defined.
type: dict
required: false
wait_for_initialization:
description:
- Forces the module to wait for expansion operations to complete before continuing.
type: bool
default: false
initialization_timeout:
description:
- Duration in seconds before the wait_for_initialization operation will terminate.
            - I(wait_for_initialization==True) must be set for this option to have any effect on the module's operations.
type: int
required: false
'''
EXAMPLES = """
- name: Create simple volume with workload tags (volume meta data)
netapp_e_volume:
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
state: present
name: volume
storage_pool_name: storage_pool
size: 300
size_unit: gb
workload_name: volume_tag
metadata:
key1: value1
key2: value2
- name: Create a thin volume
netapp_e_volume:
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
state: present
name: volume1
storage_pool_name: storage_pool
size: 131072
size_unit: gb
thin_provision: true
thin_volume_repo_size: 32
thin_volume_max_repo_size: 1024
- name: Expand thin volume's virtual size
netapp_e_volume:
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
state: present
name: volume1
storage_pool_name: storage_pool
size: 262144
size_unit: gb
thin_provision: true
thin_volume_repo_size: 32
thin_volume_max_repo_size: 1024
- name: Expand thin volume's maximum repository size
netapp_e_volume:
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
state: present
name: volume1
storage_pool_name: storage_pool
size: 262144
size_unit: gb
thin_provision: true
thin_volume_repo_size: 32
thin_volume_max_repo_size: 2048
- name: Delete volume
netapp_e_volume:
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
state: absent
name: volume
"""
RETURN = """
msg:
description: State of volume
type: str
returned: always
sample: "Standard volume [workload_vol_1] has been created."
"""
from time import sleep
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import NetAppESeriesModule
from ansible.module_utils._text import to_native
class NetAppESeriesVolume(NetAppESeriesModule):
VOLUME_CREATION_BLOCKING_TIMEOUT_SEC = 300
def __init__(self):
ansible_options = dict(
state=dict(required=True, choices=["present", "absent"]),
name=dict(required=True, type="str"),
storage_pool_name=dict(type="str"),
size_unit=dict(default="gb", choices=["bytes", "b", "kb", "mb", "gb", "tb", "pb", "eb", "zb", "yb"],
type="str"),
size=dict(type="float"),
segment_size_kb=dict(type="int", default=128),
owning_controller=dict(required=False, choices=['A', 'B']),
ssd_cache_enabled=dict(type="bool", default=False),
data_assurance_enabled=dict(type="bool", default=False),
thin_provision=dict(type="bool", default=False),
thin_volume_repo_size=dict(type="int"),
thin_volume_max_repo_size=dict(type="float"),
thin_volume_expansion_policy=dict(type="str", choices=["automatic", "manual"]),
thin_volume_growth_alert_threshold=dict(type="int", default=95),
read_cache_enable=dict(type="bool", default=True),
read_ahead_enable=dict(type="bool", default=True),
write_cache_enable=dict(type="bool", default=True),
cache_without_batteries=dict(type="bool", default=False),
workload_name=dict(type="str", required=False),
metadata=dict(type="dict", required=False),
wait_for_initialization=dict(type="bool", default=False),
initialization_timeout=dict(type="int", required=False))
required_if = [
["state", "present", ["storage_pool_name", "size"]],
["thin_provision", "true", ["thin_volume_repo_size"]]
]
super(NetAppESeriesVolume, self).__init__(ansible_options=ansible_options,
web_services_version="02.00.0000.0000",
supports_check_mode=True,
required_if=required_if)
args = self.module.params
self.state = args["state"]
self.name = args["name"]
self.storage_pool_name = args["storage_pool_name"]
self.size_unit = args["size_unit"]
self.segment_size_kb = args["segment_size_kb"]
if args["size"]:
self.size_b = self.convert_to_aligned_bytes(args["size"])
self.owning_controller_id = None
if args["owning_controller"]:
self.owning_controller_id = "070000000000000000000001" if args["owning_controller"] == "A" else "070000000000000000000002"
self.read_cache_enable = args["read_cache_enable"]
self.read_ahead_enable = args["read_ahead_enable"]
self.write_cache_enable = args["write_cache_enable"]
self.ssd_cache_enabled = args["ssd_cache_enabled"]
self.cache_without_batteries = args["cache_without_batteries"]
self.data_assurance_enabled = args["data_assurance_enabled"]
self.thin_provision = args["thin_provision"]
self.thin_volume_expansion_policy = args["thin_volume_expansion_policy"]
self.thin_volume_growth_alert_threshold = int(args["thin_volume_growth_alert_threshold"])
self.thin_volume_repo_size_b = None
self.thin_volume_max_repo_size_b = None
if args["thin_volume_repo_size"]:
self.thin_volume_repo_size_b = self.convert_to_aligned_bytes(args["thin_volume_repo_size"])
if args["thin_volume_max_repo_size"]:
self.thin_volume_max_repo_size_b = self.convert_to_aligned_bytes(args["thin_volume_max_repo_size"])
self.workload_name = args["workload_name"]
self.metadata = args["metadata"]
self.wait_for_initialization = args["wait_for_initialization"]
self.initialization_timeout = args["initialization_timeout"]
# convert metadata to a list of dictionaries containing the keys "key" and "value" corresponding to
# each of the workload attributes dictionary entries
metadata = []
if self.metadata:
if not self.workload_name:
self.module.fail_json(msg="When metadata is specified then the name for the workload must be specified."
" Array [%s]." % self.ssid)
for key in self.metadata.keys():
metadata.append(dict(key=key, value=self.metadata[key]))
self.metadata = metadata
if self.thin_provision:
if not self.thin_volume_max_repo_size_b:
self.thin_volume_max_repo_size_b = self.size_b
if not self.thin_volume_expansion_policy:
self.thin_volume_expansion_policy = "automatic"
if self.size_b > 256 * 1024 ** 4:
self.module.fail_json(msg="Thin provisioned volumes must be less than or equal to 256tb is size."
" Attempted size [%sg]" % (self.size_b * 1024 ** 3))
if (self.thin_volume_repo_size_b and self.thin_volume_max_repo_size_b and
self.thin_volume_repo_size_b > self.thin_volume_max_repo_size_b):
self.module.fail_json(msg="The initial size of the thin volume must not be larger than the maximum"
" repository size. Array [%s]." % self.ssid)
if self.thin_volume_growth_alert_threshold < 10 or self.thin_volume_growth_alert_threshold > 99:
self.module.fail_json(msg="thin_volume_growth_alert_threshold must be between or equal to 10 and 99."
"thin_volume_growth_alert_threshold [%s]. Array [%s]."
% (self.thin_volume_growth_alert_threshold, self.ssid))
self.volume_detail = None
self.pool_detail = None
self.workload_id = None
def convert_to_aligned_bytes(self, size):
"""Convert size to the truncated byte size that aligns on the segment size."""
size_bytes = int(size * self.SIZE_UNIT_MAP[self.size_unit])
segment_size_bytes = int(self.segment_size_kb * self.SIZE_UNIT_MAP["kb"])
segment_count = int(size_bytes / segment_size_bytes)
return segment_count * segment_size_bytes
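# Illustrative arithmetic (assuming "gb" is a valid size_unit key): a request for
# size=100 with segment_size_kb=128 becomes 100 * SIZE_UNIT_MAP["gb"] bytes and is
# then truncated down to a whole multiple of 128 * SIZE_UNIT_MAP["kb"] bytes, so the
# returned capacity never exceeds the requested size and always lands on a segment
# boundary.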
def get_volume(self):
"""Retrieve volume details from storage array."""
volumes = list()
thin_volumes = list()
try:
rc, volumes = self.request("storage-systems/%s/volumes" % self.ssid)
except Exception as err:
self.module.fail_json(msg="Failed to obtain list of thick volumes. Array Id [%s]. Error[%s]."
% (self.ssid, to_native(err)))
try:
rc, thin_volumes = self.request("storage-systems/%s/thin-volumes" % self.ssid)
except Exception as err:
self.module.fail_json(msg="Failed to obtain list of thin volumes. Array Id [%s]. Error[%s]."
% (self.ssid, to_native(err)))
volume_detail = [volume for volume in volumes + thin_volumes if volume["name"] == self.name]
return volume_detail[0] if volume_detail else dict()
def wait_for_volume_availability(self, retries=VOLUME_CREATION_BLOCKING_TIMEOUT_SEC / 5):
"""Waits until volume becomes available.
:raises AnsibleFailJson when retries are exhausted.
"""
if retries == 0:
self.module.fail_json(msg="Timed out waiting for the volume %s to become available. Array [%s]."
% (self.name, self.ssid))
if not self.get_volume():
sleep(5)
self.wait_for_volume_availability(retries=retries - 1)
def wait_for_volume_action(self, timeout=None):
"""Waits until volume action is complete is complete.
:param: int timeout: Wait duration measured in seconds. Waits indefinitely when None.
"""
action = "unknown"
percent_complete = None
while action != "complete":
sleep(5)
try:
rc, operations = self.request("storage-systems/%s/symbol/getLongLivedOpsProgress" % self.ssid)
# Search long lived operations for volume
action = "complete"
for operation in operations["longLivedOpsProgress"]:
if operation["volAction"] is not None:
for key in operation.keys():
if (operation[key] is not None and "volumeRef" in operation[key] and
(operation[key]["volumeRef"] == self.volume_detail["id"] or
("storageVolumeRef" in self.volume_detail and operation[key]["volumeRef"] == self.volume_detail["storageVolumeRef"]))):
action = operation["volAction"]
percent_complete = operation["init"]["percentComplete"]
except Exception as err:
self.module.fail_json(msg="Failed to get volume expansion progress. Volume [%s]. Array Id [%s]."
" Error[%s]." % (self.name, self.ssid, to_native(err)))
if timeout is not None:
if timeout <= 0:
self.module.warn("Expansion action, %s, failed to complete during the allotted time. Time remaining"
" [%s]. Array Id [%s]." % (action, percent_complete, self.ssid))
self.module.fail_json(msg="Expansion action failed to complete. Time remaining [%s]. Array Id [%s]." % (percent_complete, self.ssid))
if timeout:
timeout -= 5
self.module.log("Expansion action, %s, is %s complete." % (action, percent_complete))
self.module.log("Expansion action is complete.")
def get_storage_pool(self):
"""Retrieve storage pool details from the storage array."""
storage_pools = list()
try:
rc, storage_pools = self.request("storage-systems/%s/storage-pools" % self.ssid)
except Exception as err:
self.module.fail_json(msg="Failed to obtain list of storage pools. Array Id [%s]. Error[%s]."
% (self.ssid, to_native(err)))
pool_detail = [storage_pool for storage_pool in storage_pools if storage_pool["name"] == self.storage_pool_name]
return pool_detail[0] if pool_detail else dict()
def check_storage_pool_sufficiency(self):
"""Perform a series of checks as to the sufficiency of the storage pool for the volume."""
if not self.pool_detail:
self.module.fail_json(msg='Requested storage pool (%s) not found' % self.storage_pool_name)
if not self.volume_detail:
if self.thin_provision and not self.pool_detail['diskPool']:
self.module.fail_json(msg='Thin provisioned volumes can only be created on raid disk pools.')
if (self.data_assurance_enabled and not
(self.pool_detail["protectionInformationCapabilities"]["protectionInformationCapable"] and
self.pool_detail["protectionInformationCapabilities"]["protectionType"] == "type2Protection")):
self.module.fail_json(msg="Data Assurance (DA) requires the storage pool to be DA-compatible."
" Array [%s]." % self.ssid)
if int(self.pool_detail["freeSpace"]) < self.size_b and not self.thin_provision:
self.module.fail_json(msg="Not enough storage pool free space available for the volume's needs."
" Array [%s]." % self.ssid)
else:
# Check for expansion
if (int(self.pool_detail["freeSpace"]) < int(self.volume_detail["totalSizeInBytes"]) - self.size_b and
not self.thin_provision):
self.module.fail_json(msg="Not enough storage pool free space available for the volume's needs."
" Array [%s]." % self.ssid)
def update_workload_tags(self, check_mode=False):
"""Check the status of the workload tag and update storage array definitions if necessary.
When the workload attributes are not provided but an existing workload tag name is, the existing
attributes will be used.
:return bool: Whether changes were required to be made."""
change_required = False
workload_tags = None
request_body = None
ansible_profile_id = None
if self.workload_name:
try:
rc, workload_tags = self.request("storage-systems/%s/workloads" % self.ssid)
except Exception as error:
self.module.fail_json(msg="Failed to retrieve storage array workload tags. Array [%s]" % self.ssid)
# Generate common indexed Ansible workload tag
current_tag_index_list = [int(pair["value"].replace("ansible_workload_", ""))
for tag in workload_tags for pair in tag["workloadAttributes"]
if pair["key"] == "profileId" and "ansible_workload_" in pair["value"] and
str(pair["value"]).replace("ansible_workload_", "").isdigit()]
tag_index = 1
if current_tag_index_list:
tag_index = max(current_tag_index_list) + 1
ansible_profile_id = "ansible_workload_%d" % tag_index
request_body = dict(name=self.workload_name,
profileId=ansible_profile_id,
workloadInstanceIndex=None,
isValid=True)
# evaluate and update storage array when needed
for tag in workload_tags:
if tag["name"] == self.workload_name:
self.workload_id = tag["id"]
if not self.metadata:
break
# Determine if core attributes (everything but profileId) are the same
metadata_set = set(tuple(sorted(attr.items())) for attr in self.metadata)
tag_set = set(tuple(sorted(attr.items()))
for attr in tag["workloadAttributes"] if attr["key"] != "profileId")
if metadata_set != tag_set:
self.module.log("Workload tag change is required!")
change_required = True
# only perform the required action when check_mode==False
if change_required and not check_mode:
self.metadata.append(dict(key="profileId", value=ansible_profile_id))
request_body.update(dict(isNewWorkloadInstance=False,
isWorkloadDataInitialized=True,
isWorkloadCardDataToBeReset=True,
workloadAttributes=self.metadata))
try:
rc, resp = self.request("storage-systems/%s/workloads/%s" % (self.ssid, tag["id"]),
data=request_body, method="POST")
except Exception as error:
self.module.fail_json(msg="Failed to create new workload tag. Array [%s]. Error [%s]"
% (self.ssid, to_native(error)))
self.module.log("Workload tag [%s] required change." % self.workload_name)
break
# existing workload tag not found so create new workload tag
else:
change_required = True
self.module.log("Workload tag creation is required!")
if change_required and not check_mode:
if self.metadata:
self.metadata.append(dict(key="profileId", value=ansible_profile_id))
else:
self.metadata = [dict(key="profileId", value=ansible_profile_id)]
request_body.update(dict(isNewWorkloadInstance=True,
isWorkloadDataInitialized=False,
isWorkloadCardDataToBeReset=False,
workloadAttributes=self.metadata))
try:
rc, resp = self.request("storage-systems/%s/workloads" % self.ssid,
method="POST", data=request_body)
self.workload_id = resp["id"]
except Exception as error:
self.module.fail_json(msg="Failed to create new workload tag. Array [%s]. Error [%s]"
% (self.ssid, to_native(error)))
self.module.log("Workload tag [%s] was added." % self.workload_name)
return change_required
def get_volume_property_changes(self):
"""Retrieve the volume update request body when change(s) are required.
:raise AnsibleFailJson when attempting to change segment size on existing volume.
:return dict: request body when change(s) to a volume's properties are required.
"""
change = False
request_body = dict(flashCache=self.ssd_cache_enabled, metaTags=[],
cacheSettings=dict(readCacheEnable=self.read_cache_enable,
writeCacheEnable=self.write_cache_enable))
# check for invalid modifications
if self.segment_size_kb * 1024 != int(self.volume_detail["segmentSize"]):
self.module.fail_json(msg="Existing volume segment size is %s and cannot be modified."
% self.volume_detail["segmentSize"])
# common thick/thin volume properties
if (self.read_cache_enable != self.volume_detail["cacheSettings"]["readCacheEnable"] or
self.write_cache_enable != self.volume_detail["cacheSettings"]["writeCacheEnable"] or
self.ssd_cache_enabled != self.volume_detail["flashCached"]):
change = True
# controller ownership
if self.owning_controller_id and self.owning_controller_id != self.volume_detail["preferredManager"]:
change = True
request_body.update(dict(owningControllerId=self.owning_controller_id))
if self.workload_name:
request_body.update(dict(metaTags=[dict(key="workloadId", value=self.workload_id),
dict(key="volumeTypeId", value="volume")]))
if {"key": "workloadId", "value": self.workload_id} not in self.volume_detail["metadata"]:
change = True
elif self.volume_detail["metadata"]:
change = True
# thick/thin volume specific properties
if self.thin_provision:
if self.thin_volume_growth_alert_threshold != int(self.volume_detail["growthAlertThreshold"]):
change = True
request_body.update(dict(growthAlertThreshold=self.thin_volume_growth_alert_threshold))
if self.thin_volume_expansion_policy != self.volume_detail["expansionPolicy"]:
change = True
request_body.update(dict(expansionPolicy=self.thin_volume_expansion_policy))
else:
if self.read_ahead_enable != (int(self.volume_detail["cacheSettings"]["readAheadMultiplier"]) > 0):
change = True
request_body["cacheSettings"].update(dict(readAheadEnable=self.read_ahead_enable))
if self.cache_without_batteries != self.volume_detail["cacheSettings"]["cwob"]:
change = True
request_body["cacheSettings"].update(dict(cacheWithoutBatteries=self.cache_without_batteries))
return request_body if change else dict()
def get_expand_volume_changes(self):
"""Expand the storage specifications for the existing thick/thin volume.
:raise AnsibleFailJson when a thick/thin volume expansion request fails.
:return dict: dictionary containing all the necessary values for volume expansion request
"""
request_body = dict()
if self.size_b < int(self.volume_detail["capacity"]):
self.module.fail_json(msg="Reducing the size of volumes is not permitted. Volume [%s]. Array [%s]"
% (self.name, self.ssid))
if self.volume_detail["thinProvisioned"]:
if self.size_b > int(self.volume_detail["capacity"]):
request_body.update(dict(sizeUnit="bytes", newVirtualSize=self.size_b))
self.module.log("Thin volume virtual size have been expanded.")
if self.volume_detail["expansionPolicy"] == "automatic":
if self.thin_volume_max_repo_size_b > int(self.volume_detail["provisionedCapacityQuota"]):
request_body.update(dict(sizeUnit="bytes", newRepositorySize=self.thin_volume_max_repo_size_b))
self.module.log("Thin volume maximum repository size have been expanded (automatic policy).")
elif self.volume_detail["expansionPolicy"] == "manual":
if self.thin_volume_repo_size_b > int(self.volume_detail["currentProvisionedCapacity"]):
change = self.thin_volume_repo_size_b - int(self.volume_detail["currentProvisionedCapacity"])
if change < 4 * 1024 ** 3 or change > 256 * 1024 ** 3 or change % (4 * 1024 ** 3) != 0:
self.module.fail_json(msg="The thin volume repository increase must be between or equal to 4gb"
" and 256gb in increments of 4gb. Attempted size [%sg]."
% (self.thin_volume_repo_size_b * 1024 ** 3))
request_body.update(dict(sizeUnit="bytes", newRepositorySize=self.thin_volume_repo_size_b))
self.module.log("Thin volume maximum repository size have been expanded (manual policy).")
elif self.size_b > int(self.volume_detail["capacity"]):
request_body.update(dict(sizeUnit="bytes", expansionSize=self.size_b))
self.module.log("Volume storage capacities have been expanded.")
return request_body
def create_volume(self):
"""Create thick/thin volume according to the specified criteria."""
body = dict(name=self.name, poolId=self.pool_detail["id"], sizeUnit="bytes",
dataAssuranceEnabled=self.data_assurance_enabled)
if self.thin_provision:
body.update(dict(virtualSize=self.size_b,
repositorySize=self.thin_volume_repo_size_b,
maximumRepositorySize=self.thin_volume_max_repo_size_b,
expansionPolicy=self.thin_volume_expansion_policy,
growthAlertThreshold=self.thin_volume_growth_alert_threshold))
try:
rc, volume = self.request("storage-systems/%s/thin-volumes" % self.ssid, data=body, method="POST")
except Exception as error:
self.module.fail_json(msg="Failed to create thin volume. Volume [%s]. Array Id [%s]. Error[%s]."
% (self.name, self.ssid, to_native(error)))
self.module.log("New thin volume created [%s]." % self.name)
else:
body.update(dict(size=self.size_b, segSize=self.segment_size_kb))
try:
rc, volume = self.request("storage-systems/%s/volumes" % self.ssid, data=body, method="POST")
except Exception as error:
self.module.fail_json(msg="Failed to create volume. Volume [%s]. Array Id [%s]. Error[%s]."
% (self.name, self.ssid, to_native(error)))
self.module.log("New volume created [%s]." % self.name)
def update_volume_properties(self):
"""Update existing thin-volume or volume properties.
:raise AnsibleFailJson when either thick/thin volume update request fails.
:return bool: whether update was applied
"""
self.wait_for_volume_availability()
self.volume_detail = self.get_volume()
request_body = self.get_volume_property_changes()
if request_body:
if self.thin_provision:
try:
rc, resp = self.request("storage-systems/%s/thin-volumes/%s"
% (self.ssid, self.volume_detail["id"]), data=request_body, method="POST")
except Exception as error:
self.module.fail_json(msg="Failed to update thin volume properties. Volume [%s]. Array Id [%s]."
" Error[%s]." % (self.name, self.ssid, to_native(error)))
else:
try:
rc, resp = self.request("storage-systems/%s/volumes/%s" % (self.ssid, self.volume_detail["id"]),
data=request_body, method="POST")
except Exception as error:
self.module.fail_json(msg="Failed to update volume properties. Volume [%s]. Array Id [%s]."
" Error[%s]." % (self.name, self.ssid, to_native(error)))
return True
return False
def expand_volume(self):
"""Expand the storage specifications for the existing thick/thin volume.
:raise AnsibleFailJson when a thick/thin volume expansion request fails.
"""
request_body = self.get_expand_volume_changes()
if request_body:
if self.volume_detail["thinProvisioned"]:
try:
rc, resp = self.request("storage-systems/%s/thin-volumes/%s/expand"
% (self.ssid, self.volume_detail["id"]), data=request_body, method="POST")
except Exception as err:
self.module.fail_json(msg="Failed to expand thin volume. Volume [%s]. Array Id [%s]. Error[%s]."
% (self.name, self.ssid, to_native(err)))
self.module.log("Thin volume specifications have been expanded.")
else:
try:
rc, resp = self.request(
"storage-systems/%s/volumes/%s/expand" % (self.ssid, self.volume_detail['id']),
data=request_body, method="POST")
except Exception as err:
self.module.fail_json(msg="Failed to expand volume. Volume [%s]. Array Id [%s]. Error[%s]."
% (self.name, self.ssid, to_native(err)))
self.module.log("Volume storage capacities have been expanded.")
def delete_volume(self):
"""Delete existing thin/thick volume."""
if self.thin_provision:
try:
rc, resp = self.request("storage-systems/%s/thin-volumes/%s" % (self.ssid, self.volume_detail["id"]),
method="DELETE")
except Exception as error:
self.module.fail_json(msg="Failed to delete thin volume. Volume [%s]. Array Id [%s]. Error[%s]."
% (self.name, self.ssid, to_native(error)))
self.module.log("Thin volume deleted [%s]." % self.name)
else:
try:
rc, resp = self.request("storage-systems/%s/volumes/%s" % (self.ssid, self.volume_detail["id"]),
method="DELETE")
except Exception as error:
self.module.fail_json(msg="Failed to delete volume. Volume [%s]. Array Id [%s]. Error[%s]."
% (self.name, self.ssid, to_native(error)))
self.module.log("Volume deleted [%s]." % self.name)
def apply(self):
"""Determine and apply any changes necessary to satisfy the specified criteria.
:raise AnsibleExitJson when completes successfully"""
change = False
msg = None
self.volume_detail = self.get_volume()
self.pool_detail = self.get_storage_pool()
# Determine whether changes need to be applied to existing workload tags
if self.state == 'present' and self.update_workload_tags(check_mode=True):
change = True
# Determine if any changes need to be applied
if self.volume_detail:
if self.state == 'absent':
change = True
elif self.state == 'present':
if self.get_expand_volume_changes() or self.get_volume_property_changes():
change = True
elif self.state == 'present':
if self.thin_provision and (self.thin_volume_repo_size_b < 4 * 1024 ** 3 or
self.thin_volume_repo_size_b > 256 * 1024 ** 3 or
self.thin_volume_repo_size_b % (4 * 1024 ** 3) != 0):
self.module.fail_json(msg="The initial thin volume repository size must be between 4gb and 256gb in"
" increments of 4gb. Attempted size [%sg]."
% (self.thin_volume_repo_size_b / (1024 ** 3)))
change = True
self.module.log("Update required: [%s]." % change)
# Apply any necessary changes
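# The msg strings assembled below keep a %s placeholder for the volume name; the
# exit_json call at the end of apply() fills it in when the final message still
# contains %s.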
if change and not self.module.check_mode:
if self.state == 'present':
if self.update_workload_tags():
msg = "Workload tag change occurred."
if not self.volume_detail:
self.check_storage_pool_sufficiency()
self.create_volume()
self.update_volume_properties()
msg = msg[:-1] + " and volume [%s] was created." if msg else "Volume [%s] has been created."
else:
if self.update_volume_properties():
msg = "Volume [%s] properties were updated."
if self.get_expand_volume_changes():
self.expand_volume()
msg = msg[:-1] + " and was expanded." if msg else "Volume [%s] was expanded."
if self.wait_for_initialization:
self.module.log("Waiting for volume operation to complete.")
self.wait_for_volume_action(timeout=self.initialization_timeout)
elif self.state == 'absent':
self.delete_volume()
msg = "Volume [%s] has been deleted."
else:
msg = "Volume [%s] does not exist." if self.state == 'absent' else "Volume [%s] exists."
self.module.exit_json(msg=(msg % self.name if msg and "%s" in msg else msg), changed=change)
def main():
volume = NetAppESeriesVolume()
volume.apply()
if __name__ == '__main__':
main()
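# As an illustration of the repository-size rule used twice above (in
# get_expand_volume_changes() and again in apply()): a thin-volume repository, or a
# manual repository expansion, must fall between 4 GiB and 256 GiB and be a whole
# multiple of 4 GiB. The helper name and sample values below are invented and are
# not part of the module.
GIB = 1024 ** 3


def valid_thin_repo_size(size_bytes):
    """Illustrative check: 4 GiB <= size <= 256 GiB and a whole multiple of 4 GiB."""
    return 4 * GIB <= size_bytes <= 256 * GIB and size_bytes % (4 * GIB) == 0


for gib in (3, 4, 6, 64, 256, 260):
    # 3 and 260 fail the range check; 6 fails the 4 GiB-increment check.
    print("%4d GiB -> %s" % (gib, valid_thin_repo_size(gib * GIB)))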

View file

@@ -0,0 +1,400 @@
#!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_volume_copy
short_description: NetApp E-Series create volume copy pairs
description:
- Create and delete volume copy pairs on NetApp E-Series storage arrays.
author: Kevin Hulquest (@hulquest)
extends_documentation_fragment:
- netapp.ontap.netapp.eseries
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API, for example C(https://prod-1.wahoo.acme.com/devmgr/v2).
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
source_volume_id:
description:
- The id of the volume copy source.
- If used, must be paired with destination_volume_id
- Mutually exclusive with volume_copy_pair_id, and search_volume_id
destination_volume_id:
description:
- The id of the volume copy destination.
- If used, must be paired with source_volume_id
- Mutually exclusive with volume_copy_pair_id, and search_volume_id
volume_copy_pair_id:
description:
- The id of a given volume copy pair
- Mutually exclusive with destination_volume_id, source_volume_id, and search_volume_id
- Can use to delete or check presence of volume pairs
- Must specify this or (destination_volume_id and source_volume_id)
state:
description:
- Whether the specified volume copy pair should exist or not.
required: True
choices: ['present', 'absent']
create_copy_pair_if_does_not_exist:
description:
- Defines if a copy pair will be created if it does not exist.
- If set to True destination_volume_id and source_volume_id are required.
type: bool
default: True
start_stop_copy:
description:
- starts a re-copy or stops a copy in progress
- "Note: If you stop the initial file copy before it it done the copy pair will be destroyed"
- Requires volume_copy_pair_id
search_volume_id:
description:
- Searches for all valid potential target and source volumes that could be used in a copy_pair
- Mutually exclusive with volume_copy_pair_id, destination_volume_id and source_volume_id
'''
RESULTS = """
"""
EXAMPLES = """
"""
RETURN = """
msg:
description: Success message
returned: success
type: str
sample: Created Volume Copy Pair with ID
"""
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import request
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
def find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(params):
get_status = 'storage-systems/%s/volume-copy-jobs' % params['ssid']
url = params['api_url'] + get_status
(rc, resp) = request(url, method='GET', url_username=params['api_username'],
url_password=params['api_password'], headers=HEADERS,
validate_certs=params['validate_certs'])
volume_copy_pair_id = None
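# The listing is scanned without breaking on the first hit, so when several copy
# jobs share the same source volume the id of the last one listed is returned.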
for potential_copy_pair in resp:
if potential_copy_pair['sourceVolume'] == params['source_volume_id']:
volume_copy_pair_id = potential_copy_pair['id']
return volume_copy_pair_id
def create_copy_pair(params):
get_status = 'storage-systems/%s/volume-copy-jobs' % params['ssid']
url = params['api_url'] + get_status
rData = {
"sourceId": params['source_volume_id'],
"targetId": params['destination_volume_id']
}
(rc, resp) = request(url, data=json.dumps(rData), ignore_errors=True, method='POST',
url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
validate_certs=params['validate_certs'])
if rc != 200:
return False, (rc, resp)
else:
return True, (rc, resp)
def delete_copy_pair_by_copy_pair_id(params):
get_status = 'storage-systems/%s/volume-copy-jobs/%s?retainRepositories=false' % (
params['ssid'], params['volume_copy_pair_id'])
url = params['api_url'] + get_status
(rc, resp) = request(url, ignore_errors=True, method='DELETE',
url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
validate_certs=params['validate_certs'])
if rc != 204:
return False, (rc, resp)
else:
return True, (rc, resp)
def find_volume_copy_pair_id_by_volume_copy_pair_id(params):
get_status = 'storage-systems/%s/volume-copy-jobs/%s' % (
params['ssid'], params['volume_copy_pair_id'])
url = params['api_url'] + get_status
(rc, resp) = request(url, ignore_errors=True, method='GET',
url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
validate_certs=params['validate_certs'])
if rc != 200:
return False, (rc, resp)
else:
return True, (rc, resp)
def start_stop_copy(params):
get_status = 'storage-systems/%s/volume-copy-jobs-control/%s?control=%s' % (
params['ssid'], params['volume_copy_pair_id'], params['start_stop_copy'])
url = params['api_url'] + get_status
(response_code, response_data) = request(url, ignore_errors=True, method='POST',
url_username=params['api_username'], url_password=params['api_password'],
headers=HEADERS,
validate_certs=params['validate_certs'])
if response_code == 200:
return True, response_data[0]['percentComplete']
else:
return False, response_data
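# check_copy_status() below treats a percentComplete of -1 as "no copy currently
# running" and any other value as an in-progress copy.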
def check_copy_status(params):
get_status = 'storage-systems/%s/volume-copy-jobs-control/%s' % (
params['ssid'], params['volume_copy_pair_id'])
url = params['api_url'] + get_status
(response_code, response_data) = request(url, ignore_errors=True, method='GET',
url_username=params['api_username'], url_password=params['api_password'],
headers=HEADERS,
validate_certs=params['validate_certs'])
if response_code == 200:
if response_data['percentComplete'] != -1:
return True, response_data['percentComplete']
else:
return False, response_data['percentComplete']
else:
return False, response_data
def find_valid_copy_pair_targets_and_sources(params):
get_status = 'storage-systems/%s/volumes' % params['ssid']
url = params['api_url'] + get_status
(response_code, response_data) = request(url, ignore_errors=True, method='GET',
url_username=params['api_username'], url_password=params['api_password'],
headers=HEADERS,
validate_certs=params['validate_certs'])
if response_code == 200:
source_capacity = None
candidates = []
for volume in response_data:
if volume['id'] == params['search_volume_id']:
source_capacity = volume['capacity']
else:
candidates.append(volume)
potential_sources = []
potential_targets = []
for volume in candidates:
if volume['capacity'] > source_capacity:
if volume['volumeCopyTarget'] is False:
if volume['volumeCopySource'] is False:
potential_targets.append(volume['id'])
else:
if volume['volumeCopyTarget'] is False:
if volume['volumeCopySource'] is False:
potential_sources.append(volume['id'])
return potential_targets, potential_sources
else:
raise Exception("Response [%s]" % response_code)
def main():
module = AnsibleModule(argument_spec=dict(
source_volume_id=dict(type='str'),
destination_volume_id=dict(type='str'),
copy_priority=dict(required=False, default=0, type='int'),
ssid=dict(required=True, type='str'),
api_url=dict(required=True),
api_username=dict(required=False),
api_password=dict(required=False, no_log=True),
validate_certs=dict(required=False, default=True),
targetWriteProtected=dict(required=False, default=True, type='bool'),
onlineCopy=dict(required=False, default=False, type='bool'),
volume_copy_pair_id=dict(type='str'),
status=dict(required=True, choices=['present', 'absent'], type='str'),
create_copy_pair_if_does_not_exist=dict(required=False, default=True, type='bool'),
start_stop_copy=dict(required=False, choices=['start', 'stop'], type='str'),
search_volume_id=dict(type='str'),
),
mutually_exclusive=[['volume_copy_pair_id', 'destination_volume_id'],
['volume_copy_pair_id', 'source_volume_id'],
['volume_copy_pair_id', 'search_volume_id'],
['search_volume_id', 'destination_volume_id'],
['search_volume_id', 'source_volume_id'],
],
required_together=[['source_volume_id', 'destination_volume_id'],
],
required_if=[["create_copy_pair_if_does_not_exist", True, ['source_volume_id', 'destination_volume_id'], ],
["start_stop_copy", 'stop', ['volume_copy_pair_id'], ],
["start_stop_copy", 'start', ['volume_copy_pair_id'], ],
]
)
params = module.params
if not params['api_url'].endswith('/'):
params['api_url'] += '/'
# Check if we want to search
if params['search_volume_id'] is not None:
try:
potential_targets, potential_sources = find_valid_copy_pair_targets_and_sources(params)
except Exception as e:
module.fail_json(msg="Failed to find valid copy pair candidates. Error [%s]" % to_native(e))
module.exit_json(changed=False,
msg=' Valid source devices found: %s Valid target devices found: %s' % (len(potential_sources), len(potential_targets)),
search_volume_id=params['search_volume_id'],
valid_targets=potential_targets,
valid_sources=potential_sources)
# Check if we want to start or stop a copy operation
if params['start_stop_copy'] == 'start' or params['start_stop_copy'] == 'stop':
# Get the current status info
currently_running, status_info = check_copy_status(params)
# If we want to start
if params['start_stop_copy'] == 'start':
# If we have already started
if currently_running is True:
module.exit_json(changed=False, msg='Volume Copy Pair copy has started.',
volume_copy_pair_id=params['volume_copy_pair_id'], percent_done=status_info)
# If we need to start
else:
start_status, info = start_stop_copy(params)
if start_status is True:
module.exit_json(changed=True, msg='Volume Copy Pair copy has started.',
volume_copy_pair_id=params['volume_copy_pair_id'], percent_done=info)
else:
module.fail_json(msg="Could not start volume copy pair Error: %s" % info)
# If we want to stop
else:
# If it has already stopped
if currently_running is False:
module.exit_json(changed=False, msg='Volume Copy Pair copy is stopped.',
volume_copy_pair_id=params['volume_copy_pair_id'])
# If we need to stop it
else:
start_status, info = start_stop_copy(params)
if start_status is True:
module.exit_json(changed=True, msg='Volume Copy Pair copy has been stopped.',
volume_copy_pair_id=params['volume_copy_pair_id'])
else:
module.fail_json(msg="Could not stop volume copy pair Error: %s" % info)
# If we want the copy pair to exist we do this stuff
if params['status'] == 'present':
# We need to check if it exists first
if params['volume_copy_pair_id'] is None:
params['volume_copy_pair_id'] = find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(
params)
# If no volume copy pair is found we need to make it.
if params['volume_copy_pair_id'] is None:
# In order to create we can not do so with just a volume_copy_pair_id
copy_began_status, (rc, resp) = create_copy_pair(params)
if copy_began_status is True:
module.exit_json(changed=True, msg='Created Volume Copy Pair with ID: %s' % resp['id'])
else:
module.fail_json(msg="Could not create volume copy pair Code: %s Error: %s" % (rc, resp))
# If it does exist we do nothing
else:
# We verify that it exists
exist_status, (exist_status_code, exist_status_data) = find_volume_copy_pair_id_by_volume_copy_pair_id(
params)
if exist_status:
module.exit_json(changed=False,
msg=' Volume Copy Pair with ID: %s exists' % params['volume_copy_pair_id'])
else:
if exist_status_code == 404:
module.fail_json(
msg=' Volume Copy Pair with ID: %s does not exist. Cannot create without source_volume_id and destination_volume_id' %
params['volume_copy_pair_id'])
else:
module.fail_json(msg="Could not find volume copy pair Code: %s Error: %s" % (
exist_status_code, exist_status_data))
module.fail_json(msg="Done")
# If we want it to not exist we do this
else:
if params['volume_copy_pair_id'] is None:
params['volume_copy_pair_id'] = find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(
params)
# We delete it by the volume_copy_pair_id
delete_status, (delete_status_code, delete_status_data) = delete_copy_pair_by_copy_pair_id(params)
if delete_status is True:
module.exit_json(changed=True,
msg=' Volume Copy Pair with ID: %s was deleted' % params['volume_copy_pair_id'])
else:
if delete_status_code == 404:
module.exit_json(changed=False,
msg=' Volume Copy Pair with ID: %s does not exist' % params['volume_copy_pair_id'])
else:
module.fail_json(msg="Could not delete volume copy pair Code: %s Error: %s" % (
delete_status_code, delete_status_data))
if __name__ == '__main__':
main()
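# As an illustration of the partitioning rule applied by
# find_valid_copy_pair_targets_and_sources() in the search branch of main() above:
# volumes strictly larger than the search volume become potential targets, the rest
# potential sources, and anything already acting as a copy source or target is
# skipped. The sample data below is invented and is not part of the module.
SEARCH_CAPACITY = 100  # capacity of the invented search volume

SAMPLE_VOLUMES = [
    {"id": "v1", "capacity": 200, "volumeCopyTarget": False, "volumeCopySource": False},
    {"id": "v2", "capacity": 100, "volumeCopyTarget": False, "volumeCopySource": False},
    {"id": "v3", "capacity": 50, "volumeCopyTarget": True, "volumeCopySource": False},
]

potential_targets = [v["id"] for v in SAMPLE_VOLUMES
                     if v["capacity"] > SEARCH_CAPACITY
                     and not v["volumeCopyTarget"] and not v["volumeCopySource"]]
potential_sources = [v["id"] for v in SAMPLE_VOLUMES
                     if v["capacity"] <= SEARCH_CAPACITY
                     and not v["volumeCopyTarget"] and not v["volumeCopySource"]]

print(potential_targets)  # ['v1']
print(potential_sources)  # ['v2']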

View file

@@ -0,0 +1,268 @@
#!/usr/bin/python
# (c) 2017, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: sf_account_manager
deprecated:
removed_in: "2.11"
why: This Module has been replaced
alternative: please use M(na_elementsw_account)
short_description: Manage SolidFire accounts
extends_documentation_fragment:
- netapp.ontap.netapp.solidfire
author: Sumit Kumar (@timuster) <sumit4@netapp.com>
description:
- Create, destroy, or update accounts on SolidFire
options:
state:
description:
- Whether the specified account should exist or not.
required: true
choices: ['present', 'absent']
name:
description:
- Unique username for this account. (May be 1 to 64 characters in length).
required: true
new_name:
description:
- New name for the user account.
initiator_secret:
description:
- CHAP secret to use for the initiator. Should be 12-16 characters long and impenetrable.
- The CHAP initiator secrets must be unique and cannot be the same as the target CHAP secret.
- If not specified, a random secret is created.
target_secret:
description:
- CHAP secret to use for the target (mutual CHAP authentication).
- Should be 12-16 characters long and impenetrable.
- The CHAP target secrets must be unique and cannot be the same as the initiator CHAP secret.
- If not specified, a random secret is created.
attributes:
description: List of Name/Value pairs in JSON object format.
account_id:
description:
- The ID of the account to manage or update.
status:
description:
- Status of the account.
'''
EXAMPLES = """
- name: Create Account
sf_account_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: present
name: TenantA
- name: Modify Account
sf_account_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: present
name: TenantA
new_name: TenantA-Renamed
- name: Delete Account
sf_account_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: absent
name: TenantA-Renamed
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
HAS_SF_SDK = netapp_utils.has_sf_sdk()
class SolidFireAccount(object):
def __init__(self):
self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
name=dict(required=True, type='str'),
account_id=dict(required=False, type='int', default=None),
new_name=dict(required=False, type='str', default=None),
initiator_secret=dict(required=False, type='str'),
target_secret=dict(required=False, type='str'),
attributes=dict(required=False, type='dict'),
status=dict(required=False, type='str'),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
p = self.module.params
# set up state variables
self.state = p['state']
self.name = p['name']
self.account_id = p['account_id']
self.new_name = p['new_name']
self.initiator_secret = p['initiator_secret']
self.target_secret = p['target_secret']
self.attributes = p['attributes']
self.status = p['status']
if HAS_SF_SDK is False:
self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
else:
self.sfe = netapp_utils.create_sf_connection(module=self.module)
def get_account(self):
"""
Return account object if found
:return: Details about the account. None if not found.
:rtype: dict
"""
account_list = self.sfe.list_accounts()
for account in account_list.accounts:
if account.username == self.name:
# Update self.account_id:
if self.account_id is not None:
if account.account_id == self.account_id:
return account
else:
self.account_id = account.account_id
return account
return None
def create_account(self):
try:
self.sfe.add_account(username=self.name,
initiator_secret=self.initiator_secret,
target_secret=self.target_secret,
attributes=self.attributes)
except Exception as e:
self.module.fail_json(msg='Error creating account %s: %s' % (self.name, to_native(e)),
exception=traceback.format_exc())
def delete_account(self):
try:
self.sfe.remove_account(account_id=self.account_id)
except Exception as e:
self.module.fail_json(msg='Error deleting account %s: %s' % (self.account_id, to_native(e)),
exception=traceback.format_exc())
def update_account(self):
try:
self.sfe.modify_account(account_id=self.account_id,
username=self.new_name,
status=self.status,
initiator_secret=self.initiator_secret,
target_secret=self.target_secret,
attributes=self.attributes)
except Exception as e:
self.module.fail_json(msg='Error updating account %s: %s' % (self.account_id, to_native(e)),
exception=traceback.format_exc())
def apply(self):
changed = False
account_exists = False
update_account = False
account_detail = self.get_account()
if account_detail:
account_exists = True
if self.state == 'absent':
changed = True
elif self.state == 'present':
# Check if we need to update the account
if account_detail.username is not None and self.new_name is not None and \
account_detail.username != self.new_name:
update_account = True
changed = True
elif account_detail.status is not None and self.status is not None \
and account_detail.status != self.status:
update_account = True
changed = True
elif account_detail.initiator_secret is not None and self.initiator_secret is not None \
and account_detail.initiator_secret != self.initiator_secret:
update_account = True
changed = True
elif account_detail.target_secret is not None and self.target_secret is not None \
and account_detail.target_secret != self.target_secret:
update_account = True
changed = True
elif account_detail.attributes is not None and self.attributes is not None \
and account_detail.attributes != self.attributes:
update_account = True
changed = True
else:
if self.state == 'present':
changed = True
if changed:
if self.module.check_mode:
pass
else:
if self.state == 'present':
if not account_exists:
self.create_account()
elif update_account:
self.update_account()
elif self.state == 'absent':
self.delete_account()
self.module.exit_json(changed=changed)
def main():
v = SolidFireAccount()
v.apply()
if __name__ == '__main__':
main()

View file

@@ -0,0 +1,184 @@
#!/usr/bin/python
# (c) 2017, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: sf_check_connections
deprecated:
removed_in: "2.11"
why: This Module has been replaced
alternative: please use M(na_elementsw_check_connections)
short_description: Check connectivity to MVIP and SVIP.
extends_documentation_fragment:
- netapp.ontap.netapp.solidfire
author: Sumit Kumar (@timuster) <sumit4@netapp.com>
description:
- Used to test the management connection to the cluster.
- The test pings the MVIP and SVIP, and executes a simple API method to verify connectivity.
options:
skip:
description:
- Skip checking connection to SVIP or MVIP.
choices: ['svip', 'mvip']
mvip:
description:
- Optionally, use to test connection of a different MVIP.
- This is not needed to test the connection to the target cluster.
svip:
description:
- Optionally, use to test connection of a different SVIP.
- This is not needed to test the connection to the target cluster.
'''
EXAMPLES = """
- name: Check connections to MVIP and SVIP
sf_check_connections:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
HAS_SF_SDK = netapp_utils.has_sf_sdk()
class SolidFireConnection(object):
def __init__(self):
self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
self.argument_spec.update(dict(
skip=dict(required=False, type='str', default=None, choices=['mvip', 'svip']),
mvip=dict(required=False, type='str', default=None),
svip=dict(required=False, type='str', default=None)
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
p = self.module.params
# set up state variables
self.skip = p['skip']
self.mvip = p['mvip']
self.svip = p['svip']
if HAS_SF_SDK is False:
self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
else:
self.sfe = netapp_utils.ElementFactory.create(p['hostname'], p['username'], p['password'], port=442)
def check_mvip_connection(self):
"""
Check connection to MVIP
:return: true if connection was successful, false otherwise.
:rtype: bool
"""
try:
test = self.sfe.test_connect_mvip(mvip=self.mvip)
result = test.details.connected
# Todo - Log details about the test
return result
except Exception as e:
self.module.fail_json(msg='Error checking connection to MVIP: %s' % to_native(e), exception=traceback.format_exc())
return False
def check_svip_connection(self):
"""
Check connection to SVIP
:return: true if connection was successful, false otherwise.
:rtype: bool
"""
try:
test = self.sfe.test_connect_svip(svip=self.svip)
result = test.details.connected
# Todo - Log details about the test
return result
except Exception as e:
self.module.fail_json(msg='Error checking connection to SVIP: %s' % to_native(e), exception=traceback.format_exc())
return False
def check(self):
failed = True
msg = ''
if self.skip is None:
mvip_connection_established = self.check_mvip_connection()
svip_connection_established = self.check_svip_connection()
# Set failed and msg
if not mvip_connection_established:
failed = True
msg = 'Connection to MVIP failed.'
elif not svip_connection_established:
failed = True
msg = 'Connection to SVIP failed.'
else:
failed = False
elif self.skip == 'mvip':
svip_connection_established = self.check_svip_connection()
# Set failed and msg
if not svip_connection_established:
failed = True
msg = 'Connection to SVIP failed.'
else:
failed = False
elif self.skip == 'svip':
mvip_connection_established = self.check_mvip_connection()
# Set failed and msg
if not mvip_connection_established:
failed = True
msg = 'Connection to MVIP failed.'
else:
failed = False
if failed:
self.module.fail_json(msg=msg)
else:
self.module.exit_json()
def main():
v = SolidFireConnection()
v.check()
if __name__ == '__main__':
main()

View file

@@ -0,0 +1,389 @@
#!/usr/bin/python
# (c) 2017, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: sf_snapshot_schedule_manager
deprecated:
removed_in: "2.11"
why: This Module has been replaced
alternative: please use M(na_elementsw_snapshot_schedule)
short_description: Manage SolidFire snapshot schedules
extends_documentation_fragment:
- netapp.ontap.netapp.solidfire
author: Sumit Kumar (@timuster) <sumit4@netapp.com>
description:
- Create, destroy, or update snapshot schedules on SolidFire
options:
state:
description:
- Whether the specified schedule should exist or not.
required: true
choices: ['present', 'absent']
paused:
description:
- Pause / Resume a schedule.
required: false
recurring:
description:
- Should the schedule recur?
required: false
time_interval_days:
description: Time interval in days.
required: false
default: 1
time_interval_hours:
description: Time interval in hours.
required: false
default: 0
time_interval_minutes:
description: Time interval in minutes.
required: false
default: 0
name:
description:
- Name for the snapshot schedule.
required: true
snapshot_name:
description:
- Name for the created snapshots.
required: false
volumes:
description:
- Volume IDs that you want to set the snapshot schedule for.
- At least 1 volume ID is required for creating a new schedule.
- required when C(state=present)
required: false
retention:
description:
- Retention period for the snapshot.
- Format is 'HH:mm:ss'.
required: false
schedule_id:
description:
- The schedule ID for the schedule that you want to update or delete.
required: false
starting_date:
description:
- Starting date for the schedule.
- Required when C(state=present).
- Please use two '-' in the format shown below, or you may see an error such as 'TypeError ... is not JSON serializable'.
- "Format: C(2016--12--01T00:00:00Z)"
required: false
'''
EXAMPLES = """
- name: Create Snapshot schedule
sf_snapshot_schedule_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: present
name: Schedule_A
time_interval_days: 1
starting_date: 2016--12--01T00:00:00Z
volumes: 7
- name: Update Snapshot schedule
sf_snapshot_schedule_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: present
schedule_id: 6
recurring: True
snapshot_name: AnsibleSnapshots
- name: Delete Snapshot schedule
sf_snapshot_schedule_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: absent
schedule_id: 6
"""
RETURN = """
schedule_id:
description: Schedule ID of the newly created schedule
returned: success
type: str
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
HAS_SF_SDK = netapp_utils.has_sf_sdk()
class SolidFireSnapShotSchedule(object):
def __init__(self):
self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
name=dict(required=True, type='str'),
time_interval_days=dict(required=False, type='int', default=1),
time_interval_hours=dict(required=False, type='int', default=0),
time_interval_minutes=dict(required=False, type='int', default=0),
paused=dict(required=False, type='bool'),
recurring=dict(required=False, type='bool'),
starting_date=dict(type='str'),
snapshot_name=dict(required=False, type='str'),
volumes=dict(required=False, type='list'),
retention=dict(required=False, type='str'),
schedule_id=dict(type='int'),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
required_if=[
('state', 'present', ['starting_date', 'volumes'])
],
supports_check_mode=True
)
p = self.module.params
# set up state variables
self.state = p['state']
self.name = p['name']
# self.interval = p['interval']
self.time_interval_days = p['time_interval_days']
self.time_interval_hours = p['time_interval_hours']
self.time_interval_minutes = p['time_interval_minutes']
self.paused = p['paused']
self.recurring = p['recurring']
self.starting_date = p['starting_date']
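# The doubled dashes documented for starting_date (e.g. 2016--12--01T00:00:00Z) are
# collapsed back to single dashes here before the value is sent to the API.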
if self.starting_date is not None:
self.starting_date = self.starting_date.replace("--", "-")
self.snapshot_name = p['snapshot_name']
self.volumes = p['volumes']
self.retention = p['retention']
self.schedule_id = p['schedule_id']
self.create_schedule_result = None
if HAS_SF_SDK is False:
self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
else:
self.sfe = netapp_utils.create_sf_connection(module=self.module)
def get_schedule(self):
schedule_list = self.sfe.list_schedules()
for schedule in schedule_list.schedules:
if schedule.name == self.name:
# Update self.schedule_id:
if self.schedule_id is not None:
if schedule.schedule_id == self.schedule_id:
return schedule
else:
self.schedule_id = schedule.schedule_id
return schedule
return None
def create_schedule(self):
try:
sched = netapp_utils.Schedule()
# if self.interval == 'time_interval':
sched.frequency = netapp_utils.TimeIntervalFrequency(days=self.time_interval_days,
hours=self.time_interval_hours,
minutes=self.time_interval_minutes)
# Create schedule
sched.name = self.name
sched.schedule_info = netapp_utils.ScheduleInfo(
volume_ids=self.volumes,
snapshot_name=self.snapshot_name,
retention=self.retention
)
sched.paused = self.paused
sched.recurring = self.recurring
sched.starting_date = self.starting_date
self.create_schedule_result = self.sfe.create_schedule(schedule=sched)
except Exception as e:
self.module.fail_json(msg='Error creating schedule %s: %s' % (self.name, to_native(e)),
exception=traceback.format_exc())
def delete_schedule(self):
try:
get_schedule_result = self.sfe.get_schedule(schedule_id=self.schedule_id)
sched = get_schedule_result.schedule
sched.to_be_deleted = True
self.sfe.modify_schedule(schedule=sched)
except Exception as e:
self.module.fail_json(msg='Error deleting schedule %s: %s' % (self.name, to_native(e)),
exception=traceback.format_exc())
def update_schedule(self):
try:
get_schedule_result = self.sfe.get_schedule(schedule_id=self.schedule_id)
sched = get_schedule_result.schedule
# Update schedule properties
# if self.interval == 'time_interval':
temp_frequency = netapp_utils.TimeIntervalFrequency(days=self.time_interval_days,
hours=self.time_interval_hours,
minutes=self.time_interval_minutes)
if sched.frequency.days != temp_frequency.days or \
sched.frequency.hours != temp_frequency.hours \
or sched.frequency.minutes != temp_frequency.minutes:
sched.frequency = temp_frequency
sched.name = self.name
if self.volumes is not None:
sched.schedule_info.volume_ids = self.volumes
if self.retention is not None:
sched.schedule_info.retention = self.retention
if self.snapshot_name is not None:
sched.schedule_info.snapshot_name = self.snapshot_name
if self.paused is not None:
sched.paused = self.paused
if self.recurring is not None:
sched.recurring = self.recurring
if self.starting_date is not None:
sched.starting_date = self.starting_date
# Make API call
self.sfe.modify_schedule(schedule=sched)
except Exception as e:
self.module.fail_json(msg='Error updating schedule %s: %s' % (self.name, to_native(e)),
exception=traceback.format_exc())
def apply(self):
changed = False
schedule_exists = False
update_schedule = False
schedule_detail = self.get_schedule()
if schedule_detail:
schedule_exists = True
if self.state == 'absent':
changed = True
elif self.state == 'present':
# Check if we need to update the account
if self.retention is not None and schedule_detail.schedule_info.retention != self.retention:
update_schedule = True
changed = True
elif schedule_detail.name != self.name:
update_schedule = True
changed = True
elif self.snapshot_name is not None and schedule_detail.schedule_info.snapshot_name != self.snapshot_name:
update_schedule = True
changed = True
elif self.volumes is not None and schedule_detail.schedule_info.volume_ids != self.volumes:
update_schedule = True
changed = True
elif self.paused is not None and schedule_detail.paused != self.paused:
update_schedule = True
changed = True
elif self.recurring is not None and schedule_detail.recurring != self.recurring:
update_schedule = True
changed = True
elif self.starting_date is not None and schedule_detail.starting_date != self.starting_date:
update_schedule = True
changed = True
elif self.time_interval_minutes is not None or self.time_interval_hours is not None \
or self.time_interval_days is not None:
temp_frequency = netapp_utils.TimeIntervalFrequency(days=self.time_interval_days,
hours=self.time_interval_hours,
minutes=self.time_interval_minutes)
if schedule_detail.frequency.days != temp_frequency.days or \
schedule_detail.frequency.hours != temp_frequency.hours \
or schedule_detail.frequency.minutes != temp_frequency.minutes:
update_schedule = True
changed = True
else:
if self.state == 'present':
changed = True
if changed:
if self.module.check_mode:
# Skip changes
pass
else:
if self.state == 'present':
if not schedule_exists:
self.create_schedule()
elif update_schedule:
self.update_schedule()
elif self.state == 'absent':
self.delete_schedule()
if self.create_schedule_result is not None:
self.module.exit_json(changed=changed, schedule_id=self.create_schedule_result.schedule_id)
else:
self.module.exit_json(changed=changed)
def main():
v = SolidFireSnapShotSchedule()
v.apply()
if __name__ == '__main__':
main()

View file

@@ -0,0 +1,249 @@
#!/usr/bin/python
# (c) 2017, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: sf_volume_access_group_manager
deprecated:
removed_in: "2.11"
why: This Module has been replaced
alternative: please use M(na_elementsw_access_group)
short_description: Manage SolidFire Volume Access Groups
extends_documentation_fragment:
- netapp.ontap.netapp.solidfire
author: Sumit Kumar (@timuster) <sumit4@netapp.com>
description:
- Create, destroy, or update volume access groups on SolidFire
options:
state:
description:
- Whether the specified volume access group should exist or not.
required: true
choices: ['present', 'absent']
name:
description:
- Name of the volume access group. It is not required to be unique, but recommended.
required: true
initiators:
description:
- List of initiators to include in the volume access group. If unspecified, the access group will start out without configured initiators.
volumes:
description:
- List of volumes to initially include in the volume access group. If unspecified, the access group will start without any volumes.
virtual_network_id:
description:
- The ID of the SolidFire Virtual Network ID to associate the volume access group with.
virtual_network_tags:
description:
- The ID of the VLAN Virtual Network Tag to associate the volume access group with.
attributes:
description: List of Name/Value pairs in JSON object format.
volume_access_group_id:
description:
- The ID of the volume access group to modify or delete.
'''
EXAMPLES = """
- name: Create Volume Access Group
sf_volume_access_group_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: present
name: AnsibleVolumeAccessGroup
volumes: [7,8]
- name: Modify Volume Access Group
sf_volume_access_group_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: present
volume_access_group_id: 1
name: AnsibleVolumeAccessGroup-Renamed
attributes: {"volumes": [1,2,3], "virtual_network_id": 12345}
- name: Delete Volume Access Group
sf_volume_access_group_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: absent
volume_access_group_id: 1
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
HAS_SF_SDK = netapp_utils.has_sf_sdk()
class SolidFireVolumeAccessGroup(object):
def __init__(self):
self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
name=dict(required=True, type='str'),
volume_access_group_id=dict(required=False, type='int', default=None),
initiators=dict(required=False, type='list', default=None),
volumes=dict(required=False, type='list', default=None),
virtual_network_id=dict(required=False, type='list', default=None),
virtual_network_tags=dict(required=False, type='list', default=None),
attributes=dict(required=False, type='dict', default=None),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
p = self.module.params
# set up state variables
self.state = p['state']
self.name = p['name']
self.volume_access_group_id = p['volume_access_group_id']
self.initiators = p['initiators']
self.volumes = p['volumes']
self.virtual_network_id = p['virtual_network_id']
self.virtual_network_tags = p['virtual_network_tags']
self.attributes = p['attributes']
if HAS_SF_SDK is False:
self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
else:
self.sfe = netapp_utils.create_sf_connection(module=self.module)
def get_volume_access_group(self):
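        # Find the access group by name.  If volume_access_group_id was given
        # it must match the discovered group; otherwise the discovered id is
        # cached on self so later update/delete calls can reference it.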
access_groups_list = self.sfe.list_volume_access_groups()
for group in access_groups_list.volume_access_groups:
if group.name == self.name:
# Update self.volume_access_group_id:
if self.volume_access_group_id is not None:
if group.volume_access_group_id == self.volume_access_group_id:
return group
else:
self.volume_access_group_id = group.volume_access_group_id
return group
return None
def create_volume_access_group(self):
try:
self.sfe.create_volume_access_group(name=self.name,
initiators=self.initiators,
volumes=self.volumes,
virtual_network_id=self.virtual_network_id,
virtual_network_tags=self.virtual_network_tags,
attributes=self.attributes)
except Exception as e:
self.module.fail_json(msg="Error creating volume access group %s: %s" %
(self.name, to_native(e)), exception=traceback.format_exc())
def delete_volume_access_group(self):
try:
self.sfe.delete_volume_access_group(volume_access_group_id=self.volume_access_group_id)
except Exception as e:
self.module.fail_json(msg="Error deleting volume access group %s: %s" %
(self.volume_access_group_id, to_native(e)),
exception=traceback.format_exc())
def update_volume_access_group(self):
try:
self.sfe.modify_volume_access_group(volume_access_group_id=self.volume_access_group_id,
virtual_network_id=self.virtual_network_id,
virtual_network_tags=self.virtual_network_tags,
name=self.name,
initiators=self.initiators,
volumes=self.volumes,
attributes=self.attributes)
except Exception as e:
self.module.fail_json(msg="Error updating volume access group %s: %s" %
(self.volume_access_group_id, to_native(e)), exception=traceback.format_exc())
def apply(self):
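        # Work out whether a change is needed before touching the array so
        # check_mode can report the result.  Volumes and initiators are
        # compared against the existing group, whereas supplying
        # virtual_network_id, virtual_network_tags or attributes always
        # triggers an update call.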
changed = False
group_exists = False
update_group = False
group_detail = self.get_volume_access_group()
if group_detail:
group_exists = True
if self.state == 'absent':
changed = True
elif self.state == 'present':
# Check if we need to update the group
if self.volumes is not None and group_detail.volumes != self.volumes:
update_group = True
changed = True
elif self.initiators is not None and group_detail.initiators != self.initiators:
update_group = True
changed = True
elif self.virtual_network_id is not None or self.virtual_network_tags is not None or \
self.attributes is not None:
update_group = True
changed = True
else:
if self.state == 'present':
changed = True
if changed:
if self.module.check_mode:
pass
else:
if self.state == 'present':
if not group_exists:
self.create_volume_access_group()
elif update_group:
self.update_volume_access_group()
elif self.state == 'absent':
self.delete_volume_access_group()
self.module.exit_json(changed=changed)
def main():
v = SolidFireVolumeAccessGroup()
v.apply()
if __name__ == '__main__':
main()

View file

@ -0,0 +1,320 @@
#!/usr/bin/python
# (c) 2017, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: sf_volume_manager
deprecated:
removed_in: "2.11"
  why: This module has been replaced
alternative: please use M(na_elementsw_volume)
short_description: Manage SolidFire volumes
extends_documentation_fragment:
- netapp.ontap.netapp.solidfire
author: Sumit Kumar (@timuster) <sumit4@netapp.com>
description:
- Create, destroy, or update volumes on SolidFire
options:
state:
description:
- Whether the specified volume should exist or not.
required: true
choices: ['present', 'absent']
name:
description:
- The name of the volume to manage.
required: true
account_id:
description:
- Account ID for the owner of this volume.
required: true
  enable512e:
    description:
    - Should the volume provide 512-byte sector emulation?
    - Required when C(state=present).
    aliases: ['512emulation']
qos:
description: Initial quality of service settings for this volume. Configure as dict in playbooks.
attributes:
description: A YAML dictionary of attributes that you would like to apply on this volume.
volume_id:
description:
- The ID of the volume to manage or update.
- In order to create multiple volumes with the same name, but different volume_ids, please declare the I(volume_id)
parameter with an arbitrary value. However, the specified volume_id will not be assigned to the newly created
volume (since it's an auto-generated property).
size:
description:
- The size of the volume in (size_unit).
- Required when C(state = present).
size_unit:
description:
- The unit used to interpret the size parameter.
choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
default: 'gb'
access:
description:
- "Access allowed for the volume."
- "readOnly: Only read operations are allowed."
- "readWrite: Reads and writes are allowed."
- "locked: No reads or writes are allowed."
- "replicationTarget: Identify a volume as the target volume for a paired set of volumes. If the volume is not paired, the access status is locked."
- "If unspecified, the access settings of the clone will be the same as the source."
choices: ['readOnly', 'readWrite', 'locked', 'replicationTarget']
'''
EXAMPLES = """
- name: Create Volume
sf_volume_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: present
name: AnsibleVol
qos: {minIOPS: 1000, maxIOPS: 20000, burstIOPS: 50000}
account_id: 3
enable512e: False
size: 1
size_unit: gb
- name: Update Volume
sf_volume_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: present
name: AnsibleVol
account_id: 3
access: readWrite
- name: Delete Volume
sf_volume_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: absent
name: AnsibleVol
account_id: 2
"""
RETURN = """
msg:
description: Success message
returned: success
type: str
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
HAS_SF_SDK = netapp_utils.has_sf_sdk()
class SolidFireVolume(object):
def __init__(self):
self._size_unit_map = netapp_utils.SF_BYTE_MAP
self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
name=dict(required=True, type='str'),
account_id=dict(required=True, type='int'),
enable512e=dict(type='bool', aliases=['512emulation']),
qos=dict(required=False, type='dict', default=None),
attributes=dict(required=False, type='dict', default=None),
volume_id=dict(type='int', default=None),
size=dict(type='int'),
size_unit=dict(default='gb',
choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb',
'pb', 'eb', 'zb', 'yb'], type='str'),
access=dict(required=False, type='str', default=None, choices=['readOnly', 'readWrite',
'locked', 'replicationTarget']),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
required_if=[
('state', 'present', ['size', 'enable512e'])
],
supports_check_mode=True
)
p = self.module.params
# set up state variables
self.state = p['state']
self.name = p['name']
self.account_id = p['account_id']
self.enable512e = p['enable512e']
self.qos = p['qos']
self.attributes = p['attributes']
self.volume_id = p['volume_id']
self.size_unit = p['size_unit']
if p['size'] is not None:
self.size = p['size'] * self._size_unit_map[self.size_unit]
else:
self.size = None
self.access = p['access']
if HAS_SF_SDK is False:
self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
else:
self.sfe = netapp_utils.create_sf_connection(module=self.module)
def get_volume(self):
"""
Return volume object if found
:return: Details about the volume. None if not found.
:rtype: dict
"""
volume_list = self.sfe.list_volumes_for_account(account_id=self.account_id)
for volume in volume_list.volumes:
if volume.name == self.name:
# Update self.volume_id
if self.volume_id is not None:
if volume.volume_id == self.volume_id and str(volume.delete_time) == "":
return volume
else:
if str(volume.delete_time) == "":
self.volume_id = volume.volume_id
return volume
return None
def create_volume(self):
try:
self.sfe.create_volume(name=self.name,
account_id=self.account_id,
total_size=self.size,
enable512e=self.enable512e,
qos=self.qos,
attributes=self.attributes)
except Exception as err:
self.module.fail_json(msg="Error provisioning volume %s of size %s" % (self.name, self.size),
exception=to_native(err))
def delete_volume(self):
try:
self.sfe.delete_volume(volume_id=self.volume_id)
except Exception as err:
self.module.fail_json(msg="Error deleting volume %s" % self.volume_id,
exception=to_native(err))
def update_volume(self):
try:
self.sfe.modify_volume(self.volume_id,
account_id=self.account_id,
access=self.access,
qos=self.qos,
total_size=self.size,
attributes=self.attributes)
except Exception as err:
self.module.fail_json(msg="Error updating volume %s" % self.name,
exception=to_native(err))
def apply(self):
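        # Decide on the required action first: access, account_id, qos and
        # attributes are compared for inequality, and size changes are only
        # applied when they exceed the 0.1% tolerance checked below.  The
        # create/update/delete itself is skipped in check mode.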
changed = False
volume_exists = False
update_volume = False
volume_detail = self.get_volume()
if volume_detail:
volume_exists = True
if self.state == 'absent':
# Checking for state change(s) here, and applying it later in the code allows us to support
# check_mode
changed = True
elif self.state == 'present':
if volume_detail.access is not None and self.access is not None and volume_detail.access != self.access:
update_volume = True
changed = True
elif volume_detail.account_id is not None and self.account_id is not None \
and volume_detail.account_id != self.account_id:
update_volume = True
changed = True
elif volume_detail.qos is not None and self.qos is not None and volume_detail.qos != self.qos:
update_volume = True
changed = True
elif volume_detail.total_size is not None and volume_detail.total_size != self.size:
size_difference = abs(float(volume_detail.total_size - self.size))
# Change size only if difference is bigger than 0.001
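                    # i.e. resize only when the relative difference exceeds
                    # 0.1%; for example, growing a 1 GiB volume to 2 GiB (100%
                    # difference) triggers an update, while sub-0.1% rounding
                    # differences are ignored.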
if size_difference / self.size > 0.001:
update_volume = True
changed = True
elif volume_detail.attributes is not None and self.attributes is not None and \
volume_detail.attributes != self.attributes:
update_volume = True
changed = True
else:
if self.state == 'present':
changed = True
result_message = ""
if changed:
if self.module.check_mode:
result_message = "Check mode, skipping changes"
else:
if self.state == 'present':
if not volume_exists:
self.create_volume()
result_message = "Volume created"
elif update_volume:
self.update_volume()
result_message = "Volume updated"
elif self.state == 'absent':
self.delete_volume()
result_message = "Volume deleted"
self.module.exit_json(changed=changed, msg=result_message)
def main():
v = SolidFireVolume()
v.apply()
if __name__ == '__main__':
main()

View file

@ -0,0 +1,862 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Simon Dodsley (simon@purestorage.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: purefa_facts
deprecated:
removed_in: '2.13'
why: Deprecated in favor of C(_info) module.
alternative: Use M(purefa_info) instead.
short_description: Collect facts from Pure Storage FlashArray
description:
  - Collect facts information from a Pure Storage FlashArray running the
Purity//FA operating system. By default, the module will collect basic
fact information including hosts, host groups, protection
groups and volume counts. Additional fact information can be collected
based on the configured set of arguments.
author:
  - Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
options:
gather_subset:
description:
- When supplied, this argument will define the facts to be collected.
Possible values for this include all, minimum, config, performance,
capacity, network, subnet, interfaces, hgroups, pgroups, hosts,
admins, volumes, snapshots, pods, vgroups, offload, apps and arrays.
type: list
required: false
default: minimum
extends_documentation_fragment:
- community.general.purestorage.fa
'''
EXAMPLES = r'''
- name: collect default set of facts
purefa_facts:
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
- name: collect configuration and capacity facts
purefa_facts:
gather_subset:
- config
- capacity
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
- name: collect all facts
purefa_facts:
gather_subset:
- all
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
'''
RETURN = r'''
ansible_facts:
description: Returns the facts collected from the FlashArray
returned: always
type: complex
sample: {
"capacity": {},
"config": {
"directory_service": {
"array_admin_group": null,
"base_dn": null,
"bind_password": null,
"bind_user": null,
"check_peer": false,
"enabled": false,
"group_base": null,
"readonly_group": null,
"storage_admin_group": null,
"uri": []
},
"dns": {
"domain": "domain.com",
"nameservers": [
"8.8.8.8",
"8.8.4.4"
]
},
"ntp": [
"0.ntp.pool.org",
"1.ntp.pool.org",
"2.ntp.pool.org",
"3.ntp.pool.org"
],
"smtp": [
{
"enabled": true,
"name": "alerts@acme.com"
},
{
"enabled": true,
"name": "user@acme.com"
}
],
"snmp": [
{
"auth_passphrase": null,
"auth_protocol": null,
"community": null,
"host": "localhost",
"name": "localhost",
"privacy_passphrase": null,
"privacy_protocol": null,
"user": null,
"version": "v2c"
}
],
"ssl_certs": {
"country": null,
"email": null,
"issued_by": "",
"issued_to": "",
"key_size": 2048,
"locality": null,
"organization": "Acme Storage, Inc.",
"organizational_unit": "Acme Storage, Inc.",
"state": null,
"status": "self-signed",
"valid_from": "2017-08-11T23:09:06Z",
"valid_to": "2027-08-09T23:09:06Z"
},
"syslog": []
},
"default": {
"array_name": "flasharray1",
"connected_arrays": 1,
"hostgroups": 0,
"hosts": 10,
"pods": 3,
"protection_groups": 1,
"purity_version": "5.0.4",
"snapshots": 1,
"volume_groups": 2
},
"hgroups": {},
"hosts": {
"host1": {
"hgroup": null,
"iqn": [
"iqn.1994-05.com.redhat:2f6f5715a533"
],
"wwn": []
},
"host2": {
"hgroup": null,
"iqn": [
"iqn.1994-05.com.redhat:d17fb13fe0b"
],
"wwn": []
},
"host3": {
"hgroup": null,
"iqn": [
"iqn.1994-05.com.redhat:97b1351bfb2"
],
"wwn": []
},
"host4": {
"hgroup": null,
"iqn": [
"iqn.1994-05.com.redhat:dd84e9a7b2cb"
],
"wwn": [
"10000000C96C48D1",
"10000000C96C48D2"
]
}
},
"interfaces": {
"CT0.ETH4": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682",
"CT0.ETH5": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682",
"CT1.ETH4": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682",
"CT1.ETH5": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682"
},
"network": {
"ct0.eth0": {
"address": "10.10.10.10",
"gateway": "10.10.10.1",
"hwaddr": "ec:f4:bb:c8:8a:04",
"mtu": 1500,
"netmask": "255.255.255.0",
"services": [
"management"
],
"speed": 1000000000
},
"ct0.eth2": {
"address": "10.10.10.11",
"gateway": null,
"hwaddr": "ec:f4:bb:c8:8a:00",
"mtu": 1500,
"netmask": "255.255.255.0",
"services": [
"replication"
],
"speed": 10000000000
},
"ct0.eth3": {
"address": "10.10.10.12",
"gateway": null,
"hwaddr": "ec:f4:bb:c8:8a:02",
"mtu": 1500,
"netmask": "255.255.255.0",
"services": [
"replication"
],
"speed": 10000000000
},
"ct0.eth4": {
"address": "10.10.10.13",
"gateway": null,
"hwaddr": "90:e2:ba:83:79:0c",
"mtu": 1500,
"netmask": "255.255.255.0",
"services": [
"iscsi"
],
"speed": 10000000000
},
"ct0.eth5": {
"address": "10.10.10.14",
"gateway": null,
"hwaddr": "90:e2:ba:83:79:0d",
"mtu": 1500,
"netmask": "255.255.255.0",
"services": [
"iscsi"
],
"speed": 10000000000
},
"vir0": {
"address": "10.10.10.20",
"gateway": "10.10.10.1",
"hwaddr": "fe:ba:e9:e7:6b:0f",
"mtu": 1500,
"netmask": "255.255.255.0",
"services": [
"management"
],
"speed": 1000000000
}
},
"offload": {
"nfstarget": {
"address": "10.0.2.53",
"mount_options": null,
"mount_point": "/offload",
"protocol": "nfs",
"status": "scanning"
}
},
"performance": {
"input_per_sec": 8191,
"output_per_sec": 0,
"queue_depth": 1,
"reads_per_sec": 0,
"san_usec_per_write_op": 15,
"usec_per_read_op": 0,
"usec_per_write_op": 642,
"writes_per_sec": 2
},
"pgroups": {
"consisgroup-07b6b983-986e-46f5-bdc3-deaa3dbb299e-cinder": {
"hgroups": null,
"hosts": null,
"source": "host1",
"targets": null,
"volumes": [
"volume-1"
]
}
},
"pods": {
"srm-pod": {
"arrays": [
{
"array_id": "52595f7e-b460-4b46-8851-a5defd2ac192",
"mediator_status": "online",
"name": "sn1-405-c09-37",
"status": "online"
},
{
"array_id": "a2c32301-f8a0-4382-949b-e69b552ce8ca",
"mediator_status": "online",
"name": "sn1-420-c11-31",
"status": "online"
}
],
"source": null
}
},
"snapshots": {
"consisgroup.cgsnapshot": {
"created": "2018-03-28T09:34:02Z",
"size": 13958643712,
"source": "volume-1"
}
},
"subnet": {},
"vgroups": {
"vvol--vSphere-HA-0ffc7dd1-vg": {
"volumes": [
"vvol--vSphere-HA-0ffc7dd1-vg/Config-aad5d7c6"
]
}
},
"volumes": {
"ansible_data": {
"bandwidth": null,
"hosts": [
[
"host1",
1
]
],
"serial": "43BE47C12334399B000114A6",
"size": 1099511627776,
"source": null
}
}
}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.pure import get_system, purefa_argument_spec
ADMIN_API_VERSION = '1.14'
S3_REQUIRED_API_VERSION = '1.16'
LATENCY_REQUIRED_API_VERSION = '1.16'
AC_REQUIRED_API_VERSION = '1.14'
CAP_REQUIRED_API_VERSION = '1.6'
SAN_REQUIRED_API_VERSION = '1.10'
NVME_API_VERSION = '1.16'
PREFERRED_API_VERSION = '1.15'
CONN_STATUS_API_VERSION = '1.17'
def generate_default_dict(array):
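    # Summary counts plus array name, Purity version and controller model.
    # Volume groups, pods, connected arrays and the connection key are only
    # collected on API versions >= AC_REQUIRED_API_VERSION (1.14); the model
    # falls back to controller CT1 when CT0 does not report one.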
default_facts = {}
defaults = array.get()
api_version = array._list_available_rest_versions()
if AC_REQUIRED_API_VERSION in api_version:
default_facts['volume_groups'] = len(array.list_vgroups())
default_facts['connected_arrays'] = len(array.list_array_connections())
default_facts['pods'] = len(array.list_pods())
default_facts['connection_key'] = array.get(connection_key=True)['connection_key']
hosts = array.list_hosts()
admins = array.list_admins()
snaps = array.list_volumes(snap=True, pending=True)
pgroups = array.list_pgroups(pending=True)
hgroups = array.list_hgroups()
# Old FA arrays only report model from the primary controller
ct0_model = array.get_hardware('CT0')['model']
if ct0_model:
model = ct0_model
else:
ct1_model = array.get_hardware('CT1')['model']
model = ct1_model
default_facts['array_model'] = model
default_facts['array_name'] = defaults['array_name']
default_facts['purity_version'] = defaults['version']
default_facts['hosts'] = len(hosts)
default_facts['snapshots'] = len(snaps)
default_facts['protection_groups'] = len(pgroups)
default_facts['hostgroups'] = len(hgroups)
default_facts['admins'] = len(admins)
return default_facts
def generate_perf_dict(array):
perf_facts = {}
api_version = array._list_available_rest_versions()
if LATENCY_REQUIRED_API_VERSION in api_version:
latency_info = array.get(action='monitor', latency=True)[0]
perf_info = array.get(action='monitor')[0]
# IOPS
perf_facts['writes_per_sec'] = perf_info['writes_per_sec']
perf_facts['reads_per_sec'] = perf_info['reads_per_sec']
# Bandwidth
perf_facts['input_per_sec'] = perf_info['input_per_sec']
perf_facts['output_per_sec'] = perf_info['output_per_sec']
# Latency
if LATENCY_REQUIRED_API_VERSION in api_version:
perf_facts['san_usec_per_read_op'] = latency_info['san_usec_per_read_op']
perf_facts['san_usec_per_write_op'] = latency_info['san_usec_per_write_op']
perf_facts['queue_usec_per_read_op'] = latency_info['queue_usec_per_read_op']
perf_facts['queue_usec_per_write_op'] = latency_info['queue_usec_per_write_op']
perf_facts['qos_rate_limit_usec_per_read_op'] = latency_info['qos_rate_limit_usec_per_read_op']
perf_facts['qos_rate_limit_usec_per_write_op'] = latency_info['qos_rate_limit_usec_per_write_op']
perf_facts['local_queue_usec_per_op'] = perf_info['local_queue_usec_per_op']
perf_facts['usec_per_read_op'] = perf_info['usec_per_read_op']
perf_facts['usec_per_write_op'] = perf_info['usec_per_write_op']
perf_facts['queue_depth'] = perf_info['queue_depth']
return perf_facts
def generate_config_dict(array):
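    # Array-wide configuration: DNS, SMTP recipients, SNMP, directory service,
    # NTP, syslog, phonehome, proxy, relay host, sender domain, idle/SCSI
    # timeouts and the SSL certificate.  Directory-service roles and global
    # admin settings require API >= S3_REQUIRED_API_VERSION (1.16).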
config_facts = {}
api_version = array._list_available_rest_versions()
# DNS
config_facts['dns'] = array.get_dns()
# SMTP
config_facts['smtp'] = array.list_alert_recipients()
# SNMP
config_facts['snmp'] = array.list_snmp_managers()
config_facts['snmp_v3_engine_id'] = array.get_snmp_engine_id()['engine_id']
# DS
config_facts['directory_service'] = array.get_directory_service()
if S3_REQUIRED_API_VERSION in api_version:
config_facts['directory_service_roles'] = {}
roles = array.list_directory_service_roles()
for role in range(0, len(roles)):
role_name = roles[role]['name']
config_facts['directory_service_roles'][role_name] = {
'group': roles[role]['group'],
'group_base': roles[role]['group_base'],
}
else:
config_facts['directory_service'].update(array.get_directory_service(groups=True))
# NTP
config_facts['ntp'] = array.get(ntpserver=True)['ntpserver']
# SYSLOG
config_facts['syslog'] = array.get(syslogserver=True)['syslogserver']
# Phonehome
config_facts['phonehome'] = array.get(phonehome=True)['phonehome']
# Proxy
config_facts['proxy'] = array.get(proxy=True)['proxy']
# Relay Host
config_facts['relayhost'] = array.get(relayhost=True)['relayhost']
# Sender Domain
config_facts['senderdomain'] = array.get(senderdomain=True)['senderdomain']
# Idle Timeout
config_facts['idle_timeout'] = array.get(idle_timeout=True)['idle_timeout']
# SCSI Timeout
config_facts['scsi_timeout'] = array.get(scsi_timeout=True)['scsi_timeout']
# SSL
config_facts['ssl_certs'] = array.get_certificate()
# Global Admin settings
if S3_REQUIRED_API_VERSION in api_version:
config_facts['global_admin'] = array.get_global_admin_attributes()
return config_facts
def generate_admin_dict(array):
api_version = array._list_available_rest_versions()
admin_facts = {}
if ADMIN_API_VERSION in api_version:
admins = array.list_admins()
for admin in range(0, len(admins)):
admin_name = admins[admin]['name']
admin_facts[admin_name] = {
'type': admins[admin]['type'],
'role': admins[admin]['role'],
}
return admin_facts
def generate_subnet_dict(array):
sub_facts = {}
subnets = array.list_subnets()
for sub in range(0, len(subnets)):
sub_name = subnets[sub]['name']
if subnets[sub]['enabled']:
sub_facts[sub_name] = {
'gateway': subnets[sub]['gateway'],
'mtu': subnets[sub]['mtu'],
'vlan': subnets[sub]['vlan'],
'prefix': subnets[sub]['prefix'],
'interfaces': subnets[sub]['interfaces'],
'services': subnets[sub]['services'],
}
return sub_facts
def generate_network_dict(array):
net_facts = {}
ports = array.list_network_interfaces()
for port in range(0, len(ports)):
int_name = ports[port]['name']
net_facts[int_name] = {
'hwaddr': ports[port]['hwaddr'],
'mtu': ports[port]['mtu'],
'enabled': ports[port]['enabled'],
'speed': ports[port]['speed'],
'address': ports[port]['address'],
'slaves': ports[port]['slaves'],
'services': ports[port]['services'],
'gateway': ports[port]['gateway'],
'netmask': ports[port]['netmask'],
}
if ports[port]['subnet']:
subnets = array.get_subnet(ports[port]['subnet'])
if subnets['enabled']:
net_facts[int_name]['subnet'] = {
'name': subnets['name'],
'prefix': subnets['prefix'],
'vlan': subnets['vlan'],
}
return net_facts
def generate_capacity_dict(array):
capacity_facts = {}
api_version = array._list_available_rest_versions()
if CAP_REQUIRED_API_VERSION in api_version:
volumes = array.list_volumes(pending=True)
capacity_facts['provisioned_space'] = sum(item['size'] for item in volumes)
capacity = array.get(space=True)
total_capacity = capacity[0]['capacity']
used_space = capacity[0]["total"]
capacity_facts['free_space'] = total_capacity - used_space
capacity_facts['total_capacity'] = total_capacity
capacity_facts['data_reduction'] = capacity[0]['data_reduction']
capacity_facts['system_space'] = capacity[0]['system']
capacity_facts['volume_space'] = capacity[0]['volumes']
capacity_facts['shared_space'] = capacity[0]['shared_space']
capacity_facts['snapshot_space'] = capacity[0]['snapshots']
capacity_facts['thin_provisioning'] = capacity[0]['thin_provisioning']
capacity_facts['total_reduction'] = capacity[0]['total_reduction']
return capacity_facts
def generate_snap_dict(array):
snap_facts = {}
snaps = array.list_volumes(snap=True)
for snap in range(0, len(snaps)):
snapshot = snaps[snap]['name']
snap_facts[snapshot] = {
'size': snaps[snap]['size'],
'source': snaps[snap]['source'],
'created': snaps[snap]['created'],
}
return snap_facts
def generate_vol_dict(array):
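    # Build a per-volume dict of source, size, serial, connected hosts and
    # bandwidth.  On AC-capable arrays (API >= 1.14) QoS bandwidth limits and
    # protocol-endpoint (vvol) volumes are added as well; [host, lun] pairs
    # come from the connection listing.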
volume_facts = {}
vols = array.list_volumes()
for vol in range(0, len(vols)):
volume = vols[vol]['name']
volume_facts[volume] = {
'source': vols[vol]['source'],
'size': vols[vol]['size'],
'serial': vols[vol]['serial'],
'hosts': [],
'bandwidth': ""
}
api_version = array._list_available_rest_versions()
if AC_REQUIRED_API_VERSION in api_version:
qvols = array.list_volumes(qos=True)
for qvol in range(0, len(qvols)):
volume = qvols[qvol]['name']
qos = qvols[qvol]['bandwidth_limit']
volume_facts[volume]['bandwidth'] = qos
vvols = array.list_volumes(protocol_endpoint=True)
for vvol in range(0, len(vvols)):
volume = vvols[vvol]['name']
volume_facts[volume] = {
'source': vvols[vvol]['source'],
'serial': vvols[vvol]['serial'],
'hosts': []
}
cvols = array.list_volumes(connect=True)
for cvol in range(0, len(cvols)):
volume = cvols[cvol]['name']
voldict = [cvols[cvol]['host'], cvols[cvol]['lun']]
volume_facts[volume]['hosts'].append(voldict)
return volume_facts
def generate_host_dict(array):
api_version = array._list_available_rest_versions()
host_facts = {}
hosts = array.list_hosts()
for host in range(0, len(hosts)):
hostname = hosts[host]['name']
tports = []
host_all_info = array.get_host(hostname, all=True)
if host_all_info:
tports = host_all_info[0]['target_port']
host_facts[hostname] = {
'hgroup': hosts[host]['hgroup'],
'iqn': hosts[host]['iqn'],
'wwn': hosts[host]['wwn'],
'personality': array.get_host(hostname,
personality=True)['personality'],
'target_port': tports
}
if NVME_API_VERSION in api_version:
host_facts[hostname]['nqn'] = hosts[host]['nqn']
if PREFERRED_API_VERSION in api_version:
hosts = array.list_hosts(preferred_array=True)
for host in range(0, len(hosts)):
hostname = hosts[host]['name']
host_facts[hostname]['preferred_array'] = hosts[host]['preferred_array']
return host_facts
def generate_pgroups_dict(array):
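    # Base facts per protection group, plus schedule and retention details
    # when snapshots or replication are enabled.  Groups whose name contains
    # ':' (replicated from a remote array) also get per-snapshot transfer
    # statistics.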
pgroups_facts = {}
pgroups = array.list_pgroups()
for pgroup in range(0, len(pgroups)):
protgroup = pgroups[pgroup]['name']
pgroups_facts[protgroup] = {
'hgroups': pgroups[pgroup]['hgroups'],
'hosts': pgroups[pgroup]['hosts'],
'source': pgroups[pgroup]['source'],
'targets': pgroups[pgroup]['targets'],
'volumes': pgroups[pgroup]['volumes'],
}
prot_sched = array.get_pgroup(protgroup, schedule=True)
prot_reten = array.get_pgroup(protgroup, retention=True)
if prot_sched['snap_enabled'] or prot_sched['replicate_enabled']:
            pgroups_facts[protgroup]['snap_frequency'] = prot_sched['snap_frequency']
            pgroups_facts[protgroup]['replicate_frequency'] = prot_sched['replicate_frequency']
pgroups_facts[protgroup]['snap_enabled'] = prot_sched['snap_enabled']
pgroups_facts[protgroup]['replicate_enabled'] = prot_sched['replicate_enabled']
pgroups_facts[protgroup]['snap_at'] = prot_sched['snap_at']
pgroups_facts[protgroup]['replicate_at'] = prot_sched['replicate_at']
pgroups_facts[protgroup]['replicate_blackout'] = prot_sched['replicate_blackout']
pgroups_facts[protgroup]['per_day'] = prot_reten['per_day']
pgroups_facts[protgroup]['target_per_day'] = prot_reten['target_per_day']
pgroups_facts[protgroup]['target_days'] = prot_reten['target_days']
pgroups_facts[protgroup]['days'] = prot_reten['days']
pgroups_facts[protgroup]['all_for'] = prot_reten['all_for']
pgroups_facts[protgroup]['target_all_for'] = prot_reten['target_all_for']
if ":" in protgroup:
snap_transfers = array.get_pgroup(protgroup, snap=True, transfer=True)
pgroups_facts[protgroup]['snaps'] = {}
for snap_transfer in range(0, len(snap_transfers)):
snap = snap_transfers[snap_transfer]['name']
pgroups_facts[protgroup]['snaps'][snap] = {
'created': snap_transfers[snap_transfer]['created'],
'started': snap_transfers[snap_transfer]['started'],
'completed': snap_transfers[snap_transfer]['completed'],
'physical_bytes_written': snap_transfers[snap_transfer]['physical_bytes_written'],
'data_transferred': snap_transfers[snap_transfer]['data_transferred'],
'progress': snap_transfers[snap_transfer]['progress'],
}
return pgroups_facts
def generate_pods_dict(array):
pods_facts = {}
api_version = array._list_available_rest_versions()
if AC_REQUIRED_API_VERSION in api_version:
pods = array.list_pods()
for pod in range(0, len(pods)):
acpod = pods[pod]['name']
pods_facts[acpod] = {
'source': pods[pod]['source'],
'arrays': pods[pod]['arrays'],
}
return pods_facts
def generate_conn_array_dict(array):
conn_array_facts = {}
api_version = array._list_available_rest_versions()
if CONN_STATUS_API_VERSION in api_version:
carrays = array.list_connected_arrays()
for carray in range(0, len(carrays)):
arrayname = carrays[carray]['array_name']
conn_array_facts[arrayname] = {
'array_id': carrays[carray]['id'],
'throtled': carrays[carray]['throtled'],
'version': carrays[carray]['version'],
'type': carrays[carray]['type'],
'mgmt_ip': carrays[carray]['management_address'],
'repl_ip': carrays[carray]['replication_address'],
}
if CONN_STATUS_API_VERSION in api_version:
conn_array_facts[arrayname]['status'] = carrays[carray]['status']
return conn_array_facts
def generate_apps_dict(array):
apps_facts = {}
api_version = array._list_available_rest_versions()
if SAN_REQUIRED_API_VERSION in api_version:
apps = array.list_apps()
for app in range(0, len(apps)):
appname = apps[app]['name']
apps_facts[appname] = {
'version': apps[app]['version'],
'status': apps[app]['status'],
'description': apps[app]['description'],
}
return apps_facts
def generate_vgroups_dict(array):
vgroups_facts = {}
api_version = array._list_available_rest_versions()
if AC_REQUIRED_API_VERSION in api_version:
vgroups = array.list_vgroups()
for vgroup in range(0, len(vgroups)):
virtgroup = vgroups[vgroup]['name']
vgroups_facts[virtgroup] = {
'volumes': vgroups[vgroup]['volumes'],
}
return vgroups_facts
def generate_nfs_offload_dict(array):
offload_facts = {}
api_version = array._list_available_rest_versions()
if AC_REQUIRED_API_VERSION in api_version:
offload = array.list_nfs_offload()
for target in range(0, len(offload)):
offloadt = offload[target]['name']
offload_facts[offloadt] = {
'status': offload[target]['status'],
'mount_point': offload[target]['mount_point'],
'protocol': offload[target]['protocol'],
'mount_options': offload[target]['mount_options'],
'address': offload[target]['address'],
}
return offload_facts
def generate_s3_offload_dict(array):
offload_facts = {}
api_version = array._list_available_rest_versions()
if S3_REQUIRED_API_VERSION in api_version:
offload = array.list_s3_offload()
for target in range(0, len(offload)):
offloadt = offload[target]['name']
offload_facts[offloadt] = {
'status': offload[target]['status'],
'bucket': offload[target]['bucket'],
'protocol': offload[target]['protocol'],
'access_key_id': offload[target]['access_key_id'],
}
return offload_facts
def generate_hgroups_dict(array):
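    # Map each host group to its member hosts, then append the protection
    # groups it belongs to and the connected [volume, lun] pairs reported by
    # the protect/connect listings.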
hgroups_facts = {}
hgroups = array.list_hgroups()
for hgroup in range(0, len(hgroups)):
hostgroup = hgroups[hgroup]['name']
hgroups_facts[hostgroup] = {
'hosts': hgroups[hgroup]['hosts'],
'pgs': [],
'vols': [],
}
pghgroups = array.list_hgroups(protect=True)
for pghg in range(0, len(pghgroups)):
pgname = pghgroups[pghg]['name']
hgroups_facts[pgname]['pgs'].append(pghgroups[pghg]['protection_group'])
volhgroups = array.list_hgroups(connect=True)
for pgvol in range(0, len(volhgroups)):
pgname = volhgroups[pgvol]['name']
volpgdict = [volhgroups[pgvol]['vol'], volhgroups[pgvol]['lun']]
hgroups_facts[pgname]['vols'].append(volpgdict)
return hgroups_facts
def generate_interfaces_dict(array):
api_version = array._list_available_rest_versions()
int_facts = {}
ports = array.list_ports()
for port in range(0, len(ports)):
int_name = ports[port]['name']
if ports[port]['wwn']:
int_facts[int_name] = ports[port]['wwn']
if ports[port]['iqn']:
int_facts[int_name] = ports[port]['iqn']
if NVME_API_VERSION in api_version:
if ports[port]['nqn']:
int_facts[int_name] = ports[port]['nqn']
return int_facts
def main():
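    # Validate the requested gather_subset values and collect only the
    # matching fact dictionaries; 'all' gathers every subset while the
    # default 'minimum' returns just the summary in facts['default'].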
argument_spec = purefa_argument_spec()
argument_spec.update(dict(
gather_subset=dict(default='minimum', type='list',)
))
module = AnsibleModule(argument_spec, supports_check_mode=False)
array = get_system(module)
subset = [test.lower() for test in module.params['gather_subset']]
valid_subsets = ('all', 'minimum', 'config', 'performance', 'capacity',
'network', 'subnet', 'interfaces', 'hgroups', 'pgroups',
'hosts', 'admins', 'volumes', 'snapshots', 'pods',
'vgroups', 'offload', 'apps', 'arrays')
subset_test = (test in valid_subsets for test in subset)
if not all(subset_test):
module.fail_json(msg="value must gather_subset must be one or more of: %s, got: %s"
% (",".join(valid_subsets), ",".join(subset)))
facts = {}
if 'minimum' in subset or 'all' in subset:
facts['default'] = generate_default_dict(array)
if 'performance' in subset or 'all' in subset:
facts['performance'] = generate_perf_dict(array)
if 'config' in subset or 'all' in subset:
facts['config'] = generate_config_dict(array)
if 'capacity' in subset or 'all' in subset:
facts['capacity'] = generate_capacity_dict(array)
if 'network' in subset or 'all' in subset:
facts['network'] = generate_network_dict(array)
if 'subnet' in subset or 'all' in subset:
facts['subnet'] = generate_subnet_dict(array)
if 'interfaces' in subset or 'all' in subset:
facts['interfaces'] = generate_interfaces_dict(array)
if 'hosts' in subset or 'all' in subset:
facts['hosts'] = generate_host_dict(array)
if 'volumes' in subset or 'all' in subset:
facts['volumes'] = generate_vol_dict(array)
if 'snapshots' in subset or 'all' in subset:
facts['snapshots'] = generate_snap_dict(array)
if 'hgroups' in subset or 'all' in subset:
facts['hgroups'] = generate_hgroups_dict(array)
if 'pgroups' in subset or 'all' in subset:
facts['pgroups'] = generate_pgroups_dict(array)
if 'pods' in subset or 'all' in subset:
facts['pods'] = generate_pods_dict(array)
if 'admins' in subset or 'all' in subset:
facts['admins'] = generate_admin_dict(array)
if 'vgroups' in subset or 'all' in subset:
facts['vgroups'] = generate_vgroups_dict(array)
if 'offload' in subset or 'all' in subset:
facts['nfs_offload'] = generate_nfs_offload_dict(array)
facts['s3_offload'] = generate_s3_offload_dict(array)
if 'apps' in subset or 'all' in subset:
facts['apps'] = generate_apps_dict(array)
if 'arrays' in subset or 'all' in subset:
facts['arrays'] = generate_conn_array_dict(array)
module.exit_json(ansible_facts={'ansible_purefa_facts': facts})
if __name__ == '__main__':
main()

View file

@ -0,0 +1,656 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Simon Dodsley (simon@purestorage.com)
# GNU General Public License v3.0+ (see COPYING or
# https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: purefb_facts
deprecated:
removed_in: '2.13'
why: Deprecated in favor of C(_info) module.
alternative: Use M(purefb_info) instead.
short_description: Collect facts from Pure Storage FlashBlade
description:
- Collect facts information from a Pure Storage FlashBlade running the
Purity//FB operating system. By default, the module will collect basic
fact information including hosts, host groups, protection
groups and volume counts. Additional fact information can be collected
based on the configured set of arguments.
author:
- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
options:
gather_subset:
description:
- When supplied, this argument will define the facts to be collected.
Possible values for this include all, minimum, config, performance,
capacity, network, subnets, lags, filesystems and snapshots.
required: false
type: list
default: minimum
extends_documentation_fragment:
- community.general.purestorage.fb
'''
EXAMPLES = r'''
- name: collect default set of facts
purefb_facts:
fb_url: 10.10.10.2
api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
- name: collect configuration and capacity facts
purefb_facts:
gather_subset:
- config
- capacity
fb_url: 10.10.10.2
api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
- name: collect all facts
purefb_facts:
gather_subset:
- all
fb_url: 10.10.10.2
api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
'''
RETURN = r'''
ansible_facts:
description: Returns the facts collected from the FlashBlade
returned: always
type: complex
sample: {
"capacity": {
"aggregate": {
"data_reduction": 1.1179228,
"snapshots": 0,
"total_physical": 17519748439,
"unique": 17519748439,
"virtual": 19585726464
},
"file-system": {
"data_reduction": 1.3642412,
"snapshots": 0,
"total_physical": 4748219708,
"unique": 4748219708,
"virtual": 6477716992
},
"object-store": {
"data_reduction": 1.0263462,
"snapshots": 0,
"total_physical": 12771528731,
"unique": 12771528731,
"virtual": 6477716992
},
"total": 83359896948925
},
"config": {
"alert_watchers": {
"enabled": true,
"name": "notify@acmestorage.com"
},
"array_management": {
"base_dn": null,
"bind_password": null,
"bind_user": null,
"enabled": false,
"name": "management",
"services": [
"management"
],
"uris": []
},
"directory_service_roles": {
"array_admin": {
"group": null,
"group_base": null
},
"ops_admin": {
"group": null,
"group_base": null
},
"readonly": {
"group": null,
"group_base": null
},
"storage_admin": {
"group": null,
"group_base": null
}
},
"dns": {
"domain": "demo.acmestorage.com",
"name": "demo-fb-1",
"nameservers": [
"8.8.8.8"
],
"search": [
"demo.acmestorage.com"
]
},
"nfs_directory_service": {
"base_dn": null,
"bind_password": null,
"bind_user": null,
"enabled": false,
"name": "nfs",
"services": [
"nfs"
],
"uris": []
},
"ntp": [
"0.ntp.pool.org"
],
"smb_directory_service": {
"base_dn": null,
"bind_password": null,
"bind_user": null,
"enabled": false,
"name": "smb",
"services": [
"smb"
],
"uris": []
},
"smtp": {
"name": "demo-fb-1",
"relay_host": null,
"sender_domain": "acmestorage.com"
},
"ssl_certs": {
"certificate": "-----BEGIN CERTIFICATE-----\n\n-----END CERTIFICATE-----",
"common_name": "Acme Storage",
"country": "US",
"email": null,
"intermediate_certificate": null,
"issued_by": "Acme Storage",
"issued_to": "Acme Storage",
"key_size": 4096,
"locality": null,
"name": "global",
"organization": "Acme Storage",
"organizational_unit": "Acme Storage",
"passphrase": null,
"private_key": null,
"state": null,
"status": "self-signed",
"valid_from": "1508433967000",
"valid_to": "2458833967000"
}
},
"default": {
"blades": 15,
"buckets": 7,
"filesystems": 2,
"flashblade_name": "demo-fb-1",
"object_store_accounts": 1,
"object_store_users": 1,
"purity_version": "2.2.0",
"snapshots": 1,
"total_capacity": 83359896948925
},
"filesystems": {
"k8s-pvc-d24b1357-579e-11e8-811f-ecf4bbc88f54": {
"destroyed": false,
"fast_remove": false,
"hard_limit": true,
"nfs_rules": "*(rw,no_root_squash)",
"provisioned": 21474836480,
"snapshot_enabled": false
},
"z": {
"destroyed": false,
"fast_remove": false,
"hard_limit": false,
"provisioned": 1073741824,
"snapshot_enabled": false
}
},
"lag": {
"uplink": {
"lag_speed": 0,
"port_speed": 40000000000,
"ports": [
{
"name": "CH1.FM1.ETH1.1"
},
{
"name": "CH1.FM1.ETH1.2"
                    }
],
"status": "healthy"
}
},
"network": {
"fm1.admin0": {
"address": "10.10.100.6",
"gateway": "10.10.100.1",
"mtu": 1500,
"netmask": "255.255.255.0",
"services": [
"support"
],
"type": "vip",
"vlan": 2200
},
"fm2.admin0": {
"address": "10.10.100.7",
"gateway": "10.10.100.1",
"mtu": 1500,
"netmask": "255.255.255.0",
"services": [
"support"
],
"type": "vip",
"vlan": 2200
},
"nfs1": {
"address": "10.10.100.4",
"gateway": "10.10.100.1",
"mtu": 1500,
"netmask": "255.255.255.0",
"services": [
"data"
],
"type": "vip",
"vlan": 2200
},
"vir0": {
"address": "10.10.100.5",
"gateway": "10.10.100.1",
"mtu": 1500,
"netmask": "255.255.255.0",
"services": [
"management"
],
"type": "vip",
"vlan": 2200
}
},
"performance": {
"aggregate": {
"bytes_per_op": 0,
"bytes_per_read": 0,
"bytes_per_write": 0,
"read_bytes_per_sec": 0,
"reads_per_sec": 0,
"usec_per_other_op": 0,
"usec_per_read_op": 0,
"usec_per_write_op": 0,
"write_bytes_per_sec": 0,
"writes_per_sec": 0
},
"http": {
"bytes_per_op": 0,
"bytes_per_read": 0,
"bytes_per_write": 0,
"read_bytes_per_sec": 0,
"reads_per_sec": 0,
"usec_per_other_op": 0,
"usec_per_read_op": 0,
"usec_per_write_op": 0,
"write_bytes_per_sec": 0,
"writes_per_sec": 0
},
"nfs": {
"bytes_per_op": 0,
"bytes_per_read": 0,
"bytes_per_write": 0,
"read_bytes_per_sec": 0,
"reads_per_sec": 0,
"usec_per_other_op": 0,
"usec_per_read_op": 0,
"usec_per_write_op": 0,
"write_bytes_per_sec": 0,
"writes_per_sec": 0
},
"s3": {
"bytes_per_op": 0,
"bytes_per_read": 0,
"bytes_per_write": 0,
"read_bytes_per_sec": 0,
"reads_per_sec": 0,
"usec_per_other_op": 0,
"usec_per_read_op": 0,
"usec_per_write_op": 0,
"write_bytes_per_sec": 0,
"writes_per_sec": 0
}
},
"snapshots": {
"z.188": {
"destroyed": false,
"source": "z",
"source_destroyed": false,
"suffix": "188"
}
},
"subnet": {
"new-mgmt": {
"gateway": "10.10.100.1",
"interfaces": [
{
"name": "fm1.admin0"
},
{
"name": "fm2.admin0"
},
{
"name": "nfs1"
},
{
"name": "vir0"
}
],
"lag": "uplink",
"mtu": 1500,
"prefix": "10.10.100.0/24",
"services": [
"data",
"management",
"support"
],
"vlan": 2200
}
}
}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.pure import get_blade, purefb_argument_spec
MIN_REQUIRED_API_VERSION = '1.3'
HARD_LIMIT_API_VERSION = '1.4'
def generate_default_dict(blade):
default_facts = {}
defaults = blade.arrays.list_arrays().items[0]
default_facts['flashblade_name'] = defaults.name
default_facts['purity_version'] = defaults.version
default_facts['filesystems'] = \
len(blade.file_systems.list_file_systems().items)
default_facts['snapshots'] = \
len(blade.file_system_snapshots.list_file_system_snapshots().items)
default_facts['buckets'] = len(blade.buckets.list_buckets().items)
default_facts['object_store_users'] = \
len(blade.object_store_users.list_object_store_users().items)
default_facts['object_store_accounts'] = \
len(blade.object_store_accounts.list_object_store_accounts().items)
default_facts['blades'] = len(blade.blade.list_blades().items)
default_facts['total_capacity'] = \
blade.arrays.list_arrays_space().items[0].capacity
return default_facts
def generate_perf_dict(blade):
perf_facts = {}
total_perf = blade.arrays.list_arrays_performance()
http_perf = blade.arrays.list_arrays_performance(protocol='http')
s3_perf = blade.arrays.list_arrays_performance(protocol='s3')
nfs_perf = blade.arrays.list_arrays_performance(protocol='nfs')
perf_facts['aggregate'] = {
'bytes_per_op': total_perf.items[0].bytes_per_op,
'bytes_per_read': total_perf.items[0].bytes_per_read,
'bytes_per_write': total_perf.items[0].bytes_per_write,
'read_bytes_per_sec': total_perf.items[0].read_bytes_per_sec,
'reads_per_sec': total_perf.items[0].reads_per_sec,
'usec_per_other_op': total_perf.items[0].usec_per_other_op,
'usec_per_read_op': total_perf.items[0].usec_per_read_op,
'usec_per_write_op': total_perf.items[0].usec_per_write_op,
'write_bytes_per_sec': total_perf.items[0].write_bytes_per_sec,
'writes_per_sec': total_perf.items[0].writes_per_sec,
}
perf_facts['http'] = {
'bytes_per_op': http_perf.items[0].bytes_per_op,
'bytes_per_read': http_perf.items[0].bytes_per_read,
'bytes_per_write': http_perf.items[0].bytes_per_write,
'read_bytes_per_sec': http_perf.items[0].read_bytes_per_sec,
'reads_per_sec': http_perf.items[0].reads_per_sec,
'usec_per_other_op': http_perf.items[0].usec_per_other_op,
'usec_per_read_op': http_perf.items[0].usec_per_read_op,
'usec_per_write_op': http_perf.items[0].usec_per_write_op,
'write_bytes_per_sec': http_perf.items[0].write_bytes_per_sec,
'writes_per_sec': http_perf.items[0].writes_per_sec,
}
perf_facts['s3'] = {
'bytes_per_op': s3_perf.items[0].bytes_per_op,
'bytes_per_read': s3_perf.items[0].bytes_per_read,
'bytes_per_write': s3_perf.items[0].bytes_per_write,
'read_bytes_per_sec': s3_perf.items[0].read_bytes_per_sec,
'reads_per_sec': s3_perf.items[0].reads_per_sec,
'usec_per_other_op': s3_perf.items[0].usec_per_other_op,
'usec_per_read_op': s3_perf.items[0].usec_per_read_op,
'usec_per_write_op': s3_perf.items[0].usec_per_write_op,
'write_bytes_per_sec': s3_perf.items[0].write_bytes_per_sec,
'writes_per_sec': s3_perf.items[0].writes_per_sec,
}
perf_facts['nfs'] = {
'bytes_per_op': nfs_perf.items[0].bytes_per_op,
'bytes_per_read': nfs_perf.items[0].bytes_per_read,
'bytes_per_write': nfs_perf.items[0].bytes_per_write,
'read_bytes_per_sec': nfs_perf.items[0].read_bytes_per_sec,
'reads_per_sec': nfs_perf.items[0].reads_per_sec,
'usec_per_other_op': nfs_perf.items[0].usec_per_other_op,
'usec_per_read_op': nfs_perf.items[0].usec_per_read_op,
'usec_per_write_op': nfs_perf.items[0].usec_per_write_op,
'write_bytes_per_sec': nfs_perf.items[0].write_bytes_per_sec,
'writes_per_sec': nfs_perf.items[0].writes_per_sec,
}
return perf_facts
def generate_config_dict(blade):
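    # DNS, SMTP, alert watchers, NFS/SMB directory services, NTP and the SSL
    # certificate.  The management directory service and role assignments are
    # only queried on API >= HARD_LIMIT_API_VERSION (1.4).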
config_facts = {}
config_facts['dns'] = blade.dns.list_dns().items[0].to_dict()
config_facts['smtp'] = blade.smtp.list_smtp().items[0].to_dict()
config_facts['alert_watchers'] = \
blade.alert_watchers.list_alert_watchers().items[0].to_dict()
api_version = blade.api_version.list_versions().versions
if HARD_LIMIT_API_VERSION in api_version:
config_facts['array_management'] = \
blade.directory_services.list_directory_services(names=['management']).items[0].to_dict()
config_facts['directory_service_roles'] = {}
roles = blade.directory_services.list_directory_services_roles()
for role in range(0, len(roles.items)):
role_name = roles.items[role].name
config_facts['directory_service_roles'][role_name] = {
'group': roles.items[role].group,
'group_base': roles.items[role].group_base
}
config_facts['nfs_directory_service'] = \
blade.directory_services.list_directory_services(names=['nfs']).items[0].to_dict()
config_facts['smb_directory_service'] = \
blade.directory_services.list_directory_services(names=['smb']).items[0].to_dict()
config_facts['ntp'] = blade.arrays.list_arrays().items[0].ntp_servers
config_facts['ssl_certs'] = \
blade.certificates.list_certificates().items[0].to_dict()
return config_facts
def generate_subnet_dict(blade):
sub_facts = {}
subnets = blade.subnets.list_subnets()
for sub in range(0, len(subnets.items)):
sub_name = subnets.items[sub].name
if subnets.items[sub].enabled:
sub_facts[sub_name] = {
'gateway': subnets.items[sub].gateway,
'mtu': subnets.items[sub].mtu,
'vlan': subnets.items[sub].vlan,
'prefix': subnets.items[sub].prefix,
'services': subnets.items[sub].services,
}
sub_facts[sub_name]['lag'] = subnets.items[sub].link_aggregation_group.name
sub_facts[sub_name]['interfaces'] = []
for iface in range(0, len(subnets.items[sub].interfaces)):
sub_facts[sub_name]['interfaces'].append({'name': subnets.items[sub].interfaces[iface].name})
return sub_facts
def generate_lag_dict(blade):
lag_facts = {}
groups = blade.link_aggregation_groups.list_link_aggregation_groups()
for groupcnt in range(0, len(groups.items)):
lag_name = groups.items[groupcnt].name
lag_facts[lag_name] = {
'lag_speed': groups.items[groupcnt].lag_speed,
'port_speed': groups.items[groupcnt].port_speed,
'status': groups.items[groupcnt].status,
}
lag_facts[lag_name]['ports'] = []
for port in range(0, len(groups.items[groupcnt].ports)):
lag_facts[lag_name]['ports'].append({'name': groups.items[groupcnt].ports[port].name})
return lag_facts
def generate_network_dict(blade):
net_facts = {}
ports = blade.network_interfaces.list_network_interfaces()
for portcnt in range(0, len(ports.items)):
int_name = ports.items[portcnt].name
if ports.items[portcnt].enabled:
net_facts[int_name] = {
'type': ports.items[portcnt].type,
'mtu': ports.items[portcnt].mtu,
'vlan': ports.items[portcnt].vlan,
'address': ports.items[portcnt].address,
'services': ports.items[portcnt].services,
'gateway': ports.items[portcnt].gateway,
'netmask': ports.items[portcnt].netmask,
}
return net_facts
def generate_capacity_dict(blade):
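    # Overall capacity plus space breakdowns for the file-system and
    # object-store protocols, each reporting data reduction, snapshot,
    # physical, unique and virtual usage.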
capacity_facts = {}
total_cap = blade.arrays.list_arrays_space()
file_cap = blade.arrays.list_arrays_space(type='file-system')
object_cap = blade.arrays.list_arrays_space(type='object-store')
capacity_facts['total'] = total_cap.items[0].capacity
capacity_facts['aggregate'] = {
'data_reduction': total_cap.items[0].space.data_reduction,
'snapshots': total_cap.items[0].space.snapshots,
'total_physical': total_cap.items[0].space.total_physical,
'unique': total_cap.items[0].space.unique,
'virtual': total_cap.items[0].space.virtual,
}
capacity_facts['file-system'] = {
'data_reduction': file_cap.items[0].space.data_reduction,
'snapshots': file_cap.items[0].space.snapshots,
'total_physical': file_cap.items[0].space.total_physical,
'unique': file_cap.items[0].space.unique,
'virtual': file_cap.items[0].space.virtual,
}
capacity_facts['object-store'] = {
'data_reduction': object_cap.items[0].space.data_reduction,
'snapshots': object_cap.items[0].space.snapshots,
'total_physical': object_cap.items[0].space.total_physical,
'unique': object_cap.items[0].space.unique,
        'virtual': object_cap.items[0].space.virtual,
}
return capacity_facts
def generate_snap_dict(blade):
snap_facts = {}
snaps = blade.file_system_snapshots.list_file_system_snapshots()
for snap in range(0, len(snaps.items)):
snapshot = snaps.items[snap].name
snap_facts[snapshot] = {
'destroyed': snaps.items[snap].destroyed,
'source': snaps.items[snap].source,
'suffix': snaps.items[snap].suffix,
'source_destroyed': snaps.items[snap].source_destroyed,
}
return snap_facts
def generate_fs_dict(blade):
fs_facts = {}
fsys = blade.file_systems.list_file_systems()
for fsystem in range(0, len(fsys.items)):
share = fsys.items[fsystem].name
fs_facts[share] = {
'fast_remove': fsys.items[fsystem].fast_remove_directory_enabled,
'snapshot_enabled': fsys.items[fsystem].snapshot_directory_enabled,
'provisioned': fsys.items[fsystem].provisioned,
'destroyed': fsys.items[fsystem].destroyed,
}
if fsys.items[fsystem].http.enabled:
fs_facts[share]['http'] = fsys.items[fsystem].http.enabled
if fsys.items[fsystem].smb.enabled:
fs_facts[share]['smb_mode'] = fsys.items[fsystem].smb.acl_mode
if fsys.items[fsystem].nfs.enabled:
fs_facts[share]['nfs_rules'] = fsys.items[fsystem].nfs.rules
api_version = blade.api_version.list_versions().versions
if HARD_LIMIT_API_VERSION in api_version:
fs_facts[share]['hard_limit'] = fsys.items[fsystem].hard_limit_enabled
return fs_facts
def main():
argument_spec = purefb_argument_spec()
argument_spec.update(dict(
gather_subset=dict(default='minimum', type='list',)
))
module = AnsibleModule(argument_spec, supports_check_mode=True)
blade = get_blade(module)
versions = blade.api_version.list_versions().versions
if MIN_REQUIRED_API_VERSION not in versions:
module.fail_json(msg='FlashBlade REST version not supported. Minimum version required: {0}'.format(MIN_REQUIRED_API_VERSION))
subset = [test.lower() for test in module.params['gather_subset']]
valid_subsets = ('all', 'minimum', 'config', 'performance', 'capacity',
'network', 'subnets', 'lags',
'filesystems', 'snapshots')
subset_test = (test in valid_subsets for test in subset)
if not all(subset_test):
module.fail_json(msg="value must gather_subset must be one or more of: %s, got: %s"
% (",".join(valid_subsets), ",".join(subset)))
facts = {}
if 'minimum' in subset or 'all' in subset:
facts['default'] = generate_default_dict(blade)
if 'performance' in subset or 'all' in subset:
facts['performance'] = generate_perf_dict(blade)
if 'config' in subset or 'all' in subset:
facts['config'] = generate_config_dict(blade)
if 'capacity' in subset or 'all' in subset:
facts['capacity'] = generate_capacity_dict(blade)
if 'lags' in subset or 'all' in subset:
facts['lag'] = generate_lag_dict(blade)
if 'network' in subset or 'all' in subset:
facts['network'] = generate_network_dict(blade)
if 'subnets' in subset or 'all' in subset:
facts['subnet'] = generate_subnet_dict(blade)
if 'filesystems' in subset or 'all' in subset:
facts['filesystems'] = generate_fs_dict(blade)
if 'snapshots' in subset or 'all' in subset:
facts['snapshots'] = generate_snap_dict(blade)
module.exit_json(ansible_facts={'ansible_purefb_facts': facts})
if __name__ == '__main__':
main()

View file

@ -0,0 +1,214 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Sandeep Kasargod (sandeep@vexata.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: vexata_eg
short_description: Manage export groups on Vexata VX100 storage arrays
description:
- Create or delete export groups on a Vexata VX100 array.
- An export group is a tuple of a volume group, initiator group and port
group that allows a set of volumes to be exposed to one or more hosts
through specific array ports.
author:
- Sandeep Kasargod (@vexata)
options:
name:
description:
- Export group name.
required: true
type: str
state:
description:
    - Creates the export group when present or deletes it when absent.
default: present
choices: [ present, absent ]
type: str
vg:
description:
- Volume group name.
type: str
ig:
description:
- Initiator group name.
type: str
pg:
description:
- Port group name.
type: str
extends_documentation_fragment:
- community.general.vexata.vx100
'''
EXAMPLES = r'''
- name: Create export group named db_export.
vexata_eg:
name: db_export
vg: dbvols
ig: dbhosts
pg: pg1
state: present
array: vx100_ultra.test.com
user: admin
password: secret
- name: Delete export group named db_export
vexata_eg:
name: db_export
state: absent
array: vx100_ultra.test.com
user: admin
password: secret
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.vexata import (
argument_spec, get_array, required_together)
def get_eg(module, array):
"""Retrieve a named vg if it exists, None if absent."""
name = module.params['name']
try:
egs = array.list_egs()
        eg = list(filter(lambda eg: eg['name'] == name, egs))
if len(eg) == 1:
return eg[0]
else:
return None
except Exception:
module.fail_json(msg='Error while attempting to retrieve export groups.')
def get_vg_id(module, array):
"""Retrieve a named vg's id if it exists, error if absent."""
name = module.params['vg']
try:
vgs = array.list_vgs()
        vg = list(filter(lambda vg: vg['name'] == name, vgs))
if len(vg) == 1:
return vg[0]['id']
else:
module.fail_json(msg='Volume group {0} was not found.'.format(name))
except Exception:
module.fail_json(msg='Error while attempting to retrieve volume groups.')
def get_ig_id(module, array):
"""Retrieve a named ig's id if it exists, error if absent."""
name = module.params['ig']
try:
igs = array.list_igs()
        ig = list(filter(lambda ig: ig['name'] == name, igs))
if len(ig) == 1:
return ig[0]['id']
else:
module.fail_json(msg='Initiator group {0} was not found.'.format(name))
except Exception:
module.fail_json(msg='Error while attempting to retrieve initiator groups.')
def get_pg_id(module, array):
"""Retrieve a named pg's id if it exists, error if absent."""
name = module.params['pg']
try:
pgs = array.list_pgs()
        pg = list(filter(lambda pg: pg['name'] == name, pgs))
if len(pg) == 1:
return pg[0]['id']
else:
module.fail_json(msg='Port group {0} was not found.'.format(name))
except Exception:
module.fail_json(msg='Error while attempting to retrieve port groups.')
def create_eg(module, array):
""""Create a new export group."""
changed = False
eg_name = module.params['name']
vg_id = get_vg_id(module, array)
ig_id = get_ig_id(module, array)
pg_id = get_pg_id(module, array)
if module.check_mode:
module.exit_json(changed=changed)
try:
eg = array.create_eg(
eg_name,
'Ansible export group',
(vg_id, ig_id, pg_id))
if eg:
module.log(msg='Created export group {0}'.format(eg_name))
changed = True
else:
raise Exception
except Exception:
module.fail_json(msg='Export group {0} create failed.'.format(eg_name))
module.exit_json(changed=changed)
def delete_eg(module, array, eg):
changed = False
eg_name = eg['name']
if module.check_mode:
module.exit_json(changed=changed)
try:
ok = array.delete_eg(
eg['id'])
if ok:
module.log(msg='Export group {0} deleted.'.format(eg_name))
changed = True
else:
raise Exception
except Exception:
module.fail_json(msg='Export group {0} delete failed.'.format(eg_name))
module.exit_json(changed=changed)
def main():
arg_spec = argument_spec()
arg_spec.update(
dict(
name=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['present', 'absent']),
vg=dict(type='str'),
ig=dict(type='str'),
pg=dict(type='str')
)
)
module = AnsibleModule(arg_spec,
supports_check_mode=True,
required_together=required_together())
state = module.params['state']
array = get_array(module)
eg = get_eg(module, array)
if state == 'present' and not eg:
create_eg(module, array)
elif state == 'absent' and eg:
delete_eg(module, array, eg)
else:
module.exit_json(changed=False)
if __name__ == '__main__':
main()

View file

@ -0,0 +1,201 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Sandeep Kasargod (sandeep@vexata.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: vexata_volume
short_description: Manage volumes on Vexata VX100 storage arrays
description:
- Create, delete, or extend volumes on a Vexata VX100 array.
author:
- Sandeep Kasargod (@vexata)
options:
name:
description:
- Volume name.
required: true
type: str
state:
description:
    - Creates/Modifies the volume when C(present) or removes it when C(absent).
default: present
choices: [ present, absent ]
type: str
size:
description:
- Volume size in M, G, T units. M=2^20, G=2^30, T=2^40 bytes.
type: str
extends_documentation_fragment:
- community.general.vexata.vx100
'''
EXAMPLES = r'''
- name: Create new 2 TiB volume named foo
vexata_volume:
name: foo
size: 2T
state: present
array: vx100_ultra.test.com
user: admin
password: secret
- name: Expand volume named foo to 4 TiB
vexata_volume:
name: foo
size: 4T
state: present
array: vx100_ultra.test.com
user: admin
password: secret
- name: Delete volume named foo
vexata_volume:
name: foo
state: absent
array: vx100_ultra.test.com
user: admin
password: secret
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.vexata import (
argument_spec, get_array, required_together, size_to_MiB)
def get_volume(module, array):
"""Retrieve a named volume if it exists, None if absent."""
name = module.params['name']
try:
vols = array.list_volumes()
        vol = list(filter(lambda v: v['name'] == name, vols))
if len(vol) == 1:
return vol[0]
else:
return None
except Exception:
module.fail_json(msg='Error while attempting to retrieve volumes.')
def validate_size(module, err_msg):
size = module.params.get('size', False)
if not size:
module.fail_json(msg=err_msg)
size = size_to_MiB(size)
if size <= 0:
module.fail_json(msg='Invalid volume size, must be <integer>[MGT].')
return size
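# size_to_MiB comes from the shared vexata module_utils; validate_size only
# relies on it returning the size in MiB for a '<integer>[MGT]' string and a
# value <= 0 on malformed input. A minimal sketch of that assumed contract
# (illustrative only, not the canonical implementation):
#
#   import re
#
#   def size_to_MiB(size):
#       match = re.match(r'^(\d+)([MGT])$', size)
#       if not match:
#           return -1
#       unit_to_mib = {'M': 1, 'G': 1024, 'T': 1024 * 1024}
#       return int(match.group(1)) * unit_to_mib[match.group(2)]
#
# Under that assumption, the '2T' volume from EXAMPLES maps to 2097152 MiB.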
def create_volume(module, array):
""""Create a new volume."""
changed = False
size = validate_size(module, err_msg='Size is required to create volume.')
if module.check_mode:
module.exit_json(changed=changed)
try:
vol = array.create_volume(
module.params['name'],
'Ansible volume',
size)
if vol:
module.log(msg='Created volume {0}'.format(vol['id']))
changed = True
else:
module.fail_json(msg='Volume create failed.')
except Exception:
pass
module.exit_json(changed=changed)
def update_volume(module, array, volume):
"""Expand the volume size."""
changed = False
    size = validate_size(module, err_msg='Size is required to update volume.')
prev_size = volume['volSize']
if size <= prev_size:
module.log(msg='Volume expanded size needs to be larger '
'than current size.')
if module.check_mode:
module.exit_json(changed=changed)
try:
vol = array.grow_volume(
volume['name'],
volume['description'],
volume['id'],
size)
if vol:
changed = True
except Exception:
pass
module.exit_json(changed=changed)
def delete_volume(module, array, volume):
changed = False
vol_name = volume['name']
if module.check_mode:
module.exit_json(changed=changed)
try:
ok = array.delete_volume(
volume['id'])
if ok:
module.log(msg='Volume {0} deleted.'.format(vol_name))
changed = True
else:
raise Exception
except Exception:
pass
module.exit_json(changed=changed)
def main():
arg_spec = argument_spec()
arg_spec.update(
dict(
name=dict(type='str', required=True),
state=dict(default='present', choices=['present', 'absent']),
size=dict(type='str')
)
)
module = AnsibleModule(arg_spec,
supports_check_mode=True,
required_together=required_together())
state = module.params['state']
array = get_array(module)
volume = get_volume(module, array)
if state == 'present':
if not volume:
create_volume(module, array)
else:
update_volume(module, array, volume)
elif state == 'absent' and volume:
delete_volume(module, array, volume)
else:
module.exit_json(changed=False)
if __name__ == '__main__':
main()

View file

@ -0,0 +1,266 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, Johan Wiren <johan.wiren.se@gmail.com>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: zfs
short_description: Manage zfs
description:
- Manages ZFS file systems, volumes, clones and snapshots
options:
name:
description:
- File system, snapshot or volume name e.g. C(rpool/myfs).
required: true
state:
description:
- Whether to create (C(present)), or remove (C(absent)) a
file system, snapshot or volume. All parents/children
will be created/destroyed as needed to reach the desired state.
choices: [ absent, present ]
required: true
origin:
description:
- Snapshot from which to create a clone.
extra_zfs_properties:
description:
- A dictionary of zfs properties to be set.
- See the zfs(8) man page for more information.
author:
- Johan Wiren (@johanwiren)
'''
EXAMPLES = '''
- name: Create a new file system called myfs in pool rpool with the setuid property turned off
zfs:
name: rpool/myfs
state: present
extra_zfs_properties:
setuid: off
- name: Create a new volume called myvol in pool rpool.
zfs:
name: rpool/myvol
state: present
extra_zfs_properties:
volsize: 10M
- name: Create a snapshot of rpool/myfs file system.
zfs:
name: rpool/myfs@mysnapshot
state: present
- name: Create a new file system called myfs2 with snapdir enabled
zfs:
name: rpool/myfs2
state: present
extra_zfs_properties:
snapdir: enabled
- name: Create a new file system by cloning a snapshot
zfs:
name: rpool/cloned_fs
state: present
origin: rpool/myfs@mysnapshot
- name: Destroy a filesystem
zfs:
name: rpool/myfs
state: absent
'''
import os
from ansible.module_utils.basic import AnsibleModule
class Zfs(object):
def __init__(self, module, name, properties):
self.module = module
self.name = name
self.properties = properties
self.changed = False
self.zfs_cmd = module.get_bin_path('zfs', True)
self.zpool_cmd = module.get_bin_path('zpool', True)
self.pool = name.split('/')[0]
self.is_solaris = os.uname()[0] == 'SunOS'
self.is_openzfs = self.check_openzfs()
self.enhanced_sharing = self.check_enhanced_sharing()
def check_openzfs(self):
cmd = [self.zpool_cmd]
cmd.extend(['get', 'version'])
cmd.append(self.pool)
(rc, out, err) = self.module.run_command(cmd, check_rc=True)
version = out.splitlines()[-1].split()[2]
if version == '-':
return True
if int(version) == 5000:
return True
return False
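    # Background for the version checks above and below (assumed, based on
    # common ZFS behaviour): pools using feature flags (OpenZFS) report the
    # 'version' property as '-' or as the sentinel value 5000, while legacy
    # Solaris pools report a plain numeric version (enhanced share.nfs /
    # share.smb handling arrived with pool version 34).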
def check_enhanced_sharing(self):
if self.is_solaris and not self.is_openzfs:
cmd = [self.zpool_cmd]
cmd.extend(['get', 'version'])
cmd.append(self.pool)
(rc, out, err) = self.module.run_command(cmd, check_rc=True)
version = out.splitlines()[-1].split()[2]
if int(version) >= 34:
return True
return False
def exists(self):
cmd = [self.zfs_cmd, 'list', '-t', 'all', self.name]
(rc, out, err) = self.module.run_command(' '.join(cmd))
if rc == 0:
return True
else:
return False
def create(self):
if self.module.check_mode:
self.changed = True
return
properties = self.properties
origin = self.module.params.get('origin', None)
cmd = [self.zfs_cmd]
if "@" in self.name:
action = 'snapshot'
elif origin:
action = 'clone'
else:
action = 'create'
cmd.append(action)
if action in ['create', 'clone']:
cmd += ['-p']
if properties:
for prop, value in properties.items():
if prop == 'volsize':
cmd += ['-V', value]
elif prop == 'volblocksize':
cmd += ['-b', value]
else:
cmd += ['-o', '%s="%s"' % (prop, value)]
if origin and action == 'clone':
cmd.append(origin)
cmd.append(self.name)
(rc, out, err) = self.module.run_command(' '.join(cmd))
if rc == 0:
self.changed = True
else:
self.module.fail_json(msg=err)
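    # For illustration, the commands assembled above for the tasks in the
    # EXAMPLES section would look roughly like:
    #   zfs create -p -o setuid="off" rpool/myfs
    #   zfs create -p -V 10M rpool/myvol
    #   zfs snapshot rpool/myfs@mysnapshot
    #   zfs clone -p rpool/myfs@mysnapshot rpool/cloned_fs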
def destroy(self):
if self.module.check_mode:
self.changed = True
return
cmd = [self.zfs_cmd, 'destroy', '-R', self.name]
(rc, out, err) = self.module.run_command(' '.join(cmd))
if rc == 0:
self.changed = True
else:
self.module.fail_json(msg=err)
def set_property(self, prop, value):
if self.module.check_mode:
self.changed = True
return
cmd = [self.zfs_cmd, 'set', prop + '=' + str(value), self.name]
(rc, out, err) = self.module.run_command(cmd)
if rc == 0:
self.changed = True
else:
self.module.fail_json(msg=err)
def set_properties_if_changed(self):
current_properties = self.get_current_properties()
for prop, value in self.properties.items():
if current_properties.get(prop, None) != value:
self.set_property(prop, value)
def get_current_properties(self):
cmd = [self.zfs_cmd, 'get', '-H']
if self.enhanced_sharing:
cmd += ['-e']
cmd += ['all', self.name]
rc, out, err = self.module.run_command(" ".join(cmd))
properties = dict()
for prop, value, source in [l.split('\t')[1:4] for l in out.splitlines()]:
if source == 'local':
properties[prop] = value
# Add alias for enhanced sharing properties
if self.enhanced_sharing:
properties['sharenfs'] = properties.get('share.nfs', None)
properties['sharesmb'] = properties.get('share.smb', None)
return properties
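# Sample `zfs get -H all <name>` output parsed by get_current_properties()
# above (illustrative values; the columns are tab-separated
# name/property/value/source and only rows whose source is 'local' are kept):
#
#   rpool/myfs   compression   lz4           local
#   rpool/myfs   atime         off           local
#   rpool/myfs   mountpoint    /rpool/myfs   default
#
# which would yield {'compression': 'lz4', 'atime': 'off'}.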
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True),
state=dict(type='str', required=True, choices=['absent', 'present']),
origin=dict(type='str', default=None),
extra_zfs_properties=dict(type='dict', default={}),
),
supports_check_mode=True,
)
state = module.params.get('state')
name = module.params.get('name')
if module.params.get('origin') and '@' in name:
module.fail_json(msg='cannot specify origin when operating on a snapshot')
# Reverse the boolification of zfs properties
for prop, value in module.params['extra_zfs_properties'].items():
if isinstance(value, bool):
if value is True:
module.params['extra_zfs_properties'][prop] = 'on'
else:
module.params['extra_zfs_properties'][prop] = 'off'
else:
module.params['extra_zfs_properties'][prop] = value
result = dict(
name=name,
state=state,
)
zfs = Zfs(module, name, module.params['extra_zfs_properties'])
if state == 'present':
if zfs.exists():
zfs.set_properties_if_changed()
else:
zfs.create()
elif state == 'absent':
if zfs.exists():
zfs.destroy()
result.update(zfs.properties)
result['changed'] = zfs.changed
module.exit_json(**result)
if __name__ == '__main__':
main()

View file

@ -0,0 +1,267 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Nate Coraor <nate@coraor.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = r'''
---
module: zfs_delegate_admin
short_description: Manage ZFS delegated administration (user admin privileges)
description:
- Manages ZFS file system delegated administration permissions, which allow unprivileged users to perform ZFS
operations normally restricted to the superuser.
- See the C(zfs allow) section of C(zfs(1M)) for detailed explanations of options.
- This module attempts to adhere to the behavior of the command line tool as much as possible.
requirements:
- "A ZFS/OpenZFS implementation that supports delegation with `zfs allow`, including: Solaris >= 10, illumos (all
versions), FreeBSD >= 8.0R, ZFS on Linux >= 0.7.0."
options:
name:
description:
- File system or volume name e.g. C(rpool/myfs).
required: true
type: str
state:
description:
- Whether to allow (C(present)), or unallow (C(absent)) a permission.
      - When set to C(present), at least one "entity" param of I(users), I(groups), or I(everyone) is required.
- When set to C(absent), removes permissions from the specified entities, or removes all permissions if no entity params are specified.
required: true
choices: [ absent, present ]
default: present
users:
description:
- List of users to whom permission(s) should be granted.
type: list
groups:
description:
- List of groups to whom permission(s) should be granted.
type: list
everyone:
description:
- Apply permissions to everyone.
type: bool
default: no
permissions:
description:
- The list of permission(s) to delegate (required if C(state) is C(present)).
type: list
choices: [ allow, clone, create, destroy, mount, promote, readonly, receive, rename, rollback, send, share, snapshot, unallow ]
local:
description:
- Apply permissions to C(name) locally (C(zfs allow -l)).
type: bool
descendents:
description:
- Apply permissions to C(name)'s descendents (C(zfs allow -d)).
type: bool
recursive:
description:
- Unallow permissions recursively (ignored when C(state) is C(present)).
type: bool
default: no
author:
- Nate Coraor (@natefoo)
'''
EXAMPLES = r'''
- name: Grant `zfs allow` and `unallow` permission to the `adm` user with the default local+descendents scope
zfs_delegate_admin:
name: rpool/myfs
users: adm
permissions: allow,unallow
- name: Grant `zfs send` to everyone, plus the group `backup`
zfs_delegate_admin:
name: rpool/myvol
groups: backup
everyone: yes
permissions: send
- name: Grant `zfs send,receive` to users `foo` and `bar` with local scope only
zfs_delegate_admin:
name: rpool/myfs
users: foo,bar
permissions: send,receive
local: yes
- name: Revoke all permissions from everyone (permissions specifically assigned to users and groups remain)
  zfs_delegate_admin:
    name: rpool/myfs
    everyone: yes
    state: absent
'''
# This module does not return anything other than the standard
# changed/state/msg/stdout
RETURN = '''
'''
from itertools import product
from ansible.module_utils.basic import AnsibleModule
class ZfsDelegateAdmin(object):
def __init__(self, module):
self.module = module
self.name = module.params.get('name')
self.state = module.params.get('state')
self.users = module.params.get('users')
self.groups = module.params.get('groups')
self.everyone = module.params.get('everyone')
self.perms = module.params.get('permissions')
self.scope = None
self.changed = False
self.initial_perms = None
self.subcommand = 'allow'
self.recursive_opt = []
self.run_method = self.update
self.setup(module)
def setup(self, module):
""" Validate params and set up for run.
"""
if self.state == 'absent':
self.subcommand = 'unallow'
if module.params.get('recursive'):
self.recursive_opt = ['-r']
local = module.params.get('local')
descendents = module.params.get('descendents')
if (local and descendents) or (not local and not descendents):
self.scope = 'ld'
elif local:
self.scope = 'l'
elif descendents:
self.scope = 'd'
else:
self.module.fail_json(msg='Impossible value for local and descendents')
if not (self.users or self.groups or self.everyone):
if self.state == 'present':
self.module.fail_json(msg='One of `users`, `groups`, or `everyone` must be set')
elif self.state == 'absent':
self.run_method = self.clear
# ansible ensures the else cannot happen here
self.zfs_path = module.get_bin_path('zfs', True)
@property
def current_perms(self):
""" Parse the output of `zfs allow <name>` to retrieve current permissions.
"""
out = self.run_zfs_raw(subcommand='allow')
perms = {
'l': {'u': {}, 'g': {}, 'e': []},
'd': {'u': {}, 'g': {}, 'e': []},
'ld': {'u': {}, 'g': {}, 'e': []},
}
linemap = {
'Local permissions:': 'l',
'Descendent permissions:': 'd',
'Local+Descendent permissions:': 'ld',
}
scope = None
for line in out.splitlines():
scope = linemap.get(line, scope)
if not scope:
continue
try:
if line.startswith('\tuser ') or line.startswith('\tgroup '):
ent_type, ent, cur_perms = line.split()
perms[scope][ent_type[0]][ent] = cur_perms.split(',')
elif line.startswith('\teveryone '):
perms[scope]['e'] = line.split()[1].split(',')
except ValueError:
self.module.fail_json(msg="Cannot parse user/group permission output by `zfs allow`: '%s'" % line)
return perms
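    # For reference, the `zfs allow <name>` output parsed above looks roughly
    # like this (illustrative):
    #
    #   ---- Permissions on rpool/myfs ------------------------------------
    #   Local+Descendent permissions:
    #           user adm allow,unallow
    #           group backup send
    #
    # The entity lines are tab-indented in the real output, which is what the
    # startswith('\tuser ') / startswith('\tgroup ') checks rely on.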
def run_zfs_raw(self, subcommand=None, args=None):
""" Run a raw zfs command, fail on error.
"""
cmd = [self.zfs_path, subcommand or self.subcommand] + (args or []) + [self.name]
rc, out, err = self.module.run_command(cmd)
if rc:
self.module.fail_json(msg='Command `%s` failed: %s' % (' '.join(cmd), err))
return out
def run_zfs(self, args):
""" Run zfs allow/unallow with appropriate options as per module arguments.
"""
args = self.recursive_opt + ['-' + self.scope] + args
if self.perms:
args.append(','.join(self.perms))
return self.run_zfs_raw(args=args)
def clear(self):
""" Called by run() to clear all permissions.
"""
changed = False
stdout = ''
for scope, ent_type in product(('ld', 'l', 'd'), ('u', 'g')):
for ent in self.initial_perms[scope][ent_type].keys():
stdout += self.run_zfs(['-%s' % ent_type, ent])
changed = True
for scope in ('ld', 'l', 'd'):
if self.initial_perms[scope]['e']:
stdout += self.run_zfs(['-e'])
changed = True
return (changed, stdout)
def update(self):
""" Update permissions as per module arguments.
"""
stdout = ''
for ent_type, entities in (('u', self.users), ('g', self.groups)):
if entities:
stdout += self.run_zfs(['-%s' % ent_type, ','.join(entities)])
if self.everyone:
stdout += self.run_zfs(['-e'])
return (self.initial_perms != self.current_perms, stdout)
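    # As an example, the "local scope only" task from EXAMPLES (users foo,bar,
    # permissions send,receive, local: yes) would make update() run roughly:
    #   zfs allow -l -u foo,bar send,receive rpool/myfs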
def run(self):
""" Run an operation, return results for Ansible.
"""
exit_args = {'state': self.state}
self.initial_perms = self.current_perms
exit_args['changed'], stdout = self.run_method()
if exit_args['changed']:
exit_args['msg'] = 'ZFS delegated admin permissions updated'
exit_args['stdout'] = stdout
self.module.exit_json(**exit_args)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['absent', 'present']),
users=dict(type='list'),
groups=dict(type='list'),
everyone=dict(type='bool', default=False),
permissions=dict(type='list',
choices=['allow', 'clone', 'create', 'destroy', 'mount', 'promote', 'readonly', 'receive',
'rename', 'rollback', 'send', 'share', 'snapshot', 'unallow']),
local=dict(type='bool'),
descendents=dict(type='bool'),
recursive=dict(type='bool', default=False),
),
supports_check_mode=False,
required_if=[('state', 'present', ['permissions'])],
)
zfs_delegate_admin = ZfsDelegateAdmin(module)
zfs_delegate_admin.run()
if __name__ == '__main__':
main()

View file

@ -0,0 +1,265 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Adam Števko <adam.stevko@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: zfs_facts
short_description: Gather facts about ZFS datasets.
description:
- Gather facts from ZFS dataset properties.
author: Adam Števko (@xen0l)
options:
name:
description:
- ZFS dataset name.
required: yes
aliases: [ "ds", "dataset" ]
recurse:
description:
- Specifies if properties for any children should be recursively
displayed.
type: bool
default: 'no'
parsable:
description:
- Specifies if property values should be displayed in machine
friendly format.
type: bool
default: 'no'
properties:
description:
- Specifies which dataset properties should be queried in comma-separated format.
              For more information about dataset properties, check the zfs(1M) man page.
default: all
aliases: [ "props" ]
type:
description:
            - Specifies which dataset types to display. Multiple values have to be
provided in comma-separated form.
choices: [ 'all', 'filesystem', 'volume', 'snapshot', 'bookmark' ]
default: all
depth:
description:
- Specifies recursion depth.
'''
EXAMPLES = '''
- name: Gather facts about ZFS dataset rpool/export/home
zfs_facts:
dataset: rpool/export/home
- name: Report space usage on ZFS filesystems under data/home
zfs_facts:
name: data/home
recurse: yes
type: filesystem
- debug:
msg: 'ZFS dataset {{ item.name }} consumes {{ item.used }} of disk space.'
with_items: '{{ ansible_zfs_datasets }}'
'''
RETURN = '''
name:
description: ZFS dataset name
returned: always
type: str
sample: rpool/var/spool
parsable:
    description: Whether parsable output should be provided in machine-friendly format.
returned: if 'parsable' is set to True
type: bool
sample: True
recurse:
    description: Whether to recurse over the ZFS dataset.
returned: if 'recurse' is set to True
type: bool
sample: True
zfs_datasets:
description: ZFS dataset facts
returned: always
type: str
sample:
{
"aclinherit": "restricted",
"aclmode": "discard",
"atime": "on",
"available": "43.8G",
"canmount": "on",
"casesensitivity": "sensitive",
"checksum": "on",
"compression": "off",
"compressratio": "1.00x",
"copies": "1",
"creation": "Thu Jun 16 11:37 2016",
"dedup": "off",
"devices": "on",
"exec": "on",
"filesystem_count": "none",
"filesystem_limit": "none",
"logbias": "latency",
"logicalreferenced": "18.5K",
"logicalused": "3.45G",
"mlslabel": "none",
"mounted": "yes",
"mountpoint": "/rpool",
"name": "rpool",
"nbmand": "off",
"normalization": "none",
"org.openindiana.caiman:install": "ready",
"primarycache": "all",
"quota": "none",
"readonly": "off",
"recordsize": "128K",
"redundant_metadata": "all",
"refcompressratio": "1.00x",
"referenced": "29.5K",
"refquota": "none",
"refreservation": "none",
"reservation": "none",
"secondarycache": "all",
"setuid": "on",
"sharenfs": "off",
"sharesmb": "off",
"snapdir": "hidden",
"snapshot_count": "none",
"snapshot_limit": "none",
"sync": "standard",
"type": "filesystem",
"used": "4.41G",
"usedbychildren": "4.41G",
"usedbydataset": "29.5K",
"usedbyrefreservation": "0",
"usedbysnapshots": "0",
"utf8only": "off",
"version": "5",
"vscan": "off",
"written": "29.5K",
"xattr": "on",
"zoned": "off"
}
'''
from collections import defaultdict
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
SUPPORTED_TYPES = ['all', 'filesystem', 'volume', 'snapshot', 'bookmark']
class ZFSFacts(object):
def __init__(self, module):
self.module = module
self.name = module.params['name']
self.recurse = module.params['recurse']
self.parsable = module.params['parsable']
self.properties = module.params['properties']
self.type = module.params['type']
self.depth = module.params['depth']
self._datasets = defaultdict(dict)
self.facts = []
def dataset_exists(self):
cmd = [self.module.get_bin_path('zfs')]
cmd.append('list')
cmd.append(self.name)
(rc, out, err) = self.module.run_command(cmd)
if rc == 0:
return True
else:
return False
def get_facts(self):
cmd = [self.module.get_bin_path('zfs')]
cmd.append('get')
cmd.append('-H')
if self.parsable:
cmd.append('-p')
if self.recurse:
cmd.append('-r')
if int(self.depth) != 0:
cmd.append('-d')
cmd.append('%s' % self.depth)
if self.type:
cmd.append('-t')
cmd.append(self.type)
cmd.append('-o')
cmd.append('name,property,value')
cmd.append(self.properties)
cmd.append(self.name)
(rc, out, err) = self.module.run_command(cmd)
if rc == 0:
for line in out.splitlines():
dataset, property, value = line.split('\t')
self._datasets[dataset].update({property: value})
for k, v in iteritems(self._datasets):
v.update({'name': k})
self.facts.append(v)
return {'ansible_zfs_datasets': self.facts}
else:
self.module.fail_json(msg='Error while trying to get facts about ZFS dataset: %s' % self.name,
stderr=err,
rc=rc)
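# For the "Report space usage" task in EXAMPLES (recurse: yes, type:
# filesystem, default depth and properties), get_facts() above would assemble
# roughly:
#   zfs get -H -r -t filesystem -o name,property,value all data/home
# and fold the tab-separated name/property/value rows into one dict per
# dataset.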
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True, aliases=['ds', 'dataset'], type='str'),
recurse=dict(required=False, default=False, type='bool'),
parsable=dict(required=False, default=False, type='bool'),
properties=dict(required=False, default='all', type='str'),
type=dict(required=False, default='all', type='str', choices=SUPPORTED_TYPES),
depth=dict(required=False, default=0, type='int')
),
supports_check_mode=True
)
zfs_facts = ZFSFacts(module)
result = {}
result['changed'] = False
result['name'] = zfs_facts.name
if zfs_facts.parsable:
result['parsable'] = zfs_facts.parsable
if zfs_facts.recurse:
result['recurse'] = zfs_facts.recurse
if zfs_facts.dataset_exists():
result['ansible_facts'] = zfs_facts.get_facts()
else:
module.fail_json(msg='ZFS dataset %s does not exist!' % zfs_facts.name)
module.exit_json(**result)
if __name__ == '__main__':
main()

View file

@ -0,0 +1,214 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Adam Števko <adam.stevko@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: zpool_facts
short_description: Gather facts about ZFS pools.
description:
- Gather facts from ZFS pool properties.
author: Adam Števko (@xen0l)
options:
name:
description:
- ZFS pool name.
aliases: [ "pool", "zpool" ]
required: false
parsable:
description:
- Specifies if property values should be displayed in machine
friendly format.
type: bool
default: False
required: false
properties:
description:
        - Specifies which pool properties should be queried in comma-separated format.
          For more information about pool properties, check the zpool(1M) man page.
aliases: [ "props" ]
default: all
required: false
'''
EXAMPLES = '''
# Gather facts about ZFS pool rpool
- zpool_facts: pool=rpool
# Gather space usage about all imported ZFS pools
- zpool_facts: properties='free,size'
- debug: msg='ZFS pool {{ item.name }} has {{ item.free }} free space out of {{ item.size }}.'
with_items: '{{ ansible_zfs_pools }}'
'''
RETURN = '''
ansible_facts:
description: Dictionary containing all the detailed information about the ZFS pool facts
returned: always
type: complex
contains:
ansible_zfs_pools:
description: ZFS pool facts
returned: always
type: str
sample:
{
"allocated": "3.46G",
"altroot": "-",
"autoexpand": "off",
"autoreplace": "off",
"bootfs": "rpool/ROOT/openindiana",
"cachefile": "-",
"capacity": "6%",
"comment": "-",
"dedupditto": "0",
"dedupratio": "1.00x",
"delegation": "on",
"expandsize": "-",
"failmode": "wait",
"feature@async_destroy": "enabled",
"feature@bookmarks": "enabled",
"feature@edonr": "enabled",
"feature@embedded_data": "active",
"feature@empty_bpobj": "active",
"feature@enabled_txg": "active",
"feature@extensible_dataset": "enabled",
"feature@filesystem_limits": "enabled",
"feature@hole_birth": "active",
"feature@large_blocks": "enabled",
"feature@lz4_compress": "active",
"feature@multi_vdev_crash_dump": "enabled",
"feature@sha512": "enabled",
"feature@skein": "enabled",
"feature@spacemap_histogram": "active",
"fragmentation": "3%",
"free": "46.3G",
"freeing": "0",
"guid": "15729052870819522408",
"health": "ONLINE",
"leaked": "0",
"listsnapshots": "off",
"name": "rpool",
"readonly": "off",
"size": "49.8G",
"version": "-"
}
name:
description: ZFS pool name
returned: always
type: str
sample: rpool
parsable:
    description: Whether parsable output should be provided in machine-friendly format.
returned: if 'parsable' is set to True
type: bool
sample: True
'''
from collections import defaultdict
from ansible.module_utils.six import iteritems
from ansible.module_utils.basic import AnsibleModule
class ZPoolFacts(object):
def __init__(self, module):
self.module = module
self.name = module.params['name']
self.parsable = module.params['parsable']
self.properties = module.params['properties']
self._pools = defaultdict(dict)
self.facts = []
def pool_exists(self):
cmd = [self.module.get_bin_path('zpool')]
cmd.append('list')
cmd.append(self.name)
(rc, out, err) = self.module.run_command(cmd)
if rc == 0:
return True
else:
return False
def get_facts(self):
cmd = [self.module.get_bin_path('zpool')]
cmd.append('get')
cmd.append('-H')
if self.parsable:
cmd.append('-p')
cmd.append('-o')
cmd.append('name,property,value')
cmd.append(self.properties)
if self.name:
cmd.append(self.name)
(rc, out, err) = self.module.run_command(cmd)
if rc == 0:
for line in out.splitlines():
pool, property, value = line.split('\t')
self._pools[pool].update({property: value})
for k, v in iteritems(self._pools):
v.update({'name': k})
self.facts.append(v)
return {'ansible_zfs_pools': self.facts}
else:
self.module.fail_json(msg='Error while trying to get facts about ZFS pool: %s' % self.name,
stderr=err,
rc=rc)
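# For the "space usage" example above (properties='free,size', no pool name),
# get_facts() would assemble roughly:
#   zpool get -H -o name,property,value free,size
# and fold the tab-separated name/property/value rows into one dict per pool.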
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=False, aliases=['pool', 'zpool'], type='str'),
parsable=dict(required=False, default=False, type='bool'),
properties=dict(required=False, default='all', type='str'),
),
supports_check_mode=True
)
zpool_facts = ZPoolFacts(module)
result = {}
result['changed'] = False
result['name'] = zpool_facts.name
if zpool_facts.parsable:
result['parsable'] = zpool_facts.parsable
if zpool_facts.name is not None:
if zpool_facts.pool_exists():
result['ansible_facts'] = zpool_facts.get_facts()
else:
module.fail_json(msg='ZFS pool %s does not exist!' % zpool_facts.name)
else:
result['ansible_facts'] = zpool_facts.get_facts()
module.exit_json(**result)
if __name__ == '__main__':
main()