Mirror of https://github.com/ansible-collections/community.general.git (synced 2025-07-25 14:20:22 -07:00)
Bulk autopep8 (modules)
As agreed in the 2017-12-07 Core meeting, bulk-fix PEP 8 issues across the modules. Generated using autopep8 1.3.3 (pycodestyle: 2.3.1):
autopep8 -r --max-line-length 160 --in-place --ignore E305,E402,E722,E741 lib/ansible/modules
Issues that autopep8 introduced were then fixed manually. Representative before/after examples are shown below, ahead of the diff.
This commit is contained in:
parent d13d7e9404
commit c57a7f05e1
314 changed files with 3462 additions and 3383 deletions
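The additions and deletions below are almost entirely mechanical whitespace fixes. A representative before/after pair, taken from the first hunks of the diff (E231: missing whitespace after a comma; E251: unexpected spaces around '=' for a keyword argument):

bdm = getattr(image,'block_device_mapping')      # before
bdm = getattr(image, 'block_device_mapping')     # after autopep8

owner = dict(required=False, default=None),      # before (keyword argument inside argument_spec.update(dict(...)))
owner=dict(required=False, default=None),        # after autopep8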
@@ -303,7 +303,7 @@ def get_block_device_mapping(image):
"""

bdm_dict = dict()
-bdm = getattr(image,'block_device_mapping')
+bdm = getattr(image, 'block_device_mapping')
for device_name in bdm.keys():
bdm_dict[device_name] = {
'size': bdm[device_name].size,
@@ -319,28 +319,28 @@ def get_block_device_mapping(image):
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
-owner = dict(required=False, default=None),
+owner=dict(required=False, default=None),
-ami_id = dict(required=False),
+ami_id=dict(required=False),
-ami_tags = dict(required=False, type='dict',
+ami_tags=dict(required=False, type='dict',
-aliases = ['search_tags', 'image_tags']),
+aliases=['search_tags', 'image_tags']),
-architecture = dict(required=False),
+architecture=dict(required=False),
-hypervisor = dict(required=False),
+hypervisor=dict(required=False),
-is_public = dict(required=False, type='bool'),
+is_public=dict(required=False, type='bool'),
-name = dict(required=False),
+name=dict(required=False),
-platform = dict(required=False),
+platform=dict(required=False),
-product_code = dict(required=False),
+product_code=dict(required=False),
-sort = dict(required=False, default=None,
+sort=dict(required=False, default=None,
choices=['name', 'description', 'tag', 'architecture', 'block_device_mapping', 'creationDate', 'hypervisor', 'is_public', 'location',
'owner_id', 'platform', 'root_device_name', 'root_device_type', 'state', 'virtualization_type']),
-sort_tag = dict(required=False),
+sort_tag=dict(required=False),
-sort_order = dict(required=False, default='ascending',
+sort_order=dict(required=False, default='ascending',
choices=['ascending', 'descending']),
-sort_start = dict(required=False),
+sort_start=dict(required=False),
-sort_end = dict(required=False),
+sort_end=dict(required=False),
-state = dict(required=False, default='available'),
+state=dict(required=False, default='available'),
-virtualization_type = dict(required=False),
+virtualization_type=dict(required=False),
-no_result_action = dict(required=False, default='success',
+no_result_action=dict(required=False, default='success',
-choices = ['success', 'fail']),
+choices=['success', 'fail']),
)
)

@@ -379,7 +379,7 @@ def main():
filter['image_id'] = ami_id
if ami_tags:
for tag in ami_tags:
-filter['tag:'+tag] = ami_tags[tag]
+filter['tag:' + tag] = ami_tags[tag]
if architecture:
filter['architecture'] = architecture
if hypervisor:
@@ -435,9 +435,9 @@ def main():
if sort == 'tag':
if not sort_tag:
module.fail_json(msg="'sort_tag' option must be given with 'sort=tag'")
-results.sort(key=lambda e: e['tags'][sort_tag], reverse=(sort_order=='descending'))
+results.sort(key=lambda e: e['tags'][sort_tag], reverse=(sort_order == 'descending'))
elif sort:
-results.sort(key=lambda e: e[sort], reverse=(sort_order=='descending'))
+results.sort(key=lambda e: e[sort], reverse=(sort_order == 'descending'))

try:
if sort and sort_start and sort_end:
@@ -74,12 +74,12 @@ def get_instance_info(instance):
# Get groups
groups = []
for group in instance.groups:
-groups.append({ 'id': group.id, 'name': group.name }.copy())
+groups.append({'id': group.id, 'name': group.name}.copy())

# Get interfaces
interfaces = []
for interface in instance.interfaces:
-interfaces.append({ 'id': interface.id, 'mac_address': interface.mac_address }.copy())
+interfaces.append({'id': interface.id, 'mac_address': interface.mac_address}.copy())

# If an instance is terminated, sourceDestCheck is no longer returned
try:
@@ -104,7 +104,7 @@ def get_instance_info(instance):

instance_profile = dict(instance.instance_profile) if instance.instance_profile is not None else None

-instance_info = { 'id': instance.id,
+instance_info = {'id': instance.id,
'kernel': instance.kernel,
'instance_profile': instance_profile,
'root_device_type': instance.root_device_type,
@@ -163,7 +163,7 @@ def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
-filters = dict(default=None, type='dict')
+filters=dict(default=None, type='dict')
)
)

@@ -189,6 +189,7 @@ def get_vpc_info(vpc):
'state': vpc.state,
})

+
def find_vpc(module, vpc_conn, vpc_id=None, cidr=None):
"""
Finds a VPC that matches a specific id or cidr + tags
@@ -211,7 +212,7 @@ def find_vpc(module, vpc_conn, vpc_id=None, cidr=None):

# Check for existing VPC by cidr_block or id
if vpc_id is not None:
-found_vpcs = vpc_conn.get_all_vpcs(None, {'vpc-id': vpc_id, 'state': 'available',})
+found_vpcs = vpc_conn.get_all_vpcs(None, {'vpc-id': vpc_id, 'state': 'available', })

else:
previous_vpcs = vpc_conn.get_all_vpcs(None, {'cidr': cidr, 'state': 'available'})
@@ -234,8 +235,8 @@ def find_vpc(module, vpc_conn, vpc_id=None, cidr=None):

return (found_vpc)

-def routes_match(rt_list=None, rt=None, igw=None):

+def routes_match(rt_list=None, rt=None, igw=None):
"""
Check if the route table has all routes as in given list

@@ -284,6 +285,7 @@ def routes_match(rt_list=None, rt=None, igw=None):
else:
return True

+
def rtb_changed(route_tables=None, vpc_conn=None, module=None, vpc=None, igw=None):
"""
Checks if the remote routes match the local routes.
@@ -299,7 +301,7 @@ def rtb_changed(route_tables=None, vpc_conn=None, module=None, vpc=None, igw=None):
False when both routes and subnet associations matched.

"""
-#We add a one for the main table
+# We add a one for the main table
rtb_len = len(route_tables) + 1
remote_rtb_len = len(vpc_conn.get_all_route_tables(filters={'vpc_id': vpc.id}))
if remote_rtb_len != rtb_len:
@@ -307,10 +309,10 @@ def rtb_changed(route_tables=None, vpc_conn=None, module=None, vpc=None, igw=None):
for rt in route_tables:
rt_id = None
for sn in rt['subnets']:
-rsn = vpc_conn.get_all_subnets(filters={'cidr': sn, 'vpc_id': vpc.id })
+rsn = vpc_conn.get_all_subnets(filters={'cidr': sn, 'vpc_id': vpc.id})
if len(rsn) != 1:
module.fail_json(
-msg='The subnet {0} to associate with route_table {1} ' \
+msg='The subnet {0} to associate with route_table {1} '
'does not exist, aborting'.format(sn, rt)
)
nrt = vpc_conn.get_all_route_tables(filters={'vpc_id': vpc.id, 'association.subnet-id': rsn[0].id})
@@ -388,10 +390,10 @@ def create_vpc(module, vpc_conn):
time.sleep(5)
if wait and wait_timeout <= time.time():
# waiting took too long
-module.fail_json(msg = "wait for vpc availability timeout on %s" % time.asctime())
+module.fail_json(msg="wait for vpc availability timeout on %s" % time.asctime())

except boto.exception.BotoServerError as e:
-module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
+module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

# Done with base VPC, now change to attributes and features.

@@ -408,7 +410,6 @@ def create_vpc(module, vpc_conn):
if new_tags:
vpc_conn.create_tags(vpc.id, new_tags)

-
# boto doesn't appear to have a way to determine the existing
# value of the dns attributes, so we just set them.
# It also must be done one at a time.
@@ -420,7 +421,7 @@ def create_vpc(module, vpc_conn):
if not isinstance(subnets, list):
module.fail_json(msg='subnets needs to be a list of cidr blocks')

-current_subnets = vpc_conn.get_all_subnets(filters={ 'vpc_id': vpc.id })
+current_subnets = vpc_conn.get_all_subnets(filters={'vpc_id': vpc.id})

# First add all new subnets
for subnet in subnets:
@@ -468,7 +469,7 @@ def create_vpc(module, vpc_conn):
# to create tags results in exception.
# boto doesn't seem to refresh 'state' of the newly created subnet, i.e.: it's always 'pending'
# so i resorted to polling vpc_conn.get_all_subnets with the id of the newly added subnet
-while len(vpc_conn.get_all_subnets(filters={ 'subnet-id': new_subnet.id })) == 0:
+while len(vpc_conn.get_all_subnets(filters={'subnet-id': new_subnet.id})) == 0:
time.sleep(0.1)

vpc_conn.create_tags(new_subnet.id, new_subnet_tags)
@@ -548,7 +549,7 @@ def create_vpc(module, vpc_conn):
if route['gw'] == 'igw':
if not internet_gateway:
module.fail_json(
-msg='You asked for an Internet Gateway ' \
+msg='You asked for an Internet Gateway '
'(igw) route, but you have no Internet Gateway'
)
route_kwargs['gateway_id'] = igw.id
@@ -564,10 +565,10 @@ def create_vpc(module, vpc_conn):

# Associate with subnets
for sn in rt['subnets']:
-rsn = vpc_conn.get_all_subnets(filters={'cidr': sn, 'vpc_id': vpc.id })
+rsn = vpc_conn.get_all_subnets(filters={'cidr': sn, 'vpc_id': vpc.id})
if len(rsn) != 1:
module.fail_json(
-msg='The subnet {0} to associate with route_table {1} ' \
+msg='The subnet {0} to associate with route_table {1} '
'does not exist, aborting'.format(sn, rt)
)
rsn = rsn[0]
@@ -576,7 +577,7 @@ def create_vpc(module, vpc_conn):
old_rt = vpc_conn.get_all_route_tables(
filters={'association.subnet_id': rsn.id, 'vpc_id': vpc.id}
)
-old_rt = [ x for x in old_rt if x.id is not None ]
+old_rt = [x for x in old_rt if x.id is not None]
if len(old_rt) == 1:
old_rt = old_rt[0]
association_id = None
@@ -591,7 +592,7 @@ def create_vpc(module, vpc_conn):
changed = True
except EC2ResponseError as e:
module.fail_json(
-msg='Unable to create and associate route table {0}, error: ' \
+msg='Unable to create and associate route table {0}, error: '
'{1}'.format(rt, e)
)

@@ -625,7 +626,7 @@ def create_vpc(module, vpc_conn):

created_vpc_id = vpc.id
returned_subnets = []
-current_subnets = vpc_conn.get_all_subnets(filters={ 'vpc_id': vpc.id })
+current_subnets = vpc_conn.get_all_subnets(filters={'vpc_id': vpc.id})

for sn in current_subnets:
returned_subnets.append({
@@ -647,6 +648,7 @@ def create_vpc(module, vpc_conn):

return (vpc_dict, created_vpc_id, returned_subnets, igw_id, changed)

+
def terminate_vpc(module, vpc_conn, vpc_id=None, cidr=None):
"""
Terminates a VPC
@@ -671,8 +673,8 @@ def terminate_vpc(module, vpc_conn, vpc_id=None, cidr=None):

if vpc is not None:
if vpc.state == 'available':
-terminated_vpc_id=vpc.id
+terminated_vpc_id = vpc.id
-vpc_dict=get_vpc_info(vpc)
+vpc_dict = get_vpc_info(vpc)
try:
subnets = vpc_conn.get_all_subnets(filters={'vpc_id': vpc.id})
for sn in subnets:
@@ -709,18 +711,18 @@ def terminate_vpc(module, vpc_conn, vpc_id=None, cidr=None):
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
-cidr_block = dict(),
+cidr_block=dict(),
-instance_tenancy = dict(choices=['default', 'dedicated'], default='default'),
+instance_tenancy=dict(choices=['default', 'dedicated'], default='default'),
-wait = dict(type='bool', default=False),
+wait=dict(type='bool', default=False),
-wait_timeout = dict(default=300),
+wait_timeout=dict(default=300),
-dns_support = dict(type='bool', default=True),
+dns_support=dict(type='bool', default=True),
-dns_hostnames = dict(type='bool', default=True),
+dns_hostnames=dict(type='bool', default=True),
-subnets = dict(type='list'),
+subnets=dict(type='list'),
-vpc_id = dict(),
+vpc_id=dict(),
-internet_gateway = dict(type='bool', default=False),
+internet_gateway=dict(type='bool', default=False),
-resource_tags = dict(type='dict', required=True),
+resource_tags=dict(type='dict', required=True),
-route_tables = dict(type='list'),
+route_tables=dict(type='list'),
-state = dict(choices=['present', 'absent'], default='present'),
+state=dict(choices=['present', 'absent'], default='present'),
)
)

@@ -740,7 +742,7 @@ def main():
try:
vpc_conn = connect_to_aws(boto.vpc, region, **aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound as e:
-module.fail_json(msg = str(e))
+module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")

@@ -120,6 +120,7 @@ try:
except ImportError:
HAS_BOTO3 = False

+
def get_arn_from_kms_alias(kms, aliasname):
ret = kms.list_aliases()
key_id = None
@@ -138,12 +139,14 @@ def get_arn_from_kms_alias(kms, aliasname):
return k['KeyArn']
raise Exception('could not find key from id: {}'.format(key_id))

+
def get_arn_from_role_name(iam, rolename):
ret = iam.get_role(RoleName=rolename)
if ret.get('Role') and ret['Role'].get('Arn'):
return ret['Role']['Arn']
raise Exception('could not find arn for name {}.'.format(rolename))

+
def do_grant(kms, keyarn, role_arn, granttypes, mode='grant', dry_run=True, clean_invalid_entries=True):
ret = {}
keyret = kms.get_key_policy(KeyId=keyarn, PolicyName='default')
@@ -210,6 +213,7 @@ def do_grant(kms, keyarn, role_arn, granttypes, mode='grant', dry_run=True, clean_invalid_entries=True):

return ret

+
def assert_policy_shape(policy):
'''Since the policy seems a little, uh, fragile, make sure we know approximately what we're looking at.'''
errors = []
@@ -218,7 +222,7 @@ def assert_policy_shape(policy):

found_statement_type = {}
for statement in policy['Statement']:
-for label,sidlabel in statement_label.items():
+for label, sidlabel in statement_label.items():
if statement['Sid'] == sidlabel:
found_statement_type[label] = True

@@ -230,16 +234,17 @@ def assert_policy_shape(policy):
raise Exception('Problems asserting policy shape. Cowardly refusing to modify it: {}'.format(' '.join(errors)))
return None

+
def main():
argument_spec = ansible.module_utils.ec2.ec2_argument_spec()
argument_spec.update(dict(
-mode = dict(choices=['grant', 'deny'], default='grant'),
+mode=dict(choices=['grant', 'deny'], default='grant'),
-key_alias = dict(required=False, type='str'),
+key_alias=dict(required=False, type='str'),
-key_arn = dict(required=False, type='str'),
+key_arn=dict(required=False, type='str'),
-role_name = dict(required=False, type='str'),
+role_name=dict(required=False, type='str'),
-role_arn = dict(required=False, type='str'),
+role_arn=dict(required=False, type='str'),
-grant_types = dict(required=False, type='list'),
+grant_types=dict(required=False, type='list'),
-clean_invalid_entries = dict(type='bool', default=True),
+clean_invalid_entries=dict(type='bool', default=True),
)
)

@@ -255,7 +260,6 @@ def main():
result = {}
mode = module.params['mode']

-
try:
region, ec2_url, aws_connect_kwargs = ansible.module_utils.ec2.get_aws_connection_info(module, boto3=True)
kms = ansible.module_utils.ec2.boto3_conn(module, conn_type='client', resource='kms', region=region, endpoint=ec2_url, **aws_connect_kwargs)
@@ -263,7 +267,6 @@ def main():
except botocore.exceptions.NoCredentialsError as e:
module.fail_json(msg='cannot connect to AWS', exception=traceback.format_exc())

-
try:
if module.params['key_alias'] and not module.params['key_arn']:
module.params['key_arn'] = get_arn_from_kms_alias(kms, module.params['key_alias'])
@@ -263,7 +263,7 @@ from ansible.module_utils._text import to_bytes, to_native

def get_stack_events(cfn, stack_name, token_filter=None):
'''This event data was never correct, it worked as a side effect. So the v2.3 format is different.'''
-ret = {'events':[], 'log':[]}
+ret = {'events': [], 'log': []}

try:
pg = cfn.get_paginator(
@@ -426,7 +426,7 @@ def stack_operation(cfn, stack_name, operation, op_token=None):
ret.update({'changed': True, 'output': 'Stack Deleted'})
return ret
else:
-ret.update({'changed': False, 'failed': True, 'output' : 'Stack not found.'})
+ret.update({'changed': False, 'failed': True, 'output': 'Stack not found.'})
return ret
# it covers ROLLBACK_COMPLETE and UPDATE_ROLLBACK_COMPLETE
# Possible states: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-describing-stacks.html#w1ab2c15c17c21c13
@@ -435,7 +435,7 @@ def stack_operation(cfn, stack_name, operation, op_token=None):
return ret
# note the ordering of ROLLBACK_COMPLETE and COMPLETE, because otherwise COMPLETE will match both cases.
elif stack['StackStatus'].endswith('_COMPLETE'):
-ret.update({'changed': True, 'output' : 'Stack %s complete' % operation })
+ret.update({'changed': True, 'output': 'Stack %s complete' % operation})
return ret
elif stack['StackStatus'].endswith('_ROLLBACK_FAILED'):
ret.update({'changed': True, 'failed': True, 'output': 'Stack %s rollback failed' % operation})
@@ -447,7 +447,7 @@ def stack_operation(cfn, stack_name, operation, op_token=None):
else:
# this can loop forever :/
time.sleep(5)
-return {'failed': True, 'output':'Failed for unknown reasons.'}
+return {'failed': True, 'output': 'Failed for unknown reasons.'}


def build_changeset_name(stack_params):
@@ -496,7 +496,7 @@ def get_stack_facts(cfn, stack_name):
try:
stack_response = cfn.describe_stacks(StackName=stack_name)
stack_info = stack_response['Stacks'][0]
-except (botocore.exceptions.ValidationError,botocore.exceptions.ClientError) as err:
+except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err:
error_msg = boto_exception(err)
if 'does not exist' in error_msg:
# missing stack, don't bail.
@@ -567,7 +567,7 @@ def main():
stack_params['StackPolicyBody'] = open(module.params['stack_policy'], 'r').read()

template_parameters = module.params['template_parameters']
-stack_params['Parameters'] = [{'ParameterKey':k, 'ParameterValue':str(v)} for k, v in template_parameters.items()]
+stack_params['Parameters'] = [{'ParameterKey': k, 'ParameterValue': str(v)} for k, v in template_parameters.items()]

if isinstance(module.params.get('tags'), dict):
stack_params['Tags'] = ansible.module_utils.ec2.ansible_dict_to_boto3_tag_list(module.params['tags'])
@@ -259,7 +259,7 @@ class CloudFrontServiceManager:

def get_distribution(self, distribution_id):
try:
-func = partial(self.client.get_distribution,Id=distribution_id)
+func = partial(self.client.get_distribution, Id=distribution_id)
return self.paginated_response(func)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error describing distribution - " + str(e),
@@ -268,7 +268,7 @@ class CloudFrontServiceManager:

def get_distribution_config(self, distribution_id):
try:
-func = partial(self.client.get_distribution_config,Id=distribution_id)
+func = partial(self.client.get_distribution_config, Id=distribution_id)
return self.paginated_response(func)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error describing distribution configuration - " + str(e),
@@ -277,7 +277,7 @@ class CloudFrontServiceManager:

def get_origin_access_identity(self, origin_access_identity_id):
try:
-func = partial(self.client.get_cloud_front_origin_access_identity,Id=origin_access_identity_id)
+func = partial(self.client.get_cloud_front_origin_access_identity, Id=origin_access_identity_id)
return self.paginated_response(func)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error describing origin access identity - " + str(e),
@@ -286,7 +286,7 @@ class CloudFrontServiceManager:

def get_origin_access_identity_config(self, origin_access_identity_id):
try:
-func = partial(self.client.get_cloud_front_origin_access_identity_config,Id=origin_access_identity_id)
+func = partial(self.client.get_cloud_front_origin_access_identity_config, Id=origin_access_identity_id)
return self.paginated_response(func)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error describing origin access identity configuration - " + str(e),
@@ -295,7 +295,7 @@ class CloudFrontServiceManager:

def get_invalidation(self, distribution_id, invalidation_id):
try:
-func = partial(self.client.get_invalidation,DistributionId=distribution_id,Id=invalidation_id)
+func = partial(self.client.get_invalidation, DistributionId=distribution_id, Id=invalidation_id)
return self.paginated_response(func)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error describing invalidation - " + str(e),
@@ -304,7 +304,7 @@ class CloudFrontServiceManager:

def get_streaming_distribution(self, distribution_id):
try:
-func = partial(self.client.get_streaming_distribution,Id=distribution_id)
+func = partial(self.client.get_streaming_distribution, Id=distribution_id)
return self.paginated_response(func)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error describing streaming distribution - " + str(e),
@@ -313,7 +313,7 @@ class CloudFrontServiceManager:

def get_streaming_distribution_config(self, distribution_id):
try:
-func = partial(self.client.get_streaming_distribution_config,Id=distribution_id)
+func = partial(self.client.get_streaming_distribution_config, Id=distribution_id)
return self.paginated_response(func)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error describing streaming distribution - " + str(e),
@@ -399,13 +399,13 @@ class CloudFrontServiceManager:

def summary_get_origin_access_identity_list(self):
try:
-origin_access_identity_list = { 'origin_access_identities': [] }
+origin_access_identity_list = {'origin_access_identities': []}
origin_access_identities = self.list_origin_access_identities()
for origin_access_identity in origin_access_identities:
oai_id = origin_access_identity['Id']
oai_full_response = self.get_origin_access_identity(oai_id)
-oai_summary = { 'Id': oai_id, 'ETag': oai_full_response['ETag'] }
+oai_summary = {'Id': oai_id, 'ETag': oai_full_response['ETag']}
-origin_access_identity_list['origin_access_identities'].append( oai_summary )
+origin_access_identity_list['origin_access_identities'].append(oai_summary)
return origin_access_identity_list
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error generating summary of origin access identities - " + str(e),
@@ -415,8 +415,8 @@ class CloudFrontServiceManager:
def summary_get_distribution_list(self, streaming=False):
try:
list_name = 'streaming_distributions' if streaming else 'distributions'
-key_list = ['Id', 'ARN', 'Status', 'LastModifiedTime', 'DomainName', 'Comment', 'PriceClass', 'Enabled' ]
+key_list = ['Id', 'ARN', 'Status', 'LastModifiedTime', 'DomainName', 'Comment', 'PriceClass', 'Enabled']
-distribution_list = { list_name: [] }
+distribution_list = {list_name: []}
distributions = self.list_streaming_distributions(False) if streaming else self.list_distributions(False)
for dist in distributions:
temp_distribution = {}
@@ -520,16 +520,18 @@ class CloudFrontServiceManager:
if 'Items' in item['Aliases']:
aliases = item['Aliases']['Items']
for alias in aliases:
-keyed_list.update( { alias: item } )
+keyed_list.update({alias: item})
-keyed_list.update( { distribution_id: item } )
+keyed_list.update({distribution_id: item})
return keyed_list

+
def set_facts_for_distribution_id_and_alias(details, facts, distribution_id, aliases):
facts[distribution_id].update(details)
for alias in aliases:
facts[alias].update(details)
return facts

+
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
@@ -581,7 +583,7 @@ def main():
summary = module.params.get('summary')

aliases = []
-result = { 'cloudfront': {} }
+result = {'cloudfront': {}}
facts = {}

require_distribution_id = (distribution or distribution_config or invalidation or streaming_distribution or
@@ -611,21 +613,21 @@ def main():

# set appropriate cloudfront id
if distribution_id and not list_invalidations:
-facts = { distribution_id: {} }
+facts = {distribution_id: {}}
aliases = service_mgr.get_aliases_from_distribution_id(distribution_id)
for alias in aliases:
-facts.update( { alias: {} } )
+facts.update({alias: {}})
if invalidation_id:
-facts.update( { invalidation_id: {} } )
+facts.update({invalidation_id: {}})
elif distribution_id and list_invalidations:
-facts = { distribution_id: {} }
+facts = {distribution_id: {}}
aliases = service_mgr.get_aliases_from_distribution_id(distribution_id)
for alias in aliases:
-facts.update( { alias: {} } )
+facts.update({alias: {}})
elif origin_access_identity_id:
-facts = { origin_access_identity_id: {} }
+facts = {origin_access_identity_id: {}}
elif web_acl_id:
-facts = { web_acl_id: {} }
+facts = {web_acl_id: {}}

# get details based on options
if distribution:
@@ -644,7 +646,7 @@ def main():
if streaming_distribution_config:
facts_to_set = service_mgr.get_streaming_distribution_config(distribution_id)
if list_invalidations:
-facts_to_set = {'invalidations': service_mgr.list_invalidations(distribution_id) }
+facts_to_set = {'invalidations': service_mgr.list_invalidations(distribution_id)}
if 'facts_to_set' in vars():
facts = set_facts_for_distribution_id_and_alias(facts_to_set, facts, distribution_id, aliases)

@@ -223,7 +223,6 @@ def create_or_update_dynamo_table(connection, module, boto3_dynamodb=None, boto3
try:
table = Table(table_name, connection=connection)

-
if dynamo_table_exists(table):
result['changed'] = update_dynamo_table(table, throughput=throughput, check_mode=module.check_mode, global_indexes=global_indexes)
else:
@@ -397,6 +396,7 @@ def validate_index(index, module):
if index['type'] not in INDEX_TYPE_OPTIONS:
module.fail_json(msg='%s is not a valid index type, must be one of %s' % (index['type'], INDEX_TYPE_OPTIONS))

+
def get_indexes(all_indexes):
indexes = []
global_indexes = []
@@ -429,7 +429,6 @@ def get_indexes(all_indexes):
return indexes, global_indexes

-

def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
@@ -442,8 +441,8 @@ def main():
read_capacity=dict(default=1, type='int'),
write_capacity=dict(default=1, type='int'),
indexes=dict(default=[], type='list'),
-tags = dict(type='dict'),
+tags=dict(type='dict'),
-wait_for_active_timeout = dict(default=60, type='int'),
+wait_for_active_timeout=dict(default=60, type='int'),
))

module = AnsibleModule(
@@ -158,7 +158,6 @@ except ImportError:
HAS_BOTO3 = False

-

def copy_image(module, ec2):
"""
Copies an AMI
@@ -185,7 +184,7 @@ def copy_image(module, ec2):
if module.params.get('tags'):
ec2.create_tags(
Resources=[image_id],
-Tags=[{'Key' : k, 'Value': v} for k,v in module.params.get('tags').items()]
+Tags=[{'Key': k, 'Value': v} for k, v in module.params.get('tags').items()]
)

module.exit_json(changed=True, image_id=image_id)
@@ -171,7 +171,7 @@ class ElbManager:
found = False
for lb in self.lbs:
if lb.name == lbtest:
-found=True
+found = True
break
return found

@@ -330,7 +330,7 @@ def main():
argument_spec.update(dict(
state={'required': True},
instance_id={'required': True},
-ec2_elbs={'default': None, 'required': False, 'type':'list'},
+ec2_elbs={'default': None, 'required': False, 'type': 'list'},
enable_availability_zone={'default': True, 'required': False, 'type': 'bool'},
wait={'required': False, 'default': True, 'type': 'bool'},
wait_timeout={'required': False, 'default': 0, 'type': 'int'}
@@ -363,7 +363,7 @@ def main():
if ec2_elbs is not None:
for elb in ec2_elbs:
if not elb_man.exists(elb):
-msg="ELB %s does not exist" % elb
+msg = "ELB %s does not exist" % elb
module.fail_json(msg=msg)

if module.params['state'] == 'present':
@@ -425,6 +425,7 @@ def _throttleable_operation(max_retries):
return _do_op
return _operation_wrapper

+
def _get_vpc_connection(module, region, aws_connect_params):
try:
return connect_to_aws(boto.vpc, region, **aws_connect_params)
@@ -434,6 +435,7 @@ def _get_vpc_connection(module, region, aws_connect_params):

_THROTTLING_RETRIES = 5

+
class ElbManager(object):
"""Handles ELB creation and destruction"""

@@ -579,10 +581,10 @@ class ElbManager(object):

# status of instances behind the ELB
if info['instances']:
-info['instance_health'] = [ dict(
+info['instance_health'] = [dict(
-instance_id = instance_state.instance_id,
+instance_id=instance_state.instance_id,
-reason_code = instance_state.reason_code,
+reason_code=instance_state.reason_code,
-state = instance_state.state
+state=instance_state.state
) for instance_state in self.elb_conn.describe_instance_health(self.name)]
else:
info['instance_health'] = []
@@ -663,7 +665,7 @@ class ElbManager(object):

elb_interfaces = self.ec2_conn.get_all_network_interfaces(
filters={'attachment.instance-owner-id': 'amazon-elb',
-'description': 'ELB {0}'.format(self.name) })
+'description': 'ELB {0}'.format(self.name)})

for x in range(0, max_retries):
for interface in elb_interfaces:
@@ -1005,7 +1007,7 @@ class ElbManager(object):
return __file__.split('/')[-1].split('.')[0].replace('_', '-') + '-' + policy_type

def _create_policy(self, policy_param, policy_meth, policy):
-getattr(self.elb_conn, policy_meth )(policy_param, self.elb.name, policy)
+getattr(self.elb_conn, policy_meth)(policy_param, self.elb.name, policy)

def _delete_policy(self, elb_name, policy):
self.elb_conn.delete_lb_policy(elb_name, policy)
@@ -1223,7 +1225,7 @@ class ElbManager(object):
params['Tags.member.%d.Value' % (i + 1)] = dictact[key]

self.elb_conn.make_request('AddTags', params)
-self.changed=True
+self.changed = True

# Remove extra tags
dictact = dict(set(tagdict.items()) - set(self.tags.items()))
@@ -1232,7 +1234,7 @@ class ElbManager(object):
params['Tags.member.%d.Key' % (i + 1)] = key

self.elb_conn.make_request('RemoveTags', params)
-self.changed=True
+self.changed = True

def _get_health_check_target(self):
"""Compose target string from healthcheck parameters"""
@@ -1275,7 +1277,7 @@ def main():

module = AnsibleModule(
argument_spec=argument_spec,
-mutually_exclusive = [['security_group_ids', 'security_group_names']]
+mutually_exclusive=[['security_group_ids', 'security_group_names']]
)

if not HAS_BOTO:
@@ -1333,10 +1335,10 @@ def main():
if isinstance(group_name, string_types):
group_name = [group_name]

-group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ]
+group_id = [str(grp.id) for grp in grp_details if str(grp.name) in group_name]
security_group_ids.extend(group_id)
except boto.exception.NoAuthHandlerFound as e:
-module.fail_json(msg = str(e))
+module.fail_json(msg=str(e))

elb_man = ElbManager(module, name, listeners, purge_listeners, zones,
purge_zones, security_group_ids, health_check,
@@ -99,7 +99,7 @@ def get_eni_info(interface):
# Private addresses
private_addresses = []
for ip in interface.private_ip_addresses:
-private_addresses.append({ 'private_ip_address': ip.private_ip_address, 'primary_address': ip.primary })
+private_addresses.append({'private_ip_address': ip.private_ip_address, 'primary_address': ip.primary})

interface_info = {'id': interface.id,
'subnet_id': interface.subnet_id,
@@ -152,7 +152,7 @@ def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
-filters = dict(default=None, type='dict')
+filters=dict(default=None, type='dict')
)
)

@@ -191,7 +191,7 @@ def list_launch_configs(connection, module):
launch_config['CreatedTime'] = str(launch_config['CreatedTime'])

if sort:
-snaked_launch_configs.sort(key=lambda e: e[sort], reverse=(sort_order=='descending'))
+snaked_launch_configs.sort(key=lambda e: e[sort], reverse=(sort_order == 'descending'))

try:
if sort and sort_start and sort_end:
@@ -210,13 +210,13 @@ def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
-name = dict(required=False, default=[], type='list'),
+name=dict(required=False, default=[], type='list'),
-sort = dict(required=False, default=None,
+sort=dict(required=False, default=None,
choices=['launch_configuration_name', 'image_id', 'created_time', 'instance_type', 'kernel_id', 'ramdisk_id', 'key_name']),
-sort_order = dict(required=False, default='ascending',
+sort_order=dict(required=False, default='ascending',
choices=['ascending', 'descending']),
-sort_start = dict(required=False),
+sort_start=dict(required=False),
-sort_end = dict(required=False),
+sort_end=dict(required=False),
)
)

@@ -199,13 +199,13 @@ def create_metric_alarm(connection, module):
alarm = alarms[0]
changed = False

-for attr in ('comparison','metric','namespace','statistic','threshold','period','evaluation_periods','unit','description'):
+for attr in ('comparison', 'metric', 'namespace', 'statistic', 'threshold', 'period', 'evaluation_periods', 'unit', 'description'):
if getattr(alarm, attr) != module.params.get(attr):
changed = True
setattr(alarm, attr, module.params.get(attr))
-#this is to deal with a current bug where you cannot assign '<=>' to the comparator when modifying an existing alarm
+# this is to deal with a current bug where you cannot assign '<=>' to the comparator when modifying an existing alarm
comparison = alarm.comparison
-comparisons = {'<=' : 'LessThanOrEqualToThreshold', '<' : 'LessThanThreshold', '>=' : 'GreaterThanOrEqualToThreshold', '>' : 'GreaterThanThreshold'}
+comparisons = {'<=': 'LessThanOrEqualToThreshold', '<': 'LessThanThreshold', '>=': 'GreaterThanOrEqualToThreshold', '>': 'GreaterThanThreshold'}
alarm.comparison = comparisons[comparison]

dim1 = module.params.get('dimensions')
@@ -215,10 +215,10 @@ def create_metric_alarm(connection, module):
if not isinstance(dim1[keys], list):
dim1[keys] = [dim1[keys]]
if keys not in dim2 or dim1[keys] != dim2[keys]:
-changed=True
+changed = True
setattr(alarm, 'dimensions', dim1)

-for attr in ('alarm_actions','insufficient_data_actions','ok_actions'):
+for attr in ('alarm_actions', 'insufficient_data_actions', 'ok_actions'):
action = module.params.get(attr) or []
# Boto and/or ansible may provide same elements in lists but in different order.
# Compare on sets since they do not need any order.
@@ -252,6 +252,7 @@ def create_metric_alarm(connection, module):
threshold=result.threshold,
unit=result.unit)

+
def delete_metric_alarm(connection, module):
name = module.params.get('name')

@@ -88,7 +88,7 @@ def create_scaling_policy(connection, module):
 min_adjustment_step = module.params.get('min_adjustment_step')
 cooldown = module.params.get('cooldown')

-scalingPolicies = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])
+scalingPolicies = connection.get_all_policies(as_group=asg_name, policy_names=[sp_name])

 if not scalingPolicies:
 sp = ScalingPolicy(

@@ -101,7 +101,7 @@ def create_scaling_policy(connection, module):

 try:
 connection.create_scaling_policy(sp)
-policy = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])[0]
+policy = connection.get_all_policies(as_group=asg_name, policy_names=[sp_name])[0]
 module.exit_json(changed=True, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment,
 cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step)
 except BotoServerError as e:

@@ -121,7 +121,7 @@ def create_scaling_policy(connection, module):
 setattr(policy, 'min_adjustment_step', module.params.get('min_adjustment_step'))

 # check the remaining attributes
-for attr in ('adjustment_type','scaling_adjustment','cooldown'):
+for attr in ('adjustment_type', 'scaling_adjustment', 'cooldown'):
 if getattr(policy, attr) != module.params.get(attr):
 changed = True
 setattr(policy, attr, module.params.get(attr))

@@ -129,7 +129,7 @@ def create_scaling_policy(connection, module):
 try:
 if changed:
 connection.create_scaling_policy(policy)
-policy = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])[0]
+policy = connection.get_all_policies(as_group=asg_name, policy_names=[sp_name])[0]
 module.exit_json(changed=changed, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment,
 cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step)
 except BotoServerError as e:

@@ -140,7 +140,7 @@ def delete_scaling_policy(connection, module):
 sp_name = module.params.get('name')
 asg_name = module.params.get('asg_name')

-scalingPolicies = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])
+scalingPolicies = connection.get_all_policies(as_group=asg_name, policy_names=[sp_name])

 if scalingPolicies:
 try:

@@ -156,12 +156,12 @@ def main():
 argument_spec = ec2_argument_spec()
 argument_spec.update(
 dict(
-name = dict(required=True, type='str'),
+name=dict(required=True, type='str'),
-adjustment_type = dict(type='str', choices=['ChangeInCapacity','ExactCapacity','PercentChangeInCapacity']),
+adjustment_type=dict(type='str', choices=['ChangeInCapacity', 'ExactCapacity', 'PercentChangeInCapacity']),
-asg_name = dict(required=True, type='str'),
+asg_name=dict(required=True, type='str'),
-scaling_adjustment = dict(type='int'),
+scaling_adjustment=dict(type='int'),
-min_adjustment_step = dict(type='int'),
+min_adjustment_step=dict(type='int'),
-cooldown = dict(type='int'),
+cooldown=dict(type='int'),
 state=dict(default='present', choices=['present', 'absent']),
 )
 )

@@ -178,7 +178,7 @@ def main():
 try:
 connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
 except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
-module.fail_json(msg = str(e))
+module.fail_json(msg=str(e))

 if state == 'present':
 create_scaling_policy(connection, module)

@@ -193,7 +193,7 @@ def create_snapshot(module, ec2, state=None, description=None, wait=None,
 try:
 volumes = ec2.get_all_volumes(filters={'attachment.instance-id': instance_id, 'attachment.device': device_name})
 except boto.exception.BotoServerError as e:
-module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
+module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

 if not volumes:
 module.fail_json(msg="Could not find volume with name %s attached to instance %s" % (device_name, instance_id))

@@ -202,7 +202,7 @@ def create_snapshot(module, ec2, state=None, description=None, wait=None,

 if state == 'absent':
 if not snapshot_id:
-module.fail_json(msg = 'snapshot_id must be set when state is absent')
+module.fail_json(msg='snapshot_id must be set when state is absent')
 try:
 ec2.delete_snapshot(snapshot_id)
 except boto.exception.BotoServerError as e:

@@ -210,7 +210,7 @@ def create_snapshot(module, ec2, state=None, description=None, wait=None,
 if e.error_code == 'InvalidSnapshot.NotFound':
 module.exit_json(changed=False)
 else:
-module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
+module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

 # successful delete
 module.exit_json(changed=True)

@@ -249,16 +249,16 @@ def create_snapshot_ansible_module():
 argument_spec = ec2_argument_spec()
 argument_spec.update(
 dict(
-volume_id = dict(),
+volume_id=dict(),
-description = dict(),
+description=dict(),
-instance_id = dict(),
+instance_id=dict(),
-snapshot_id = dict(),
+snapshot_id=dict(),
-device_name = dict(),
+device_name=dict(),
-wait = dict(type='bool', default=True),
+wait=dict(type='bool', default=True),
-wait_timeout = dict(type='int', default=0),
+wait_timeout=dict(type='int', default=0),
-last_snapshot_min_age = dict(type='int', default=0),
+last_snapshot_min_age=dict(type='int', default=0),
-snapshot_tags = dict(type='dict', default=dict()),
+snapshot_tags=dict(type='dict', default=dict()),
-state = dict(choices=['absent', 'present'], default='present'),
+state=dict(choices=['absent', 'present'], default='present'),
 )
 )
 module = AnsibleModule(argument_spec=argument_spec)

@@ -126,9 +126,9 @@ from ansible.module_utils.ec2 import HAS_BOTO, ec2_argument_spec, ec2_connect
 def main():
 argument_spec = ec2_argument_spec()
 argument_spec.update(dict(
-resource = dict(required=True),
+resource=dict(required=True),
-tags = dict(type='dict'),
+tags=dict(type='dict'),
-state = dict(default='present', choices=['present', 'absent', 'list']),
+state=dict(default='present', choices=['present', 'absent', 'list']),
 )
 )
 module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

@@ -144,7 +144,7 @@ def main():

 # We need a comparison here so that we can accurately report back changed status.
 # Need to expand the gettags return format and compare with "tags" and then tag or detag as appropriate.
-filters = {'resource-id' : resource}
+filters = {'resource-id': resource}
 gettags = ec2.get_all_tags(filters=filters)

 dictadd = {}

@@ -158,14 +158,14 @@ def main():
 if not tags:
 module.fail_json(msg="tags argument is required when state is present")
 if set(tags.items()).issubset(set(tagdict.items())):
-module.exit_json(msg="Tags already exists in %s." %resource, changed=False)
+module.exit_json(msg="Tags already exists in %s." % resource, changed=False)
 else:
 for (key, value) in set(tags.items()):
 if (key, value) not in set(tagdict.items()):
 dictadd[key] = value
 if not module.check_mode:
 ec2.create_tags(resource, dictadd)
-module.exit_json(msg="Tags %s created for resource %s." % (dictadd,resource), changed=True)
+module.exit_json(msg="Tags %s created for resource %s." % (dictadd, resource), changed=True)

 if state == 'absent':
 if not tags:

@@ -180,7 +180,7 @@ def main():
 dictremove[key] = value
 if not module.check_mode:
 ec2.delete_tags(resource, dictremove)
-module.exit_json(msg="Tags %s removed for resource %s." % (dictremove,resource), changed=True)
+module.exit_json(msg="Tags %s removed for resource %s." % (dictremove, resource), changed=True)

 if state == 'list':
 module.exit_json(changed=False, tags=tagdict)

@@ -280,7 +280,7 @@ def get_volume(module, ec2):
 try:
 vols = ec2.get_all_volumes(volume_ids=volume_ids, filters=filters)
 except boto.exception.BotoServerError as e:
-module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
+module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

 if not vols:
 if id:

@@ -306,7 +306,7 @@ def get_volumes(module, ec2):
 else:
 vols = ec2.get_all_volumes(filters={'attachment.instance-id': instance})
 except boto.exception.BotoServerError as e:
-module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
+module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
 return vols


@@ -330,6 +330,7 @@ def boto_supports_volume_encryption():
 """
 return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.29.0')


 def boto_supports_kms_key_id():
 """
 Check if Boto library supports kms_key_ids (added in 2.39.0)

@@ -339,6 +340,7 @@ def boto_supports_kms_key_id():
 """
 return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.39.0')


 def create_volume(module, ec2, zone):
 changed = False
 name = module.params.get('name')

@@ -375,7 +377,7 @@ def create_volume(module, ec2, zone):
 if tags:
 ec2.create_tags([volume.id], tags)
 except boto.exception.BotoServerError as e:
-module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
+module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

 return volume, changed


@@ -400,12 +402,12 @@ def attach_volume(module, ec2, volume, instance):
 else:
 device_name = '/dev/xvdf'
 except boto.exception.BotoServerError as e:
-module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
+module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

 if volume.attachment_state() is not None:
 adata = volume.attach_data
 if adata.instance_id != instance.id:
-module.fail_json(msg = "Volume %s is already attached to another instance: %s"
+module.fail_json(msg="Volume %s is already attached to another instance: %s"
 % (volume.id, adata.instance_id))
 else:
 # Volume is already attached to right instance

@@ -418,7 +420,7 @@ def attach_volume(module, ec2, volume, instance):
 volume.update()
 changed = True
 except boto.exception.BotoServerError as e:
-module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
+module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

 modify_dot_attribute(module, ec2, instance, device_name)


@@ -435,7 +437,7 @@ def modify_dot_attribute(module, ec2, instance, device_name):
 instance.update()
 dot = instance.block_device_mapping[device_name].delete_on_termination
 except boto.exception.BotoServerError as e:
-module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
+module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

 if delete_on_termination != dot:
 try:

@@ -450,7 +452,7 @@ def modify_dot_attribute(module, ec2, instance, device_name):
 instance.update()
 changed = True
 except boto.exception.BotoServerError as e:
-module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
+module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

 return changed


@@ -506,20 +508,20 @@ def get_volume_info(volume, state):
 def main():
 argument_spec = ec2_argument_spec()
 argument_spec.update(dict(
-instance = dict(),
+instance=dict(),
-id = dict(),
+id=dict(),
-name = dict(),
+name=dict(),
-volume_size = dict(),
+volume_size=dict(),
-volume_type = dict(choices=['standard', 'gp2', 'io1', 'st1', 'sc1'], default='standard'),
+volume_type=dict(choices=['standard', 'gp2', 'io1', 'st1', 'sc1'], default='standard'),
-iops = dict(),
+iops=dict(),
-encrypted = dict(type='bool', default=False),
+encrypted=dict(type='bool', default=False),
-kms_key_id = dict(),
+kms_key_id=dict(),
-device_name = dict(),
+device_name=dict(),
-delete_on_termination = dict(type='bool', default=False),
+delete_on_termination=dict(type='bool', default=False),
-zone = dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']),
+zone=dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']),
-snapshot = dict(),
+snapshot=dict(),
-state = dict(choices=['absent', 'present', 'list'], default='present'),
+state=dict(choices=['absent', 'present', 'list'], default='present'),
-tags = dict(type='dict', default={})
+tags=dict(type='dict', default={})
 )
 )
 module = AnsibleModule(argument_spec=argument_spec)

@@ -98,6 +98,7 @@ def get_volume_info(volume):

 return volume_info


 def list_ec2_volumes(connection, module):

 filters = module.params.get("filters")

@@ -118,7 +119,7 @@ def main():
 argument_spec = ec2_argument_spec()
 argument_spec.update(
 dict(
-filters = dict(default=None, type='dict')
+filters=dict(default=None, type='dict')
 )
 )

@@ -235,6 +235,7 @@ def ensure_tags(module, vpc_conn, resource_id, tags, add_only, check_mode):
 except EC2ResponseError as e:
 module.fail_json(msg="Failed to modify tags: %s" % e.message, exception=traceback.format_exc())


 def fetch_dhcp_options_for_vpc(vpc_conn, vpc_id):
 """
 Returns the DHCP options object currently associated with the requested VPC ID using the VPC

@@ -312,17 +313,17 @@ def main():
 new_options['ntp-servers'] = params['ntp_servers']
 if params['domain_name'] is not None:
 # needs to be a list for comparison with boto objects later
-new_options['domain-name'] = [ params['domain_name'] ]
+new_options['domain-name'] = [params['domain_name']]
 if params['netbios_node_type'] is not None:
 # needs to be a list for comparison with boto objects later
-new_options['netbios-node-type'] = [ str(params['netbios_node_type']) ]
+new_options['netbios-node-type'] = [str(params['netbios_node_type'])]
 # If we were given a vpc_id then we need to look at the options on that
 if params['vpc_id']:
 existing_options = fetch_dhcp_options_for_vpc(connection, params['vpc_id'])
 # if we've been asked to inherit existing options, do that now
 if params['inherit_existing']:
 if existing_options:
-for option in [ 'domain-name-servers', 'netbios-name-servers', 'ntp-servers', 'domain-name', 'netbios-node-type']:
+for option in ['domain-name-servers', 'netbios-name-servers', 'ntp-servers', 'domain-name', 'netbios-node-type']:
 if existing_options.options.get(option) and new_options[option] != [] and (not new_options[option] or [''] == new_options[option]):
 new_options[option] = existing_options.options.get(option)

@@ -336,7 +337,7 @@ def main():
 # Now let's cover the case where there are existing options that we were told about by id
 # If a dhcp_options_id was supplied we don't look at options inside, just set tags (if given)
 else:
-supplied_options = connection.get_all_dhcp_options(filters={'dhcp-options-id':params['dhcp_options_id']})
+supplied_options = connection.get_all_dhcp_options(filters={'dhcp-options-id': params['dhcp_options_id']})
 if len(supplied_options) != 1:
 if params['state'] != 'absent':
 module.fail_json(msg=" a dhcp_options_id was supplied, but does not exist")

@@ -159,7 +159,7 @@ DEFAULT_EGRESS = dict(list(DEFAULT_RULE_FIELDS.items()) + [('Egress', True)])
 PROTOCOL_NUMBERS = {'all': -1, 'icmp': 1, 'tcp': 6, 'udp': 17, }


-#Utility methods
+# Utility methods
 def icmp_present(entry):
 if len(entry) == 6 and entry[1] == 'icmp' or entry[1] == 1:
 return True

@@ -225,7 +225,7 @@ def nacls_changed(nacl, client, module):
 nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
 nacl = describe_network_acl(client, module)
 entries = nacl['NetworkAcls'][0]['Entries']
-tmp_egress = [entry for entry in entries if entry['Egress'] is True and DEFAULT_EGRESS !=entry]
+tmp_egress = [entry for entry in entries if entry['Egress'] is True and DEFAULT_EGRESS != entry]
 tmp_ingress = [entry for entry in entries if entry['Egress'] is False]
 egress = [rule for rule in tmp_egress if DEFAULT_EGRESS != rule]
 ingress = [rule for rule in tmp_ingress if DEFAULT_INGRESS != rule]

@@ -321,7 +321,7 @@ def construct_acl_entries(nacl, client, module):
 create_network_acl_entry(params, client, module)


-## Module invocations
+# Module invocations
 def setup_network_acl(client, module):
 changed = False
 nacl = describe_network_acl(client, module)

@@ -372,7 +372,7 @@ def remove_network_acl(client, module):
 return changed, result


-#Boto3 client methods
+# Boto3 client methods
 def create_network_acl(vpc_id, client, module):
 try:
 if module.check_mode:

@@ -154,9 +154,9 @@ def vpc_exists(module, vpc, name, cidr_block, multi):
 matched_vpc = None

 try:
-matching_vpcs=vpc.get_all_vpcs(filters={'tag:Name' : name, 'cidr-block' : cidr_block})
+matching_vpcs = vpc.get_all_vpcs(filters={'tag:Name': name, 'cidr-block': cidr_block})
 except Exception as e:
-e_msg=boto_exception(e)
+e_msg = boto_exception(e)
 module.fail_json(msg=e_msg)

 if multi:

@@ -186,7 +186,7 @@ def update_vpc_tags(vpc, module, vpc_obj, tags, name):
 else:
 return False
 except Exception as e:
-e_msg=boto_exception(e)
+e_msg = boto_exception(e)
 module.fail_json(msg=e_msg)


@@ -199,6 +199,7 @@ def update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
 else:
 return False


 def get_vpc_values(vpc_obj):

 if vpc_obj is not None:

@@ -213,18 +214,19 @@ def get_vpc_values(vpc_obj):
 else:
 return None


 def main():
-argument_spec=ec2_argument_spec()
+argument_spec = ec2_argument_spec()
 argument_spec.update(dict(
-name = dict(type='str', default=None, required=True),
+name=dict(type='str', default=None, required=True),
-cidr_block = dict(type='str', default=None, required=True),
+cidr_block=dict(type='str', default=None, required=True),
-tenancy = dict(choices=['default', 'dedicated'], default='default'),
+tenancy=dict(choices=['default', 'dedicated'], default='default'),
-dns_support = dict(type='bool', default=True),
+dns_support=dict(type='bool', default=True),
-dns_hostnames = dict(type='bool', default=True),
+dns_hostnames=dict(type='bool', default=True),
-dhcp_opts_id = dict(type='str', default=None, required=False),
+dhcp_opts_id=dict(type='str', default=None, required=False),
-tags = dict(type='dict', required=False, default=None, aliases=['resource_tags']),
+tags=dict(type='dict', required=False, default=None, aliases=['resource_tags']),
-state = dict(choices=['present', 'absent'], default='present'),
+state=dict(choices=['present', 'absent'], default='present'),
-multi_ok = dict(type='bool', default=False)
+multi_ok=dict(type='bool', default=False)
 )
 )

@@ -236,17 +238,17 @@ def main():
 if not HAS_BOTO:
 module.fail_json(msg='boto is required for this module')

-name=module.params.get('name')
+name = module.params.get('name')
-cidr_block=module.params.get('cidr_block')
+cidr_block = module.params.get('cidr_block')
-tenancy=module.params.get('tenancy')
+tenancy = module.params.get('tenancy')
-dns_support=module.params.get('dns_support')
+dns_support = module.params.get('dns_support')
-dns_hostnames=module.params.get('dns_hostnames')
+dns_hostnames = module.params.get('dns_hostnames')
-dhcp_id=module.params.get('dhcp_opts_id')
+dhcp_id = module.params.get('dhcp_opts_id')
-tags=module.params.get('tags')
+tags = module.params.get('tags')
-state=module.params.get('state')
+state = module.params.get('state')
-multi=module.params.get('multi_ok')
+multi = module.params.get('multi_ok')

-changed=False
+changed = False

 region, ec2_url, aws_connect_params = get_aws_connection_info(module)


@@ -298,7 +300,7 @@ def main():
 connection.modify_vpc_attribute(vpc_obj.id, enable_dns_support=dns_support)
 connection.modify_vpc_attribute(vpc_obj.id, enable_dns_hostnames=dns_hostnames)
 except BotoServerError as e:
-e_msg=boto_exception(e)
+e_msg = boto_exception(e)
 module.fail_json(msg=e_msg)

 if not module.check_mode:

@@ -306,7 +308,7 @@ def main():
 try:
 vpc_obj = connection.get_all_vpcs(vpc_obj.id)[0]
 except BotoServerError as e:
-e_msg=boto_exception(e)
+e_msg = boto_exception(e)
 module.fail_json(msg=e_msg)

 module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj))

@@ -373,7 +373,7 @@ def main():
 client = boto3_conn(module, conn_type='client', resource='ec2',
 region=region, endpoint=ec2_url, **aws_connect_kwargs)
 except botocore.exceptions.NoCredentialsError as e:
-module.fail_json(msg="Can't authorize connection - "+str(e))
+module.fail_json(msg="Can't authorize connection - " + str(e))

 if state == 'present':
 (changed, results) = create_peer_connection(client, module)

@@ -147,6 +147,7 @@ def get_vgw_info(vgws):

 return vgw_info


 def wait_for_status(client, module, vpn_gateway_id, status):
 polling_increment_secs = 15
 max_retries = (module.params.get('wait_timeout') // polling_increment_secs)

@@ -227,7 +228,7 @@ def delete_vgw(client, module, vpn_gateway_id):
 except botocore.exceptions.ClientError as e:
 module.fail_json(msg=to_native(e), exception=traceback.format_exc())

-#return the deleted VpnGatewayId as this is not included in the above response
+# return the deleted VpnGatewayId as this is not included in the above response
 result = vpn_gateway_id
 return result

@@ -236,7 +237,7 @@ def create_tags(client, module, vpn_gateway_id):
 params = dict()

 try:
-response = client.create_tags(Resources=[vpn_gateway_id],Tags=load_tags(module))
+response = client.create_tags(Resources=[vpn_gateway_id], Tags=load_tags(module))
 except botocore.exceptions.ClientError as e:
 module.fail_json(msg=to_native(e), exception=traceback.format_exc())

@@ -295,7 +296,7 @@ def check_tags(client, module, existing_vgw, vpn_gateway_id):
 changed = False
 tags_list = {}

-#format tags for comparison
+# format tags for comparison
 for tags in existing_vgw[0]['Tags']:
 if tags['Key'] != 'Name':
 tags_list[tags['Key']] = tags['Value']

@@ -307,7 +308,7 @@ def check_tags(client, module, existing_vgw, vpn_gateway_id):
 vgw = find_vgw(client, module)
 changed = True

-#if no tag args are supplied, delete any existing tags with the exception of the name tag
+# if no tag args are supplied, delete any existing tags with the exception of the name tag
 if params['Tags'] is None and tags_list != {}:
 tags_to_delete = []
 for tags in existing_vgw[0]['Tags']:

@@ -502,7 +503,7 @@ def ensure_vgw_absent(client, module):
 deleted_vgw = "Nothing to do"

 else:
-#Check that a name and type argument has been supplied if no vgw-id
+# Check that a name and type argument has been supplied if no vgw-id
 if not module.params.get('name') or not module.params.get('type'):
 module.fail_json(msg='A name and type is required when no vgw-id and a status of \'absent\' is suppled')

@@ -518,7 +519,7 @@ def ensure_vgw_absent(client, module):
 # detach the vpc from the vgw
 detach_vgw(client, module, vpn_gateway_id, params['VpcId'])

-#now that the vpc has been detached, delete the vgw
+# now that the vpc has been detached, delete the vgw
 deleted_vgw = delete_vgw(client, module, vpn_gateway_id)
 changed = True

@@ -528,7 +529,7 @@ def ensure_vgw_absent(client, module):
 detach_vgw(client, module, vpn_gateway_id, vpc_to_detach)
 changed = True

-#now that the vpc has been detached, delete the vgw
+# now that the vpc has been detached, delete the vgw
 deleted_vgw = delete_vgw(client, module, vpn_gateway_id)

 else:

@@ -126,7 +126,7 @@ def list_virtual_gateways(client, module):
 try:
 all_virtual_gateways = client.describe_vpn_gateways(**params)
 except botocore.exceptions.ClientError as e:
-module.fail_json(msg=str(e),exception=traceback.format_exc())
+module.fail_json(msg=str(e), exception=traceback.format_exc())

 snaked_vgws = [camel_dict_to_snake_dict(get_virtual_gateway_info(vgw))
 for vgw in all_virtual_gateways['VpnGateways']]

@@ -138,8 +138,8 @@ def main():
 argument_spec = ec2_argument_spec()
 argument_spec.update(
 dict(
-filters = dict(type='dict', default=dict()),
+filters=dict(type='dict', default=dict()),
-vpn_gateway_ids = dict(type='list', default=None)
+vpn_gateway_ids=dict(type='list', default=None)
 )
 )

@@ -153,7 +153,7 @@ def main():
 region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
 connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
 except botocore.exceptions.NoCredentialsError as e:
-module.fail_json(msg="Can't authorize connection - "+str(e))
+module.fail_json(msg="Can't authorize connection - " + str(e))

 # call your function here
 results = list_virtual_gateways(connection, module)

@@ -119,11 +119,11 @@ BACKEND = default_backend()
 def main():
 argument_spec = ec2_argument_spec()
 argument_spec.update(dict(
-instance_id = dict(required=True),
+instance_id=dict(required=True),
-key_file = dict(required=True, type='path'),
+key_file=dict(required=True, type='path'),
-key_passphrase = dict(no_log=True, default=None, required=False),
+key_passphrase=dict(no_log=True, default=None, required=False),
-wait = dict(type='bool', default=False, required=False),
+wait=dict(type='bool', default=False, required=False),
-wait_timeout = dict(default=120, required=False),
+wait_timeout=dict(default=120, required=False),
 )
 )
 module = AnsibleModule(argument_spec=argument_spec)

@@ -158,18 +158,18 @@ def main():
 decoded = b64decode(data)

 if wait and datetime.datetime.now() >= end:
-module.fail_json(msg = "wait for password timeout after %d seconds" % wait_timeout)
+module.fail_json(msg="wait for password timeout after %d seconds" % wait_timeout)

 try:
 f = open(key_file, 'rb')
 except IOError as e:
-module.fail_json(msg = "I/O error (%d) opening key file: %s" % (e.errno, e.strerror))
+module.fail_json(msg="I/O error (%d) opening key file: %s" % (e.errno, e.strerror))
 else:
 try:
 with f:
 key = load_pem_private_key(f.read(), b_key_passphrase, BACKEND)
 except (ValueError, TypeError) as e:
-module.fail_json(msg = "unable to parse key file")
+module.fail_json(msg="unable to parse key file")

 try:
 decrypted = key.decrypt(decoded, PKCS1v15())

@@ -144,34 +144,35 @@ class EcsClusterManager:
 response = self.ecs.describe_clusters(clusters=[
 cluster_name
 ])
-if len(response['failures'])>0:
+if len(response['failures']) > 0:
 c = self.find_in_array(response['failures'], cluster_name, 'arn')
-if c and c['reason']=='MISSING':
+if c and c['reason'] == 'MISSING':
 return None
 # fall thru and look through found ones
-if len(response['clusters'])>0:
+if len(response['clusters']) > 0:
 c = self.find_in_array(response['clusters'], cluster_name)
 if c:
 return c
 raise Exception("Unknown problem describing cluster %s." % cluster_name)

-def create_cluster(self, clusterName = 'default'):
+def create_cluster(self, clusterName='default'):
 response = self.ecs.create_cluster(clusterName=clusterName)
 return response['cluster']

 def delete_cluster(self, clusterName):
 return self.ecs.delete_cluster(cluster=clusterName)


 def main():

 argument_spec = ec2_argument_spec()
 argument_spec.update(dict(
-state=dict(required=True, choices=['present', 'absent', 'has_instances'] ),
+state=dict(required=True, choices=['present', 'absent', 'has_instances']),
-name=dict(required=True, type='str' ),
+name=dict(required=True, type='str'),
 delay=dict(required=False, type='int', default=10),
 repeat=dict(required=False, type='int', default=10)
 ))
-required_together = ( ['state', 'name'] )
+required_together = (['state', 'name'])

 module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together)

@@ -185,12 +186,12 @@ def main():
 try:
 existing = cluster_mgr.describe_cluster(module.params['name'])
 except Exception as e:
-module.fail_json(msg="Exception describing cluster '"+module.params['name']+"': "+str(e))
+module.fail_json(msg="Exception describing cluster '" + module.params['name'] + "': " + str(e))

 results = dict(changed=False)
 if module.params['state'] == 'present':
-if existing and 'status' in existing and existing['status']=="ACTIVE":
+if existing and 'status' in existing and existing['status'] == "ACTIVE":
-results['cluster']=existing
+results['cluster'] = existing
 else:
 if not module.check_mode:
 # doesn't exist. create it.

@@ -205,7 +206,7 @@ def main():
 # it exists, so we should delete it and mark changed.
 # return info about the cluster deleted
 results['cluster'] = existing
-if 'status' in existing and existing['status']=="INACTIVE":
+if 'status' in existing and existing['status'] == "INACTIVE":
 results['changed'] = False
 else:
 if not module.check_mode:

@@ -213,7 +214,7 @@ def main():
 results['changed'] = True
 elif module.params['state'] == 'has_instances':
 if not existing:
-module.fail_json(msg="Cluster '"+module.params['name']+" not found.")
+module.fail_json(msg="Cluster '" + module.params['name'] + " not found.")
 return
 # it exists, so we should delete it and mark changed.
 # return info about the cluster deleted

@@ -228,8 +229,8 @@ def main():
 results['changed'] = True
 break
 time.sleep(delay)
-if count == 0 and i is repeat-1:
+if count == 0 and i is repeat - 1:
-module.fail_json(msg="Cluster instance count still zero after "+str(repeat)+" tries of "+str(delay)+" seconds each.")
+module.fail_json(msg="Cluster instance count still zero after " + str(repeat) + " tries of " + str(delay) + " seconds each.")
 return

 module.exit_json(**results)

@@ -308,13 +308,13 @@ class EcsServiceManager:
 cluster=cluster_name,
 services=[service_name])
 msg = ''
-if len(response['failures'])>0:
+if len(response['failures']) > 0:
 c = self.find_in_array(response['failures'], service_name, 'arn')
 msg += ", failure reason is " + c['reason']
-if c and c['reason']=='MISSING':
+if c and c['reason'] == 'MISSING':
 return None
 # fall thru and look through found ones
-if len(response['services'])>0:
+if len(response['services']) > 0:
 c = self.find_in_array(response['services'], service_name)
 if c:
 return c

@@ -426,7 +426,7 @@ def main():

 matching = False
 update = False
-if existing and 'status' in existing and existing['status']=="ACTIVE":
+if existing and 'status' in existing and existing['status'] == "ACTIVE":
 if service_mgr.is_matching_service(module.params, existing):
 matching = True
 results['service'] = service_mgr.jsonize(existing)

@@ -479,7 +479,7 @@ def main():
 del existing['deployments']
 del existing['events']
 results['ansible_facts'] = existing
-if 'status' in existing and existing['status']=="INACTIVE":
+if 'status' in existing and existing['status'] == "INACTIVE":
 results['changed'] = False
 else:
 if not module.check_mode:

@@ -170,17 +170,17 @@ class EcsServiceManager:
 if cluster and cluster is not None:
 fn_args['cluster'] = cluster
 response = self.ecs.list_services(**fn_args)
-relevant_response = dict(services = response['serviceArns'])
+relevant_response = dict(services=response['serviceArns'])
 return relevant_response

 def describe_services(self, cluster, services):
 fn_args = dict()
 if cluster and cluster is not None:
 fn_args['cluster'] = cluster
-fn_args['services']=services.split(",")
+fn_args['services'] = services.split(",")
 response = self.ecs.describe_services(**fn_args)
-relevant_response = dict(services = map(self.extract_service_from, response['services']))
+relevant_response = dict(services=map(self.extract_service_from, response['services']))
-if 'failures' in response and len(response['failures'])>0:
+if 'failures' in response and len(response['failures']) > 0:
 relevant_response['services_not_running'] = response['failures']
 return relevant_response

@@ -199,13 +199,14 @@ class EcsServiceManager:
 e['createdAt'] = str(e['createdAt'])
 return service


 def main():

 argument_spec = ec2_argument_spec()
 argument_spec.update(dict(
-details=dict(required=False, type='bool', default=False ),
+details=dict(required=False, type='bool', default=False),
-cluster=dict(required=False, type='str' ),
+cluster=dict(required=False, type='str'),
-service=dict(required=False, type='str' )
+service=dict(required=False, type='str')
 ))

 module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

@@ -186,7 +186,7 @@ class EcsExecManager:
 family=service_name,
 desiredStatus=status
 )
-if len(response['taskArns'])>0:
+if len(response['taskArns']) > 0:
 for c in response['taskArns']:
 if c.endswith(service_name):
 return c

@@ -209,13 +209,13 @@ class EcsExecManager:
 if cluster:
 args['cluster'] = cluster
 if task_definition:
-args['taskDefinition']=task_definition
+args['taskDefinition'] = task_definition
 if overrides:
-args['overrides']=overrides
+args['overrides'] = overrides
 if container_instances:
-args['containerInstances']=container_instances
+args['containerInstances'] = container_instances
 if startedBy:
-args['startedBy']=startedBy
+args['startedBy'] = startedBy
 response = self.ecs.start_task(**args)
 # include tasks and failures
 return response['tasks']

@@ -224,17 +224,18 @@ class EcsExecManager:
 response = self.ecs.stop_task(cluster=cluster, task=task)
 return response['task']


 def main():
 argument_spec = ec2_argument_spec()
 argument_spec.update(dict(
-operation=dict(required=True, choices=['run', 'start', 'stop'] ),
+operation=dict(required=True, choices=['run', 'start', 'stop']),
-cluster=dict(required=False, type='str' ), # R S P
+cluster=dict(required=False, type='str'), # R S P
-task_definition=dict(required=False, type='str' ), # R* S*
+task_definition=dict(required=False, type='str'), # R* S*
 overrides=dict(required=False, type='dict'), # R S
-count=dict(required=False, type='int' ), # R
+count=dict(required=False, type='int'), # R
-task=dict(required=False, type='str' ), # P*
+task=dict(required=False, type='str'), # P*
 container_instances=dict(required=False, type='list'), # S*
-started_by=dict(required=False, type='str' ) # R S
+started_by=dict(required=False, type='str') # R S
 ))

 module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

@@ -276,7 +277,7 @@ def main():
 if module.params['operation'] == 'run':
 if existing:
 # TBD - validate the rest of the details
-results['task']=existing
+results['task'] = existing
 else:
 if not module.check_mode:
 results['task'] = service_mgr.run_task(

@@ -290,7 +291,7 @@ def main():
 elif module.params['operation'] == 'start':
 if existing:
 # TBD - validate the rest of the details
-results['task']=existing
+results['task'] = existing
 else:
 if not module.check_mode:
 results['task'] = service_mgr.start_task(

@@ -304,7 +305,7 @@ def main():

 elif module.params['operation'] == 'stop':
 if existing:
-results['task']=existing
+results['task'] = existing
 else:
 if not module.check_mode:
 # it exists, so we should delete it and mark changed.

@@ -131,6 +131,7 @@ from ansible.module_utils.basic import AnsibleModule
 from ansible.module_utils.ec2 import boto3_conn, camel_dict_to_snake_dict, ec2_argument_spec, get_aws_connection_info
 from ansible.module_utils._text import to_text


 class EcsTaskManager:
 """Handles ECS Tasks"""

@@ -76,10 +76,10 @@ from ansible.module_utils.ec2 import HAS_BOTO, ec2_argument_spec, get_aws_connec
 def main():
 argument_spec = ec2_argument_spec()
 argument_spec.update(dict(
-state = dict(required=True, choices=['present', 'absent']),
+state=dict(required=True, choices=['present', 'absent']),
-name = dict(required=True),
+name=dict(required=True),
-description = dict(required=False),
+description=dict(required=False),
-subnets = dict(required=False, type='list'),
+subnets=dict(required=False, type='list'),
 )
 )
 module = AnsibleModule(argument_spec=argument_spec)

@@ -95,18 +95,17 @@ def main():
 if state == 'present':
 for required in ['name', 'description', 'subnets']:
 if not module.params.get(required):
-module.fail_json(msg = str("Parameter %s required for state='present'" % required))
+module.fail_json(msg=str("Parameter %s required for state='present'" % required))
 else:
 for not_allowed in ['description', 'subnets']:
 if module.params.get(not_allowed):
-module.fail_json(msg = str("Parameter %s not allowed for state='absent'" % not_allowed))
+module.fail_json(msg=str("Parameter %s not allowed for state='absent'" % not_allowed))

 # Retrieve any AWS settings from the environment.
 region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)

 if not region:
-module.fail_json(msg = str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set."))
+module.fail_json(msg=str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set."))


 """Get an elasticache connection"""
 try:

@@ -123,7 +122,7 @@ def main():
 exists = len(matching_groups) > 0
 except BotoServerError as e:
 if e.error_code != 'CacheSubnetGroupNotFoundFault':
-module.fail_json(msg = e.error_message)
+module.fail_json(msg=e.error_message)

 if state == 'absent':
 if exists:

@@ -139,7 +138,7 @@ def main():

 except BotoServerError as e:
 if e.error_message != 'No modifications were requested.':
-module.fail_json(msg = e.error_message)
+module.fail_json(msg=e.error_message)
 else:
 changed = False

@@ -657,7 +657,6 @@ def compare_listeners(connection, module, current_listeners, new_listeners, purg


def compare_rules(connection, module, current_listeners, listener):

"""
Compare rules and return rules to add, rules to modify and rules to remove
Rules are compared based on priority

@@ -425,6 +425,7 @@ def _throttleable_operation(max_retries):
return _do_op
return _operation_wrapper


def _get_vpc_connection(module, region, aws_connect_params):
try:
return connect_to_aws(boto.vpc, region, **aws_connect_params)
@@ -434,6 +435,7 @@ def _get_vpc_connection(module, region, aws_connect_params):

_THROTTLING_RETRIES = 5


class ElbManager(object):
"""Handles ELB creation and destruction"""

@@ -579,10 +581,10 @@ class ElbManager(object):

# status of instances behind the ELB
if info['instances']:
-info['instance_health'] = [ dict(
+info['instance_health'] = [dict(
-instance_id = instance_state.instance_id,
+instance_id=instance_state.instance_id,
-reason_code = instance_state.reason_code,
+reason_code=instance_state.reason_code,
-state = instance_state.state
+state=instance_state.state
) for instance_state in self.elb_conn.describe_instance_health(self.name)]
else:
info['instance_health'] = []
@@ -663,7 +665,7 @@ class ElbManager(object):

elb_interfaces = self.ec2_conn.get_all_network_interfaces(
filters={'attachment.instance-owner-id': 'amazon-elb',
-'description': 'ELB {0}'.format(self.name) })
+'description': 'ELB {0}'.format(self.name)})

for x in range(0, max_retries):
for interface in elb_interfaces:
@@ -1005,7 +1007,7 @@ class ElbManager(object):
return __file__.split('/')[-1].split('.')[0].replace('_', '-') + '-' + policy_type

def _create_policy(self, policy_param, policy_meth, policy):
-getattr(self.elb_conn, policy_meth )(policy_param, self.elb.name, policy)
+getattr(self.elb_conn, policy_meth)(policy_param, self.elb.name, policy)

def _delete_policy(self, elb_name, policy):
self.elb_conn.delete_lb_policy(elb_name, policy)
@@ -1223,7 +1225,7 @@ class ElbManager(object):
params['Tags.member.%d.Value' % (i + 1)] = dictact[key]

self.elb_conn.make_request('AddTags', params)
-self.changed=True
+self.changed = True

# Remove extra tags
dictact = dict(set(tagdict.items()) - set(self.tags.items()))
@@ -1232,7 +1234,7 @@ class ElbManager(object):
params['Tags.member.%d.Key' % (i + 1)] = key

self.elb_conn.make_request('RemoveTags', params)
-self.changed=True
+self.changed = True

def _get_health_check_target(self):
"""Compose target string from healthcheck parameters"""
@@ -1275,7 +1277,7 @@ def main():

module = AnsibleModule(
argument_spec=argument_spec,
-mutually_exclusive = [['security_group_ids', 'security_group_names']]
+mutually_exclusive=[['security_group_ids', 'security_group_names']]
)

if not HAS_BOTO:
@@ -1333,10 +1335,10 @@ def main():
if isinstance(group_name, string_types):
group_name = [group_name]

-group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ]
+group_id = [str(grp.id) for grp in grp_details if str(grp.name) in group_name]
security_group_ids.extend(group_id)
except boto.exception.NoAuthHandlerFound as e:
-module.fail_json(msg = str(e))
+module.fail_json(msg=str(e))

elb_man = ElbManager(module, name, listeners, purge_listeners, zones,
purge_zones, security_group_ids, health_check,
@@ -167,7 +167,7 @@ class ElbManager:
found = False
for lb in self.lbs:
if lb.name == lbtest:
-found=True
+found = True
break
return found

@@ -326,7 +326,7 @@ def main():
argument_spec.update(dict(
state={'required': True},
instance_id={'required': True},
-ec2_elbs={'default': None, 'required': False, 'type':'list'},
+ec2_elbs={'default': None, 'required': False, 'type': 'list'},
enable_availability_zone={'default': True, 'required': False, 'type': 'bool'},
wait={'required': False, 'default': True, 'type': 'bool'},
wait_timeout={'required': False, 'default': 0, 'type': 'int'}
@@ -359,7 +359,7 @@ def main():
if ec2_elbs is not None:
for elb in ec2_elbs:
if not elb_man.exists(elb):
-msg="ELB %s does not exist" % elb
+msg = "ELB %s does not exist" % elb
module.fail_json(msg=msg)

if module.params['state'] == 'present':

@@ -153,13 +153,13 @@ from ansible.module_utils._text import to_native
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
-name = dict(),
+name=dict(),
-function_arn = dict(),
+function_arn=dict(),
-wait = dict(default=True, type='bool'),
+wait=dict(default=True, type='bool'),
-tail_log = dict(default=False, type='bool'),
+tail_log=dict(default=False, type='bool'),
-dry_run = dict(default=False, type='bool'),
+dry_run=dict(default=False, type='bool'),
-version_qualifier = dict(),
+version_qualifier=dict(),
-payload = dict(default={}, type='dict'),
+payload=dict(default={}, type='dict'),
))
module = AnsibleModule(
argument_spec=argument_spec,
@@ -247,7 +247,7 @@ def main():
module.fail_json(msg="Unexpected failure while invoking Lambda function",
exception=traceback.format_exc())

-results ={
+results = {
'logs': '',
'status': response['StatusCode'],
'output': '',
@@ -240,8 +240,8 @@ def create_user(module, iam, name, pwd, path, key_state, key_count):
if key_count:
while key_count > key_qty:
keys.append(iam.create_access_key(
-user_name=name).create_access_key_response.\
+user_name=name).create_access_key_response.
-create_access_key_result.\
+create_access_key_result.
access_key)
key_qty += 1
else:
@@ -447,7 +447,7 @@ def update_user(module, iam, name, new_name, new_path, key_state, key_count, key


def set_users_groups(module, iam, name, groups, updated=None,
new_name=None):
""" Sets groups for a user, will purge groups not explicitly passed, while
retaining pre-existing groups that also are in the new list.
"""
@@ -526,6 +526,7 @@ def delete_group(module=None, iam=None, name=None):
changed = True
return changed, name


def update_group(module=None, iam=None, name=None, new_name=None, new_path=None):
changed = False
try:

@@ -232,7 +232,7 @@ def role_action(module, iam, name, policy_name, skip, pdoc, state):
def group_action(module, iam, name, policy_name, skip, pdoc, state):
policy_match = False
changed = False
-msg=''
+msg = ''
try:
current_policies = [cp for cp in iam.get_all_group_policies(name).
list_group_policies_result.
@@ -243,7 +243,7 @@ def group_action(module, iam, name, policy_name, skip, pdoc, state):
get_group_policy_result.policy_document) == pdoc:
policy_match = True
matching_policies.append(pol)
-msg=("The policy document you specified already exists "
+msg = ("The policy document you specified already exists "
"under the name %s." % pol)
if state == 'present':
# If policy document does not already exist (either it's changed
@@ -317,7 +317,7 @@ def main():
except Exception as e:
module.fail_json(msg='Failed to convert the policy into valid JSON: %s' % str(e))
else:
-pdoc=None
+pdoc = None

region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
@@ -80,10 +80,10 @@ from ansible.module_utils.ec2 import HAS_BOTO, connect_to_aws, ec2_argument_spec
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
-state = dict(required=True, choices=['present', 'absent']),
+state=dict(required=True, choices=['present', 'absent']),
-name = dict(required=True),
+name=dict(required=True),
-description = dict(required=False),
+description=dict(required=False),
-subnets = dict(required=False, type='list'),
+subnets=dict(required=False, type='list'),
)
)
module = AnsibleModule(argument_spec=argument_spec)
@@ -99,22 +99,22 @@ def main():
if state == 'present':
for required in ['name', 'description', 'subnets']:
if not module.params.get(required):
-module.fail_json(msg = str("Parameter %s required for state='present'" % required))
+module.fail_json(msg=str("Parameter %s required for state='present'" % required))
else:
for not_allowed in ['description', 'subnets']:
if module.params.get(not_allowed):
-module.fail_json(msg = str("Parameter %s not allowed for state='absent'" % not_allowed))
+module.fail_json(msg=str("Parameter %s not allowed for state='absent'" % not_allowed))

# Retrieve any AWS settings from the environment.
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)

if not region:
-module.fail_json(msg = str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set."))
+module.fail_json(msg=str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set."))

try:
conn = connect_to_aws(boto.rds, region, **aws_connect_kwargs)
except BotoServerError as e:
-module.fail_json(msg = e.error_message)
+module.fail_json(msg=e.error_message)

try:
changed = False
@@ -125,7 +125,7 @@ def main():
exists = len(matching_groups) > 0
except BotoServerError as e:
if e.error_code != 'DBSubnetGroupNotFoundFault':
-module.fail_json(msg = e.error_message)
+module.fail_json(msg=e.error_message)

if state == 'absent':
if exists:
@@ -145,7 +145,7 @@ def main():
changed_group = conn.modify_db_subnet_group(group_name, description=group_description, subnet_ids=group_subnets)
changed = True
except BotoServerError as e:
-module.fail_json(msg = e.error_message)
+module.fail_json(msg=e.error_message)

module.exit_json(changed=changed)
@@ -237,15 +237,15 @@ from ansible.module_utils.ec2 import HAS_BOTO, connect_to_aws, ec2_argument_spec
def _collect_facts(resource):
"""Transfrom cluster information to dict."""
facts = {
-'identifier' : resource['ClusterIdentifier'],
+'identifier': resource['ClusterIdentifier'],
-'create_time' : resource['ClusterCreateTime'],
+'create_time': resource['ClusterCreateTime'],
-'status' : resource['ClusterStatus'],
+'status': resource['ClusterStatus'],
-'username' : resource['MasterUsername'],
+'username': resource['MasterUsername'],
-'db_name' : resource['DBName'],
+'db_name': resource['DBName'],
-'availability_zone' : resource['AvailabilityZone'],
+'availability_zone': resource['AvailabilityZone'],
'maintenance_window': resource['PreferredMaintenanceWindow'],
-'url' : resource['Endpoint']['Address'],
+'url': resource['Endpoint']['Address'],
-'port' : resource['Endpoint']['Port']
+'port': resource['Endpoint']['Port']
}

for node in resource['ClusterNodes']:
@@ -286,7 +286,7 @@ def create_cluster(module, redshift):
'number_of_nodes', 'publicly_accessible',
'encrypted', 'elastic_ip'):
if p in module.params:
-params[ p ] = module.params.get( p )
+params[p] = module.params.get(p)

try:
redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
@@ -310,7 +310,7 @@ def create_cluster(module, redshift):
while wait_timeout > time.time() and resource['ClusterStatus'] != 'available':
time.sleep(5)
if wait_timeout <= time.time():
-module.fail_json(msg = "Timeout waiting for resource %s" % resource.id)
+module.fail_json(msg="Timeout waiting for resource %s" % resource.id)

resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]

@@ -368,7 +368,7 @@ def delete_cluster(module, redshift):
while wait_timeout > time.time() and resource['ClusterStatus'] != 'deleting':
time.sleep(5)
if wait_timeout <= time.time():
-module.fail_json(msg = "Timeout waiting for resource %s" % resource.id)
+module.fail_json(msg="Timeout waiting for resource %s" % resource.id)

resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]

@@ -422,7 +422,7 @@ def modify_cluster(module, redshift):
while wait_timeout > time.time() and resource['ClusterStatus'] != 'available':
time.sleep(5)
if wait_timeout <= time.time():
-module.fail_json(msg = "Timeout waiting for resource %s" % resource.id)
+module.fail_json(msg="Timeout waiting for resource %s" % resource.id)

resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]

@@ -436,34 +436,34 @@ def modify_cluster(module, redshift):
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
-command = dict(choices=['create', 'facts', 'delete', 'modify'], required=True),
+command=dict(choices=['create', 'facts', 'delete', 'modify'], required=True),
-identifier = dict(required=True),
+identifier=dict(required=True),
-node_type = dict(choices=['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge', 'ds2.8xlarge', 'dc1.large',
+node_type=dict(choices=['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge', 'ds2.8xlarge', 'dc1.large',
-'dc2.large','dc1.8xlarge', 'dw1.xlarge', 'dw1.8xlarge', 'dw2.large',
+'dc2.large', 'dc1.8xlarge', 'dw1.xlarge', 'dw1.8xlarge', 'dw2.large',
'dw2.8xlarge'], required=False),
-username = dict(required=False),
+username=dict(required=False),
-password = dict(no_log=True, required=False),
+password=dict(no_log=True, required=False),
-db_name = dict(require=False),
+db_name=dict(require=False),
-cluster_type = dict(choices=['multi-node', 'single-node', ], default='single-node'),
+cluster_type=dict(choices=['multi-node', 'single-node', ], default='single-node'),
-cluster_security_groups = dict(aliases=['security_groups'], type='list'),
+cluster_security_groups=dict(aliases=['security_groups'], type='list'),
-vpc_security_group_ids = dict(aliases=['vpc_security_groups'], type='list'),
+vpc_security_group_ids=dict(aliases=['vpc_security_groups'], type='list'),
-skip_final_cluster_snapshot = dict(aliases=['skip_final_snapshot'], type='bool', default=False),
+skip_final_cluster_snapshot=dict(aliases=['skip_final_snapshot'], type='bool', default=False),
-final_cluster_snapshot_identifier = dict(aliases=['final_snapshot_id'], required=False),
+final_cluster_snapshot_identifier=dict(aliases=['final_snapshot_id'], required=False),
-cluster_subnet_group_name = dict(aliases=['subnet']),
+cluster_subnet_group_name=dict(aliases=['subnet']),
-availability_zone = dict(aliases=['aws_zone', 'zone']),
+availability_zone=dict(aliases=['aws_zone', 'zone']),
-preferred_maintenance_window = dict(aliases=['maintance_window', 'maint_window']),
+preferred_maintenance_window=dict(aliases=['maintance_window', 'maint_window']),
-cluster_parameter_group_name = dict(aliases=['param_group_name']),
+cluster_parameter_group_name=dict(aliases=['param_group_name']),
-automated_snapshot_retention_period = dict(aliases=['retention_period']),
+automated_snapshot_retention_period=dict(aliases=['retention_period']),
-port = dict(type='int'),
+port=dict(type='int'),
-cluster_version = dict(aliases=['version'], choices=['1.0']),
+cluster_version=dict(aliases=['version'], choices=['1.0']),
-allow_version_upgrade = dict(aliases=['version_upgrade'], type='bool', default=True),
+allow_version_upgrade=dict(aliases=['version_upgrade'], type='bool', default=True),
-number_of_nodes = dict(type='int'),
+number_of_nodes=dict(type='int'),
-publicly_accessible = dict(type='bool', default=False),
+publicly_accessible=dict(type='bool', default=False),
-encrypted = dict(type='bool', default=False),
+encrypted=dict(type='bool', default=False),
-elastic_ip = dict(required=False),
+elastic_ip=dict(required=False),
-new_cluster_identifier = dict(aliases=['new_identifier']),
+new_cluster_identifier=dict(aliases=['new_identifier']),
-wait = dict(type='bool', default=False),
+wait=dict(type='bool', default=False),
-wait_timeout = dict(type='int', default=300),
+wait_timeout=dict(type='int', default=300),
))

required_if = [
@@ -166,6 +166,7 @@ def find_health_check(conn, wanted):
return check
return None


def to_health_check(config):
return HealthCheck(
config.get('IPAddress'),
@@ -178,6 +179,7 @@ def to_health_check(config):
failure_threshold=int(config.get('FailureThreshold')),
)


def health_check_diff(a, b):
a = a.__dict__
b = b.__dict__
@@ -189,6 +191,7 @@ def health_check_diff(a, b):
diff[key] = b.get(key)
return diff


def to_template_params(health_check):
params = {
'ip_addr_part': '',
@@ -240,7 +243,8 @@ UPDATEHCXMLBody = """
</UpdateHealthCheckRequest>
"""

-def create_health_check(conn, health_check, caller_ref = None):
+def create_health_check(conn, health_check, caller_ref=None):
if caller_ref is None:
caller_ref = str(uuid.uuid4())
uri = '/%s/healthcheck' % conn.Version
@@ -259,6 +263,7 @@ def create_health_check(conn, health_check, caller_ref = None):
else:
raise exception.DNSServerError(response.status, response.reason, body)


def update_health_check(conn, health_check_id, health_check_version, health_check):
uri = '/%s/healthcheck/%s' % (conn.Version, health_check_id)
params = to_template_params(health_check)
@@ -279,18 +284,19 @@ def update_health_check(conn, health_check_id, health_check_version, health_chec
h.parse(body)
return e


def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
-state = dict(choices=['present', 'absent'], default='present'),
+state=dict(choices=['present', 'absent'], default='present'),
-ip_address = dict(),
+ip_address=dict(),
-port = dict(type='int'),
+port=dict(type='int'),
-type = dict(required=True, choices=['HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH', 'TCP']),
+type=dict(required=True, choices=['HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH', 'TCP']),
-resource_path = dict(),
+resource_path=dict(),
-fqdn = dict(),
+fqdn=dict(),
-string_match = dict(),
+string_match=dict(),
-request_interval = dict(type='int', choices=[10, 30], default=30),
+request_interval=dict(type='int', choices=[10, 30], default=30),
-failure_threshold = dict(type='int', choices=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], default=3),
+failure_threshold=dict(type='int', choices=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], default=3),
)
)
module = AnsibleModule(argument_spec=argument_spec)
@@ -334,7 +340,7 @@ def main():
try:
conn = Route53Connection(**aws_connect_kwargs)
except boto.exception.BotoServerError as e:
-module.fail_json(msg = e.error_message)
+module.fail_json(msg=e.error_message)

changed = False
action = None
@@ -362,7 +368,7 @@ def main():
conn.delete_health_check(check_id)
changed = True
else:
-module.fail_json(msg = "Logic Error: Unknown state")
+module.fail_json(msg="Logic Error: Unknown state")

module.exit_json(changed=changed, health_check=dict(id=check_id), action=action)
@@ -256,6 +256,7 @@ def create_lifecycle_rule(connection, module):

module.exit_json(changed=changed)


def compare_rule(rule_a, rule_b):

# Copy objects
@@ -364,26 +365,26 @@ def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
-name = dict(required=True, type='str'),
+name=dict(required=True, type='str'),
-expiration_days = dict(default=None, required=False, type='int'),
+expiration_days=dict(default=None, required=False, type='int'),
-expiration_date = dict(default=None, required=False, type='str'),
+expiration_date=dict(default=None, required=False, type='str'),
-prefix = dict(default=None, required=False),
+prefix=dict(default=None, required=False),
-requester_pays = dict(default='no', type='bool'),
+requester_pays=dict(default='no', type='bool'),
-rule_id = dict(required=False, type='str'),
+rule_id=dict(required=False, type='str'),
-state = dict(default='present', choices=['present', 'absent']),
+state=dict(default='present', choices=['present', 'absent']),
-status = dict(default='enabled', choices=['enabled', 'disabled']),
+status=dict(default='enabled', choices=['enabled', 'disabled']),
-storage_class = dict(default='glacier', type='str', choices=['glacier', 'standard_ia']),
+storage_class=dict(default='glacier', type='str', choices=['glacier', 'standard_ia']),
-transition_days = dict(default=None, required=False, type='int'),
+transition_days=dict(default=None, required=False, type='int'),
-transition_date = dict(default=None, required=False, type='str')
+transition_date=dict(default=None, required=False, type='str')
)
)

module = AnsibleModule(argument_spec=argument_spec,
-mutually_exclusive = [
+mutually_exclusive=[
-[ 'expiration_days', 'expiration_date' ],
+['expiration_days', 'expiration_date'],
-[ 'expiration_days', 'transition_date' ],
+['expiration_days', 'transition_date'],
-[ 'transition_days', 'transition_date' ],
+['transition_days', 'transition_date'],
-[ 'transition_days', 'expiration_date' ]
+['transition_days', 'expiration_date']
]
)

@@ -428,7 +429,7 @@ def main():
except ValueError as e:
module.fail_json(msg="expiration_date is not a valid ISO-8601 format. The time must be midnight and a timezone of GMT must be included")

-boto_required_version = (2,40,0)
+boto_required_version = (2, 40, 0)
if storage_class == 'standard_ia' and tuple(map(int, (boto.__version__.split(".")))) < boto_required_version:
module.fail_json(msg="'standard_ia' class requires boto >= 2.40.0")
@@ -137,10 +137,10 @@ def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
-name = dict(required=True),
+name=dict(required=True),
-target_bucket = dict(required=False, default=None),
+target_bucket=dict(required=False, default=None),
-target_prefix = dict(required=False, default=""),
+target_prefix=dict(required=False, default=""),
-state = dict(required=False, default='present', choices=['present', 'absent'])
+state=dict(required=False, default='present', choices=['present', 'absent'])
)
)

@@ -256,7 +256,6 @@ DEFAULT_CHUNK_SIZE = 5 * 1024 * 1024


def calculate_multipart_etag(source_path, chunk_size=DEFAULT_CHUNK_SIZE):

"""
calculates a multipart upload etag for amazon s3
@@ -180,10 +180,10 @@ def _create_website_configuration(suffix, error_key, redirect_all_requests):
website_configuration = {}

if error_key is not None:
-website_configuration['ErrorDocument'] = { 'Key': error_key }
+website_configuration['ErrorDocument'] = {'Key': error_key}

if suffix is not None:
-website_configuration['IndexDocument'] = { 'Suffix': suffix }
+website_configuration['IndexDocument'] = {'Suffix': suffix}

if redirect_all_requests is not None:
website_configuration['RedirectAllRequestsTo'] = _create_redirect_dict(redirect_all_requests)
@@ -288,7 +288,7 @@ def main():

module = AnsibleModule(
argument_spec=argument_spec,
-mutually_exclusive = [
+mutually_exclusive=[
['redirect_all_requests', 'suffix'],
['redirect_all_requests', 'error_key']
])
@@ -194,7 +194,6 @@ class SnsTopicManager(object):
break
return [t['TopicArn'] for t in topics]


def _arn_topic_lookup(self):
# topic names cannot have colons, so this captures the full topic name
all_topics = self._get_all_topics()
@@ -203,7 +202,6 @@ class SnsTopicManager(object):
if topic.endswith(lookup_topic):
return topic


def _create_topic(self):
self.changed = True
self.topic_created = True
@@ -214,11 +212,8 @@ class SnsTopicManager(object):
time.sleep(3)
self.arn_topic = self._arn_topic_lookup()


def _set_topic_attrs(self):
-topic_attributes = self.connection.get_topic_attributes(self.arn_topic) \
+topic_attributes = self.connection.get_topic_attributes(self.arn_topic)['GetTopicAttributesResponse']['GetTopicAttributesResult']['Attributes']
-['GetTopicAttributesResponse'] ['GetTopicAttributesResult'] \
-['Attributes']

if self.display_name and self.display_name != topic_attributes['DisplayName']:
self.changed = True
@@ -234,7 +229,7 @@ class SnsTopicManager(object):
self.connection.set_topic_attributes(self.arn_topic, 'Policy',
json.dumps(self.policy))

-if self.delivery_policy and ('DeliveryPolicy' not in topic_attributes or \
+if self.delivery_policy and ('DeliveryPolicy' not in topic_attributes or
self.delivery_policy != json.loads(topic_attributes['DeliveryPolicy'])):
self.changed = True
self.attributes_set.append('delivery_policy')
@@ -242,21 +237,18 @@ class SnsTopicManager(object):
self.connection.set_topic_attributes(self.arn_topic, 'DeliveryPolicy',
json.dumps(self.delivery_policy))


def _canonicalize_endpoint(self, protocol, endpoint):
if protocol == 'sms':
return re.sub('[^0-9]*', '', endpoint)
return endpoint


def _get_topic_subs(self):
next_token = None
while True:
response = self.connection.get_all_subscriptions_by_topic(self.arn_topic, next_token)
-self.subscriptions_existing.extend(response['ListSubscriptionsByTopicResponse'] \
+self.subscriptions_existing.extend(response['ListSubscriptionsByTopicResponse']
['ListSubscriptionsByTopicResult']['Subscriptions'])
-next_token = response['ListSubscriptionsByTopicResponse'] \
+next_token = response['ListSubscriptionsByTopicResponse']['ListSubscriptionsByTopicResult']['NextToken']
-['ListSubscriptionsByTopicResult']['NextToken']
if not next_token:
break

@@ -284,7 +276,6 @@ class SnsTopicManager(object):
if not self.check_mode:
self.connection.subscribe(self.arn_topic, protocol, endpoint)


def _delete_subscriptions(self):
# NOTE: subscriptions in 'PendingConfirmation' timeout in 3 days
# https://forums.aws.amazon.com/thread.jspa?threadID=85993
@@ -295,14 +286,12 @@ class SnsTopicManager(object):
if not self.check_mode:
self.connection.unsubscribe(sub['SubscriptionArn'])


def _delete_topic(self):
self.topic_deleted = True
self.changed = True
if not self.check_mode:
self.connection.delete_topic(self.arn_topic)


def ensure_ok(self):
self.arn_topic = self._arn_topic_lookup()
if not self.arn_topic:
@@ -319,7 +308,6 @@ class SnsTopicManager(object):
self._delete_subscriptions()
self._delete_topic()


def get_info(self):
info = {
'name': self.name,
@@ -341,7 +329,6 @@ class SnsTopicManager(object):
return info



def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
@@ -113,17 +113,18 @@ def assume_role_policy(connection, module):

module.exit_json(changed=changed, sts_creds=assumed_role.credentials.__dict__, sts_user=assumed_role.user.__dict__)


def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
-role_arn = dict(required=True, default=None),
+role_arn=dict(required=True, default=None),
-role_session_name = dict(required=True, default=None),
+role_session_name=dict(required=True, default=None),
-duration_seconds = dict(required=False, default=None, type='int'),
+duration_seconds=dict(required=False, default=None, type='int'),
-external_id = dict(required=False, default=None),
+external_id=dict(required=False, default=None),
-policy = dict(required=False, default=None),
+policy=dict(required=False, default=None),
-mfa_serial_number = dict(required=False, default=None),
+mfa_serial_number=dict(required=False, default=None),
-mfa_token = dict(required=False, default=None)
+mfa_token=dict(required=False, default=None)
)
)

@@ -108,6 +108,7 @@ def normalize_credentials(credentials):
'expiration': expiration
}


def get_session_token(connection, module):
duration_seconds = module.params.get('duration_seconds')
mfa_serial_number = module.params.get('mfa_serial_number')
@@ -131,13 +132,14 @@ def get_session_token(connection, module):
credentials = normalize_credentials(response.get('Credentials', {}))
module.exit_json(changed=changed, sts_creds=credentials)


def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
-duration_seconds = dict(required=False, default=None, type='int'),
+duration_seconds=dict(required=False, default=None, type='int'),
-mfa_serial_number = dict(required=False, default=None),
+mfa_serial_number=dict(required=False, default=None),
-mfa_token = dict(required=False, default=None)
+mfa_token=dict(required=False, default=None)
)
)
@@ -154,6 +154,7 @@ def managed_disk_to_dict(managed_disk):

class AzureRMManagedDisk(AzureRMModuleBase):
"""Configuration class for an Azure RM Managed Disk resource"""

def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(

@@ -475,8 +475,7 @@ class AzureRMNetworkInterface(AzureRMModuleBase):
tags=results['tags'],
ip_configurations=[
NetworkInterfaceIPConfiguration(
-private_ip_allocation_method=
+private_ip_allocation_method=results['ip_configuration']['private_ip_allocation_method']
-results['ip_configuration']['private_ip_allocation_method']
)
]
)
@@ -496,7 +495,7 @@ class AzureRMNetworkInterface(AzureRMModuleBase):
id=pip.id,
location=pip.location,
resource_guid=pip.resource_guid)
-#name=pip.name,
+# name=pip.name,

if results['network_security_group'].get('id'):
nsg = self.get_security_group(results['network_security_group']['name'])

@@ -193,7 +193,7 @@ class AzureRMPublicIPAddress(AzureRMModuleBase):
if self.domain_name != results['dns_settings'].get('domain_name_label'):
self.log('CHANGED: domain_name_label')
changed = True
-results['dns_settings']['domain_name_label'] =self.domain_name
+results['dns_settings']['domain_name_label'] = self.domain_name

if self.allocation_method != results['public_ip_allocation_method']:
self.log("CHANGED: allocation_method")

@@ -183,7 +183,6 @@ class AzureRMPublicIPFacts(AzureRMModuleBase):
return results



def main():
AzureRMPublicIPFacts()

@@ -226,7 +226,7 @@ class AzureRMStorageAccount(AzureRMModuleBase):
self.account_dict = self.get_account()

if self.state == 'present' and self.account_dict and \
-self.account_dict['provisioning_state'] != AZURE_SUCCESS_STATE :
+self.account_dict['provisioning_state'] != AZURE_SUCCESS_STATE:
self.fail("Error: storage account {0} has not completed provisioning. State is {1}. Expecting state "
"to be {2}.".format(self.name, self.account_dict['provisioning_state'], AZURE_SUCCESS_STATE))

@@ -132,7 +132,6 @@ except ImportError:
pass



def subnet_to_dict(subnet):
result = dict(
id=subnet.id,

@@ -659,7 +659,7 @@ class AzureRMVirtualMachine(AzureRMModuleBase):
open_ports=dict(type='list'),
network_interface_names=dict(type='list', aliases=['network_interfaces']),
remove_on_absent=dict(type='list', default=['all']),
-virtual_network_resource_group=dict(type = 'str'),
+virtual_network_resource_group=dict(type='str'),
virtual_network_name=dict(type='str', aliases=['virtual_network']),
subnet_name=dict(type='str', aliases=['subnet']),
allocated=dict(type='bool', default=True),
@@ -1297,7 +1297,7 @@ class AzureRMVirtualMachine(AzureRMModuleBase):
nic_names = []
pip_names = []

-if self.remove_on_absent.intersection(set(['all','virtual_storage'])):
+if self.remove_on_absent.intersection(set(['all', 'virtual_storage'])):
# store the attached vhd info so we can nuke it after the VM is gone
if(vm.storage_profile.os_disk.managed_disk):
self.log('Storing managed disk ID for deletion')
@@ -1319,7 +1319,7 @@ class AzureRMVirtualMachine(AzureRMModuleBase):
self.log("Managed disk IDs to delete: {0}".format(', '.join(managed_disk_ids)))
self.results['deleted_managed_disk_ids'] = managed_disk_ids

-if self.remove_on_absent.intersection(set(['all','network_interfaces'])):
+if self.remove_on_absent.intersection(set(['all', 'network_interfaces'])):
# store the attached nic info so we can nuke them after the VM is gone
self.log('Storing NIC names for deletion.')
for interface in vm.network_profile.network_interfaces:
@@ -1327,7 +1327,7 @@ class AzureRMVirtualMachine(AzureRMModuleBase):
nic_names.append(id_dict['networkInterfaces'])
self.log('NIC names to delete {0}'.format(', '.join(nic_names)))
self.results['deleted_network_interfaces'] = nic_names
-if self.remove_on_absent.intersection(set(['all','public_ips'])):
+if self.remove_on_absent.intersection(set(['all', 'public_ips'])):
# also store each nic's attached public IPs and delete after the NIC is gone
for name in nic_names:
nic = self.get_network_interface(name)
@@ -1349,18 +1349,18 @@ class AzureRMVirtualMachine(AzureRMModuleBase):

# TODO: parallelize nic, vhd, and public ip deletions with begin_deleting
# TODO: best-effort to keep deleting other linked resources if we encounter an error
-if self.remove_on_absent.intersection(set(['all','virtual_storage'])):
+if self.remove_on_absent.intersection(set(['all', 'virtual_storage'])):
self.log('Deleting VHDs')
self.delete_vm_storage(vhd_uris)
self.log('Deleting managed disks')
self.delete_managed_disks(managed_disk_ids)

-if self.remove_on_absent.intersection(set(['all','network_interfaces'])):
+if self.remove_on_absent.intersection(set(['all', 'network_interfaces'])):
self.log('Deleting network interfaces')
for name in nic_names:
self.delete_nic(name)

-if self.remove_on_absent.intersection(set(['all','public_ips'])):
+if self.remove_on_absent.intersection(set(['all', 'public_ips'])):
self.log('Deleting public IPs')
for name in pip_names:
self.delete_pip(name)
@@ -1461,6 +1461,7 @@ class AzureRMVirtualMachine(AzureRMModuleBase):
return ImageReference(id=vm_image.id)

self.fail("Error could not find image with name {0}".format(name))

def get_availability_set(self, resource_group, name):
try:
return self.compute_client.availability_sets.get(resource_group, name)

@@ -195,7 +195,7 @@ class AzureRMVirtualNetwork(AzureRMModuleBase):
self.dns_servers = None
self.purge_dns_servers = None

-self.results=dict(
+self.results = dict(
changed=False,
state=dict()
)
@@ -327,7 +326,6 @@ class AzureRMVirtualNetwork(AzureRMModuleBase):
self.delete_virtual_network()
self.results['state']['status'] = 'Deleted'


return self.results

def create_or_update_vnet(self, vnet):

@@ -178,6 +178,7 @@ class AzureRMNetworkInterfaceFacts(AzureRMModuleBase):
results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS))
return results


def main():
AzureRMNetworkInterfaceFacts()
@@ -951,7 +951,7 @@ class TaskParameters(DockerBaseClass):
Returns parameters used to create a HostConfig object
'''

-host_config_params=dict(
+host_config_params = dict(
port_bindings='published_ports',
publish_all_ports='publish_all_ports',
links='links',
@@ -1163,7 +1163,7 @@ class TaskParameters(DockerBaseClass):

options = dict(
Type=self.log_driver,
-Config = dict()
+Config=dict()
)

if self.log_options is not None:
@@ -1217,7 +1217,6 @@ class TaskParameters(DockerBaseClass):
return network_id



class Container(DockerBaseClass):

def __init__(self, container, parameters):

@@ -184,6 +184,7 @@ class TaskParameters(DockerBaseClass):
def container_names_in_network(network):
return [c['Name'] for c in network['Containers'].values()] if network['Containers'] else []


class DockerNetworkManager(object):

def __init__(self, client):
@@ -362,16 +363,16 @@ class DockerNetworkManager(object):

def main():
argument_spec = dict(
-network_name = dict(type='str', required=True, aliases=['name']),
+network_name=dict(type='str', required=True, aliases=['name']),
-connected = dict(type='list', default=[], aliases=['containers']),
+connected=dict(type='list', default=[], aliases=['containers']),
-state = dict(type='str', default='present', choices=['present', 'absent']),
+state=dict(type='str', default='present', choices=['present', 'absent']),
-driver = dict(type='str', default='bridge'),
+driver=dict(type='str', default='bridge'),
-driver_options = dict(type='dict', default={}),
+driver_options=dict(type='dict', default={}),
-force = dict(type='bool', default=False),
+force=dict(type='bool', default=False),
-appends = dict(type='bool', default=False, aliases=['incremental']),
+appends=dict(type='bool', default=False, aliases=['incremental']),
-ipam_driver = dict(type='str', default=None),
+ipam_driver=dict(type='str', default=None),
-ipam_options = dict(type='dict', default={}),
+ipam_options=dict(type='dict', default={}),
-debug = dict(type='bool', default=False)
+debug=dict(type='bool', default=False)
)

client = AnsibleDockerClient(

@@ -179,27 +179,26 @@ def grant_check(module, gs, obj):
try:
acp = obj.get_acl()
if module.params.get('permission') == 'public-read':
-grant = [ x for x in acp.entries.entry_list if x.scope.type == 'AllUsers']
+grant = [x for x in acp.entries.entry_list if x.scope.type == 'AllUsers']
if not grant:
obj.set_acl('public-read')
module.exit_json(changed=True, result="The objects permission as been set to public-read")
if module.params.get('permission') == 'authenticated-read':
-grant = [ x for x in acp.entries.entry_list if x.scope.type == 'AllAuthenticatedUsers']
+grant = [x for x in acp.entries.entry_list if x.scope.type == 'AllAuthenticatedUsers']
if not grant:
obj.set_acl('authenticated-read')
module.exit_json(changed=True, result="The objects permission as been set to authenticated-read")
|
module.exit_json(changed=True, result="The objects permission as been set to authenticated-read")
|
||||||
except gs.provider.storage_response_error as e:
|
except gs.provider.storage_response_error as e:
|
||||||
module.fail_json(msg= str(e))
|
module.fail_json(msg=str(e))
|
||||||
return True
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
def key_check(module, gs, bucket, obj):
|
def key_check(module, gs, bucket, obj):
|
||||||
try:
|
try:
|
||||||
bucket = gs.lookup(bucket)
|
bucket = gs.lookup(bucket)
|
||||||
key_check = bucket.get_key(obj)
|
key_check = bucket.get_key(obj)
|
||||||
except gs.provider.storage_response_error as e:
|
except gs.provider.storage_response_error as e:
|
||||||
module.fail_json(msg= str(e))
|
module.fail_json(msg=str(e))
|
||||||
if key_check:
|
if key_check:
|
||||||
grant_check(module, gs, key_check)
|
grant_check(module, gs, key_check)
|
||||||
return True
|
return True
|
||||||
|
@ -223,7 +222,7 @@ def bucket_check(module, gs, bucket):
|
||||||
try:
|
try:
|
||||||
result = gs.lookup(bucket)
|
result = gs.lookup(bucket)
|
||||||
except gs.provider.storage_response_error as e:
|
except gs.provider.storage_response_error as e:
|
||||||
module.fail_json(msg= str(e))
|
module.fail_json(msg=str(e))
|
||||||
if result:
|
if result:
|
||||||
grant_check(module, gs, result)
|
grant_check(module, gs, result)
|
||||||
return True
|
return True
|
||||||
|
@ -237,7 +236,7 @@ def create_bucket(module, gs, bucket):
|
||||||
bucket.set_acl(module.params.get('permission'))
|
bucket.set_acl(module.params.get('permission'))
|
||||||
bucket.configure_versioning(module.params.get('versioning'))
|
bucket.configure_versioning(module.params.get('versioning'))
|
||||||
except gs.provider.storage_response_error as e:
|
except gs.provider.storage_response_error as e:
|
||||||
module.fail_json(msg= str(e))
|
module.fail_json(msg=str(e))
|
||||||
if bucket:
|
if bucket:
|
||||||
return True
|
return True
|
||||||
|
|
||||||
|
@ -251,7 +250,7 @@ def delete_bucket(module, gs, bucket):
|
||||||
bucket.delete()
|
bucket.delete()
|
||||||
return True
|
return True
|
||||||
except gs.provider.storage_response_error as e:
|
except gs.provider.storage_response_error as e:
|
||||||
module.fail_json(msg= str(e))
|
module.fail_json(msg=str(e))
|
||||||
|
|
||||||
|
|
||||||
def delete_key(module, gs, bucket, obj):
|
def delete_key(module, gs, bucket, obj):
|
||||||
|
@ -260,7 +259,7 @@ def delete_key(module, gs, bucket, obj):
|
||||||
bucket.delete_key(obj)
|
bucket.delete_key(obj)
|
||||||
module.exit_json(msg="Object deleted from bucket ", changed=True)
|
module.exit_json(msg="Object deleted from bucket ", changed=True)
|
||||||
except gs.provider.storage_response_error as e:
|
except gs.provider.storage_response_error as e:
|
||||||
module.fail_json(msg= str(e))
|
module.fail_json(msg=str(e))
|
||||||
|
|
||||||
|
|
||||||
def create_dirkey(module, gs, bucket, obj):
|
def create_dirkey(module, gs, bucket, obj):
|
||||||
|
@ -270,7 +269,7 @@ def create_dirkey(module, gs, bucket, obj):
|
||||||
key.set_contents_from_string('')
|
key.set_contents_from_string('')
|
||||||
module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket.name), changed=True)
|
module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket.name), changed=True)
|
||||||
except gs.provider.storage_response_error as e:
|
except gs.provider.storage_response_error as e:
|
||||||
module.fail_json(msg= str(e))
|
module.fail_json(msg=str(e))
|
||||||
|
|
||||||
|
|
||||||
def path_check(path):
|
def path_check(path):
|
||||||
|
@ -308,7 +307,7 @@ def upload_gsfile(module, gs, bucket, obj, src, expiry):
|
||||||
url = key.generate_url(expiry)
|
url = key.generate_url(expiry)
|
||||||
module.exit_json(msg="PUT operation complete", url=url, changed=True)
|
module.exit_json(msg="PUT operation complete", url=url, changed=True)
|
||||||
except gs.provider.storage_copy_error as e:
|
except gs.provider.storage_copy_error as e:
|
||||||
module.fail_json(msg= str(e))
|
module.fail_json(msg=str(e))
|
||||||
|
|
||||||
|
|
||||||
def download_gsfile(module, gs, bucket, obj, dest):
|
def download_gsfile(module, gs, bucket, obj, dest):
|
||||||
|
@ -318,7 +317,7 @@ def download_gsfile(module, gs, bucket, obj, dest):
|
||||||
key.get_contents_to_filename(dest)
|
key.get_contents_to_filename(dest)
|
||||||
module.exit_json(msg="GET operation complete", changed=True)
|
module.exit_json(msg="GET operation complete", changed=True)
|
||||||
except gs.provider.storage_copy_error as e:
|
except gs.provider.storage_copy_error as e:
|
||||||
module.fail_json(msg= str(e))
|
module.fail_json(msg=str(e))
|
||||||
|
|
||||||
|
|
||||||
def download_gsstr(module, gs, bucket, obj):
|
def download_gsstr(module, gs, bucket, obj):
|
||||||
|
@ -328,7 +327,7 @@ def download_gsstr(module, gs, bucket, obj):
|
||||||
contents = key.get_contents_as_string()
|
contents = key.get_contents_as_string()
|
||||||
module.exit_json(msg="GET operation complete", contents=contents, changed=True)
|
module.exit_json(msg="GET operation complete", contents=contents, changed=True)
|
||||||
except gs.provider.storage_copy_error as e:
|
except gs.provider.storage_copy_error as e:
|
||||||
module.fail_json(msg= str(e))
|
module.fail_json(msg=str(e))
|
||||||
|
|
||||||
|
|
||||||
def get_download_url(module, gs, bucket, obj, expiry):
|
def get_download_url(module, gs, bucket, obj, expiry):
|
||||||
|
@ -338,7 +337,7 @@ def get_download_url(module, gs, bucket, obj, expiry):
|
||||||
url = key.generate_url(expiry)
|
url = key.generate_url(expiry)
|
||||||
module.exit_json(msg="Download url:", url=url, expiration=expiry, changed=True)
|
module.exit_json(msg="Download url:", url=url, expiration=expiry, changed=True)
|
||||||
except gs.provider.storage_response_error as e:
|
except gs.provider.storage_response_error as e:
|
||||||
module.fail_json(msg= str(e))
|
module.fail_json(msg=str(e))
|
||||||
|
|
||||||
|
|
||||||
def handle_get(module, gs, bucket, obj, overwrite, dest):
|
def handle_get(module, gs, bucket, obj, overwrite, dest):
|
||||||
|
@ -380,7 +379,7 @@ def handle_put(module, gs, bucket, obj, overwrite, src, expiration):
|
||||||
def handle_delete(module, gs, bucket, obj):
|
def handle_delete(module, gs, bucket, obj):
|
||||||
if bucket and not obj:
|
if bucket and not obj:
|
||||||
if bucket_check(module, gs, bucket):
|
if bucket_check(module, gs, bucket):
|
||||||
module.exit_json(msg="Bucket %s and all keys have been deleted."%bucket, changed=delete_bucket(module, gs, bucket))
|
module.exit_json(msg="Bucket %s and all keys have been deleted." % bucket, changed=delete_bucket(module, gs, bucket))
|
||||||
else:
|
else:
|
||||||
module.exit_json(msg="Bucket does not exist.", changed=False)
|
module.exit_json(msg="Bucket does not exist.", changed=False)
|
||||||
if bucket and obj:
|
if bucket and obj:
|
||||||
|
@ -409,7 +408,7 @@ def handle_create(module, gs, bucket, obj):
|
||||||
|
|
||||||
if bucket_check(module, gs, bucket):
|
if bucket_check(module, gs, bucket):
|
||||||
if key_check(module, gs, bucket, dirobj):
|
if key_check(module, gs, bucket, dirobj):
|
||||||
module.exit_json(msg="Bucket %s and key %s already exists."% (bucket, obj), changed=False)
|
module.exit_json(msg="Bucket %s and key %s already exists." % (bucket, obj), changed=False)
|
||||||
else:
|
else:
|
||||||
create_dirkey(module, gs, bucket, dirobj)
|
create_dirkey(module, gs, bucket, dirobj)
|
||||||
else:
|
else:
|
||||||
|
@ -419,20 +418,20 @@ def handle_create(module, gs, bucket, obj):
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
module = AnsibleModule(
|
module = AnsibleModule(
|
||||||
argument_spec = dict(
|
argument_spec=dict(
|
||||||
bucket = dict(required=True),
|
bucket=dict(required=True),
|
||||||
object = dict(default=None, type='path'),
|
object=dict(default=None, type='path'),
|
||||||
src = dict(default=None),
|
src=dict(default=None),
|
||||||
dest = dict(default=None, type='path'),
|
dest=dict(default=None, type='path'),
|
||||||
expiration = dict(type='int', default=600, aliases=['expiry']),
|
expiration=dict(type='int', default=600, aliases=['expiry']),
|
||||||
mode = dict(choices=['get', 'put', 'delete', 'create', 'get_url', 'get_str'], required=True),
|
mode=dict(choices=['get', 'put', 'delete', 'create', 'get_url', 'get_str'], required=True),
|
||||||
permission = dict(choices=['private', 'public-read', 'authenticated-read'], default='private'),
|
permission=dict(choices=['private', 'public-read', 'authenticated-read'], default='private'),
|
||||||
headers = dict(type='dict', default={}),
|
headers=dict(type='dict', default={}),
|
||||||
gs_secret_key = dict(no_log=True, required=True),
|
gs_secret_key=dict(no_log=True, required=True),
|
||||||
gs_access_key = dict(required=True),
|
gs_access_key=dict(required=True),
|
||||||
overwrite = dict(default=True, type='bool', aliases=['force']),
|
overwrite=dict(default=True, type='bool', aliases=['force']),
|
||||||
region = dict(default='US', type='str'),
|
region=dict(default='US', type='str'),
|
||||||
versioning = dict(default='no', type='bool')
|
versioning=dict(default='no', type='bool')
|
||||||
),
|
),
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -459,7 +458,7 @@ def main():
|
||||||
try:
|
try:
|
||||||
gs = boto.connect_gs(gs_access_key, gs_secret_key)
|
gs = boto.connect_gs(gs_access_key, gs_secret_key)
|
||||||
except boto.exception.NoAuthHandlerFound as e:
|
except boto.exception.NoAuthHandlerFound as e:
|
||||||
module.fail_json(msg = str(e))
|
module.fail_json(msg=str(e))
|
||||||
|
|
||||||
if mode == 'get':
|
if mode == 'get':
|
||||||
if not bucket_check(module, gs, bucket) or not key_check(module, gs, bucket, obj):
|
if not bucket_check(module, gs, bucket) or not key_check(module, gs, bucket, obj):
|
||||||
|
|
|
@ -348,7 +348,7 @@ PROVIDER = Provider.GOOGLE
|
||||||
# I'm hard-coding the supported record types here, because they (hopefully!)
|
# I'm hard-coding the supported record types here, because they (hopefully!)
|
||||||
# shouldn't change much, and it allows me to use it as a "choices" parameter
|
# shouldn't change much, and it allows me to use it as a "choices" parameter
|
||||||
# in an AnsibleModule argument_spec.
|
# in an AnsibleModule argument_spec.
|
||||||
SUPPORTED_RECORD_TYPES = [ 'A', 'AAAA', 'CNAME', 'SRV', 'TXT', 'SOA', 'NS', 'MX', 'SPF', 'PTR' ]
|
SUPPORTED_RECORD_TYPES = ['A', 'AAAA', 'CNAME', 'SRV', 'TXT', 'SOA', 'NS', 'MX', 'SPF', 'PTR']
|
||||||
|
|
||||||
|
|
||||||
################################################################################
|
################################################################################
|
||||||
|
@ -378,8 +378,8 @@ def create_record(module, gcdns, zone, record):
|
||||||
# The record doesn't match, so we need to check if we can overwrite it.
|
# The record doesn't match, so we need to check if we can overwrite it.
|
||||||
if not overwrite:
|
if not overwrite:
|
||||||
module.fail_json(
|
module.fail_json(
|
||||||
msg = 'cannot overwrite existing record, overwrite protection enabled',
|
msg='cannot overwrite existing record, overwrite protection enabled',
|
||||||
changed = False
|
changed=False
|
||||||
)
|
)
|
||||||
|
|
||||||
# The record either doesn't exist, or it exists and we can overwrite it.
|
# The record either doesn't exist, or it exists and we can overwrite it.
|
||||||
|
@ -393,9 +393,9 @@ def create_record(module, gcdns, zone, record):
|
||||||
# not when combined (e.g., an 'A' record with "www.example.com"
|
# not when combined (e.g., an 'A' record with "www.example.com"
|
||||||
# as its value).
|
# as its value).
|
||||||
module.fail_json(
|
module.fail_json(
|
||||||
msg = 'value is invalid for the given type: ' +
|
msg='value is invalid for the given type: ' +
|
||||||
"%s, got value: %s" % (record_type, record_data),
|
"%s, got value: %s" % (record_type, record_data),
|
||||||
changed = False
|
changed=False
|
||||||
)
|
)
|
||||||
|
|
||||||
elif error.code == 'cnameResourceRecordSetConflict':
|
elif error.code == 'cnameResourceRecordSetConflict':
|
||||||
|
@ -403,8 +403,8 @@ def create_record(module, gcdns, zone, record):
|
||||||
# already have another type of resource record with the name
|
# already have another type of resource record with the name
|
||||||
# domain name.
|
# domain name.
|
||||||
module.fail_json(
|
module.fail_json(
|
||||||
msg = "non-CNAME resource record already exists: %s" % record_name,
|
msg="non-CNAME resource record already exists: %s" % record_name,
|
||||||
changed = False
|
changed=False
|
||||||
)
|
)
|
||||||
|
|
||||||
else:
|
else:
|
||||||
|
@ -428,8 +428,8 @@ def create_record(module, gcdns, zone, record):
|
||||||
try:
|
try:
|
||||||
gcdns.create_record(record.name, record.zone, record.type, record.data)
|
gcdns.create_record(record.name, record.zone, record.type, record.data)
|
||||||
module.fail_json(
|
module.fail_json(
|
||||||
msg = 'error updating record, the original record was restored',
|
msg='error updating record, the original record was restored',
|
||||||
changed = False
|
changed=False
|
||||||
)
|
)
|
||||||
except LibcloudError:
|
except LibcloudError:
|
||||||
# We deleted the old record, couldn't create the new record, and
|
# We deleted the old record, couldn't create the new record, and
|
||||||
|
@ -437,12 +437,12 @@ def create_record(module, gcdns, zone, record):
|
||||||
# record to the failure output so the user can resore it if
|
# record to the failure output so the user can resore it if
|
||||||
# necessary.
|
# necessary.
|
||||||
module.fail_json(
|
module.fail_json(
|
||||||
msg = 'error updating record, and could not restore original record, ' +
|
msg='error updating record, and could not restore original record, ' +
|
||||||
"original name: %s " % record.name +
|
"original name: %s " % record.name +
|
||||||
"original zone: %s " % record.zone +
|
"original zone: %s " % record.zone +
|
||||||
"original type: %s " % record.type +
|
"original type: %s " % record.type +
|
||||||
"original data: %s" % record.data,
|
"original data: %s" % record.data,
|
||||||
changed = True)
|
changed=True)
|
||||||
|
|
||||||
return True
|
return True
|
||||||
|
|
||||||
|
@ -463,10 +463,10 @@ def remove_record(module, gcdns, record):
|
||||||
if not overwrite:
|
if not overwrite:
|
||||||
if not _records_match(record.data['ttl'], record.data['rrdatas'], ttl, record_data):
|
if not _records_match(record.data['ttl'], record.data['rrdatas'], ttl, record_data):
|
||||||
module.fail_json(
|
module.fail_json(
|
||||||
msg = 'cannot delete due to non-matching ttl or record_data: ' +
|
msg='cannot delete due to non-matching ttl or record_data: ' +
|
||||||
"ttl: %d, record_data: %s " % (ttl, record_data) +
|
"ttl: %d, record_data: %s " % (ttl, record_data) +
|
||||||
"original ttl: %d, original record_data: %s" % (record.data['ttl'], record.data['rrdatas']),
|
"original ttl: %d, original record_data: %s" % (record.data['ttl'], record.data['rrdatas']),
|
||||||
changed = False
|
changed=False
|
||||||
)
|
)
|
||||||
|
|
||||||
# If we got to this point, we're okay to delete the record.
|
# If we got to this point, we're okay to delete the record.
|
||||||
|
@ -539,20 +539,20 @@ def _sanity_check(module):
|
||||||
# Apache libcloud needs to be installed and at least the minimum version.
|
# Apache libcloud needs to be installed and at least the minimum version.
|
||||||
if not HAS_LIBCLOUD:
|
if not HAS_LIBCLOUD:
|
||||||
module.fail_json(
|
module.fail_json(
|
||||||
msg = 'This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
|
msg='This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
|
||||||
changed = False
|
changed=False
|
||||||
)
|
)
|
||||||
elif LooseVersion(LIBCLOUD_VERSION) < MINIMUM_LIBCLOUD_VERSION:
|
elif LooseVersion(LIBCLOUD_VERSION) < MINIMUM_LIBCLOUD_VERSION:
|
||||||
module.fail_json(
|
module.fail_json(
|
||||||
msg = 'This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
|
msg='This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
|
||||||
changed = False
|
changed=False
|
||||||
)
|
)
|
||||||
|
|
||||||
# A negative TTL is not permitted (how would they even work?!).
|
# A negative TTL is not permitted (how would they even work?!).
|
||||||
if ttl < 0:
|
if ttl < 0:
|
||||||
module.fail_json(
|
module.fail_json(
|
||||||
msg = 'TTL cannot be less than zero, got: %d' % ttl,
|
msg='TTL cannot be less than zero, got: %d' % ttl,
|
||||||
changed = False
|
changed=False
|
||||||
)
|
)
|
||||||
|
|
||||||
# Deleting SOA records is not permitted.
|
# Deleting SOA records is not permitted.
|
||||||
|
@ -572,8 +572,8 @@ def _sanity_check(module):
|
||||||
socket.inet_aton(value)
|
socket.inet_aton(value)
|
||||||
except socket.error:
|
except socket.error:
|
||||||
module.fail_json(
|
module.fail_json(
|
||||||
msg = 'invalid A record value, got: %s' % value,
|
msg='invalid A record value, got: %s' % value,
|
||||||
changed = False
|
changed=False
|
||||||
)
|
)
|
||||||
|
|
||||||
# AAAA records must contain valid IPv6 addresses.
|
# AAAA records must contain valid IPv6 addresses.
|
||||||
|
@ -583,23 +583,23 @@ def _sanity_check(module):
|
||||||
socket.inet_pton(socket.AF_INET6, value)
|
socket.inet_pton(socket.AF_INET6, value)
|
||||||
except socket.error:
|
except socket.error:
|
||||||
module.fail_json(
|
module.fail_json(
|
||||||
msg = 'invalid AAAA record value, got: %s' % value,
|
msg='invalid AAAA record value, got: %s' % value,
|
||||||
changed = False
|
changed=False
|
||||||
)
|
)
|
||||||
|
|
||||||
# CNAME and SOA records can't have multiple values.
|
# CNAME and SOA records can't have multiple values.
|
||||||
if record_type in ['CNAME', 'SOA'] and len(record_data) > 1:
|
if record_type in ['CNAME', 'SOA'] and len(record_data) > 1:
|
||||||
module.fail_json(
|
module.fail_json(
|
||||||
msg = 'CNAME or SOA records cannot have more than one value, ' +
|
msg='CNAME or SOA records cannot have more than one value, ' +
|
||||||
"got: %s" % record_data,
|
"got: %s" % record_data,
|
||||||
changed = False
|
changed=False
|
||||||
)
|
)
|
||||||
|
|
||||||
# Google Cloud DNS does not support wildcard NS records.
|
# Google Cloud DNS does not support wildcard NS records.
|
||||||
if record_type == 'NS' and record_name[0] == '*':
|
if record_type == 'NS' and record_name[0] == '*':
|
||||||
module.fail_json(
|
module.fail_json(
|
||||||
msg = "wildcard NS records not allowed, got: %s" % record_name,
|
msg="wildcard NS records not allowed, got: %s" % record_name,
|
||||||
changed = False
|
changed=False
|
||||||
)
|
)
|
||||||
|
|
||||||
# Values for txt records must begin and end with a double quote.
|
# Values for txt records must begin and end with a double quote.
|
||||||
|
@ -607,9 +607,9 @@ def _sanity_check(module):
|
||||||
for value in record_data:
|
for value in record_data:
|
||||||
if value[0] != '"' and value[-1] != '"':
|
if value[0] != '"' and value[-1] != '"':
|
||||||
module.fail_json(
|
module.fail_json(
|
||||||
msg = 'TXT record_data must be enclosed in double quotes, ' +
|
msg='TXT record_data must be enclosed in double quotes, ' +
|
||||||
'got: %s' % value,
|
'got: %s' % value,
|
||||||
changed = False
|
changed=False
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@ -624,15 +624,15 @@ def _additional_sanity_checks(module, zone):
|
||||||
# CNAME records are not allowed to have the same name as the root domain.
|
# CNAME records are not allowed to have the same name as the root domain.
|
||||||
if record_type == 'CNAME' and record_name == zone.domain:
|
if record_type == 'CNAME' and record_name == zone.domain:
|
||||||
module.fail_json(
|
module.fail_json(
|
||||||
msg = 'CNAME records cannot match the zone name',
|
msg='CNAME records cannot match the zone name',
|
||||||
changed = False
|
changed=False
|
||||||
)
|
)
|
||||||
|
|
||||||
# The root domain must always have an NS record.
|
# The root domain must always have an NS record.
|
||||||
if record_type == 'NS' and record_name == zone.domain and state == 'absent':
|
if record_type == 'NS' and record_name == zone.domain and state == 'absent':
|
||||||
module.fail_json(
|
module.fail_json(
|
||||||
msg = 'cannot delete root NS records',
|
msg='cannot delete root NS records',
|
||||||
changed = False
|
changed=False
|
||||||
)
|
)
|
||||||
|
|
||||||
# Updating NS records with the name as the root domain is not allowed
|
# Updating NS records with the name as the root domain is not allowed
|
||||||
|
@ -640,16 +640,16 @@ def _additional_sanity_checks(module, zone):
|
||||||
# records cannot be removed.
|
# records cannot be removed.
|
||||||
if record_type == 'NS' and record_name == zone.domain and overwrite:
|
if record_type == 'NS' and record_name == zone.domain and overwrite:
|
||||||
module.fail_json(
|
module.fail_json(
|
||||||
msg = 'cannot update existing root NS records',
|
msg='cannot update existing root NS records',
|
||||||
changed = False
|
changed=False
|
||||||
)
|
)
|
||||||
|
|
||||||
# SOA records with names that don't match the root domain are not permitted
|
# SOA records with names that don't match the root domain are not permitted
|
||||||
# (and wouldn't make sense anyway).
|
# (and wouldn't make sense anyway).
|
||||||
if record_type == 'SOA' and record_name != zone.domain:
|
if record_type == 'SOA' and record_name != zone.domain:
|
||||||
module.fail_json(
|
module.fail_json(
|
||||||
msg = 'non-root SOA records are not permitted, got: %s' % record_name,
|
msg='non-root SOA records are not permitted, got: %s' % record_name,
|
||||||
changed = False
|
changed=False
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@ -661,26 +661,26 @@ def main():
|
||||||
"""Main function"""
|
"""Main function"""
|
||||||
|
|
||||||
module = AnsibleModule(
|
module = AnsibleModule(
|
||||||
argument_spec = dict(
|
argument_spec=dict(
|
||||||
state = dict(default='present', choices=['present', 'absent'], type='str'),
|
state=dict(default='present', choices=['present', 'absent'], type='str'),
|
||||||
record = dict(required=True, aliases=['name'], type='str'),
|
record=dict(required=True, aliases=['name'], type='str'),
|
||||||
zone = dict(type='str'),
|
zone=dict(type='str'),
|
||||||
zone_id = dict(type='str'),
|
zone_id=dict(type='str'),
|
||||||
type = dict(required=True, choices=SUPPORTED_RECORD_TYPES, type='str'),
|
type=dict(required=True, choices=SUPPORTED_RECORD_TYPES, type='str'),
|
||||||
record_data = dict(aliases=['value'], type='list'),
|
record_data=dict(aliases=['value'], type='list'),
|
||||||
ttl = dict(default=300, type='int'),
|
ttl=dict(default=300, type='int'),
|
||||||
overwrite = dict(default=False, type='bool'),
|
overwrite=dict(default=False, type='bool'),
|
||||||
service_account_email = dict(type='str'),
|
service_account_email=dict(type='str'),
|
||||||
pem_file = dict(type='path'),
|
pem_file=dict(type='path'),
|
||||||
credentials_file = dict(type='path'),
|
credentials_file=dict(type='path'),
|
||||||
project_id = dict(type='str')
|
project_id=dict(type='str')
|
||||||
),
|
),
|
||||||
required_if = [
|
required_if=[
|
||||||
('state', 'present', ['record_data']),
|
('state', 'present', ['record_data']),
|
||||||
('overwrite', False, ['record_data'])
|
('overwrite', False, ['record_data'])
|
||||||
],
|
],
|
||||||
required_one_of = [['zone', 'zone_id']],
|
required_one_of=[['zone', 'zone_id']],
|
||||||
supports_check_mode = True
|
supports_check_mode=True
|
||||||
)
|
)
|
||||||
|
|
||||||
_sanity_check(module)
|
_sanity_check(module)
|
||||||
|
@ -693,14 +693,14 @@ def main():
|
||||||
zone_id = module.params['zone_id']
|
zone_id = module.params['zone_id']
|
||||||
|
|
||||||
json_output = dict(
|
json_output = dict(
|
||||||
state = state,
|
state=state,
|
||||||
record = record_name,
|
record=record_name,
|
||||||
zone = zone_name,
|
zone=zone_name,
|
||||||
zone_id = zone_id,
|
zone_id=zone_id,
|
||||||
type = record_type,
|
type=record_type,
|
||||||
record_data = module.params['record_data'],
|
record_data=module.params['record_data'],
|
||||||
ttl = ttl,
|
ttl=ttl,
|
||||||
overwrite = module.boolean(module.params['overwrite'])
|
overwrite=module.boolean(module.params['overwrite'])
|
||||||
)
|
)
|
||||||
|
|
||||||
# Google Cloud DNS wants the trailing dot on all DNS names.
|
# Google Cloud DNS wants the trailing dot on all DNS names.
|
||||||
|
@ -718,13 +718,13 @@ def main():
|
||||||
zone = _get_zone(gcdns, zone_name, zone_id)
|
zone = _get_zone(gcdns, zone_name, zone_id)
|
||||||
if zone is None and zone_name is not None:
|
if zone is None and zone_name is not None:
|
||||||
module.fail_json(
|
module.fail_json(
|
||||||
msg = 'zone name was not found: %s' % zone_name,
|
msg='zone name was not found: %s' % zone_name,
|
||||||
changed = False
|
changed=False
|
||||||
)
|
)
|
||||||
elif zone is None and zone_id is not None:
|
elif zone is None and zone_id is not None:
|
||||||
module.fail_json(
|
module.fail_json(
|
||||||
msg = 'zone id was not found: %s' % zone_id,
|
msg='zone id was not found: %s' % zone_id,
|
||||||
changed = False
|
changed=False
|
||||||
)
|
)
|
||||||
|
|
||||||
# Populate the returns with the actual zone information.
|
# Populate the returns with the actual zone information.
|
||||||
|
@ -738,8 +738,8 @@ def main():
|
||||||
except InvalidRequestError:
|
except InvalidRequestError:
|
||||||
# We gave Google Cloud DNS an invalid DNS record name.
|
# We gave Google Cloud DNS an invalid DNS record name.
|
||||||
module.fail_json(
|
module.fail_json(
|
||||||
msg = 'record name is invalid: %s' % record_name,
|
msg='record name is invalid: %s' % record_name,
|
||||||
changed = False
|
changed=False
|
||||||
)
|
)
|
||||||
|
|
||||||
_additional_sanity_checks(module, zone)
|
_additional_sanity_checks(module, zone)
|
||||||
|
@ -752,20 +752,20 @@ def main():
|
||||||
diff['before_header'] = '<absent>'
|
diff['before_header'] = '<absent>'
|
||||||
else:
|
else:
|
||||||
diff['before'] = dict(
|
diff['before'] = dict(
|
||||||
record = record.data['name'],
|
record=record.data['name'],
|
||||||
type = record.data['type'],
|
type=record.data['type'],
|
||||||
record_data = record.data['rrdatas'],
|
record_data=record.data['rrdatas'],
|
||||||
ttl = record.data['ttl']
|
ttl=record.data['ttl']
|
||||||
)
|
)
|
||||||
diff['before_header'] = "%s:%s" % (record_type, record_name)
|
diff['before_header'] = "%s:%s" % (record_type, record_name)
|
||||||
|
|
||||||
# Create, remove, or modify the record.
|
# Create, remove, or modify the record.
|
||||||
if state == 'present':
|
if state == 'present':
|
||||||
diff['after'] = dict(
|
diff['after'] = dict(
|
||||||
record = record_name,
|
record=record_name,
|
||||||
type = record_type,
|
type=record_type,
|
||||||
record_data = module.params['record_data'],
|
record_data=module.params['record_data'],
|
||||||
ttl = ttl
|
ttl=ttl
|
||||||
)
|
)
|
||||||
diff['after_header'] = "%s:%s" % (record_type, record_name)
|
diff['after_header'] = "%s:%s" % (record_type, record_name)
|
||||||
|
|
||||||
|
|
|
@ -145,17 +145,18 @@ MINIMUM_LIBCLOUD_VERSION = '0.19.0'
|
||||||
PROVIDER = Provider.GOOGLE
|
PROVIDER = Provider.GOOGLE
|
||||||
|
|
||||||
# The URL used to verify ownership of a zone in Google Cloud DNS.
|
# The URL used to verify ownership of a zone in Google Cloud DNS.
|
||||||
ZONE_VERIFICATION_URL= 'https://www.google.com/webmasters/verification/'
|
ZONE_VERIFICATION_URL = 'https://www.google.com/webmasters/verification/'
|
||||||
|
|
||||||
################################################################################
|
################################################################################
|
||||||
# Functions
|
# Functions
|
||||||
################################################################################
|
################################################################################
|
||||||
|
|
||||||
|
|
||||||
def create_zone(module, gcdns, zone):
|
def create_zone(module, gcdns, zone):
|
||||||
"""Creates a new Google Cloud DNS zone."""
|
"""Creates a new Google Cloud DNS zone."""
|
||||||
|
|
||||||
description = module.params['description']
|
description = module.params['description']
|
||||||
extra = dict(description = description)
|
extra = dict(description=description)
|
||||||
zone_name = module.params['zone']
|
zone_name = module.params['zone']
|
||||||
|
|
||||||
# Google Cloud DNS wants the trailing dot on the domain name.
|
# Google Cloud DNS wants the trailing dot on the domain name.
|
||||||
|
@ -184,8 +185,8 @@ def create_zone(module, gcdns, zone):
|
||||||
# The zone name or a parameter might be completely invalid. This is
|
# The zone name or a parameter might be completely invalid. This is
|
||||||
# typically caused by an illegal DNS name (e.g. foo..com).
|
# typically caused by an illegal DNS name (e.g. foo..com).
|
||||||
module.fail_json(
|
module.fail_json(
|
||||||
msg = "zone name is not a valid DNS name: %s" % zone_name,
|
msg="zone name is not a valid DNS name: %s" % zone_name,
|
||||||
changed = False
|
changed=False
|
||||||
)
|
)
|
||||||
|
|
||||||
elif error.code == 'managedZoneDnsNameNotAvailable':
|
elif error.code == 'managedZoneDnsNameNotAvailable':
|
||||||
|
@ -193,8 +194,8 @@ def create_zone(module, gcdns, zone):
|
||||||
# names, such as TLDs, ccTLDs, or special domain names such as
|
# names, such as TLDs, ccTLDs, or special domain names such as
|
||||||
# example.com.
|
# example.com.
|
||||||
module.fail_json(
|
module.fail_json(
|
||||||
msg = "zone name is reserved or already in use: %s" % zone_name,
|
msg="zone name is reserved or already in use: %s" % zone_name,
|
||||||
changed = False
|
changed=False
|
||||||
)
|
)
|
||||||
|
|
||||||
elif error.code == 'verifyManagedZoneDnsNameOwnership':
|
elif error.code == 'verifyManagedZoneDnsNameOwnership':
|
||||||
|
@ -202,8 +203,8 @@ def create_zone(module, gcdns, zone):
|
||||||
# it. This occurs when a user attempts to create a zone which shares
|
# it. This occurs when a user attempts to create a zone which shares
|
||||||
# a domain name with a zone hosted elsewhere in Google Cloud DNS.
|
# a domain name with a zone hosted elsewhere in Google Cloud DNS.
|
||||||
module.fail_json(
|
module.fail_json(
|
||||||
msg = "ownership of zone %s needs to be verified at %s" % (zone_name, ZONE_VERIFICATION_URL),
|
msg="ownership of zone %s needs to be verified at %s" % (zone_name, ZONE_VERIFICATION_URL),
|
||||||
changed = False
|
changed=False
|
||||||
)
|
)
|
||||||
|
|
||||||
else:
|
else:
|
||||||
|
@ -226,8 +227,8 @@ def remove_zone(module, gcdns, zone):
|
||||||
# refuse to remove the zone.
|
# refuse to remove the zone.
|
||||||
if len(zone.list_records()) > 2:
|
if len(zone.list_records()) > 2:
|
||||||
module.fail_json(
|
module.fail_json(
|
||||||
msg = "zone is not empty and cannot be removed: %s" % zone.domain,
|
msg="zone is not empty and cannot be removed: %s" % zone.domain,
|
||||||
changed = False
|
changed=False
|
||||||
)
|
)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
|
@ -246,8 +247,8 @@ def remove_zone(module, gcdns, zone):
|
||||||
# the milliseconds between the check and the removal command,
|
# the milliseconds between the check and the removal command,
|
||||||
# records were added to the zone.
|
# records were added to the zone.
|
||||||
module.fail_json(
|
module.fail_json(
|
||||||
msg = "zone is not empty and cannot be removed: %s" % zone.domain,
|
msg="zone is not empty and cannot be removed: %s" % zone.domain,
|
||||||
changed = False
|
changed=False
|
||||||
)
|
)
|
||||||
|
|
||||||
else:
|
else:
|
||||||
|
@ -273,6 +274,7 @@ def _get_zone(gcdns, zone_name):
|
||||||
|
|
||||||
return found_zone
|
return found_zone
|
||||||
|
|
||||||
|
|
||||||
def _sanity_check(module):
|
def _sanity_check(module):
|
||||||
"""Run module sanity checks."""
|
"""Run module sanity checks."""
|
||||||
|
|
||||||
|
@ -281,40 +283,41 @@ def _sanity_check(module):
|
||||||
# Apache libcloud needs to be installed and at least the minimum version.
|
# Apache libcloud needs to be installed and at least the minimum version.
|
||||||
if not HAS_LIBCLOUD:
|
if not HAS_LIBCLOUD:
|
||||||
module.fail_json(
|
module.fail_json(
|
||||||
msg = 'This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
|
msg='This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
|
||||||
changed = False
|
changed=False
|
||||||
)
|
)
|
||||||
elif LooseVersion(LIBCLOUD_VERSION) < MINIMUM_LIBCLOUD_VERSION:
|
elif LooseVersion(LIBCLOUD_VERSION) < MINIMUM_LIBCLOUD_VERSION:
|
||||||
module.fail_json(
|
module.fail_json(
|
||||||
msg = 'This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
|
msg='This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
|
||||||
changed = False
|
changed=False
|
||||||
)
|
)
|
||||||
|
|
||||||
# Google Cloud DNS does not support the creation of TLDs.
|
# Google Cloud DNS does not support the creation of TLDs.
|
||||||
if '.' not in zone_name or len([label for label in zone_name.split('.') if label]) == 1:
|
if '.' not in zone_name or len([label for label in zone_name.split('.') if label]) == 1:
|
||||||
module.fail_json(
|
module.fail_json(
|
||||||
msg = 'cannot create top-level domain: %s' % zone_name,
|
msg='cannot create top-level domain: %s' % zone_name,
|
||||||
changed = False
|
changed=False
|
||||||
)
|
)
|
||||||
|
|
||||||
################################################################################
|
################################################################################
|
||||||
# Main
|
# Main
|
||||||
################################################################################
|
################################################################################
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
"""Main function"""
|
"""Main function"""
|
||||||
|
|
||||||
module = AnsibleModule(
|
module = AnsibleModule(
|
||||||
argument_spec = dict(
|
argument_spec=dict(
|
||||||
state = dict(default='present', choices=['present', 'absent'], type='str'),
|
state=dict(default='present', choices=['present', 'absent'], type='str'),
|
||||||
zone = dict(required=True, aliases=['name'], type='str'),
|
zone=dict(required=True, aliases=['name'], type='str'),
|
||||||
description = dict(default='', type='str'),
|
description=dict(default='', type='str'),
|
||||||
service_account_email = dict(type='str'),
|
service_account_email=dict(type='str'),
|
||||||
pem_file = dict(type='path'),
|
pem_file=dict(type='path'),
|
||||||
credentials_file = dict(type='path'),
|
credentials_file=dict(type='path'),
|
||||||
project_id = dict(type='str')
|
project_id=dict(type='str')
|
||||||
),
|
),
|
||||||
supports_check_mode = True
|
supports_check_mode=True
|
||||||
)
|
)
|
||||||
|
|
||||||
_sanity_check(module)
|
_sanity_check(module)
|
||||||
|
@ -327,9 +330,9 @@ def main():
|
||||||
zone_name = zone_name + '.'
|
zone_name = zone_name + '.'
|
||||||
|
|
||||||
json_output = dict(
|
json_output = dict(
|
||||||
state = state,
|
state=state,
|
||||||
zone = zone_name,
|
zone=zone_name,
|
||||||
description = module.params['description']
|
description=module.params['description']
|
||||||
)
|
)
|
||||||
|
|
||||||
# Build a connection object that was can use to connect with Google
|
# Build a connection object that was can use to connect with Google
|
||||||
|
@ -347,16 +350,16 @@ def main():
|
||||||
diff['before_header'] = '<absent>'
|
diff['before_header'] = '<absent>'
|
||||||
else:
|
else:
|
||||||
diff['before'] = dict(
|
diff['before'] = dict(
|
||||||
zone = zone.domain,
|
zone=zone.domain,
|
||||||
description = zone.extra['description']
|
description=zone.extra['description']
|
||||||
)
|
)
|
||||||
diff['before_header'] = zone_name
|
diff['before_header'] = zone_name
|
||||||
|
|
||||||
# Create or remove the zone.
|
# Create or remove the zone.
|
||||||
if state == 'present':
|
if state == 'present':
|
||||||
diff['after'] = dict(
|
diff['after'] = dict(
|
||||||
zone = zone_name,
|
zone=zone_name,
|
||||||
description = module.params['description']
|
description=module.params['description']
|
||||||
)
|
)
|
||||||
diff['after_header'] = zone_name
|
diff['after_header'] = zone_name
|
||||||
|
|
||||||
|
|
|
@ -377,7 +377,7 @@ EXAMPLES = """
|
||||||
- test-container-new-archive-destroyed-clone
|
- test-container-new-archive-destroyed-clone
|
||||||
"""
|
"""
|
||||||
|
|
||||||
RETURN="""
|
RETURN = """
|
||||||
lxc_container:
|
lxc_container:
|
||||||
description: container information
|
description: container information
|
||||||
returned: success
|
returned: success
|
||||||
|
@ -579,7 +579,7 @@ def create_script(command):
|
||||||
f.close()
|
f.close()
|
||||||
|
|
||||||
# Ensure the script is executable.
|
# Ensure the script is executable.
|
||||||
os.chmod(script_file, int('0700',8))
|
os.chmod(script_file, int('0700', 8))
|
||||||
|
|
||||||
# Output log file.
|
# Output log file.
|
||||||
stdout_file = os.fdopen(tempfile.mkstemp(prefix='lxc-attach-script-log')[0], 'ab')
|
stdout_file = os.fdopen(tempfile.mkstemp(prefix='lxc-attach-script-log')[0], 'ab')
|
||||||
|
@ -915,7 +915,7 @@ class LxcContainerManagement(object):
|
||||||
'ips': self.container.get_ips(),
|
'ips': self.container.get_ips(),
|
||||||
'state': self._get_state(),
|
'state': self._get_state(),
|
||||||
'init_pid': int(self.container.init_pid),
|
'init_pid': int(self.container.init_pid),
|
||||||
'name' : self.container_name,
|
'name': self.container_name,
|
||||||
}
|
}
|
||||||
|
|
||||||
def _unfreeze(self):
|
def _unfreeze(self):
|
||||||
|
@ -1365,7 +1365,7 @@ class LxcContainerManagement(object):
|
||||||
:type source_dir: ``str``
|
:type source_dir: ``str``
|
||||||
"""
|
"""
|
||||||
|
|
||||||
old_umask = os.umask(int('0077',8))
|
old_umask = os.umask(int('0077', 8))
|
||||||
|
|
||||||
archive_path = self.module.params.get('archive_path')
|
archive_path = self.module.params.get('archive_path')
|
||||||
if not os.path.isdir(archive_path):
|
if not os.path.isdir(archive_path):
|
||||||
|
@ -1750,7 +1750,7 @@ def main():
|
||||||
)
|
)
|
||||||
),
|
),
|
||||||
supports_check_mode=False,
|
supports_check_mode=False,
|
||||||
required_if = ([
|
required_if=([
|
||||||
('archive', True, ['archive_path'])
|
('archive', True, ['archive_path'])
|
||||||
]),
|
]),
|
||||||
)
|
)
|
||||||
|
|
|
@ -216,7 +216,7 @@ EXAMPLES = '''
|
||||||
flat: true
|
flat: true
|
||||||
'''
|
'''
|
||||||
|
|
||||||
RETURN='''
|
RETURN = '''
|
||||||
addresses:
|
addresses:
|
||||||
description: Mapping from the network device name to a list of IPv4 addresses in the container
|
description: Mapping from the network device name to a list of IPv4 addresses in the container
|
||||||
returned: when state is started or restarted
|
returned: when state is started or restarted
|
||||||
|
@ -328,7 +328,7 @@ class LXDContainerManagement(object):
|
||||||
return ANSIBLE_LXD_STATES[resp_json['metadata']['status']]
|
return ANSIBLE_LXD_STATES[resp_json['metadata']['status']]
|
||||||
|
|
||||||
def _change_state(self, action, force_stop=False):
|
def _change_state(self, action, force_stop=False):
|
||||||
body_json={'action': action, 'timeout': self.timeout}
|
body_json = {'action': action, 'timeout': self.timeout}
|
||||||
if force_stop:
|
if force_stop:
|
||||||
body_json['force'] = True
|
body_json['force'] = True
|
||||||
return self.client.do('PUT', '/1.0/containers/{0}/state'.format(self.name), body_json=body_json)
|
return self.client.do('PUT', '/1.0/containers/{0}/state'.format(self.name), body_json=body_json)
|
||||||
|
@ -527,6 +527,7 @@ class LXDContainerManagement(object):
|
||||||
fail_params['logs'] = e.kwargs['logs']
|
fail_params['logs'] = e.kwargs['logs']
|
||||||
self.module.fail_json(**fail_params)
|
self.module.fail_json(**fail_params)
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
"""Ansible Main module."""
|
"""Ansible Main module."""
|
||||||
|
|
||||||
|
@ -585,7 +586,7 @@ def main():
|
||||||
type='str',
|
type='str',
|
||||||
default='{}/.config/lxc/client.crt'.format(os.environ['HOME'])
|
default='{}/.config/lxc/client.crt'.format(os.environ['HOME'])
|
||||||
),
|
),
|
||||||
trust_password=dict( type='str', no_log=True )
|
trust_password=dict(type='str', no_log=True)
|
||||||
),
|
),
|
||||||
supports_check_mode=False,
|
supports_check_mode=False,
|
||||||
)
|
)
|
||||||
|
|
|
@ -347,6 +347,7 @@ failed = False
|
||||||
|
|
||||||
class RHEVConn(object):
|
class RHEVConn(object):
|
||||||
'Connection to RHEV-M'
|
'Connection to RHEV-M'
|
||||||
|
|
||||||
def __init__(self, module):
|
def __init__(self, module):
|
||||||
self.module = module
|
self.module = module
|
||||||
|
|
||||||
|
@ -726,11 +727,11 @@ class RHEVConn(object):
|
||||||
bond.append(ifacelist[slave])
|
bond.append(ifacelist[slave])
|
||||||
try:
|
try:
|
||||||
tmpiface = params.Bonding(
|
tmpiface = params.Bonding(
|
||||||
slaves = params.Slaves(host_nic = bond),
|
slaves=params.Slaves(host_nic=bond),
|
||||||
options = params.Options(
|
options=params.Options(
|
||||||
option = [
|
option=[
|
||||||
params.Option(name = 'miimon', value = '100'),
|
params.Option(name='miimon', value='100'),
|
||||||
params.Option(name = 'mode', value = '4')
|
params.Option(name='mode', value='4')
|
||||||
]
|
]
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
|
@ -741,16 +742,16 @@ class RHEVConn(object):
|
||||||
return False
|
return False
|
||||||
try:
|
try:
|
||||||
tmpnetwork = params.HostNIC(
|
tmpnetwork = params.HostNIC(
|
||||||
network = params.Network(name = iface['network']),
|
network=params.Network(name=iface['network']),
|
||||||
name = iface['name'],
|
name=iface['name'],
|
||||||
boot_protocol = iface['boot_protocol'],
|
boot_protocol=iface['boot_protocol'],
|
||||||
ip = params.IP(
|
ip=params.IP(
|
||||||
address = iface['ip'],
|
address=iface['ip'],
|
||||||
netmask = iface['netmask'],
|
netmask=iface['netmask'],
|
||||||
gateway = iface['gateway']
|
gateway=iface['gateway']
|
||||||
),
|
),
|
||||||
override_configuration = True,
|
override_configuration=True,
|
||||||
bonding = tmpiface)
|
bonding=tmpiface)
|
||||||
networklist.append(tmpnetwork)
|
networklist.append(tmpnetwork)
|
||||||
setMsg('Applying network ' + iface['name'])
|
setMsg('Applying network ' + iface['name'])
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
|
@ -760,13 +761,13 @@ class RHEVConn(object):
|
||||||
return False
|
return False
|
||||||
else:
|
else:
|
||||||
tmpnetwork = params.HostNIC(
|
tmpnetwork = params.HostNIC(
|
||||||
network = params.Network(name = iface['network']),
|
network=params.Network(name=iface['network']),
|
||||||
name = iface['name'],
|
name=iface['name'],
|
||||||
boot_protocol = iface['boot_protocol'],
|
boot_protocol=iface['boot_protocol'],
|
||||||
ip = params.IP(
|
ip=params.IP(
|
||||||
address = iface['ip'],
|
address=iface['ip'],
|
||||||
netmask = iface['netmask'],
|
netmask=iface['netmask'],
|
||||||
gateway = iface['gateway']
|
gateway=iface['gateway']
|
||||||
))
|
))
|
||||||
networklist.append(tmpnetwork)
|
networklist.append(tmpnetwork)
|
||||||
setMsg('Applying network ' + iface['name'])
|
setMsg('Applying network ' + iface['name'])
|
||||||
|
@ -828,8 +829,8 @@ class RHEVConn(object):
|
||||||
try:
|
try:
|
||||||
HOST.nics.setupnetworks(params.Action(
|
HOST.nics.setupnetworks(params.Action(
|
||||||
force=True,
|
force=True,
|
||||||
check_connectivity = False,
|
check_connectivity=False,
|
||||||
host_nics = params.HostNics(host_nic = networklist)
|
host_nics=params.HostNics(host_nic=networklist)
|
||||||
))
|
))
|
||||||
setMsg('nics are set')
|
setMsg('nics are set')
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
|
@ -1469,31 +1470,31 @@ def core(module):
|
||||||
def main():
|
def main():
|
||||||
global module
|
global module
|
||||||
module = AnsibleModule(
|
module = AnsibleModule(
|
||||||
argument_spec = dict(
|
argument_spec=dict(
|
||||||
state = dict(default='present', choices=['ping', 'present', 'absent', 'up', 'down', 'restarted', 'cd', 'info']),
|
state=dict(default='present', choices=['ping', 'present', 'absent', 'up', 'down', 'restarted', 'cd', 'info']),
|
||||||
user = dict(default="admin@internal"),
|
user=dict(default="admin@internal"),
|
||||||
password = dict(required=True, no_log=True),
|
password=dict(required=True, no_log=True),
|
||||||
server = dict(default="127.0.0.1"),
|
server=dict(default="127.0.0.1"),
|
||||||
port = dict(default="443"),
|
port=dict(default="443"),
|
||||||
insecure_api = dict(default=False, type='bool'),
|
insecure_api=dict(default=False, type='bool'),
|
||||||
name = dict(),
|
name=dict(),
|
||||||
image = dict(default=False),
|
image=dict(default=False),
|
||||||
datacenter = dict(default="Default"),
|
datacenter=dict(default="Default"),
|
||||||
type = dict(default="server", choices=['server', 'desktop', 'host']),
|
type=dict(default="server", choices=['server', 'desktop', 'host']),
|
||||||
cluster = dict(default=''),
|
cluster=dict(default=''),
|
||||||
vmhost = dict(default=False),
|
vmhost=dict(default=False),
|
||||||
vmcpu = dict(default="2"),
|
vmcpu=dict(default="2"),
|
||||||
vmmem = dict(default="1"),
|
vmmem=dict(default="1"),
|
||||||
disks = dict(),
|
disks=dict(),
|
||||||
osver = dict(default="rhel_6x64"),
|
osver=dict(default="rhel_6x64"),
|
||||||
ifaces = dict(aliases=['nics', 'interfaces']),
|
ifaces=dict(aliases=['nics', 'interfaces']),
|
||||||
timeout = dict(default=False),
|
timeout=dict(default=False),
|
||||||
mempol = dict(default="1"),
|
mempol=dict(default="1"),
|
||||||
vm_ha = dict(default=True),
|
vm_ha=dict(default=True),
|
||||||
cpu_share = dict(default="0"),
|
cpu_share=dict(default="0"),
|
||||||
boot_order = dict(default=["network", "hd"]),
|
boot_order=dict(default=["network", "hd"]),
|
||||||
del_prot = dict(default=True, type="bool"),
|
del_prot=dict(default=True, type="bool"),
|
||||||
cd_drive = dict(default=False)
|
cd_drive=dict(default=False)
|
||||||
),
|
),
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
|
@ -154,13 +154,13 @@ def get_service_name(module, stage):
|
||||||
def main():
|
def main():
|
||||||
module = AnsibleModule(
|
module = AnsibleModule(
|
||||||
argument_spec=dict(
|
argument_spec=dict(
|
||||||
service_path = dict(required=True, type='path'),
|
service_path=dict(required=True, type='path'),
|
||||||
state = dict(default='present', choices=['present', 'absent'], required=False),
|
state=dict(default='present', choices=['present', 'absent'], required=False),
|
||||||
functions = dict(type='list', required=False),
|
functions=dict(type='list', required=False),
|
||||||
region = dict(default='', required=False),
|
region=dict(default='', required=False),
|
||||||
stage = dict(default='', required=False),
|
stage=dict(default='', required=False),
|
||||||
deploy = dict(default=True, type='bool', required=False),
|
deploy=dict(default=True, type='bool', required=False),
|
||||||
serverless_bin_path = dict(required=False, type='path')
|
serverless_bin_path=dict(required=False, type='path')
|
||||||
),
|
),
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
|
@ -151,31 +151,32 @@ from ansible.module_utils._text import to_native
|
||||||
|
|
||||||
VIRT_FAILED = 1
|
VIRT_FAILED = 1
|
||||||
VIRT_SUCCESS = 0
|
VIRT_SUCCESS = 0
|
||||||
VIRT_UNAVAILABLE=2
|
VIRT_UNAVAILABLE = 2
|
||||||
|
|
||||||
ALL_COMMANDS = []
|
ALL_COMMANDS = []
|
||||||
ENTRY_COMMANDS = ['create', 'status', 'start', 'stop',
|
ENTRY_COMMANDS = ['create', 'status', 'start', 'stop',
|
||||||
'undefine', 'destroy', 'get_xml', 'define',
|
'undefine', 'destroy', 'get_xml', 'define',
|
||||||
'modify' ]
|
'modify']
|
||||||
HOST_COMMANDS = [ 'list_nets', 'facts', 'info' ]
|
HOST_COMMANDS = ['list_nets', 'facts', 'info']
|
||||||
ALL_COMMANDS.extend(ENTRY_COMMANDS)
|
ALL_COMMANDS.extend(ENTRY_COMMANDS)
|
||||||
ALL_COMMANDS.extend(HOST_COMMANDS)
|
ALL_COMMANDS.extend(HOST_COMMANDS)
|
||||||
|
|
||||||
ENTRY_STATE_ACTIVE_MAP = {
|
ENTRY_STATE_ACTIVE_MAP = {
|
||||||
0 : "inactive",
|
0: "inactive",
|
||||||
1 : "active"
|
1: "active"
|
||||||
}
|
}
|
||||||
|
|
||||||
ENTRY_STATE_AUTOSTART_MAP = {
|
ENTRY_STATE_AUTOSTART_MAP = {
|
||||||
0 : "no",
|
0: "no",
|
||||||
1 : "yes"
|
1: "yes"
|
||||||
}
|
}
|
||||||
|
|
||||||
ENTRY_STATE_PERSISTENT_MAP = {
|
ENTRY_STATE_PERSISTENT_MAP = {
|
||||||
0 : "no",
|
0: "no",
|
||||||
1 : "yes"
|
1: "yes"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
class EntryNotFound(Exception):
|
class EntryNotFound(Exception):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
@ -245,7 +246,7 @@ class LibvirtConnection(object):
|
||||||
if host is None:
|
if host is None:
|
||||||
# add the host
|
# add the host
|
||||||
if not self.module.check_mode:
|
if not self.module.check_mode:
|
||||||
res = network.update (libvirt.VIR_NETWORK_UPDATE_COMMAND_ADD_LAST,
|
res = network.update(libvirt.VIR_NETWORK_UPDATE_COMMAND_ADD_LAST,
|
||||||
libvirt.VIR_NETWORK_SECTION_IP_DHCP_HOST,
|
libvirt.VIR_NETWORK_SECTION_IP_DHCP_HOST,
|
||||||
-1, xml, libvirt.VIR_NETWORK_UPDATE_AFFECT_CURRENT)
|
-1, xml, libvirt.VIR_NETWORK_UPDATE_AFFECT_CURRENT)
|
||||||
else:
|
else:
|
||||||
|
@ -259,7 +260,7 @@ class LibvirtConnection(object):
|
||||||
return False
|
return False
|
||||||
else:
|
else:
|
||||||
if not self.module.check_mode:
|
if not self.module.check_mode:
|
||||||
res = network.update (libvirt.VIR_NETWORK_UPDATE_COMMAND_MODIFY,
|
res = network.update(libvirt.VIR_NETWORK_UPDATE_COMMAND_MODIFY,
|
||||||
libvirt.VIR_NETWORK_SECTION_IP_DHCP_HOST,
|
libvirt.VIR_NETWORK_SECTION_IP_DHCP_HOST,
|
||||||
-1, xml, libvirt.VIR_NETWORK_UPDATE_AFFECT_CURRENT)
|
                -1, xml, libvirt.VIR_NETWORK_UPDATE_AFFECT_CURRENT)
        else:

@@ -286,18 +287,18 @@ class LibvirtConnection(object):

    def get_status2(self, entry):
        state = entry.isActive()
-        return ENTRY_STATE_ACTIVE_MAP.get(state,"unknown")
+        return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown")

    def get_status(self, entryid):
        if not self.module.check_mode:
            state = self.find_entry(entryid).isActive()
-            return ENTRY_STATE_ACTIVE_MAP.get(state,"unknown")
+            return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown")
        else:
            try:
                state = self.find_entry(entryid).isActive()
-                return ENTRY_STATE_ACTIVE_MAP.get(state,"unknown")
+                return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown")
            except:
-                return ENTRY_STATE_ACTIVE_MAP.get("inactive","unknown")
+                return ENTRY_STATE_ACTIVE_MAP.get("inactive", "unknown")

    def get_uuid(self, entryid):
        return self.find_entry(entryid).UUIDString()

@@ -331,7 +332,7 @@ class LibvirtConnection(object):

    def get_autostart(self, entryid):
        state = self.find_entry(entryid).autostart()
-        return ENTRY_STATE_AUTOSTART_MAP.get(state,"unknown")
+        return ENTRY_STATE_AUTOSTART_MAP.get(state, "unknown")

    def get_autostart2(self, entryid):
        if not self.module.check_mode:

@@ -358,7 +359,7 @@ class LibvirtConnection(object):

    def get_persistent(self, entryid):
        state = self.find_entry(entryid).isPersistent()
-        return ENTRY_STATE_PERSISTENT_MAP.get(state,"unknown")
+        return ENTRY_STATE_PERSISTENT_MAP.get(state, "unknown")

    def get_dhcp_leases(self, entryid):
        network = self.find_entry(entryid)

@@ -398,7 +399,7 @@ class VirtNetwork(object):
        results = []
        for entry in self.list_nets():
            state_blurb = self.conn.get_status(entry)
-            results.append("%s %s" % (entry,state_blurb))
+            results.append("%s %s" % (entry, state_blurb))
        return results

    def autostart(self, entryid):

@@ -494,33 +495,33 @@ def core(module):
    if state and command == 'list_nets':
        res = v.list_nets(state=state)
        if not isinstance(res, dict):
-            res = { command: res }
+            res = {command: res}
        return VIRT_SUCCESS, res

    if state:
        if not name:
-            module.fail_json(msg = "state change requires a specified name")
+            module.fail_json(msg="state change requires a specified name")

        res['changed'] = False
-        if state in [ 'active' ]:
+        if state in ['active']:
            if v.status(name) is not 'active':
                res['changed'] = True
                res['msg'] = v.start(name)
-        elif state in [ 'present' ]:
+        elif state in ['present']:
            try:
                v.get_net(name)
            except EntryNotFound:
                if not xml:
-                    module.fail_json(msg = "network '" + name + "' not present, but xml not specified")
+                    module.fail_json(msg="network '" + name + "' not present, but xml not specified")
                v.define(name, xml)
                res = {'changed': True, 'created': name}
-        elif state in [ 'inactive' ]:
+        elif state in ['inactive']:
            entries = v.list_nets()
            if name in entries:
                if v.status(name) is not 'inactive':
                    res['changed'] = True
                    res['msg'] = v.destroy(name)
-        elif state in [ 'undefined', 'absent' ]:
+        elif state in ['undefined', 'absent']:
            entries = v.list_nets()
            if name in entries:
                if v.status(name) is not 'inactive':

@@ -535,10 +536,10 @@ def core(module):
    if command:
        if command in ENTRY_COMMANDS:
            if not name:
-                module.fail_json(msg = "%s requires 1 argument: name" % command)
+                module.fail_json(msg="%s requires 1 argument: name" % command)
            if command in ('define', 'modify'):
                if not xml:
-                    module.fail_json(msg = command+" requires xml argument")
+                    module.fail_json(msg=command + " requires xml argument")
                try:
                    v.get_net(name)
                except EntryNotFound:

@@ -551,13 +552,13 @@ def core(module):
                return VIRT_SUCCESS, res
            res = getattr(v, command)(name)
            if not isinstance(res, dict):
-                res = { command: res }
+                res = {command: res}
            return VIRT_SUCCESS, res

        elif hasattr(v, command):
            res = getattr(v, command)()
            if not isinstance(res, dict):
-                res = { command: res }
+                res = {command: res}
            return VIRT_SUCCESS, res

        else:

@@ -565,7 +566,7 @@ def core(module):

    if autostart is not None:
        if not name:
-            module.fail_json(msg = "state change requires a specified name")
+            module.fail_json(msg="state change requires a specified name")

        res['changed'] = False
        if autostart:

@@ -584,16 +585,16 @@ def core(module):

def main():

-    module = AnsibleModule (
-        argument_spec = dict(
-            name = dict(aliases=['network']),
-            state = dict(choices=['active', 'inactive', 'present', 'absent']),
-            command = dict(choices=ALL_COMMANDS),
-            uri = dict(default='qemu:///system'),
-            xml = dict(),
-            autostart = dict(type='bool')
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(aliases=['network']),
+            state=dict(choices=['active', 'inactive', 'present', 'absent']),
+            command=dict(choices=ALL_COMMANDS),
+            uri=dict(default='qemu:///system'),
+            xml=dict(),
+            autostart=dict(type='bool')
        ),
-        supports_check_mode = True
+        supports_check_mode=True
    )

    if not HAS_VIRT:
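For readers unfamiliar with the pycodestyle checks driving the hunks above: the `msg = "..."` and `name = dict(...)` edits correspond to E251 (unexpected spaces around a keyword or parameter equals). PEP 8 only drops the spaces when `=` marks a keyword argument or a default value; an ordinary assignment statement keeps them. A minimal illustrative sketch, not part of the commit (the function name is made up):

    def fail(msg='error'):        # default value: no spaces around '='
        return {'msg': msg}

    result = fail(msg='boom')     # keyword argument: no spaces
    changed = True                # plain assignment: spaces stay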
@@ -165,49 +165,49 @@ from ansible.module_utils.basic import AnsibleModule

VIRT_FAILED = 1
VIRT_SUCCESS = 0
-VIRT_UNAVAILABLE=2
+VIRT_UNAVAILABLE = 2

ALL_COMMANDS = []
ENTRY_COMMANDS = ['create', 'status', 'start', 'stop', 'build', 'delete',
                  'undefine', 'destroy', 'get_xml', 'define', 'refresh']
-HOST_COMMANDS = [ 'list_pools', 'facts', 'info' ]
+HOST_COMMANDS = ['list_pools', 'facts', 'info']
ALL_COMMANDS.extend(ENTRY_COMMANDS)
ALL_COMMANDS.extend(HOST_COMMANDS)

ENTRY_STATE_ACTIVE_MAP = {
-    0 : "inactive",
-    1 : "active"
+    0: "inactive",
+    1: "active"
}

ENTRY_STATE_AUTOSTART_MAP = {
-    0 : "no",
-    1 : "yes"
+    0: "no",
+    1: "yes"
}

ENTRY_STATE_PERSISTENT_MAP = {
-    0 : "no",
-    1 : "yes"
+    0: "no",
+    1: "yes"
}

ENTRY_STATE_INFO_MAP = {
-    0 : "inactive",
-    1 : "building",
-    2 : "running",
-    3 : "degraded",
-    4 : "inaccessible"
+    0: "inactive",
+    1: "building",
+    2: "running",
+    3: "degraded",
+    4: "inaccessible"
}

ENTRY_BUILD_FLAGS_MAP = {
-    "new" : 0,
-    "repair" : 1,
-    "resize" : 2,
-    "no_overwrite" : 4,
-    "overwrite" : 8
+    "new": 0,
+    "repair": 1,
+    "resize": 2,
+    "no_overwrite": 4,
+    "overwrite": 8
}

ENTRY_DELETE_FLAGS_MAP = {
-    "normal" : 0,
-    "zeroed" : 1
+    "normal": 0,
+    "zeroed": 1
}

ALL_MODES = []

@@ -283,18 +283,18 @@ class LibvirtConnection(object):

    def get_status2(self, entry):
        state = entry.isActive()
-        return ENTRY_STATE_ACTIVE_MAP.get(state,"unknown")
+        return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown")

    def get_status(self, entryid):
        if not self.module.check_mode:
            state = self.find_entry(entryid).isActive()
-            return ENTRY_STATE_ACTIVE_MAP.get(state,"unknown")
+            return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown")
        else:
            try:
                state = self.find_entry(entryid).isActive()
-                return ENTRY_STATE_ACTIVE_MAP.get(state,"unknown")
+                return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown")
            except:
-                return ENTRY_STATE_ACTIVE_MAP.get("inactive","unknown")
+                return ENTRY_STATE_ACTIVE_MAP.get("inactive", "unknown")

    def get_uuid(self, entryid):
        return self.find_entry(entryid).UUIDString()

@@ -378,7 +378,7 @@ class LibvirtConnection(object):

    def get_autostart(self, entryid):
        state = self.find_entry(entryid).autostart()
-        return ENTRY_STATE_AUTOSTART_MAP.get(state,"unknown")
+        return ENTRY_STATE_AUTOSTART_MAP.get(state, "unknown")

    def get_autostart2(self, entryid):
        if not self.module.check_mode:

@@ -405,7 +405,7 @@ class LibvirtConnection(object):

    def get_persistent(self, entryid):
        state = self.find_entry(entryid).isPersistent()
-        return ENTRY_STATE_PERSISTENT_MAP.get(state,"unknown")
+        return ENTRY_STATE_PERSISTENT_MAP.get(state, "unknown")

    def define_from_xml(self, entryid, xml):
        if not self.module.check_mode:

@@ -441,7 +441,7 @@ class VirtStoragePool(object):
        results = []
        for entry in self.list_pools():
            state_blurb = self.conn.get_status(entry)
-            results.append("%s %s" % (entry,state_blurb))
+            results.append("%s %s" % (entry, state_blurb))
        return results

    def autostart(self, entryid):

@@ -478,10 +478,10 @@ class VirtStoragePool(object):
        return self.conn.define_from_xml(entryid, xml)

    def build(self, entryid, flags):
-        return self.conn.build(entryid, ENTRY_BUILD_FLAGS_MAP.get(flags,0))
+        return self.conn.build(entryid, ENTRY_BUILD_FLAGS_MAP.get(flags, 0))

    def delete(self, entryid, flags):
-        return self.conn.delete(entryid, ENTRY_DELETE_FLAGS_MAP.get(flags,0))
+        return self.conn.delete(entryid, ENTRY_DELETE_FLAGS_MAP.get(flags, 0))

    def refresh(self, entryid):
        return self.conn.refresh(entryid)

@@ -501,10 +501,10 @@ class VirtStoragePool(object):
            # assume the other end of the xmlrpc connection can figure things
            # out or doesn't care.
            results[entry] = {
-                "status" : ENTRY_STATE_INFO_MAP.get(data[0],"unknown"),
-                "size_total" : str(data[1]),
-                "size_used" : str(data[2]),
-                "size_available" : str(data[3]),
+                "status": ENTRY_STATE_INFO_MAP.get(data[0], "unknown"),
+                "size_total": str(data[1]),
+                "size_used": str(data[2]),
+                "size_available": str(data[3]),
            }
            results[entry]["autostart"] = self.conn.get_autostart(entry)
            results[entry]["persistent"] = self.conn.get_persistent(entry)

@@ -569,40 +569,40 @@ def core(module):
    if state and command == 'list_pools':
        res = v.list_pools(state=state)
        if not isinstance(res, dict):
-            res = { command: res }
+            res = {command: res}
        return VIRT_SUCCESS, res

    if state:
        if not name:
-            module.fail_json(msg = "state change requires a specified name")
+            module.fail_json(msg="state change requires a specified name")

        res['changed'] = False
-        if state in [ 'active' ]:
+        if state in ['active']:
            if v.status(name) is not 'active':
                res['changed'] = True
                res['msg'] = v.start(name)
-        elif state in [ 'present' ]:
+        elif state in ['present']:
            try:
                v.get_pool(name)
            except EntryNotFound:
                if not xml:
-                    module.fail_json(msg = "storage pool '" + name + "' not present, but xml not specified")
+                    module.fail_json(msg="storage pool '" + name + "' not present, but xml not specified")
                v.define(name, xml)
                res = {'changed': True, 'created': name}
-        elif state in [ 'inactive' ]:
+        elif state in ['inactive']:
            entries = v.list_pools()
            if name in entries:
                if v.status(name) is not 'inactive':
                    res['changed'] = True
                    res['msg'] = v.destroy(name)
-        elif state in [ 'undefined', 'absent' ]:
+        elif state in ['undefined', 'absent']:
            entries = v.list_pools()
            if name in entries:
                if v.status(name) is not 'inactive':
                    v.destroy(name)
                    res['changed'] = True
                    res['msg'] = v.undefine(name)
-        elif state in [ 'deleted' ]:
+        elif state in ['deleted']:
            entries = v.list_pools()
            if name in entries:
                if v.status(name) is not 'inactive':

@@ -618,10 +618,10 @@ def core(module):
    if command:
        if command in ENTRY_COMMANDS:
            if not name:
-                module.fail_json(msg = "%s requires 1 argument: name" % command)
+                module.fail_json(msg="%s requires 1 argument: name" % command)
            if command == 'define':
                if not xml:
-                    module.fail_json(msg = "define requires xml argument")
+                    module.fail_json(msg="define requires xml argument")
                try:
                    v.get_pool(name)
                except EntryNotFound:

@@ -631,22 +631,22 @@ def core(module):
            elif command == 'build':
                res = v.build(name, mode)
                if not isinstance(res, dict):
-                    res = { 'changed': True, command: res }
+                    res = {'changed': True, command: res}
                return VIRT_SUCCESS, res
            elif command == 'delete':
                res = v.delete(name, mode)
                if not isinstance(res, dict):
-                    res = { 'changed': True, command: res }
+                    res = {'changed': True, command: res}
                return VIRT_SUCCESS, res
            res = getattr(v, command)(name)
            if not isinstance(res, dict):
-                res = { command: res }
+                res = {command: res}
            return VIRT_SUCCESS, res

        elif hasattr(v, command):
            res = getattr(v, command)()
            if not isinstance(res, dict):
-                res = { command: res }
+                res = {command: res}
            return VIRT_SUCCESS, res

        else:

@@ -654,7 +654,7 @@ def core(module):

    if autostart is not None:
        if not name:
-            module.fail_json(msg = "state change requires a specified name")
+            module.fail_json(msg="state change requires a specified name")

        res['changed'] = False
        if autostart:

@@ -673,17 +673,17 @@ def core(module):

def main():

-    module = AnsibleModule (
-        argument_spec = dict(
-            name = dict(aliases=['pool']),
-            state = dict(choices=['active', 'inactive', 'present', 'absent', 'undefined', 'deleted']),
-            command = dict(choices=ALL_COMMANDS),
-            uri = dict(default='qemu:///system'),
-            xml = dict(),
-            autostart = dict(type='bool'),
-            mode = dict(choices=ALL_MODES),
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(aliases=['pool']),
+            state=dict(choices=['active', 'inactive', 'present', 'absent', 'undefined', 'deleted']),
+            command=dict(choices=ALL_COMMANDS),
+            uri=dict(default='qemu:///system'),
+            xml=dict(),
+            autostart=dict(type='bool'),
+            mode=dict(choices=ALL_MODES),
        ),
-        supports_check_mode = True
+        supports_check_mode=True
    )

    if not HAS_VIRT:
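Note that autopep8 only rewrites whitespace and layout; it does not change behaviour, so constructs such as `if v.status(name) is not 'active':` in the two virt hunks above are left exactly as they were, even though `is not` compares identity rather than value. A hedged sketch of the value comparison those checks presumably intend, purely as an illustration and not a change made by this commit:

    def is_active(status):
        # equality, not identity: works for any string value of 'status'
        return status == 'active'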
@@ -137,12 +137,14 @@ def change_keys(recs, key='uuid', filter_func=None):

    return new_recs


def get_host(session):
    """Get the host"""
    host_recs = session.xenapi.host.get_all()
    # We only have one host, so just return its entry
    return session.xenapi.host.get_record(host_recs[0])


def get_vms(session):
    xs_vms = {}
    recs = session.xenapi.VM.get_all()

@@ -165,6 +167,7 @@ def get_srs(session):
        xs_srs[sr['name_label']] = sr
    return xs_srs


def main():
    module = AnsibleModule({})
@@ -137,20 +137,20 @@ from ansible.module_utils.openstack import openstack_full_argument_spec, opensta
def main():

    argument_spec = openstack_full_argument_spec(
-        name = dict(required=True),
-        id = dict(default=None),
-        checksum = dict(default=None),
-        disk_format = dict(default='qcow2', choices=['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso', 'vhdx', 'ploop']),
-        container_format = dict(default='bare', choices=['ami', 'aki', 'ari', 'bare', 'ovf', 'ova', 'docker']),
-        owner = dict(default=None),
-        min_disk = dict(type='int', default=0),
-        min_ram = dict(type='int', default=0),
-        is_public = dict(type='bool', default=False),
-        filename = dict(default=None),
-        ramdisk = dict(default=None),
-        kernel = dict(default=None),
-        properties = dict(type='dict', default={}),
-        state = dict(default='present', choices=['absent', 'present']),
+        name=dict(required=True),
+        id=dict(default=None),
+        checksum=dict(default=None),
+        disk_format=dict(default='qcow2', choices=['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso', 'vhdx', 'ploop']),
+        container_format=dict(default='bare', choices=['ami', 'aki', 'ari', 'bare', 'ovf', 'ova', 'docker']),
+        owner=dict(default=None),
+        min_disk=dict(type='int', default=0),
+        min_ram=dict(type='int', default=0),
+        is_public=dict(type='bool', default=False),
+        filename=dict(default=None),
+        ramdisk=dict(default=None),
+        kernel=dict(default=None),
+        properties=dict(type='dict', default={}),
+        state=dict(default='present', choices=['absent', 'present']),
    )
    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec, **module_kwargs)

@@ -163,13 +163,13 @@ def main():

    changed = False
    if module.params['checksum']:
-        image = cloud.get_image(name_or_id=None,filters={'checksum': module.params['checksum']})
+        image = cloud.get_image(name_or_id=None, filters={'checksum': module.params['checksum']})
    else:
        image = cloud.get_image(name_or_id=module.params['name'])

    if module.params['state'] == 'present':
        if not image:
-            kwargs={}
+            kwargs = {}
            if module.params['id'] is not None:
                kwargs['id'] = module.params['id']
            image = cloud.create_image(
@@ -109,10 +109,10 @@ def _system_state_change(module, keypair):

def main():
    argument_spec = openstack_full_argument_spec(
-        name = dict(required=True),
-        public_key = dict(default=None),
-        public_key_file = dict(default=None),
-        state = dict(default='present',
+        name=dict(required=True),
+        public_key=dict(default=None),
+        public_key_file=dict(default=None),
+        state=dict(default='present',
                   choices=['absent', 'present']),
    )

@@ -194,21 +194,21 @@ def _system_state_change(module, flavor):

def main():
    argument_spec = openstack_full_argument_spec(
-        state = dict(required=False, default='present',
+        state=dict(required=False, default='present',
                   choices=['absent', 'present']),
-        name = dict(required=False),
+        name=dict(required=False),

        # required when state is 'present'
-        ram = dict(required=False, type='int'),
-        vcpus = dict(required=False, type='int'),
-        disk = dict(required=False, type='int'),
+        ram=dict(required=False, type='int'),
+        vcpus=dict(required=False, type='int'),
+        disk=dict(required=False, type='int'),

-        ephemeral = dict(required=False, default=0, type='int'),
-        swap = dict(required=False, default=0, type='int'),
-        rxtx_factor = dict(required=False, default=1.0, type='float'),
-        is_public = dict(required=False, default=True, type='bool'),
-        flavorid = dict(required=False, default="auto"),
-        extra_specs = dict(required=False, default=None, type='dict'),
+        ephemeral=dict(required=False, default=0, type='int'),
+        swap=dict(required=False, default=0, type='int'),
+        rxtx_factor=dict(required=False, default=1.0, type='float'),
+        is_public=dict(required=False, default=True, type='bool'),
+        flavorid=dict(required=False, default="auto"),
+        extra_specs=dict(required=False, default=None, type='dict'),
    )

    module_kwargs = openstack_module_kwargs()

@@ -247,9 +247,9 @@ def main():
            rxtx_factor=module.params['rxtx_factor'],
            is_public=module.params['is_public']
        )
-        changed=True
+        changed = True
    else:
-        changed=False
+        changed = False

        old_extra_specs = flavor['extra_specs']
        new_extra_specs = dict([(k, str(v)) for k, v in extra_specs.items()])
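The `changed=True` fix in the flavor hunk above is E225 (missing whitespace around operator): written as a statement, `changed = True` is an assignment and takes spaces, while the same spelling inside a call such as `module.exit_json(changed=changed)` stays space-free because there it is a keyword argument. A small self-contained sketch of the distinction, for illustration only:

    changed = True                  # assignment statement: spaces around '='
    result = dict(changed=changed)  # keyword argument: no spaces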
@@ -306,14 +306,17 @@ def _get_volume_quotas(cloud, project):

    return cloud.get_volume_quotas(project)


def _get_network_quotas(cloud, project):

    return cloud.get_network_quotas(project)


def _get_compute_quotas(cloud, project):

    return cloud.get_compute_quotas(project)


def _get_quotas(module, cloud, project):

    quota = {}

@@ -334,6 +337,7 @@ def _get_quotas(module, cloud, project):

    return quota


def _scrub_results(quota):

    filter_attr = [

@@ -350,6 +354,7 @@ def _scrub_results(quota):

    return quota


def _system_state_change_details(module, project_quota_output):

    quota_change_request = {}

@@ -368,6 +373,7 @@ def _system_state_change_details(module, project_quota_output):

    return (changes_required, quota_change_request)


def _system_state_change(module, project_quota_output):
    """
    Determine if changes are required to the current project quota.

@@ -386,6 +392,7 @@ def _system_state_change(module, project_quota_output):
    else:
        return False


def main():

    argument_spec = openstack_full_argument_spec(

@@ -437,7 +444,7 @@ def main():
    cloud_params = dict(module.params)
    cloud = shade.operator_cloud(**cloud_params)

-    #In order to handle the different volume types we update module params after.
+    # In order to handle the different volume types we update module params after.
    dynamic_types = [
        'gigabytes_types',
        'snapshots_types',

@@ -448,22 +455,22 @@ def main():
        for k, v in module.params[dynamic_type].items():
            module.params[k] = int(v)

-    #Get current quota values
+    # Get current quota values
    project_quota_output = _get_quotas(module, cloud, cloud_params['name'])
    changes_required = False

    if module.params['state'] == "absent":
-        #If a quota state is set to absent we should assume there will be changes.
-        #The default quota values are not accessible so we can not determine if
-        #no changes will occur or not.
+        # If a quota state is set to absent we should assume there will be changes.
+        # The default quota values are not accessible so we can not determine if
+        # no changes will occur or not.
        if module.check_mode:
            module.exit_json(changed=True)

-        #Calling delete_network_quotas when a quota has not been set results
-        #in an error, according to the shade docs it should return the
-        #current quota.
-        #The following error string is returned:
-        #network client call failed: Quota for tenant 69dd91d217e949f1a0b35a4b901741dc could not be found.
+        # Calling delete_network_quotas when a quota has not been set results
+        # in an error, according to the shade docs it should return the
+        # current quota.
+        # The following error string is returned:
+        # network client call failed: Quota for tenant 69dd91d217e949f1a0b35a4b901741dc could not be found.
        neutron_msg1 = "network client call failed: Quota for tenant"
        neutron_msg2 = "could not be found"

@@ -495,7 +502,7 @@ def main():
            quota_call = getattr(cloud, 'set_%s_quotas' % (quota_type))
            quota_call(cloud_params['name'], **quota_change_request[quota_type])

-    #Get quota state post changes for validation
+    # Get quota state post changes for validation
    project_quota_update = _get_quotas(module, cloud, cloud_params['name'])

    if project_quota_output == project_quota_update:
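The comment edits in the quota hunks above are E265 (block comment should start with '# '): a block comment needs a single `#` followed by one space before the text. A trivial sketch, for illustration only:

    #Get current quota values     # flagged by pycodestyle as E265
    # Get current quota values    # accepted
    quota = {}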
@@ -270,20 +270,20 @@ def _system_state_change(module, secgroup, remotegroup):

def main():
    argument_spec = openstack_full_argument_spec(
-        security_group = dict(required=True),
+        security_group=dict(required=True),
        # NOTE(Shrews): None is an acceptable protocol value for
        # Neutron, but Nova will balk at this.
-        protocol = dict(default=None,
+        protocol=dict(default=None,
                      choices=[None, 'tcp', 'udp', 'icmp', '112']),
-        port_range_min = dict(required=False, type='int'),
-        port_range_max = dict(required=False, type='int'),
-        remote_ip_prefix = dict(required=False, default=None),
-        remote_group = dict(required=False, default=None),
-        ethertype = dict(default='IPv4',
+        port_range_min=dict(required=False, type='int'),
+        port_range_max=dict(required=False, type='int'),
+        remote_ip_prefix=dict(required=False, default=None),
+        remote_group=dict(required=False, default=None),
+        ethertype=dict(default='IPv4',
                       choices=['IPv4', 'IPv6']),
-        direction = dict(default='ingress',
+        direction=dict(default='ingress',
                       choices=['egress', 'ingress']),
-        state = dict(default='present',
+        state=dict(default='present',
                   choices=['absent', 'present']),
    )

@@ -312,7 +312,7 @@ def main():
    if remote_group:
        remotegroup = cloud.get_security_group(remote_group)
    else:
-        remotegroup = { 'id' : None }
+        remotegroup = {'id': None}

    if module.check_mode:
        module.exit_json(changed=_system_state_change(module, secgroup, remotegroup))
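The `remotegroup` fix just above combines three pycodestyle checks at once: E201 (whitespace after '{'), E202 (whitespace before '}') and E203 (whitespace before ':'); the same rules produced the dictionary and list literal edits in the virt constants earlier in this diff. Illustrative sketch only:

    remotegroup = { 'id' : None }   # E201, E202, E203
    remotegroup = {'id': None}      # clean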
@@ -433,6 +433,7 @@ def _parse_nics(nics):
        else:
            yield net


def _network_args(module, cloud):
    args = []
    nics = module.params['nics']

@@ -685,31 +686,31 @@ def _get_server_state(module, cloud):
def main():

    argument_spec = openstack_full_argument_spec(
-        name = dict(required=True),
-        image = dict(default=None),
-        image_exclude = dict(default='(deprecated)'),
-        flavor = dict(default=None),
-        flavor_ram = dict(default=None, type='int'),
-        flavor_include = dict(default=None),
-        key_name = dict(default=None),
-        security_groups = dict(default=['default'], type='list'),
-        network = dict(default=None),
-        nics = dict(default=[], type='list'),
-        meta = dict(default=None, type='raw'),
-        userdata = dict(default=None, aliases=['user_data']),
-        config_drive = dict(default=False, type='bool'),
-        auto_ip = dict(default=True, type='bool', aliases=['auto_floating_ip', 'public_ip']),
-        floating_ips = dict(default=None, type='list'),
-        floating_ip_pools = dict(default=None, type='list'),
-        volume_size = dict(default=False, type='int'),
-        boot_from_volume = dict(default=False, type='bool'),
-        boot_volume = dict(default=None, aliases=['root_volume']),
-        terminate_volume = dict(default=False, type='bool'),
-        volumes = dict(default=[], type='list'),
-        scheduler_hints = dict(default=None, type='dict'),
-        state = dict(default='present', choices=['absent', 'present']),
-        delete_fip = dict(default=False, type='bool'),
-        reuse_ips = dict(default=True, type='bool'),
+        name=dict(required=True),
+        image=dict(default=None),
+        image_exclude=dict(default='(deprecated)'),
+        flavor=dict(default=None),
+        flavor_ram=dict(default=None, type='int'),
+        flavor_include=dict(default=None),
+        key_name=dict(default=None),
+        security_groups=dict(default=['default'], type='list'),
+        network=dict(default=None),
+        nics=dict(default=[], type='list'),
+        meta=dict(default=None, type='raw'),
+        userdata=dict(default=None, aliases=['user_data']),
+        config_drive=dict(default=False, type='bool'),
+        auto_ip=dict(default=True, type='bool', aliases=['auto_floating_ip', 'public_ip']),
+        floating_ips=dict(default=None, type='list'),
+        floating_ip_pools=dict(default=None, type='list'),
+        volume_size=dict(default=False, type='int'),
+        boot_from_volume=dict(default=False, type='bool'),
+        boot_volume=dict(default=None, aliases=['root_volume']),
+        terminate_volume=dict(default=False, type='bool'),
+        volumes=dict(default=[], type='list'),
+        scheduler_hints=dict(default=None, type='dict'),
+        state=dict(default='present', choices=['absent', 'present']),
+        delete_fip=dict(default=False, type='bool'),
+        reuse_ips=dict(default=True, type='bool'),
    )
    module_kwargs = openstack_module_kwargs(
        mutually_exclusive=[
@@ -1,5 +1,5 @@
#!/usr/bin/python
-#coding: utf-8 -*-
+# coding: utf-8 -*-

# (c) 2016, Mathieu Bultel <mbultel@redhat.com>
# (c) 2016, Steve Baker <sbaker@redhat.com>

@@ -181,6 +181,7 @@ def _create_stack(module, stack, cloud):
    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e))


def _update_stack(module, stack, cloud):
    try:
        stack = cloud.update_stack(

@@ -195,11 +196,12 @@ def _update_stack(module, stack, cloud):
        if stack['stack_status'] == 'UPDATE_COMPLETE':
            return stack
        else:
-            module.fail_json(msg = "Failure in updating stack: %s" %
+            module.fail_json(msg="Failure in updating stack: %s" %
                             stack['stack_status_reason'])
    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e))


def _system_state_change(module, stack, cloud):
    state = module.params['state']
    if state == 'present':

@@ -209,6 +211,7 @@ def _system_state_change(module, stack, cloud):
            return True
    return False


def main():

    argument_spec = openstack_full_argument_spec(
@@ -113,7 +113,7 @@ PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"

def serialize_sshkey(sshkey):
    sshkey_data = {}
-    copy_keys = ['id', 'key', 'label','fingerprint']
+    copy_keys = ['id', 'key', 'label', 'fingerprint']
    for name in copy_keys:
        sshkey_data[name] = getattr(sshkey, name)
    return sshkey_data

@@ -132,7 +132,7 @@ def load_key_string(key_str):
    key_str = key_str.strip()
    ret_dict['key'] = key_str
    cut_key = key_str.split()
-    if len(cut_key) in [2,3]:
+    if len(cut_key) in [2, 3]:
        if len(cut_key) == 3:
            ret_dict['label'] = cut_key[2]
    else:

@@ -165,7 +165,7 @@ def get_sshkey_selector(module):
            return k.key == select_dict['key']
        else:
            # if key string not specified, all the fields must match
-            return all([select_dict[f] == getattr(k,f) for f in select_dict])
+            return all([select_dict[f] == getattr(k, f) for f in select_dict])
    return selector


@@ -188,7 +188,7 @@ def act_on_sshkeys(target_state, module, packet_conn):
        newkey['label'] = module.params.get('label')
        for param in ('label', 'key'):
            if param not in newkey:
-                _msg=("If you want to ensure a key is present, you must "
+                _msg = ("If you want to ensure a key is present, you must "
                       "supply both a label and a key string, either in "
                       "module params, or in a key file. %s is missing"
                       % param)

@@ -220,7 +220,7 @@ def act_on_sshkeys(target_state, module, packet_conn):
def main():
    module = AnsibleModule(
        argument_spec=dict(
-            state = dict(choices=['present', 'absent'], default='present'),
+            state=dict(choices=['present', 'absent'], default='present'),
            auth_token=dict(default=os.environ.get(PACKET_API_TOKEN_ENV_VAR),
                            no_log=True),
            label=dict(type='str', aliases=['name'], default=None),

@@ -243,7 +243,7 @@ def main():
        module.fail_json(msg='packet required for this module')

    if not module.params.get('auth_token'):
-        _fail_msg = ( "if Packet API token is not in environment variable %s, "
+        _fail_msg = ("if Packet API token is not in environment variable %s, "
                     "the auth_token parameter is required" %
                     PACKET_API_TOKEN_ENV_VAR)
        module.fail_json(msg=_fail_msg)

@@ -254,7 +254,7 @@ def main():

    state = module.params.get('state')

-    if state in ['present','absent']:
+    if state in ['present', 'absent']:
        try:
            module.exit_json(**act_on_sshkeys(state, module, packet_conn))
        except Exception as e:
@@ -120,12 +120,14 @@ def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
        promise['requestId']
    ) + '" to complete.')


def _remove_datacenter(module, profitbricks, datacenter):
    try:
        profitbricks.delete_datacenter(datacenter)
    except Exception as e:
        module.fail_json(msg="failed to remove the datacenter: %s" % str(e))


def create_datacenter(module, profitbricks):
    """
    Creates a Datacenter

@@ -166,6 +168,7 @@ def create_datacenter(module, profitbricks):
    except Exception as e:
        module.fail_json(msg="failed to create the new datacenter: %s" % str(e))


def remove_datacenter(module, profitbricks):
    """
    Removes a Datacenter.

@@ -197,6 +200,7 @@ def remove_datacenter(module, profitbricks):

    return changed


def main():
    module = AnsibleModule(
        argument_spec=dict(
@@ -123,6 +123,7 @@ def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
        promise['requestId']
    ) + '" to complete.')


def create_nic(module, profitbricks):
    """
    Creates a NIC.

@@ -173,6 +174,7 @@ def create_nic(module, profitbricks):
    except Exception as e:
        module.fail_json(msg="failed to create the NIC: %s" % str(e))


def delete_nic(module, profitbricks):
    """
    Removes a NIC

@@ -228,12 +230,13 @@ def delete_nic(module, profitbricks):
    except Exception as e:
        module.fail_json(msg="failed to remove the NIC: %s" % str(e))


def main():
    module = AnsibleModule(
        argument_spec=dict(
            datacenter=dict(),
            server=dict(),
-            name=dict(default=str(uuid.uuid4()).replace('-','')[:10]),
+            name=dict(default=str(uuid.uuid4()).replace('-', '')[:10]),
            lan=dict(),
            subscription_user=dict(),
            subscription_password=dict(no_log=True),

@@ -255,7 +258,6 @@ def main():
    if not module.params.get('server'):
        module.fail_json(msg='server parameter is required')

    subscription_user = module.params.get('subscription_user')
    subscription_password = module.params.get('subscription_password')

@@ -120,6 +120,7 @@ def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
        promise['requestId']
    ) + '" to complete.')


def attach_volume(module, profitbricks):
    """
    Attaches a volume.

@@ -150,7 +151,7 @@ def attach_volume(module, profitbricks):
        server_list = profitbricks.list_servers(datacenter)
        for s in server_list['items']:
            if server == s['properties']['name']:
-                server= s['id']
+                server = s['id']
                break

    # Locate UUID for Volume

@@ -163,6 +164,7 @@ def attach_volume(module, profitbricks):

    return profitbricks.attach_volume(datacenter, server, volume)


def detach_volume(module, profitbricks):
    """
    Detaches a volume.

@@ -193,7 +195,7 @@ def detach_volume(module, profitbricks):
        server_list = profitbricks.list_servers(datacenter)
        for s in server_list['items']:
            if server == s['properties']['name']:
-                server= s['id']
+                server = s['id']
                break

    # Locate UUID for Volume

@@ -206,6 +208,7 @@ def detach_volume(module, profitbricks):

    return profitbricks.detach_volume(datacenter, server, volume)


def main():
    module = AnsibleModule(
        argument_spec=dict(
@@ -11,7 +11,7 @@ ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'supported_by': 'community'}


-DOCUMENTATION='''
+DOCUMENTATION = '''
module: rax_clb_ssl
short_description: Manage SSL termination for a Rackspace Cloud Load Balancer.
description:

@@ -102,6 +102,7 @@ from ansible.module_utils.rax import (rax_argument_spec,
                                      setup_rax_module,
                                      )


def cloud_load_balancer_ssl(module, loadbalancer, state, enabled, private_key,
                            certificate, intermediate_certificate, secure_port,
                            secure_traffic_only, https_redirect,

@@ -222,6 +223,7 @@ def cloud_load_balancer_ssl(module, loadbalancer, state, enabled, private_key,
    else:
        module.fail_json(**result)


def main():
    argument_spec = rax_argument_spec()
    argument_spec.update(dict(
@@ -180,6 +180,7 @@ def alarm(module, state, label, entity_id, check_id, notification_plan_id, crite
    else:
        module.exit_json(changed=changed)


def main():
    argument_spec = rax_argument_spec()
    argument_spec.update(

@@ -256,6 +256,7 @@ def cloud_check(module, state, entity_id, label, check_type,
    else:
        module.exit_json(changed=changed)


def main():
    argument_spec = rax_argument_spec()
    argument_spec.update(

@@ -152,6 +152,7 @@ def cloud_monitoring(module, state, label, agent_id, named_ip_addresses,
    else:
        module.exit_json(changed=changed)


def main():
    argument_spec = rax_argument_spec()
    argument_spec.update(

@@ -138,6 +138,7 @@ def notification(module, state, label, notification_type, details):
    else:
        module.exit_json(changed=changed)


def main():
    argument_spec = rax_argument_spec()
    argument_spec.update(

@@ -141,6 +141,7 @@ def notification_plan(module, state, label, critical_state, warning_state, ok_st
    else:
        module.exit_json(changed=changed)


def main():
    argument_spec = rax_argument_spec()
    argument_spec.update(
@@ -94,21 +94,21 @@ from ansible.module_utils.univention_umc import (

def main():
    module = AnsibleModule(
-        argument_spec = dict(
-            type = dict(required=True,
+        argument_spec=dict(
+            type=dict(required=True,
                      type='str'),
-            zone = dict(required=True,
+            zone=dict(required=True,
                      type='str'),
-            name = dict(required=True,
+            name=dict(required=True,
                      type='str'),
-            data = dict(default=[],
+            data=dict(default=[],
                      type='dict'),
-            state = dict(default='present',
+            state=dict(default='present',
                       choices=['present', 'absent'],
                       type='str')
        ),
        supports_check_mode=True,
-        required_if = ([
+        required_if=([
            ('state', 'present', ['data'])
        ])
    )
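In the univention hunks above and below, removing the spaces around `=` also shifts the column of the opening parenthesis, so continuation lines that were aligned with it generally have to be re-indented to keep the visual alignment that pycodestyle checks as E127/E128. A hedged sketch of the aligned style, for illustration only (the surrounding dict is made up):

    params = dict(
        state=dict(default='present',
                   choices=['present', 'absent'],
                   type='str'),
    )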
@@ -111,10 +111,10 @@ from ansible.module_utils.univention_umc import (
def convert_time(time):
    """Convert a time in seconds into the biggest unit"""
    units = [
-        (24 * 60 * 60 , 'days'),
-        (60 * 60 , 'hours'),
-        (60 , 'minutes'),
-        (1 , 'seconds'),
+        (24 * 60 * 60, 'days'),
+        (60 * 60, 'hours'),
+        (60, 'minutes'),
+        (1, 'seconds'),
    ]

    if time == 0:

@@ -126,34 +126,34 @@ def convert_time(time):

def main():
    module = AnsibleModule(
-        argument_spec = dict(
-            type = dict(required=True,
+        argument_spec=dict(
+            type=dict(required=True,
                      type='str'),
-            zone = dict(required=True,
+            zone=dict(required=True,
                      aliases=['name'],
                      type='str'),
-            nameserver = dict(default=[],
+            nameserver=dict(default=[],
                            type='list'),
-            interfaces = dict(default=[],
+            interfaces=dict(default=[],
                            type='list'),
-            refresh = dict(default=3600,
+            refresh=dict(default=3600,
                         type='int'),
-            retry = dict(default=1800,
+            retry=dict(default=1800,
                       type='int'),
-            expire = dict(default=604800,
+            expire=dict(default=604800,
                        type='int'),
-            ttl = dict(default=600,
+            ttl=dict(default=600,
                     type='int'),
-            contact = dict(default='',
+            contact=dict(default='',
                         type='str'),
-            mx = dict(default=[],
+            mx=dict(default=[],
                    type='list'),
-            state = dict(default='present',
+            state=dict(default='present',
                       choices=['present', 'absent'],
                       type='str')
        ),
        supports_check_mode=True,
-        required_if = ([
+        required_if=([
            ('state', 'present', ['nameserver', 'interfaces'])
        ])
    )
@@ -87,18 +87,18 @@ from ansible.module_utils.univention_umc import (

def main():
    module = AnsibleModule(
-        argument_spec = dict(
-            name = dict(required=True,
+        argument_spec=dict(
+            name=dict(required=True,
                      type='str'),
-            description = dict(default=None,
+            description=dict(default=None,
                             type='str'),
-            position = dict(default='',
+            position=dict(default='',
                          type='str'),
-            ou = dict(default='',
+            ou=dict(default='',
                    type='str'),
-            subpath = dict(default='cn=groups',
+            subpath=dict(default='cn=groups',
                         type='str'),
-            state = dict(default='present',
+            state=dict(default='present',
                       choices=['present', 'absent'],
                       type='str')
        ),
@@ -380,157 +380,157 @@ from ansible.module_utils.univention_umc import (
 def main():
 module = AnsibleModule(
-argument_spec = dict(
+argument_spec=dict(
-name = dict(required=True,
+name=dict(required=True,
 type='str'),
-ou = dict(required=True,
+ou=dict(required=True,
 type='str'),
-owner = dict(type='str',
+owner=dict(type='str',
 default='0'),
-group = dict(type='str',
+group=dict(type='str',
 default='0'),
-path = dict(type='path',
+path=dict(type='path',
 default=None),
-directorymode = dict(type='str',
+directorymode=dict(type='str',
 default='00755'),
-host = dict(type='str',
+host=dict(type='str',
 default=None),
-root_squash = dict(type='bool',
+root_squash=dict(type='bool',
 default=True),
-subtree_checking = dict(type='bool',
+subtree_checking=dict(type='bool',
 default=True),
-sync = dict(type='str',
+sync=dict(type='str',
 default='sync'),
-writeable = dict(type='bool',
+writeable=dict(type='bool',
 default=True),
-sambaBlockSize = dict(type='str',
+sambaBlockSize=dict(type='str',
 aliases=['samba_block_size'],
 default=None),
-sambaBlockingLocks = dict(type='bool',
+sambaBlockingLocks=dict(type='bool',
 aliases=['samba_blocking_locks'],
 default=True),
-sambaBrowseable = dict(type='bool',
+sambaBrowseable=dict(type='bool',
 aliases=['samba_browsable'],
 default=True),
-sambaCreateMode = dict(type='str',
+sambaCreateMode=dict(type='str',
 aliases=['samba_create_mode'],
 default='0744'),
-sambaCscPolicy = dict(type='str',
+sambaCscPolicy=dict(type='str',
 aliases=['samba_csc_policy'],
 default='manual'),
-sambaCustomSettings = dict(type='list',
+sambaCustomSettings=dict(type='list',
 aliases=['samba_custom_settings'],
 default=[]),
-sambaDirectoryMode = dict(type='str',
+sambaDirectoryMode=dict(type='str',
 aliases=['samba_directory_mode'],
 default='0755'),
-sambaDirectorySecurityMode = dict(type='str',
+sambaDirectorySecurityMode=dict(type='str',
 aliases=['samba_directory_security_mode'],
 default='0777'),
-sambaDosFilemode = dict(type='bool',
+sambaDosFilemode=dict(type='bool',
 aliases=['samba_dos_filemode'],
 default=False),
-sambaFakeOplocks = dict(type='bool',
+sambaFakeOplocks=dict(type='bool',
 aliases=['samba_fake_oplocks'],
 default=False),
-sambaForceCreateMode = dict(type='bool',
+sambaForceCreateMode=dict(type='bool',
 aliases=['samba_force_create_mode'],
 default=False),
-sambaForceDirectoryMode = dict(type='bool',
+sambaForceDirectoryMode=dict(type='bool',
 aliases=['samba_force_directory_mode'],
 default=False),
-sambaForceDirectorySecurityMode = dict(type='bool',
+sambaForceDirectorySecurityMode=dict(type='bool',
 aliases=['samba_force_directory_security_mode'],
 default=False),
-sambaForceGroup = dict(type='str',
+sambaForceGroup=dict(type='str',
 aliases=['samba_force_group'],
 default=None),
-sambaForceSecurityMode = dict(type='bool',
+sambaForceSecurityMode=dict(type='bool',
 aliases=['samba_force_security_mode'],
 default=False),
-sambaForceUser = dict(type='str',
+sambaForceUser=dict(type='str',
 aliases=['samba_force_user'],
 default=None),
-sambaHideFiles = dict(type='str',
+sambaHideFiles=dict(type='str',
 aliases=['samba_hide_files'],
 default=None),
-sambaHideUnreadable = dict(type='bool',
+sambaHideUnreadable=dict(type='bool',
 aliases=['samba_hide_unreadable'],
 default=False),
-sambaHostsAllow = dict(type='list',
+sambaHostsAllow=dict(type='list',
 aliases=['samba_hosts_allow'],
 default=[]),
-sambaHostsDeny = dict(type='list',
+sambaHostsDeny=dict(type='list',
 aliases=['samba_hosts_deny'],
 default=[]),
-sambaInheritAcls = dict(type='bool',
+sambaInheritAcls=dict(type='bool',
 aliases=['samba_inherit_acls'],
 default=True),
-sambaInheritOwner = dict(type='bool',
+sambaInheritOwner=dict(type='bool',
 aliases=['samba_inherit_owner'],
 default=False),
-sambaInheritPermissions = dict(type='bool',
+sambaInheritPermissions=dict(type='bool',
 aliases=['samba_inherit_permissions'],
 default=False),
-sambaInvalidUsers = dict(type='str',
+sambaInvalidUsers=dict(type='str',
 aliases=['samba_invalid_users'],
 default=None),
-sambaLevel2Oplocks = dict(type='bool',
+sambaLevel2Oplocks=dict(type='bool',
 aliases=['samba_level_2_oplocks'],
 default=True),
-sambaLocking = dict(type='bool',
+sambaLocking=dict(type='bool',
 aliases=['samba_locking'],
 default=True),
-sambaMSDFSRoot = dict(type='bool',
+sambaMSDFSRoot=dict(type='bool',
 aliases=['samba_msdfs_root'],
 default=False),
-sambaName = dict(type='str',
+sambaName=dict(type='str',
 aliases=['samba_name'],
 default=None),
-sambaNtAclSupport = dict(type='bool',
+sambaNtAclSupport=dict(type='bool',
 aliases=['samba_nt_acl_support'],
 default=True),
-sambaOplocks = dict(type='bool',
+sambaOplocks=dict(type='bool',
 aliases=['samba_oplocks'],
 default=True),
-sambaPostexec = dict(type='str',
+sambaPostexec=dict(type='str',
 aliases=['samba_postexec'],
 default=None),
-sambaPreexec = dict(type='str',
+sambaPreexec=dict(type='str',
 aliases=['samba_preexec'],
 default=None),
-sambaPublic = dict(type='bool',
+sambaPublic=dict(type='bool',
 aliases=['samba_public'],
 default=False),
-sambaSecurityMode = dict(type='str',
+sambaSecurityMode=dict(type='str',
 aliases=['samba_security_mode'],
 default='0777'),
-sambaStrictLocking = dict(type='str',
+sambaStrictLocking=dict(type='str',
 aliases=['samba_strict_locking'],
 default='Auto'),
-sambaVFSObjects = dict(type='str',
+sambaVFSObjects=dict(type='str',
 aliases=['samba_vfs_objects'],
 default=None),
-sambaValidUsers = dict(type='str',
+sambaValidUsers=dict(type='str',
 aliases=['samba_valid_users'],
 default=None),
-sambaWriteList = dict(type='str',
+sambaWriteList=dict(type='str',
 aliases=['samba_write_list'],
 default=None),
-sambaWriteable = dict(type='bool',
+sambaWriteable=dict(type='bool',
 aliases=['samba_writeable'],
 default=True),
-nfs_hosts = dict(type='list',
+nfs_hosts=dict(type='list',
 default=[]),
-nfsCustomSettings = dict(type='list',
+nfsCustomSettings=dict(type='list',
 aliases=['nfs_custom_settings'],
 default=[]),
-state = dict(default='present',
+state=dict(default='present',
 choices=['present', 'absent'],
 type='str')
 ),
 supports_check_mode=True,
-required_if = ([
+required_if=([
 ('state', 'present', ['path', 'host', 'sambaName'])
 ])
 )
@@ -348,132 +348,132 @@ from ansible.module_utils.univention_umc import (
 def main():
 expiry = date.strftime(date.today() + timedelta(days=365), "%Y-%m-%d")
 module = AnsibleModule(
-argument_spec = dict(
+argument_spec=dict(
-birthday = dict(default=None,
+birthday=dict(default=None,
 type='str'),
-city = dict(default=None,
+city=dict(default=None,
 type='str'),
-country = dict(default=None,
+country=dict(default=None,
 type='str'),
-department_number = dict(default=None,
+department_number=dict(default=None,
 type='str',
 aliases=['departmentNumber']),
-description = dict(default=None,
+description=dict(default=None,
 type='str'),
-display_name = dict(default=None,
+display_name=dict(default=None,
 type='str',
 aliases=['displayName']),
-email = dict(default=[''],
+email=dict(default=[''],
 type='list'),
-employee_number = dict(default=None,
+employee_number=dict(default=None,
 type='str',
 aliases=['employeeNumber']),
-employee_type = dict(default=None,
+employee_type=dict(default=None,
 type='str',
 aliases=['employeeType']),
-firstname = dict(default=None,
+firstname=dict(default=None,
 type='str'),
-gecos = dict(default=None,
+gecos=dict(default=None,
 type='str'),
-groups = dict(default=[],
+groups=dict(default=[],
 type='list'),
-home_share = dict(default=None,
+home_share=dict(default=None,
 type='str',
 aliases=['homeShare']),
-home_share_path = dict(default=None,
+home_share_path=dict(default=None,
 type='str',
 aliases=['homeSharePath']),
-home_telephone_number = dict(default=[],
+home_telephone_number=dict(default=[],
 type='list',
 aliases=['homeTelephoneNumber']),
-homedrive = dict(default=None,
+homedrive=dict(default=None,
 type='str'),
-lastname = dict(default=None,
+lastname=dict(default=None,
 type='str'),
-mail_alternative_address= dict(default=[],
+mail_alternative_address=dict(default=[],
 type='list',
 aliases=['mailAlternativeAddress']),
-mail_home_server = dict(default=None,
+mail_home_server=dict(default=None,
 type='str',
 aliases=['mailHomeServer']),
-mail_primary_address = dict(default=None,
+mail_primary_address=dict(default=None,
 type='str',
 aliases=['mailPrimaryAddress']),
-mobile_telephone_number = dict(default=[],
+mobile_telephone_number=dict(default=[],
 type='list',
 aliases=['mobileTelephoneNumber']),
-organisation = dict(default=None,
+organisation=dict(default=None,
 type='str'),
-overridePWHistory = dict(default=False,
+overridePWHistory=dict(default=False,
 type='bool',
 aliases=['override_pw_history']),
-overridePWLength = dict(default=False,
+overridePWLength=dict(default=False,
 type='bool',
 aliases=['override_pw_length']),
-pager_telephonenumber = dict(default=[],
+pager_telephonenumber=dict(default=[],
 type='list',
 aliases=['pagerTelephonenumber']),
-password = dict(default=None,
+password=dict(default=None,
 type='str',
 no_log=True),
-phone = dict(default=[],
+phone=dict(default=[],
 type='list'),
-postcode = dict(default=None,
+postcode=dict(default=None,
 type='str'),
-primary_group = dict(default=None,
+primary_group=dict(default=None,
 type='str',
 aliases=['primaryGroup']),
-profilepath = dict(default=None,
+profilepath=dict(default=None,
 type='str'),
-pwd_change_next_login = dict(default=None,
+pwd_change_next_login=dict(default=None,
 type='str',
 choices=['0', '1'],
 aliases=['pwdChangeNextLogin']),
-room_number = dict(default=None,
+room_number=dict(default=None,
 type='str',
 aliases=['roomNumber']),
-samba_privileges = dict(default=[],
+samba_privileges=dict(default=[],
 type='list',
 aliases=['sambaPrivileges']),
-samba_user_workstations = dict(default=[],
+samba_user_workstations=dict(default=[],
 type='list',
 aliases=['sambaUserWorkstations']),
-sambahome = dict(default=None,
+sambahome=dict(default=None,
 type='str'),
-scriptpath = dict(default=None,
+scriptpath=dict(default=None,
 type='str'),
-secretary = dict(default=[],
+secretary=dict(default=[],
 type='list'),
-serviceprovider = dict(default=[''],
+serviceprovider=dict(default=[''],
 type='list'),
-shell = dict(default='/bin/bash',
+shell=dict(default='/bin/bash',
 type='str'),
-street = dict(default=None,
+street=dict(default=None,
 type='str'),
-title = dict(default=None,
+title=dict(default=None,
 type='str'),
-unixhome = dict(default=None,
+unixhome=dict(default=None,
 type='str'),
-userexpiry = dict(default=expiry,
+userexpiry=dict(default=expiry,
 type='str'),
-username = dict(required=True,
+username=dict(required=True,
 aliases=['name'],
 type='str'),
-position = dict(default='',
+position=dict(default='',
 type='str'),
-update_password = dict(default='always',
+update_password=dict(default='always',
 choices=['always', 'on_create'],
 type='str'),
-ou = dict(default='',
+ou=dict(default='',
 type='str'),
-subpath = dict(default='cn=users',
+subpath=dict(default='cn=users',
 type='str'),
-state = dict(default='present',
+state=dict(default='present',
 choices=['present', 'absent'],
 type='str')
 ),
 supports_check_mode=True,
-required_if = ([
+required_if=([
 ('state', 'present', ['firstname', 'lastname', 'password'])
 ])
 )
@@ -507,6 +507,7 @@ class PyVmomiDeviceHelper(object):

 class PyVmomiCache(object):
 """ This class caches references to objects which are requested multiples times but not modified """
+
 def __init__(self, content, dc_name=None):
 self.content = content
 self.dc_name = dc_name
@@ -156,7 +156,6 @@ class PyVmomiHelper(object):
 return tree

 def _build_folder_map(self, folder, inpath='/'):
-
 """ Build a searchable index for vms+uuids+folders """
 if isinstance(folder, tuple):
 folder = folder[1]
@@ -627,6 +627,7 @@ def spec_singleton(spec, request, vm):
 spec = request.new_spec()
 return spec

+
 def get_cdrom_params(module, s, vm_cdrom):
 cdrom_type = None
 cdrom_iso_path = None
@@ -648,6 +649,7 @@ def get_cdrom_params(module, s, vm_cdrom):
 return cdrom_type, cdrom_iso_path

+
 def vmdisk_id(vm, current_datastore_name):
 id_list = []
 for vm_disk in vm._disks:
@@ -668,7 +670,7 @@ def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, mo
 dclist = [k for k,
 v in vsphere_client.get_datacenters().items() if v == datacenter]
 if dclist:
-dcmor=dclist[0]
+dcmor = dclist[0]
 else:
 vsphere_client.disconnect()
 module.fail_json(msg="Cannot find datacenter named: %s" % datacenter)
@@ -744,7 +746,7 @@ def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, mo
 cloneArgs = dict(resourcepool=rpmor, power_on=False)

 if snapshot_to_clone is not None:
-#check if snapshot_to_clone is specified, Create a Linked Clone instead of a full clone.
+# check if snapshot_to_clone is specified, Create a Linked Clone instead of a full clone.
 cloneArgs["linked"] = True
 cloneArgs["snapshot"] = snapshot_to_clone

@@ -778,6 +780,8 @@ def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, mo

 # example from https://github.com/kalazzerx/pysphere/blob/master/examples/pysphere_create_disk_and_add_to_vm.py
 # was used.
+
+
 def update_disks(vsphere_client, vm, module, vm_disk, changes):
 request = VI.ReconfigVM_TaskRequestMsg()
 changed = False
@@ -868,7 +872,7 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name
 if vm_extra_config:
 spec = spec_singleton(spec, request, vm)
 extra_config = []
-for k,v in vm_extra_config.items():
+for k, v in vm_extra_config.items():
 ec = spec.new_extraConfig()
 ec.set_element_key(str(k))
 ec.set_element_value(str(v))
@@ -988,7 +992,7 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name
 spec = spec_singleton(spec, request, vm)

 # Get a list of the VM's hard drives
-dev_list = [d for d in vm.properties.config.hardware.device if d._type=='VirtualDisk']
+dev_list = [d for d in vm.properties.config.hardware.device if d._type == 'VirtualDisk']
 if len(vm_disk) > len(dev_list):
 vsphere_client.disconnect()
 module.fail_json(msg="Error in vm_disk definition. Too many disks defined in comparison to the VM's disk profile.")
@@ -1084,14 +1088,14 @@ def reconfigure_net(vsphere_client, vm, module, esxi, resource_pool, guest, vm_n
 dclist = [k for k,
 v in vsphere_client.get_datacenters().items() if v == datacenter]
 if dclist:
-dcmor=dclist[0]
+dcmor = dclist[0]
 else:
 vsphere_client.disconnect()
 module.fail_json(msg="Cannot find datacenter named: %s" % datacenter)
 dcprops = VIProperty(vsphere_client, dcmor)
 nfmor = dcprops.networkFolder._obj
-for k,v in vm_nic.items():
+for k, v in vm_nic.items():
-nicNum = k[len(k) -1]
+nicNum = k[len(k) - 1]
 if vm_nic[k]['network_type'] == 'dvs':
 portgroupKey = find_portgroup_key(module, s, nfmor, vm_nic[k]['network'])
 todvs = True
@@ -1220,7 +1224,7 @@ def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest,
 dclist = [k for k,
 v in vsphere_client.get_datacenters().items() if v == datacenter]
 if dclist:
-dcmor=dclist[0]
+dcmor = dclist[0]
 else:
 vsphere_client.disconnect()
 module.fail_json(msg="Cannot find datacenter named: %s" % datacenter)
@@ -1479,7 +1483,7 @@ def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest,
 # Power on the VM if it was requested
 power_state(vm, state, True)

-vmfacts=gather_facts(vm)
+vmfacts = gather_facts(vm)
 vsphere_client.disconnect()
 module.exit_json(
 ansible_facts=vmfacts,
@@ -1585,7 +1589,7 @@ def gather_facts(vm):
 'hw_instance_uuid': vm.properties.config.instanceUuid,
 'hw_processor_count': vm.properties.config.hardware.numCPU,
 'hw_memtotal_mb': vm.properties.config.hardware.memoryMB,
-'hw_interfaces':[],
+'hw_interfaces': [],
 }
 netInfo = vm.get_property('net')
 netDict = {}
@@ -1608,7 +1612,7 @@ def gather_facts(vm):
 'macaddress_dash': entry.macAddress.replace(':', '-'),
 'summary': entry.deviceInfo.summary,
 }
-facts['hw_interfaces'].append('eth'+str(ifidx))
+facts['hw_interfaces'].append('eth' + str(ifidx))

 ifidx += 1

@@ -1753,7 +1757,7 @@ def main():

 ),
 supports_check_mode=False,
-mutually_exclusive=[['state', 'vmware_guest_facts'],['state', 'from_template']],
+mutually_exclusive=[['state', 'vmware_guest_facts'], ['state', 'from_template']],
 required_together=[
 ['state', 'force'],
 [
@@ -1791,7 +1795,6 @@ def main():
 power_on_after_clone = module.params['power_on_after_clone']
 validate_certs = module.params['validate_certs']
-

 # CONNECT TO THE SERVER
 viserver = VIServer()
 if validate_certs and not hasattr(ssl, 'SSLContext') and not vcenter_hostname.startswith('http://'):
@@ -1899,7 +1902,6 @@ def main():
 vm_hardware, vm_disk, vm_nic, esxi)):
 module.exit_json(changed=False, msg="vm %s not present" % guest)
-

 # Create the VM
 elif state in ['present', 'powered_off', 'powered_on']:

@@ -110,16 +110,16 @@ webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/')
 def main():

 module = AnsibleModule(
-argument_spec = dict(
+argument_spec=dict(
-name = dict(required=True),
+name=dict(required=True),
-state = dict(required=False, choices=['present', 'absent'], default='present'),
+state=dict(required=False, choices=['present', 'absent'], default='present'),
-type = dict(required=True),
+type=dict(required=True),
-autostart = dict(required=False, type='bool', default=False),
+autostart=dict(required=False, type='bool', default=False),
-extra_info = dict(required=False, default=""),
+extra_info=dict(required=False, default=""),
-port_open = dict(required=False, type='bool', default=False),
+port_open=dict(required=False, type='bool', default=False),
-login_name = dict(required=True),
+login_name=dict(required=True),
-login_password = dict(required=True, no_log=True),
+login_password=dict(required=True, no_log=True),
-machine = dict(required=False, default=False),
+machine=dict(required=False, default=False),
 ),
 supports_check_mode=True
 )
@@ -157,7 +157,7 @@ def main():
 # If it exists with the right type, we don't change it
 # Should check other parameters.
 module.exit_json(
-changed = False,
+changed=False,
 )

 if not module.check_mode:
@@ -176,7 +176,7 @@ def main():
 # If the app's already not there, nothing changed.
 if not existing_app:
 module.exit_json(
-changed = False,
+changed=False,
 )

 if not module.check_mode:
@@ -188,10 +188,9 @@ def main():
 else:
 module.fail_json(msg="Unknown state specified: {}".format(app_state))
-

 module.exit_json(
-changed = True,
+changed=True,
-result = result
+result=result
 )

@@ -102,15 +102,15 @@ webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/')
 def main():

 module = AnsibleModule(
-argument_spec = dict(
+argument_spec=dict(
-name = dict(required=True),
+name=dict(required=True),
-state = dict(required=False, choices=['present', 'absent'], default='present'),
+state=dict(required=False, choices=['present', 'absent'], default='present'),
 # You can specify an IP address or hostname.
-type = dict(required=True),
+type=dict(required=True),
-password = dict(required=False, default=None, no_log=True),
+password=dict(required=False, default=None, no_log=True),
-login_name = dict(required=True),
+login_name=dict(required=True),
-login_password = dict(required=True, no_log=True),
+login_password=dict(required=True, no_log=True),
-machine = dict(required=False, default=False),
+machine=dict(required=False, default=False),
 ),
 supports_check_mode=True
 )
@@ -153,10 +153,9 @@ def main():

 # If it exists with the right type, we don't change anything.
 module.exit_json(
-changed = False,
+changed=False,
 )
-

 if not module.check_mode:
 # If this isn't a dry run, create the db
 # and default user.
@@ -172,7 +171,7 @@ def main():
 if not module.check_mode:

 if not (existing_db or existing_user):
-module.exit_json(changed = False,)
+module.exit_json(changed=False,)

 if existing_db:
 # Delete the db if it exists
@@ -190,8 +189,8 @@ def main():
 module.fail_json(msg="Unknown state specified: {}".format(db_state))

 module.exit_json(
-changed = True,
+changed=True,
-result = result
+result=result
 )

@@ -93,12 +93,12 @@ webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/')
 def main():

 module = AnsibleModule(
-argument_spec = dict(
+argument_spec=dict(
-name = dict(required=True),
+name=dict(required=True),
-state = dict(required=False, choices=['present', 'absent'], default='present'),
+state=dict(required=False, choices=['present', 'absent'], default='present'),
-subdomains = dict(required=False, default=[]),
+subdomains=dict(required=False, default=[]),
-login_name = dict(required=True),
+login_name=dict(required=True),
-login_password = dict(required=True, no_log=True),
+login_password=dict(required=True, no_log=True),
 ),
 supports_check_mode=True
 )
@@ -127,7 +127,7 @@ def main():
 if set(existing_domain['subdomains']) >= set(domain_subdomains):
 # If it exists with the right subdomains, we don't change anything.
 module.exit_json(
-changed = False,
+changed=False,
 )

 positional_args = [session_id, domain_name] + domain_subdomains
@@ -146,7 +146,7 @@ def main():
 # If the app's already not there, nothing changed.
 if not existing_domain:
 module.exit_json(
-changed = False,
+changed=False,
 )

 positional_args = [session_id, domain_name] + domain_subdomains
@@ -161,8 +161,8 @@ def main():
 module.fail_json(msg="Unknown state specified: {}".format(domain_state))

 module.exit_json(
-changed = True,
+changed=True,
-result = result
+result=result
 )

@@ -108,16 +108,16 @@ webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/')
 def main():

 module = AnsibleModule(
-argument_spec = dict(
+argument_spec=dict(
-name = dict(required=True),
+name=dict(required=True),
-state = dict(required=False, choices=['present', 'absent'], default='present'),
+state=dict(required=False, choices=['present', 'absent'], default='present'),
 # You can specify an IP address or hostname.
-host = dict(required=True),
+host=dict(required=True),
-https = dict(required=False, type='bool', default=False),
+https=dict(required=False, type='bool', default=False),
-subdomains = dict(required=False, type='list', default=[]),
+subdomains=dict(required=False, type='list', default=[]),
-site_apps = dict(required=False, type='list', default=[]),
+site_apps=dict(required=False, type='list', default=[]),
-login_name = dict(required=True),
+login_name=dict(required=True),
-login_password = dict(required=True, no_log=True),
+login_password=dict(required=True, no_log=True),
 ),
 supports_check_mode=True
 )
@@ -159,7 +159,7 @@ def main():
 (set(existing_site['subdomains']) == set(module.params['subdomains'])) and \
 (dict(existing_site['website_apps']) == dict(module.params['site_apps'])):
 module.exit_json(
-changed = False
+changed=False
 )

 positional_args = [
@@ -168,14 +168,14 @@ def main():
 module.params['subdomains'],
 ]
 for a in module.params['site_apps']:
-positional_args.append( (a[0], a[1]) )
+positional_args.append((a[0], a[1]))

 if not module.check_mode:
 # If this isn't a dry run, create or modify the site
 result.update(
 webfaction.create_website(
 *positional_args
-) if not existing_site else webfaction.update_website (
+) if not existing_site else webfaction.update_website(
 *positional_args
 )
 )
@@ -185,7 +185,7 @@ def main():
 # If the site's already not there, nothing changed.
 if not existing_site:
 module.exit_json(
-changed = False,
+changed=False,
 )

 if not module.check_mode:
@@ -198,8 +198,8 @@ def main():
 module.fail_json(msg="Unknown state specified: {}".format(site_state))

 module.exit_json(
-changed = True,
+changed=True,
-result = result
+result=result
 )

@@ -456,6 +456,7 @@ class Configuration:
 """
 Configuration for this module.
 """
+
 def __init__(self, management_token=None, host=None, scheme=None, validate_certs=None, name=None, port=None,
 rules=None, state=None, token=None, token_type=None):
 self.management_token = management_token # type: str
@@ -474,6 +475,7 @@ class Output:
 """
 Output of an action of this module.
 """
+
 def __init__(self, changed=None, token=None, rules=None, operation=None):
 self.changed = changed # type: bool
 self.token = token # type: str
@@ -485,6 +487,7 @@ class ACL:
 """
 Consul ACL. See: https://www.consul.io/docs/guides/acl.html.
 """
+
 def __init__(self, rules, token_type, token, name):
 self.rules = rules
 self.token_type = token_type
@@ -507,6 +510,7 @@ class Rule:
 """
 ACL rule. See: https://www.consul.io/docs/guides/acl.html#acl-rules-and-scope.
 """
+
 def __init__(self, scope, policy, pattern=None):
 self.scope = scope
 self.policy = policy
@@ -532,6 +536,7 @@ class RuleCollection:
 """
 Collection of ACL rules, which are part of a Consul ACL.
 """
+
 def __init__(self):
 self._rules = {}
 for scope in RULE_SCOPES:
@@ -147,9 +147,11 @@ def parse_plugin_repo(string):

 return repo

+
 def is_plugin_present(plugin_dir, working_dir):
 return os.path.isdir(os.path.join(working_dir, plugin_dir))

+
 def parse_error(string):
 reason = "reason: "
 try:
@@ -157,6 +159,7 @@ def parse_error(string):
 except ValueError:
 return string

+
 def install_plugin(module, plugin_bin, plugin_name, url, timeout):
 cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["present"], plugin_name]

@@ -178,6 +181,7 @@ def install_plugin(module, plugin_bin, plugin_name, url, timeout):

 return True, cmd, out, err

+
 def remove_plugin(module, plugin_bin, plugin_name):
 cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["absent"], plugin_name]

@@ -193,6 +197,7 @@ def remove_plugin(module, plugin_bin, plugin_name):

 return True, cmd, out, err

+
 def main():
 module = AnsibleModule(
 argument_spec=dict(
@@ -102,6 +102,7 @@ def ring_check(module, riak_admin_bin):
 else:
 return False

+
 def main():

 module = AnsibleModule(
@@ -115,10 +116,9 @@ def main():
 wait_for_ring=dict(default=False, type='int'),
 wait_for_service=dict(
 required=False, default=None, choices=['kv']),
-validate_certs = dict(default='yes', type='bool'))
+validate_certs=dict(default='yes', type='bool'))
 )
-

 command = module.params.get('command')
 http_conn = module.params.get('http_conn')
 target_node = module.params.get('target_node')
@@ -126,8 +126,7 @@ def main():
 wait_for_ring = module.params.get('wait_for_ring')
 wait_for_service = module.params.get('wait_for_service')

-
-#make sure riak commands are on the path
+# make sure riak commands are on the path
 riak_bin = module.get_bin_path('riak')
 riak_admin_bin = module.get_bin_path('riak-admin')

@@ -150,7 +149,7 @@ def main():
 node_name = stats['nodename']
 nodes = stats['ring_members']
 ring_size = stats['ring_creation_size']
-rc, out, err = module.run_command([riak_bin, 'version'] )
+rc, out, err = module.run_command([riak_bin, 'version'])
 version = out.strip()

 result = dict(node_name=node_name,
@@ -159,7 +158,7 @@ def main():
 version=version)

 if command == 'ping':
-cmd = '%s ping %s' % ( riak_bin, target_node )
+cmd = '%s ping %s' % (riak_bin, target_node)
 rc, out, err = module.run_command(cmd)
 if rc == 0:
 result['ping'] = out
@@ -219,7 +218,7 @@ def main():
 module.fail_json(msg='Timeout waiting for handoffs.')

 if wait_for_service:
-cmd = [riak_admin_bin, 'wait_for_service', 'riak_%s' % wait_for_service, node_name ]
+cmd = [riak_admin_bin, 'wait_for_service', 'riak_%s' % wait_for_service, node_name]
 rc, out, err = module.run_command(cmd)
 result['service'] = out

Some files were not shown because too many files have changed in this diff.