Mirror of https://github.com/ansible-collections/community.general.git (synced 2025-07-24 22:00:22 -07:00)
Bulk autopep8 (modules)

As agreed in the 2017-12-07 Core meeting, bulk-fix pep8 issues.

Generated using autopep8 1.3.3 (pycodestyle 2.3.1):

    autopep8 -r --max-line-length 160 --in-place --ignore E305,E402,E722,E741 lib/ansible/modules

Manually fix issues that autopep8 has introduced.
parent d13d7e9404
commit c57a7f05e1
314 changed files with 3462 additions and 3383 deletions
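For orientation, a minimal runnable sketch of the two patterns that account for most of the non-whitespace changes in this diff: redundant backslash continuations inside brackets (E502) and blank-line counts around top-level definitions (E302). The names and values below are illustrative only, not taken from any single module.

# E502: a backslash is redundant when the line is already continued by an
# open bracket; autopep8 removes it, e.g.
#     msg='The subnet {0} to associate with route_table {1} ' \
#         'does not exist, aborting'.format(sn, rt)
# loses the trailing backslash because the implicit string concatenation
# inside the surrounding parentheses already continues the line.
sn, rt = '10.0.0.0/24', 'rtb-0example'  # illustrative values
msg = ('The subnet {0} to associate with route_table {1} '
       'does not exist, aborting'.format(sn, rt))


# E302: two blank lines are expected around top-level definitions; most of
# the hunks below simply insert the missing blank line before defs like
# this one (signature borrowed from the diff).
def routes_match(rt_list=None, rt=None, igw=None):
    """Placeholder body; the real module compares route-table entries."""
    return bool(rt_list or rt or igw)


print(msg, routes_match())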
|
@ -189,6 +189,7 @@ def get_vpc_info(vpc):
|
|||
'state': vpc.state,
|
||||
})
|
||||
|
||||
|
||||
def find_vpc(module, vpc_conn, vpc_id=None, cidr=None):
|
||||
"""
|
||||
Finds a VPC that matches a specific id or cidr + tags
|
||||
|
@ -234,8 +235,8 @@ def find_vpc(module, vpc_conn, vpc_id=None, cidr=None):
|
|||
|
||||
return (found_vpc)
|
||||
|
||||
def routes_match(rt_list=None, rt=None, igw=None):
|
||||
|
||||
def routes_match(rt_list=None, rt=None, igw=None):
|
||||
"""
|
||||
Check if the route table has all routes as in given list
|
||||
|
||||
|
@ -284,6 +285,7 @@ def routes_match(rt_list=None, rt=None, igw=None):
|
|||
else:
|
||||
return True
|
||||
|
||||
|
||||
def rtb_changed(route_tables=None, vpc_conn=None, module=None, vpc=None, igw=None):
|
||||
"""
|
||||
Checks if the remote routes match the local routes.
|
||||
|
@ -310,7 +312,7 @@ def rtb_changed(route_tables=None, vpc_conn=None, module=None, vpc=None, igw=Non
|
|||
rsn = vpc_conn.get_all_subnets(filters={'cidr': sn, 'vpc_id': vpc.id})
|
||||
if len(rsn) != 1:
|
||||
module.fail_json(
|
||||
msg='The subnet {0} to associate with route_table {1} ' \
|
||||
msg='The subnet {0} to associate with route_table {1} '
|
||||
'does not exist, aborting'.format(sn, rt)
|
||||
)
|
||||
nrt = vpc_conn.get_all_route_tables(filters={'vpc_id': vpc.id, 'association.subnet-id': rsn[0].id})
|
||||
|
@ -408,7 +410,6 @@ def create_vpc(module, vpc_conn):
|
|||
if new_tags:
|
||||
vpc_conn.create_tags(vpc.id, new_tags)
|
||||
|
||||
|
||||
# boto doesn't appear to have a way to determine the existing
|
||||
# value of the dns attributes, so we just set them.
|
||||
# It also must be done one at a time.
|
||||
|
@ -548,7 +549,7 @@ def create_vpc(module, vpc_conn):
|
|||
if route['gw'] == 'igw':
|
||||
if not internet_gateway:
|
||||
module.fail_json(
|
||||
msg='You asked for an Internet Gateway ' \
|
||||
msg='You asked for an Internet Gateway '
|
||||
'(igw) route, but you have no Internet Gateway'
|
||||
)
|
||||
route_kwargs['gateway_id'] = igw.id
|
||||
|
@ -567,7 +568,7 @@ def create_vpc(module, vpc_conn):
|
|||
rsn = vpc_conn.get_all_subnets(filters={'cidr': sn, 'vpc_id': vpc.id})
|
||||
if len(rsn) != 1:
|
||||
module.fail_json(
|
||||
msg='The subnet {0} to associate with route_table {1} ' \
|
||||
msg='The subnet {0} to associate with route_table {1} '
|
||||
'does not exist, aborting'.format(sn, rt)
|
||||
)
|
||||
rsn = rsn[0]
|
||||
|
@ -591,7 +592,7 @@ def create_vpc(module, vpc_conn):
|
|||
changed = True
|
||||
except EC2ResponseError as e:
|
||||
module.fail_json(
|
||||
msg='Unable to create and associate route table {0}, error: ' \
|
||||
msg='Unable to create and associate route table {0}, error: '
|
||||
'{1}'.format(rt, e)
|
||||
)
|
||||
|
||||
|
@ -647,6 +648,7 @@ def create_vpc(module, vpc_conn):
|
|||
|
||||
return (vpc_dict, created_vpc_id, returned_subnets, igw_id, changed)
|
||||
|
||||
|
||||
def terminate_vpc(module, vpc_conn, vpc_id=None, cidr=None):
|
||||
"""
|
||||
Terminates a VPC
|
||||
|
|
|
@ -120,6 +120,7 @@ try:
|
|||
except ImportError:
|
||||
HAS_BOTO3 = False
|
||||
|
||||
|
||||
def get_arn_from_kms_alias(kms, aliasname):
|
||||
ret = kms.list_aliases()
|
||||
key_id = None
|
||||
|
@ -138,12 +139,14 @@ def get_arn_from_kms_alias(kms, aliasname):
|
|||
return k['KeyArn']
|
||||
raise Exception('could not find key from id: {}'.format(key_id))
|
||||
|
||||
|
||||
def get_arn_from_role_name(iam, rolename):
|
||||
ret = iam.get_role(RoleName=rolename)
|
||||
if ret.get('Role') and ret['Role'].get('Arn'):
|
||||
return ret['Role']['Arn']
|
||||
raise Exception('could not find arn for name {}.'.format(rolename))
|
||||
|
||||
|
||||
def do_grant(kms, keyarn, role_arn, granttypes, mode='grant', dry_run=True, clean_invalid_entries=True):
|
||||
ret = {}
|
||||
keyret = kms.get_key_policy(KeyId=keyarn, PolicyName='default')
|
||||
|
@ -210,6 +213,7 @@ def do_grant(kms, keyarn, role_arn, granttypes, mode='grant', dry_run=True, clea
|
|||
|
||||
return ret
|
||||
|
||||
|
||||
def assert_policy_shape(policy):
|
||||
'''Since the policy seems a little, uh, fragile, make sure we know approximately what we're looking at.'''
|
||||
errors = []
|
||||
|
@ -230,6 +234,7 @@ def assert_policy_shape(policy):
|
|||
raise Exception('Problems asserting policy shape. Cowardly refusing to modify it: {}'.format(' '.join(errors)))
|
||||
return None
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = ansible.module_utils.ec2.ec2_argument_spec()
|
||||
argument_spec.update(dict(
|
||||
|
@ -255,7 +260,6 @@ def main():
|
|||
result = {}
|
||||
mode = module.params['mode']
|
||||
|
||||
|
||||
try:
|
||||
region, ec2_url, aws_connect_kwargs = ansible.module_utils.ec2.get_aws_connection_info(module, boto3=True)
|
||||
kms = ansible.module_utils.ec2.boto3_conn(module, conn_type='client', resource='kms', region=region, endpoint=ec2_url, **aws_connect_kwargs)
|
||||
|
@ -263,7 +267,6 @@ def main():
|
|||
except botocore.exceptions.NoCredentialsError as e:
|
||||
module.fail_json(msg='cannot connect to AWS', exception=traceback.format_exc())
|
||||
|
||||
|
||||
try:
|
||||
if module.params['key_alias'] and not module.params['key_arn']:
|
||||
module.params['key_arn'] = get_arn_from_kms_alias(kms, module.params['key_alias'])
|
||||
|
|
|
@ -524,12 +524,14 @@ class CloudFrontServiceManager:
|
|||
keyed_list.update({distribution_id: item})
|
||||
return keyed_list
|
||||
|
||||
|
||||
def set_facts_for_distribution_id_and_alias(details, facts, distribution_id, aliases):
|
||||
facts[distribution_id].update(details)
|
||||
for alias in aliases:
|
||||
facts[alias].update(details)
|
||||
return facts
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = ec2_argument_spec()
|
||||
argument_spec.update(dict(
|
||||
|
|
|
@ -223,7 +223,6 @@ def create_or_update_dynamo_table(connection, module, boto3_dynamodb=None, boto3
|
|||
try:
|
||||
table = Table(table_name, connection=connection)
|
||||
|
||||
|
||||
if dynamo_table_exists(table):
|
||||
result['changed'] = update_dynamo_table(table, throughput=throughput, check_mode=module.check_mode, global_indexes=global_indexes)
|
||||
else:
|
||||
|
@ -397,6 +396,7 @@ def validate_index(index, module):
|
|||
if index['type'] not in INDEX_TYPE_OPTIONS:
|
||||
module.fail_json(msg='%s is not a valid index type, must be one of %s' % (index['type'], INDEX_TYPE_OPTIONS))
|
||||
|
||||
|
||||
def get_indexes(all_indexes):
|
||||
indexes = []
|
||||
global_indexes = []
|
||||
|
@ -429,7 +429,6 @@ def get_indexes(all_indexes):
|
|||
return indexes, global_indexes
|
||||
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = ec2_argument_spec()
|
||||
argument_spec.update(dict(
|
||||
|
|
|
@ -158,7 +158,6 @@ except ImportError:
|
|||
HAS_BOTO3 = False
|
||||
|
||||
|
||||
|
||||
def copy_image(module, ec2):
|
||||
"""
|
||||
Copies an AMI
|
||||
|
|
|
@ -425,6 +425,7 @@ def _throttleable_operation(max_retries):
|
|||
return _do_op
|
||||
return _operation_wrapper
|
||||
|
||||
|
||||
def _get_vpc_connection(module, region, aws_connect_params):
|
||||
try:
|
||||
return connect_to_aws(boto.vpc, region, **aws_connect_params)
|
||||
|
@ -434,6 +435,7 @@ def _get_vpc_connection(module, region, aws_connect_params):
|
|||
|
||||
_THROTTLING_RETRIES = 5
|
||||
|
||||
|
||||
class ElbManager(object):
|
||||
"""Handles ELB creation and destruction"""
|
||||
|
||||
|
|
|
@ -252,6 +252,7 @@ def create_metric_alarm(connection, module):
|
|||
threshold=result.threshold,
|
||||
unit=result.unit)
|
||||
|
||||
|
||||
def delete_metric_alarm(connection, module):
|
||||
name = module.params.get('name')
|
||||
|
||||
|
|
|
@ -330,6 +330,7 @@ def boto_supports_volume_encryption():
|
|||
"""
|
||||
return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.29.0')
|
||||
|
||||
|
||||
def boto_supports_kms_key_id():
|
||||
"""
|
||||
Check if Boto library supports kms_key_ids (added in 2.39.0)
|
||||
|
@ -339,6 +340,7 @@ def boto_supports_kms_key_id():
|
|||
"""
|
||||
return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.39.0')
|
||||
|
||||
|
||||
def create_volume(module, ec2, zone):
|
||||
changed = False
|
||||
name = module.params.get('name')
|
||||
|
|
|
@ -98,6 +98,7 @@ def get_volume_info(volume):
|
|||
|
||||
return volume_info
|
||||
|
||||
|
||||
def list_ec2_volumes(connection, module):
|
||||
|
||||
filters = module.params.get("filters")
|
||||
|
|
|
@ -235,6 +235,7 @@ def ensure_tags(module, vpc_conn, resource_id, tags, add_only, check_mode):
|
|||
except EC2ResponseError as e:
|
||||
module.fail_json(msg="Failed to modify tags: %s" % e.message, exception=traceback.format_exc())
|
||||
|
||||
|
||||
def fetch_dhcp_options_for_vpc(vpc_conn, vpc_id):
|
||||
"""
|
||||
Returns the DHCP options object currently associated with the requested VPC ID using the VPC
|
||||
|
|
|
@ -321,7 +321,7 @@ def construct_acl_entries(nacl, client, module):
|
|||
create_network_acl_entry(params, client, module)
|
||||
|
||||
|
||||
## Module invocations
|
||||
# Module invocations
|
||||
def setup_network_acl(client, module):
|
||||
changed = False
|
||||
nacl = describe_network_acl(client, module)
|
||||
|
|
|
@ -199,6 +199,7 @@ def update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
|
|||
else:
|
||||
return False
|
||||
|
||||
|
||||
def get_vpc_values(vpc_obj):
|
||||
|
||||
if vpc_obj is not None:
|
||||
|
@ -213,6 +214,7 @@ def get_vpc_values(vpc_obj):
|
|||
else:
|
||||
return None
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = ec2_argument_spec()
|
||||
argument_spec.update(dict(
|
||||
|
|
|
@ -147,6 +147,7 @@ def get_vgw_info(vgws):
|
|||
|
||||
return vgw_info
|
||||
|
||||
|
||||
def wait_for_status(client, module, vpn_gateway_id, status):
|
||||
polling_increment_secs = 15
|
||||
max_retries = (module.params.get('wait_timeout') // polling_increment_secs)
|
||||
|
|
|
@ -162,6 +162,7 @@ class EcsClusterManager:
|
|||
def delete_cluster(self, clusterName):
|
||||
return self.ecs.delete_cluster(cluster=clusterName)
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
argument_spec = ec2_argument_spec()
|
||||
|
|
|
@ -199,6 +199,7 @@ class EcsServiceManager:
|
|||
e['createdAt'] = str(e['createdAt'])
|
||||
return service
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
argument_spec = ec2_argument_spec()
|
||||
|
|
|
@ -224,6 +224,7 @@ class EcsExecManager:
|
|||
response = self.ecs.stop_task(cluster=cluster, task=task)
|
||||
return response['task']
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = ec2_argument_spec()
|
||||
argument_spec.update(dict(
|
||||
|
|
|
@ -131,6 +131,7 @@ from ansible.module_utils.basic import AnsibleModule
|
|||
from ansible.module_utils.ec2 import boto3_conn, camel_dict_to_snake_dict, ec2_argument_spec, get_aws_connection_info
|
||||
from ansible.module_utils._text import to_text
|
||||
|
||||
|
||||
class EcsTaskManager:
|
||||
"""Handles ECS Tasks"""
|
||||
|
||||
|
|
|
@ -107,7 +107,6 @@ def main():
|
|||
if not region:
|
||||
module.fail_json(msg=str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set."))
|
||||
|
||||
|
||||
"""Get an elasticache connection"""
|
||||
try:
|
||||
conn = connect_to_region(region_name=region, **aws_connect_kwargs)
|
||||
|
|
|
@ -657,7 +657,6 @@ def compare_listeners(connection, module, current_listeners, new_listeners, purg
|
|||
|
||||
|
||||
def compare_rules(connection, module, current_listeners, listener):
|
||||
|
||||
"""
|
||||
Compare rules and return rules to add, rules to modify and rules to remove
|
||||
Rules are compared based on priority
|
||||
|
|
|
@ -425,6 +425,7 @@ def _throttleable_operation(max_retries):
|
|||
return _do_op
|
||||
return _operation_wrapper
|
||||
|
||||
|
||||
def _get_vpc_connection(module, region, aws_connect_params):
|
||||
try:
|
||||
return connect_to_aws(boto.vpc, region, **aws_connect_params)
|
||||
|
@ -434,6 +435,7 @@ def _get_vpc_connection(module, region, aws_connect_params):
|
|||
|
||||
_THROTTLING_RETRIES = 5
|
||||
|
||||
|
||||
class ElbManager(object):
|
||||
"""Handles ELB creation and destruction"""
|
||||
|
||||
|
|
|
@ -240,8 +240,8 @@ def create_user(module, iam, name, pwd, path, key_state, key_count):
|
|||
if key_count:
|
||||
while key_count > key_qty:
|
||||
keys.append(iam.create_access_key(
|
||||
user_name=name).create_access_key_response.\
|
||||
create_access_key_result.\
|
||||
user_name=name).create_access_key_response.
|
||||
create_access_key_result.
|
||||
access_key)
|
||||
key_qty += 1
|
||||
else:
|
||||
|
@ -526,6 +526,7 @@ def delete_group(module=None, iam=None, name=None):
|
|||
changed = True
|
||||
return changed, name
|
||||
|
||||
|
||||
def update_group(module=None, iam=None, name=None, new_name=None, new_path=None):
|
||||
changed = False
|
||||
try:
|
||||
|
|
|
@ -166,6 +166,7 @@ def find_health_check(conn, wanted):
|
|||
return check
|
||||
return None
|
||||
|
||||
|
||||
def to_health_check(config):
|
||||
return HealthCheck(
|
||||
config.get('IPAddress'),
|
||||
|
@ -178,6 +179,7 @@ def to_health_check(config):
|
|||
failure_threshold=int(config.get('FailureThreshold')),
|
||||
)
|
||||
|
||||
|
||||
def health_check_diff(a, b):
|
||||
a = a.__dict__
|
||||
b = b.__dict__
|
||||
|
@ -189,6 +191,7 @@ def health_check_diff(a, b):
|
|||
diff[key] = b.get(key)
|
||||
return diff
|
||||
|
||||
|
||||
def to_template_params(health_check):
|
||||
params = {
|
||||
'ip_addr_part': '',
|
||||
|
@ -240,6 +243,7 @@ UPDATEHCXMLBody = """
|
|||
</UpdateHealthCheckRequest>
|
||||
"""
|
||||
|
||||
|
||||
def create_health_check(conn, health_check, caller_ref=None):
|
||||
if caller_ref is None:
|
||||
caller_ref = str(uuid.uuid4())
|
||||
|
@ -259,6 +263,7 @@ def create_health_check(conn, health_check, caller_ref = None):
|
|||
else:
|
||||
raise exception.DNSServerError(response.status, response.reason, body)
|
||||
|
||||
|
||||
def update_health_check(conn, health_check_id, health_check_version, health_check):
|
||||
uri = '/%s/healthcheck/%s' % (conn.Version, health_check_id)
|
||||
params = to_template_params(health_check)
|
||||
|
@ -279,6 +284,7 @@ def update_health_check(conn, health_check_id, health_check_version, health_chec
|
|||
h.parse(body)
|
||||
return e
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = ec2_argument_spec()
|
||||
argument_spec.update(dict(
|
||||
|
|
|
@ -256,6 +256,7 @@ def create_lifecycle_rule(connection, module):
|
|||
|
||||
module.exit_json(changed=changed)
|
||||
|
||||
|
||||
def compare_rule(rule_a, rule_b):
|
||||
|
||||
# Copy objects
|
||||
|
|
|
@ -256,7 +256,6 @@ DEFAULT_CHUNK_SIZE = 5 * 1024 * 1024
|
|||
|
||||
|
||||
def calculate_multipart_etag(source_path, chunk_size=DEFAULT_CHUNK_SIZE):
|
||||
|
||||
"""
|
||||
calculates a multipart upload etag for amazon s3
|
||||
|
||||
|
|
|
@ -194,7 +194,6 @@ class SnsTopicManager(object):
|
|||
break
|
||||
return [t['TopicArn'] for t in topics]
|
||||
|
||||
|
||||
def _arn_topic_lookup(self):
|
||||
# topic names cannot have colons, so this captures the full topic name
|
||||
all_topics = self._get_all_topics()
|
||||
|
@ -203,7 +202,6 @@ class SnsTopicManager(object):
|
|||
if topic.endswith(lookup_topic):
|
||||
return topic
|
||||
|
||||
|
||||
def _create_topic(self):
|
||||
self.changed = True
|
||||
self.topic_created = True
|
||||
|
@ -214,11 +212,8 @@ class SnsTopicManager(object):
|
|||
time.sleep(3)
|
||||
self.arn_topic = self._arn_topic_lookup()
|
||||
|
||||
|
||||
def _set_topic_attrs(self):
|
||||
topic_attributes = self.connection.get_topic_attributes(self.arn_topic) \
|
||||
['GetTopicAttributesResponse'] ['GetTopicAttributesResult'] \
|
||||
['Attributes']
|
||||
topic_attributes = self.connection.get_topic_attributes(self.arn_topic)['GetTopicAttributesResponse']['GetTopicAttributesResult']['Attributes']
|
||||
|
||||
if self.display_name and self.display_name != topic_attributes['DisplayName']:
|
||||
self.changed = True
|
||||
|
@ -234,7 +229,7 @@ class SnsTopicManager(object):
|
|||
self.connection.set_topic_attributes(self.arn_topic, 'Policy',
|
||||
json.dumps(self.policy))
|
||||
|
||||
if self.delivery_policy and ('DeliveryPolicy' not in topic_attributes or \
|
||||
if self.delivery_policy and ('DeliveryPolicy' not in topic_attributes or
|
||||
self.delivery_policy != json.loads(topic_attributes['DeliveryPolicy'])):
|
||||
self.changed = True
|
||||
self.attributes_set.append('delivery_policy')
|
||||
|
@ -242,21 +237,18 @@ class SnsTopicManager(object):
|
|||
self.connection.set_topic_attributes(self.arn_topic, 'DeliveryPolicy',
|
||||
json.dumps(self.delivery_policy))
|
||||
|
||||
|
||||
def _canonicalize_endpoint(self, protocol, endpoint):
|
||||
if protocol == 'sms':
|
||||
return re.sub('[^0-9]*', '', endpoint)
|
||||
return endpoint
|
||||
|
||||
|
||||
def _get_topic_subs(self):
|
||||
next_token = None
|
||||
while True:
|
||||
response = self.connection.get_all_subscriptions_by_topic(self.arn_topic, next_token)
|
||||
self.subscriptions_existing.extend(response['ListSubscriptionsByTopicResponse'] \
|
||||
self.subscriptions_existing.extend(response['ListSubscriptionsByTopicResponse']
|
||||
['ListSubscriptionsByTopicResult']['Subscriptions'])
|
||||
next_token = response['ListSubscriptionsByTopicResponse'] \
|
||||
['ListSubscriptionsByTopicResult']['NextToken']
|
||||
next_token = response['ListSubscriptionsByTopicResponse']['ListSubscriptionsByTopicResult']['NextToken']
|
||||
if not next_token:
|
||||
break
|
||||
|
||||
|
@ -284,7 +276,6 @@ class SnsTopicManager(object):
|
|||
if not self.check_mode:
|
||||
self.connection.subscribe(self.arn_topic, protocol, endpoint)
|
||||
|
||||
|
||||
def _delete_subscriptions(self):
|
||||
# NOTE: subscriptions in 'PendingConfirmation' timeout in 3 days
|
||||
# https://forums.aws.amazon.com/thread.jspa?threadID=85993
|
||||
|
@ -295,14 +286,12 @@ class SnsTopicManager(object):
|
|||
if not self.check_mode:
|
||||
self.connection.unsubscribe(sub['SubscriptionArn'])
|
||||
|
||||
|
||||
def _delete_topic(self):
|
||||
self.topic_deleted = True
|
||||
self.changed = True
|
||||
if not self.check_mode:
|
||||
self.connection.delete_topic(self.arn_topic)
|
||||
|
||||
|
||||
def ensure_ok(self):
|
||||
self.arn_topic = self._arn_topic_lookup()
|
||||
if not self.arn_topic:
|
||||
|
@ -319,7 +308,6 @@ class SnsTopicManager(object):
|
|||
self._delete_subscriptions()
|
||||
self._delete_topic()
|
||||
|
||||
|
||||
def get_info(self):
|
||||
info = {
|
||||
'name': self.name,
|
||||
|
@ -341,7 +329,6 @@ class SnsTopicManager(object):
|
|||
return info
|
||||
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = ec2_argument_spec()
|
||||
argument_spec.update(
|
||||
|
|
|
@ -113,6 +113,7 @@ def assume_role_policy(connection, module):
|
|||
|
||||
module.exit_json(changed=changed, sts_creds=assumed_role.credentials.__dict__, sts_user=assumed_role.user.__dict__)
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = ec2_argument_spec()
|
||||
argument_spec.update(
|
||||
|
|
|
@ -108,6 +108,7 @@ def normalize_credentials(credentials):
|
|||
'expiration': expiration
|
||||
}
|
||||
|
||||
|
||||
def get_session_token(connection, module):
|
||||
duration_seconds = module.params.get('duration_seconds')
|
||||
mfa_serial_number = module.params.get('mfa_serial_number')
|
||||
|
@ -131,6 +132,7 @@ def get_session_token(connection, module):
|
|||
credentials = normalize_credentials(response.get('Credentials', {}))
|
||||
module.exit_json(changed=changed, sts_creds=credentials)
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = ec2_argument_spec()
|
||||
argument_spec.update(
|
||||
|
|
|
@ -154,6 +154,7 @@ def managed_disk_to_dict(managed_disk):
|
|||
|
||||
class AzureRMManagedDisk(AzureRMModuleBase):
|
||||
"""Configuration class for an Azure RM Managed Disk resource"""
|
||||
|
||||
def __init__(self):
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
|
|
|
@ -475,8 +475,7 @@ class AzureRMNetworkInterface(AzureRMModuleBase):
|
|||
tags=results['tags'],
|
||||
ip_configurations=[
|
||||
NetworkInterfaceIPConfiguration(
|
||||
private_ip_allocation_method=
|
||||
results['ip_configuration']['private_ip_allocation_method']
|
||||
private_ip_allocation_method=results['ip_configuration']['private_ip_allocation_method']
|
||||
)
|
||||
]
|
||||
)
|
||||
|
|
|
@ -183,7 +183,6 @@ class AzureRMPublicIPFacts(AzureRMModuleBase):
|
|||
return results
|
||||
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMPublicIPFacts()
|
||||
|
||||
|
|
|
@ -132,7 +132,6 @@ except ImportError:
|
|||
pass
|
||||
|
||||
|
||||
|
||||
def subnet_to_dict(subnet):
|
||||
result = dict(
|
||||
id=subnet.id,
|
||||
|
|
|
@ -1461,6 +1461,7 @@ class AzureRMVirtualMachine(AzureRMModuleBase):
|
|||
return ImageReference(id=vm_image.id)
|
||||
|
||||
self.fail("Error could not find image with name {0}".format(name))
|
||||
|
||||
def get_availability_set(self, resource_group, name):
|
||||
try:
|
||||
return self.compute_client.availability_sets.get(resource_group, name)
|
||||
|
|
|
@ -327,7 +327,6 @@ class AzureRMVirtualNetwork(AzureRMModuleBase):
|
|||
self.delete_virtual_network()
|
||||
self.results['state']['status'] = 'Deleted'
|
||||
|
||||
|
||||
return self.results
|
||||
|
||||
def create_or_update_vnet(self, vnet):
|
||||
|
|
|
@ -178,6 +178,7 @@ class AzureRMNetworkInterfaceFacts(AzureRMModuleBase):
|
|||
results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS))
|
||||
return results
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMNetworkInterfaceFacts()
|
||||
|
||||
|
|
|
@ -1217,7 +1217,6 @@ class TaskParameters(DockerBaseClass):
|
|||
return network_id
|
||||
|
||||
|
||||
|
||||
class Container(DockerBaseClass):
|
||||
|
||||
def __init__(self, container, parameters):
|
||||
|
|
|
@ -184,6 +184,7 @@ class TaskParameters(DockerBaseClass):
|
|||
def container_names_in_network(network):
|
||||
return [c['Name'] for c in network['Containers'].values()] if network['Containers'] else []
|
||||
|
||||
|
||||
class DockerNetworkManager(object):
|
||||
|
||||
def __init__(self, client):
|
||||
|
|
|
@ -193,7 +193,6 @@ def grant_check(module, gs, obj):
|
|||
return True
|
||||
|
||||
|
||||
|
||||
def key_check(module, gs, bucket, obj):
|
||||
try:
|
||||
bucket = gs.lookup(bucket)
|
||||
|
|
|
@ -151,6 +151,7 @@ ZONE_VERIFICATION_URL= 'https://www.google.com/webmasters/verification/'
|
|||
# Functions
|
||||
################################################################################
|
||||
|
||||
|
||||
def create_zone(module, gcdns, zone):
|
||||
"""Creates a new Google Cloud DNS zone."""
|
||||
|
||||
|
@ -273,6 +274,7 @@ def _get_zone(gcdns, zone_name):
|
|||
|
||||
return found_zone
|
||||
|
||||
|
||||
def _sanity_check(module):
|
||||
"""Run module sanity checks."""
|
||||
|
||||
|
@ -301,6 +303,7 @@ def _sanity_check(module):
|
|||
# Main
|
||||
################################################################################
|
||||
|
||||
|
||||
def main():
|
||||
"""Main function"""
|
||||
|
||||
|
|
|
@ -527,6 +527,7 @@ class LXDContainerManagement(object):
|
|||
fail_params['logs'] = e.kwargs['logs']
|
||||
self.module.fail_json(**fail_params)
|
||||
|
||||
|
||||
def main():
|
||||
"""Ansible Main module."""
|
||||
|
||||
|
|
|
@ -347,6 +347,7 @@ failed = False
|
|||
|
||||
class RHEVConn(object):
|
||||
'Connection to RHEV-M'
|
||||
|
||||
def __init__(self, module):
|
||||
self.module = module
|
||||
|
||||
|
|
|
@ -176,6 +176,7 @@ ENTRY_STATE_PERSISTENT_MAP = {
|
|||
1: "yes"
|
||||
}
|
||||
|
||||
|
||||
class EntryNotFound(Exception):
|
||||
pass
|
||||
|
||||
|
|
|
@ -137,12 +137,14 @@ def change_keys(recs, key='uuid', filter_func=None):
|
|||
|
||||
return new_recs
|
||||
|
||||
|
||||
def get_host(session):
|
||||
"""Get the host"""
|
||||
host_recs = session.xenapi.host.get_all()
|
||||
# We only have one host, so just return its entry
|
||||
return session.xenapi.host.get_record(host_recs[0])
|
||||
|
||||
|
||||
def get_vms(session):
|
||||
xs_vms = {}
|
||||
recs = session.xenapi.VM.get_all()
|
||||
|
@ -165,6 +167,7 @@ def get_srs(session):
|
|||
xs_srs[sr['name_label']] = sr
|
||||
return xs_srs
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule({})
|
||||
|
||||
|
|
|
@ -306,14 +306,17 @@ def _get_volume_quotas(cloud, project):
|
|||
|
||||
return cloud.get_volume_quotas(project)
|
||||
|
||||
|
||||
def _get_network_quotas(cloud, project):
|
||||
|
||||
return cloud.get_network_quotas(project)
|
||||
|
||||
|
||||
def _get_compute_quotas(cloud, project):
|
||||
|
||||
return cloud.get_compute_quotas(project)
|
||||
|
||||
|
||||
def _get_quotas(module, cloud, project):
|
||||
|
||||
quota = {}
|
||||
|
@ -334,6 +337,7 @@ def _get_quotas(module, cloud, project):
|
|||
|
||||
return quota
|
||||
|
||||
|
||||
def _scrub_results(quota):
|
||||
|
||||
filter_attr = [
|
||||
|
@ -350,6 +354,7 @@ def _scrub_results(quota):
|
|||
|
||||
return quota
|
||||
|
||||
|
||||
def _system_state_change_details(module, project_quota_output):
|
||||
|
||||
quota_change_request = {}
|
||||
|
@ -368,6 +373,7 @@ def _system_state_change_details(module, project_quota_output):
|
|||
|
||||
return (changes_required, quota_change_request)
|
||||
|
||||
|
||||
def _system_state_change(module, project_quota_output):
|
||||
"""
|
||||
Determine if changes are required to the current project quota.
|
||||
|
@ -386,6 +392,7 @@ def _system_state_change(module, project_quota_output):
|
|||
else:
|
||||
return False
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
argument_spec = openstack_full_argument_spec(
|
||||
|
|
|
@ -433,6 +433,7 @@ def _parse_nics(nics):
|
|||
else:
|
||||
yield net
|
||||
|
||||
|
||||
def _network_args(module, cloud):
|
||||
args = []
|
||||
nics = module.params['nics']
|
||||
|
|
|
@ -181,6 +181,7 @@ def _create_stack(module, stack, cloud):
|
|||
except shade.OpenStackCloudException as e:
|
||||
module.fail_json(msg=str(e))
|
||||
|
||||
|
||||
def _update_stack(module, stack, cloud):
|
||||
try:
|
||||
stack = cloud.update_stack(
|
||||
|
@ -200,6 +201,7 @@ def _update_stack(module, stack, cloud):
|
|||
except shade.OpenStackCloudException as e:
|
||||
module.fail_json(msg=str(e))
|
||||
|
||||
|
||||
def _system_state_change(module, stack, cloud):
|
||||
state = module.params['state']
|
||||
if state == 'present':
|
||||
|
@ -209,6 +211,7 @@ def _system_state_change(module, stack, cloud):
|
|||
return True
|
||||
return False
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
argument_spec = openstack_full_argument_spec(
|
||||
|
|
|
@ -120,12 +120,14 @@ def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
|
|||
promise['requestId']
|
||||
) + '" to complete.')
|
||||
|
||||
|
||||
def _remove_datacenter(module, profitbricks, datacenter):
|
||||
try:
|
||||
profitbricks.delete_datacenter(datacenter)
|
||||
except Exception as e:
|
||||
module.fail_json(msg="failed to remove the datacenter: %s" % str(e))
|
||||
|
||||
|
||||
def create_datacenter(module, profitbricks):
|
||||
"""
|
||||
Creates a Datacenter
|
||||
|
@ -166,6 +168,7 @@ def create_datacenter(module, profitbricks):
|
|||
except Exception as e:
|
||||
module.fail_json(msg="failed to create the new datacenter: %s" % str(e))
|
||||
|
||||
|
||||
def remove_datacenter(module, profitbricks):
|
||||
"""
|
||||
Removes a Datacenter.
|
||||
|
@ -197,6 +200,7 @@ def remove_datacenter(module, profitbricks):
|
|||
|
||||
return changed
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
|
|
|
@ -123,6 +123,7 @@ def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
|
|||
promise['requestId']
|
||||
) + '" to complete.')
|
||||
|
||||
|
||||
def create_nic(module, profitbricks):
|
||||
"""
|
||||
Creates a NIC.
|
||||
|
@ -173,6 +174,7 @@ def create_nic(module, profitbricks):
|
|||
except Exception as e:
|
||||
module.fail_json(msg="failed to create the NIC: %s" % str(e))
|
||||
|
||||
|
||||
def delete_nic(module, profitbricks):
|
||||
"""
|
||||
Removes a NIC
|
||||
|
@ -228,6 +230,7 @@ def delete_nic(module, profitbricks):
|
|||
except Exception as e:
|
||||
module.fail_json(msg="failed to remove the NIC: %s" % str(e))
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
|
@ -255,7 +258,6 @@ def main():
|
|||
if not module.params.get('server'):
|
||||
module.fail_json(msg='server parameter is required')
|
||||
|
||||
|
||||
subscription_user = module.params.get('subscription_user')
|
||||
subscription_password = module.params.get('subscription_password')
|
||||
|
||||
|
|
|
@ -120,6 +120,7 @@ def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
|
|||
promise['requestId']
|
||||
) + '" to complete.')
|
||||
|
||||
|
||||
def attach_volume(module, profitbricks):
|
||||
"""
|
||||
Attaches a volume.
|
||||
|
@ -163,6 +164,7 @@ def attach_volume(module, profitbricks):
|
|||
|
||||
return profitbricks.attach_volume(datacenter, server, volume)
|
||||
|
||||
|
||||
def detach_volume(module, profitbricks):
|
||||
"""
|
||||
Detaches a volume.
|
||||
|
@ -206,6 +208,7 @@ def detach_volume(module, profitbricks):
|
|||
|
||||
return profitbricks.detach_volume(datacenter, server, volume)
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
|
|
|
@ -102,6 +102,7 @@ from ansible.module_utils.rax import (rax_argument_spec,
|
|||
setup_rax_module,
|
||||
)
|
||||
|
||||
|
||||
def cloud_load_balancer_ssl(module, loadbalancer, state, enabled, private_key,
|
||||
certificate, intermediate_certificate, secure_port,
|
||||
secure_traffic_only, https_redirect,
|
||||
|
@ -222,6 +223,7 @@ def cloud_load_balancer_ssl(module, loadbalancer, state, enabled, private_key,
|
|||
else:
|
||||
module.fail_json(**result)
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = rax_argument_spec()
|
||||
argument_spec.update(dict(
|
||||
|
|
|
@ -180,6 +180,7 @@ def alarm(module, state, label, entity_id, check_id, notification_plan_id, crite
|
|||
else:
|
||||
module.exit_json(changed=changed)
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = rax_argument_spec()
|
||||
argument_spec.update(
|
||||
|
|
|
@ -256,6 +256,7 @@ def cloud_check(module, state, entity_id, label, check_type,
|
|||
else:
|
||||
module.exit_json(changed=changed)
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = rax_argument_spec()
|
||||
argument_spec.update(
|
||||
|
|
|
@ -152,6 +152,7 @@ def cloud_monitoring(module, state, label, agent_id, named_ip_addresses,
|
|||
else:
|
||||
module.exit_json(changed=changed)
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = rax_argument_spec()
|
||||
argument_spec.update(
|
||||
|
|
|
@ -138,6 +138,7 @@ def notification(module, state, label, notification_type, details):
|
|||
else:
|
||||
module.exit_json(changed=changed)
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = rax_argument_spec()
|
||||
argument_spec.update(
|
||||
|
|
|
@ -141,6 +141,7 @@ def notification_plan(module, state, label, critical_state, warning_state, ok_st
|
|||
else:
|
||||
module.exit_json(changed=changed)
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = rax_argument_spec()
|
||||
argument_spec.update(
|
||||
|
|
|
@ -507,6 +507,7 @@ class PyVmomiDeviceHelper(object):
|
|||
|
||||
class PyVmomiCache(object):
|
||||
""" This class caches references to objects which are requested multiples times but not modified """
|
||||
|
||||
def __init__(self, content, dc_name=None):
|
||||
self.content = content
|
||||
self.dc_name = dc_name
|
||||
|
|
|
@ -156,7 +156,6 @@ class PyVmomiHelper(object):
|
|||
return tree
|
||||
|
||||
def _build_folder_map(self, folder, inpath='/'):
|
||||
|
||||
""" Build a searchable index for vms+uuids+folders """
|
||||
if isinstance(folder, tuple):
|
||||
folder = folder[1]
|
||||
|
|
|
@ -627,6 +627,7 @@ def spec_singleton(spec, request, vm):
|
|||
spec = request.new_spec()
|
||||
return spec
|
||||
|
||||
|
||||
def get_cdrom_params(module, s, vm_cdrom):
|
||||
cdrom_type = None
|
||||
cdrom_iso_path = None
|
||||
|
@ -648,6 +649,7 @@ def get_cdrom_params(module, s, vm_cdrom):
|
|||
|
||||
return cdrom_type, cdrom_iso_path
|
||||
|
||||
|
||||
def vmdisk_id(vm, current_datastore_name):
|
||||
id_list = []
|
||||
for vm_disk in vm._disks:
|
||||
|
@ -778,6 +780,8 @@ def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, mo
|
|||
|
||||
# example from https://github.com/kalazzerx/pysphere/blob/master/examples/pysphere_create_disk_and_add_to_vm.py
|
||||
# was used.
|
||||
|
||||
|
||||
def update_disks(vsphere_client, vm, module, vm_disk, changes):
|
||||
request = VI.ReconfigVM_TaskRequestMsg()
|
||||
changed = False
|
||||
|
@ -1791,7 +1795,6 @@ def main():
|
|||
power_on_after_clone = module.params['power_on_after_clone']
|
||||
validate_certs = module.params['validate_certs']
|
||||
|
||||
|
||||
# CONNECT TO THE SERVER
|
||||
viserver = VIServer()
|
||||
if validate_certs and not hasattr(ssl, 'SSLContext') and not vcenter_hostname.startswith('http://'):
|
||||
|
@ -1899,7 +1902,6 @@ def main():
|
|||
vm_hardware, vm_disk, vm_nic, esxi)):
|
||||
module.exit_json(changed=False, msg="vm %s not present" % guest)
|
||||
|
||||
|
||||
# Create the VM
|
||||
elif state in ['present', 'powered_off', 'powered_on']:
|
||||
|
||||
|
|
|
@ -188,7 +188,6 @@ def main():
|
|||
else:
|
||||
module.fail_json(msg="Unknown state specified: {}".format(app_state))
|
||||
|
||||
|
||||
module.exit_json(
|
||||
changed=True,
|
||||
result=result
|
||||
|
|
|
@ -156,7 +156,6 @@ def main():
|
|||
changed=False,
|
||||
)
|
||||
|
||||
|
||||
if not module.check_mode:
|
||||
# If this isn't a dry run, create the db
|
||||
# and default user.
|
||||
|
|
|
@ -456,6 +456,7 @@ class Configuration:
|
|||
"""
|
||||
Configuration for this module.
|
||||
"""
|
||||
|
||||
def __init__(self, management_token=None, host=None, scheme=None, validate_certs=None, name=None, port=None,
|
||||
rules=None, state=None, token=None, token_type=None):
|
||||
self.management_token = management_token # type: str
|
||||
|
@ -474,6 +475,7 @@ class Output:
|
|||
"""
|
||||
Output of an action of this module.
|
||||
"""
|
||||
|
||||
def __init__(self, changed=None, token=None, rules=None, operation=None):
|
||||
self.changed = changed # type: bool
|
||||
self.token = token # type: str
|
||||
|
@ -485,6 +487,7 @@ class ACL:
|
|||
"""
|
||||
Consul ACL. See: https://www.consul.io/docs/guides/acl.html.
|
||||
"""
|
||||
|
||||
def __init__(self, rules, token_type, token, name):
|
||||
self.rules = rules
|
||||
self.token_type = token_type
|
||||
|
@ -507,6 +510,7 @@ class Rule:
|
|||
"""
|
||||
ACL rule. See: https://www.consul.io/docs/guides/acl.html#acl-rules-and-scope.
|
||||
"""
|
||||
|
||||
def __init__(self, scope, policy, pattern=None):
|
||||
self.scope = scope
|
||||
self.policy = policy
|
||||
|
@ -532,6 +536,7 @@ class RuleCollection:
|
|||
"""
|
||||
Collection of ACL rules, which are part of a Consul ACL.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self._rules = {}
|
||||
for scope in RULE_SCOPES:
|
||||
|
|
|
@ -147,9 +147,11 @@ def parse_plugin_repo(string):
|
|||
|
||||
return repo
|
||||
|
||||
|
||||
def is_plugin_present(plugin_dir, working_dir):
|
||||
return os.path.isdir(os.path.join(working_dir, plugin_dir))
|
||||
|
||||
|
||||
def parse_error(string):
|
||||
reason = "reason: "
|
||||
try:
|
||||
|
@ -157,6 +159,7 @@ def parse_error(string):
|
|||
except ValueError:
|
||||
return string
|
||||
|
||||
|
||||
def install_plugin(module, plugin_bin, plugin_name, url, timeout):
|
||||
cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["present"], plugin_name]
|
||||
|
||||
|
@ -178,6 +181,7 @@ def install_plugin(module, plugin_bin, plugin_name, url, timeout):
|
|||
|
||||
return True, cmd, out, err
|
||||
|
||||
|
||||
def remove_plugin(module, plugin_bin, plugin_name):
|
||||
cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["absent"], plugin_name]
|
||||
|
||||
|
@ -193,6 +197,7 @@ def remove_plugin(module, plugin_bin, plugin_name):
|
|||
|
||||
return True, cmd, out, err
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
|
|
|
@ -102,6 +102,7 @@ def ring_check(module, riak_admin_bin):
|
|||
else:
|
||||
return False
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
module = AnsibleModule(
|
||||
|
@ -118,7 +119,6 @@ def main():
|
|||
validate_certs=dict(default='yes', type='bool'))
|
||||
)
|
||||
|
||||
|
||||
command = module.params.get('command')
|
||||
http_conn = module.params.get('http_conn')
|
||||
target_node = module.params.get('target_node')
|
||||
|
@ -126,7 +126,6 @@ def main():
|
|||
wait_for_ring = module.params.get('wait_for_ring')
|
||||
wait_for_service = module.params.get('wait_for_service')
|
||||
|
||||
|
||||
# make sure riak commands are on the path
|
||||
riak_bin = module.get_bin_path('riak')
|
||||
riak_admin_bin = module.get_bin_path('riak-admin')
|
||||
|
|
|
@ -279,6 +279,7 @@ def user_add(module, client, db_name, user, password, roles):
|
|||
else:
|
||||
db.add_user(user, password, None, roles=roles)
|
||||
|
||||
|
||||
def user_remove(module, client, db_name, user):
|
||||
exists = user_find(client, user, db_name)
|
||||
if exists:
|
||||
|
@ -289,6 +290,7 @@ def user_remove(module, client, db_name, user):
|
|||
else:
|
||||
module.exit_json(changed=False, user=user)
|
||||
|
||||
|
||||
def load_mongocnf():
|
||||
config = configparser.RawConfigParser()
|
||||
mongocnf = os.path.expanduser('~/.mongodb.cnf')
|
||||
|
@ -305,7 +307,6 @@ def load_mongocnf():
|
|||
return creds
|
||||
|
||||
|
||||
|
||||
def check_if_roles_changed(uinfo, roles, db_name):
|
||||
# We must be aware of users which can read the oplog on a replicaset
|
||||
# Such users must have access to the local DB, but since this DB does not store users credentials
|
||||
|
@ -341,7 +342,6 @@ def check_if_roles_changed(uinfo, roles, db_name):
|
|||
return True
|
||||
|
||||
|
||||
|
||||
# =========================================
|
||||
# Module execution.
|
||||
#
|
||||
|
|
|
@ -126,6 +126,7 @@ def db_delete(conn, cursor, db):
|
|||
cursor.execute("DROP DATABASE [%s]" % db)
|
||||
return not db_exists(conn, cursor, db)
|
||||
|
||||
|
||||
def db_import(conn, cursor, module, db, target):
|
||||
if os.path.isfile(target):
|
||||
backup = open(target, 'r')
|
||||
|
|
|
@ -99,6 +99,7 @@ def ext_exists(cursor, ext):
|
|||
cursor.execute(query, {'ext': ext})
|
||||
return cursor.rowcount == 1
|
||||
|
||||
|
||||
def ext_delete(cursor, ext):
|
||||
if ext_exists(cursor, ext):
|
||||
query = "DROP EXTENSION \"%s\"" % ext
|
||||
|
@ -107,6 +108,7 @@ def ext_delete(cursor, ext):
|
|||
else:
|
||||
return False
|
||||
|
||||
|
||||
def ext_create(cursor, ext):
|
||||
if not ext_exists(cursor, ext):
|
||||
query = 'CREATE EXTENSION "%s"' % ext
|
||||
|
@ -119,6 +121,7 @@ def ext_create(cursor, ext):
|
|||
# Module execution.
|
||||
#
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
|
|
|
@ -161,18 +161,21 @@ def lang_exists(cursor, lang):
|
|||
cursor.execute(query)
|
||||
return cursor.rowcount > 0
|
||||
|
||||
|
||||
def lang_istrusted(cursor, lang):
|
||||
"""Checks if language is trusted for db"""
|
||||
query = "SELECT lanpltrusted FROM pg_language WHERE lanname='%s'" % lang
|
||||
cursor.execute(query)
|
||||
return cursor.fetchone()[0]
|
||||
|
||||
|
||||
def lang_altertrust(cursor, lang, trust):
|
||||
"""Changes if language is trusted for db"""
|
||||
query = "UPDATE pg_language SET lanpltrusted = %s WHERE lanname=%s"
|
||||
cursor.execute(query, (trust, lang))
|
||||
return True
|
||||
|
||||
|
||||
def lang_add(cursor, lang, trust):
|
||||
"""Adds language for db"""
|
||||
if trust:
|
||||
|
@ -182,6 +185,7 @@ def lang_add(cursor, lang, trust):
|
|||
cursor.execute(query)
|
||||
return True
|
||||
|
||||
|
||||
def lang_drop(cursor, lang, cascade):
|
||||
"""Drops language for db"""
|
||||
cursor.execute("SAVEPOINT ansible_pgsql_lang_drop")
|
||||
|
@ -197,6 +201,7 @@ def lang_drop(cursor, lang, cascade):
|
|||
cursor.execute("RELEASE SAVEPOINT ansible_pgsql_lang_drop")
|
||||
return True
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
|
|
|
@ -125,6 +125,7 @@ def set_owner(cursor, schema, owner):
|
|||
cursor.execute(query)
|
||||
return True
|
||||
|
||||
|
||||
def get_schema_info(cursor, schema):
|
||||
query = """
|
||||
SELECT schema_owner AS owner
|
||||
|
@ -134,11 +135,13 @@ def get_schema_info(cursor, schema):
|
|||
cursor.execute(query, {'schema': schema})
|
||||
return cursor.fetchone()
|
||||
|
||||
|
||||
def schema_exists(cursor, schema):
|
||||
query = "SELECT schema_name FROM information_schema.schemata WHERE schema_name = %(schema)s"
|
||||
cursor.execute(query, {'schema': schema})
|
||||
return cursor.rowcount == 1
|
||||
|
||||
|
||||
def schema_delete(cursor, schema):
|
||||
if schema_exists(cursor, schema):
|
||||
query = "DROP SCHEMA %s" % pg_quote_identifier(schema, 'schema')
|
||||
|
@ -147,6 +150,7 @@ def schema_delete(cursor, schema):
|
|||
else:
|
||||
return False
|
||||
|
||||
|
||||
def schema_create(cursor, schema, owner):
|
||||
if not schema_exists(cursor, schema):
|
||||
query_fragments = ['CREATE SCHEMA %s' % pg_quote_identifier(schema, 'schema')]
|
||||
|
@ -162,6 +166,7 @@ def schema_create(cursor, schema, owner):
|
|||
else:
|
||||
return False
|
||||
|
||||
|
||||
def schema_matches(cursor, schema, owner):
|
||||
if not schema_exists(cursor, schema):
|
||||
return False
|
||||
|
@ -176,6 +181,7 @@ def schema_matches(cursor, schema, owner):
|
|||
# Module execution.
|
||||
#
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
|
|
|
@ -86,11 +86,13 @@ from ansible.module_utils._text import to_native
|
|||
class NotSupportedError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class CannotDropError(Exception):
|
||||
pass
|
||||
|
||||
# module specific functions
|
||||
|
||||
|
||||
def get_configuration_facts(cursor, parameter_name=''):
|
||||
facts = {}
|
||||
cursor.execute("""
|
||||
|
@ -110,12 +112,14 @@ def get_configuration_facts(cursor, parameter_name=''):
|
|||
'default_value': row.default_value}
|
||||
return facts
|
||||
|
||||
|
||||
def check(configuration_facts, parameter_name, current_value):
|
||||
parameter_key = parameter_name.lower()
|
||||
if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower():
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
def present(configuration_facts, cursor, parameter_name, current_value):
|
||||
parameter_key = parameter_name.lower()
|
||||
changed = False
|
||||
|
@ -128,6 +132,7 @@ def present(configuration_facts, cursor, parameter_name, current_value):
|
|||
|
||||
# module logic
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
module = AnsibleModule(
|
||||
|
|
|
@ -81,6 +81,7 @@ class NotSupportedError(Exception):
|
|||
|
||||
# module specific functions
|
||||
|
||||
|
||||
def get_schema_facts(cursor, schema=''):
|
||||
facts = {}
|
||||
cursor.execute("""
|
||||
|
@ -121,6 +122,7 @@ def get_schema_facts(cursor, schema=''):
|
|||
facts[schema_key]['usage_roles'].append(row.role_name)
|
||||
return facts
|
||||
|
||||
|
||||
def get_user_facts(cursor, user=''):
|
||||
facts = {}
|
||||
cursor.execute("""
|
||||
|
@ -155,6 +157,7 @@ def get_user_facts(cursor, user=''):
|
|||
facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',')
|
||||
return facts
|
||||
|
||||
|
||||
def get_role_facts(cursor, role=''):
|
||||
facts = {}
|
||||
cursor.execute("""
|
||||
|
@ -175,6 +178,7 @@ def get_role_facts(cursor, role=''):
|
|||
facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',')
|
||||
return facts
|
||||
|
||||
|
||||
def get_configuration_facts(cursor, parameter=''):
|
||||
facts = {}
|
||||
cursor.execute("""
|
||||
|
@ -194,6 +198,7 @@ def get_configuration_facts(cursor, parameter=''):
|
|||
'default_value': row.default_value}
|
||||
return facts
|
||||
|
||||
|
||||
def get_node_facts(cursor, schema=''):
|
||||
facts = {}
|
||||
cursor.execute("""
|
||||
|
@ -216,6 +221,7 @@ def get_node_facts(cursor, schema=''):
|
|||
|
||||
# module logic
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
module = AnsibleModule(
|
||||
|
|
|
@ -98,11 +98,13 @@ from ansible.module_utils._text import to_native
|
|||
class NotSupportedError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class CannotDropError(Exception):
|
||||
pass
|
||||
|
||||
# module specific functions
|
||||
|
||||
|
||||
def get_role_facts(cursor, role=''):
|
||||
facts = {}
|
||||
cursor.execute("""
|
||||
|
@ -123,6 +125,7 @@ def get_role_facts(cursor, role=''):
|
|||
facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',')
|
||||
return facts
|
||||
|
||||
|
||||
def update_roles(role_facts, cursor, role,
|
||||
existing, required):
|
||||
for assigned_role in set(existing) - set(required):
|
||||
|
@ -130,6 +133,7 @@ def update_roles(role_facts, cursor, role,
|
|||
for assigned_role in set(required) - set(existing):
|
||||
cursor.execute("grant {0} to {1}".format(assigned_role, role))
|
||||
|
||||
|
||||
def check(role_facts, role, assigned_roles):
|
||||
role_key = role.lower()
|
||||
if role_key not in role_facts:
|
||||
|
@ -138,6 +142,7 @@ def check(role_facts, role, assigned_roles):
|
|||
return False
|
||||
return True
|
||||
|
||||
|
||||
def present(role_facts, cursor, role, assigned_roles):
|
||||
role_key = role.lower()
|
||||
if role_key not in role_facts:
|
||||
|
@ -155,6 +160,7 @@ def present(role_facts, cursor, role, assigned_roles):
|
|||
role_facts.update(get_role_facts(cursor, role))
|
||||
return changed
|
||||
|
||||
|
||||
def absent(role_facts, cursor, role, assigned_roles):
|
||||
role_key = role.lower()
|
||||
if role_key in role_facts:
|
||||
|
@ -168,6 +174,7 @@ def absent(role_facts, cursor, role, assigned_roles):
|
|||
|
||||
# module logic
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
module = AnsibleModule(
|
||||
|
|
|
@ -122,11 +122,13 @@ from ansible.module_utils._text import to_native
|
|||
class NotSupportedError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class CannotDropError(Exception):
|
||||
pass
|
||||
|
||||
# module specific functions
|
||||
|
||||
|
||||
def get_schema_facts(cursor, schema=''):
|
||||
facts = {}
|
||||
cursor.execute("""
|
||||
|
@ -167,6 +169,7 @@ def get_schema_facts(cursor, schema=''):
|
|||
facts[schema_key]['usage_roles'].append(row.role_name)
|
||||
return facts
|
||||
|
||||
|
||||
def update_roles(schema_facts, cursor, schema,
|
||||
existing, required,
|
||||
create_existing, create_required):
|
||||
|
@ -180,6 +183,7 @@ def update_roles(schema_facts, cursor, schema,
|
|||
for role in set(create_required) - set(create_existing):
|
||||
cursor.execute("grant create on schema {0} to {1}".format(schema, role))
|
||||
|
||||
|
||||
def check(schema_facts, schema, usage_roles, create_roles, owner):
|
||||
schema_key = schema.lower()
|
||||
if schema_key not in schema_facts:
|
||||
|
@ -192,6 +196,7 @@ def check(schema_facts, schema, usage_roles, create_roles, owner):
|
|||
return False
|
||||
return True
|
||||
|
||||
|
||||
def present(schema_facts, cursor, schema, usage_roles, create_roles, owner):
|
||||
schema_key = schema.lower()
|
||||
if schema_key not in schema_facts:
|
||||
|
@ -220,6 +225,7 @@ def present(schema_facts, cursor, schema, usage_roles, create_roles, owner):
|
|||
schema_facts.update(get_schema_facts(cursor, schema))
|
||||
return changed
|
||||
|
||||
|
||||
def absent(schema_facts, cursor, schema, usage_roles, create_roles):
|
||||
schema_key = schema.lower()
|
||||
if schema_key in schema_facts:
|
||||
|
@ -236,6 +242,7 @@ def absent(schema_facts, cursor, schema, usage_roles, create_roles):
|
|||
|
||||
# module logic
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
module = AnsibleModule(
|
||||
|
|
|
@ -134,11 +134,13 @@ from ansible.module_utils._text import to_native
|
|||
class NotSupportedError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class CannotDropError(Exception):
|
||||
pass
|
||||
|
||||
# module specific functions
|
||||
|
||||
|
||||
def get_user_facts(cursor, user=''):
|
||||
facts = {}
|
||||
cursor.execute("""
|
||||
|
@ -173,6 +175,7 @@ def get_user_facts(cursor, user=''):
|
|||
facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',')
|
||||
return facts
|
||||
|
||||
|
||||
def update_roles(user_facts, cursor, user,
|
||||
existing_all, existing_default, required):
|
||||
del_roles = list(set(existing_all) - set(required))
|
||||
|
@ -184,6 +187,7 @@ def update_roles(user_facts, cursor, user,
|
|||
if required:
|
||||
cursor.execute("alter user {0} default role {1}".format(user, ','.join(required)))
|
||||
|
||||
|
||||
def check(user_facts, user, profile, resource_pool,
|
||||
locked, password, expired, ldap, roles):
|
||||
user_key = user.lower()
|
||||
|
@ -200,11 +204,12 @@ def check(user_facts, user, profile, resource_pool,
|
|||
if (expired is not None and expired != (user_facts[user_key]['expired'] == 'True') or
|
||||
ldap is not None and ldap != (user_facts[user_key]['expired'] == 'True')):
|
||||
return False
|
||||
if roles and (sorted(roles) != sorted(user_facts[user_key]['roles']) or \
|
||||
if roles and (sorted(roles) != sorted(user_facts[user_key]['roles']) or
|
||||
sorted(roles) != sorted(user_facts[user_key]['default_roles'])):
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
def present(user_facts, cursor, user, profile, resource_pool,
|
||||
locked, password, expired, ldap, roles):
|
||||
user_key = user.lower()
|
||||
|
@ -267,7 +272,7 @@ def present(user_facts, cursor, user, profile, resource_pool,
|
|||
changed = True
|
||||
if changed:
|
||||
cursor.execute(' '.join(query_fragments))
|
||||
if roles and (sorted(roles) != sorted(user_facts[user_key]['roles']) or \
|
||||
if roles and (sorted(roles) != sorted(user_facts[user_key]['roles']) or
|
||||
sorted(roles) != sorted(user_facts[user_key]['default_roles'])):
|
||||
update_roles(user_facts, cursor, user,
|
||||
user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], roles)
|
||||
|
@ -276,6 +281,7 @@ def present(user_facts, cursor, user, profile, resource_pool,
|
|||
user_facts.update(get_user_facts(cursor, user))
|
||||
return changed
|
||||
|
||||
|
||||
def absent(user_facts, cursor, user, roles):
|
||||
user_key = user.lower()
|
||||
if user_key in user_facts:
|
||||
|
@ -292,6 +298,7 @@ def absent(user_facts, cursor, user, roles):
|
|||
|
||||
# module logic
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
module = AnsibleModule(
|
||||
|
|
|
@ -135,6 +135,7 @@ def post_annotation(module):
|
|||
response = response.read()
|
||||
module.exit_json(changed=True, annotation=response)
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
module = AnsibleModule(
|
||||
|
|
|
@ -100,6 +100,7 @@ def follow_log(module, le_path, logs, name=None, logtype=None):
|
|||
|
||||
module.exit_json(changed=False, msg="logs(s) already followed")
|
||||
|
||||
|
||||
def unfollow_log(module, le_path, logs):
|
||||
""" Unfollows one or more logs if followed. """
|
||||
|
||||
|
@ -125,6 +126,7 @@ def unfollow_log(module, le_path, logs):
|
|||
|
||||
module.exit_json(changed=False, msg="logs(s) already unfollowed")
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
|
|
|
@ -549,7 +549,6 @@ except ImportError:
|
|||
HAS_LIB_JSON = False
|
||||
|
||||
|
||||
|
||||
class LogicMonitor(object):
|
||||
|
||||
def __init__(self, module, **params):
|
||||
|
|
|
@ -259,7 +259,6 @@ def main():
|
|||
'servicegroup_service_downtime',
|
||||
]
|
||||
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
action=dict(required=True, default=None, choices=ACTION_CHOICES),
|
||||
|
@ -595,7 +594,6 @@ class Nagios(object):
|
|||
dt_del_cmd_str = self._fmt_dt_del_str(cmd, host, svc=service, comment=comment)
|
||||
self._write_command(dt_del_cmd_str)
|
||||
|
||||
|
||||
def schedule_hostgroup_host_downtime(self, hostgroup, minutes=30):
|
||||
"""
|
||||
This command is used to schedule downtime for all hosts in a
|
||||
|
@ -1077,7 +1075,7 @@ class Nagios(object):
|
|||
|
||||
# wtf?
|
||||
else:
|
||||
self.module.fail_json(msg="unknown action specified: '%s'" % \
|
||||
self.module.fail_json(msg="unknown action specified: '%s'" %
|
||||
self.action)
|
||||
|
||||
self.module.exit_json(nagios_commands=self.command_results,
|
||||
|
|
|
@ -86,6 +86,7 @@ from ansible.module_utils.six.moves.urllib.parse import urlencode
|
|||
# Module execution.
|
||||
#
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
module = AnsibleModule(
|
||||
|
|
|
@ -171,6 +171,7 @@ import json
|
|||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.urls import fetch_url
|
||||
|
||||
|
||||
def auth_header(user, passwd, token):
|
||||
if token:
|
||||
return "Token token=%s" % token
|
||||
|
@ -178,6 +179,7 @@ def auth_header(user, passwd, token):
|
|||
auth = base64.encodestring('%s:%s' % (user, passwd)).replace('\n', '')
|
||||
return "Basic %s" % auth
|
||||
|
||||
|
||||
def ongoing(module, name, user, passwd, token):
|
||||
url = "https://" + name + ".pagerduty.com/api/v1/maintenance_windows/ongoing"
|
||||
headers = {"Authorization": auth_header(user, passwd, token)}
|
||||
|
@ -225,6 +227,7 @@ def create(module, name, user, passwd, token, requester_id, service, hours, minu
|
|||
|
||||
return False, json_out, True
|
||||
|
||||
|
||||
def absent(module, name, user, passwd, token, requester_id, service):
|
||||
url = "https://" + name + ".pagerduty.com/api/v1/maintenance_windows/" + service[0]
|
||||
headers = {
|
||||
|
@ -301,7 +304,6 @@ def main():
|
|||
if rc != 0:
|
||||
module.fail_json(msg="failed", result=out)
|
||||
|
||||
|
||||
module.exit_json(msg="success", result=out, changed=changed)
|
||||
|
||||
|
||||
|
|
|
@ -124,6 +124,7 @@ def send_deploy_event(module, key, revision_id, deployed_by='Ansible', deployed_
|
|||
|
||||
return do_send_request(module, deploy_api, params, key)
|
||||
|
||||
|
||||
def send_annotation_event(module, key, msg, annotated_by='Ansible', level=None, instance_id=None, event_epoch=None):
|
||||
"""Send an annotation event to Stackdriver"""
|
||||
annotation_api = "https://event-gateway.stackdriver.com/v1/annotationevent"
|
||||
|
@ -141,6 +142,7 @@ def send_annotation_event(module, key, msg, annotated_by='Ansible', level=None,
|
|||
|
||||
return do_send_request(module, annotation_api, params, key)
|
||||
|
||||
|
||||
def do_send_request(module, url, params, key):
|
||||
data = json.dumps(params)
|
||||
headers = {
|
||||
|
|
|
@ -278,7 +278,6 @@ from ansible.module_utils._text import to_native, to_text
|
|||
from ansible.module_utils.urls import fetch_url
|
||||
|
||||
|
||||
|
||||
class CloudflareAPI(object):
|
||||
|
||||
cf_api_endpoint = 'https://api.cloudflare.com/client/v4'
|
||||
|
@ -594,6 +593,7 @@ class CloudflareAPI(object):
|
|||
self.changed = True
|
||||
return result, self.changed
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
|
|
|
@ -551,6 +551,7 @@ class DME2(object):
|
|||
# Module execution.
|
||||
#
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
module = AnsibleModule(
|
||||
|
|
|
@ -546,7 +546,6 @@ class Nmcli(object):
|
|||
120: "Failed"
|
||||
}
|
||||
|
||||
|
||||
def __init__(self, module):
|
||||
self.module = module
|
||||
self.state = module.params['state']
|
||||
|
|
|
@@ -141,6 +141,7 @@ def decode_hex(hexstring):
    else:
        return hexstring


def decode_mac(hexstring):
    if len(hexstring) != 14:

@@ -150,6 +151,7 @@ def decode_mac(hexstring):
    else:
        return hexstring


def lookup_adminstatus(int_adminstatus):
    adminstatus_options = {
        1: 'up',

@@ -161,6 +163,7 @@ def lookup_adminstatus(int_adminstatus):
    else:
        return ""


def lookup_operstatus(int_operstatus):
    operstatus_options = {
        1: 'up',

@@ -176,6 +179,7 @@ def lookup_operstatus(int_operstatus):
    else:
        return ""


def main():
    module = AnsibleModule(
        argument_spec=dict(

@@ -211,7 +215,6 @@ def main():
    if m_args['level'] == "authPriv" and m_args['privacy'] is None:
        module.fail_json(msg='Privacy algorithm not set when using authPriv')

    if m_args['integrity'] == "sha":
        integrity_proto = cmdgen.usmHMACSHAAuthProtocol
    elif m_args['integrity'] == "md5":

@@ -240,7 +243,7 @@ def main():
    # Use v without a prefix to use with return values
    v = DefineOid(dotprefix=False)

-    Tree = lambda: defaultdict(Tree)
+    def Tree(): return defaultdict(Tree)

    results = Tree()

@@ -256,7 +259,6 @@ def main():
        lookupMib=False
    )

    if errorIndication:
        module.fail_json(msg=str(errorIndication))

@@ -294,7 +296,6 @@ def main():
        lookupMib=False
    )

    if errorIndication:
        module.fail_json(msg=str(errorIndication))
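The one substantive rewrite in this file is the E731 fix: autopep8 replaces a lambda bound to a name with a def. The recursive defaultdict it builds ("Tree") gives an auto-vivifying nested dict for the collected facts. A small self-contained illustration of that idiom (the sample keys are made up):

from collections import defaultdict

def Tree():
    # Each missing key creates another Tree, so arbitrarily deep
    # assignments work without pre-creating intermediate dicts.
    return defaultdict(Tree)

results = Tree()
results['ansible_interfaces']['1']['name'] = 'lo'   # illustrative keys only
print(results['ansible_interfaces']['1']['name'])   # -> lo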
@@ -171,7 +171,6 @@ def main():
    # validate the ports data structure
    validate_ports(module, slb_server_ports)

    json_post = {
        "server-list": [
            {
@@ -146,6 +146,7 @@ def validate_ports(module, ports):
            if 'service_group' not in item:
                item['service_group'] = ''


def main():
    argument_spec = a10_argument_spec()
    argument_spec.update(url_argument_spec())
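The hunk above only adds the blank line PEP 8 wants before main(), but the surrounding helper is easy to summarise: it normalises each port entry by defaulting a missing service_group. A rough standalone sketch under a made-up name (the module's error reporting via module.fail_json is omitted, and the sample key is illustrative):

def normalize_ports(ports):
    # Ensure every port dict carries a service_group key, defaulting to ''.
    for item in ports:
        if 'service_group' not in item:
            item['service_group'] = ''
    return ports

print(normalize_ports([{'port-number': 80}]))
# -> [{'port-number': 80, 'service_group': ''}]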
@@ -142,6 +142,7 @@ import json

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aos.aos import get_aos_session, find_collection_item, do_load_resource, check_aos_version, content_to_dict


def check_ranges_are_valid(module, ranges):

    i = 1

@@ -161,6 +162,7 @@ def check_ranges_are_valid(module, ranges):

    return True


def get_list_of_range(asn_pool):
    ranges = []

@@ -169,6 +171,7 @@ def get_list_of_range(asn_pool):

    return ranges


def create_new_asn_pool(asn_pool, name, ranges):

    # Create value

@@ -178,7 +181,7 @@ def create_new_asn_pool(asn_pool, name, ranges):

    asn_pool.datum = datum

-    ## Write to AOS
+    # Write to AOS
    return asn_pool.write()

@@ -190,7 +193,7 @@ def asn_pool_absent(module, aos, my_pool):
    if my_pool.exists is False:
        module.exit_json(changed=False, name=margs['name'], id='', value={})

-    ## Check if object is currently in Use or Not
+    # Check if object is currently in Use or Not
    # If in Use, return an error
    if my_pool.value:
        if my_pool.value['status'] != 'not_in_use':

@@ -256,6 +259,8 @@ def asn_pool_present(module, aos, my_pool):
# ########################################################
# Main Function
# ########################################################


def asn_pool(module):

    margs = module.params

@@ -309,6 +314,7 @@ def asn_pool(module):

    asn_pool_present(module, aos, my_pool)


def main():
    module = AnsibleModule(
        argument_spec=dict(
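The same two autopep8 rules account for almost every change in the AOS modules from here on: E266 (block comments start with a single '#') and E302/E305 (two blank lines around top-level definitions). A compact before/after, using a made-up helper name purely for illustration:

# Before autopep8 (E266: comment starts with '##'; E302: only one blank
# line before the top-level def):
#
#   ## Write to AOS
#   def write_pool(pool):
#       return pool.write()

# After autopep8:

# Write to AOS


def write_pool(pool):     # made-up helper name, illustration only
    return pool.write()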
@@ -136,6 +136,7 @@ from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aos.aos import get_aos_session, check_aos_version, find_collection_item
from ansible.module_utils.pycompat24 import get_exception


def create_blueprint(module, aos, name):

    margs = module.params

@@ -177,6 +178,7 @@ def ensure_absent(module, aos, blueprint):
                         id=blueprint.id,
                         name=blueprint.name)


def ensure_present(module, aos, blueprint):
    margs = module.params

@@ -211,6 +213,7 @@ def ensure_present(module, aos, blueprint):
        module.exit_json(changed=True,
                         name=margs['name'])


def ensure_build_ready(module, aos, blueprint):
    margs = module.params
@@ -189,6 +189,7 @@ param_map_list = dict(
    )
)


def get_collection_from_param_map(module, aos):

    param_map = None

@@ -220,6 +221,7 @@ def get_collection_from_param_map(module, aos):

    return None


def blueprint_param_present(module, aos, blueprint, param, param_value):

    margs = module.params

@@ -278,6 +280,7 @@ def blueprint_param_absent(module, aos, blueprint, param, param_value):
                     name=param.name,
                     value=param.value)


def blueprint_param(module):

    margs = module.params

@@ -350,6 +353,7 @@ def blueprint_param(module):

    blueprint_param_present(module, aos, blueprint, param, param_value)


def main():
    module = AnsibleModule(
        argument_spec=dict(
@@ -88,6 +88,7 @@ from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aos.aos import get_aos_session, find_collection_item, do_load_resource, check_aos_version, content_to_dict


def ensure_present(module, aos, blueprint, virtnet):

    # if exist already return tru

@@ -130,6 +131,7 @@ def ensure_absent(module, aos, blueprint, virtnet):
        module.exit_json(changed=False,
                         blueprint=blueprint.name)


def blueprint_virtnet(module):

    margs = module.params
@@ -140,6 +140,7 @@ def aos_device_normal(module, aos, dev):
    else:
        module.fail_json(msg="Device is in '%s' state" % dev.state)


def aos_device(module):
    margs = module.params

@@ -189,6 +190,7 @@ def aos_device(module):
    if margs['state'] == 'normal':
        aos_device_normal(module, aos, dev)


def main():

    module = AnsibleModule(
@@ -159,12 +159,14 @@ def create_new_ext_router(module, my_ext_router, name, loopback, asn):

    my_ext_router.datum = datum

-    ## Write to AOS
+    # Write to AOS
    return my_ext_router.write()

#########################################################
# State Processing
#########################################################


def ext_router_absent(module, aos, my_ext_router):

    margs = module.params

@@ -190,6 +192,7 @@ def ext_router_absent(module, aos, my_ext_router):
                     id=my_ext_router.id,
                     value={})


def ext_router_present(module, aos, my_ext_router):

    margs = module.params

@@ -215,7 +218,6 @@ def ext_router_present(module, aos, my_ext_router):
        except:
            module.fail_json(msg="An error occurred while trying to create a new External Router")

        module.exit_json(changed=True,
                         name=my_ext_router.name,
                         id=my_ext_router.id,

@@ -257,6 +259,8 @@ def ext_router_present(module, aos, my_ext_router):
#########################################################
# Main Function
#########################################################


def ext_router(module):

    margs = module.params

@@ -305,6 +309,7 @@ def ext_router(module):

    ext_router_present(module, aos, my_ext_router)


def main():
    module = AnsibleModule(
        argument_spec=dict(
@@ -167,6 +167,7 @@ import json

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aos.aos import get_aos_session, find_collection_item, do_load_resource, check_aos_version, content_to_dict


def get_list_of_subnets(ip_pool):
    subnets = []

@@ -175,6 +176,7 @@ def get_list_of_subnets(ip_pool):

    return subnets


def create_new_ip_pool(ip_pool, name, subnets):

    # Create value

@@ -184,12 +186,14 @@ def create_new_ip_pool(ip_pool, name, subnets):

    ip_pool.datum = datum

-    ## Write to AOS
+    # Write to AOS
    return ip_pool.write()

#########################################################
# State Processing
#########################################################


def ip_pool_absent(module, aos, my_pool):

    margs = module.params

@@ -198,7 +202,7 @@ def ip_pool_absent(module, aos, my_pool):
    if my_pool.exists is False:
        module.exit_json(changed=False, name=margs['name'], id='', value={})

-    ## Check if object is currently in Use or Not
+    # Check if object is currently in Use or Not
    # If in Use, return an error
    if my_pool.value:
        if my_pool.value['status'] != 'not_in_use':

@@ -218,6 +222,7 @@ def ip_pool_absent(module, aos, my_pool):
                     id=my_pool.id,
                     value={})


def ip_pool_present(module, aos, my_pool):

    margs = module.params

@@ -266,6 +271,8 @@ def ip_pool_present(module, aos, my_pool):
#########################################################
# Main Function
#########################################################


def ip_pool(module):

    margs = module.params

@@ -314,6 +321,7 @@ def ip_pool(module):

    ip_pool_present(module, aos, my_pool)


def main():
    module = AnsibleModule(
        argument_spec=dict(
@@ -134,6 +134,8 @@ from ansible.module_utils.network.aos.aos import get_aos_session, find_collectio
#########################################################
# State Processing
#########################################################


def logical_device_absent(module, aos, my_logical_dev):

    margs = module.params

@@ -159,6 +161,7 @@ def logical_device_absent(module, aos, my_logical_dev):
                     id=my_logical_dev.id,
                     value={})


def logical_device_present(module, aos, my_logical_dev):

    margs = module.params

@@ -182,6 +185,8 @@ def logical_device_present(module, aos, my_logical_dev):
#########################################################
# Main Function
#########################################################


def logical_device(module):

    margs = module.params

@@ -227,6 +232,7 @@ def logical_device(module):

    logical_device_present(module, aos, my_logical_dev)


def main():
    module = AnsibleModule(
        argument_spec=dict(
@@ -154,6 +154,8 @@ from ansible.module_utils.network.aos.aos import get_aos_session, find_collectio
#########################################################
# State Processing
#########################################################


def logical_device_map_absent(module, aos, my_log_dev_map):

    margs = module.params

@@ -177,6 +179,7 @@ def logical_device_map_absent(module, aos, my_log_dev_map):
                     id=my_log_dev_map.id,
                     value={})


def logical_device_map_present(module, aos, my_log_dev_map):

    margs = module.params

@@ -202,6 +205,8 @@ def logical_device_map_present(module, aos, my_log_dev_map):
#########################################################
# Main Function
#########################################################


def logical_device_map(module):

    margs = module.params

@@ -250,6 +255,7 @@ def logical_device_map(module):

    logical_device_map_present(module, aos, my_log_dev_map)


def main():
    module = AnsibleModule(
        argument_spec=dict(
@@ -133,6 +133,8 @@ from ansible.module_utils.network.aos.aos import get_aos_session, find_collectio
#########################################################
# State Processing
#########################################################


def rack_type_absent(module, aos, my_rack_type):

    margs = module.params

@@ -156,6 +158,7 @@ def rack_type_absent(module, aos, my_rack_type):
                     id=my_rack_type.id,
                     value={})


def rack_type_present(module, aos, my_rack_type):

    margs = module.params

@@ -179,6 +182,8 @@ def rack_type_present(module, aos, my_rack_type):
#########################################################
# Main Function
#########################################################


def rack_type(module):

    margs = module.params

@@ -224,6 +229,7 @@ def rack_type(module):

    rack_type_present(module, aos, my_rack_type)


def main():
    module = AnsibleModule(
        argument_spec=dict(
@@ -143,6 +143,8 @@ from ansible.module_utils.network.aos.aos import get_aos_session, find_collectio
#########################################################
# State Processing
#########################################################


def template_absent(module, aos, my_template):

    margs = module.params

@@ -168,6 +170,7 @@ def template_absent(module, aos, my_template):
                     id=my_template.id,
                     value={})


def template_present(module, aos, my_template):

    margs = module.params

@@ -243,6 +246,7 @@ def aos_template(module):

    template_present(module, aos, my_template)


def main():
    module = AnsibleModule(
        argument_spec=dict(
@@ -151,6 +151,7 @@ def get_acl_config(module, acl_name):

    return NetworkConfig(indent=1, contents='\n'.join(filtered_config))


def parse_acl_name(module):
    first_line = True
    for line in module.params['lines']:

@@ -168,6 +169,7 @@ def parse_acl_name(module):

    return acl_name


def main():

    argument_spec = dict(
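parse_acl_name, touched above only for spacing, walks the task's lines and derives the ACL name they all refer to. A hedged approximation of that check as a standalone function; the exact behaviour is an assumption from the visible context, and the real module reports problems through module.fail_json rather than exceptions:

def parse_acl_name(lines):
    acl_name = None
    for line in lines:
        parts = line.split()
        if len(parts) < 2 or parts[0] != 'access-list':
            raise ValueError("line is not an access-list entry: %r" % line)
        if acl_name is None:
            acl_name = parts[1]          # take the name from the first line
        elif parts[1] != acl_name:
            raise ValueError("all lines must refer to the same ACL")
    return acl_name

print(parse_acl_name(['access-list ACL-ANSIBLE extended permit tcp any any eq 82']))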
@@ -187,7 +187,6 @@ def main():
        msg = 'One or more conditional statements have not be satisfied'
        module.fail_json(msg=msg, failed_conditions=failed_conditions)

    result.update({
        'changed': False,
        'stdout': responses,
@@ -201,7 +201,6 @@ from ansible.module_utils.network.common.config import NetworkConfig, dumps
from ansible.module_utils._text import to_native


def get_candidate(module):
    candidate = NetworkConfig(indent=1)
    if module.params['src']:

@@ -211,6 +210,7 @@ def get_candidate(module):
        candidate.add(module.params['lines'], parents=parents)
    return candidate


def run(module, result):
    match = module.params['match']
    replace = module.params['replace']

@@ -251,6 +251,7 @@ def run(module, result):
            run_commands(module, 'write mem')
        result['changed'] = True


def main():
    """ main entry point for module execution
    """

@@ -293,7 +294,6 @@ def main():

    config = None

    if module.params['backup']:
        result['__backup__'] = get_config(module)
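get_candidate, shown above, builds the candidate configuration either from a full src blob or from individual lines nested under parents, using the NetworkConfig object imported at the top of the hunk. A minimal sketch of that flow under simplified parameters (the real function reads them from module.params):

from ansible.module_utils.network.common.config import NetworkConfig

def get_candidate(src=None, lines=None, parents=None):
    candidate = NetworkConfig(indent=1)
    if src:
        candidate.load(src)                           # whole config text pasted in
    elif lines:
        candidate.add(lines, parents=parents or [])   # individual lines under parents
    return candidate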
@@ -115,6 +115,7 @@ def chain(module):
    else:
        module.fail_json(msg="error deleting chain '{}': {}".format(name, response.json['description']))


def main():
    module = AnsibleModule(
        argument_spec=dict(
Some files were not shown because too many files have changed in this diff.