PEP 8 W291 whitespace cleanup.

Matt Clay 2017-01-27 15:20:31 -08:00
commit d913f69ba1
166 changed files with 493 additions and 565 deletions
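W291 is the pycodestyle (formerly pep8) check for trailing whitespace at the end of a line, which is why every hunk below shows a line that looks unchanged: the before and after text differ only in invisible trailing blanks. A minimal sketch of how a cleanup like this can be automated (a hypothetical helper, not the script used to produce this commit):

import sys

def strip_trailing_whitespace(path):
    # Read the file, drop trailing blanks from each line (covers W291 and
    # W293), and rewrite the file only if something actually changed.
    # Note: this also appends a newline to a final line that lacked one.
    with open(path) as f:
        lines = f.readlines()
    fixed = [line.rstrip() + '\n' for line in lines]
    if fixed != lines:
        with open(path, 'w') as f:
            f.writelines(fixed)
        return True
    return False

if __name__ == '__main__':
    for name in sys.argv[1:]:
        if strip_trailing_whitespace(name):
            print('fixed: %s' % name)

Checking without rewriting is simpler still: pycodestyle --select=W291 <paths> reports each offending file, line, and column.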

View file

@@ -87,7 +87,7 @@ options:
description:
- Optional attribute which with to sort the results.
- If specifying 'tag', the 'tag_name' parameter is required.
- Starting at version 2.1, additional sort choices of architecture, block_device_mapping, creationDate, hypervisor, is_public, location, owner_id, platform, root_device_name, root_device_type, state, and virtualization_type are supported.
choices: ['name', 'description', 'tag', 'architecture', 'block_device_mapping', 'creationDate', 'hypervisor', 'is_public', 'location', 'owner_id', 'platform', 'root_device_name', 'root_device_type', 'state', 'virtualization_type']
default: null
required: false

View file

@@ -43,7 +43,7 @@ options:
name:
description:
- Name of the customer gateway.
required: true
state:
description:
- Create or terminate the Customer Gateway.
@@ -108,7 +108,7 @@ gateway.customer_gateways:
type:
description: encryption type.
returned: when gateway exists and is available.
sample: ipsec.1
type: string
'''
@@ -178,13 +178,13 @@ class Ec2CustomerGatewayManager:
DryRun=False,
Filters=[
{
'Name': 'state',
'Values': [
'available',
]
},
{
'Name': 'ip-address',
'Values': [
ip_address,
]

View file

@@ -129,7 +129,7 @@ class ElbManager:
to report it out-of-service"""
for lb in self.lbs:
initial_state = self._get_instance_health(lb)
if initial_state is None:
# Instance isn't registered with this load
# balancer. Ignore it and try the next one.

View file

@@ -163,7 +163,7 @@ def main():
if key:
# existing key found
if key_material:
# EC2's fingerprints are non-trivial to generate, so push this key
# to a temporary name and make ec2 calculate the fingerprint for us.
#
# http://blog.jbrowne.com/?p=23
@@ -186,7 +186,7 @@ def main():
if key.fingerprint != tmpfingerprint:
if not module.check_mode:
key.delete()
key = ec2.import_key_pair(name, key_material)
if wait:
start = time.time()
@@ -211,7 +211,7 @@ def main():
key = ec2.import_key_pair(name, key_material)
else:
'''
No material provided, let AWS handle the key creation and
retrieve the private key
'''
key = ec2.create_key_pair(name)

View file

@@ -113,7 +113,7 @@ options:
extends_documentation_fragment:
- aws
- ec2
requires:
- "boto >= 2.39.0"
"""

View file

@@ -20,7 +20,7 @@ ANSIBLE_METADATA = {'status': ['stableinterface'],
DOCUMENTATION = '''
---
module: ec2_tag
short_description: create and remove tag(s) to ec2 resources.
description:
- Creates, removes and lists tags from any EC2 resource. The resource is referenced by its resource id (e.g. an instance being i-XXXXXXX). It is designed to be used with complex args (tags), see the examples. This module has a dependency on python-boto.
@@ -28,9 +28,9 @@ version_added: "1.3"
options:
resource:
description:
- The EC2 resource id.
required: true
default: null
aliases: []
state:
description:
@@ -97,10 +97,10 @@ EXAMPLES = '''
region: eu-west-1
resource: '{{ item.id }}'
state: present
tags:
Name: dbserver
Env: production
with_subelements:
- ec2_vol.results
- volumes
@@ -164,7 +164,7 @@ def main():
if set(tags.items()).issubset(set(tagdict.items())):
module.exit_json(msg="Tags already exists in %s." %resource, changed=False)
else:
for (key, value) in set(tags.items()):
if (key, value) not in set(tagdict.items()):
dictadd[key] = value
tagger = ec2.create_tags(resource, dictadd)

View file

@@ -101,7 +101,7 @@ options:
description:
- The resource_id of an existing DHCP options set.
If this is specified, then it will override other settings, except tags
(which will be updated to match)
required: False
default: None
version_added: "2.1"

View file

@@ -357,11 +357,11 @@ def remove_network_acl(client, module):
changed = True
result[nacl_id] = "Successfully deleted"
return changed, result
if not assoc_ids:
delete_network_acl(nacl_id, client, module)
changed = True
result[nacl_id] = "Successfully deleted"
return changed, result
return changed, result
@@ -510,7 +510,7 @@ def subnets_to_associate(nacl, client, module):
{'Name': 'tag:Name', 'Values': params}])
except botocore.exceptions.ClientError as e:
module.fail_json(msg=str(e))
return [s['SubnetId'] for s in subnets['Subnets'] if s['SubnetId']]
def main():

View file

@@ -44,7 +44,7 @@ options:
tags:
description:
- Dictionary of tags to look for and apply when creating a Peering Connection.
required: false
state:
description:
- Create, delete, accept, reject a peering connection.
@@ -67,7 +67,7 @@ EXAMPLES = '''
tags:
Name: Peering connection for VPC 21 to VPC 22
CostCode: CC1234
Project: phoenix
register: vpc_peer
- name: Accept local VPC peering request
@@ -87,7 +87,7 @@ EXAMPLES = '''
tags:
Name: Peering connection for VPC 21 to VPC 22
CostCode: CC1234
Project: phoenix
register: vpc_peer
- name: delete a local VPC peering Connection
@@ -108,7 +108,7 @@ EXAMPLES = '''
tags:
Name: Peering connection for VPC 21 to VPC 22
CostCode: CC1234
Project: phoenix
register: vpc_peer
- name: Accept peering connection from remote account
@@ -129,7 +129,7 @@ EXAMPLES = '''
tags:
Name: Peering connection for VPC 21 to VPC 22
CostCode: CC1234
Project: phoenix
register: vpc_peer
- name: Reject a local VPC peering Connection
@@ -149,7 +149,7 @@ EXAMPLES = '''
tags:
Name: Peering connection for VPC 21 to VPC 22
CostCode: CC1234
Project: phoenix
register: vpc_peer
- name: Accept a cross account VPC peering connection request
@@ -174,7 +174,7 @@ EXAMPLES = '''
tags:
Name: Peering connection for VPC 21 to VPC 22
CostCode: CC1234
Project: phoenix
register: vpc_peer
- name: Reject a cross account VPC peering Connection
@@ -269,7 +269,7 @@ def create_peer_connection(client, module):
changed = True
return (changed, peering_conn['VpcPeeringConnection']['VpcPeeringConnectionId'])
except botocore.exceptions.ClientError as e:
module.fail_json(msg=str(e))
def peer_status(client, module):

View file

@@ -432,7 +432,7 @@ def ensure_vgw_present(client, module):
else:
# attach the vgw to the supplied vpc
attached_vgw = attach_vgw(client, module, vpn_gateway_id)
vgw = find_vgw(client, module, [vpn_gateway_id])
changed = True
# if params['VpcId'] is not provided, check the vgw is attached to a vpc. if so, detach it.
@@ -443,7 +443,7 @@ def ensure_vgw_present(client, module):
if existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached':
# detach the vpc from the vgw
vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId']
detach_vgw(client, module, vpn_gateway_id, vpc_to_detach)
changed = True
vgw = find_vgw(client, module, [vpn_gateway_id])
@@ -492,16 +492,16 @@ def ensure_vgw_absent(client, module):
if existing_vgw[0]['VpcAttachments'] != [] and existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached':
if params['VpcId']:
if params['VpcId'] != existing_vgw[0]['VpcAttachments'][0]['VpcId']:
module.fail_json(msg='The vpc-id provided does not match the vpc-id currently attached - please check the AWS console')
else:
# detach the vpc from the vgw
detach_vgw(client, module, params['VpnGatewayIds'], params['VpcId'])
deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds'])
changed = True
else:
# attempt to detach any attached vpcs
vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId']
detach_vgw(client, module, params['VpnGatewayIds'], vpc_to_detach)
deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds'])
@@ -519,7 +519,7 @@ def ensure_vgw_absent(client, module):
else:
#Check that a name and type argument has been supplied if no vgw-id
if not module.params.get('name') or not module.params.get('type'):
module.fail_json(msg='A name and type is required when no vgw-id and a status of \'absent\' is suppled')
existing_vgw = find_vgw(client, module)
if existing_vgw != [] and existing_vgw[0]['State'] != 'deleted':
@@ -527,10 +527,10 @@ def ensure_vgw_absent(client, module):
if existing_vgw[0]['VpcAttachments'] != [] and existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached':
if params['VpcId']:
if params['VpcId'] != existing_vgw[0]['VpcAttachments'][0]['VpcId']:
module.fail_json(msg='The vpc-id provided does not match the vpc-id currently attached - please check the AWS console')
else:
# detach the vpc from the vgw
detach_vgw(client, module, vpn_gateway_id, params['VpcId'])
#now that the vpc has been detached, delete the vgw
@@ -538,7 +538,7 @@ def ensure_vgw_absent(client, module):
changed = True
else:
# attempt to detach any attached vpcs
vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId']
detach_vgw(client, module, vpn_gateway_id, vpc_to_detach)
changed = True
@@ -562,7 +562,7 @@ def ensure_vgw_absent(client, module):
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(default='present', choices=['present', 'absent']),
region=dict(required=True),
name=dict(),
vpn_gateway_id=dict(),

View file

@@ -36,7 +36,7 @@ options:
default: None
vpn_gateway_ids:
description:
- Get details of a specific Virtual Gateway ID. This value should be provided as a list.
required: false
default: None
author: "Nick Aslanidis (@naslanidis)"
@@ -66,7 +66,7 @@ EXAMPLES = '''
ec2_vpc_vgw_facts:
region: ap-southeast-2
profile: production
vpn_gateway_ids: vgw-c432f6a7
register: vgw_facts
'''
@@ -83,7 +83,7 @@ virtual_gateways:
"key": "Name",
"value": "TEST-VGW"
}
],
"type": "ipsec.1",
"vpc_attachments": [
{

View file

@@ -28,7 +28,7 @@ author: "Rick Mendes (@rickmendes)"
options:
instance_id:
description:
- The instance id to get the password data from.
required: true
key_file:
description:
@@ -37,7 +37,7 @@ options:
key_passphrase:
version_added: "2.0"
description:
- The passphrase for the instance key pair. The key must use DES or 3DES encryption for this module to decrypt it. You can use openssl to convert your password protected keys if they do not use DES or 3DES. ex) openssl rsa -in current_key -out new_key -des3.
required: false
default: null
wait:

View file

@@ -166,7 +166,7 @@ options:
default: null
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
aliases: [ 'ec2_secret_key', 'secret_key' ]
aws_access_key:
@@ -296,7 +296,7 @@ EXAMPLES = '''
wait: yes
wait_timeout: 600
tags:
Name: pg1_test_name_tag
register: rds
- local_action:

View file

@@ -30,7 +30,7 @@ options:
description:
- "Name of the s3 bucket"
required: true
default: null
error_key:
description:
- "The object key name to use when a 4XX class error occurs. To remove an error key, set to None."

View file

@@ -81,16 +81,16 @@ EXAMPLES = """
name: "alarms"
state: present
display_name: "alarm SNS topic"
delivery_policy:
http:
defaultHealthyRetryPolicy:
minDelayTarget: 2
maxDelayTarget: 4
numRetries: 3
numMaxDelayRetries: 5
backoffFunction: "<linear|arithmetic|geometric|exponential>"
disableSubscriptionOverrides: True
defaultThrottlePolicy:
maxReceivesPerSecond: 10
subscriptions:
- endpoint: "my_email_address@example.com"

View file

@@ -86,19 +86,19 @@ extends_documentation_fragment:
"""
RETURN = '''
default_visibility_timeout:
description: The default visibility timeout in seconds.
returned: always
sample: 30
delivery_delay:
description: The delivery delay in seconds.
returned: always
sample: 0
maximum_message_size:
description: The maximum message size in bytes.
returned: always
sample: 262144
message_retention_period:
description: The message retention period in seconds.
returned: always
sample: 345600
@@ -110,7 +110,7 @@ queue_arn:
description: The queue's Amazon resource name (ARN).
returned: on successful creation or update of the queue
sample: 'arn:aws:sqs:us-east-1:199999999999:queuename-987d2de0'
receive_message_wait_time:
description: The receive message wait time in seconds.
returned: always
sample: 0
@@ -179,7 +179,7 @@ def create_or_update_sqs_queue(connection, module):
try:
queue = connection.get_queue(queue_name)
if queue:
# Update existing
result['changed'] = update_sqs_queue(queue, check_mode=module.check_mode, **queue_attributes)
else:
# Create new

View file

@@ -43,7 +43,7 @@ options:
default: westus
deployment_mode:
description:
- In incremental mode, resources are deployed without deleting existing resources that are not included in the template.
In complete mode resources are deployed and existing resources in the resource group not included in the template are deleted.
required: false
default: incremental

View file

@@ -114,7 +114,7 @@ EXAMPLES = '''
port: 443
nodes:
- ipAddress: 10.11.22.123
privatePort: 80
state: present
- name: Add node to an existing loadbalancer pool
@@ -130,7 +130,7 @@ EXAMPLES = '''
port: 443
nodes:
- ipAddress: 10.11.22.234
privatePort: 80
state: nodes_present
- name: Remove node from an existing loadbalancer pool
@@ -146,7 +146,7 @@ EXAMPLES = '''
port: 443
nodes:
- ipAddress: 10.11.22.234
privatePort: 80
state: nodes_absent
- name: Delete LoadbalancerPool
@@ -162,7 +162,7 @@ EXAMPLES = '''
port: 443
nodes:
- ipAddress: 10.11.22.123
privatePort: 80
state: port_absent
- name: Delete Loadbalancer
@@ -178,7 +178,7 @@ EXAMPLES = '''
port: 443
nodes:
- ipAddress: 10.11.22.123
privatePort: 80
state: absent
'''

View file

@@ -138,7 +138,7 @@ options:
- exposed
force_kill:
description:
- Use the kill command when stopping a running container.
default: false
required: false
groups:
@@ -159,7 +159,7 @@ options:
recreated. Stop this behavior by setting C(ignore_image) to I(True).
default: false
required: false
version_added: "2.2"
image:
description:
- Repository path and tag used to create the container. If an image is not found or pull is true, the image
@@ -313,10 +313,10 @@ options:
any other mappings.
- If C(networks) parameter is provided, will inspect each network to see if there exists
a bridge network with optional parameter com.docker.network.bridge.host_binding_ipv4.
If such a network is found, then published ports where no host IP address is specified
will be bound to the host IP pointed to by com.docker.network.bridge.host_binding_ipv4.
Note that the first bridge network with a com.docker.network.bridge.host_binding_ipv4
value encountered in the list of C(networks) is the one that will be used.
aliases:
- ports
required: false
@@ -394,7 +394,7 @@ options:
re-create a matching container, even if it is running. Use restart to force a matching container to be stopped and
restarted. Use force_kill to kill a container rather than stopping it. Use keep_volumes to retain volumes associated
with a removed container.'
- 'I(stopped) - Asserts that the container is first I(present), and then if the container is running moves it to a stopped
state. Use force_kill to kill a container rather than stopping it.'
required: false
default: started
@@ -518,7 +518,7 @@ EXAMPLES = '''
name: mycontainer
state: present
image: ubuntu:14.04
command: sleep infinity
- name: Stop a container
docker_container:
@@ -1210,7 +1210,7 @@ class Container(DockerBaseClass):
# assuming if the container was running, it must have been detached.
detach = not (config.get('AttachStderr') and config.get('AttachStdout'))
# "ExposedPorts": null returns None type & causes AttributeError - PR #5517
# "ExposedPorts": null returns None type & causes AttributeError - PR #5517
if config.get('ExposedPorts') is not None:
expected_exposed = [re.sub(r'/.+$', '', p) for p in config.get('ExposedPorts', dict()).keys()]
else:
@@ -1874,7 +1874,7 @@ class ContainerManager(DockerBaseClass):
return self._get_container(container_id)
def container_remove(self, container_id, link=False, force=False):
volume_state = (not self.parameters.keep_volumes)
self.log("remove container container:%s v:%s link:%s force%s" % (container_id, volume_state, link, force))
self.results['actions'].append(dict(removed=container_id, volume_state=volume_state, link=link, force=force))
self.results['changed'] = True

View file

@@ -503,24 +503,24 @@ AUTH_PARAM_MAPPING = {
@contextmanager
def stdout_redirector(path_name):
old_stdout = sys.stdout
fd = open(path_name, 'w')
sys.stdout = fd
try:
yield
finally:
sys.stdout = old_stdout
def get_stdout(path_name):
full_stdout = ''
last_line = ''
with open(path_name, 'r') as fd:
for line in fd:
# strip terminal format/color chars
new_line = re.sub(r'\x1b\[.+m', '', line.encode('ascii'))
full_stdout += new_line
if new_line.strip():
# Assuming last line contains the error message
last_line = new_line.strip().encode('utf-8')
fd.close()
os.remove(path_name)
@@ -666,12 +666,12 @@ class ContainerManager(DockerBaseClass):
if self.pull:
pull_output = self.cmd_pull()
result['changed'] = pull_output['changed']
result['actions'] += pull_output['actions']
if self.build:
build_output = self.cmd_build()
result['changed'] = build_output['changed']
result['actions'] += build_output['actions']
for service in self.project.services:
@@ -679,8 +679,8 @@ class ContainerManager(DockerBaseClass):
plan = service.convergence_plan(strategy=converge)
if plan.action != 'noop':
result['changed'] = True
result_action = dict(service=service.name)
result_action[plan.action] = []
for container in plan.containers:
result_action[plan.action].append(dict(
id=container.id,
@@ -712,17 +712,17 @@ class ContainerManager(DockerBaseClass):
if self.stopped:
stop_output = self.cmd_stop(service_names)
result['changed'] = stop_output['changed']
result['actions'] += stop_output['actions']
if self.restarted:
restart_output = self.cmd_restart(service_names)
result['changed'] = restart_output['changed']
result['actions'] += restart_output['actions']
if self.scale:
scale_output = self.cmd_scale()
result['changed'] = scale_output['changed']
result['actions'] += scale_output['actions']
for service in self.project.services:
@@ -791,7 +791,7 @@ class ContainerManager(DockerBaseClass):
if not self.check_mode:
for service in self.project.get_services(self.services, include_deps=False):
if 'image' not in service.options:
continue
self.log('Pulling image for service %s' % service.name)
# store the existing image ID
@@ -809,16 +809,16 @@ class ContainerManager(DockerBaseClass):
try:
service.pull(ignore_pull_failures=False)
except Exception as exc:
self.client.fail("Error: pull failed with %s" % str(exc))
self.client.fail("Error: pull failed with %s" % str(exc))
# store the new image ID
new_image_id = ''
try:
image = service.image()
if image and image.get('Id'):
new_image_id = image['Id']
except NoSuchImageError as exc:
self.client.fail("Error: service image lookup failed after pull - %s" % str(exc))
self.client.fail("Error: service image lookup failed after pull - %s" % str(exc))
if new_image_id != old_image_id:
# if a new image was pulled
@@ -856,13 +856,13 @@ class ContainerManager(DockerBaseClass):
try:
new_image_id = service.build(pull=True, no_cache=self.nocache)
except Exception as exc:
self.client.fail("Error: build failed with %s" % str(exc))
self.client.fail("Error: build failed with %s" % str(exc))
if new_image_id not in old_image_id:
# if a new image was built
result['changed'] = True
result['actions'].append(dict(
service=service.name,
built_image=dict(
name=service.image_name,
id=new_image_id
@@ -901,7 +901,7 @@ class ContainerManager(DockerBaseClass):
service_res = dict(
service=service.name,
stop=[]
)
for container in service.containers(stopped=False):
result['changed'] = True
service_res['stop'].append(dict(
@@ -977,7 +977,7 @@ class ContainerManager(DockerBaseClass):
service.scale(int(self.scale[service.name]))
except Exception as exc:
self.client.fail("Error scaling %s - %s" % (service.name, str(exc)))
result['actions'].append(service_res)
return result

View file

@@ -155,12 +155,12 @@ def grant_check(module, gs, obj):
grant = [ x for x in acp.entries.entry_list if x.scope.type == 'AllUsers']
if not grant:
obj.set_acl('public-read')
module.exit_json(changed=True, result="The objects permission as been set to public-read")
module.exit_json(changed=True, result="The objects permission as been set to public-read")
if module.params.get('permission') == 'authenticated-read':
grant = [ x for x in acp.entries.entry_list if x.scope.type == 'AllAuthenticatedUsers']
if not grant:
obj.set_acl('authenticated-read')
module.exit_json(changed=True, result="The objects permission as been set to authenticated-read")
module.exit_json(changed=True, result="The objects permission as been set to authenticated-read")
except gs.provider.storage_response_error as e:
module.fail_json(msg= str(e))
return True
@@ -240,7 +240,7 @@ def create_dirkey(module, gs, bucket, obj):
def path_check(path):
if os.path.exists(path):
return True
else:
return False
@@ -262,7 +262,7 @@ def transform_headers(headers):
def upload_gsfile(module, gs, bucket, obj, src, expiry):
try:
bucket = gs.lookup(bucket)
key = bucket.new_key(obj)
key.set_contents_from_filename(
filename=src,
headers=transform_headers(module.params.get('headers'))
@@ -326,7 +326,7 @@ def handle_put(module, gs, bucket, obj, overwrite, src, expiration):
else:
upload_gsfile(module, gs, bucket, obj, src, expiration)
if not bucket_rc:
create_bucket(module, gs, bucket)
upload_gsfile(module, gs, bucket, obj, src, expiration)
@@ -352,7 +352,7 @@ def handle_delete(module, gs, bucket, obj):
module.fail_json(msg="Bucket or Bucket & object parameter is required.", failed=True)
def handle_create(module, gs, bucket, obj):
if bucket and not obj:
if bucket_check(module, gs, bucket):
module.exit_json(msg="Bucket already exists.", changed=False)
else:
@@ -366,7 +366,7 @@ def handle_create(module, gs, bucket, obj):
if bucket_check(module, gs, bucket):
if key_check(module, gs, bucket, dirobj):
module.exit_json(msg="Bucket %s and key %s already exists."% (bucket, obj), changed=False)
else:
create_dirkey(module, gs, bucket, dirobj)
else:
create_bucket(module, gs, bucket)
@@ -431,7 +431,7 @@ def main():
module.fail_json(msg="Local object for PUT does not exist", failed=True)
handle_put(module, gs, bucket, obj, overwrite, src, expiry)
# Support for deleting an object if we have both params.
if mode == 'delete':
handle_delete(module, gs, bucket, obj)

View file

@@ -144,7 +144,7 @@ notes:
- See also M(gcdns_zone).
- This modules's underlying library does not support in-place updates for
DNS resource records. Instead, resource records are quickly deleted and
recreated.
- SOA records are technically supported, but their functionality is limited
to verifying that a zone's existing SOA record matches a pre-determined
value. The SOA record cannot be updated.

View file

@@ -86,7 +86,7 @@ options:
description:
- the protocol used for the load-balancer packet forwarding, tcp or udp
required: false
default: "tcp"
default: "tcp"
choices: ['tcp', 'udp']
region:
description:
@@ -151,7 +151,7 @@ author: "Eric Johnson (@erjohnso) <erjohnso@google.com>"
EXAMPLES = '''
# Simple example of creating a new LB, adding members, and a health check
- local_action:
module: gce_lb
name: testlb
region: us-central1

View file

@@ -293,7 +293,7 @@ except ImportError:
def _check_params(params, field_list):
"""
"""
Helper to validate params.
Use this in function definitions if they require specific fields
@@ -323,7 +323,7 @@ def _check_params(params, field_list):
def _validate_autoscaling_params(params):
"""
"""
Validate that the minimum configuration is present for autoscaling.
:param params: Ansible dictionary containing autoscaling configuration
@@ -331,7 +331,7 @@ def _validate_autoscaling_params(params):
key 'autoscaling'.
:type params: ``dict``
:return: Tuple containing a boolean and a string. True if autoscaler
is valid, False otherwise, plus str for message.
:rtype: ``(``bool``, ``str``)``
"""
@@ -372,7 +372,7 @@ def _validate_autoscaling_params(params):
def _validate_named_port_params(params):
"""
"""
Validate the named ports parameters
:param params: Ansible dictionary containing named_ports configuration
@@ -404,7 +404,7 @@ def _validate_named_port_params(params):
def _get_instance_list(mig, field='name', filter_list=['NONE']):
"""
"""
Helper to grab field from instances response.
:param mig: Managed Instance Group Object from libcloud.
@@ -427,10 +427,10 @@ def _get_instance_list(mig, field='name', filter_list=['NONE']):
def _gen_gce_as_policy(as_params):
"""
"""
Take Autoscaler params and generate GCE-compatible policy.
:param as_params: Dictionary in Ansible-playbook format
containing policy arguments.
:type as_params: ``dict``

View file

@@ -41,7 +41,7 @@ options:
description:
- Dictionary containing a subscripton name associated with a topic (required), along with optional ack_deadline, push_endpoint and pull. For pulling from a subscription, message_ack (bool), max_messages (int) and return_immediate are available as subfields. See subfields name, push_endpoint and ack_deadline for more information.
required: False
name:
description: Subfield of subscription. Required if subscription is specified. See examples.
required: False
ack_deadline:
@@ -156,13 +156,13 @@ state:
type: str
sample: "present"
subscription:
description: Name of subscription.
returned: When subscription fields are specified
type: str
sample: "mysubscription"
topic:
description: Name of topic.
returned: Always
type: str

View file

@@ -45,7 +45,7 @@ options:
state:
description:
- list is the only valid option.
required: False
'''
EXAMPLES = '''
@@ -72,7 +72,7 @@ subscriptions:
returned: When view is set to subscriptions.
type: list
sample: ["mysubscription", "mysubscription2"]
topic:
description: Name of topic. Used to filter subscriptions.
returned: Always
type: str

View file

@@ -179,14 +179,14 @@ def randompass():
'''
Generate a long random password that comply to Linode requirements
'''
# Linode API currently requires the following:
# It must contain at least two of these four character classes:
# lower case letters - upper case letters - numbers - punctuation
# we play it safe :)
import random
import string
# as of python 2.4, this reseeds the PRNG from urandom
random.seed()
lower = ''.join(random.choice(string.ascii_lowercase) for x in range(6))
upper = ''.join(random.choice(string.ascii_uppercase) for x in range(6))
number = ''.join(random.choice(string.digits) for x in range(6))
@@ -218,11 +218,11 @@ def getInstanceDetails(api, server):
'ip_id': ip['IPADDRESSID']})
return instance
def linodeServers(module, api, state, name, plan, distribution, datacenter, linode_id,
payment_term, password, ssh_pub_key, swap, wait, wait_timeout):
instances = []
changed = False
new_server = False
servers = []
disks = []
configs = []
@@ -233,7 +233,7 @@ def linodeServers(module, api, state, name, plan, distribution, datacenter, lino
# For the moment we only consider linode_id as criteria for match
# Later we can use more (size, name, etc.) and update existing
servers = api.linode_list(LinodeId=linode_id)
# Attempt to fetch details about disks and configs only if servers are
# found with linode_id
if servers:
disks = api.linode_disk_list(LinodeId=linode_id)
@@ -256,7 +256,7 @@ def linodeServers(module, api, state, name, plan, distribution, datacenter, lino
# Create linode entity
new_server = True
try:
res = api.linode_create(DatacenterID=datacenter, PlanID=plan,
PaymentTerm=payment_term)
linode_id = res['LinodeID']
# Update linode Label to match name
@@ -282,17 +282,17 @@ def linodeServers(module, api, state, name, plan, distribution, datacenter, lino
size = servers[0]['TOTALHD'] - swap
if ssh_pub_key:
res = api.linode_disk_createfromdistribution(
LinodeId=linode_id, DistributionID=distribution,
rootPass=password, rootSSHKey=ssh_pub_key,
Label='%s data disk (lid: %s)' % (name, linode_id), Size=size)
else:
res = api.linode_disk_createfromdistribution(
LinodeId=linode_id, DistributionID=distribution, rootPass=password,
Label='%s data disk (lid: %s)' % (name, linode_id), Size=size)
jobs.append(res['JobID'])
# Create SWAP disk
res = api.linode_disk_create(LinodeId=linode_id, Type='swap',
Label='%s swap disk (lid: %s)' % (name, linode_id),
Size=swap)
jobs.append(res['JobID'])
except Exception as e:
@@ -364,12 +364,12 @@ def linodeServers(module, api, state, name, plan, distribution, datacenter, lino
time.sleep(5)
if wait and wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = 'Timeout waiting on %s (lid: %s)' %
(server['LABEL'], server['LINODEID']))
# Get a fresh copy of the server details
server = api.linode_list(LinodeId=server['LINODEID'])[0]
if server['STATUS'] == -2:
module.fail_json(msg = '%s (lid: %s) failed to boot' %
(server['LABEL'], server['LINODEID']))
# From now on we know the task is a success
# Build instance report
@@ -380,7 +380,7 @@ def linodeServers(module, api, state, name, plan, distribution, datacenter, lino
else:
instance['status'] = 'Starting'
# Return the root password if this is a new box and no SSH key
# has been provided
if new_server and not ssh_pub_key:
instance['password'] = password
@@ -495,7 +495,7 @@ def main():
except Exception as e:
module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE'])
linodeServers(module, api, state, name, plan, distribution, datacenter, linode_id,
payment_term, password, ssh_pub_key, swap, wait, wait_timeout)
# import module snippets

View file

@@ -446,7 +446,7 @@ def main():
vmcpus = module.params['instance_cpus'] # number of cpu
vmnic = module.params['instance_nic'] # network interface
vmnetwork = module.params['instance_network'] # logical network
vmmem = module.params['instance_mem'] # mem size
vmdisk_alloc = module.params['disk_alloc'] # thin, preallocated
vmdisk_int = module.params['disk_int'] # disk interface virtio or ide
vmos = module.params['instance_os'] # Operating System

View file

@@ -600,7 +600,7 @@ EXAMPLES = '''
node : sabrewulf
state : absent
# Get VM current state
- proxmox_kvm:
api_user : root@pam
api_password: secret
@@ -638,7 +638,7 @@ vmid:
type: int
sample: 115
status:
description:
- The current virtual machine status.
- Returned only when C(state=current)
returned: success
@@ -646,7 +646,7 @@ status:
sample: '{
"changed": false,
"msg": "VM kropta with vmid = 110 is running",
"status": "running"
"status": "running"
}'
'''

View file

@@ -96,12 +96,12 @@ tasks:
RETURN = '''
# for list_vms command
list_vms:
description: The list of vms defined on the remote system
type: dictionary
returned: success
sample: [
"build.example.org",
"build.example.org",
"dev.example.org"
]
# for status command

View file

@@ -260,7 +260,7 @@ class LibvirtConnection(object):
else:
# pretend there was a change
res = 0
if res == 0:
return True
else:
# change the host

View file

@@ -157,7 +157,7 @@ def _get_ksclient(module, kwargs):
auth_url=kwargs.get('auth_url'))
except Exception as e:
module.fail_json(msg="Error authenticating to the keystone: %s " % e.message)
return client
def _get_endpoint(module, client, endpoint_type):
@@ -186,7 +186,7 @@ def _glance_image_present(module, params, client):
try:
for image in client.images.list():
if image.name == params['name']:
return image.id
return None
except Exception as e:
module.fail_json(msg="Error in fetching image list: %s" % e.message)

View file

@@ -337,7 +337,7 @@ def _add_floating_ip_from_pool(module, nova, server):
if not pool_ips:
try:
new_ip = nova.floating_ips.create(pool)
except Exception as e:
module.fail_json(msg = "Unable to create floating ip: %s" % (e.message))
pool_ips.append(new_ip.ip)
# Add to the main list

View file

@@ -135,7 +135,7 @@ def main():
if module.params['public_key'] and (module.params['public_key'] != key.public_key ):
module.fail_json(msg = "name {} present but key hash not the same as offered. Delete key first.".format(key['name']))
else:
module.exit_json(changed = False, result = "Key present")
try:
key = nova.keypairs.create(module.params['name'], module.params['public_key'])
except Exception as e:

View file

@@ -72,7 +72,7 @@ def main():
clouds.append(cloud.config)
module.exit_json(ansible_facts=dict(openstack=dict(clouds=clouds)))
except exceptions.OpenStackConfigException as e:
module.fail_json(msg=str(e))
# import module snippets
from ansible.module_utils.basic import *

View file

@@ -96,7 +96,7 @@ openstack_projects:
returned: always, but can be null
type: complex
contains:
id:
description: Unique UUID.
returned: success
type: string

View file

@@ -26,8 +26,8 @@ description:
version_added: "2.2"
author: Pascal HERAUD @pascalheraud
notes:
- Uses the python OVH Api U(https://github.com/ovh/python-ovh).
You have to create an application (a key and secret) with a consummer
key as described into U(https://eu.api.ovh.com/g934.first_step_with_api)
requirements:
- ovh > 0.3.5
@@ -79,13 +79,13 @@ options:
type: "int"
default: 120
description:
- The timeout in seconds used to wait for a task to be
completed. Default is 120 seconds.
'''
EXAMPLES = '''
# Adds or modify the backend '212.1.1.1' to a
# loadbalancing 'ip-1.1.1.1'
- ovh_ip_loadbalancing:
name: ip-1.1.1.1

View file

@@ -43,7 +43,7 @@ options:
count_offset:
description:
- From which number to start the count.
device_ids:
description:
@@ -59,9 +59,9 @@ options:
hostnames:
description:
- A hostname of a device, or a list of hostnames.
- If given string or one-item list, you can use the C("%d") Python string format to expand numbers from count.
- If only one hostname, it might be expanded to list if count>1.
aliases: [name]
lock:
@@ -127,7 +127,7 @@ EXAMPLES = '''
plan: baremetal_0
facility: sjc1
- name: create 3 ubuntu devices called server-01, server-02 and server-03
hosts: localhost
tasks:
- packet_device:
@@ -435,7 +435,7 @@ def wait_for_ips(module, packet_conn, created_devices):
% [d.hostname for d in created_devices])
def get_existing_devices(module, packet_conn):
project_id = module.params.get('project_id')
return packet_conn.list_devices(project_id, params={'per_page': MAX_DEVICES})
@@ -545,7 +545,7 @@ def main():
if not module.params.get('auth_token'):
_fail_msg = ( "if Packet API token is not in environment variable %s, "
"the auth_token parameter is required" %
"the auth_token parameter is required" %
PACKET_API_TOKEN_ENV_VAR)
module.fail_json(msg=_fail_msg)

View file

@@ -92,9 +92,9 @@ sshkeys:
type: array
sample: [
{
"fingerprint": "5c:93:74:7c:ed:07:17:62:28:75:79:23:d6:08:93:46",
"id": "41d61bd8-3342-428b-a09c-e67bdd18a9b7",
"key": "ssh-dss AAAAB3NzaC1kc3MAAACBAIfNT5S0ncP4BBJBYNhNPxFF9lqVhfPeu6SM1LoCocxqDc1AT3zFRi8hjIf6TLZ2AA4FYbcAWxLMhiBxZRVldT9GdBXile78kAK5z3bKTwq152DCqpxwwbaTIggLFhsU8wrfBsPWnDuAxZ0h7mmrCjoLIE3CNLDA/NmV3iB8xMThAAAAFQCStcesSgR1adPORzBxTr7hug92LwAAAIBOProm3Gk+HWedLyE8IfofLaOeRnbBRHAOL4z0SexKkVOnQ/LGN/uDIIPGGBDYTvXgKZT+jbHeulRJ2jKgfSpGKN4JxFQ8uzVH492jEiiUJtT72Ss1dCV4PmyERVIw+f54itihV3z/t25dWgowhb0int8iC/OY3cGodlmYb3wdcQAAAIBuLbB45djZXzUkOTzzcRDIRfhaxo5WipbtEM2B1fuBt2gyrvksPpH/LK6xTjdIIb0CxPu4OCxwJG0aOz5kJoRnOWIXQGhH7VowrJhsqhIc8gN9ErbO5ea8b1L76MNcAotmBDeTUiPw01IJ8MdDxfmcsCslJKgoRKSmQpCwXQtN2g== tomk@hp2",
"fingerprint": "5c:93:74:7c:ed:07:17:62:28:75:79:23:d6:08:93:46",
"id": "41d61bd8-3342-428b-a09c-e67bdd18a9b7",
"key": "ssh-dss AAAAB3NzaC1kc3MAAACBAIfNT5S0ncP4BBJBYNhNPxFF9lqVhfPeu6SM1LoCocxqDc1AT3zFRi8hjIf6TLZ2AA4FYbcAWxLMhiBxZRVldT9GdBXile78kAK5z3bKTwq152DCqpxwwbaTIggLFhsU8wrfBsPWnDuAxZ0h7mmrCjoLIE3CNLDA/NmV3iB8xMThAAAAFQCStcesSgR1adPORzBxTr7hug92LwAAAIBOProm3Gk+HWedLyE8IfofLaOeRnbBRHAOL4z0SexKkVOnQ/LGN/uDIIPGGBDYTvXgKZT+jbHeulRJ2jKgfSpGKN4JxFQ8uzVH492jEiiUJtT72Ss1dCV4PmyERVIw+f54itihV3z/t25dWgowhb0int8iC/OY3cGodlmYb3wdcQAAAIBuLbB45djZXzUkOTzzcRDIRfhaxo5WipbtEM2B1fuBt2gyrvksPpH/LK6xTjdIIb0CxPu4OCxwJG0aOz5kJoRnOWIXQGhH7VowrJhsqhIc8gN9ErbO5ea8b1L76MNcAotmBDeTUiPw01IJ8MdDxfmcsCslJKgoRKSmQpCwXQtN2g== tomk@hp2",
"label": "mynewkey33"
}
]
@@ -251,7 +251,7 @@ def main():
if not module.params.get('auth_token'):
_fail_msg = ( "if Packet API token is not in environment variable %s, "
"the auth_token parameter is required" %
"the auth_token parameter is required" %
PACKET_API_TOKEN_ENV_VAR)
module.fail_json(msg=_fail_msg)

View file

@@ -76,7 +76,7 @@ EXAMPLES = '''
datacenter: Tardis One
wait_timeout: 500
# Destroy a Datacenter. This will remove all servers, volumes, and other objects in the datacenter.
- profitbricks_datacenter:
datacenter: Tardis One
wait_timeout: 500
@@ -177,7 +177,7 @@ def remove_datacenter(module, profitbricks):
"""
Removes a Datacenter.
This will remove a datacenter.
module : AnsibleModule object
profitbricks: authenticated profitbricks object.

View file

@@ -172,7 +172,7 @@ def detach_volume(module, profitbricks):
"""
Detaches a volume.
This will remove a volume from the server.
module : AnsibleModule object
profitbricks: authenticated profitbricks object.

View file

@@ -83,7 +83,7 @@ options:
description:
- how long before wait gives up, in seconds
default: 300
author:
- "Christopher H. Laco (@claco)"
- "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace.openstack

View file

@@ -62,7 +62,7 @@ options:
description:
- how long before wait gives up, in seconds
default: 300
author:
- "Christopher H. Laco (@claco)"
- "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace.openstack

View file

@@ -107,7 +107,7 @@ options:
description:
- how long before wait gives up, in seconds
default: 300
author:
- "Christopher H. Laco (@claco)"
- "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace

View file

@@ -34,7 +34,7 @@ options:
choices: ['present', 'absent']
default: present
required: false
author:
- "Christopher H. Laco (@claco)"
- "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace.openstack

View file

@@ -43,7 +43,7 @@ options:
description:
- cidr of the network being created
default: null
author:
- "Christopher H. Laco (@claco)"
- "Jesse Keating (@j2sol)"
extends_documentation_fragment: rackspace.openstack

View file

@@ -39,7 +39,7 @@ options:
- present
- absent
default: present
author:
- "Christopher H. Laco (@claco)"
- "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace

View file

@@ -206,7 +206,7 @@ EXAMPLES = '''
- hostname: instance-3
domain: anydomain.com
datacenter: dal09
tags:
- ansible-module-test
- ansible-module-test-slaves
hourly: True

View file

@@ -26,7 +26,7 @@ DOCUMENTATION = '''
---
module: vsphere_copy
short_description: Copy a file to a vCenter datastore
description:
- Upload files to a vCenter datastore
version_added: 2.0
author: Dag Wieers (@dagwieers) <dag@wieers.com>

View file

@@ -101,7 +101,7 @@ EXAMPLES = '''
webfaction_app:
name="my_wsgi_app1"
state=present
type=mod_wsgi35-python27
login_name={{webfaction_user}}
login_password={{webfaction_passwd}}
machine={{webfaction_machine}}
@@ -168,8 +168,8 @@ def main():
# If this isn't a dry run, create the app
result.update(
webfaction.create_app(
session_id, app_name, app_type,
module.boolean(module.params['autostart']),
module.params['extra_info'],
module.boolean(module.params['port_open'])
)

View file

@@ -95,8 +95,8 @@ EXAMPLES = '''
webfaction_site:
name: testsite1
state: present
host: myhost.webfaction.com
subdomains:
- 'testsite1.my_domain.org'
site_apps:
- ['testapp1', '/']
@@ -167,8 +167,8 @@ def main():
changed = False
)
positional_args = [
session_id, site_name, site_ip,
module.boolean(module.params['https']),
module.params['subdomains'],
]