Mirror of https://github.com/ansible-collections/community.general.git, synced 2025-07-22 12:50:22 -07:00
Enable more pylint rules and fix reported issues. (#30539)
* Enable pylint unreachable test.
* Enable pylint suppressed-message test.
* Enable pylint redundant-unittest-assert test.
* Enable pylint bad-open-mode test.
* Enable pylint signature-differs test.
* Enable pylint unnecessary-pass test.
* Enable pylint unnecessary-lambda test.
* Enable pylint raising-bad-type test.
* Enable pylint logging-not-lazy test.
* Enable pylint logging-format-interpolation test.
* Enable pylint useless-else-on-loop test.
parent 01563ccd5d
commit 7714dcd04e
36 changed files with 92 additions and 135 deletions
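Most hunks in this commit address the logging-not-lazy and logging-format-interpolation checks: they pass format arguments to log.debug() instead of pre-building the message. A minimal standalone sketch of the difference, using plain stdlib logging rather than the modules' own log objects (variable values are illustrative):

    import logging

    logging.basicConfig(level=logging.WARNING)
    log = logging.getLogger(__name__)

    instance_id, lb = "i-0abc123", "my-elb"

    # Eager: the message is formatted even though DEBUG is disabled,
    # so the formatting work is wasted. Flagged by pylint.
    log.debug("De-registering {0} from ELB {1}".format(instance_id, lb))

    # Lazy: logging interpolates the %s placeholders only if the
    # record is actually emitted.
    log.debug("De-registering %s from ELB %s", instance_id, lb)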
@@ -434,13 +434,11 @@ def download_s3file(module, s3, bucket, obj, dest, retries, version=None):
             if x >= retries:
                 module.fail_json(msg="Failed while downloading %s." % obj, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
             # otherwise, try again, this may be a transient timeout.
-            pass
         except SSLError as e:  # will ClientError catch SSLError?
             # actually fail on last pass through the loop.
             if x >= retries:
                 module.fail_json(msg="s3 download failed: %s." % e, exception=traceback.format_exc())
             # otherwise, try again, this may be a transient timeout.
-            pass


 def download_s3str(module, s3, bucket, obj, version=None, validate=True):
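The hunk above removes pass statements flagged by the newly enabled unnecessary-pass check: pass only matters where a suite would otherwise be empty. A small sketch (names are illustrative, not from the module):

    # A comment plus real statements already form a valid suite,
    # so a trailing `pass` is dead weight:
    def check_retries(attempt, retries):
        if attempt >= retries:
            raise RuntimeError("out of retries")
        # otherwise, try again; nothing else to do here

    # `pass` is still required when the body is genuinely empty:
    class TransientTimeout(Exception):
        pass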
@@ -200,8 +200,7 @@ def pipeline_id(client, name):
     for dp in pipelines['pipelineIdList']:
         if dp['name'] == name:
             return dp['id']
-    else:
-        raise DataPipelineNotFound
+    raise DataPipelineNotFound


 def pipeline_description(client, dp_id):
@@ -233,8 +232,7 @@ def pipeline_field(client, dp_id, field):
     for field_key in dp_description['pipelineDescriptionList'][0]['fields']:
         if field_key['key'] == field:
             return field_key['stringValue']
-    else:
-        raise KeyError("Field key {0} not found!".format(field))
+    raise KeyError("Field key {0} not found!".format(field))


 def run_with_timeout(timeout, func, *func_args, **func_kwargs):
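The two data_pipeline hunks above fix the useless-else-on-loop check. An else clause on a for loop only has meaning when the loop can exit via break; with no break in the body, the else suite always runs, so it can simply follow the loop. A runnable sketch of the equivalence (names are illustrative):

    def pick(items, wanted):
        for item in items:
            if item == wanted:
                return item
        # With no `break` in the loop, wrapping this raise in `else:`
        # changes nothing -- it executes whenever the loop finishes.
        raise KeyError("{0} not found!".format(wanted))

    print(pick(["a", "b"], "b"))  # b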
@@ -587,7 +587,7 @@ def elb_dreg(asg_connection, module, group_name, instance_id):

     for lb in as_group['LoadBalancerNames']:
         deregister_lb_instances(elb_connection, lb, instance_id)
-        log.debug("De-registering {0} from ELB {1}".format(instance_id, lb))
+        log.debug("De-registering %s from ELB %s", instance_id, lb)

     wait_timeout = time.time() + wait_timeout
     while wait_timeout > time.time() and count > 0:
@@ -597,7 +597,7 @@ def elb_dreg(asg_connection, module, group_name, instance_id):
             for i in lb_instances['InstanceStates']:
                 if i['InstanceId'] == instance_id and i['State'] == "InService":
                     count += 1
-                    log.debug("{0}: {1}, {2}".format(i['InstanceId'], i['State'], i['Description']))
+                    log.debug("%s: %s, %s", i['InstanceId'], i['State'], i['Description'])
         time.sleep(10)

     if wait_timeout <= time.time():
@@ -614,7 +614,7 @@ def elb_healthy(asg_connection, elb_connection, module, group_name):
     for instance, settings in props['instance_facts'].items():
         if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy':
             instances.append(dict(InstanceId=instance))
-    log.debug("ASG considers the following instances InService and Healthy: {0}".format(instances))
+    log.debug("ASG considers the following instances InService and Healthy: %s", instances)
     log.debug("ELB instance status:")
     lb_instances = list()
     for lb in as_group.get('LoadBalancerNames'):
@@ -635,7 +635,7 @@ def elb_healthy(asg_connection, elb_connection, module, group_name):
         for i in lb_instances.get('InstanceStates'):
             if i['State'] == "InService":
                 healthy_instances.add(i['InstanceId'])
-            log.debug("ELB Health State {0}: {1}".format(i['InstanceId'], i['State']))
+            log.debug("ELB Health State %s: %s", i['InstanceId'], i['State'])
     return len(healthy_instances)


@@ -648,7 +648,7 @@ def tg_healthy(asg_connection, elbv2_connection, module, group_name):
     for instance, settings in props['instance_facts'].items():
         if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy':
             instances.append(dict(Id=instance))
-    log.debug("ASG considers the following instances InService and Healthy: {0}".format(instances))
+    log.debug("ASG considers the following instances InService and Healthy: %s", instances)
     log.debug("Target Group instance status:")
     tg_instances = list()
     for tg in as_group.get('TargetGroupARNs'):
@@ -669,7 +669,7 @@ def tg_healthy(asg_connection, elbv2_connection, module, group_name):
         for i in tg_instances.get('TargetHealthDescriptions'):
             if i['TargetHealth']['State'] == "healthy":
                 healthy_instances.add(i['Target']['Id'])
-            log.debug("Target Group Health State {0}: {1}".format(i['Target']['Id'], i['TargetHealth']['State']))
+            log.debug("Target Group Health State %s: %s", i['Target']['Id'], i['TargetHealth']['State'])
     return len(healthy_instances)


@@ -695,12 +695,12 @@ def wait_for_elb(asg_connection, module, group_name):

     while healthy_instances < as_group.get('MinSize') and wait_timeout > time.time():
         healthy_instances = elb_healthy(asg_connection, elb_connection, module, group_name)
-        log.debug("ELB thinks {0} instances are healthy.".format(healthy_instances))
+        log.debug("ELB thinks %s instances are healthy.", healthy_instances)
         time.sleep(10)
     if wait_timeout <= time.time():
         # waiting took too long
         module.fail_json(msg="Waited too long for ELB instances to be healthy. %s" % time.asctime())
-    log.debug("Waiting complete. ELB thinks {0} instances are healthy.".format(healthy_instances))
+    log.debug("Waiting complete. ELB thinks %s instances are healthy.", healthy_instances)


 def wait_for_target_group(asg_connection, module, group_name):
@@ -725,12 +725,12 @@ def wait_for_target_group(asg_connection, module, group_name):

     while healthy_instances < as_group.get('MinSize') and wait_timeout > time.time():
         healthy_instances = tg_healthy(asg_connection, elbv2_connection, module, group_name)
-        log.debug("Target Group thinks {0} instances are healthy.".format(healthy_instances))
+        log.debug("Target Group thinks %s instances are healthy.", healthy_instances)
         time.sleep(10)
     if wait_timeout <= time.time():
         # waiting took too long
         module.fail_json(msg="Waited too long for ELB instances to be healthy. %s" % time.asctime())
-    log.debug("Waiting complete. Target Group thinks {0} instances are healthy.".format(healthy_instances))
+    log.debug("Waiting complete. Target Group thinks %s instances are healthy.", healthy_instances)


 def suspend_processes(ec2_connection, as_group, module):
@@ -1042,7 +1042,7 @@ def get_chunks(l, n):
 def update_size(connection, group, max_size, min_size, dc):

     log.debug("setting ASG sizes")
-    log.debug("minimum size: {0}, desired_capacity: {1}, max size: {2}".format(min_size, dc, max_size))
+    log.debug("minimum size: %s, desired_capacity: %s, max size: %s", min_size, dc, max_size)
     updated_group = dict()
     updated_group['AutoScalingGroupName'] = group['AutoScalingGroupName']
     updated_group['MinSize'] = min_size
@@ -1083,7 +1083,7 @@ def replace(connection, module):

     # we don't want to spin up extra instances if not necessary
     if num_new_inst_needed < batch_size:
-        log.debug("Overriding batch size to {0}".format(num_new_inst_needed))
+        log.debug("Overriding batch size to %s", num_new_inst_needed)
         batch_size = num_new_inst_needed

     if not old_instances:
@@ -1143,14 +1143,14 @@ def get_instances_by_lc(props, lc_check, initial_instances):
                 old_instances.append(i)

     else:
-        log.debug("Comparing initial instances with current: {0}".format(initial_instances))
+        log.debug("Comparing initial instances with current: %s", initial_instances)
         for i in props['instances']:
             if i not in initial_instances:
                 new_instances.append(i)
             else:
                 old_instances.append(i)
-    log.debug("New instances: {0}, {1}".format(len(new_instances), new_instances))
-    log.debug("Old instances: {0}, {1}".format(len(old_instances), old_instances))
+    log.debug("New instances: %s, %s", len(new_instances), new_instances)
+    log.debug("Old instances: %s, %s", len(old_instances), old_instances)

     return new_instances, old_instances


@@ -1192,17 +1192,17 @@ def terminate_batch(connection, module, replace_instances, initial_instances, le
     # and they have a non-current launch config
     instances_to_terminate = list_purgeable_instances(props, lc_check, replace_instances, initial_instances)

-    log.debug("new instances needed: {0}".format(num_new_inst_needed))
-    log.debug("new instances: {0}".format(new_instances))
-    log.debug("old instances: {0}".format(old_instances))
-    log.debug("batch instances: {0}".format(",".join(instances_to_terminate)))
+    log.debug("new instances needed: %s", num_new_inst_needed)
+    log.debug("new instances: %s", new_instances)
+    log.debug("old instances: %s", old_instances)
+    log.debug("batch instances: %s", ",".join(instances_to_terminate))

     if num_new_inst_needed == 0:
         decrement_capacity = True
         if as_group['MinSize'] != min_size:
             updated_params = dict(AutoScalingGroupName=as_group['AutoScalingGroupName'], MinSize=min_size)
             update_asg(connection, **updated_params)
-            log.debug("Updating minimum size back to original of {0}".format(min_size))
+            log.debug("Updating minimum size back to original of %s", min_size)
         # if are some leftover old instances, but we are already at capacity with new ones
         # we don't want to decrement capacity
         if leftovers:
@@ -1216,13 +1216,13 @@ def terminate_batch(connection, module, replace_instances, initial_instances, le
         instances_to_terminate = instances_to_terminate[:num_new_inst_needed]
         decrement_capacity = False
         break_loop = False
-        log.debug("{0} new instances needed".format(num_new_inst_needed))
+        log.debug("%s new instances needed", num_new_inst_needed)

-    log.debug("decrementing capacity: {0}".format(decrement_capacity))
+    log.debug("decrementing capacity: %s", decrement_capacity)

     for instance_id in instances_to_terminate:
         elb_dreg(connection, module, group_name, instance_id)
-        log.debug("terminating instance: {0}".format(instance_id))
+        log.debug("terminating instance: %s", instance_id)
         terminate_asg_instance(connection, instance_id, decrement_capacity)

     # we wait to make sure the machines we marked as Unhealthy are
@@ -1248,7 +1248,7 @@ def wait_for_term_inst(connection, module, term_instances):
         for i in instances:
             lifecycle = instance_facts[i]['lifecycle_state']
             health = instance_facts[i]['health_status']
-            log.debug("Instance {0} has state of {1},{2}".format(i, lifecycle, health))
+            log.debug("Instance %s has state of %s,%s", i, lifecycle, health)
             if lifecycle == 'Terminating' or health == 'Unhealthy':
                 count += 1
         time.sleep(10)
@@ -1263,18 +1263,18 @@ def wait_for_new_inst(module, connection, group_name, wait_timeout, desired_size
     # make sure we have the latest stats after that last loop.
     as_group = describe_autoscaling_groups(connection, group_name)[0]
     props = get_properties(as_group, module)
-    log.debug("Waiting for {0} = {1}, currently {2}".format(prop, desired_size, props[prop]))
+    log.debug("Waiting for %s = %s, currently %s", prop, desired_size, props[prop])
     # now we make sure that we have enough instances in a viable state
     wait_timeout = time.time() + wait_timeout
     while wait_timeout > time.time() and desired_size > props[prop]:
-        log.debug("Waiting for {0} = {1}, currently {2}".format(prop, desired_size, props[prop]))
+        log.debug("Waiting for %s = %s, currently %s", prop, desired_size, props[prop])
         time.sleep(10)
         as_group = describe_autoscaling_groups(connection, group_name)[0]
         props = get_properties(as_group, module)
     if wait_timeout <= time.time():
         # waiting took too long
         module.fail_json(msg="Waited too long for new instances to become viable. %s" % time.asctime())
-    log.debug("Reached {0}: {1}".format(prop, desired_size))
+    log.debug("Reached %s: %s", prop, desired_size)
     return props

@@ -219,7 +219,6 @@ def main():
                     module.fail_json(msg="timed out while waiting for the key to be re-created")

             changed = True
-            pass

     # if the key doesn't exist, create it now
     else:
@@ -613,7 +613,6 @@ class Wrapper(object):
             except AzureException as e:
                 if not str(e).lower().find("temporary redirect") == -1:
                     time.sleep(5)
-                    pass
                 else:
                     raise e

@@ -668,8 +668,7 @@ class LxcContainerManagement(object):
             build_command.append(
                 '%s %s' % (key, value)
             )
-        else:
-            return build_command
+        return build_command

     def _get_vars(self, variables):
         """Return a dict of all variables as found within the module.
@@ -689,8 +688,7 @@ class LxcContainerManagement(object):
             _var = self.module.params.get(k)
             if _var not in false_values:
                 return_dict[v] = _var
-        else:
-            return return_dict
+        return return_dict

     def _run_command(self, build_command, unsafe_shell=False):
         """Return information from running an Ansible Command.
@@ -975,16 +973,15 @@ class LxcContainerManagement(object):
                 time.sleep(1)
             else:
                 return True
-        else:
-            self.failure(
-                lxc_container=self._container_data(),
-                error='Failed to start container'
-                      ' [ %s ]' % self.container_name,
-                rc=1,
-                msg='The container [ %s ] failed to start. Check to lxc is'
-                    ' available and that the container is in a functional'
-                    ' state.' % self.container_name
-            )
+        self.failure(
+            lxc_container=self._container_data(),
+            error='Failed to start container'
+                  ' [ %s ]' % self.container_name,
+            rc=1,
+            msg='The container [ %s ] failed to start. Check to lxc is'
+                ' available and that the container is in a functional'
+                ' state.' % self.container_name
+        )

     def _check_archive(self):
         """Create a compressed archive of a container.
@@ -1229,6 +1229,7 @@ class RHEV(object):
         self.__get_conn()
         return self.conn.set_VM_Host(vmname, vmhost)

+        # pylint: disable=unreachable
         VM = self.conn.get_VM(vmname)
         HOST = self.conn.get_Host(vmhost)

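Here (and in the kubernetes hunk further down) the newly enabled unreachable check fires because code after an unconditional return can never execute; the commit keeps the dead code and silences the warning with an inline pragma rather than deleting it. A sketch of the pattern (function and variable names are hypothetical, not from the module):

    def fetch_host(conn):
        return conn  # execution always stops here

        # pylint: disable=unreachable
        # Without the pragma above, pylint flags everything below.
        host = None
        print(host)

    fetch_host(None)  # prints nothing; the tail never runs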
@@ -122,7 +122,6 @@ def rax_dns(module, comment, email, name, state, ttl):
            domain = dns.find(name=name)
        except pyrax.exceptions.NotFound:
            domain = {}
-           pass
        except Exception as e:
            module.fail_json(msg='%s' % e.message)

@@ -270,7 +270,6 @@ def rax_dns_record(module, comment=None, data=None, domain=None, name=None,
            record = domain.find_record(record_type, name=name, data=data)
        except pyrax.exceptions.DomainRecordNotFound as e:
            record = {}
-           pass
        except pyrax.exceptions.DomainRecordNotUnique as e:
            module.fail_json(msg='%s' % e.message)

@@ -246,6 +246,7 @@ USER_AGENT = "ansible-k8s-module/0.0.1"

 def decode_cert_data(module):
     return
+    # pylint: disable=unreachable
     d = module.params.get("certificate_authority_data")
     if d and not d.startswith("-----BEGIN"):
         module.params["certificate_authority_data"] = base64.b64decode(d)
@@ -111,8 +111,7 @@ def main():
         if parts != '':
             return parts

-    else:
-        return ''
+    return ''

 def run_command(command):
     """Runs a monit command, and returns the new status."""
@@ -286,7 +286,6 @@ class ModuleManager(object):
             if status in ['Changes Pending']:
                 details = self._get_details_from_resource(resource)
                 self._validate_pending_status(details)
-                pass
             elif status in ['Awaiting Initial Sync', 'Not All Devices Synced']:
                 pass
             elif status == 'In Sync':
@@ -277,7 +277,7 @@ class MavenDownloader:
         if self.latest_version_found:
             return self.latest_version_found
         path = "/%s/maven-metadata.xml" % (artifact.path(False))
-        xml = self._request(self.base + path, "Failed to download maven-metadata.xml", lambda r: etree.parse(r))
+        xml = self._request(self.base + path, "Failed to download maven-metadata.xml", etree.parse)
         v = xml.xpath("/metadata/versioning/versions/version[last()]/text()")
         if v:
             self.latest_version_found = v[0]
@@ -289,7 +289,7 @@ class MavenDownloader:

         if artifact.is_snapshot():
             path = "/%s/maven-metadata.xml" % (artifact.path())
-            xml = self._request(self.base + path, "Failed to download maven-metadata.xml", lambda r: etree.parse(r))
+            xml = self._request(self.base + path, "Failed to download maven-metadata.xml", etree.parse)
             timestamp = xml.xpath("/metadata/versioning/snapshot/timestamp/text()")[0]
             buildNumber = xml.xpath("/metadata/versioning/snapshot/buildNumber/text()")[0]
             for snapshotArtifact in xml.xpath("/metadata/versioning/snapshotVersions/snapshotVersion"):
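Both MavenDownloader hunks above fix the unnecessary-lambda check: a lambda that merely forwards its argument to another callable can be replaced by that callable itself. For example:

    values = [-1, 2, -3]

    flagged = list(map(lambda x: abs(x), values))  # unnecessary-lambda
    direct = list(map(abs, values))                # equivalent, direct

    print(flagged == direct)  # True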
@@ -267,15 +267,15 @@ class NetAppESeriesFlashCache(object):
     @property
     def needs_more_disks(self):
         if len(self.cache_detail['driveRefs']) < self.disk_count:
-            self.debug("needs resize: current disk count %s < requested requested count %s" % (
-                len(self.cache_detail['driveRefs']), self.disk_count))
+            self.debug("needs resize: current disk count %s < requested requested count %s",
+                       len(self.cache_detail['driveRefs']), self.disk_count)
             return True

     @property
     def needs_less_disks(self):
         if len(self.cache_detail['driveRefs']) > self.disk_count:
-            self.debug("needs resize: current disk count %s < requested requested count %s" % (
-                len(self.cache_detail['driveRefs']), self.disk_count))
+            self.debug("needs resize: current disk count %s < requested requested count %s",
+                       len(self.cache_detail['driveRefs']), self.disk_count)
             return True

     @property
@@ -292,8 +292,8 @@ class NetAppESeriesFlashCache(object):
     @property
     def needs_more_capacity(self):
         if self.current_size_bytes < self.requested_size_bytes:
-            self.debug("needs resize: current capacity %sb is less than requested minimum %sb" % (
-                self.current_size_bytes, self.requested_size_bytes))
+            self.debug("needs resize: current capacity %sb is less than requested minimum %sb",
+                       self.current_size_bytes, self.requested_size_bytes)
             return True

     @property
@@ -405,7 +405,7 @@ def main():
     try:
         sp.apply()
     except Exception as e:
-        sp.debug("Exception in apply(): \n%s" % to_native(e))
+        sp.debug("Exception in apply(): \n%s", to_native(e))
         sp.module.fail_json(msg="Failed to create flash cache. Error[%s]" % to_native(e),
                             exception=traceback.format_exc())

@@ -269,7 +269,7 @@ class NetAppESeriesStoragePool(object):
         min_total_capacity = min_total_capacity * self._size_unit_map[size_unit]

         # filter clearly invalid/unavailable drives first
-        drives = select(lambda d: self._is_valid_drive(d), drives)
+        drives = select(self._is_valid_drive, drives)

         if interface_type:
             drives = select(lambda d: d['phyDriveType'] == interface_type, drives)
@@ -390,7 +390,7 @@ class NetAppESeriesStoragePool(object):
                 msg="Failed to get storage pools. Array id [%s]. Error[%s]. State[%s]. RC[%s]." %
                     (self.ssid, str(err), self.state, rc))

-        self.debug("searching for storage pool '%s'" % storage_pool_name)
+        self.debug("searching for storage pool '%s'", storage_pool_name)

         pool_detail = next(select(lambda a: a['name'] == storage_pool_name, resp), None)

@@ -514,7 +514,7 @@ class NetAppESeriesStoragePool(object):
         return needs_migration

     def migrate_raid_level(self):
-        self.debug("migrating storage pool to raid level '%s'..." % self.raid_level)
+        self.debug("migrating storage pool to raid level '%s'...", self.raid_level)
         sp_raid_migrate_req = dict(
             raidLevel=self.raid_level
         )
@@ -637,7 +637,7 @@ class NetAppESeriesStoragePool(object):
     def expand_storage_pool(self):
         drives_to_add = self.get_expansion_candidate_drives()

-        self.debug("adding %s drives to storage pool..." % len(drives_to_add))
+        self.debug("adding %s drives to storage pool...", len(drives_to_add))
         sp_expand_req = dict(
             drives=drives_to_add
         )
@@ -723,8 +723,8 @@ class NetAppESeriesStoragePool(object):

         if self.needs_raid_level_migration:
             self.debug(
-                "CHANGED: raid level migration required; storage pool uses '%s', requested is '%s'" % (
-                    self.pool_detail['raidLevel'], self.raid_level))
+                "CHANGED: raid level migration required; storage pool uses '%s', requested is '%s'",
+                self.pool_detail['raidLevel'], self.raid_level)
             changed = True

         # if self.reserved_drive_count_differs:
@@ -813,7 +813,7 @@ def main():
         sp.apply()
     except Exception:
         e = get_exception()
-        sp.debug("Exception in apply(): \n%s" % format_exc(e))
+        sp.debug("Exception in apply(): \n%s", format_exc(e))
         raise

@@ -235,7 +235,7 @@ class NetAppESeriesVolume(object):

         volumes.extend(thinvols)

-        self.debug("searching for volume '%s'" % volume_name)
+        self.debug("searching for volume '%s'", volume_name)
         volume_detail = next(ifilter(lambda a: a['name'] == volume_name, volumes), None)

         if volume_detail:
@@ -257,7 +257,7 @@ class NetAppESeriesVolume(object):
             self.module.fail_json(
                 msg="Failed to obtain list of storage pools. Array Id [%s]. Error[%s]." % (self.ssid, str(err)))

-        self.debug("searching for storage pool '%s'" % storage_pool_name)
+        self.debug("searching for storage pool '%s'", storage_pool_name)
         pool_detail = next(ifilter(lambda a: a['name'] == storage_pool_name, resp), None)

         if pool_detail:
@@ -277,7 +277,7 @@ class NetAppESeriesVolume(object):
             dataAssuranceEnabled=data_assurance_enabled,
         )

-        self.debug("creating volume '%s'" % name)
+        self.debug("creating volume '%s'", name)
         try:
             (rc, resp) = request(self.api_url + "/storage-systems/%s/volumes" % (self.ssid),
                                  data=json.dumps(volume_add_req), headers=HEADERS, method='POST',
@@ -302,7 +302,7 @@ class NetAppESeriesVolume(object):
             dataAssuranceEnabled=data_assurance_enabled,
         )

-        self.debug("creating thin-volume '%s'" % name)
+        self.debug("creating thin-volume '%s'", name)
         try:
             (rc, resp) = request(self.api_url + "/storage-systems/%s/thin-volumes" % (self.ssid),
                                  data=json.dumps(thin_volume_add_req), headers=HEADERS, method='POST',
@@ -318,7 +318,7 @@ class NetAppESeriesVolume(object):

     def delete_volume(self):
         # delete the volume
-        self.debug("deleting volume '%s'" % self.volume_detail['name'])
+        self.debug("deleting volume '%s'", self.volume_detail['name'])
         try:
             (rc, resp) = request(
                 self.api_url + "/storage-systems/%s/%s/%s" % (self.ssid, self.volume_resource_name,
@@ -445,7 +445,7 @@ class NetAppESeriesVolume(object):
             action = resp['action']
             percent_complete = resp['percentComplete']

-            self.debug('expand action %s, %s complete...' % (action, percent_complete))
+            self.debug('expand action %s, %s complete...', action, percent_complete)

             if action == 'none':
                 self.debug('expand complete')
@@ -469,11 +469,8 @@ class NetAppESeriesVolume(object):
         elif self.state == 'present':
             # check requested volume size, see if expansion is necessary
             if self.volume_needs_expansion:
-                self.debug(
-                    "CHANGED: requested volume size %s%s is larger than current size %sb" % (self.size,
-                                                                                             self.size_unit,
-                                                                                             self.volume_detail[
-                                                                                                 'capacity']))
+                self.debug("CHANGED: requested volume size %s%s is larger than current size %sb",
+                           self.size, self.size_unit, self.volume_detail['capacity'])
                 changed = True

             if self.volume_properties_changed:
@@ -543,7 +540,7 @@ def main():
         v.apply()
     except Exception:
         e = get_exception()
-        v.debug("Exception in apply(): \n%s" % format_exc(e))
+        v.debug("Exception in apply(): \n%s", format_exc(e))
         v.module.fail_json(msg="Module failed. Error [%s]." % (str(e)))

@@ -306,7 +306,6 @@ class SolidFireVolume(object):
         if changed:
             if self.module.check_mode:
                 result_message = "Check mode, skipping changes"
-                pass
             else:
                 if self.state == 'present':
                     if not volume_exists:
@@ -126,7 +126,6 @@ def get_snapshot(module, array):
         for s in array.get_volume(module.params['name'], snap='true'):
             if s['name'] == snapname:
                 return snapname
-                break
     except:
         return None

@@ -306,7 +306,6 @@ def setInterfaceOption(module, lines, iface, option, raw_value, state):
         module.fail_json(msg="Error: unsupported state %s, has to be either present or absent" % state)

     return changed, lines
-    pass


 def addOptionAfterLine(option, value, iface, lines, last_line_dict, iface_options):