Bulk autopep8 (modules)

As agreed at the 2017-12-07 Core meeting, bulk-fix PEP 8 issues.

Generated using:
autopep8 1.3.3 (pycodestyle: 2.3.1)
autopep8 -r  --max-line-length 160 --in-place --ignore E305,E402,E722,E741 lib/ansible/modules

Manually fix issues that autopep8 has introduced
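Nearly all of the hunks below are whitespace-only: autopep8 re-aligns continuation lines with the opening delimiter (pycodestyle E127/E128) and normalises spacing around operators, commas and comments, while the ignored codes (E305 blank lines, E402 import placement, E722 bare except, E741 ambiguous names) are left for manual follow-up. A minimal illustrative sketch of the continuation-line fix, not taken verbatim from the diff, using the argument names from the first hunk below; the exact "before" indentation is invented for illustration, since the rendered diff strips leading whitespace:

# Before: continuation lines not aligned with the opening parenthesis (E128).
argument_spec = {}
argument_spec.update(dict(change_hostname_to=dict(required=True, type='str'),
        domainname=dict(required=True, type='str'),
        dns_servers=dict(required=True, type='list')))

# After autopep8: continuation lines aligned under the opening parenthesis.
argument_spec = {}
argument_spec.update(dict(change_hostname_to=dict(required=True, type='str'),
                          domainname=dict(required=True, type='str'),
                          dns_servers=dict(required=True, type='list')))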
John Barker 2017-12-07 16:27:06 +00:00 committed by John R Barker
commit c57a7f05e1
314 changed files with 3462 additions and 3383 deletions


@@ -94,8 +94,8 @@ def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(change_hostname_to=dict(required=True, type='str'),
domainname=dict(required=True, type='str'),
dns_servers=dict(required=True, type='list')))
domainname=dict(required=True, type='str'),
dns_servers=dict(required=True, type='list')))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)


@@ -234,9 +234,9 @@ def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(esxi_hostname=dict(required=True, type='str'),
switch_name=dict(required=True, type='str'),
vmnics=dict(required=True, type='list'),
state=dict(default='present', choices=['present', 'absent'], type='str')))
switch_name=dict(required=True, type='str'),
vmnics=dict(required=True, type='list'),
state=dict(default='present', choices=['present', 'absent'], type='str')))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)


@@ -507,6 +507,7 @@ class PyVmomiDeviceHelper(object):
class PyVmomiCache(object):
""" This class caches references to objects which are requested multiples times but not modified """
def __init__(self, content, dc_name=None):
self.content = content
self.dc_name = dc_name
@@ -849,7 +850,7 @@ class PyVmomiHelper(PyVmomi):
if (nic.device.backing and not hasattr(nic.device.backing, 'port')):
nic_change_detected = True
elif (nic.device.backing and (nic.device.backing.port.portgroupKey != pg_obj.key or
nic.device.backing.port.switchUuid != pg_obj.config.distributedVirtualSwitch.uuid)):
nic.device.backing.port.switchUuid != pg_obj.config.distributedVirtualSwitch.uuid)):
nic_change_detected = True
dvs_port_connection = vim.dvs.PortConnection()


@@ -156,7 +156,6 @@ class PyVmomiHelper(object):
return tree
def _build_folder_map(self, folder, inpath='/'):
""" Build a searchable index for vms+uuids+folders """
if isinstance(folder, tuple):
folder = folder[1]


@@ -154,15 +154,15 @@ def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(portgroup_name=dict(required=True, type='str'),
ip_address=dict(required=True, type='str'),
subnet_mask=dict(required=True, type='str'),
mtu=dict(required=False, type='int'),
enable_vsan=dict(required=False, type='bool'),
enable_vmotion=dict(required=False, type='bool'),
enable_mgmt=dict(required=False, type='bool'),
enable_ft=dict(required=False, type='bool'),
vswitch_name=dict(required=True, type='str'),
vlan_id=dict(required=True, type='int')))
ip_address=dict(required=True, type='str'),
subnet_mask=dict(required=True, type='str'),
mtu=dict(required=False, type='int'),
enable_vsan=dict(required=False, type='bool'),
enable_vmotion=dict(required=False, type='bool'),
enable_mgmt=dict(required=False, type='bool'),
enable_ft=dict(required=False, type='bool'),
vswitch_name=dict(required=True, type='str'),
vlan_id=dict(required=True, type='int')))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)


@@ -87,8 +87,8 @@ def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(vmk_name=dict(required=True, type='str'),
ip_address=dict(required=True, type='str'),
subnet_mask=dict(required=True, type='str')))
ip_address=dict(required=True, type='str'),
subnet_mask=dict(required=True, type='str')))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)


@@ -627,6 +627,7 @@ def spec_singleton(spec, request, vm):
spec = request.new_spec()
return spec
def get_cdrom_params(module, s, vm_cdrom):
cdrom_type = None
cdrom_iso_path = None
@@ -648,6 +649,7 @@ def get_cdrom_params(module, s, vm_cdrom):
return cdrom_type, cdrom_iso_path
def vmdisk_id(vm, current_datastore_name):
id_list = []
for vm_disk in vm._disks:
@@ -666,9 +668,9 @@ def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, mo
# Datacenter managed object reference
dclist = [k for k,
v in vsphere_client.get_datacenters().items() if v == datacenter]
v in vsphere_client.get_datacenters().items() if v == datacenter]
if dclist:
dcmor=dclist[0]
dcmor = dclist[0]
else:
vsphere_client.disconnect()
module.fail_json(msg="Cannot find datacenter named: %s" % datacenter)
@@ -744,7 +746,7 @@ def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, mo
cloneArgs = dict(resourcepool=rpmor, power_on=False)
if snapshot_to_clone is not None:
#check if snapshot_to_clone is specified, Create a Linked Clone instead of a full clone.
# check if snapshot_to_clone is specified, Create a Linked Clone instead of a full clone.
cloneArgs["linked"] = True
cloneArgs["snapshot"] = snapshot_to_clone
@@ -778,6 +780,8 @@ def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, mo
# example from https://github.com/kalazzerx/pysphere/blob/master/examples/pysphere_create_disk_and_add_to_vm.py
# was used.
def update_disks(vsphere_client, vm, module, vm_disk, changes):
request = VI.ReconfigVM_TaskRequestMsg()
changed = False
@@ -868,7 +872,7 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name
if vm_extra_config:
spec = spec_singleton(spec, request, vm)
extra_config = []
for k,v in vm_extra_config.items():
for k, v in vm_extra_config.items():
ec = spec.new_extraConfig()
ec.set_element_key(str(k))
ec.set_element_value(str(v))
@@ -988,7 +992,7 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name
spec = spec_singleton(spec, request, vm)
# Get a list of the VM's hard drives
dev_list = [d for d in vm.properties.config.hardware.device if d._type=='VirtualDisk']
dev_list = [d for d in vm.properties.config.hardware.device if d._type == 'VirtualDisk']
if len(vm_disk) > len(dev_list):
vsphere_client.disconnect()
module.fail_json(msg="Error in vm_disk definition. Too many disks defined in comparison to the VM's disk profile.")
@@ -1072,102 +1076,102 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name
def reconfigure_net(vsphere_client, vm, module, esxi, resource_pool, guest, vm_nic, cluster_name=None):
s = vsphere_client
nics = {}
request = VI.ReconfigVM_TaskRequestMsg()
_this = request.new__this(vm._mor)
_this.set_attribute_type(vm._mor.get_attribute_type())
request.set_element__this(_this)
nic_changes = []
datacenter = esxi['datacenter']
# Datacenter managed object reference
dclist = [k for k,
v in vsphere_client.get_datacenters().items() if v == datacenter]
if dclist:
dcmor=dclist[0]
else:
vsphere_client.disconnect()
module.fail_json(msg="Cannot find datacenter named: %s" % datacenter)
dcprops = VIProperty(vsphere_client, dcmor)
nfmor = dcprops.networkFolder._obj
for k,v in vm_nic.items():
nicNum = k[len(k) -1]
if vm_nic[k]['network_type'] == 'dvs':
portgroupKey = find_portgroup_key(module, s, nfmor, vm_nic[k]['network'])
todvs = True
elif vm_nic[k]['network_type'] == 'standard':
todvs = False
# Detect cards that need to be changed and network type (and act accordingly)
for dev in vm.properties.config.hardware.device:
if dev._type in ["VirtualE1000", "VirtualE1000e",
"VirtualPCNet32", "VirtualVmxnet",
"VirtualNmxnet2", "VirtualVmxnet3"]:
devNum = dev.deviceInfo.label[len(dev.deviceInfo.label) - 1]
if devNum == nicNum:
fromdvs = dev.deviceInfo.summary.split(':')[0] == 'DVSwitch'
if todvs and fromdvs:
if dev.backing.port._obj.get_element_portgroupKey() != portgroupKey:
nics[k] = (dev, portgroupKey, 1)
elif fromdvs and not todvs:
s = vsphere_client
nics = {}
request = VI.ReconfigVM_TaskRequestMsg()
_this = request.new__this(vm._mor)
_this.set_attribute_type(vm._mor.get_attribute_type())
request.set_element__this(_this)
nic_changes = []
datacenter = esxi['datacenter']
# Datacenter managed object reference
dclist = [k for k,
v in vsphere_client.get_datacenters().items() if v == datacenter]
if dclist:
dcmor = dclist[0]
else:
vsphere_client.disconnect()
module.fail_json(msg="Cannot find datacenter named: %s" % datacenter)
dcprops = VIProperty(vsphere_client, dcmor)
nfmor = dcprops.networkFolder._obj
for k, v in vm_nic.items():
nicNum = k[len(k) - 1]
if vm_nic[k]['network_type'] == 'dvs':
portgroupKey = find_portgroup_key(module, s, nfmor, vm_nic[k]['network'])
todvs = True
elif vm_nic[k]['network_type'] == 'standard':
todvs = False
# Detect cards that need to be changed and network type (and act accordingly)
for dev in vm.properties.config.hardware.device:
if dev._type in ["VirtualE1000", "VirtualE1000e",
"VirtualPCNet32", "VirtualVmxnet",
"VirtualNmxnet2", "VirtualVmxnet3"]:
devNum = dev.deviceInfo.label[len(dev.deviceInfo.label) - 1]
if devNum == nicNum:
fromdvs = dev.deviceInfo.summary.split(':')[0] == 'DVSwitch'
if todvs and fromdvs:
if dev.backing.port._obj.get_element_portgroupKey() != portgroupKey:
nics[k] = (dev, portgroupKey, 1)
elif fromdvs and not todvs:
nics[k] = (dev, '', 2)
elif not fromdvs and todvs:
nics[k] = (dev, portgroupKey, 3)
elif not fromdvs and not todvs:
if dev.backing._obj.get_element_deviceName() != vm_nic[k]['network']:
nics[k] = (dev, '', 2)
elif not fromdvs and todvs:
nics[k] = (dev, portgroupKey, 3)
elif not fromdvs and not todvs:
if dev.backing._obj.get_element_deviceName() != vm_nic[k]['network']:
nics[k] = (dev, '', 2)
else:
pass
else:
module.exit_json()
pass
else:
module.exit_json()
if len(nics) > 0:
for nic, obj in nics.items():
"""
1,2 and 3 are used to mark which action should be taken
1 = from a distributed switch to a distributed switch
2 = to a standard switch
3 = to a distributed switch
"""
dev = obj[0]
pgKey = obj[1]
dvsKey = obj[2]
if dvsKey == 1:
dev.backing.port._obj.set_element_portgroupKey(pgKey)
dev.backing.port._obj.set_element_portKey('')
if dvsKey == 3:
dvswitch_uuid = find_dvswitch_uuid(module, s, nfmor, pgKey)
nic_backing_port = VI.ns0.DistributedVirtualSwitchPortConnection_Def(
"nic_backing_port").pyclass()
nic_backing_port.set_element_switchUuid(dvswitch_uuid)
nic_backing_port.set_element_portgroupKey(pgKey)
nic_backing_port.set_element_portKey('')
nic_backing = VI.ns0.VirtualEthernetCardDistributedVirtualPortBackingInfo_Def(
"nic_backing").pyclass()
nic_backing.set_element_port(nic_backing_port)
dev._obj.set_element_backing(nic_backing)
if dvsKey == 2:
nic_backing = VI.ns0.VirtualEthernetCardNetworkBackingInfo_Def(
"nic_backing").pyclass()
nic_backing.set_element_deviceName(vm_nic[nic]['network'])
dev._obj.set_element_backing(nic_backing)
for nic, obj in nics.items():
dev = obj[0]
spec = request.new_spec()
nic_change = spec.new_deviceChange()
nic_change.set_element_device(dev._obj)
nic_change.set_element_operation("edit")
nic_changes.append(nic_change)
spec.set_element_deviceChange(nic_changes)
request.set_element_spec(spec)
ret = vsphere_client._proxy.ReconfigVM_Task(request)._returnval
task = VITask(ret, vsphere_client)
status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
if status == task.STATE_SUCCESS:
return(True)
elif status == task.STATE_ERROR:
module.fail_json(msg="Could not change network %s" % task.get_error_message())
elif len(nics) == 0:
return(False)
if len(nics) > 0:
for nic, obj in nics.items():
"""
1,2 and 3 are used to mark which action should be taken
1 = from a distributed switch to a distributed switch
2 = to a standard switch
3 = to a distributed switch
"""
dev = obj[0]
pgKey = obj[1]
dvsKey = obj[2]
if dvsKey == 1:
dev.backing.port._obj.set_element_portgroupKey(pgKey)
dev.backing.port._obj.set_element_portKey('')
if dvsKey == 3:
dvswitch_uuid = find_dvswitch_uuid(module, s, nfmor, pgKey)
nic_backing_port = VI.ns0.DistributedVirtualSwitchPortConnection_Def(
"nic_backing_port").pyclass()
nic_backing_port.set_element_switchUuid(dvswitch_uuid)
nic_backing_port.set_element_portgroupKey(pgKey)
nic_backing_port.set_element_portKey('')
nic_backing = VI.ns0.VirtualEthernetCardDistributedVirtualPortBackingInfo_Def(
"nic_backing").pyclass()
nic_backing.set_element_port(nic_backing_port)
dev._obj.set_element_backing(nic_backing)
if dvsKey == 2:
nic_backing = VI.ns0.VirtualEthernetCardNetworkBackingInfo_Def(
"nic_backing").pyclass()
nic_backing.set_element_deviceName(vm_nic[nic]['network'])
dev._obj.set_element_backing(nic_backing)
for nic, obj in nics.items():
dev = obj[0]
spec = request.new_spec()
nic_change = spec.new_deviceChange()
nic_change.set_element_device(dev._obj)
nic_change.set_element_operation("edit")
nic_changes.append(nic_change)
spec.set_element_deviceChange(nic_changes)
request.set_element_spec(spec)
ret = vsphere_client._proxy.ReconfigVM_Task(request)._returnval
task = VITask(ret, vsphere_client)
status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
if status == task.STATE_SUCCESS:
return(True)
elif status == task.STATE_ERROR:
module.fail_json(msg="Could not change network %s" % task.get_error_message())
elif len(nics) == 0:
return(False)
def _build_folder_tree(nodes, parent):
@@ -1218,9 +1222,9 @@ def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest,
esxi_hostname = esxi['hostname']
# Datacenter managed object reference
dclist = [k for k,
v in vsphere_client.get_datacenters().items() if v == datacenter]
v in vsphere_client.get_datacenters().items() if v == datacenter]
if dclist:
dcmor=dclist[0]
dcmor = dclist[0]
else:
vsphere_client.disconnect()
module.fail_json(msg="Cannot find datacenter named: %s" % datacenter)
@@ -1419,7 +1423,7 @@ def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest,
" to be specified." % vm_hardware['vm_floppy'])
# Add a floppy to the VM.
add_floppy(module, vsphere_client, config_target, config, devices,
default_devs, floppy_type, floppy_image_path)
default_devs, floppy_type, floppy_image_path)
if vm_nic:
for nic in sorted(vm_nic):
try:
@@ -1479,7 +1483,7 @@ def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest,
# Power on the VM if it was requested
power_state(vm, state, True)
vmfacts=gather_facts(vm)
vmfacts = gather_facts(vm)
vsphere_client.disconnect()
module.exit_json(
ansible_facts=vmfacts,
@@ -1579,13 +1583,13 @@ def gather_facts(vm):
'module_hw': True,
'hw_name': vm.properties.name,
'hw_power_status': vm.get_status(),
'hw_guest_full_name': vm.properties.config.guestFullName,
'hw_guest_full_name': vm.properties.config.guestFullName,
'hw_guest_id': vm.properties.config.guestId,
'hw_product_uuid': vm.properties.config.uuid,
'hw_instance_uuid': vm.properties.config.instanceUuid,
'hw_processor_count': vm.properties.config.hardware.numCPU,
'hw_memtotal_mb': vm.properties.config.hardware.memoryMB,
'hw_interfaces':[],
'hw_interfaces': [],
}
netInfo = vm.get_property('net')
netDict = {}
@@ -1608,7 +1612,7 @@ def gather_facts(vm):
'macaddress_dash': entry.macAddress.replace(':', '-'),
'summary': entry.deviceInfo.summary,
}
facts['hw_interfaces'].append('eth'+str(ifidx))
facts['hw_interfaces'].append('eth' + str(ifidx))
ifidx += 1
@@ -1753,7 +1757,7 @@ def main():
),
supports_check_mode=False,
mutually_exclusive=[['state', 'vmware_guest_facts'],['state', 'from_template']],
mutually_exclusive=[['state', 'vmware_guest_facts'], ['state', 'from_template']],
required_together=[
['state', 'force'],
[
@@ -1791,7 +1795,6 @@ def main():
power_on_after_clone = module.params['power_on_after_clone']
validate_certs = module.params['validate_certs']
# CONNECT TO THE SERVER
viserver = VIServer()
if validate_certs and not hasattr(ssl, 'SSLContext') and not vcenter_hostname.startswith('http://'):
@@ -1896,10 +1899,9 @@ def main():
# check if user is trying to perform state operation on a vm which doesn't exists
elif state in ['present', 'powered_off', 'powered_on'] and not all((vm_extra_config,
vm_hardware, vm_disk, vm_nic, esxi)):
vm_hardware, vm_disk, vm_nic, esxi)):
module.exit_json(changed=False, msg="vm %s not present" % guest)
# Create the VM
elif state in ['present', 'powered_off', 'powered_on']: