mirror of https://github.com/ansible-collections/community.general.git
synced 2025-07-21 04:10:23 -07:00
vmware_guest: use the datacenter as a context for clone specs (#26511)
* Slight refactor on vmware_guest to fix path searching and vcsim compatibility.
* Clean up pep8 errors
* Fix more pep8 issues
* Add assertions
* Remove debug failure
* Update docstring for folder with examples
* Revise _get_vm_prop based on suggestions
* Implement folder walker to find a folder path
* More enhancements for datacenter->datacenter cloning
* Fix a few pep8 issues
* Remove useless check for subclass of None
* Fix a few suggestions
* Move serialize_spec to a util function
* Group exceptions together

Fixes #25011
This commit is contained in:
parent 5b898a7732
commit 10fc4417f7
6 changed files with 364 additions and 37 deletions
@@ -74,8 +74,21 @@ options:
    version_added: '2.3'
  folder:
    description:
      - Destination folder, absolute path to find an existing guest or create the new guest.
    default: /
      - Destination folder, absolute or relative path to find an existing guest or create the new guest.
      - The folder should include the datacenter. ESX's datacenter is ha-datacenter
      - 'Examples:'
      - ' folder: /ha-datacenter/vm'
      - ' folder: ha-datacenter/vm'
      - ' folder: /datacenter1/vm'
      - ' folder: datacenter1/vm'
      - ' folder: /datacenter1/vm/folder1'
      - ' folder: datacenter1/vm/folder1'
      - ' folder: /folder1/datacenter1/vm'
      - ' folder: folder1/datacenter1/vm'
      - ' folder: /folder1/datacenter1/vm/folder2'
      - ' folder: vm/folder2'
      - ' folder: folder2'
    default: /vm
  hardware:
    description:
      - Manage some VM hardware attributes.
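For readers unfamiliar with the inventory-path search these folder examples rely on, here is a minimal, hypothetical pyVmomi snippet; the vCenter host, credentials and the 'datacenter1/vm/folder1' path are placeholders, not values from this commit:

    from pyVim.connect import SmartConnectNoSSL
    from pyVmomi import vim

    si = SmartConnectNoSSL(host='vcenter.example.com', user='administrator@vsphere.local', pwd='secret')
    content = si.RetrieveContent()

    # FindByInventoryPath() accepts absolute and relative paths alike,
    # e.g. '/datacenter1/vm/folder1' or 'datacenter1/vm/folder1'
    folder = content.searchIndex.FindByInventoryPath('datacenter1/vm/folder1')
    if folder is None:
        print('no inventory object matched that path')
    elif isinstance(folder, vim.Folder):
        print('found folder: %s' % folder.name)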
@@ -303,9 +316,8 @@ import time

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.six import iteritems
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.vmware import connect_to_api, find_obj, gather_vm_facts, get_all_objs
from ansible.module_utils.vmware import serialize_spec

try:
    import json
@@ -424,30 +436,77 @@ class PyVmomiDeviceHelper(object):

class PyVmomiCache(object):
    """ This class caches references to objects which are requested multiple times but not modified """
    def __init__(self, content):
    def __init__(self, content, dc_name=None):
        self.content = content
        self.dc_name = dc_name
        self.networks = {}
        self.clusters = {}
        self.esx_hosts = {}
        self.parent_datacenters = {}

    def find_obj(self, content, types, name, confine_to_datacenter=True):
        """ Wrapper around find_obj to set datacenter context """
        result = find_obj(content, types, name)
        if result and confine_to_datacenter:
            if self.get_parent_datacenter(result).name != self.dc_name:
                objects = self.get_all_objs(content, types, confine_to_datacenter=True)
                for obj in objects:
                    if obj.name == name:
                        return obj

        return result

    def get_all_objs(self, content, types, confine_to_datacenter=True):
        """ Wrapper around get_all_objs to set datacenter context """
        objects = get_all_objs(content, types)
        if confine_to_datacenter:
            if hasattr(objects, 'items'):
                # resource pools come back as a dictionary
                for k, v in objects.items():
                    parent_dc = self.get_parent_datacenter(k)
                    if parent_dc.name != self.dc_name:
                        objects.pop(k, None)
            else:
                # everything else should be a list
                objects = [x for x in objects if self.get_parent_datacenter(x).name == self.dc_name]

        return objects

    def get_network(self, network):
        if network not in self.networks:
            self.networks[network] = find_obj(self.content, [vim.Network], network)
            self.networks[network] = self.find_obj(self.content, [vim.Network], network)

        return self.networks[network]

    def get_cluster(self, cluster):
        if cluster not in self.clusters:
            self.clusters[cluster] = find_obj(self.content, [vim.ClusterComputeResource], cluster)
            self.clusters[cluster] = self.find_obj(self.content, [vim.ClusterComputeResource], cluster)

        return self.clusters[cluster]

    def get_esx_host(self, host):
        if host not in self.esx_hosts:
            self.esx_hosts[host] = find_obj(self.content, [vim.HostSystem], host)
            self.esx_hosts[host] = self.find_obj(self.content, [vim.HostSystem], host)

        return self.esx_hosts[host]

    def get_parent_datacenter(self, obj):
        """ Walk the parent tree to find the object's datacenter """
        if isinstance(obj, vim.Datacenter):
            return obj
        if obj in self.parent_datacenters:
            return self.parent_datacenters[obj]
        datacenter = None
        while True:
            if not hasattr(obj, 'parent'):
                break
            obj = obj.parent
            if isinstance(obj, vim.Datacenter):
                datacenter = obj
                break
        self.parent_datacenters[obj] = datacenter
        return datacenter


class PyVmomiHelper(object):
    def __init__(self, module):
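The get_parent_datacenter() walk is the heart of the new datacenter confinement. The same idea in isolation, using throwaway stand-in classes instead of live pyVmomi managed objects (all names below are illustrative only):

    # stand-ins for vim.Datacenter / vim.Folder / vim.VirtualMachine
    class Node(object):
        def __init__(self, name, parent=None):
            self.name = name
            self.parent = parent

    class Datacenter(Node):
        pass

    def get_parent_datacenter(obj):
        # climb .parent until a Datacenter is found or the root (no parent) is reached
        while obj is not None and not isinstance(obj, Datacenter):
            obj = getattr(obj, 'parent', None)
        return obj

    dc = Datacenter('DC1')
    vm_folder = Node('vm', parent=dc)
    vm = Node('testvm_1', parent=vm_folder)
    print(get_parent_datacenter(vm).name)  # -> DC1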
@@ -463,7 +522,7 @@ class PyVmomiHelper(object):
        self.change_detected = False
        self.customspec = None
        self.current_vm_obj = None
        self.cache = PyVmomiCache(self.content)
        self.cache = PyVmomiCache(self.content, dc_name=self.params['datacenter'])

    def getvm(self, name=None, uuid=None, folder=None):
@@ -476,19 +535,21 @@ class PyVmomiHelper(object):
        if uuid:
            vm = self.content.searchIndex.FindByUuid(uuid=uuid, vmSearch=True)
        elif folder:
            # Build the absolute folder path to pass into the search method
            if not self.params['folder'].startswith('/'):
                self.module.fail_json(msg="Folder %(folder)s needs to be an absolute path, starting with '/'." % self.params)
            searchpath = '%(datacenter)s%(folder)s' % self.params
            # searchpaths do not need to be absolute
            searchpath = self.params['folder']

            # get all objects for this path ...
            f_obj = self.content.searchIndex.FindByInventoryPath(searchpath)

            if f_obj:
                if isinstance(f_obj, vim.Datacenter):
                    f_obj = f_obj.vmFolder

                for c_obj in f_obj.childEntity:

                    if not isinstance(c_obj, vim.VirtualMachine):
                        continue

                    if c_obj.name == name:
                        vm = c_obj
                        if self.params['name_match'] == 'first':
@@ -650,7 +711,7 @@ class PyVmomiHelper(object):
            self.module.fail_json(msg="Network '%(name)s' does not exist" % network)

        elif 'vlan' in network:
            dvps = get_all_objs(self.content, [vim.dvs.DistributedVirtualPortgroup])
            dvps = self.cache.get_all_objs(self.content, [vim.dvs.DistributedVirtualPortgroup])
            for dvp in dvps:
                if hasattr(dvp.config.defaultPortConfig, 'vlan') and dvp.config.defaultPortConfig.vlan.vlanId == network['vlan']:
                    network['name'] = dvp.config.name
@@ -994,13 +1055,32 @@ class PyVmomiHelper(object):

        return hostsystem

    def autoselect_datastore(self):
        datastore = None
        datastore_name = None
        datastores = self.cache.get_all_objs(self.content, [vim.Datastore])

        if datastores is None or len(datastores) == 0:
            self.module.fail_json(msg="Unable to find a datastore list when autoselecting")

        datastore_freespace = 0
        for ds in datastores:
            if ds.summary.freeSpace > datastore_freespace:
                datastore = ds
                datastore_name = datastore.name
                datastore_freespace = ds.summary.freeSpace

        return datastore

    def select_datastore(self, vm_obj=None):
        datastore = None
        datastore_name = None

        if len(self.params['disk']) != 0:
            # TODO: really use the datastore for newly created disks
            if 'autoselect_datastore' in self.params['disk'][0] and self.params['disk'][0]['autoselect_datastore']:
                datastores = get_all_objs(self.content, [vim.Datastore])
                datastores = self.cache.get_all_objs(self.content, [vim.Datastore])
                datastores = [x for x in datastores if self.cache.get_parent_datacenter(x).name == self.params['datacenter']]
                if datastores is None or len(datastores) == 0:
                    self.module.fail_json(msg="Unable to find a datastore list when autoselecting")
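Datastore autoselection above simply keeps the datastore with the most free space. A self-contained sketch of that selection over made-up sample data:

    datastores = [
        {'name': 'datastore1', 'freeSpace': 120 * 1024 ** 3},
        {'name': 'datastore2', 'freeSpace': 400 * 1024 ** 3},
        {'name': 'datastore3', 'freeSpace': 80 * 1024 ** 3},
    ]

    # keep the entry with the largest freeSpace, mirroring the loop in autoselect_datastore()
    best = None
    best_free = 0
    for ds in datastores:
        if ds['freeSpace'] > best_free:
            best = ds
            best_free = ds['freeSpace']

    print(best['name'])  # -> datastore2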
@@ -1019,14 +1099,23 @@ class PyVmomiHelper(object):

            elif 'datastore' in self.params['disk'][0]:
                datastore_name = self.params['disk'][0]['datastore']
                datastore = find_obj(self.content, [vim.Datastore], datastore_name)
                datastore = self.cache.find_obj(self.content, [vim.Datastore], datastore_name)
            else:
                self.module.fail_json(msg="Either datastore or autoselect_datastore should be provided to select datastore")

        if not datastore and self.params['template']:
            # use the template's existing DS
            disks = [x for x in vm_obj.config.hardware.device if isinstance(x, vim.vm.device.VirtualDisk)]
            datastore = disks[0].backing.datastore
            datastore_name = datastore.name
            if disks:
                datastore = disks[0].backing.datastore
                datastore_name = datastore.name
            # validation
            if datastore:
                dc = self.cache.get_parent_datacenter(datastore)
                if dc.name != self.params['datacenter']:
                    datastore = self.autoselect_datastore()
                    datastore_name = datastore.name

        if not datastore:
            self.module.fail_json(msg="Failed to find a matching datastore")
@@ -1045,13 +1134,13 @@ class PyVmomiHelper(object):
        return False

    def select_resource_pool_by_name(self, resource_pool_name):
        resource_pool = find_obj(self.content, [vim.ResourcePool], resource_pool_name)
        resource_pool = self.cache.find_obj(self.content, [vim.ResourcePool], resource_pool_name)
        if resource_pool is None:
            self.module.fail_json(msg='Could not find resource_pool "%s"' % resource_pool_name)
        return resource_pool

    def select_resource_pool_by_host(self, host):
        resource_pools = get_all_objs(self.content, [vim.ResourcePool])
        resource_pools = self.cache.get_all_objs(self.content, [vim.ResourcePool])
        for rp in resource_pools.items():
            if not rp[0]:
                continue
@@ -1082,6 +1171,39 @@ class PyVmomiHelper(object):
            self.module.fail_json(msg="hardware.scsi attribute should be 'paravirtual' or 'lsilogic'")
        return disk_controller_type

    def find_folder(self, searchpath):
        """ Walk inventory objects one position of the searchpath at a time """

        # split the searchpath so we can iterate through it
        paths = [x.replace('/', '') for x in searchpath.split('/')]
        paths_total = len(paths) - 1
        position = 0

        # recursive walk while looking for next element in searchpath
        root = self.content.rootFolder
        while root and position <= paths_total:
            change = False
            if hasattr(root, 'childEntity'):
                for child in root.childEntity:
                    if child.name == paths[position]:
                        root = child
                        position += 1
                        change = True
                        break
            elif isinstance(root, vim.Datacenter):
                if hasattr(root, 'vmFolder'):
                    if root.vmFolder.name == paths[position]:
                        root = root.vmFolder
                        position += 1
                        change = True
            else:
                root = None

            if not change:
                root = None

        return root

    def deploy_vm(self):
        # https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/clone_vm.py
        # https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.vm.CloneSpec.html
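The walker above descends one path component at a time instead of relying on a single server-side lookup. The same strategy over a toy in-memory tree (the tree and names are invented for illustration):

    class Node(object):
        def __init__(self, name, children=None):
            self.name = name
            self.childEntity = children or []

    def find_folder(root, searchpath):
        # consume one path component at a time, descending only when a child name matches
        for part in [p for p in searchpath.split('/') if p]:
            match = None
            for child in getattr(root, 'childEntity', []):
                if child.name == part:
                    match = child
                    break
            if match is None:
                return None
            root = match
        return root

    folder1 = Node('folder1')
    tree = Node('root', [Node('datacenter1', [Node('vm', [folder1])])])
    print(find_folder(tree, '/datacenter1/vm/folder1') is folder1)  # -> True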
@@ -1094,15 +1216,21 @@ class PyVmomiHelper(object):
        # - static IPs

        # datacenters = get_all_objs(self.content, [vim.Datacenter])
        datacenter = find_obj(self.content, [vim.Datacenter], self.params['datacenter'])
        datacenter = self.cache.find_obj(self.content, [vim.Datacenter], self.params['datacenter'])
        if datacenter is None:
            self.module.fail_json(msg='No datacenter named %(datacenter)s was found' % self.params)

        destfolder = None
        if not self.params['folder'].startswith('/'):
            self.module.fail_json(msg="Folder %(folder)s needs to be an absolute path, starting with '/'." % self.params)

        f_obj = self.content.searchIndex.FindByInventoryPath('/%(datacenter)s%(folder)s' % self.params)
        # make an attempt with findinventorybypath, although this works poorly
        # when datacenters are nested under folders
        f_obj = self.content.searchIndex.FindByInventoryPath('%(folder)s' % self.params)

        # retry by walking down the object tree
        if f_obj is None:
            f_obj = self.find_folder(self.params['folder'])

        # abort if no strategy was successful
        if f_obj is None:
            self.module.fail_json(msg='No folder matched the path: %(folder)s' % self.params)
        destfolder = f_obj
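The resolution order here boils down to: try the fast server-side inventory-path search, then fall back to the manual tree walk. A compact sketch of that fallback, with the two strategies passed in as plain callables (placeholders, not the module's real signatures):

    def resolve_folder(searchpath, find_by_inventory_path, walk_tree):
        # strategy 1: let vCenter resolve the path directly
        f_obj = find_by_inventory_path(searchpath)
        if f_obj is None:
            # strategy 2: walk the tree ourselves; this also copes with
            # datacenters nested under folders
            f_obj = walk_tree(searchpath)
        return f_obj

    # toy usage: the first strategy finds nothing, the fallback succeeds
    print(resolve_folder('/folder1/datacenter1/vm',
                         lambda path: None,
                         lambda path: 'folder-object-for-' + path))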
@@ -1115,8 +1243,13 @@ class PyVmomiHelper(object):
        else:
            vm_obj = None

        if self.params['resource_pool']:
            resource_pool = self.select_resource_pool_by_name(self.params['resource_pool'])
        # need a resource pool if cloning from template
        if self.params['resource_pool'] or self.params['template']:
            if self.params['esxi_hostname']:
                host = self.select_host()
                resource_pool = self.select_resource_pool_by_host(host)
            else:
                resource_pool = self.select_resource_pool_by_name(self.params['resource_pool'])

            if resource_pool is None:
                self.module.fail_json(msg='Unable to find resource pool "%(resource_pool)s"' % self.params)
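The new pool-selection branch reduces to a small decision: a pool is mandatory when cloning from a template, and it comes from the target host when esxi_hostname is set, otherwise from the resource_pool name. A standalone sketch of that decision; the selector callables stand in for select_resource_pool_by_host/_by_name:

    def choose_resource_pool(params, select_by_host, select_by_name):
        # nothing to do unless a pool was requested or we clone from a template
        if not (params.get('resource_pool') or params.get('template')):
            return None
        if params.get('esxi_hostname'):
            return select_by_host(params['esxi_hostname'])
        return select_by_name(params.get('resource_pool'))

    # toy usage
    print(choose_resource_pool({'template': 'centos7-template', 'esxi_hostname': 'esxi01'},
                               lambda host: 'root-pool-of-' + host,
                               lambda name: 'pool-' + str(name)))  # -> root-pool-of-esxi01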
@@ -1143,6 +1276,8 @@ class PyVmomiHelper(object):
        if len(self.params['customization']) > 0 or network_changes is True:
            self.customize_vm(vm_obj=vm_obj)

        clonespec = None
        clone_method = None
        try:
            if self.params['template']:
                # create the relocation spec
@@ -1153,8 +1288,9 @@ class PyVmomiHelper(object):
                relospec.host = self.select_host()
                relospec.datastore = datastore

                if self.params['resource_pool']:
                    relospec.pool = resource_pool
                # https://www.vmware.com/support/developer/vc-sdk/visdk41pubs/ApiReference/vim.vm.RelocateSpec.html
                # > pool: For a clone operation from a template to a virtual machine, this argument is required.
                relospec.pool = resource_pool

                if self.params['snapshot_src'] is not None and self.params['linked_clone']:
                    relospec.diskMoveType = vim.vm.RelocateSpec.DiskMoveOptions.createNewChildDiskBacking
@@ -1171,6 +1307,7 @@ class PyVmomiHelper(object):
                    clonespec.snapshot = snapshot[0].snapshot

                clonespec.config = self.configspec
                clone_method = 'Clone'
                task = vm_obj.Clone(folder=destfolder, name=self.params['name'], spec=clonespec)
                self.change_detected = True
            else:
@@ -1181,6 +1318,7 @@ class PyVmomiHelper(object):
                                           suspendDirectory=None,
                                           vmPathName="[" + datastore_name + "] " + self.params["name"])

                clone_method = 'CreateVM_Task'
                task = destfolder.CreateVM_Task(config=self.configspec, pool=resource_pool)
                self.change_detected = True
            self.wait_for_task(task)
@@ -1191,7 +1329,20 @@ class PyVmomiHelper(object):
        if task.info.state == 'error':
            # https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2021361
            # https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2173
            return {'changed': self.change_detected, 'failed': True, 'msg': task.info.error.msg}

            # provide these to the user for debugging
            clonespec_json = serialize_spec(clonespec)
            configspec_json = serialize_spec(self.configspec)
            kwargs = {
                'changed': self.change_detected,
                'failed': True,
                'msg': task.info.error.msg,
                'clonespec': clonespec_json,
                'configspec': configspec_json,
                'clone_method': clone_method
            }

            return kwargs
        else:
            # set annotation
            vm = task.info.result
@@ -1340,9 +1491,8 @@ def main():

    result = {'failed': False, 'changed': False}

    # Prepend /vm if it was missing from the folder path, also strip trailing slashes
    if not module.params['folder'].startswith('/vm') and module.params['folder'].startswith('/'):
        module.params['folder'] = '/vm%(folder)s' % module.params
    # FindByInventoryPath() does not require an absolute path
    # so we should leave the input folder path unmodified
    module.params['folder'] = module.params['folder'].rstrip('/')

    pyv = PyVmomiHelper(module)