diff --git a/lib/ansible/modules/cloud/opennebula/one_vm.py b/lib/ansible/modules/cloud/opennebula/one_vm.py index e7fb7c20d1..87292a2d02 100644 --- a/lib/ansible/modules/cloud/opennebula/one_vm.py +++ b/lib/ansible/modules/cloud/opennebula/one_vm.py @@ -127,6 +127,15 @@ options: - C(count_labels) parameters should be deployed. Instances are either - created or terminated based on this value. - NOTE':' Instances with the least IDs will be terminated first. + mode: + description: + - Set permission mode of the instance in octet format, e.g. C(600) to give owner C(use) and C(manage) and nothing to group and others. + owner_id: + description: + - ID of the user which will be set as the owner of the instance + group_id: + description: + - ID of the group which will be set as the group of the instance memory: description: - The size of the memory for new instances (in MB, GB, ...) @@ -175,6 +184,17 @@ EXAMPLES = ''' attributes: name: foo +# Deploy a new VM and set its group_id and mode +- one_vm: + template_id: 90 + group_id: 16 + mode: 660 + +# Change VM's permissions to 640 +- one_vm: + instance_ids: 5 + mode: 640 + # Deploy 2 new instances and set memory, vcpu, disk_size and 3 networks - one_vm: template_id: 15 @@ -356,14 +376,19 @@ instances: description: vm's group name type: string sample: one-users - user_id: - description: vm's user id + owner_id: + description: vm's owner id type: integer sample: 143 - user_name: - description: vm's user name + owner_name: + description: vm's owner name type: string sample: app-user + mode: + description: vm's mode + type: string + returned: success + sample: 660 state: description: state of an instance type: string @@ -453,14 +478,19 @@ tagged_instances: description: vm's group name type: string sample: one-users - user_id: + owner_id: description: vm's user id type: integer sample: 143 - user_name: + owner_name: description: vm's user name type: string sample: app-user + mode: + description: vm's mode + type: string + 
returned: success + sample: 660 state: description: state of an instance type: string @@ -620,6 +650,8 @@ def get_vm_info(client, vm): vm_uptime = time.mktime(current_time) - time.mktime(vm_start_time) vm_uptime /= (60 * 60) + permissions_str = parse_vm_permissions(client, vm) + # LCM_STATE is VM's sub-state that is relevant only when STATE is ACTIVE vm_lcm_state = None if vm.state == VM_STATES.index('ACTIVE'): @@ -632,8 +664,8 @@ def get_vm_info(client, vm): 'vm_name': vm.name, 'state': VM_STATES[vm.state], 'lcm_state': vm_lcm_state, - 'user_name': vm.uname, - 'user_id': vm.uid, + 'owner_name': vm.uname, + 'owner_id': vm.uid, 'networks': networks_info, 'disk_size': disk_size, 'memory': vm.template.memory + ' MB', @@ -643,12 +675,93 @@ def get_vm_info(client, vm): 'group_id': vm.gid, 'uptime_h': int(vm_uptime), 'attributes': vm_attributes, + 'mode': permissions_str, 'labels': vm_labels } return info +def parse_vm_permissions(client, vm): + + import xml.etree.ElementTree as ET + vm_XML = client.call('vm.info', vm.id) + root = ET.fromstring(vm_XML) + + perm_dict = {} + + root = root.find('PERMISSIONS') + + for child in root: + perm_dict[child.tag] = child.text + + ''' + This is the structure of the 'PERMISSIONS' dictionary: + + "PERMISSIONS": { + "OWNER_U": "1", + "OWNER_M": "1", + "OWNER_A": "0", + "GROUP_U": "0", + "GROUP_M": "0", + "GROUP_A": "0", + "OTHER_U": "0", + "OTHER_M": "0", + "OTHER_A": "0" + } + ''' + + owner_octal = int(perm_dict["OWNER_U"]) * 4 + int(perm_dict["OWNER_M"]) * 2 + int(perm_dict["OWNER_A"]) + group_octal = int(perm_dict["GROUP_U"]) * 4 + int(perm_dict["GROUP_M"]) * 2 + int(perm_dict["GROUP_A"]) + other_octal = int(perm_dict["OTHER_U"]) * 4 + int(perm_dict["OTHER_M"]) * 2 + int(perm_dict["OTHER_A"]) + + permissions = str(owner_octal) + str(group_octal) + str(other_octal) + + return permissions + + +def set_vm_permissions(module, client, vms, permissions): + changed = False + + for vm in vms: + vm.info() + print(vm.id) + old_permissions = 
parse_vm_permissions(client, vm) + changed = changed or old_permissions != permissions + + if not module.check_mode and old_permissions != permissions: + permissions_str = bin(int(permissions, base=8))[2:] # 600 -> 110000000 + mode_bits = [int(d) for d in permissions_str] + try: + client.call('vm.chmod', vm.id, mode_bits[0], mode_bits[1], mode_bits[2], mode_bits[3], + mode_bits[4], mode_bits[5], mode_bits[6], mode_bits[7], mode_bits[8]) + except oca.OpenNebulaException: + module.fail_json(msg="Permissions changing is unsuccessful, but instances are present if you deployed them.") + + return changed + + +def set_vm_ownership(module, client, vms, owner_id, group_id): + changed = False + + for vm in vms: + vm.info() + if owner_id is None: + owner_id = vm.uid + if group_id is None: + group_id = vm.gid + + changed = changed or owner_id != vm.uid or group_id != vm.gid + + if not module.check_mode and (owner_id != vm.uid or group_id != vm.gid): + try: + client.call('vm.chown', vm.id, owner_id, group_id) + except oca.OpenNebulaException: + module.fail_json(msg="Ownership changing is unsuccessful, but instances are present if you deployed them.") + + return changed + + def get_size_in_MB(module, size_str): SYMBOLS = ['B', 'KB', 'MB', 'GB', 'TB'] @@ -831,15 +944,13 @@ def get_all_vms_by_attributes(client, attributes_dict, labels_list): def create_count_of_vms(module, client, template_id, count, attributes_dict, labels_list, disk_size, network_attrs_list, wait, wait_timeout): new_vms_list = [] - instances_ids = [] - instances = [] vm_name = '' if attributes_dict: vm_name = attributes_dict.get('NAME', '') if module.check_mode: - return {'changed': True} + return True, [], [] # Create list of used indexes vm_filled_indexes_list = None @@ -870,12 +981,7 @@ def create_count_of_vms(module, client, template_id, count, attributes_dict, lab for vm in new_vms_list: wait_for_running(module, vm, wait_timeout) - for vm in new_vms_list: - vm_info = get_vm_info(client, vm) - 
instances.append(vm_info) - instances_ids.append(vm.id) - - return {'changed': True, 'instances_ids': instances_ids, 'instances': instances, 'tagged_instances': instances} + return True, new_vms_list, [] def create_exact_count_of_vms(module, client, template_id, exact_count, attributes_dict, count_attributes_dict, @@ -886,23 +992,19 @@ def create_exact_count_of_vms(module, client, template_id, exact_count, attribut vm_count_diff = exact_count - len(vm_list) changed = vm_count_diff != 0 - result = {} new_vms_list = [] - instances_ids = [] - instances = [] - tagged_instances = list(get_vm_info(client, vm) for vm in vm_list) + instances_list = [] + tagged_instances_list = vm_list if module.check_mode: - return {'changed': changed, 'instances_ids': instances_ids, 'instances': instances, 'tagged_instances': tagged_instances} + return changed, instances_list, tagged_instances_list if vm_count_diff > 0: # Add more VMs - result = create_count_of_vms(module, client, template_id, vm_count_diff, attributes_dict, - labels_list, disk_size, network_attrs_list, wait, wait_timeout) - - result['tagged_instances'] += tagged_instances - return result + changed, instances_list, tagged_instances = create_count_of_vms(module, client, template_id, vm_count_diff, attributes_dict, + labels_list, disk_size, network_attrs_list, wait, wait_timeout) + tagged_instances_list += instances_list elif vm_count_diff < 0: # Delete surplus VMs old_vms_list = [] @@ -917,13 +1019,12 @@ def create_exact_count_of_vms(module, client, template_id, exact_count, attribut for vm in old_vms_list: wait_for_done(module, vm, wait_timeout) - for vm in old_vms_list: - vm_info = get_vm_info(client, vm) - instances.append(vm_info) - instances_ids.append(vm.id) - tagged_instances[:] = [dct for dct in tagged_instances if dct.get('vm_id') != vm.id] + instances_list = old_vms_list + # store only the remaining instances + old_vms_set = set(old_vms_list) + tagged_instances_list = [vm for vm in vm_list if vm not in 
old_vms_set] - return {'changed': changed, 'instances_ids': instances_ids, 'instances': instances, 'tagged_instances': tagged_instances} + return changed, instances_list, tagged_instances_list VM_STATES = ['INIT', 'PENDING', 'HOLD', 'ACTIVE', 'STOPPED', 'SUSPENDED', 'DONE', '', 'POWEROFF', 'UNDEPLOYED', 'CLONING', 'CLONING_FAILURE'] LCM_STATES = ['LCM_INIT', 'PROLOG', 'BOOT', 'RUNNING', 'MIGRATE', 'SAVE_STOP', @@ -983,28 +1084,13 @@ def terminate_vm(module, client, vm, hard=False): return changed -def terminate_vms(module, client, vms, wait, wait_timeout, hard, tagged): +def terminate_vms(module, client, vms, hard): changed = False - instances_ids = [] - instances = [] - - if tagged: - module.fail_json(msg='Option `instance_ids` is required when state is `absent`.') for vm in vms: changed = terminate_vm(module, client, vm, hard) or changed - if wait and not module.check_mode: - for vm in vms: - if vm is not None: - wait_for_done(module, vm, wait_timeout) - - for vm in vms: - if vm is not None: - instances_ids.append(vm.id) - instances.append(get_vm_info(client, vm)) - - return {'changed': changed, 'instances': instances, 'instances_ids': instances_ids, 'tagged_instances': []} + return changed def poweroff_vm(module, vm, hard): @@ -1026,32 +1112,16 @@ def poweroff_vm(module, vm, hard): return changed -def poweroff_vms(module, client, vms, wait, wait_timeout, hard, tagged): - instances_ids = [] - instances = [] - tagged_instances = [] +def poweroff_vms(module, client, vms, hard): changed = False for vm in vms: changed = poweroff_vm(module, vm, hard) or changed - if wait and not module.check_mode: - for vm in vms: - wait_for_poweroff(module, vm, wait_timeout) - - for vm in vms: - instances_ids.append(vm.id) - instances.append(get_vm_info(client, vm)) - if tagged: - tagged_instances.append(get_vm_info(client, vm)) - - return {'changed': changed, 'instances_ids': instances_ids, 'instances': instances, 'tagged_instances': tagged_instances} + return changed -def 
reboot_vms(module, client, vms, wait, wait_timeout, hard, tagged): - instances_ids = [] - instances = [] - tagged_instances = [] +def reboot_vms(module, client, vms, wait_timeout, hard): if not module.check_mode: # Firstly, power-off all instances @@ -1069,17 +1139,7 @@ def reboot_vms(module, client, vms, wait, wait_timeout, hard, tagged): for vm in vms: resume_vm(module, vm) - if wait: - for vm in vms: - wait_for_running(module, vm, wait_timeout) - - for vm in vms: - instances_ids.append(vm.id) - instances.append(get_vm_info(client, vm)) - if tagged: - tagged_instances.append(get_vm_info(client, vm)) - - return {'changed': True, 'instances_ids': instances_ids, 'instances': instances, 'tagged_instances': tagged_instances} + return True def resume_vm(module, vm): @@ -1099,27 +1159,13 @@ def resume_vm(module, vm): return changed -def resume_vms(module, client, vms, wait, wait_timeout, tagged): - instances_ids = [] - instances = [] - tagged_instances = [] - +def resume_vms(module, client, vms): changed = False for vm in vms: changed = resume_vm(module, vm) or changed - if wait and changed and not module.check_mode: - for vm in vms: - wait_for_running(module, vm, wait_timeout) - - for vm in vms: - instances_ids.append(vm.id) - instances.append(get_vm_info(client, vm)) - if tagged: - tagged_instances.append(get_vm_info(client, vm)) - - return {'changed': changed, 'instances_ids': instances_ids, 'instances': instances, 'tagged_instances': tagged_instances} + return changed def check_name_attribute(module, attributes): @@ -1153,7 +1199,7 @@ def disk_save_as(module, client, vm, disk_saveas, wait_timeout): if vm.state != VM_STATES.index('POWEROFF'): module.fail_json(msg="'disksaveas' option can be used only when the VM is in 'POWEROFF' state") client.call('vm.disksaveas', vm.id, disk_id, image_name, 'OS', -1) - wait_for_poweroff(module, vm, wait_timeout) + wait_for_poweroff(module, vm, wait_timeout) # wait for VM to leave the hotplug_saveas_poweroff state def 
get_connection_info(module): @@ -1193,6 +1239,9 @@ def main(): "choices": ['present', 'absent', 'rebooted', 'poweredoff', 'running'], "type": "str" }, + "mode": {"required": False, "type": "str"}, + "owner_id": {"required": False, "type": "int"}, + "group_id": {"required": False, "type": "int"}, "wait": {"default": True, "type": "bool"}, "wait_timeout": {"default": 300, "type": "int"}, "hard": {"default": False, "type": "bool"}, @@ -1213,6 +1262,7 @@ def main(): module = AnsibleModule(argument_spec=fields, mutually_exclusive=[ ['template_id', 'template_name', 'instance_ids'], + ['template_id', 'template_name', 'disk_saveas'], ['instance_ids', 'count_attributes', 'count'], ['instance_ids', 'count_labels', 'count'], ['instance_ids', 'exact_count'], @@ -1237,6 +1287,9 @@ def main(): requested_template_name = params.get('template_name') requested_template_id = params.get('template_id') state = params.get('state') + permissions = params.get('mode') + owner_id = params.get('owner_id') + group_id = params.get('group_id') wait = params.get('wait') wait_timeout = params.get('wait_timeout') hard = params.get('hard') @@ -1305,13 +1358,24 @@ def main(): if count <= 0: module.fail_json(msg='`count` has to be grater than 0') + if permissions is not None: + import re + if re.match("^[0-7]{3}$", permissions) is None: + module.fail_json(msg="Option `mode` has to have exactly 3 digits and be in the octet format e.g. 
600") + if exact_count is not None: # Deploy an exact count of VMs - result = create_exact_count_of_vms(module, client, template_id, exact_count, attributes, count_attributes, - labels, count_labels, disk_size, networks, hard, wait, wait_timeout) + changed, instances_list, tagged_instances_list = create_exact_count_of_vms(module, client, template_id, exact_count, attributes, + count_attributes, labels, count_labels, disk_size, + networks, hard, wait, wait_timeout) + vms = tagged_instances_list elif template_id and state == 'present': # Deploy count VMs - result = create_count_of_vms(module, client, template_id, count, attributes, labels, disk_size, networks, wait, wait_timeout) + changed, instances_list, tagged_instances_list = create_count_of_vms(module, client, template_id, count, + attributes, labels, disk_size, networks, wait, wait_timeout) + # instances_list - new instances + # tagged_instances_list - all instances with specified `count_attributes` and `count_labels` + vms = instances_list else: # Fetch data of instances, or change their state if not (instance_ids or attributes or labels): @@ -1323,8 +1387,9 @@ def main(): if hard and state not in ['rebooted', 'poweredoff', 'absent', 'present']: module.fail_json(msg="The 'hard' option can be used only for one of these states: 'rebooted', 'poweredoff', 'absent' and 'present'") - vms = None + vms = [] tagged = False + changed = False if instance_ids: vms = get_vms_by_ids(module, client, state, instance_ids) @@ -1332,34 +1397,57 @@ def main(): tagged = True vms = get_all_vms_by_attributes(client, attributes, labels) - instances = list(get_vm_info(client, vm) for vm in vms if vm is not None) - instances_ids = list(vm.id for vm in vms if vm is not None) - - if tagged: - result = {'changed': False, 'instances': instances, 'instances_ids': instances_ids, 'tagged_instances': instances} - else: - result = {'changed': False, 'instances': instances, 'instances_ids': instances_ids, 'tagged_instances': []} - if len(vms) == 
0 and state != 'absent' and state != 'present': module.fail_json(msg='There are no instances with specified `instance_ids`, `attributes` and/or `labels`') if len(vms) == 0 and state == 'present' and not tagged: module.fail_json(msg='There are no instances with specified `instance_ids`.') - if state == 'absent': - result = terminate_vms(module, client, vms, wait, wait_timeout, hard, tagged) - elif state == 'rebooted': - result = reboot_vms(module, client, vms, wait, wait_timeout, hard, tagged) - elif state == 'poweredoff': - result = poweroff_vms(module, client, vms, wait, wait_timeout, hard, tagged) - elif state == 'running': - result = resume_vms(module, client, vms, wait, wait_timeout, tagged) + if tagged and state == 'absent': + module.fail_json(msg='Option `instance_ids` is required when state is `absent`.') - if disk_saveas is not None: - if len(vms) == 0: - module.fail_json(msg="There is no VM whose disk will be saved.") - disk_save_as(module, client, vms[0], disk_saveas, wait_timeout) - result['changed'] = True + if state == 'absent': + changed = terminate_vms(module, client, vms, hard) + elif state == 'rebooted': + changed = reboot_vms(module, client, vms, wait_timeout, hard) + elif state == 'poweredoff': + changed = poweroff_vms(module, client, vms, hard) + elif state == 'running': + changed = resume_vms(module, client, vms) + + instances_list = vms + tagged_instances_list = [] + + if permissions is not None: + changed = set_vm_permissions(module, client, vms, permissions) or changed + + if owner_id is not None or group_id is not None: + changed = set_vm_ownership(module, client, vms, owner_id, group_id) or changed + + if wait and not module.check_mode and state != 'present': + wait_for = { + 'absent': wait_for_done, + 'rebooted': wait_for_running, + 'poweredoff': wait_for_poweroff, + 'running': wait_for_running + } + for vm in vms: + if vm is not None: + wait_for[state](module, vm, wait_timeout) + + if disk_saveas is not None: + if len(vms) == 0: + 
module.fail_json(msg="There is no VM whose disk will be saved.") + disk_save_as(module, client, vms[0], disk_saveas, wait_timeout) + changed = True + + # instances - a list of instances info whose state is changed or which are fetched with C(instance_ids) option + instances = list(get_vm_info(client, vm) for vm in instances_list if vm is not None) + instances_ids = list(vm.id for vm in instances_list if vm is not None) + # tagged_instances - A list of instances info based on a specific attributes and/or labels that are specified with C(count_attributes) and C(count_labels) + tagged_instances = list(get_vm_info(client, vm) for vm in tagged_instances_list if vm is not None) + + result = {'changed': changed, 'instances': instances, 'instances_ids': instances_ids, 'tagged_instances': tagged_instances} module.exit_json(**result) diff --git a/test/legacy/roles/one_vm/tasks/main.yml b/test/legacy/roles/one_vm/tasks/main.yml index 3952b10a63..1d81a75e0a 100644 --- a/test/legacy/roles/one_vm/tasks/main.yml +++ b/test/legacy/roles/one_vm/tasks/main.yml @@ -28,7 +28,7 @@ template_name: 'unknown' register: template_bad failed_when: not template_bad is failed - + - name: Check if it fails if we try to access a non-existent VM in check-mode one_vm: instance_ids: non-existent-vm-{{ ansible_date_time.iso8601_basic_short }} @@ -84,7 +84,7 @@ always: - name: Delete the VM one_vm: - instance_ids: + instance_ids: - '{{ deployed_vm.instances[0].vm_id }}' state: absent hard: yes @@ -92,7 +92,7 @@ - name: Check if deletion has done assert: - that: + that: - delete_vm is changed - delete_vm.instances_ids|length == 1 - delete_vm.instances_ids[0] == deployed_vm.instances_ids[0] @@ -107,27 +107,27 @@ - name: Check if deletion is idempotent assert: - that: + that: - not delete_vm_idempotent is changed msg: 'Deletion is not idempotent' - + - name: Delete a non-existent VM one_vm: instance_ids: - non-existent-vm-{{ ansible_date_time.iso8601_basic_short }} state: absent register: 
delete_non_existent_vm - + - name: Check if deletion is not executed assert: - that: + that: - not delete_non_existent_vm is changed msg: 'Deletion is bad, task has deleted non existent VM' - block: - name: Set the unique name of the VM set_fact: - vm_unique_name: test-vm-name-{{ ansible_date_time.iso8601_basic_short }} + vm_unique_name: test-vm-name-{{ ansible_date_time.iso8601_basic_short }} - name: Try to deploy an unique VM with exact_count but without count_attributes and count_labels one_vm: @@ -144,7 +144,7 @@ attributes: name: '{{ vm_unique_name }}' exact_count: 1 - count_attributes: + count_attributes: name: '{{ vm_unique_name }}' register: unique_vm_check_mode check_mode: yes @@ -161,7 +161,7 @@ attributes: name: '{{ vm_unique_name }}' exact_count: 1 - count_attributes: + count_attributes: name: '{{ vm_unique_name }}' register: unique_vm @@ -173,14 +173,14 @@ - unique_vm.instances|length == 1 - unique_vm.instances[0].vm_name == "{{ vm_unique_name }}" msg: Deployment of the unique VM doesn't return as 'changed' - + - name: Deploy an unique VM again to check idempotence one_vm: template_id: '{{ one_template_id }}' attributes: name: '{{ vm_unique_name }}' exact_count: 1 - count_attributes: + count_attributes: name: '{{ vm_unique_name }}' register: unique_vm_idempotent @@ -348,7 +348,7 @@ one_vm: instance_ids: - '{{ vm_register.instances[0].vm_id }}' - state: absent + state: absent hard: yes - block: @@ -364,7 +364,7 @@ - name: Check if deployment in check-mode returns as 'changed' assert: - that: + that: - deploy_vms_with_count_check_mode is changed - name: Deploy 2 VMs with attributes @@ -378,14 +378,14 @@ - name: Check if deployment in returns as 'changed' assert: - that: + that: - deploy_vms_with_count is changed - deploy_vms_with_count.instances_ids|length == 2 - deploy_vms_with_count.instances|length == 2 - - deploy_vms_with_count.tagged_instances|length == 2 - - deploy_vms_with_count.tagged_instances[0].vm_name == "aero" - - 
deploy_vms_with_count.tagged_instances[1].vm_name == "aero" - + - deploy_vms_with_count.tagged_instances|length == 0 + - deploy_vms_with_count.instances[0].vm_name == "aero" + - deploy_vms_with_count.instances[1].vm_name == "aero" + - name: Deploy 2 VMs with attributes to check it is not idempotent one_vm: template_id: '{{ one_template_id }}' @@ -397,7 +397,7 @@ - name: Check if deployment with count is not idempotent assert: - that: + that: - deploy_vms_with_count_check_idempotence is changed - deploy_vms_with_count_check_idempotence.instances_ids|length == 2 - deploy_vms_with_count_check_idempotence.instances|length == 2 @@ -447,7 +447,7 @@ name: aero hard: yes - - name: Fetch all VMs with name's format 'aero-##' + - name: Fetch all VMs with name's format 'aero-##' one_vm: attributes: name: '{{ vms_indexed_name }}' @@ -460,7 +460,7 @@ attributes: name: '{{ vms_indexed_name }}' exact_count: 3 - count_attributes: + count_attributes: name: '{{ vms_indexed_name }}' register: vms_with_hash @@ -470,11 +470,11 @@ attributes: name: aero exact_count: 2 - count_attributes: + count_attributes: name: aero register: vms_without_hash - - name: Fetch all VMs with name's format 'aero-#' + - name: Fetch all VMs with name's format 'aero-#' one_vm: attributes: name: aero-# @@ -482,9 +482,9 @@ - name: Check there are exactly 3 instances with name's format 'aero-#' assert: - that: + that: - not all_aero_vms_with_hash is changed - - all_aero_vms_with_hash.tagged_instances|length == 3 + - all_aero_vms_with_hash.instances|length == 3 - name: Decrement count of 'aero-#' instances one_vm: @@ -508,13 +508,13 @@ name: 'aero-#' register: new_vm_with_hash - - name: Check if new VM has index 0 + - name: Check if new VM has index 0 assert: - that: + that: - new_vm_with_hash is changed - new_vm_with_hash.instances_ids|length == 1 - new_vm_with_hash.instances|length == 1 - - new_vm_with_hash.tagged_instances|length == 1 + - new_vm_with_hash.tagged_instances|length == 0 - 
new_vm_with_hash.instances[0].vm_name|regex_replace('(\d+)$','\1')|int == 0 always: @@ -603,7 +603,7 @@ - name: Set special label for a new instance set_fact: - vm_spec_label: spec-label-{{ ansible_date_time.iso8601_basic_short }} + vm_spec_label: spec-label-{{ ansible_date_time.iso8601_basic_short }} - name: Add a new instance in the group of instances with label 'foo' one_vm: @@ -615,7 +615,7 @@ count_labels: - foo register: new_vm_with_label - + - name: Fetch all instances with special label one_vm: labels: @@ -626,7 +626,6 @@ assert: that: - not vm_with_special_label is changed - - vm_with_special_label.tagged_instances|length == 1 - vm_with_special_label.instances_ids|length == 1 - vm_with_special_label.instances_ids[0] == new_vm_with_label.instances_ids[0] @@ -673,7 +672,7 @@ name: 'aero-###' foo_app: foo count: 2 - + - name: Deploy 2 instances with different value for attribute one_vm: template_id: '{{ one_template_id }}' @@ -687,12 +686,12 @@ attributes: foo_app: register: all_foo_app_vms - + - name: Check there are 4 VMs with 'foo_app' key assert: that: - - all_foo_app_vms.tagged_instances|length == 4 - + - all_foo_app_vms.instances|length == 4 + - name: Decrement count of VMs with 'foo_app' key one_vm: template_id: '{{ one_template_id }}' @@ -737,7 +736,7 @@ labels_list: - bar1 - bar2 - + - name: Deploy an instance with name 'app1', attribute 'foo app' and labels 'bar1' and 'bar2' one_vm: template_id: '{{ one_template_id }}' @@ -764,7 +763,7 @@ assert: that: instance_with_labels.instances[0].labels|difference(labels_list)|length == 0 msg: Labels are not correct - + - name: Check that name is correct assert: that: instance_with_labels.instances[0].vm_name == 'app1' @@ -779,21 +778,21 @@ - name: Try to use letters for ids option one_vm: - instance_ids: + instance_ids: - asd - 123 state: running register: ids_with_letters - failed_when: not ids_with_letters is failed + failed_when: not ids_with_letters is failed - name: Try to use letters for ids option when 
terminate vms + one_vm: + instance_ids: + - asd + - 123 state: absent register: ids_with_letters - failed_when: ids_with_letters is failed + failed_when: ids_with_letters is failed - name: Try to use restricted attributes when deploying one_vm: @@ -809,12 +808,228 @@ that: - restricted_attributes.msg == "Restricted attribute `DISK` cannot be used when filtering VMs." +- block: + - name: Deploy VM and set its mode + one_vm: + template_id: '{{ one_template_id }}' + mode: 640 + register: deployed_vm + + - name: Check if mode is set correctly + assert: + that: + - deployed_vm is changed + - deployed_vm.instances|length == 1 + - deployed_vm.instances[0].mode == "640" + + - name: Set VM permissions to 660 + one_vm: + instance_ids: '{{ deployed_vm.instances_ids }}' + mode: 660 + register: deployed_vm + + - name: Check if mode is set correctly + assert: + that: + - deployed_vm is changed + - deployed_vm.instances|length == 1 + - deployed_vm.instances[0].mode == "660" + + - name: Set 660 permissions again to check idempotence + one_vm: + instance_ids: '{{ deployed_vm.instances_ids[0] }}' + mode: 660 + register: chmod_idempotent + + - name: Check if chmod is idempotent + assert: + that: + - chmod_idempotent is not changed + msg: 'Permissions changing is not idempotent' + + - name: Try to set permissions incorrectly + one_vm: + instance_ids: '{{ deployed_vm.instances_ids[0] }}' + mode: 8983 + register: chmod_failed + failed_when: not chmod_failed is failed + + - name: Try to set permissions incorrectly + one_vm: + instance_ids: '{{ deployed_vm.instances_ids[0] }}' + mode: 64a + register: chmod_failed + failed_when: not chmod_failed is failed + + - name: Set 664 permissions + one_vm: + instance_ids: '{{ deployed_vm.instances_ids[0] }}' + mode: 664 + register: vm_chmod + + - name: Verify permissions changing + assert: + that: + - vm_chmod is changed + - vm_chmod.instances|length == 1 + - vm_chmod.instances[0].mode == "664" + msg: 'Permissions changing is failed' + + - 
name: Deploy 2 VMs with label 'test-mode' and mode 640 + one_vm: + template_id: '{{ one_template_id }}' + count_labels: + - test-mode + exact_count: 2 + mode: 640 + register: deployed_vm2 + + - name: Verify VMs permissions + assert: + that: + - deployed_vm2 is changed + - deployed_vm2.instances|length == 2 + - deployed_vm2.instances[0].mode == "640" + - deployed_vm2.instances[1].mode == "640" + + - name: Change permissions of first VM + one_vm: + instance_ids: '{{ deployed_vm2.instances_ids[0] }}' + mode: 644 + register: chmod_vm1 + + - name: Verify VM permissions + assert: + that: + - chmod_vm1 is changed + - chmod_vm1.instances|length == 1 + - chmod_vm1.instances[0].mode == "644" + + - name: Change permissions on both VMs + one_vm: + instance_ids: '{{ deployed_vm2.instances_ids }}' + mode: 644 + register: deployed_vm2 + + - name: Verify VMs permissions + assert: + that: + - deployed_vm2 is changed + - deployed_vm2.instances|length == 2 + - deployed_vm2.instances[0].mode == "644" + - deployed_vm2.instances[1].mode == "644" + + - name: Change VMs permissions using the label + one_vm: + labels: + - test-mode + mode: 664 + register: label_chmod + + - name: Verify VMs permissions + assert: + that: + - label_chmod is changed + - label_chmod.instances|length == 2 + - label_chmod.instances[0].mode == "664" + - label_chmod.instances[1].mode == "664" + + - name: Deploy 2 more VMs with label 'test-mode' and mode 640 + one_vm: + template_id: '{{ one_template_id }}' + count_labels: + - test-mode + exact_count: 4 + mode: 640 + register: deployed_vm4 + + - name: Verify VMs permissions + assert: + that: + - deployed_vm4 is changed + - deployed_vm4.tagged_instances|length == 4 + - deployed_vm4.tagged_instances[0].mode == "640" + - deployed_vm4.tagged_instances[1].mode == "640" + - deployed_vm4.tagged_instances[2].mode == "640" + - deployed_vm4.tagged_instances[3].mode == "640" + + - name: Terminate 2 VMs with label 'test-mode' and set mode 660 on remaining VMs + one_vm: + 
template_id: '{{ one_template_id }}' + count_labels: + - test-mode + exact_count: 2 + mode: 660 + register: terminate_vm4 + + - name: Verify VMs permissions + assert: + that: + - terminate_vm4 is changed + - terminate_vm4.instances|length == 2 # 2 removed + - terminate_vm4.tagged_instances|length == 2 # 2 remaining with label test-mode + - terminate_vm4.instances[0].mode == "640" + - terminate_vm4.instances[1].mode == "640" + - terminate_vm4.tagged_instances[0].mode == "660" + - terminate_vm4.tagged_instances[1].mode == "660" + + always: + - name: Delete VM + one_vm: + instance_ids: '{{ deployed_vm.instances_ids }}' + state: absent + hard: yes + + - name: Delete VMs + one_vm: + instance_ids: '{{ deployed_vm4.instances_ids }}' + state: absent + hard: yes + tags: test-chmod + +- block: + - name: Deploy VM + one_vm: + template_id: '{{ one_template_id }}' + register: deployed_vm + + - name: Change VM's group + one_vm: + instance_ids: '{{ deployed_vm.instances_ids }}' + group_id: 1 + register: changed_group + + - name: Verify group changing + assert: + that: + - deployed_vm is changed + - changed_group is changed + - deployed_vm.instances|length == 1 + - changed_group.instances|length == 1 + - changed_group.instances[0].owner_id == deployed_vm.instances[0].owner_id + - changed_group.instances[0].group_id != deployed_vm.instances[0].group_id + + - name: Try to set non-existent group + one_vm: + instance_ids: '{{ deployed_vm.instances_ids }}' + group_id: -999 + register: changed_group + failed_when: changed_group is not failed + + always: + - name: Delete VM + one_vm: + instance_ids: '{{ deployed_vm.instances_ids }}' + state: absent + hard: yes + tags: test-chown + - name: Test images creation block: - name: Set fact image name set_fact: image_name: test-image-name-{{ ansible_date_time.iso8601_basic_short }} - + - name: Deploy VM one_vm: template_id: '{{ one_template_id }}' @@ -834,7 +1049,7 @@ name: '{{ image_name }}' register: save_disk_labels failed_when: not 
save_disk_labels is failed - + - name: Try to save disk in running state to check if it will fail one_vm: instance_ids: '{{ vm_image.instances_ids }}'