From bd0c613177c71bb81f2177cd0e7c9effc157cb6f Mon Sep 17 00:00:00 2001 From: Klention Mali Date: Mon, 14 Jul 2025 19:27:34 +0200 Subject: [PATCH 01/14] Added lvm_pv_move_data module --- .github/BOTMETA.yml | 2 + plugins/modules/lvm_pv_move_data.py | 167 ++++++++++++++++++ .../targets/lvm_pv_move_data/aliases | 13 ++ .../targets/lvm_pv_move_data/meta/main.yml | 9 + .../lvm_pv_move_data/tasks/cleanup.yml | 31 ++++ .../lvm_pv_move_data/tasks/creation.yml | 91 ++++++++++ .../targets/lvm_pv_move_data/tasks/main.yml | 25 +++ .../targets/lvm_pv_move_data/tasks/moving.yml | 30 ++++ 8 files changed, 368 insertions(+) create mode 100644 plugins/modules/lvm_pv_move_data.py create mode 100644 tests/integration/targets/lvm_pv_move_data/aliases create mode 100644 tests/integration/targets/lvm_pv_move_data/meta/main.yml create mode 100644 tests/integration/targets/lvm_pv_move_data/tasks/cleanup.yml create mode 100644 tests/integration/targets/lvm_pv_move_data/tasks/creation.yml create mode 100644 tests/integration/targets/lvm_pv_move_data/tasks/main.yml create mode 100644 tests/integration/targets/lvm_pv_move_data/tasks/moving.yml diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index fac3fae8f8..9185c11834 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -903,6 +903,8 @@ files: maintainers: abulimov $modules/lvm_pv.py: maintainers: klention + $modules/lvm_pv_move_data.py: + maintainers: klention $modules/lvg_rename.py: maintainers: lszomor $modules/lvol.py: diff --git a/plugins/modules/lvm_pv_move_data.py b/plugins/modules/lvm_pv_move_data.py new file mode 100644 index 0000000000..05e63b7667 --- /dev/null +++ b/plugins/modules/lvm_pv_move_data.py @@ -0,0 +1,167 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2025, Klention Mali +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import absolute_import, division, 
print_function +__metaclass__ = type + + +DOCUMENTATION = r""" +module: lvm_pv_move_data +short_description: Move data between LVM Physical Volumes (PVs) +version_added: "11.2.0" +description: + - Moves data from one LVM Physical Volume (PV) to another. +author: + - Klention Mali (@klention) +options: + source: + description: + - Path to the source block device to move data from. + - Must be an existing PV. + type: path + required: true + destination: + description: + - Path to the destination block device to move data to. + - Must be an existing PV with enough free space. + type: path + required: true +notes: + - Requires LVM2 utilities installed on the target system. + - Both source and destination devices must exist. + - Both source and destination PVs must be in the same volume group. + - The destination PV must have enough free space to accommodate the source PV's allocated extents. +""" + +EXAMPLES = r""" +- name: Moving data from /dev/sdb to /dev/sdc + community.general.lvm_pv_move_data: + source: /dev/sdb + destination: /dev/sdc +""" + +RETURN = r""" +""" + + +import os +from ansible.module_utils.basic import AnsibleModule + + +def get_pv_status(module, device): + """Check if the device is already a PV.""" + cmd = ['pvs', '--noheadings', '--readonly', device] + return module.run_command(cmd)[0] == 0 + + +def get_pv_vg(module, device): + """Get the VG name of a PV.""" + cmd = ['pvs', '--noheadings', '-o', 'vg_name', device] + rc, out, err = module.run_command(cmd) + if rc != 0: + module.fail_json(msg="Failed to get VG for device %s: %s" % (device, err)) + vg = out.strip() + return None if vg == '' else vg + + +def get_pv_allocated_pe(module, device): + """Get count of allocated physical extents in a PV.""" + cmd = ['pvs', '--noheadings', '-o', 'pv_pe_alloc_count', device] + rc, out, err = module.run_command(cmd) + if rc != 0: + module.fail_json(msg="Failed to get allocated PE count for device %s: %s" % (device, err)) + try: + return int(out.strip()) + except 
ValueError: + module.fail_json(msg="Invalid allocated PE count for device %s: %s" % (device, out)) + + +def get_pv_total_pe(module, device): + """Get total number of physical extents in a PV.""" + cmd = ['pvs', '--noheadings', '-o', 'pv_pe_count', device] + rc, out, err = module.run_command(cmd) + if rc != 0: + module.fail_json(msg="Failed to get total PE count for device %s: %s" % (device, err)) + try: + return int(out.strip()) + except ValueError: + module.fail_json(msg="Invalid total PE count for device %s: %s" % (device, out)) + + +def get_pv_free_pe(module, device): + """Calculate free physical extents in a PV.""" + total_pe = get_pv_total_pe(module, device) + allocated_pe = get_pv_allocated_pe(module, device) + return total_pe - allocated_pe + + +def main(): + module = AnsibleModule( + argument_spec=dict( + source=dict(type='path', required=True), + destination=dict(type='path', required=True), + ), + supports_check_mode=True, + ) + + source = module.params['source'] + destination = module.params['destination'] + changed = False + actions = [] + + # Validate device existence + if not os.path.exists(source): + module.fail_json(msg="Source device %s not found" % source) + if not os.path.exists(destination): + module.fail_json(msg="Destination device %s not found" % destination) + if source == destination: + module.fail_json(msg="Source and destination devices must be different") + + # Check both are PVs + if not get_pv_status(module, source): + module.fail_json(msg="Source device %s is not a PV" % source) + if not get_pv_status(module, destination): + module.fail_json(msg="Destination device %s is not a PV" % destination) + + # Check both are in the same VG + vg_src = get_pv_vg(module, source) + vg_dest = get_pv_vg(module, destination) + if vg_src != vg_dest: + module.fail_json( + msg="Source and destination must be in the same VG. Source VG: '%s', Destination VG: '%s'." 
% (vg_src, vg_dest) + ) + + # Check source has data to move + allocated = get_pv_allocated_pe(module, source) + if allocated == 0: + actions.append('no allocated extents to move') + else: + # Check destination has enough free space + free_pe_dest = get_pv_free_pe(module, destination) + if free_pe_dest < allocated: + module.fail_json( + msg="Destination device %s has only %d free physical extents, but source device %s has %d allocated extents. Not enough space." % + (destination, free_pe_dest, source, allocated) + ) + + if module.check_mode: + changed = True + actions.append('would move data from %s to %s' % (source, destination)) + else: + cmd = ['pvmove', source, destination] + rc, out, err = module.run_command(cmd, check_rc=True) + changed = True + actions.append('moved data from %s to %s' % (source, destination)) + + if actions: + msg = "PV data move: %s" % ', '.join(actions) + else: + msg = "No data to move from %s" % source + module.exit_json(changed=changed, msg=msg) + + +if __name__ == '__main__': + main() diff --git a/tests/integration/targets/lvm_pv_move_data/aliases b/tests/integration/targets/lvm_pv_move_data/aliases new file mode 100644 index 0000000000..232151c541 --- /dev/null +++ b/tests/integration/targets/lvm_pv_move_data/aliases @@ -0,0 +1,13 @@ +# Copyright (c) Contributors to the Ansible project +# Based on the integraton test for the lvm_pv module +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +azp/posix/1 +azp/posix/vm +destructive +needs/privileged +skip/aix +skip/freebsd +skip/osx +skip/macos diff --git a/tests/integration/targets/lvm_pv_move_data/meta/main.yml b/tests/integration/targets/lvm_pv_move_data/meta/main.yml new file mode 100644 index 0000000000..90c5d5cb8d --- /dev/null +++ b/tests/integration/targets/lvm_pv_move_data/meta/main.yml @@ -0,0 +1,9 @@ +--- +# Copyright (c) Ansible Project +# Based on the integraton test 
for the lvg module +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +dependencies: + - setup_pkg_mgr + - setup_remote_tmp_dir diff --git a/tests/integration/targets/lvm_pv_move_data/tasks/cleanup.yml b/tests/integration/targets/lvm_pv_move_data/tasks/cleanup.yml new file mode 100644 index 0000000000..65b8b0bbe4 --- /dev/null +++ b/tests/integration/targets/lvm_pv_move_data/tasks/cleanup.yml @@ -0,0 +1,31 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Unmounting LV lv_tmp_test from {{ remote_tmp_dir }}/tmp_mount + ansible.posix.mount: + fstab: '{{ remote_tmp_dir }}/fstab' + path: '{{ remote_tmp_dir }}/tmp_mount' + state: absent + +- name: Removing xfs filesystem from LV lv_tmp_test + community.general.filesystem: + dev: /dev/vg_tmp_test/lv_tmp_test + status: absent + +- name: Detaching first loop device + ansible.builtin.command: losetup -d {{ loop_device_01.stdout }} + +- name: Detaching second loop device + ansible.builtin.command: losetup -d {{ loop_device_02.stdout }} + +- name: Removing first loop device file + ansible.builtin.file: + path: "{{ remote_tmp_dir }}/test_lvm_pv_01.img" + state: absent + +- name: Removing second loop device file + ansible.builtin.file: + path: "{{ remote_tmp_dir }}/test_lvm_pv_02.img" + state: absent diff --git a/tests/integration/targets/lvm_pv_move_data/tasks/creation.yml b/tests/integration/targets/lvm_pv_move_data/tasks/creation.yml new file mode 100644 index 0000000000..b48b00bcfb --- /dev/null +++ b/tests/integration/targets/lvm_pv_move_data/tasks/creation.yml @@ -0,0 +1,91 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# 
SPDX-License-Identifier: GPL-3.0-or-later + +- name: Creating a 3000MB file for the first loop device + ansible.builtin.command: dd if=/dev/zero of={{ remote_tmp_dir }}/test_lvm_pv_01.img bs=1M count=3000 + args: + creates: "{{ remote_tmp_dir }}/test_lvm_pv_01.img" + +- name: Creating a 4000MB file for the second loop device + ansible.builtin.command: dd if=/dev/zero of={{ remote_tmp_dir }}/test_lvm_pv_02.img bs=1M count=4000 + args: + creates: "{{ remote_tmp_dir }}/test_lvm_pv_02.img" + +- name: Creating loop device + ansible.builtin.command: losetup -f + register: loop_device_01 + +- name: Associating loop device with file + ansible.builtin.command: 'losetup {{ loop_device_01.stdout }} {{ remote_tmp_dir }}/test_lvm_pv_01.img' + +- name: Creating loop device + ansible.builtin.command: losetup -f + register: loop_device_02 + +- name: Associating loop device with file + ansible.builtin.command: 'losetup {{ loop_device_02.stdout }} {{ remote_tmp_dir }}/test_lvm_pv_02.img' + +- name: Creating physical volume for the first loop device + community.general.lvm_pv: + device: "{{ loop_device_01.stdout }}" + register: pv_creation_result_01 + +- name: Checking the first physical volume size + ansible.builtin.command: pvs --noheadings -o pv_size --units M {{ loop_device_01.stdout }} + register: pv_size_output_01 + +- name: Creating physical volume for the second loop device + community.general.lvm_pv: + device: "{{ loop_device_02.stdout }}" + register: pv_creation_result_02 + +- name: Checking the second physical volume size + ansible.builtin.command: pvs --noheadings -o pv_size --units M {{ loop_device_02.stdout }} + register: pv_size_output_02 + +- name: Creating volume group on top of first device {{ loop_device_01.stdout }} + community.general.lvg: + vg: vg_tmp_test + pvs: "{{ loop_device_01.stdout }}" + register: vg_creation_result + +- name: Creating LV lv_tmp_test on VG vg_tmp_test + community.general.lvol: + vg: vg_tmp_test + lv: lv_tmp_test + size: 100%FREE + force: 
true + register: lv_creation_result + +- name: Creating xfs filesystem on LV lv_tmp_test + community.general.filesystem: + dev: /dev/vg_tmp_test/lv_tmp_test + fstype: xfs + register: fs_creation_result + +#- name: Mounting LV lv_tmp_test to {{ remote_tmp_dir }}/tmp_mount +# ansible.builtin.shell: mount -o rw,noauto /dev/vg_tmp_test/lv_tmp_test {{ remote_tmp_dir }}/tmp_mount + +- name: Mounting LV lv_tmp_test to {{ remote_tmp_dir }}/tmp_mount + ansible.posix.mount: + fstab: '{{ remote_tmp_dir }}/fstab' + path: '{{ remote_tmp_dir }}/tmp_mount' + src: '/dev/vg_tmp_test/lv_tmp_test' + fstype: xfs + opts: rw,noauto + state: mounted + +- name: Asserting physical volume was created + ansible.builtin.assert: + that: + - pv_creation_result_01.changed == true + - pv_creation_result_02.changed == true + - vg_creation_result.changed == true + - (pv_size_output_01.stdout | trim | regex_replace('M', '') | float) > 95 + - (pv_size_output_01.stdout | trim | regex_replace('M', '') | float) < 120 + - (pv_size_output_02.stdout | trim | regex_replace('M', '') | float) > 190 + - (pv_size_output_02.stdout | trim | regex_replace('M', '') | float) < 220 + - "'created' in pv_creation_result_01.msg" + - "'created' in pv_creation_result_02.msg" diff --git a/tests/integration/targets/lvm_pv_move_data/tasks/main.yml b/tests/integration/targets/lvm_pv_move_data/tasks/main.yml new file mode 100644 index 0000000000..d25c667114 --- /dev/null +++ b/tests/integration/targets/lvm_pv_move_data/tasks/main.yml @@ -0,0 +1,25 @@ +--- +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Copyright (c) Contributors to the Ansible project +# Based on the integraton test for the lvm_pv module +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or 
https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Install required packages (Linux) + when: ansible_system == 'Linux' + ansible.builtin.package: + name: lvm2 + state: present + +- name: Testing lvm_pv_move_data module + block: + - import_tasks: creation.yml + + - import_tasks: moving.yml + + always: + - import_tasks: cleanup.yml diff --git a/tests/integration/targets/lvm_pv_move_data/tasks/moving.yml b/tests/integration/targets/lvm_pv_move_data/tasks/moving.yml new file mode 100644 index 0000000000..e132833b3d --- /dev/null +++ b/tests/integration/targets/lvm_pv_move_data/tasks/moving.yml @@ -0,0 +1,30 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Creating a 50MB file on the mounted LV + ansible.builtin.command: dd if=/dev/zero of={{ remote_tmp_dir }}/tmp_mount/test_file bs=1M count=50 + args: + creates: "{{ remote_tmp_dir }}/tmp_mount/test_file" + +- name: Extending VG vg_tmp_test with second device {{ loop_device_02.stdout }} + community.general.lvg: + vg: vg_tmp_test + pvs: "{{ loop_device_02.stdout }}" + +- name: Moving data from {{ loop_device_01.stdout }} to {{ loop_device_02.stdout }} (both in same VG) + community.general.lvm_pv_move_data: + source: "{{ loop_device_01.stdout }}" + destination: "{{ loop_device_02.stdout }}" + register: move_result + +- name: Debugging move result + ansible.builtin.debug: + var: move_result + +- name: Asserting data was moved successfully + ansible.builtin.assert: + that: + - move_result.changed == true + - "'moved data from' in creation_result.msg" From cca174babb203a84e6380f280ddfdd6999cc8dca Mon Sep 17 00:00:00 2001 From: Klention Mali Date: Mon, 14 Jul 2025 19:46:14 +0200 Subject: [PATCH 02/14] Removed trailing whitespace --- plugins/modules/lvm_pv_move_data.py | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/plugins/modules/lvm_pv_move_data.py b/plugins/modules/lvm_pv_move_data.py index 05e63b7667..82725f0735 100644 --- a/plugins/modules/lvm_pv_move_data.py +++ b/plugins/modules/lvm_pv_move_data.py @@ -143,7 +143,7 @@ def main(): free_pe_dest = get_pv_free_pe(module, destination) if free_pe_dest < allocated: module.fail_json( - msg="Destination device %s has only %d free physical extents, but source device %s has %d allocated extents. Not enough space." % + msg="Destination device %s has only %d free physical extents, but source device %s has %d allocated extents. Not enough space." % (destination, free_pe_dest, source, allocated) ) From e5d08539fec9cb3d161756fc36e9fd9ef2b742e6 Mon Sep 17 00:00:00 2001 From: Klention Mali Date: Mon, 14 Jul 2025 20:03:47 +0200 Subject: [PATCH 03/14] Decreased loop devices file size --- .../targets/lvm_pv_move_data/tasks/cleanup.yml | 4 ++-- .../targets/lvm_pv_move_data/tasks/creation.yml | 11 ++++++----- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/tests/integration/targets/lvm_pv_move_data/tasks/cleanup.yml b/tests/integration/targets/lvm_pv_move_data/tasks/cleanup.yml index 65b8b0bbe4..7c511f4fa5 100644 --- a/tests/integration/targets/lvm_pv_move_data/tasks/cleanup.yml +++ b/tests/integration/targets/lvm_pv_move_data/tasks/cleanup.yml @@ -3,7 +3,7 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -- name: Unmounting LV lv_tmp_test from {{ remote_tmp_dir }}/tmp_mount +- name: Unmounting LV lv_tmp_test ansible.posix.mount: fstab: '{{ remote_tmp_dir }}/fstab' path: '{{ remote_tmp_dir }}/tmp_mount' @@ -12,7 +12,7 @@ - name: Removing xfs filesystem from LV lv_tmp_test community.general.filesystem: dev: /dev/vg_tmp_test/lv_tmp_test - status: absent + state: absent - name: Detaching first loop device ansible.builtin.command: losetup -d {{ loop_device_01.stdout }} diff --git 
a/tests/integration/targets/lvm_pv_move_data/tasks/creation.yml b/tests/integration/targets/lvm_pv_move_data/tasks/creation.yml index b48b00bcfb..1b09e1a07f 100644 --- a/tests/integration/targets/lvm_pv_move_data/tasks/creation.yml +++ b/tests/integration/targets/lvm_pv_move_data/tasks/creation.yml @@ -3,13 +3,13 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -- name: Creating a 3000MB file for the first loop device - ansible.builtin.command: dd if=/dev/zero of={{ remote_tmp_dir }}/test_lvm_pv_01.img bs=1M count=3000 +- name: Creating a 500MB file for the first loop device + ansible.builtin.command: dd if=/dev/zero of={{ remote_tmp_dir }}/test_lvm_pv_01.img bs=1M count=500 args: creates: "{{ remote_tmp_dir }}/test_lvm_pv_01.img" -- name: Creating a 4000MB file for the second loop device - ansible.builtin.command: dd if=/dev/zero of={{ remote_tmp_dir }}/test_lvm_pv_02.img bs=1M count=4000 +- name: Creating a 1000MB file for the second loop device + ansible.builtin.command: dd if=/dev/zero of={{ remote_tmp_dir }}/test_lvm_pv_02.img bs=1M count=1000 args: creates: "{{ remote_tmp_dir }}/test_lvm_pv_02.img" @@ -63,12 +63,13 @@ community.general.filesystem: dev: /dev/vg_tmp_test/lv_tmp_test fstype: xfs + state: present register: fs_creation_result #- name: Mounting LV lv_tmp_test to {{ remote_tmp_dir }}/tmp_mount # ansible.builtin.shell: mount -o rw,noauto /dev/vg_tmp_test/lv_tmp_test {{ remote_tmp_dir }}/tmp_mount -- name: Mounting LV lv_tmp_test to {{ remote_tmp_dir }}/tmp_mount +- name: Mounting LV lv_tmp_test ansible.posix.mount: fstab: '{{ remote_tmp_dir }}/fstab' path: '{{ remote_tmp_dir }}/tmp_mount' From eb37c64189766d22629d65331f00f16957ce1f19 Mon Sep 17 00:00:00 2001 From: Klention Mali Date: Mon, 14 Jul 2025 20:18:36 +0200 Subject: [PATCH 04/14] Remove test VG if exists --- .../targets/lvm_pv_move_data/tasks/creation.yml | 8 +++++--- 1 file 
changed, 5 insertions(+), 3 deletions(-) diff --git a/tests/integration/targets/lvm_pv_move_data/tasks/creation.yml b/tests/integration/targets/lvm_pv_move_data/tasks/creation.yml index 1b09e1a07f..b2baefc6e6 100644 --- a/tests/integration/targets/lvm_pv_move_data/tasks/creation.yml +++ b/tests/integration/targets/lvm_pv_move_data/tasks/creation.yml @@ -45,6 +45,11 @@ ansible.builtin.command: pvs --noheadings -o pv_size --units M {{ loop_device_02.stdout }} register: pv_size_output_02 +- name: Making sure volume group vg_tmp_test does not exist + community.general.lvg: + vg: vg_tmp_test + state: absent + - name: Creating volume group on top of first device {{ loop_device_01.stdout }} community.general.lvg: vg: vg_tmp_test @@ -66,9 +71,6 @@ state: present register: fs_creation_result -#- name: Mounting LV lv_tmp_test to {{ remote_tmp_dir }}/tmp_mount -# ansible.builtin.shell: mount -o rw,noauto /dev/vg_tmp_test/lv_tmp_test {{ remote_tmp_dir }}/tmp_mount - - name: Mounting LV lv_tmp_test ansible.posix.mount: fstab: '{{ remote_tmp_dir }}/fstab' From 80fd68c39dc2dc2b53e00142fb8ee00ea098b0f2 Mon Sep 17 00:00:00 2001 From: Klention Mali Date: Mon, 14 Jul 2025 20:23:48 +0200 Subject: [PATCH 05/14] Force remove test VG if exists --- tests/integration/targets/lvm_pv_move_data/tasks/creation.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/integration/targets/lvm_pv_move_data/tasks/creation.yml b/tests/integration/targets/lvm_pv_move_data/tasks/creation.yml index b2baefc6e6..dedee3c750 100644 --- a/tests/integration/targets/lvm_pv_move_data/tasks/creation.yml +++ b/tests/integration/targets/lvm_pv_move_data/tasks/creation.yml @@ -48,12 +48,14 @@ - name: Making sure volume group vg_tmp_test does not exist community.general.lvg: vg: vg_tmp_test + force: true state: absent - name: Creating volume group on top of first device {{ loop_device_01.stdout }} community.general.lvg: vg: vg_tmp_test pvs: "{{ loop_device_01.stdout }}" + force: true register: 
vg_creation_result - name: Creating LV lv_tmp_test on VG vg_tmp_test From b28baac61d51444a16ef11dd40ee449ce48cd634 Mon Sep 17 00:00:00 2001 From: Klention Mali Date: Mon, 14 Jul 2025 20:37:00 +0200 Subject: [PATCH 06/14] Renamed test VG and LV names --- .../lvm_pv_move_data/tasks/creation.yml | 20 +++++++++---------- .../targets/lvm_pv_move_data/tasks/moving.yml | 4 ++-- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/tests/integration/targets/lvm_pv_move_data/tasks/creation.yml b/tests/integration/targets/lvm_pv_move_data/tasks/creation.yml index dedee3c750..7dce72edd2 100644 --- a/tests/integration/targets/lvm_pv_move_data/tasks/creation.yml +++ b/tests/integration/targets/lvm_pv_move_data/tasks/creation.yml @@ -45,39 +45,39 @@ ansible.builtin.command: pvs --noheadings -o pv_size --units M {{ loop_device_02.stdout }} register: pv_size_output_02 -- name: Making sure volume group vg_tmp_test does not exist +- name: Making sure volume group testvg does not exist community.general.lvg: - vg: vg_tmp_test + vg: testvg force: true state: absent - name: Creating volume group on top of first device {{ loop_device_01.stdout }} community.general.lvg: - vg: vg_tmp_test + vg: testvg pvs: "{{ loop_device_01.stdout }}" force: true register: vg_creation_result -- name: Creating LV lv_tmp_test on VG vg_tmp_test +- name: Creating LV testlv on VG testvg community.general.lvol: - vg: vg_tmp_test - lv: lv_tmp_test + vg: testvg + lv: testlv size: 100%FREE force: true register: lv_creation_result -- name: Creating xfs filesystem on LV lv_tmp_test +- name: Creating xfs filesystem on LV testlv community.general.filesystem: - dev: /dev/vg_tmp_test/lv_tmp_test + dev: /dev/testvg/testlv fstype: xfs state: present register: fs_creation_result -- name: Mounting LV lv_tmp_test +- name: Mounting LV testlv ansible.posix.mount: fstab: '{{ remote_tmp_dir }}/fstab' path: '{{ remote_tmp_dir }}/tmp_mount' - src: '/dev/vg_tmp_test/lv_tmp_test' + src: '/dev/testvg/testlv' fstype: xfs 
opts: rw,noauto state: mounted diff --git a/tests/integration/targets/lvm_pv_move_data/tasks/moving.yml b/tests/integration/targets/lvm_pv_move_data/tasks/moving.yml index e132833b3d..3f18ff444a 100644 --- a/tests/integration/targets/lvm_pv_move_data/tasks/moving.yml +++ b/tests/integration/targets/lvm_pv_move_data/tasks/moving.yml @@ -8,9 +8,9 @@ args: creates: "{{ remote_tmp_dir }}/tmp_mount/test_file" -- name: Extending VG vg_tmp_test with second device {{ loop_device_02.stdout }} +- name: Extending VG testvg with second device {{ loop_device_02.stdout }} community.general.lvg: - vg: vg_tmp_test + vg: testvg pvs: "{{ loop_device_02.stdout }}" - name: Moving data from {{ loop_device_01.stdout }} to {{ loop_device_02.stdout }} (both in same VG) From 8697026101331ff26a68cfbfbca70b3ad508f244 Mon Sep 17 00:00:00 2001 From: Klention Mali Date: Mon, 14 Jul 2025 22:22:52 +0200 Subject: [PATCH 07/14] Updated assert conditions --- .../targets/lvm_pv_move_data/tasks/cleanup.yml | 2 +- .../targets/lvm_pv_move_data/tasks/creation.yml | 15 +++++++++------ 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/tests/integration/targets/lvm_pv_move_data/tasks/cleanup.yml b/tests/integration/targets/lvm_pv_move_data/tasks/cleanup.yml index 7c511f4fa5..515adadac5 100644 --- a/tests/integration/targets/lvm_pv_move_data/tasks/cleanup.yml +++ b/tests/integration/targets/lvm_pv_move_data/tasks/cleanup.yml @@ -3,7 +3,7 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -- name: Unmounting LV lv_tmp_test +- name: Unmounting temp file system ansible.posix.mount: fstab: '{{ remote_tmp_dir }}/fstab' path: '{{ remote_tmp_dir }}/tmp_mount' diff --git a/tests/integration/targets/lvm_pv_move_data/tasks/creation.yml b/tests/integration/targets/lvm_pv_move_data/tasks/creation.yml index 7dce72edd2..74a1e7b32c 100644 --- 
a/tests/integration/targets/lvm_pv_move_data/tasks/creation.yml +++ b/tests/integration/targets/lvm_pv_move_data/tasks/creation.yml @@ -45,13 +45,16 @@ ansible.builtin.command: pvs --noheadings -o pv_size --units M {{ loop_device_02.stdout }} register: pv_size_output_02 +- name: Showing current device mapper status + ansible.builtin.shell: dmsetup ls + - name: Making sure volume group testvg does not exist community.general.lvg: vg: testvg force: true state: absent -- name: Creating volume group on top of first device {{ loop_device_01.stdout }} + - name: Creating volume group on top of first device {{ loop_device_01.stdout }} community.general.lvg: vg: testvg pvs: "{{ loop_device_01.stdout }}" @@ -73,7 +76,7 @@ state: present register: fs_creation_result -- name: Mounting LV testlv + - name: Mounting LV testlv ansible.posix.mount: fstab: '{{ remote_tmp_dir }}/fstab' path: '{{ remote_tmp_dir }}/tmp_mount' @@ -88,9 +91,9 @@ - pv_creation_result_01.changed == true - pv_creation_result_02.changed == true - vg_creation_result.changed == true - - (pv_size_output_01.stdout | trim | regex_replace('M', '') | float) > 95 - - (pv_size_output_01.stdout | trim | regex_replace('M', '') | float) < 120 - - (pv_size_output_02.stdout | trim | regex_replace('M', '') | float) > 190 - - (pv_size_output_02.stdout | trim | regex_replace('M', '') | float) < 220 + - (pv_size_output_01.stdout | trim | regex_replace('M', '') | float) > 450 + - (pv_size_output_01.stdout | trim | regex_replace('M', '') | float) < 600 + - (pv_size_output_02.stdout | trim | regex_replace('M', '') | float) > 950 + - (pv_size_output_02.stdout | trim | regex_replace('M', '') | float) < 1100 - "'created' in pv_creation_result_01.msg" - "'created' in pv_creation_result_02.msg" From 8b64f1cc5fe4828f15ff474f415b3ef10c0a2b9b Mon Sep 17 00:00:00 2001 From: Klention Mali Date: Mon, 14 Jul 2025 23:00:57 +0200 Subject: [PATCH 08/14] Added .ansible to .gitignore --- .gitignore | 1 + 
.../targets/lvm_pv_move_data/tasks/creation.yml | 13 ++----------- 2 files changed, 3 insertions(+), 11 deletions(-) diff --git a/.gitignore b/.gitignore index 5c6e9c86c6..e427699798 100644 --- a/.gitignore +++ b/.gitignore @@ -530,3 +530,4 @@ tests/integration/cloud-config-*.ini # VSCode specific extensions .vscode/settings.json +.ansible diff --git a/tests/integration/targets/lvm_pv_move_data/tasks/creation.yml b/tests/integration/targets/lvm_pv_move_data/tasks/creation.yml index 74a1e7b32c..a01f837429 100644 --- a/tests/integration/targets/lvm_pv_move_data/tasks/creation.yml +++ b/tests/integration/targets/lvm_pv_move_data/tasks/creation.yml @@ -45,16 +45,7 @@ ansible.builtin.command: pvs --noheadings -o pv_size --units M {{ loop_device_02.stdout }} register: pv_size_output_02 -- name: Showing current device mapper status - ansible.builtin.shell: dmsetup ls - -- name: Making sure volume group testvg does not exist - community.general.lvg: - vg: testvg - force: true - state: absent - - - name: Creating volume group on top of first device {{ loop_device_01.stdout }} +- name: Creating volume group on top of first device {{ loop_device_01.stdout }} community.general.lvg: vg: testvg pvs: "{{ loop_device_01.stdout }}" @@ -76,7 +67,7 @@ state: present register: fs_creation_result - - name: Mounting LV testlv +- name: Mounting LV testlv ansible.posix.mount: fstab: '{{ remote_tmp_dir }}/fstab' path: '{{ remote_tmp_dir }}/tmp_mount' From f72e945f9e66a176033babee35a22b5be3a75720 Mon Sep 17 00:00:00 2001 From: Klention Mali Date: Mon, 14 Jul 2025 23:20:03 +0200 Subject: [PATCH 09/14] Force extending VG --- tests/integration/targets/lvm_pv_move_data/tasks/moving.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/integration/targets/lvm_pv_move_data/tasks/moving.yml b/tests/integration/targets/lvm_pv_move_data/tasks/moving.yml index 3f18ff444a..0d125e4c5b 100644 --- a/tests/integration/targets/lvm_pv_move_data/tasks/moving.yml +++ 
b/tests/integration/targets/lvm_pv_move_data/tasks/moving.yml @@ -12,6 +12,7 @@ community.general.lvg: vg: testvg pvs: "{{ loop_device_02.stdout }}" + force: true - name: Moving data from {{ loop_device_01.stdout }} to {{ loop_device_02.stdout }} (both in same VG) community.general.lvm_pv_move_data: From 2da7133717ca62e8234d568e101c3b04348ca7e6 Mon Sep 17 00:00:00 2001 From: Klention Mali Date: Tue, 15 Jul 2025 22:33:39 +0200 Subject: [PATCH 10/14] Wiping LVM metadata from PVs before creating VG --- .../targets/lvm_pv_move_data/tasks/creation.yml | 9 +++++++++ .../integration/targets/lvm_pv_move_data/tasks/main.yml | 2 +- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/tests/integration/targets/lvm_pv_move_data/tasks/creation.yml b/tests/integration/targets/lvm_pv_move_data/tasks/creation.yml index a01f837429..d0c4c74592 100644 --- a/tests/integration/targets/lvm_pv_move_data/tasks/creation.yml +++ b/tests/integration/targets/lvm_pv_move_data/tasks/creation.yml @@ -27,6 +27,15 @@ - name: Associating loop device with file ansible.builtin.command: 'losetup {{ loop_device_02.stdout }} {{ remote_tmp_dir }}/test_lvm_pv_02.img' +- name: Wiping existing LVM metadata + community.general.lvm_pv: + device: "{{ item }}" + force: true + state: absent + loop: + - "{{ loop_device_01.stdout }}" + - "{{ loop_device_02.stdout }}" + - name: Creating physical volume for the first loop device community.general.lvm_pv: device: "{{ loop_device_01.stdout }}" diff --git a/tests/integration/targets/lvm_pv_move_data/tasks/main.yml b/tests/integration/targets/lvm_pv_move_data/tasks/main.yml index d25c667114..e5e420832a 100644 --- a/tests/integration/targets/lvm_pv_move_data/tasks/main.yml +++ b/tests/integration/targets/lvm_pv_move_data/tasks/main.yml @@ -5,7 +5,7 @@ #################################################################### # Copyright (c) Contributors to the Ansible project -# Based on the integraton test for the lvm_pv module +# Based on the integration test for 
the lvm_pv module # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later From 08b654f48b562b6d75cabdcdbfa7c90651a8d9e7 Mon Sep 17 00:00:00 2001 From: Klention Mali Date: Sat, 19 Jul 2025 15:05:05 +0200 Subject: [PATCH 11/14] Clean FS, LV, VG and PVs before run --- .../lvm_pv_move_data/tasks/cleanup.yml | 8 ++++ .../lvm_pv_move_data/tasks/creation.yml | 37 +++++++++++++------ .../targets/lvm_pv_move_data/tasks/main.yml | 6 ++- .../targets/lvm_pv_move_data/tasks/moving.yml | 21 +++++++---- .../lvm_pv_move_data/tasks/prepare.yml | 23 ++++++++++++ 5 files changed, 74 insertions(+), 21 deletions(-) create mode 100644 tests/integration/targets/lvm_pv_move_data/tasks/prepare.yml diff --git a/tests/integration/targets/lvm_pv_move_data/tasks/cleanup.yml b/tests/integration/targets/lvm_pv_move_data/tasks/cleanup.yml index 515adadac5..1c121008b8 100644 --- a/tests/integration/targets/lvm_pv_move_data/tasks/cleanup.yml +++ b/tests/integration/targets/lvm_pv_move_data/tasks/cleanup.yml @@ -13,6 +13,14 @@ community.general.filesystem: dev: /dev/vg_tmp_test/lv_tmp_test state: absent + force: true + +- name: Deleting testlv logical volume + community.general.lvol: + vg: testvg + lv: testlv + force: true + state: absent - name: Detaching first loop device ansible.builtin.command: losetup -d {{ loop_device_01.stdout }} diff --git a/tests/integration/targets/lvm_pv_move_data/tasks/creation.yml b/tests/integration/targets/lvm_pv_move_data/tasks/creation.yml index d0c4c74592..d22f882674 100644 --- a/tests/integration/targets/lvm_pv_move_data/tasks/creation.yml +++ b/tests/integration/targets/lvm_pv_move_data/tasks/creation.yml @@ -13,28 +13,39 @@ args: creates: "{{ remote_tmp_dir }}/test_lvm_pv_02.img" +- name: Pausing for a random time between 5-15 seconds + ansible.builtin.pause: + seconds: "{{ range(5, 16) | random }}" + - name: Creating loop device ansible.builtin.command: 
losetup -f register: loop_device_01 +- name: Wiping existing LVM metadata + community.general.lvm_pv: + device: "{{ loop_device_01.stdout }}" + force: true + state: absent + - name: Associating loop device with file ansible.builtin.command: 'losetup {{ loop_device_01.stdout }} {{ remote_tmp_dir }}/test_lvm_pv_01.img' +- name: Pausing for a random time between 5-15 seconds + ansible.builtin.pause: + seconds: "{{ range(5, 16) | random }}" + - name: Creating loop device ansible.builtin.command: losetup -f register: loop_device_02 -- name: Associating loop device with file - ansible.builtin.command: 'losetup {{ loop_device_02.stdout }} {{ remote_tmp_dir }}/test_lvm_pv_02.img' - - name: Wiping existing LVM metadata community.general.lvm_pv: - device: "{{ item }}" + device: "{{ loop_device_02.stdout }}" force: true state: absent - loop: - - "{{ loop_device_01.stdout }}" - - "{{ loop_device_02.stdout }}" + +- name: Associating loop device with file + ansible.builtin.command: 'losetup {{ loop_device_02.stdout }} {{ remote_tmp_dir }}/test_lvm_pv_02.img' - name: Creating physical volume for the first loop device community.general.lvm_pv: @@ -54,10 +65,12 @@ ansible.builtin.command: pvs --noheadings -o pv_size --units M {{ loop_device_02.stdout }} register: pv_size_output_02 -- name: Creating volume group on top of first device {{ loop_device_01.stdout }} +- name: Creating volume group community.general.lvg: vg: testvg - pvs: "{{ loop_device_01.stdout }}" + pvs: + - "{{ loop_device_01.stdout }}" + - "{{ loop_device_02.stdout }}" force: true register: vg_creation_result @@ -88,9 +101,9 @@ - name: Asserting physical volume was created ansible.builtin.assert: that: - - pv_creation_result_01.changed == true - - pv_creation_result_02.changed == true - - vg_creation_result.changed == true + - pv_creation_result_01 is changed + - pv_creation_result_02 is changed + - vg_creation_result is changed - (pv_size_output_01.stdout | trim | regex_replace('M', '') | float) > 450 - 
(pv_size_output_01.stdout | trim | regex_replace('M', '') | float) < 600 - (pv_size_output_02.stdout | trim | regex_replace('M', '') | float) > 950 diff --git a/tests/integration/targets/lvm_pv_move_data/tasks/main.yml b/tests/integration/targets/lvm_pv_move_data/tasks/main.yml index e5e420832a..908e1b9a99 100644 --- a/tests/integration/targets/lvm_pv_move_data/tasks/main.yml +++ b/tests/integration/targets/lvm_pv_move_data/tasks/main.yml @@ -12,11 +12,15 @@ - name: Install required packages (Linux) when: ansible_system == 'Linux' ansible.builtin.package: - name: lvm2 + name: + - lvm2 + - xfsprogs state: present - name: Testing lvm_pv_move_data module block: + - import_tasks: prepare.yml + - import_tasks: creation.yml - import_tasks: moving.yml diff --git a/tests/integration/targets/lvm_pv_move_data/tasks/moving.yml b/tests/integration/targets/lvm_pv_move_data/tasks/moving.yml index 0d125e4c5b..9b52f35f7e 100644 --- a/tests/integration/targets/lvm_pv_move_data/tasks/moving.yml +++ b/tests/integration/targets/lvm_pv_move_data/tasks/moving.yml @@ -8,13 +8,18 @@ args: creates: "{{ remote_tmp_dir }}/tmp_mount/test_file" -- name: Extending VG testvg with second device {{ loop_device_02.stdout }} - community.general.lvg: - vg: testvg - pvs: "{{ loop_device_02.stdout }}" - force: true +- name: Growing the second loop device file to 1500MB + ansible.builtin.shell: truncate -s 1500M {{ remote_tmp_dir }}/test_lvm_pv_02.img -- name: Moving data from {{ loop_device_01.stdout }} to {{ loop_device_02.stdout }} (both in same VG) +- name: Refreshing the second loop device + ansible.builtin.shell: losetup -c {{ loop_device_02.stdout }} + +- name: Resizing the second physical volume + community.general.lvm_pv: + device: "{{ loop_device_02.stdout }}" + resize: true + +- name: Moving data between PVs (both in same VG) community.general.lvm_pv_move_data: source: "{{ loop_device_01.stdout }}" destination: "{{ loop_device_02.stdout }}" @@ -27,5 +32,5 @@ - name: Asserting data was 
moved successfully ansible.builtin.assert: that: - - move_result.changed == true - - "'moved data from' in creation_result.msg" + - move_result is changed + - "'moved data from' in move_result.msg" diff --git a/tests/integration/targets/lvm_pv_move_data/tasks/prepare.yml b/tests/integration/targets/lvm_pv_move_data/tasks/prepare.yml new file mode 100644 index 0000000000..ced3f74dc6 --- /dev/null +++ b/tests/integration/targets/lvm_pv_move_data/tasks/prepare.yml @@ -0,0 +1,23 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Removing xfs filesystem from LV lv_tmp_test + community.general.filesystem: + dev: /dev/vg_tmp_test/lv_tmp_test + state: absent + force: true + +- name: Deleting testlv logical volume + community.general.lvol: + vg: testvg + lv: testlv + force: true + state: absent + +- name: Deleting volume group testvg + community.general.lvg: + vg: testvg + force: true + state: absent From 3d84493202b1216f34148775e9c5060876a9a451 Mon Sep 17 00:00:00 2001 From: Klention Mali Date: Mon, 21 Jul 2025 20:00:43 +0200 Subject: [PATCH 12/14] Migrated to CmdRunner --- plugins/modules/lvm_pv_move_data.py | 165 +++++++++++------- .../lvm_pv_move_data/tasks/creation.yml | 5 +- 2 files changed, 105 insertions(+), 65 deletions(-) diff --git a/plugins/modules/lvm_pv_move_data.py b/plugins/modules/lvm_pv_move_data.py index 82725f0735..c5f660685d 100644 --- a/plugins/modules/lvm_pv_move_data.py +++ b/plugins/modules/lvm_pv_move_data.py @@ -29,11 +29,27 @@ options: - Must be an existing PV with enough free space. type: path required: true + auto_answer: + description: + - Answer yes to all prompts automatically. + type: bool + default: false + atomic: + description: + - Makes a pvmove operation atomic, ensuring that all affected LVs are moved to the destination PV, or none are if the operation is aborted. 
+ type: bool + default: true + autobackup: + description: + - Automatically backup metadata before changes (strongly advised!). + type: bool + default: true notes: - Requires LVM2 utilities installed on the target system. - - Both source and destination devices must exist. - - Both source and destination PVs must be in the same volume group. - - The destination PV must have enough free space to accommodate the source PV's allocated extents. + - Both O(source) and O(destination) devices must exist. + - Both O(source) and O(destination) PVs must be in the same volume group. + - The O(destination) PV must have enough free space to accommodate the O(source) PV's allocated extents. + - Verbosity is automatically controlled by Ansible's verbosity level (using multiple C(-v) flags). """ EXAMPLES = r""" @@ -49,53 +65,7 @@ RETURN = r""" import os from ansible.module_utils.basic import AnsibleModule - - -def get_pv_status(module, device): - """Check if the device is already a PV.""" - cmd = ['pvs', '--noheadings', '--readonly', device] - return module.run_command(cmd)[0] == 0 - - -def get_pv_vg(module, device): - """Get the VG name of a PV.""" - cmd = ['pvs', '--noheadings', '-o', 'vg_name', device] - rc, out, err = module.run_command(cmd) - if rc != 0: - module.fail_json(msg="Failed to get VG for device %s: %s" % (device, err)) - vg = out.strip() - return None if vg == '' else vg - - -def get_pv_allocated_pe(module, device): - """Get count of allocated physical extents in a PV.""" - cmd = ['pvs', '--noheadings', '-o', 'pv_pe_alloc_count', device] - rc, out, err = module.run_command(cmd) - if rc != 0: - module.fail_json(msg="Failed to get allocated PE count for device %s: %s" % (device, err)) - try: - return int(out.strip()) - except ValueError: - module.fail_json(msg="Invalid allocated PE count for device %s: %s" % (device, out)) - - -def get_pv_total_pe(module, device): - """Get total number of physical extents in a PV.""" - cmd = ['pvs', '--noheadings', '-o', 
'pv_pe_count', device] - rc, out, err = module.run_command(cmd) - if rc != 0: - module.fail_json(msg="Failed to get total PE count for device %s: %s" % (device, err)) - try: - return int(out.strip()) - except ValueError: - module.fail_json(msg="Invalid total PE count for device %s: %s" % (device, out)) - - -def get_pv_free_pe(module, device): - """Calculate free physical extents in a PV.""" - total_pe = get_pv_total_pe(module, device) - allocated_pe = get_pv_allocated_pe(module, device) - return total_pe - allocated_pe +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt def main(): @@ -103,14 +73,31 @@ def main(): argument_spec=dict( source=dict(type='path', required=True), destination=dict(type='path', required=True), + auto_answer=dict(type='bool', default=False), + atomic=dict(type='bool', default=True), + autobackup=dict(type='bool', default=True), ), supports_check_mode=True, ) + pvs_runner = CmdRunner( + module, + command="pvs", + arg_formats=dict( + noheadings=cmd_runner_fmt.as_fixed("--noheadings"), + readonly=cmd_runner_fmt.as_fixed("--readonly"), + vg_name=cmd_runner_fmt.as_fixed("-o", "vg_name"), + pv_pe_alloc_count=cmd_runner_fmt.as_fixed("-o", "pv_pe_alloc_count"), + pv_pe_count=cmd_runner_fmt.as_fixed("-o", "pv_pe_count"), + device=cmd_runner_fmt.as_list(), + ) + ) + source = module.params['source'] destination = module.params['destination'] changed = False actions = [] + result = {'changed': False} # Validate device existence if not os.path.exists(source): @@ -120,27 +107,51 @@ def main(): if source == destination: module.fail_json(msg="Source and destination devices must be different") - # Check both are PVs - if not get_pv_status(module, source): + def run_pvs_command(arguments, device): + with pvs_runner(arguments) as ctx: + rc, out, err = ctx.run(device=device) + if rc != 0: + module.fail_json(msg="Command failed: %s" % err) + return out.strip() + + def is_pv(device): + with 
pvs_runner("noheadings readonly device", check_rc=False) as ctx: + rc, out, err = ctx.run(device=device) + return rc == 0 + + if not is_pv(source): module.fail_json(msg="Source device %s is not a PV" % source) - if not get_pv_status(module, destination): + if not is_pv(destination): module.fail_json(msg="Destination device %s is not a PV" % destination) - # Check both are in the same VG - vg_src = get_pv_vg(module, source) - vg_dest = get_pv_vg(module, destination) + vg_src = run_pvs_command("noheadings vg_name device", source) + vg_dest = run_pvs_command("noheadings vg_name device", destination) if vg_src != vg_dest: module.fail_json( msg="Source and destination must be in the same VG. Source VG: '%s', Destination VG: '%s'." % (vg_src, vg_dest) ) - # Check source has data to move - allocated = get_pv_allocated_pe(module, source) + def get_allocated_pe(device): + try: + return int(run_pvs_command("noheadings pv_pe_alloc_count device", device)) + except ValueError: + module.fail_json(msg="Invalid allocated PE count for device %s" % device) + + allocated = get_allocated_pe(source) if allocated == 0: actions.append('no allocated extents to move') else: # Check destination has enough free space - free_pe_dest = get_pv_free_pe(module, destination) + def get_total_pe(device): + try: + return int(run_pvs_command("noheadings pv_pe_count device", device)) + except ValueError: + module.fail_json(msg="Invalid total PE count for device %s" % device) + + def get_free_pe(device): + return get_total_pe(device) - get_allocated_pe(device) + + free_pe_dest = get_free_pe(destination) if free_pe_dest < allocated: module.fail_json( msg="Destination device %s has only %d free physical extents, but source device %s has %d allocated extents. Not enough space." 
% @@ -151,16 +162,42 @@ def main(): changed = True actions.append('would move data from %s to %s' % (source, destination)) else: - cmd = ['pvmove', source, destination] - rc, out, err = module.run_command(cmd, check_rc=True) + pvmove_runner = CmdRunner( + module, + command="pvmove", + arg_formats=dict( + auto_answer=cmd_runner_fmt.as_bool("-y"), + atomic=cmd_runner_fmt.as_bool("--atomic"), + autobackup=cmd_runner_fmt.as_fixed("--autobackup", "y" if module.params['autobackup'] else "n"), + verbosity=cmd_runner_fmt.as_func(lambda v: ['-' + 'v' * v] if v > 0 else []), + source=cmd_runner_fmt.as_list(), + destination=cmd_runner_fmt.as_list(), + ) + ) + + verbosity = module._verbosity + with pvmove_runner("auto_answer atomic autobackup verbosity source destination") as ctx: + rc, out, err = ctx.run( + auto_answer=module.params['auto_answer'], + atomic=module.params['atomic'], + verbosity=verbosity, + source=source, + destination=destination + ) + result['stdout'] = out + result['stderr'] = err + changed = True actions.append('moved data from %s to %s' % (source, destination)) + result['changed'] = changed + result['actions'] = actions if actions: - msg = "PV data move: %s" % ', '.join(actions) + result['msg'] = "PV data move: " + ", ".join(actions) else: - msg = "No data to move from %s" % source - module.exit_json(changed=changed, msg=msg) + result['msg'] = "No data to move from %s" % source + + module.exit_json(**result) if __name__ == '__main__': diff --git a/tests/integration/targets/lvm_pv_move_data/tasks/creation.yml b/tests/integration/targets/lvm_pv_move_data/tasks/creation.yml index d22f882674..8ce11fb5c5 100644 --- a/tests/integration/targets/lvm_pv_move_data/tasks/creation.yml +++ b/tests/integration/targets/lvm_pv_move_data/tasks/creation.yml @@ -97,13 +97,16 @@ fstype: xfs opts: rw,noauto state: mounted + register: mount_result -- name: Asserting physical volume was created +- name: Asserting PVs, VG, LV and filesystem were created successfully 
ansible.builtin.assert: that: - pv_creation_result_01 is changed - pv_creation_result_02 is changed - vg_creation_result is changed + - lv_creation_result is changed + - fs_creation_result is changed - (pv_size_output_01.stdout | trim | regex_replace('M', '') | float) > 450 - (pv_size_output_01.stdout | trim | regex_replace('M', '') | float) < 600 - (pv_size_output_02.stdout | trim | regex_replace('M', '') | float) > 950 From 46d669dc41712119e6e6dabf8605bdceb91f1c99 Mon Sep 17 00:00:00 2001 From: Klention Mali Date: Sat, 26 Jul 2025 20:37:49 +0200 Subject: [PATCH 13/14] Added more detailed info in case of failure and cosmetic changes --- plugins/modules/lvm_pv_move_data.py | 31 ++++++++++++++++++++++------- 1 file changed, 24 insertions(+), 7 deletions(-) diff --git a/plugins/modules/lvm_pv_move_data.py b/plugins/modules/lvm_pv_move_data.py index c5f660685d..2ef72be8ce 100644 --- a/plugins/modules/lvm_pv_move_data.py +++ b/plugins/modules/lvm_pv_move_data.py @@ -36,7 +36,8 @@ options: default: false atomic: description: - - Makes a pvmove operation atomic, ensuring that all affected LVs are moved to the destination PV, or none are if the operation is aborted. + - Makes the C(pvmove) operation atomic, ensuring that all affected LVs are moved to the destination PV, + or none are if the operation is aborted. type: bool default: true autobackup: @@ -44,10 +45,9 @@ options: - Automatically backup metadata before changes (strongly advised!). type: bool default: true -notes: - - Requires LVM2 utilities installed on the target system. - - Both O(source) and O(destination) devices must exist. - - Both O(source) and O(destination) PVs must be in the same volume group. +requirements: + - LVM2 utilities + - Both O(source) and O(destination) devices must exist, and the PVs must be in the same volume group. - The O(destination) PV must have enough free space to accommodate the O(source) PV's allocated extents. 
- Verbosity is automatically controlled by Ansible's verbosity level (using multiple C(-v) flags). """ @@ -60,6 +60,15 @@ EXAMPLES = r""" """ RETURN = r""" +actions: + description: List of actions performed during module execution. + returned: always + type: list + sample: [ + "moved data from /dev/sdb to /dev/sdc", + "no allocated extents to move", + "would move data from /dev/sdb to /dev/sdc" + ] """ @@ -111,7 +120,15 @@ def main(): with pvs_runner(arguments) as ctx: rc, out, err = ctx.run(device=device) if rc != 0: - module.fail_json(msg="Command failed: %s" % err) + module.fail_json( + msg="Command failed: %s" % err, + stdout=out, + stderr=err, + rc=rc, + cmd=ctx.cmd, + arguments=arguments, + device=device, + ) return out.strip() def is_pv(device): @@ -193,7 +210,7 @@ def main(): result['changed'] = changed result['actions'] = actions if actions: - result['msg'] = "PV data move: " + ", ".join(actions) + result['msg'] = "PV data move: %s" % ", ".join(actions) else: result['msg'] = "No data to move from %s" % source From 5d4771076c75076a00b4bfca3f3c13b7fe35342a Mon Sep 17 00:00:00 2001 From: Klention Mali Date: Mon, 28 Jul 2025 10:48:17 +0200 Subject: [PATCH 14/14] Remove redundant params from CmdRunner call --- plugins/modules/lvm_pv_move_data.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/plugins/modules/lvm_pv_move_data.py b/plugins/modules/lvm_pv_move_data.py index 2ef72be8ce..ab111c090e 100644 --- a/plugins/modules/lvm_pv_move_data.py +++ b/plugins/modules/lvm_pv_move_data.py @@ -195,8 +195,6 @@ def main(): verbosity = module._verbosity with pvmove_runner("auto_answer atomic autobackup verbosity source destination") as ctx: rc, out, err = ctx.run( - auto_answer=module.params['auto_answer'], - atomic=module.params['atomic'], verbosity=verbosity, source=source, destination=destination