New module zpool (#10146)

* Add zpool module

* Add botmeta

* Use str.format instead of f-strings

* Remove nonlocal usage

* Add check to only pass ashift to zpool add

* Extend ansible_spec and remove unnecessary validation

* Apply suggestions and fix style

* Fix indentation of yaml lists

* Add method to normalize vdevs

Fix role: none in vdevs

* Use CmdRunner instead of run_command

* Fix styling and documentation

* Use str.format instead of f-strings

* Make sure vdevs are only required when state is present

* Add support for loop devices and normalize vdev type

* Add integration tests

* Add missing test dependencies for alpine and redhat

* Skip integration tests on rhel10 until packages are available

* Use package module for better auto detection of package manager on rhel

* Add copyright header

* Skip tests on rhel and remove redhat install requirements

* Ensure loop devices under /dev exist

* Enable usage of files as pool devices

* Remove disk setup

* Use files as disks

* Apply suggestions

* Fix argument_spec
Commit 928622703d (parent 8bd68e7e41), authored by Tom Hesse on 2025-06-07 17:52:01 +02:00 and committed by GitHub.
11 changed files with 1064 additions and 0 deletions

.github/BOTMETA.yml

@@ -1476,6 +1476,8 @@ files:
maintainers: natefoo
$modules/znode.py:
maintainers: treyperry
$modules/zpool.py:
maintainers: tomhesse
$modules/zpool_facts:
keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
labels: solaris

plugins/modules/zpool.py (new file)

@@ -0,0 +1,614 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2025, Tom Hesse <contact@tomhesse.xyz>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
module: zpool
short_description: Manage ZFS zpools
version_added: 11.0.0
description:
- Create, destroy, and modify ZFS zpools and their vdev layouts, pool properties, and filesystem properties.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: partial
details:
- In check mode, any C(zpool) subcommand that supports the dry-run flag (C(-n)) will be run with C(-n) and its
simulated output is included in the module's diff results.
diff_mode:
support: full
author:
- Tom Hesse (@tomhesse)
options:
name:
description:
- Name of the zpool to manage.
required: true
type: str
state:
description:
- Whether the pool should exist.
choices: [ present, absent ]
default: present
type: str
disable_new_features:
description:
- If V(true), disable new ZFS feature flags when creating.
type: bool
default: false
force:
description:
- If V(true), force operations (for example overwrite existing devices).
type: bool
default: false
pool_properties:
description:
- Dictionary of ZFS pool properties to set (for example V(autoexpand), V(cachefile)).
type: dict
default: {}
filesystem_properties:
description:
- Dictionary of ZFS filesystem properties to set on the root dataset (for example V(compression), V(dedup)).
type: dict
default: {}
mountpoint:
description:
- Filesystem mountpoint for the root dataset.
type: str
altroot:
description:
- Alternate root for mounting filesystems.
type: str
temp_name:
description:
- Temporary name used during pool creation.
type: str
vdevs:
description:
- List of vdev definitions for the pool.
type: list
elements: dict
suboptions:
role:
description:
- Special vdev role (for example V(log), V(cache), V(spare)).
type: str
choices: [ log, cache, spare, dedup, special ]
type:
description:
- Vdev topology (for example V(stripe), V(mirror), V(raidz)).
type: str
choices: [ stripe, mirror, raidz, raidz1, raidz2, raidz3 ]
default: stripe
disks:
description:
- List of device paths to include in this vdev.
required: true
type: list
elements: path
'''
EXAMPLES = r'''
- name: Create pool "tank" on /dev/sda
community.general.zpool:
name: tank
vdevs:
- disks:
- /dev/sda
- name: Create mirrored pool "tank"
community.general.zpool:
name: tank
vdevs:
- type: mirror
disks:
- /dev/sda
- /dev/sdb
- name: Add a cache device to tank
community.general.zpool:
name: tank
vdevs:
- disks:
- /dev/sda
- role: cache
disks:
- /dev/nvme0n1
- name: Set pool and filesystem properties
community.general.zpool:
name: tank
pool_properties:
ashift: 12
filesystem_properties:
compression: lz4
vdevs:
- disks:
- /dev/sda
- name: Destroy pool "tank"
community.general.zpool:
name: tank
state: absent
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
class Zpool(object):
def __init__(self, module, name, disable_new_features, force, pool_properties, filesystem_properties, mountpoint, altroot, temp_name, vdevs):
self.module = module
self.name = name
self.disable_new_features = disable_new_features
self.force = force
self.pool_properties = pool_properties
self.filesystem_properties = filesystem_properties
self.mountpoint = mountpoint
self.altroot = altroot
self.temp_name = temp_name
self.vdevs = vdevs
self.zpool_cmd = module.get_bin_path('zpool', required=True)
self.zfs_cmd = module.get_bin_path('zfs', required=True)
self.changed = False
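        # Map module options to zpool CLI arguments: pool/filesystem properties expand to
        # repeated '-o prop=value' / '-O prop=value' pairs, and each vdev expands to its
        # optional role keyword, its type (omitted for plain stripes) and its disk paths.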
self.zpool_runner = CmdRunner(
module,
command=self.zpool_cmd,
arg_formats=dict(
subcommand=cmd_runner_fmt.as_list(),
disable_new_features=cmd_runner_fmt.as_bool('-d'),
force=cmd_runner_fmt.as_bool('-f'),
dry_run=cmd_runner_fmt.as_bool('-n'),
pool_properties=cmd_runner_fmt.as_func(
lambda props: sum([['-o', '{}={}'.format(prop, value)] for prop, value in (props or {}).items()], [])
),
filesystem_properties=cmd_runner_fmt.as_func(
lambda props: sum([['-O', '{}={}'.format(prop, value)] for prop, value in (props or {}).items()], [])
),
mountpoint=cmd_runner_fmt.as_opt_val('-m'),
altroot=cmd_runner_fmt.as_opt_val('-R'),
temp_name=cmd_runner_fmt.as_opt_val('-t'),
name=cmd_runner_fmt.as_list(),
vdevs=cmd_runner_fmt.as_func(
lambda vdevs: sum(
[
([vdev['role']] if vdev.get('role') else [])
+ ([] if vdev.get('type', 'stripe') == 'stripe' else [vdev['type']])
+ vdev.get('disks', [])
for vdev in (vdevs or [])
],
[],
)
),
vdev_name=cmd_runner_fmt.as_list(),
scripted=cmd_runner_fmt.as_bool('-H'),
parsable=cmd_runner_fmt.as_bool('-p'),
columns=cmd_runner_fmt.as_opt_val('-o'),
properties=cmd_runner_fmt.as_list(),
assignment=cmd_runner_fmt.as_list(),
full_paths=cmd_runner_fmt.as_bool('-P'),
real_paths=cmd_runner_fmt.as_bool('-L'),
)
)
self.zfs_runner = CmdRunner(
module,
command=self.zfs_cmd,
arg_formats=dict(
subcommand=cmd_runner_fmt.as_list(),
scripted=cmd_runner_fmt.as_bool('-H'),
columns=cmd_runner_fmt.as_opt_val('-o'),
properties=cmd_runner_fmt.as_list(),
assignment=cmd_runner_fmt.as_list(),
name=cmd_runner_fmt.as_list()
)
)
def exists(self):
with self.zpool_runner('subcommand name') as ctx:
rc, stdout, stderr = ctx.run(subcommand='list', name=self.name)
return rc == 0
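    # Build the full 'zpool create' command from the module parameters; in check mode the
    # native dry-run flag (-n) is used and its simulated output is returned as diff text.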
def create(self):
with self.zpool_runner(
'subcommand disable_new_features force dry_run pool_properties filesystem_properties mountpoint altroot temp_name name vdevs',
check_rc=True
) as ctx:
rc, stdout, stderr = ctx.run(subcommand='create', dry_run=self.module.check_mode)
self.changed = True
if self.module.check_mode:
return {'prepared': stdout}
def destroy(self):
if self.module.check_mode:
self.changed = True
return
with self.zpool_runner('subcommand name', check_rc=True) as ctx:
rc, stdout, stderr = ctx.run(subcommand='destroy')
self.changed = True
def list_pool_properties(self):
with self.zpool_runner('subcommand scripted columns properties name', check_rc=True) as ctx:
rc, stdout, stderr = ctx.run(
subcommand='get',
scripted=True,
columns='property,value',
properties='all',
)
props = {}
for line in stdout.splitlines():
prop, value = line.split('\t', 1)
props[prop] = value
return props
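    # Compare the requested pool properties against 'zpool get all' and set only the ones
    # that differ, collecting before/after values for the diff; 'zpool set' is skipped in
    # check mode.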
def set_pool_properties_if_changed(self):
current = self.list_pool_properties()
before = {}
after = {}
for prop, value in self.pool_properties.items():
if current.get(prop) != str(value):
before[prop] = current.get(prop)
if not self.module.check_mode:
with self.zpool_runner('subcommand assignment name', check_rc=True) as ctx:
rc, stdout, stderr = ctx.run(subcommand='set', assignment='{}={}'.format(prop, value))
after[prop] = str(value)
self.changed = True
return {'before': {'pool_properties': before}, 'after': {'pool_properties': after}}
def list_filesystem_properties(self):
with self.zfs_runner('subcommand scripted columns properties name', check_rc=True) as ctx:
rc, stdout, stderr = ctx.run(
subcommand='get',
scripted=True,
columns='property,value',
properties='all',
)
props = {}
for line in stdout.splitlines():
prop, value = line.split('\t', 1)
props[prop] = value
return props
def set_filesystem_properties_if_changed(self):
current = self.list_filesystem_properties()
before = {}
after = {}
for prop, value in self.filesystem_properties.items():
if current.get(prop) != str(value):
before[prop] = current.get(prop)
if not self.module.check_mode:
with self.zfs_runner('subcommand assignment name', check_rc=True) as ctx:
rc, stdout, stderr = ctx.run(subcommand='set', assignment='{}={}'.format(prop, value))
after[prop] = str(value)
self.changed = True
return {'before': {'filesystem_properties': before}, 'after': {'filesystem_properties': after}}
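    # Map a partition path reported by 'zpool status -P -L' back to its whole-disk device:
    # loop devices and non-/dev paths (for example file-backed vdevs) are returned
    # unchanged, partition suffixes are stripped from nvme and sd/vd devices.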
def base_device(self, device):
if not device.startswith('/dev/'):
return device
# loop devices
match = re.match(r'^(/dev/loop\d+)$', device)
if match:
return match.group(1)
# nvme drives
match = re.match(r'^(.*?)(p\d+)$', device)
if match:
return match.group(1)
# sata/scsi drives
match = re.match(r'^(/dev/(?:sd|vd)[a-z])\d+$', device)
if match:
return match.group(1)
return device
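    # Parse the config section of 'zpool status -P -L' into vdev dicts of the form
    # {'role': ..., 'type': ..., 'disks': [...]}: 'logs'/'cache'/'spares' headers set the
    # role, 'mirror-N'/'raidzN-N' headers start a new group, bare devices become stripes.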
def get_current_layout(self):
with self.zpool_runner('subcommand full_paths real_paths name', check_rc=True) as ctx:
rc, stdout, stderr = ctx.run(subcommand='status', full_paths=True, real_paths=True)
vdevs = []
current = None
in_config = False
def flush_current(current):
if current:
if current.get('role') is None:
current.pop('role', None)
vdevs.append(current)
return None
for line in stdout.splitlines():
if not in_config:
if line.strip().startswith('config:'):
in_config = True
continue
if not line.strip() or line.strip().startswith('NAME'):
continue
partitions = line.split()
device = partitions[0]
if device == self.name:
continue
if device in ('logs', 'cache', 'spares'):
current = flush_current(current)
role = 'spare' if device == 'spares' else device.rstrip('s')
current = {'role': role, 'type': None, 'disks': []}
continue
match_group = re.match(r'^(mirror|raidz\d?)-\d+$', device)
if match_group:
if current and current.get('type') is not None:
current = flush_current(current)
kind = match_group.group(1)
role = current.get('role') if current and current.get('type') is None else None
current = {'role': role, 'type': kind, 'disks': []}
continue
if device.startswith('/'):
base_device = self.base_device(device)
if current:
if current.get('type') is None:
entry = {
'type': 'stripe',
'disks': [base_device]
}
if current.get('role'):
entry['role'] = current['role']
vdevs.append(entry)
current = None
else:
current['disks'].append(base_device)
else:
vdevs.append({'type': 'stripe', 'disks': [base_device]})
continue
if current and current.get('type') is not None:
current = flush_current(current)
return vdevs
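    # Bring vdev definitions into a canonical form so current and desired layouts can be
    # compared by value: 'raidz' is treated as 'raidz1', disks are sorted, and vdevs are
    # ordered by role, type and disks.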
def normalize_vdevs(self, vdevs):
alias = {'raidz': 'raidz1'}
normalized = []
for vdev in vdevs:
normalized_type = alias.get(vdev.get('type', 'stripe'), vdev.get('type', 'stripe'))
entry = {
'type': normalized_type,
'disks': sorted(vdev['disks']),
}
role = vdev.get('role')
if role is not None:
entry['role'] = role
normalized.append(entry)
return sorted(normalized, key=lambda x: (x.get('role', ''), x['type'], x['disks']))
def diff_layout(self):
current = self.normalize_vdevs(self.get_current_layout())
desired = self.normalize_vdevs(self.vdevs)
before = {'vdevs': current}
after = {'vdevs': desired}
if current != desired:
self.changed = True
return {'before': before, 'after': after}
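    # 'zpool add' only honours the ashift pool property, so anything else is ignored with
    # a warning; vdevs present in the desired layout but missing from the pool are added
    # (with -n in check mode).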
def add_vdevs(self):
invalid_properties = [k for k in self.pool_properties if k != 'ashift']
if invalid_properties:
self.module.warn("zpool add only supports 'ashift', ignoring: {}".format(invalid_properties))
diff = self.diff_layout()
before_vdevs = diff['before']['vdevs']
after_vdevs = diff['after']['vdevs']
to_add = [vdev for vdev in after_vdevs if vdev not in before_vdevs]
if not to_add:
return {}
with self.zpool_runner('subcommand force dry_run pool_properties name vdevs', check_rc=True) as ctx:
rc, stdout, stderr = ctx.run(
subcommand='add',
dry_run=self.module.check_mode,
pool_properties={'ashift': self.pool_properties['ashift']} if 'ashift' in self.pool_properties else {},
vdevs=to_add,
)
self.changed = True
if self.module.check_mode:
return {'prepared': stdout}
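    # Like get_current_layout(), but keep the group names (for example 'mirror-0',
    # 'raidz1-1') that 'zpool remove' expects as arguments.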
def list_vdevs_with_names(self):
with self.zpool_runner('subcommand full_paths real_paths name', check_rc=True) as ctx:
rc, stdout, stderr = ctx.run(subcommand='status', full_paths=True, real_paths=True)
in_cfg = False
saw_pool = False
vdevs = []
current = None
for line in stdout.splitlines():
if not in_cfg:
if line.strip().startswith('config:'):
in_cfg = True
continue
if not line.strip() or line.strip().startswith('NAME'):
continue
partitions = line.strip().split()
device = partitions[0]
if not saw_pool:
if device == self.name:
saw_pool = True
continue
if re.match(r'^(mirror|raidz\d?)\-\d+$', device) or device in ('cache', 'logs', 'spares'):
if current:
vdevs.append(current)
vdev_type = ('stripe' if device in ('cache', 'logs', 'spares') else ('mirror' if device.startswith('mirror') else 'raidz'))
current = {'name': device, 'type': vdev_type, 'disks': []}
continue
if device.startswith('/') and current:
current['disks'].append(self.base_device(device))
continue
if device.startswith('/'):
base_device = self.base_device(device)
vdevs.append({'name': base_device, 'type': 'stripe', 'disks': [base_device]})
if current:
vdevs.append(current)
return vdevs
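    # Disks that are in the pool but absent from the desired layout mark their top-level
    # vdev for removal; the affected vdevs are then removed by name (dry run in check mode).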
def remove_vdevs(self):
current = self.list_vdevs_with_names()
current_disks = {disk for vdev in current for disk in vdev['disks']}
desired_disks = {disk for vdev in self.vdevs for disk in vdev.get('disks', [])}
gone = current_disks - desired_disks
to_remove = [vdev['name'] for vdev in current if any(disk in gone for disk in vdev['disks'])]
if not to_remove:
return {}
with self.zpool_runner('subcommand dry_run name vdev_name', check_rc=True) as ctx:
rc, stdout, stderr = ctx.run(
subcommand='remove', dry_run=self.module.check_mode, vdev_name=to_remove)
self.changed = True
if self.module.check_mode:
return {'prepared': stdout}
before = [vdev['name'] for vdev in current]
after = [name for name in before if name not in to_remove]
return {'before': {'vdevs': before}, 'after': {'vdevs': after}}
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True),
state=dict(type='str', choices=['present', 'absent'], default='present'),
disable_new_features=dict(type='bool', default=False),
force=dict(type='bool', default=False),
pool_properties=dict(type='dict', default={}),
filesystem_properties=dict(type='dict', default={}),
mountpoint=dict(type='str'),
altroot=dict(type='str'),
temp_name=dict(type='str'),
vdevs=dict(
type='list',
elements='dict',
options=dict(
role=dict(
type='str',
choices=['log', 'cache', 'spare', 'dedup', 'special'],
),
type=dict(
type='str',
choices=['stripe', 'mirror', 'raidz', 'raidz1', 'raidz2', 'raidz3'],
default='stripe',
),
disks=dict(
type='list',
elements='path',
required=True,
),
),
),
),
supports_check_mode=True,
required_if=[('state', 'present', ['vdevs'])]
)
name = module.params.get('name')
state = module.params.get('state')
disable_new_features = module.params.get('disable_new_features')
force = module.params.get('force')
pool_properties = module.params.get('pool_properties')
filesystem_properties = module.params.get('filesystem_properties')
mountpoint = module.params.get('mountpoint')
altroot = module.params.get('altroot')
temp_name = module.params.get('temp_name')
vdevs = module.params.get('vdevs')
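    # Convert boolean property values to the 'on'/'off' strings that zpool and zfs expect.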
for property_key in ('pool_properties', 'filesystem_properties'):
for key, value in list(module.params.get(property_key, {}).items()):
if isinstance(value, bool):
module.params[property_key][key] = 'on' if value else 'off'
if state != 'absent':
for idx, vdev in enumerate(vdevs, start=1):
disks = vdev.get('disks')
if not isinstance(disks, list) or len(disks) == 0:
module.fail_json(msg="vdev #{idx}: at least one disk is required (got: {disks!r})".format(idx=idx, disks=disks))
result = dict(
name=name,
state=state,
)
zpool = Zpool(module, name, disable_new_features, force, pool_properties, filesystem_properties, mountpoint, altroot, temp_name, vdevs)
if state == 'present':
if zpool.exists():
vdev_layout_diff = zpool.diff_layout()
add_vdev_diff = zpool.add_vdevs() or {}
remove_vdev_diff = zpool.remove_vdevs() or {}
pool_properties_diff = zpool.set_pool_properties_if_changed()
filesystem_properties_diff = zpool.set_filesystem_properties_if_changed()
before = {}
after = {}
for diff in (vdev_layout_diff, pool_properties_diff, filesystem_properties_diff):
before.update(diff.get('before', {}))
after.update(diff.get('after', {}))
result['diff'] = {'before': before, 'after': after}
if module.check_mode:
prepared = ''
for diff in (add_vdev_diff, remove_vdev_diff):
if 'prepared' in diff:
prepared += (diff['prepared'] if not prepared else '\n' + diff['prepared'])
result['diff']['prepared'] = prepared
else:
if module.check_mode:
result['diff'] = zpool.create()
else:
before_vdevs = []
desired_vdevs = zpool.normalize_vdevs(zpool.vdevs)
zpool.create()
result['diff'] = {
'before': {'state': 'absent', 'vdevs': before_vdevs},
'after': {'state': state, 'vdevs': desired_vdevs},
}
elif state == 'absent':
if zpool.exists():
before_vdevs = zpool.get_current_layout()
zpool.destroy()
result['diff'] = {
'before': {'state': 'present', 'vdevs': before_vdevs},
'after': {'state': state, 'vdevs': []},
}
else:
result['diff'] = {}
result['diff']['before_header'] = name
result['diff']['after_header'] = name
result['changed'] = zpool.changed
module.exit_json(**result)
if __name__ == '__main__':
main()

@@ -0,0 +1,14 @@
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
azp/posix/3
azp/posix/vm
destructive
needs/privileged
skip/aix
skip/freebsd
skip/osx
skip/macos
skip/rhel
skip/docker

@@ -0,0 +1,34 @@
---
# Copyright (c) 2025, Tom Hesse <contact@tomhesse.xyz>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
zpool_single_disk_config:
- "{{ remote_tmp_dir }}/disk0.img"
zpool_mirror_disk_config:
- "{{ remote_tmp_dir }}/disk1.img"
- "{{ remote_tmp_dir }}/disk2.img"
zpool_raidz_disk_config:
- "{{ remote_tmp_dir }}/disk3.img"
- "{{ remote_tmp_dir }}/disk4.img"
zpool_vdevs_disk_config:
vdev1:
- "{{ remote_tmp_dir }}/disk5.img"
vdev2:
- "{{ remote_tmp_dir }}/disk6.img"
vdev3:
- "{{ remote_tmp_dir }}/disk7.img"
- "{{ remote_tmp_dir }}/disk8.img"
vdev4:
- "{{ remote_tmp_dir }}/disk9.img"
- "{{ remote_tmp_dir }}/disk10.img"
zpool_disk_configs: "{{ zpool_single_disk_config + zpool_mirror_disk_config + zpool_raidz_disk_config + (zpool_vdevs_disk_config.values() | flatten) }}"
zpool_single_disk_pool_name: spool
zpool_mirror_disk_pool_name: mpool
zpool_raidz_disk_pool_name: rpool
zpool_generic_pool_name: tank

@@ -0,0 +1,7 @@
---
# Copyright (c) 2025, Tom Hesse <contact@tomhesse.xyz>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
dependencies:
- setup_remote_tmp_dir

@@ -0,0 +1,147 @@
---
# Copyright (c) 2025, Tom Hesse <contact@tomhesse.xyz>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- name: Test adding a single disk vdev
block:
- name: Ensure a single disk pool exists
community.general.zpool:
name: "{{ zpool_generic_pool_name }}"
vdevs:
- disks: "{{ zpool_vdevs_disk_config.vdev1 }}"
state: present
- name: Add a single disk vdev
community.general.zpool:
name: "{{ zpool_generic_pool_name }}"
vdevs:
- disks: "{{ zpool_vdevs_disk_config.vdev1 }}"
- disks: "{{ zpool_vdevs_disk_config.vdev2 }}"
state: present
- name: Check if vdev was added
ansible.builtin.command:
cmd: "zpool status -P -L {{ zpool_generic_pool_name }}"
register: single_disk_pool_check
changed_when: false
- name: Assert that added disk is present
ansible.builtin.assert:
that:
- "zpool_vdevs_disk_config.vdev2[0] in single_disk_pool_check.stdout"
- name: Ensure the single disk pool is absent
community.general.zpool:
name: "{{ zpool_generic_pool_name }}"
state: absent
- name: Test adding a mirror vdev
block:
- name: Ensure a single disk pool exists
community.general.zpool:
name: "{{ zpool_generic_pool_name }}"
vdevs:
- disks: "{{ zpool_vdevs_disk_config.vdev1 }}"
state: present
- name: Add a mirror vdev
community.general.zpool:
name: "{{ zpool_generic_pool_name }}"
force: true # This is necessary because of the mismatched replication level
vdevs:
- disks: "{{ zpool_vdevs_disk_config.vdev1 }}"
- type: mirror
disks: "{{ zpool_vdevs_disk_config.vdev3 }}"
state: present
- name: Check if vdev was added
ansible.builtin.command:
cmd: "zpool status -P -L {{ zpool_generic_pool_name }}"
register: mirror_pool_check
changed_when: false
- name: Assert that added vdev is present
ansible.builtin.assert:
that:
- "zpool_vdevs_disk_config.vdev3[0] in mirror_pool_check.stdout"
- "zpool_vdevs_disk_config.vdev3[1] in mirror_pool_check.stdout"
- name: Ensure the single disk pool is absent
community.general.zpool:
name: "{{ zpool_generic_pool_name }}"
state: absent
- name: Test adding a raidz vdev
block:
- name: Ensure a single disk pool exists
community.general.zpool:
name: "{{ zpool_generic_pool_name }}"
vdevs:
- disks: "{{ zpool_vdevs_disk_config.vdev1 }}"
state: present
- name: Add a raidz vdev
community.general.zpool:
name: "{{ zpool_generic_pool_name }}"
force: true # This is necessary because of the mismatched replication level
vdevs:
- disks: "{{ zpool_vdevs_disk_config.vdev1 }}"
- type: raidz
disks: "{{ zpool_vdevs_disk_config.vdev3 }}"
state: present
- name: Check if vdev was added
ansible.builtin.command:
cmd: "zpool status -P -L {{ zpool_generic_pool_name }}"
register: raidz_pool_check
changed_when: false
- name: Assert that added vdev is present
ansible.builtin.assert:
that:
- "zpool_vdevs_disk_config.vdev3[0] in raidz_pool_check.stdout"
- "zpool_vdevs_disk_config.vdev3[1] in raidz_pool_check.stdout"
- name: Ensure the single disk pool is absent
community.general.zpool:
name: "{{ zpool_generic_pool_name }}"
state: absent
- name: Test removing an existing vdev
block:
- name: Ensure a pool with two mirrored vdevs exists
community.general.zpool:
name: "{{ zpool_generic_pool_name }}"
vdevs:
- type: mirror
disks: "{{ zpool_vdevs_disk_config.vdev3 }}"
- type: mirror
disks: "{{ zpool_vdevs_disk_config.vdev4 }}"
state: present
- name: Remove a vdev
community.general.zpool:
name: "{{ zpool_generic_pool_name }}"
vdevs:
- type: mirror
disks: "{{ zpool_vdevs_disk_config.vdev4 }}"
state: present
- name: Check if vdev was removed
ansible.builtin.command:
cmd: "zpool status -P -L {{ zpool_generic_pool_name }}"
register: remove_vdev_check
changed_when: false
- name: Assert that removed vdev is absent
ansible.builtin.assert:
that:
- "zpool_vdevs_disk_config.vdev3[0] not in remove_vdev_check.stdout"
- "zpool_vdevs_disk_config.vdev3[1] not in remove_vdev_check.stdout"
- "'Removal of vdev' in remove_vdev_check.stdout"
- name: Ensure the pool is absent
community.general.zpool:
name: "{{ zpool_generic_pool_name }}"
state: absent

@@ -0,0 +1,123 @@
---
# Copyright (c) 2025, Tom Hesse <contact@tomhesse.xyz>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- name: Test single disk pool creation
block:
- name: Ensure single disk pool exists
community.general.zpool:
name: "{{ zpool_single_disk_pool_name }}"
vdevs:
- disks: "{{ zpool_single_disk_config }}"
- name: Check if single disk pool exists
ansible.builtin.command:
cmd: "zpool list -H -o name,health {{ zpool_single_disk_pool_name }}"
register: single_disk_pool_check
changed_when: false
- name: Assert that single disk pool is online
ansible.builtin.assert:
that:
- "zpool_single_disk_pool_name in single_disk_pool_check.stdout"
- "'ONLINE' in single_disk_pool_check.stdout"
- name: Test mirror disk pool creation
block:
- name: Ensure mirror disk pool exists
community.general.zpool:
name: "{{ zpool_mirror_disk_pool_name }}"
vdevs:
- type: mirror
disks: "{{ zpool_mirror_disk_config }}"
- name: Check if mirror disk pool exists
ansible.builtin.command:
cmd: "zpool list -H -o name,health {{ zpool_mirror_disk_pool_name }}"
register: mirror_disk_pool_check
changed_when: false
- name: Assert that mirror disk pool is online
ansible.builtin.assert:
that:
- "zpool_mirror_disk_pool_name in mirror_disk_pool_check.stdout"
- "'ONLINE' in mirror_disk_pool_check.stdout"
- name: Test raidz disk pool creation
block:
- name: Ensure raidz disk pool exists
community.general.zpool:
name: "{{ zpool_raidz_disk_pool_name }}"
vdevs:
- type: raidz
disks: "{{ zpool_raidz_disk_config }}"
- name: Check if raidz disk pool exists
ansible.builtin.command:
cmd: "zpool list -H -o name,health {{ zpool_raidz_disk_pool_name }}"
register: raidz_disk_pool_check
changed_when: false
- name: Assert that raidz disk pool is online
ansible.builtin.assert:
that:
- "zpool_raidz_disk_pool_name in raidz_disk_pool_check.stdout"
- "'ONLINE' in raidz_disk_pool_check.stdout"
- name: Test single disk pool deletion
block:
- name: Ensure single disk pool is absent
community.general.zpool:
name: "{{ zpool_single_disk_pool_name }}"
state: absent
- name: Check if single disk pool is absent
ansible.builtin.command:
cmd: "zpool list -H -o name,health {{ zpool_single_disk_pool_name }}"
register: single_disk_pool_check
ignore_errors: true
changed_when: false
    - name: Assert that single disk pool is absent
ansible.builtin.assert:
that:
- "'no such pool' in single_disk_pool_check.stderr"
- name: Test mirror disk pool deletion
block:
- name: Ensure mirror disk pool is absent
community.general.zpool:
name: "{{ zpool_mirror_disk_pool_name }}"
state: absent
- name: Check if mirror disk pool is absent
ansible.builtin.command:
cmd: "zpool list -H -o name,health {{ zpool_mirror_disk_pool_name }}"
register: mirror_disk_pool_check
ignore_errors: true
changed_when: false
    - name: Assert that mirror disk pool is absent
ansible.builtin.assert:
that:
- "'no such pool' in mirror_disk_pool_check.stderr"
- name: Test raidz disk pool deletion
block:
- name: Ensure raidz disk pool is absent
community.general.zpool:
name: "{{ zpool_raidz_disk_pool_name }}"
state: absent
- name: Check if raidz disk pool is absent
ansible.builtin.command:
cmd: "zpool list -H -o name,health {{ zpool_raidz_disk_pool_name }}"
register: raidz_disk_pool_check
ignore_errors: true
changed_when: false
    - name: Assert that raidz disk pool is absent
ansible.builtin.assert:
that:
- "'no such pool' in raidz_disk_pool_check.stderr"

@@ -0,0 +1,15 @@
---
# Copyright (c) 2025, Tom Hesse <contact@tomhesse.xyz>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- name: Install required packages
community.general.apk:
name:
- zfs
- zfs-lts
- name: Load zfs module
community.general.modprobe:
name: zfs
state: present

@@ -0,0 +1,10 @@
---
# Copyright (c) 2025, Tom Hesse <contact@tomhesse.xyz>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- name: Install required packages
ansible.builtin.apt:
name:
- zfsutils-linux
- util-linux

@@ -0,0 +1,25 @@
---
# Copyright (c) 2025, Tom Hesse <contact@tomhesse.xyz>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- name: Execute integration tests
become: true
block:
    - name: Ensure disk files exist
ansible.builtin.command:
cmd: "dd if=/dev/zero of={{ item }} bs=1M count=256 conv=fsync"
creates: "{{ item }}"
loop: "{{ zpool_disk_configs }}"
- name: Include distribution specific install_requirements.yml
ansible.builtin.include_tasks: install_requirements_{{ ansible_distribution | lower }}.yml
- name: Include create_destroy.yml
ansible.builtin.include_tasks: create_destroy.yml
- name: Include add_remove_vdevs.yml
ansible.builtin.include_tasks: add_remove_vdevs.yml
- name: Include properties.yml
ansible.builtin.include_tasks: properties.yml

@@ -0,0 +1,73 @@
---
# Copyright (c) 2025, Tom Hesse <contact@tomhesse.xyz>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- name: Ensure pool with two mirrored disks exists
community.general.zpool:
name: "{{ zpool_generic_pool_name }}"
pool_properties:
ashift: 12
filesystem_properties:
compression: off
vdevs:
- type: mirror
disks: "{{ zpool_vdevs_disk_config.vdev3 }}"
- type: mirror
disks: "{{ zpool_vdevs_disk_config.vdev4 }}"
state: present
- name: Test changing of a pool property
block:
- name: Change ashift from 12 to 13
community.general.zpool:
name: "{{ zpool_generic_pool_name }}"
pool_properties:
ashift: 13
vdevs:
- type: mirror
disks: "{{ zpool_vdevs_disk_config.vdev3 }}"
- type: mirror
disks: "{{ zpool_vdevs_disk_config.vdev4 }}"
state: present
- name: Check ashift
ansible.builtin.command:
cmd: "zpool get -H -o value ashift {{ zpool_generic_pool_name }}"
changed_when: false
register: ashift_check
- name: Assert ashift has changed
ansible.builtin.assert:
that:
- "'13' in ashift_check.stdout"
- name: Test changing of a dataset property
block:
- name: Change compression from off to lz4
community.general.zpool:
name: "{{ zpool_generic_pool_name }}"
filesystem_properties:
compression: lz4
vdevs:
- type: mirror
disks: "{{ zpool_vdevs_disk_config.vdev3 }}"
- type: mirror
disks: "{{ zpool_vdevs_disk_config.vdev4 }}"
state: present
- name: Check compression
ansible.builtin.command:
cmd: "zfs get -H -o value compression {{ zpool_generic_pool_name }}"
changed_when: false
register: compression_check
- name: Assert compression has changed
ansible.builtin.assert:
that:
- "'lz4' in compression_check.stdout"
- name: Cleanup pool
community.general.zpool:
name: "{{ zpool_generic_pool_name }}"
state: absent