From 36b14615e34a4eef9dd18864da588d719a408650 Mon Sep 17 00:00:00 2001 From: Jan Pokorny Date: Tue, 30 Apr 2024 16:26:38 +0200 Subject: [PATCH] feat: PV resize support There is a use case when the physical device size can change (e.g. on VM). We need to be able to change the size of the LVM PV to accommodate that. This adds a new pool parameter 'grow_to_fill'. When set, pool PVs will try to take all available space on their respective devices. Defaults to False. Requires a blivet version that supports this feature. For tests this is checked by using 'does_library_support' script. --- defaults/main.yml | 1 + library/blivet.py | 22 ++++++- tests/test-verify-pool-members.yml | 17 ++++++ tests/tests_lvm_pool_pv_grow.yml | 95 +++++++++++++++++++++++++++++ tests/verify-pool-member-pvsize.yml | 23 +++++++ 5 files changed, 156 insertions(+), 2 deletions(-) create mode 100644 tests/tests_lvm_pool_pv_grow.yml create mode 100644 tests/verify-pool-member-pvsize.yml diff --git a/defaults/main.yml b/defaults/main.yml index 755364ae..f57a4a94 100644 --- a/defaults/main.yml +++ b/defaults/main.yml @@ -13,6 +13,7 @@ storage_pool_defaults: type: lvm disks: [] volumes: [] + grow_to_fill: false encryption: false encryption_password: null diff --git a/library/blivet.py b/library/blivet.py index 20389ea6..9c24f604 100644 --- a/library/blivet.py +++ b/library/blivet.py @@ -51,6 +51,9 @@ encryption_password: description: encryption_password type: str + grow_to_fill: description: grow_to_fill + type: bool name: description: name type: str @@ -370,7 +373,7 @@ from blivet3.callbacks import callbacks from blivet3 import devicelibs from blivet3 import devices - from blivet3.deviceaction import ActionConfigureFormat, ActionAddMember, ActionRemoveMember + from blivet3.deviceaction import ActionConfigureFormat, ActionResizeFormat, ActionAddMember, ActionRemoveMember from blivet3.devicefactory import DEFAULT_THPOOL_RESERVE from blivet3.flags import flags as blivet_flags from blivet3.formats import 
fslib, get_format @@ -386,7 +389,7 @@ from blivet.callbacks import callbacks from blivet import devicelibs from blivet import devices - from blivet.deviceaction import ActionConfigureFormat, ActionAddMember, ActionRemoveMember + from blivet.deviceaction import ActionConfigureFormat, ActionResizeFormat, ActionAddMember, ActionRemoveMember from blivet.devicefactory import DEFAULT_THPOOL_RESERVE from blivet.flags import flags as blivet_flags from blivet.formats import fslib, get_format @@ -412,6 +415,7 @@ def __getattr__(self, val): blivet_flags.allow_online_fs_resize = True blivet_flags.gfs2 = True set_up_logging() + log = logging.getLogger(BLIVET_PACKAGE + ".ansible") # XXX add support for LVM RAID raid0 level @@ -1744,6 +1748,18 @@ def _manage_members(self): add_disks = [d for d in self._disks if d not in self._device.ancestors] remove_disks = [pv for pv in self._device.pvs if not any(d in pv.ancestors for d in self._disks)] + if self._pool['grow_to_fill']: + grow_pv_candidates = [pv for pv in self._device.pvs if not pv in remove_disks and not pv in add_disks] + + for pv in grow_pv_candidates: + if abs(self._device.size - self._device.current_size) < 2*self._device.pe_size: + continue + + pv.format.update_size_info() # set pv to be resizable + pv.grow_to_fill = True + ac = ActionResizeFormat(pv, self._device.size) + self._blivet.devicetree.actions.add(ac) + if not (add_disks or remove_disks): return @@ -2132,6 +2148,7 @@ def run_module(): encryption_key_size=dict(type='int'), encryption_luks_version=dict(type='str'), encryption_password=dict(type='str', no_log=True), + grow_to_fill=dict(type='bool'), name=dict(type='str'), raid_level=dict(type='str'), raid_device_count=dict(type='int'), @@ -2276,6 +2293,7 @@ def action_dict(action): # execute the scheduled actions, committing changes to disk callbacks.action_executed.add(record_action) callbacks.action_executed.add(ensure_udev_update) + try: b.devicetree.actions.process(devices=b.devicetree.devices, 
dry_run=module.check_mode) except Exception as e: diff --git a/tests/test-verify-pool-members.yml b/tests/test-verify-pool-members.yml index 0cca087d..ea85f24a 100644 --- a/tests/test-verify-pool-members.yml +++ b/tests/test-verify-pool-members.yml @@ -70,6 +70,23 @@ loop_var: pv when: storage_test_pool.type == 'lvm' +- name: Check that blivet supports PV grow to fill + ansible.builtin.script: >- + scripts/does_library_support.py + blivet.formats.lvmpv.LVMPhysicalVolume.grow_to_fill + register: grow_supported + changed_when: false + +- name: Verify that PVs fill the whole devices when they should + include_tasks: verify-pool-member-pvsize.yml + loop: "{{ _storage_test_pool_pvs | default([], true) }}" + loop_control: + loop_var: st_pool_pv + when: + - grow_supported.stdout | trim == 'True' + - storage_test_pool.type == "lvm" + - storage_test_pool.grow_to_fill | bool + - name: Check MD RAID include_tasks: verify-pool-md.yml diff --git a/tests/tests_lvm_pool_pv_grow.yml b/tests/tests_lvm_pool_pv_grow.yml new file mode 100644 index 00000000..765ee27e --- /dev/null +++ b/tests/tests_lvm_pool_pv_grow.yml @@ -0,0 +1,95 @@ +--- +- name: Test create disk and remove + hosts: all + become: true + vars: + storage_safe_mode: false + mount_location1: '/opt/test1' + mount_location2: '/opt/test2' + pv_size: '8g' + volume1_size: '2g' + volume2_size: '3g' + tags: + - tests::lvm + + tasks: + - name: Run the role + include_role: + name: linux-system-roles.storage + + - name: Mark tasks to be skipped + set_fact: + storage_skip_checks: + - blivet_available + - service_facts + - "{{ (lookup('env', + 'SYSTEM_ROLES_REMOVE_CLOUD_INIT') in ['', 'false']) | + ternary('packages_installed', '') }}" + + - name: Get unused disks + include_tasks: get_unused_disk.yml + vars: + max_return: 1 + + - name: Create PV with a space to grow + command: "pvcreate --setphysicalvolumesize {{ pv_size }} /dev/{{ unused_disks[0] }}" + + # VG has to be present, the role otherwise automatically reformats empty PV, 
+ # taking all available space + - name: Create VG + command: "vgcreate foo /dev/{{ unused_disks[0] }}" + + - name: Create LVM + include_role: + name: linux-system-roles.storage + vars: + storage_pools: + - name: foo + disks: "{{ unused_disks }}" + grow_to_fill: true + state: present + volumes: + - name: test1 + size: "{{ volume1_size }}" + mount_point: "{{ mount_location1 }}" + - name: test2 + size: "{{ volume2_size }}" + mount_point: "{{ mount_location2 }}" + + - name: Verify role results + include_tasks: verify-role-results.yml + + - name: Rerun the task to verify idempotence + include_role: + name: linux-system-roles.storage + vars: + storage_pools: + - name: foo + disks: "{{ unused_disks }}" + grow_to_fill: true + state: present + volumes: + - name: test1 + size: "{{ volume1_size }}" + mount_point: "{{ mount_location1 }}" + - name: test2 + size: "{{ volume2_size }}" + mount_point: "{{ mount_location2 }}" + + - name: Verify role results + include_tasks: verify-role-results.yml + + - name: Remove 'foo' pool created above + include_role: + name: linux-system-roles.storage + vars: + storage_pools: + - name: foo + disks: "{{ unused_disks }}" + state: "absent" + volumes: + - name: test1 + - name: test2 + + - name: Verify role results + include_tasks: verify-role-results.yml diff --git a/tests/verify-pool-member-pvsize.yml b/tests/verify-pool-member-pvsize.yml new file mode 100644 index 00000000..bf4227e1 --- /dev/null +++ b/tests/verify-pool-member-pvsize.yml @@ -0,0 +1,23 @@ +--- +- name: Get actual PV size + command: "pvs --noheadings --nosuffix --units b -o SIZE {{ st_pool_pv }}" + register: actual_pv_size + changed_when: false + +- name: Convert blkinfo size to bytes + bsize: + size: "{{ storage_test_blkinfo.info[st_pool_pv]['size'] }}" + register: dev_size + +- name: Verify each PV size + assert: + that: (dev_size.bytes - actual_pv_size.stdout | int) | + abs / actual_pv_size.stdout | int < 0.04 + msg: >- + PV resize failure; size difference too big + (device 
size: {{ dev_size.bytes }}) + (actual PV size: {{ actual_pv_size.stdout }}) + +- name: Clean up test variables + set_fact: + actual_pv_size: null