diff --git a/README.md b/README.md
index 7f6f5c58..73bc3fc8 100644
--- a/README.md
+++ b/README.md
@@ -53,6 +53,17 @@ keys:
   This specifies the type of pool to manage.
   Valid values for `type`: `lvm`.
 
+- `shared`
+
+  If set to `true`, the role creates or manages a shared volume group. This
+  requires the lvmlockd and dlm services to be configured and running.
+
+  Default: `false`
+
+  __WARNING__: Modifying the `shared` value on an existing pool is a
+  destructive operation. The pool itself will be removed as part of the
+  process.
+
 - `disks`
 
   A list which specifies the set of disks to use as backing storage for the pool.
diff --git a/defaults/main.yml b/defaults/main.yml
index 7703c982..755364ae 100644
--- a/defaults/main.yml
+++ b/defaults/main.yml
@@ -27,6 +27,8 @@ storage_pool_defaults:
   raid_chunk_size: null
   raid_metadata_version: null
 
+  shared: false
+
 storage_volume_defaults:
   state: "present"
   type: lvm
diff --git a/library/blivet.py b/library/blivet.py
index 5e03a9d8..db4d398f 100644
--- a/library/blivet.py
+++ b/library/blivet.py
@@ -1527,7 +1527,7 @@ def _create(self):
         if not self._device:
             members = self._manage_encryption(self._create_members())
             try:
-                pool_device = self._blivet.new_vg(name=self._pool['name'], parents=members)
+                pool_device = self._blivet.new_vg(name=self._pool['name'], parents=members, shared=self._pool['shared'])
             except Exception as e:
                 raise BlivetAnsibleError("failed to set up pool '%s': %s" % (self._pool['name'], str(e)))
 
@@ -1823,6 +1823,7 @@ def run_module():
             raid_spare_count=dict(type='int'),
             raid_metadata_version=dict(type='str'),
             raid_chunk_size=dict(type='str'),
+            shared=dict(type='bool'),
             state=dict(type='str', default='present', choices=['present', 'absent']),
             type=dict(type='str'),
             volumes=dict(type='list', elements='dict', default=list(),
diff --git a/tests/collection-requirements.yml b/tests/collection-requirements.yml
new file mode 100644
index 00000000..f85d0521
--- /dev/null
+++ b/tests/collection-requirements.yml
@@ -0,0 +1,3 @@
+---
+collections:
+  - fedora.linux_system_roles
diff --git a/tests/test-verify-pool.yml b/tests/test-verify-pool.yml
index 55efa196..2ba1b4db 100644
--- a/tests/test-verify-pool.yml
+++ b/tests/test-verify-pool.yml
@@ -15,6 +15,20 @@
 #   compression
 #   deduplication
 
+- name: Get VG shared value status
+  command: vgs --noheadings --binary -o shared {{ storage_test_pool.name }}
+  register: vgs_dump
+  when: storage_test_pool.type == 'lvm' and storage_test_pool.state == 'present'
+  changed_when: false
+
+- name: Verify that VG shared value checks out
+  assert:
+    that: (storage_test_pool.shared | bool) == ('1' in vgs_dump.stdout)
+    msg: >-
+      Shared VG presence ({{ '1' in vgs_dump.stdout }})
+      does not match the requested shared setting ({{ storage_test_pool.shared }})
+  when: storage_test_pool.type == 'lvm' and storage_test_pool.state == 'present'
+
 - name: Verify pool subset
   include_tasks: "test-verify-pool-{{ storage_test_pool_subset }}.yml"
   loop: "{{ _storage_pool_tests }}"
diff --git a/tests/tests_lvm_pool_shared.yml b/tests/tests_lvm_pool_shared.yml
new file mode 100644
index 00000000..29ef3fd4
--- /dev/null
+++ b/tests/tests_lvm_pool_shared.yml
@@ -0,0 +1,145 @@
+---
+- name: Test LVM shared pools
+  hosts: all
+  become: true
+  vars:
+    storage_safe_mode: false
+    storage_use_partitions: true
+    mount_location1: '/opt/test1'
+    volume1_size: '4g'
+
+  tasks:
+    # Skip this test if run on localhost.
+    # A local run will not set up lvmlockd
+    # correctly and the test would fail.
+    - name: Skip test if the managed node is localhost
+      meta: end_host
+      when: inventory_hostname == "localhost"
+
+    - name: Gather package facts
+      package_facts:
+
+    - name: Set blivet package name
+      set_fact:
+        blivet_pkg_name: "{{ ansible_facts.packages |
+          select('search', 'blivet') | select('search', 'python') | list }}"
+
+    - name: Set blivet package version
+      set_fact:
+        blivet_pkg_version: "{{
+          ansible_facts.packages[blivet_pkg_name[0]][0]['version']
+          + '-' + ansible_facts.packages[blivet_pkg_name[0]][0]['release'] }}"
+
+    - name: Skip test if the blivet version does not support shared VGs
+      meta: end_host
+      when: blivet_pkg_version is version("3.8.2-1", "<")
+
+    - name: Set up test environment for the ha_cluster role
+      include_role:
+        name: fedora.linux_system_roles.ha_cluster
+        tasks_from: test_setup.yml
+
+    - name: Create cluster
+      ansible.builtin.include_role:
+        name: fedora.linux_system_roles.ha_cluster
+      vars:
+        ha_cluster_cluster_name: rhel9-1node
+        # Users should vault-encrypt the password
+        ha_cluster_hacluster_password: hapasswd
+        ha_cluster_extra_packages:
+          - dlm
+          - lvm2-lockd
+        ha_cluster_cluster_properties:
+          - attrs:
+              # Don't do this in production
+              - name: stonith-enabled
+                value: 'false'
+        ha_cluster_resource_primitives:
+          - id: dlm
+            agent: 'ocf:pacemaker:controld'
+            instance_attrs:
+              - attrs:
+                  # Don't do this in production
+                  - name: allow_stonith_disabled
+                    value: 'true'
+          - id: lvmlockd
+            agent: 'ocf:heartbeat:lvmlockd'
+        ha_cluster_resource_groups:
+          - id: locking
+            resource_ids:
+              - dlm
+              - lvmlockd
+
+    - name: Run the role
+      include_role:
+        name: linux-system-roles.storage
+
+    - name: Get unused disks
+      include_tasks: get_unused_disk.yml
+      vars:
+        max_return: 1
+
+    - name: >-
+        Create a shared LVM pool with a volume mounted on
+        {{ mount_location1 }}
+      include_role:
+        name: linux-system-roles.storage
+      vars:
+        storage_pools:
+          - name: vg1
+            disks: "{{ unused_disks }}"
+            type: lvm
+            shared: true
+            state: present
+            volumes:
+              - name: lv1
+                size: "{{ volume1_size }}"
+                mount_point: "{{ mount_location1 }}"
+
+    - name: Verify role results
+      include_tasks: verify-role-results.yml
+
+    - name: Repeat the previous step to verify idempotence
+      include_role:
+        name: linux-system-roles.storage
+      vars:
+        storage_pools:
+          - name: vg1
+            disks: "{{ unused_disks }}"
+            type: lvm
+            shared: true
+            state: present
+            volumes:
+              - name: lv1
+                size: "{{ volume1_size }}"
+                mount_point: "{{ mount_location1 }}"
+
+    - name: Verify role results
+      include_tasks: verify-role-results.yml
+
+    - name: >-
+        Remove the device created above, mounted on
+        {{ mount_location1 }}
+      include_role:
+        name: linux-system-roles.storage
+      vars:
+        storage_pools:
+          - name: vg1
+            disks: "{{ unused_disks }}"
+            type: lvm
+            shared: true
+            state: absent
+            volumes:
+              - name: lv1
+                size: "{{ volume1_size }}"
+                mount_point: "{{ mount_location1 }}"
+
+    - name: Verify role results
+      include_tasks: verify-role-results.yml
+
+    - name: Remove cluster
+      ansible.builtin.include_role:
+        name: fedora.linux_system_roles.ha_cluster
+      vars:
+        ha_cluster_cluster_name: rhel9-1node
+        ha_cluster_cluster_present: false
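
For context, here is a minimal sketch of how a playbook could consume the new `shared` pool option once this change lands, mirroring the test above. The pool name, backing disk, volume name, and mount point are illustrative assumptions, and dlm plus lvmlockd are assumed to be already configured and running (for example via the ha_cluster role, as in the test):

```yaml
---
# Illustrative sketch only: assumes dlm and lvmlockd are already configured
# and running on the managed node (e.g. via fedora.linux_system_roles.ha_cluster).
- hosts: all
  become: true
  tasks:
    - name: Create a shared VG with one mounted LV
      include_role:
        name: linux-system-roles.storage
      vars:
        storage_pools:
          - name: shared_vg       # example pool name
            type: lvm
            shared: true          # option added by this change; defaults to false
            disks:
              - /dev/sdb          # example cluster-visible backing disk
            state: present
            volumes:
              - name: shared_lv   # example volume name
                size: 4g
                mount_point: /opt/test1
```

Note that, per the README warning added above, changing `shared` on an existing pool later is destructive: the pool is removed and re-created as part of the process.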