From 939ebebb61c3859a8a8bb496baa6ad2784658399 Mon Sep 17 00:00:00 2001
From: David Lehman
Date: Wed, 25 Sep 2019 14:32:29 -0400
Subject: [PATCH 01/28] Add flag to prevent implicit removal of existing device/fs.

---
 defaults/main.yml           |  1 +
 library/blivet.py           | 23 ++++++++--
 tasks/main-blivet.yml       |  1 +
 tests/tests_disk_errors.yml | 81 ++++++++++++++++++++++++++++++++++
 tests/tests_lvm_errors.yml  | 86 +++++++++++++++++++++++++++++++++++++
 5 files changed, 189 insertions(+), 3 deletions(-)

diff --git a/defaults/main.yml b/defaults/main.yml
index 7b500e5e..4b07b4e6 100644
--- a/defaults/main.yml
+++ b/defaults/main.yml
@@ -3,6 +3,7 @@
 storage_provider: "blivet"
 storage_use_partitions: null
 storage_disklabel_type: null # leave unset to allow the role to select an appropriate label type
+storage_safe_mode: false # do not remove devices as needed to make space for new volumes
 
 storage_pool_defaults:
   state: "present"
diff --git a/library/blivet.py b/library/blivet.py
index 5a408801..bf5a4dec 100644
--- a/library/blivet.py
+++ b/library/blivet.py
@@ -31,6 +31,9 @@
     disklabel_type:
         description:
            - disklabel type string (eg: 'gpt') to use, overriding the built-in logic in blivet
+    safe_mode:
+        description:
+            - boolean indicating that existing devices and file system should never be removed
 
 author:
     - David Lehman (dlehman@redhat.com)
@@ -116,6 +119,7 @@
 
 use_partitions = None # create partitions on pool backing device disks?
 disklabel_type = None # user-specified disklabel type
+safe_mode = None # do not remove any existing devices or formatting
 
 
 class BlivetAnsibleError(Exception):
@@ -193,6 +197,9 @@ def _resize(self):
             raise BlivetAnsibleError("invalid size specification for volume '%s': '%s'" % (self._volume['name'], self._volume['size']))
 
         if size and self._device.resizable and self._device.size != size:
+            if safe_mode:
+                raise BlivetAnsibleError("cannot resize existing volume '%s' in safe mode" % self._volume['name'])
+
             if self._device.format.resizable:
                 self._device.format.update_size_info()
 
@@ -214,6 +221,9 @@ def _reformat(self):
         if self._device.format.type == fmt.type:
             return
 
+        if safe_mode:
+            raise BlivetAnsibleError("cannot remove existing formatting on volume '%s' in safe mode" % self._volume['name'])
+
         if self._device.format.status:
             self._device.format.teardown()
         self._blivet.format_device(self._device, fmt)
@@ -428,8 +438,11 @@ def _create_members(self):
         """ Schedule actions as needed to ensure pool member devices exist.
""" members = list() for disk in self._disks: - if not disk.isleaf: - self._blivet.devicetree.recursive_remove(disk) + if not disk.isleaf or disk.format.type is not None: + if not safe_mode: + self._blivet.devicetree.recursive_remove(disk) + else: + raise BlivetAnsibleError("cannot remove existing formatting and/or devices on disk '%s' (pool '%s') in safe mode" % (disk.name, self._pool['name'])) if use_partitions: label = get_format("disklabel", device=disk.path) @@ -490,7 +503,10 @@ def _look_up_device(self): def _create(self): if self._device.format.type != "disklabel" or \ self._device.format.label_type != disklabel_type: - self._blivet.devicetree.recursive_remove(self._device, remove_device=False) + if not safe_mode: + self._blivet.devicetree.recursive_remove(self._device, remove_device=False) + else: + raise BlivetAnsibleError("cannot remove existing formatting and/or devices on disk '%s' (pool '%s') in safe mode" % (disk.name, self._pool['name'])) label = get_format("disklabel", device=self._device.path, label_type=disklabel_type) self._blivet.format_device(self._device, label) @@ -660,6 +676,7 @@ def run_module(): volumes=dict(type='list'), packages_only=dict(type='bool', required=False, default=False), disklabel_type=dict(type='str', required=False, default=None), + safe_mode=dict(type='bool', required=False, default=False), use_partitions=dict(type='bool', required=False, default=True)) # seed the result dict in the object diff --git a/tasks/main-blivet.yml b/tasks/main-blivet.yml index 061195c1..88a5a012 100644 --- a/tasks/main-blivet.yml +++ b/tasks/main-blivet.yml @@ -105,6 +105,7 @@ volumes: "{{ _storage_volumes }}" use_partitions: "{{ storage_use_partitions }}" disklabel_type: "{{ storage_disklabel_type }}" + safe_mode: "{{ storage_safe_mode }}" register: blivet_output - debug: diff --git a/tests/tests_disk_errors.yml b/tests/tests_disk_errors.yml index 36eec41e..9f0b7ef4 100644 --- a/tests/tests_disk_errors.yml +++ b/tests/tests_disk_errors.yml @@ -5,6 +5,14 @@ mount_location: '/opt/test1' tasks: + - include_role: + name: storage + + - include_tasks: get_unused_disk.yml + vars: + min_size: "10g" + max_return: 1 + - name: Verify that the play fails with the expected error message block: - name: Create a disk volume mounted at "{{ mount_location }}" @@ -22,3 +30,76 @@ that: "{{ blivet_output.failed }}" msg: "Expected error message not found for missing disk" ignore_errors: yes + + - name: Test for correct handling of safe_mode + block: + - name: Create a file system on disk + include_role: + name: storage + vars: + storage_volumes: + - name: test1 + type: disk + fs_type: 'ext4' + disks: "{{ unused_disks }}" + + - name: Try to replace the file system on disk in safe mode + include_role: + name: storage + vars: + storage_safe_mode: true + storage_volumes: + - name: test1 + type: disk + fs_type: 'ext3' + disks: "{{ unused_disks }}" + + - name: Verify the output + assert: + that: "{{ blivet_output.failed and + blivet_output.msg|regex_search('cannot remove existing formatting on volume.*in safe mode') and + not blivet_output.changed }}" + msg: "Unexpected behavior w/ existing data on specified disks" + + - name: Try to create a partition pool on the disk already containing a file system in safe_mode + include_role: + name: storage + vars: + storage_safe_mode: true + storage_pools: + - name: foo + disks: "{{ unused_disks }}" + type: partition + + - name: Verify the output + assert: + that: "{{ blivet_output.failed and + blivet_output.msg|regex_search('cannot remove existing 
formatting and/or devices on disk.*in safe mode') and + not blivet_output.changed }}" + msg: "Unexpected behavior w/ existing data on specified disks" + + - name: Create a partition pool on the disk already containing a file system w/o safe_mode + include_role: + name: storage + vars: + storage_pools: + - name: foo + disks: "{{ unused_disks }}" + type: partition + + - name: Verify the output + assert: + that: "{{ not blivet_output.failed }}" + msg: "failed to create partition pool over existing file system using default value of safe_mode" + + - name: Clean up + include_role: + name: storage + vars: + storage_volumes: + - name: test1 + type: disk + disks: "{{ unused_disks }}" + present: false + + ignore_errors: yes diff --git a/tests/tests_lvm_errors.yml b/tests/tests_lvm_errors.yml index ab236744..05a2128c 100644 --- a/tests/tests_lvm_errors.yml +++ b/tests/tests_lvm_errors.yml @@ -149,3 +149,89 @@ not blivet_output.changed }}" msg: "Unexpected behavior w/ LVM volume defined outside of any pool" ignore_errors: yes + + - name: Test for correct handling of safe_mode + block: + - name: Create a pool + include_role: + name: storage + vars: + storage_pools: + - name: testpool1 + type: lvm + disks: "{{ unused_disks }}" + volumes: + - name: testvol1 + fs_type: 'ext4' + size: '1g' + + - name: Try to replace file system in safe mode + include_role: + name: storage + vars: + storage_safe_mode: true + storage_pools: + - name: testpool1 + type: lvm + disks: "{{ unused_disks }}" + volumes: + - name: testvol1 + fs_type: 'ext3' + size: '1g' + + - name: Verify the output + assert: + that: "{{ blivet_output.failed and + blivet_output.msg|regex_search('cannot remove existing formatting on volume.*in safe mode') and + not blivet_output.changed }}" + msg: "Unexpected behavior w/ existing data on specified disks" + + - name: Try to resize in safe mode + include_role: + name: storage + vars: + storage_safe_mode: true + storage_pools: + - name: testpool1 + type: lvm + disks: "{{ unused_disks }}" + volumes: + - name: testvol1 + fs_type: 'ext4' + size: '2g' + + - name: Verify the output + assert: + that: "{{ blivet_output.failed and + blivet_output.msg|regex_search('cannot resize existing volume.*in safe mode') and + not blivet_output.changed }}" + msg: "Unexpected behavior w/ existing data on specified disks" + + - name: Try to create LVM pool on disks that already belong to an existing pool + include_role: + name: storage + vars: + storage_safe_mode: true + storage_pools: + - name: foo + disks: "{{ unused_disks }}" + type: lvm + + - name: Verify the output + assert: + that: "{{ blivet_output.failed and + blivet_output.msg|regex_search('cannot remove existing formatting and/or devices on disk.*in safe mode') and + not blivet_output.changed }}" + msg: "Unexpected behavior w/ existing data on specified disks" + + - name: Clean up + include_role: + name: storage + vars: + storage_pools: + - name: testpool1 + type: lvm + disks: "{{ unused_disks }}" + state: absent + + ignore_errors: yes From 55767dcddc8dcae9b3d25e96fff155f0ede4ae51 Mon Sep 17 00:00:00 2001 From: David Lehman Date: Wed, 25 Sep 2019 15:23:29 -0400 Subject: [PATCH 02/28] Improve accuracy of safe mode description. 
---
 defaults/main.yml | 2 +-
 library/blivet.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/defaults/main.yml b/defaults/main.yml
index 4b07b4e6..8306f35e 100644
--- a/defaults/main.yml
+++ b/defaults/main.yml
@@ -3,7 +3,7 @@
 storage_provider: "blivet"
 storage_use_partitions: null
 storage_disklabel_type: null # leave unset to allow the role to select an appropriate label type
-storage_safe_mode: false # do not remove devices as needed to make space for new volumes
+storage_safe_mode: false # fail instead of implicitly/automatically removing devices or formatting
 
 storage_pool_defaults:
   state: "present"
diff --git a/library/blivet.py b/library/blivet.py
index bf5a4dec..592fd752 100644
--- a/library/blivet.py
+++ b/library/blivet.py
@@ -33,7 +33,7 @@
             - disklabel type string (eg: 'gpt') to use, overriding the built-in logic in blivet
     safe_mode:
         description:
-            - boolean indicating that existing devices and file system should never be removed
+            - boolean indicating that we should fail rather than implicitly/automatically removing devices or formatting
 
 author:
     - David Lehman (dlehman@redhat.com)

From 5d1645d19f605112efbadcd5559e09fb1c662aa0 Mon Sep 17 00:00:00 2001
From: David Lehman
Date: Tue, 1 Oct 2019 11:20:01 -0400
Subject: [PATCH 03/28] Do not protect resize ops in safe mode.

---
 library/blivet.py          | 3 ---
 tests/tests_lvm_errors.yml | 4 +---
 2 files changed, 1 insertion(+), 6 deletions(-)

diff --git a/library/blivet.py b/library/blivet.py
index 592fd752..577124a4 100644
--- a/library/blivet.py
+++ b/library/blivet.py
@@ -197,9 +197,6 @@ def _resize(self):
             raise BlivetAnsibleError("invalid size specification for volume '%s': '%s'" % (self._volume['name'], self._volume['size']))
 
         if size and self._device.resizable and self._device.size != size:
-            if safe_mode:
-                raise BlivetAnsibleError("cannot resize existing volume '%s' in safe mode" % self._volume['name'])
-
             if self._device.format.resizable:
                 self._device.format.update_size_info()
 
diff --git a/tests/tests_lvm_errors.yml b/tests/tests_lvm_errors.yml
index 05a2128c..adcbb905 100644
--- a/tests/tests_lvm_errors.yml
+++ b/tests/tests_lvm_errors.yml
@@ -202,9 +202,7 @@
 
         - name: Verify the output
           assert:
-            that: "{{ blivet_output.failed and
-                      blivet_output.msg|regex_search('cannot resize existing volume.*in safe mode') and
-                      not blivet_output.changed }}"
+            that: "{{ not blivet_output.failed and blivet_output.changed }}"
             msg: "Unexpected behavior w/ existing data on specified disks"
 
         - name: Try to create LVM pool on disks that already belong to an existing pool

From 1392ad6a168a32a4c2c33b72f03cad851fec9d09 Mon Sep 17 00:00:00 2001
From: David Lehman
Date: Tue, 1 Oct 2019 16:29:01 -0400
Subject: [PATCH 04/28] Enable safe mode by default.

---
 defaults/main.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/defaults/main.yml b/defaults/main.yml
index 8306f35e..476616b9 100644
--- a/defaults/main.yml
+++ b/defaults/main.yml
@@ -3,7 +3,7 @@
 storage_provider: "blivet"
 storage_use_partitions: null
 storage_disklabel_type: null # leave unset to allow the role to select an appropriate label type
-storage_safe_mode: false # fail instead of implicitly/automatically removing devices or formatting
+storage_safe_mode: true # fail instead of implicitly/automatically removing devices or formatting
 
 storage_pool_defaults:
   state: "present"

From efa0cdbce9f00c20d5f02bca392896c5a9a2532f Mon Sep 17 00:00:00 2001
From: David Lehman
Date: Tue, 1 Oct 2019 16:29:12 -0400
Subject: [PATCH 05/28] Document safe mode.
--- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index c2debc97..f808adcd 100644 --- a/README.md +++ b/README.md @@ -73,6 +73,9 @@ The `mount_point` specifies the directory on which the file system will be mount ##### `mount_options` The `mount_options` specifies custom mount options as a string, e.g.: 'ro'. +#### `storage_safe_mode` +When true (the default), an error will occur instead of automatically removing existing devices and/or formatting. + Example Playbook ---------------- From 26496e8e87b32b399d14b3d0a7d9bd63e848819c Mon Sep 17 00:00:00 2001 From: David Lehman Date: Thu, 3 Oct 2019 13:54:36 -0400 Subject: [PATCH 06/28] Actually set safe_mode var in python. --- library/blivet.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/library/blivet.py b/library/blivet.py index 577124a4..a47e6329 100644 --- a/library/blivet.py +++ b/library/blivet.py @@ -706,6 +706,9 @@ def run_module(): global use_partitions use_partitions = module.params['use_partitions'] + global safe_mode + safe_mode = module.params['safe_mode'] + b = Blivet() b.reset() fstab = FSTab(b) From 7c9e6ee36c405b2c23b111c5c153f725f1b920b2 Mon Sep 17 00:00:00 2001 From: David Lehman Date: Thu, 3 Oct 2019 13:55:01 -0400 Subject: [PATCH 07/28] Update tests to adjust safe mode as needed. --- tests/tests_change_disk_fs.yml | 1 + tests/tests_change_fs.yml | 1 + tests/tests_change_fs_use_partitions.yml | 1 + tests/tests_create_disk_then_remove.yml | 1 + tests/tests_create_lvm_pool_then_remove.yml | 1 + tests/tests_create_partition_volume_then_remove.yml | 1 + tests/tests_disk_errors.yml | 1 + 7 files changed, 7 insertions(+) diff --git a/tests/tests_change_disk_fs.yml b/tests/tests_change_disk_fs.yml index b6aa80ba..f7962c6e 100644 --- a/tests/tests_change_disk_fs.yml +++ b/tests/tests_change_disk_fs.yml @@ -2,6 +2,7 @@ - hosts: all become: true vars: + storage_safe_mode: false mount_location: '/opt/test' volume_size: '5g' fs_type_after: "{{ 'ext3' if (ansible_distribution == 'RedHat' and ansible_distribution_major_version == '6') else 'ext4' }}" diff --git a/tests/tests_change_fs.yml b/tests/tests_change_fs.yml index cca23ebe..b88e7689 100644 --- a/tests/tests_change_fs.yml +++ b/tests/tests_change_fs.yml @@ -2,6 +2,7 @@ - hosts: all become: true vars: + storage_safe_mode: false mount_location: '/opt/test1' volume_size: '5g' fs_after: "{{ (ansible_distribution == 'RedHat' and ansible_distribution_major_version == '6') | ternary('ext4', 'xfs') }}" diff --git a/tests/tests_change_fs_use_partitions.yml b/tests/tests_change_fs_use_partitions.yml index e4aa76cd..eb93c116 100644 --- a/tests/tests_change_fs_use_partitions.yml +++ b/tests/tests_change_fs_use_partitions.yml @@ -2,6 +2,7 @@ - hosts: all become: true vars: + storage_safe_mode: false storage_use_partitions: true mount_location: '/opt/test1' volume_size: '5g' diff --git a/tests/tests_create_disk_then_remove.yml b/tests/tests_create_disk_then_remove.yml index b19ae352..c5290eb1 100644 --- a/tests/tests_create_disk_then_remove.yml +++ b/tests/tests_create_disk_then_remove.yml @@ -2,6 +2,7 @@ - hosts: all become: true vars: + storage_safe_mode: false mount_location: '/opt/test1' tasks: diff --git a/tests/tests_create_lvm_pool_then_remove.yml b/tests/tests_create_lvm_pool_then_remove.yml index 6b259396..f2c06fb9 100644 --- a/tests/tests_create_lvm_pool_then_remove.yml +++ b/tests/tests_create_lvm_pool_then_remove.yml @@ -2,6 +2,7 @@ - hosts: all become: true vars: + storage_safe_mode: false mount_location1: '/opt/test1' mount_location2: 
'/opt/test2' volume_group_size: '10g' diff --git a/tests/tests_create_partition_volume_then_remove.yml b/tests/tests_create_partition_volume_then_remove.yml index 40b3e620..01a7db7d 100644 --- a/tests/tests_create_partition_volume_then_remove.yml +++ b/tests/tests_create_partition_volume_then_remove.yml @@ -2,6 +2,7 @@ - hosts: all become: true vars: + storage_safe_mode: false mount_location: '/opt/test1' tasks: diff --git a/tests/tests_disk_errors.yml b/tests/tests_disk_errors.yml index 9f0b7ef4..06e2fb4d 100644 --- a/tests/tests_disk_errors.yml +++ b/tests/tests_disk_errors.yml @@ -82,6 +82,7 @@ include_role: name: storage vars: + storage_safe_mode: false storage_pools: - name: foo disks: "{{ unused_disks }}" From 8fac471c84009be19e502297322bc25fed7e9408 Mon Sep 17 00:00:00 2001 From: David Lehman Date: Thu, 3 Oct 2019 14:08:12 -0400 Subject: [PATCH 08/28] Special case for format destroy on unformatted device. --- library/blivet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/blivet.py b/library/blivet.py index a47e6329..d474bbf5 100644 --- a/library/blivet.py +++ b/library/blivet.py @@ -218,7 +218,7 @@ def _reformat(self): if self._device.format.type == fmt.type: return - if safe_mode: + if safe_mode and self._device.format.type is None and self._device.format.name != get_format(None).name: raise BlivetAnsibleError("cannot remove existing formatting on volume '%s' in safe mode" % self._volume['name']) if self._device.format.status: From f0617b45d5b8f27e15ddd21fd3f4e0a5cf5e5303 Mon Sep 17 00:00:00 2001 From: David Lehman Date: Fri, 4 Oct 2019 08:13:42 -0400 Subject: [PATCH 09/28] Fix logic of safe mode guard on reformat. A type of None means no formatting. An exception is when there is a blkid-reported type that blivet doesn't have special handling for, in which case type will be None but the name attribute will reflect the type reported by blkid. In this special case we treat the formatting as though it had a non-None type. --- library/blivet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/blivet.py b/library/blivet.py index d474bbf5..90f49cd6 100644 --- a/library/blivet.py +++ b/library/blivet.py @@ -218,7 +218,7 @@ def _reformat(self): if self._device.format.type == fmt.type: return - if safe_mode and self._device.format.type is None and self._device.format.name != get_format(None).name: + if safe_mode and (self._device.format.type is not None or self._device.format.name != get_format(None).name): raise BlivetAnsibleError("cannot remove existing formatting on volume '%s' in safe mode" % self._volume['name']) if self._device.format.status: From 3f6b84c63086c5a5ceacc7e7a2753847c1b03bc1 Mon Sep 17 00:00:00 2001 From: David Lehman Date: Tue, 15 Oct 2019 07:36:15 -0400 Subject: [PATCH 10/28] Fix key for partition pool class lookup. --- library/blivet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/blivet.py b/library/blivet.py index 90f49cd6..bd864ab7 100644 --- a/library/blivet.py +++ b/library/blivet.py @@ -537,7 +537,7 @@ def _create(self): _BLIVET_POOL_TYPES = { - "disk": BlivetPartitionPool, + "partition": BlivetPartitionPool, "lvm": BlivetLVMPool } From e66688db3288843a8c29b9d24d888e34c32e74cb Mon Sep 17 00:00:00 2001 From: David Lehman Date: Tue, 15 Oct 2019 09:02:27 -0400 Subject: [PATCH 11/28] Update partition pool type in tests. 
--- tests/tests_create_partition_volume_then_remove.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/tests_create_partition_volume_then_remove.yml b/tests/tests_create_partition_volume_then_remove.yml index 01a7db7d..ae589d3d 100644 --- a/tests/tests_create_partition_volume_then_remove.yml +++ b/tests/tests_create_partition_volume_then_remove.yml @@ -19,7 +19,7 @@ vars: storage_pools: - name: "{{ unused_disks[0] }}" - type: disk + type: partition disks: "{{ unused_disks }}" volumes: - name: test1 @@ -34,7 +34,7 @@ vars: storage_pools: - name: "{{ unused_disks[0] }}" - type: disk + type: partition disks: "{{ unused_disks }}" volumes: - name: test1 @@ -49,7 +49,7 @@ vars: storage_pools: - name: "{{ unused_disks[0] }}" - type: disk + type: partition disks: "{{ unused_disks }}" state: absent volumes: @@ -66,7 +66,7 @@ vars: storage_pools: - name: "{{ unused_disks[0] }}" - type: disk + type: partition disks: "{{ unused_disks }}" state: absent volumes: From 1e3f2c9dd9fb7095fe394ca83986ba4a4307370e Mon Sep 17 00:00:00 2001 From: Pavel Cahyna Date: Tue, 15 Oct 2019 13:40:01 +0200 Subject: [PATCH 12/28] Use correct array syntax for an invalid disk While both before and after it should trigger an error in the role, and therefore both are valid tests, a valid array with a nonexistent disk is more likely to be what was intended, and therefore preferable. (The former variant actually checked for bad syntax, not for handling of nonexistent disks.) --- tests/tests_disk_errors.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/tests_disk_errors.yml b/tests/tests_disk_errors.yml index 06e2fb4d..15391e99 100644 --- a/tests/tests_disk_errors.yml +++ b/tests/tests_disk_errors.yml @@ -22,7 +22,7 @@ storage_volumes: - name: test1 type: disk - disks: "['/dev/surelyidonotexist']" + disks: ['/dev/surelyidonotexist'] mount_point: "{{ mount_location }}" - name: Check the error output From 611553af0dc5b12a2916b4bd4f91ac7b2489fe53 Mon Sep 17 00:00:00 2001 From: Pavel Cahyna Date: Tue, 15 Oct 2019 13:44:46 +0200 Subject: [PATCH 13/28] Test that the role fails, not that it sets blivet_output.failed The latter would be a valid test as well, but a less important one, and currently it does not work properly. Do not use ignore_errors: yes on the test block, it makes all assert pointless. Use a rescue task instead. --- tests/tests_disk_errors.yml | 28 ++++++++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) diff --git a/tests/tests_disk_errors.yml b/tests/tests_disk_errors.yml index 15391e99..01513562 100644 --- a/tests/tests_disk_errors.yml +++ b/tests/tests_disk_errors.yml @@ -25,11 +25,31 @@ disks: ['/dev/surelyidonotexist'] mount_point: "{{ mount_location }}" - - name: Check the error output + - name: UNREACH + fail: + msg: "this should be unreachable" + + rescue: + - name: Check that we failed in the role assert: - that: "{{ blivet_output.failed }}" - msg: "Expected error message not found for missing disk" - ignore_errors: yes + that: + - ansible_failed_task.name != 'UNREACH' + msg: "Role has not failed when it should have" + vars: + # Ugh! needed to expand ansible_failed_task + storage_provider: blivet + + # the following does not work properly, + # blivet_output.failed is false. 
+ # - name: Show the error output + # debug: + # msg: "{{ blivet_output.failed }}" + + # - name: Check the error output + # assert: + # that: blivet_output.failed | bool + # msg: "Expected error message not found for missing disk" + - name: Test for correct handling of safe_mode block: From a597f0f6a5384fe442073d0bc6c1151af7002fa6 Mon Sep 17 00:00:00 2001 From: Pavel Cahyna Date: Tue, 15 Oct 2019 13:51:09 +0200 Subject: [PATCH 14/28] Create a file in the test fs to actually detect data loss --- tests/tests_disk_errors.yml | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/tests/tests_disk_errors.yml b/tests/tests_disk_errors.yml index 01513562..2d28dac3 100644 --- a/tests/tests_disk_errors.yml +++ b/tests/tests_disk_errors.yml @@ -3,6 +3,7 @@ become: true vars: mount_location: '/opt/test1' + testfile: "{{ mount_location }}/quux" tasks: - include_role: @@ -50,19 +51,24 @@ # that: blivet_output.failed | bool # msg: "Expected error message not found for missing disk" + - name: Create a file system on disk + include_role: + name: storage + vars: + storage_volumes: + - name: test1 + type: disk + fs_type: 'ext4' + disks: "{{ unused_disks }}" + mount_point: "{{ mount_location }}" + + - name: create a file + file: + path: "{{ testfile }}" + state: touch - name: Test for correct handling of safe_mode block: - - name: Create a file system on disk - include_role: - name: storage - vars: - storage_volumes: - - name: test1 - type: disk - fs_type: 'ext4' - disks: "{{ unused_disks }}" - - name: Try to replace the file system on disk in safe mode include_role: name: storage From 2fd8c334533efdb40e00e27ff25add1e9ab7d7b0 Mon Sep 17 00:00:00 2001 From: Pavel Cahyna Date: Tue, 15 Oct 2019 13:53:46 +0200 Subject: [PATCH 15/28] Do not use ignore_errors: yes in a test block - this makes all test asserts pointless. Instead catch the error using rescue. Actually verify that data heve not been destroyed. --- tests/tests_disk_errors.yml | 39 +++++++++++++++++++++++++++++++------ 1 file changed, 33 insertions(+), 6 deletions(-) diff --git a/tests/tests_disk_errors.yml b/tests/tests_disk_errors.yml index 2d28dac3..e0309879 100644 --- a/tests/tests_disk_errors.yml +++ b/tests/tests_disk_errors.yml @@ -80,14 +80,41 @@ fs_type: 'ext3' disks: "{{ unused_disks }}" - - name: Verify the output + - name: UNREACH + fail: + msg: "this should be unreachable" + + rescue: + - name: Check that we failed in the role assert: - that: "{{ blivet_output.failed and - blivet_output.msg|regex_search('cannot remove existing formatting on volume.*in safe mode') and - not blivet_output.changed }}" - msg: "Unexpected behavior w/ existing data on specified disks" + that: + - ansible_failed_task.name != 'UNREACH' + msg: "Role has not failed when it should have" + vars: + # Ugh! needed to expand ansible_failed_task + storage_provider: blivet + + - name: Verify the output + assert: + that: "blivet_output.failed and + blivet_output.msg|regex_search('cannot remove existing formatting on volume.*in safe mode') and + not blivet_output.changed" + msg: "Unexpected behavior w/ existing data on specified disks" - - name: Try to create a partition pool on the disk already containing a file system in safe_mode + - name: stat the file + stat: + path: "{{ testfile }}" + register: stat_r + + - name: assert file presence + assert: + that: + stat_r.stat.isreg is defined and stat_r.stat.isreg + msg: "data lost!" 
+ + - name: Test for correct handling of safe_mode + block: + - name: Try to create a LVM pool on the disk already containing a file system in safe_mode include_role: name: storage vars: From d2f3d97b05d133fd889a2b960dfd7509a823aac2 Mon Sep 17 00:00:00 2001 From: Pavel Cahyna Date: Tue, 15 Oct 2019 13:59:54 +0200 Subject: [PATCH 16/28] Remove last instance of ignore_errors: yes and check for errors properly. Verify again that data have not been lost. Do not use a pool type of partition, seems to be unimplemented. Create one volume in the pool, the role does not cope with empty pools. Note that even with those changes the role application fails - apparently it refuses to create a VG when there is already a filesystem, even with safe mode off. --- tests/tests_disk_errors.yml | 75 ++++++++++++++++++++++++------------- 1 file changed, 49 insertions(+), 26 deletions(-) diff --git a/tests/tests_disk_errors.yml b/tests/tests_disk_errors.yml index e0309879..71fcf0b2 100644 --- a/tests/tests_disk_errors.yml +++ b/tests/tests_disk_errors.yml @@ -124,36 +124,59 @@ disks: "{{ unused_disks }}" type: partition - - name: Verify the output - assert: - that: "{{ blivet_output.failed and - blivet_output.msg|regex_search('cannot remove existing formatting and/or devices on disk.*in safe mode') and - not blivet_output.changed }}" - msg: "Unexpected behavior w/ existing data on specified disks" + - name: UNREACH + fail: + msg: "this should be unreachable" - - name: Create a partition pool on the disk already containing a file system w/o safe_mode - include_role: - name: storage + rescue: + - name: Check that we failed in the role + assert: + that: + - ansible_failed_task.name != 'UNREACH' + msg: "Role has not failed when it should have" vars: - storage_safe_mode: false - storage_pools: - - name: foo - disks: "{{ unused_disks }}" - type: partition + # Ugh! needed to expand ansible_failed_task + storage_provider: blivet - name: Verify the output assert: - that: "{{ not blivet_output.failed }}" - msg: "failed to create partition pool over existing file system using default value of safe_mode" + that: "blivet_output.failed and + blivet_output.msg|regex_search('cannot remove existing formatting and/or devices on disk.*in safe mode') and + not blivet_output.changed" + msg: "Unexpected behavior w/ existing data on specified disks" - - name: Clean up - include_role: - name: storage - vars: - storage_volumes: - - name: test1 - type: disk - disks: "{{ unused_disks }}" - present: false + - name: stat the file + stat: + path: "{{ testfile }}" + register: stat_r + + - name: assert file presence + assert: + that: + stat_r.stat.isreg is defined and stat_r.stat.isreg + msg: "data lost!" 
+ + - name: Create a LVM pool on the disk already containing a file system w/o safe_mode + include_role: + name: storage + vars: + storage_safe_mode: false + storage_pools: + - name: foo + disks: "{{ unused_disks }}" + type: partition - ignore_errors: yes + - name: Verify the output + assert: + that: not blivet_output.failed + msg: "failed to create LVM pool over existing file system using default value of safe_mode" + + - name: Clean up + include_role: + name: storage + vars: + storage_volumes: + - name: test1 + type: disk + disks: "{{ unused_disks }}" + present: false From 0494d06348f844e2fdadb96fa7f8d52d3b6c97d6 Mon Sep 17 00:00:00 2001 From: Pavel Cahyna Date: Tue, 15 Oct 2019 15:02:05 +0200 Subject: [PATCH 17/28] Turn off safe mode in cleanup --- tests/tests_disk_errors.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/tests_disk_errors.yml b/tests/tests_disk_errors.yml index 71fcf0b2..874bb235 100644 --- a/tests/tests_disk_errors.yml +++ b/tests/tests_disk_errors.yml @@ -175,6 +175,7 @@ include_role: name: storage vars: + storage_safe_mode: false storage_volumes: - name: test1 type: disk From afd45f8b79d89626dac68e829c4a6ab3a2a98e4d Mon Sep 17 00:00:00 2001 From: Pavel Cahyna Date: Wed, 16 Oct 2019 18:36:18 +0200 Subject: [PATCH 18/28] Assert that "get required packages" has not changed mounts It seems that the "get required packages" task (invocation of blivet with packages_only: true) silently (without even reporting changed: true) changes mounts. Add an assert for that. (It would be better to make it a separate unit test.) --- tasks/main-blivet.yml | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/tasks/main-blivet.yml b/tasks/main-blivet.yml index 061195c1..e7c18bb7 100644 --- a/tasks/main-blivet.yml +++ b/tasks/main-blivet.yml @@ -85,6 +85,15 @@ - debug: var: _storage_volumes +- name: load mount facts + setup: + gather_subset: '!all,!min,mounts' + register: __storage_mounts_before_packages + +# - name: show mounts before get required packages +# debug: +# var: __storage_mounts_before_packages + - name: get required packages blivet: pools: "{{ _storage_pools }}" @@ -94,6 +103,30 @@ packages_only: true register: package_info +- name: load mount facts + setup: + gather_subset: '!all,!min,mounts' + register: __storage_mounts_after_packages + +- name: detect mount alteration by 'get required packages' + block: + - name: show mounts before manage the pools and volumes + debug: + var: __storage_mounts_before_packages.ansible_facts.ansible_mounts + + - name: show mounts after manage the pools and volumes + debug: + var: __storage_mounts_after_packages.ansible_facts.ansible_mounts + + - name: fail if mounts changed + fail: + msg: "get required packages changed mounts. Changed status is + {{ package_info.changed }}" + when: + - __storage_mounts_before_packages.ansible_facts.ansible_mounts | + count != + __storage_mounts_after_packages.ansible_facts.ansible_mounts | count + - name: make sure required packages are installed package: name: "{{ package_info.packages }}" From 0d2ab64206b75d1de8724b1086399d755249cc22 Mon Sep 17 00:00:00 2001 From: David Lehman Date: Fri, 18 Oct 2019 16:11:58 -0400 Subject: [PATCH 19/28] Don't unmount anything if only getting package list. 
---
 library/blivet.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/library/blivet.py b/library/blivet.py
index bd864ab7..617a2a18 100644
--- a/library/blivet.py
+++ b/library/blivet.py
@@ -120,6 +120,7 @@
 use_partitions = None # create partitions on pool backing device disks?
 disklabel_type = None # user-specified disklabel type
 safe_mode = None # do not remove any existing devices or formatting
+packages_only = None # only set things up enough to get a list of required packages
 
 
 class BlivetAnsibleError(Exception):
@@ -214,6 +215,8 @@ def _resize(self):
 
     def _reformat(self):
         """ Schedule actions as needed to ensure the volume is formatted as specified. """
+        global packages_only
+
         fmt = self._get_format()
         if self._device.format.type == fmt.type:
             return
@@ -221,7 +224,7 @@ def _reformat(self):
         if safe_mode and (self._device.format.type is not None or self._device.format.name != get_format(None).name):
             raise BlivetAnsibleError("cannot remove existing formatting on volume '%s' in safe mode" % self._volume['name'])
 
-        if self._device.format.status:
+        if self._device.format.status and not packages_only:
             self._device.format.teardown()
         self._blivet.format_device(self._device, fmt)
 
@@ -709,6 +712,9 @@ def run_module():
     global safe_mode
     safe_mode = module.params['safe_mode']
 
+    global packages_only
+    packages_only = module.params['packages_only']
+
     b = Blivet()
     b.reset()
     fstab = FSTab(b)

From 6d512fdbfb04221b8b752df367c8c7aaf03c8510 Mon Sep 17 00:00:00 2001
From: David Lehman
Date: Fri, 18 Oct 2019 17:29:26 -0400
Subject: [PATCH 20/28] Add proper checking of specified disk for disk volumes.

---
 library/blivet.py | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/library/blivet.py b/library/blivet.py
index 617a2a18..eab13a20 100644
--- a/library/blivet.py
+++ b/library/blivet.py
@@ -265,6 +265,19 @@ def _get_device_id(self):
 
     def _type_check(self):
         return self._device.is_disk
 
+    def _look_up_device(self):
+        super(BlivetDiskVolume, self)._look_up_device()
+        if not self._get_device_id():
+            # FAIL: no disks specified for volume
+            raise BlivetAnsibleError("no disks specified for volume '%s'" % self._volume['name'])  # sure about this one?
+        elif not isinstance(self._volume['disks'], list):
+            raise BlivetAnsibleError("volume disks must be specified as a list")
+
+        if self._device is None:
+            # FAIL: failed to find the disk
+            raise BlivetAnsibleError("unable to resolve disk specified for volume '%s' (%s)" % (self._volume['name'], self._volume['disks']))
+
+
 class BlivetPartitionVolume(BlivetVolume):
     def _type_check(self):

From 5f910b7ac347fcdd90df3fe55be8cd8ebd4c448f Mon Sep 17 00:00:00 2001
From: David Lehman
Date: Fri, 18 Oct 2019 17:30:10 -0400
Subject: [PATCH 21/28] Fix error in exception raising when clearing disk for disklabel.
---
 library/blivet.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/library/blivet.py b/library/blivet.py
index eab13a20..d671d09b 100644
--- a/library/blivet.py
+++ b/library/blivet.py
@@ -519,7 +519,7 @@ def _create(self):
             if not safe_mode:
                 self._blivet.devicetree.recursive_remove(self._device, remove_device=False)
             else:
-                raise BlivetAnsibleError("cannot remove existing formatting and/or devices on disk '%s' (pool '%s') in safe mode" % (disk.name, self._pool['name']))
+                raise BlivetAnsibleError("cannot remove existing formatting and/or devices on disk '%s' (pool '%s') in safe mode" % (self._device.name, self._pool['name']))
 
         label = get_format("disklabel", device=self._device.path, label_type=disklabel_type)
         self._blivet.format_device(self._device, label)

From b488658863afd8e635028ae075e83d9b941ab1e5 Mon Sep 17 00:00:00 2001
From: David Lehman
Date: Fri, 18 Oct 2019 17:30:34 -0400
Subject: [PATCH 22/28] Don't crash when a pool contains no volumes.

---
 tasks/main-blivet.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tasks/main-blivet.yml b/tasks/main-blivet.yml
index 22f810e2..65b8580b 100644
--- a/tasks/main-blivet.yml
+++ b/tasks/main-blivet.yml
@@ -38,7 +38,7 @@
     _storage_vols_no_defaults: "{{ _storage_vols_no_defaults|default([]) }} + [{{ item.1 }}]"
     _storage_vol_defaults: "{{ _storage_vol_defaults|default([]) }} + [{{ storage_volume_defaults }}]"
     _storage_vol_pools: "{{ _storage_vol_pools|default([]) }} + ['{{ item.0.name }}']"
-  loop: "{{ _storage_pools|subelements('volumes') }}"
+  loop: "{{ _storage_pools|subelements('volumes', skip_missing=true) }}"
   when: storage_pools is defined
 
 - name: Apply defaults to pools and volumes [3/6]

From d937f5b39f1a04df43983af7c9f91b23156374ca Mon Sep 17 00:00:00 2001
From: David Lehman
Date: Fri, 18 Oct 2019 17:35:30 -0400
Subject: [PATCH 23/28] Adjust names of tests to match reality.

---
 tests/tests_disk_errors.yml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tests/tests_disk_errors.yml b/tests/tests_disk_errors.yml
index 874bb235..fab86937 100644
--- a/tests/tests_disk_errors.yml
+++ b/tests/tests_disk_errors.yml
@@ -114,7 +114,7 @@
 
     - name: Test for correct handling of safe_mode
       block:
-        - name: Try to create a LVM pool on the disk already containing a file system in safe_mode
+        - name: Try to create a partition pool on the disk already containing a file system in safe_mode
          include_role:
            name: storage
          vars:
@@ -156,7 +156,7 @@
            stat_r.stat.isreg is defined and stat_r.stat.isreg
          msg: "data lost!"
 
-    - name: Create a LVM pool on the disk already containing a file system w/o safe_mode
+    - name: Create a partition pool on the disk already containing a file system w/o safe_mode
      include_role:
        name: storage
      vars:
@@ -169,7 +169,7 @@
    - name: Verify the output
      assert:
        that: not blivet_output.failed
-        msg: "failed to create LVM pool over existing file system using default value of safe_mode"
+        msg: "failed to create partition pool over existing file system using default value of safe_mode"
    - name: Clean up
      include_role:
        name: storage

From dd695e84232051524556c082d6e9d66011498960 Mon Sep 17 00:00:00 2001
From: Pavel Cahyna
Date: Mon, 21 Oct 2019 14:41:07 +0200
Subject: [PATCH 24/28] Make all classes new-style (derive from object)

Otherwise super() fails.
Not necessary with Python 3, but necessary with Python 2 (where classic classes are the default) --- library/blivet.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/library/blivet.py b/library/blivet.py index d671d09b..17043646 100644 --- a/library/blivet.py +++ b/library/blivet.py @@ -127,7 +127,7 @@ class BlivetAnsibleError(Exception): pass -class BlivetVolume: +class BlivetVolume(object): def __init__(self, blivet_obj, volume, bpool=None): self._blivet = blivet_obj self._volume = volume @@ -369,7 +369,7 @@ def _get_blivet_volume(blivet_obj, volume, bpool=None): return _BLIVET_VOLUME_TYPES[volume_type](blivet_obj, volume, bpool=bpool) -class BlivetPool: +class BlivetPool(object): def __init__(self, blivet_obj, pool): self._blivet = blivet_obj self._pool = pool @@ -583,7 +583,7 @@ def manage_pool(b, pool): volume['_mount_id'] = bvolume._volume.get('_mount_id', '') -class FSTab: +class FSTab(object): def __init__(self, blivet_obj): self._blivet = blivet_obj self._entries = list() From 09972bf4dfa6e9fd540fa1e56852507b70a98170 Mon Sep 17 00:00:00 2001 From: Pavel Cahyna Date: Mon, 21 Oct 2019 20:23:50 +0200 Subject: [PATCH 25/28] Safe mode should now be the default, account for this --- tests/tests_disk_errors.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/tests_disk_errors.yml b/tests/tests_disk_errors.yml index fab86937..e0d7df1e 100644 --- a/tests/tests_disk_errors.yml +++ b/tests/tests_disk_errors.yml @@ -73,7 +73,6 @@ include_role: name: storage vars: - storage_safe_mode: true storage_volumes: - name: test1 type: disk @@ -118,7 +117,6 @@ include_role: name: storage vars: - storage_safe_mode: true storage_pools: - name: foo disks: "{{ unused_disks }}" @@ -169,7 +167,7 @@ - name: Verify the output assert: that: not blivet_output.failed - msg: "failed to create partition pool over existing file system using default value of safe_mode" + msg: "failed to create partition pool over existing file system w/o safe_mode" - name: Clean up include_role: From 7f021399910e8ebaf788385b8314a26090d4b4a1 Mon Sep 17 00:00:00 2001 From: Pavel Cahyna Date: Mon, 21 Oct 2019 20:24:48 +0200 Subject: [PATCH 26/28] Check that safe mode works even when the filesystem is unmounted. --- tests/tests_disk_errors.yml | 55 +++++++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) diff --git a/tests/tests_disk_errors.yml b/tests/tests_disk_errors.yml index e0d7df1e..5d93a4d1 100644 --- a/tests/tests_disk_errors.yml +++ b/tests/tests_disk_errors.yml @@ -100,6 +100,61 @@ not blivet_output.changed" msg: "Unexpected behavior w/ existing data on specified disks" + - name: Unmount file system + include_role: + name: storage + vars: + storage_volumes: + - name: test1 + type: disk + fs_type: 'ext4' + disks: "{{ unused_disks }}" + mount_point: none + + - name: Test for correct handling of safe_mode with unmounted filesystem + block: + - name: Try to replace the file system on disk in safe mode + include_role: + name: storage + vars: + storage_volumes: + - name: test1 + type: disk + fs_type: 'ext3' + disks: "{{ unused_disks }}" + + - name: UNREACH + fail: + msg: "this should be unreachable" + + rescue: + - name: Check that we failed in the role + assert: + that: + - ansible_failed_task.name != 'UNREACH' + msg: "Role has not failed when it should have" + vars: + # Ugh! 
needed to expand ansible_failed_task + storage_provider: blivet + + - name: Verify the output + assert: + that: "blivet_output.failed and + blivet_output.msg|regex_search('cannot remove existing formatting on volume.*in safe mode') and + not blivet_output.changed" + msg: "Unexpected behavior w/ existing data on specified disks" + + - name: Remount file system + include_role: + name: storage + vars: + storage_volumes: + - name: test1 + type: disk + fs_type: 'ext4' + disks: "{{ unused_disks }}" + mount_point: "{{ mount_location }}" + - name: stat the file stat: path: "{{ testfile }}" From 9a25938b6fbf00ce5cf876d0b4e7f2bc373dcd59 Mon Sep 17 00:00:00 2001 From: Pavel Cahyna Date: Mon, 21 Oct 2019 20:25:33 +0200 Subject: [PATCH 27/28] Verify that replacing a fs by a LVM pool fails in safe mode --- tests/tests_disk_errors.yml | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/tests/tests_disk_errors.yml b/tests/tests_disk_errors.yml index 5d93a4d1..34b03d97 100644 --- a/tests/tests_disk_errors.yml +++ b/tests/tests_disk_errors.yml @@ -198,6 +198,38 @@ not blivet_output.changed" msg: "Unexpected behavior w/ existing data on specified disks" + - name: Test for correct handling of safe_mode with existing filesystem + block: + - name: Try to create LVM pool on disk that already belongs to an existing filesystem + include_role: + name: storage + vars: + storage_pools: + - name: foo + disks: "{{ unused_disks }}" + type: lvm + + - name: UNREACH + fail: + msg: "this should be unreachable" + + rescue: + - name: Check that we failed in the role + assert: + that: + - ansible_failed_task.name != 'UNREACH' + msg: "Role has not failed when it should have" + vars: + # Ugh! needed to expand ansible_failed_task + storage_provider: blivet + + - name: Verify the output + assert: + that: "{{ blivet_output.failed and + blivet_output.msg|regex_search('cannot remove existing formatting and/or devices on disk.*in safe mode') and + not blivet_output.changed }}" + msg: "Unexpected behavior w/ existing data on specified disks" + - name: stat the file stat: path: "{{ testfile }}" From da350a9816351be09d010bf21ade80596a2317db Mon Sep 17 00:00:00 2001 From: Pavel Cahyna Date: Tue, 22 Oct 2019 16:53:19 +0200 Subject: [PATCH 28/28] Cleanup properly. --- tests/tests_disk_errors.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/tests_disk_errors.yml b/tests/tests_disk_errors.yml index 34b03d97..7112f6ed 100644 --- a/tests/tests_disk_errors.yml +++ b/tests/tests_disk_errors.yml @@ -261,8 +261,8 @@ name: storage vars: storage_safe_mode: false - storage_volumes: - - name: test1 - type: disk + storage_pools: + - name: foo + type: partition disks: "{{ unused_disks }}" - present: false + state: absent