From 939ebebb61c3859a8a8bb496baa6ad2784658399 Mon Sep 17 00:00:00 2001 From: David Lehman Date: Wed, 25 Sep 2019 14:32:29 -0400 Subject: [PATCH 01/17] Add flag to prevent implicit removal of existing device/fs. --- defaults/main.yml | 1 + library/blivet.py | 23 ++++++++-- tasks/main-blivet.yml | 1 + tests/tests_disk_errors.yml | 81 ++++++++++++++++++++++++++++++++++ tests/tests_lvm_errors.yml | 86 +++++++++++++++++++++++++++++++++++++ 5 files changed, 189 insertions(+), 3 deletions(-) diff --git a/defaults/main.yml b/defaults/main.yml index 7b500e5e..4b07b4e6 100644 --- a/defaults/main.yml +++ b/defaults/main.yml @@ -3,6 +3,7 @@ storage_provider: "blivet" storage_use_partitions: null storage_disklabel_type: null # leave unset to allow the role to select an appropriate label type +storage_safe_mode: false # do not remove devices as needed to make space for new volumes storage_pool_defaults: state: "present" diff --git a/library/blivet.py b/library/blivet.py index 5a408801..bf5a4dec 100644 --- a/library/blivet.py +++ b/library/blivet.py @@ -31,6 +31,9 @@ disklabel_type: description: - disklabel type string (eg: 'gpt') to use, overriding the built-in logic in blivet + safe_mode: + description: + - boolean indicating that existing devices and file system should never be removed author: - David Lehman (dlehman@redhat.com) @@ -116,6 +119,7 @@ use_partitions = None # create partitions on pool backing device disks? 
disklabel_type = None # user-specified disklabel type +safe_mode = None # do not remove any existing devices or formatting class BlivetAnsibleError(Exception): @@ -193,6 +197,9 @@ def _resize(self): raise BlivetAnsibleError("invalid size specification for volume '%s': '%s'" % (self._volume['name'], self._volume['size'])) if size and self._device.resizable and self._device.size != size: + if safe_mode: + raise BlivetAnsibleError("cannot resize existing volume '%s' in safe mode" % self._volume['name']) + if self._device.format.resizable: self._device.format.update_size_info() @@ -214,6 +221,9 @@ def _reformat(self): if self._device.format.type == fmt.type: return + if safe_mode: + raise BlivetAnsibleError("cannot remove existing formatting on volume '%s' in safe mode" % self._volume['name']) + if self._device.format.status: self._device.format.teardown() self._blivet.format_device(self._device, fmt) @@ -428,8 +438,11 @@ def _create_members(self): """ Schedule actions as needed to ensure pool member devices exist. 
""" members = list() for disk in self._disks: - if not disk.isleaf: - self._blivet.devicetree.recursive_remove(disk) + if not disk.isleaf or disk.format.type is not None: + if not safe_mode: + self._blivet.devicetree.recursive_remove(disk) + else: + raise BlivetAnsibleError("cannot remove existing formatting and/or devices on disk '%s' (pool '%s') in safe mode" % (disk.name, self._pool['name'])) if use_partitions: label = get_format("disklabel", device=disk.path) @@ -490,7 +503,10 @@ def _look_up_device(self): def _create(self): if self._device.format.type != "disklabel" or \ self._device.format.label_type != disklabel_type: - self._blivet.devicetree.recursive_remove(self._device, remove_device=False) + if not safe_mode: + self._blivet.devicetree.recursive_remove(self._device, remove_device=False) + else: + raise BlivetAnsibleError("cannot remove existing formatting and/or devices on disk '%s' (pool '%s') in safe mode" % (disk.name, self._pool['name'])) label = get_format("disklabel", device=self._device.path, label_type=disklabel_type) self._blivet.format_device(self._device, label) @@ -660,6 +676,7 @@ def run_module(): volumes=dict(type='list'), packages_only=dict(type='bool', required=False, default=False), disklabel_type=dict(type='str', required=False, default=None), + safe_mode=dict(type='bool', required=False, default=False), use_partitions=dict(type='bool', required=False, default=True)) # seed the result dict in the object diff --git a/tasks/main-blivet.yml b/tasks/main-blivet.yml index 061195c1..88a5a012 100644 --- a/tasks/main-blivet.yml +++ b/tasks/main-blivet.yml @@ -105,6 +105,7 @@ volumes: "{{ _storage_volumes }}" use_partitions: "{{ storage_use_partitions }}" disklabel_type: "{{ storage_disklabel_type }}" + safe_mode: "{{ storage_safe_mode }}" register: blivet_output - debug: diff --git a/tests/tests_disk_errors.yml b/tests/tests_disk_errors.yml index 36eec41e..9f0b7ef4 100644 --- a/tests/tests_disk_errors.yml +++ b/tests/tests_disk_errors.yml @@ 
-5,6 +5,14 @@ mount_location: '/opt/test1' tasks: + - include_role: + name: storage + + - include_tasks: get_unused_disk.yml + vars: + min_size: "10g" + max_return: 1 + - name: Verify that the play fails with the expected error message block: - name: Create a disk volume mounted at "{{ mount_location }}" @@ -22,3 +30,76 @@ that: "{{ blivet_output.failed }}" msg: "Expected error message not found for missing disk" ignore_errors: yes + + - name: Test for correct handling of safe_mode + block: + - name: Create a file system on disk + include_role: + name: storage + vars: + storage_volumes: + - name: test1 + type: disk + fs_type: 'ext4' + disks: "{{ unused_disks }}" + + - name: Try to replace the file system on disk in safe mode + include_role: + name: storage + vars: + storage_safe_mode: true + storage_volumes: + - name: test1 + type: disk + fs_type: 'ext3' + disks: "{{ unused_disks }}" + + - name: Verify the output + assert: + that: "{{ blivet_output.failed and + blivet_output.msg|regex_search('cannot remove existing formatting on volume.*in safe mode') and + not blivet_output.changed }}" + msg: "Unexpected behavior w/ existing data on specified disks" + + - name: Try to create a partition pool on the disk already containing a file system in safe_mode + include_role: + name: storage + vars: + storage_safe_mode: true + storage_pools: + - name: foo + disks: "{{ unused_disks }}" + type: partition + + - name: Verify the output + assert: + that: "{{ blivet_output.failed and + blivet_output.msg|regex_search('cannot remove existing formatting and/or devices on disk.*in safe mode') and + not blivet_output.changed }}" + msg: "Unexpected behavior w/ existing data on specified disks" + + - name: Create a partition pool on the disk already containing a file system w/o safe_mode + include_role: + name: storage + vars: + storage_pools: + - name: foo + disks: "{{ unused_disks }}" + type: partition + + - name: Verify the output + assert: + that: "{{ not blivet_output.failed }}" + 
msg: "failed to create partition pool over existing file system using default value of safe_mode" + + - name: Clean up + include_role: + name: storage + vars: + storage_volumes: + - name: test1 + type: disk + disks: "{{ unused_disks }}" + present: false + + ignore_errors: yes diff --git a/tests/tests_lvm_errors.yml b/tests/tests_lvm_errors.yml index ab236744..05a2128c 100644 --- a/tests/tests_lvm_errors.yml +++ b/tests/tests_lvm_errors.yml @@ -149,3 +149,89 @@ not blivet_output.changed }}" msg: "Unexpected behavior w/ LVM volume defined outside of any pool" ignore_errors: yes + + - name: Test for correct handling of safe_mode + block: + - name: Create a pool + include_role: + name: storage + vars: + storage_pools: + - name: testpool1 + type: lvm + disks: "{{ unused_disks }}" + volumes: + - name: testvol1 + fs_type: 'ext4' + size: '1g' + + - name: Try to replace file system in safe mode + include_role: + name: storage + vars: + storage_safe_mode: true + storage_pools: + - name: testpool1 + type: lvm + disks: "{{ unused_disks }}" + volumes: + - name: testvol1 + fs_type: 'ext3' + size: '1g' + + - name: Verify the output + assert: + that: "{{ blivet_output.failed and + blivet_output.msg|regex_search('cannot remove existing formatting on volume.*in safe mode') and + not blivet_output.changed }}" + msg: "Unexpected behavior w/ existing data on specified disks" + + - name: Try to resize in safe mode + include_role: + name: storage + vars: + storage_safe_mode: true + storage_pools: + - name: testpool1 + type: lvm + disks: "{{ unused_disks }}" + volumes: + - name: testvol1 + fs_type: 'ext4' + size: '2g' + + - name: Verify the output + assert: + that: "{{ blivet_output.failed and + blivet_output.msg|regex_search('cannot resize existing volume.*in safe mode') and + not blivet_output.changed }}" + msg: "Unexpected behavior w/ existing data on specified disks" + + - name: Try to create LVM pool on disks that already belong to an existing pool + include_role: + name: storage + 
vars: + storage_safe_mode: true + storage_pools: + - name: foo + disks: "{{ unused_disks }}" + type: lvm + + - name: Verify the output + assert: + that: "{{ blivet_output.failed and + blivet_output.msg|regex_search('cannot remove existing formatting and/or devices on disk.*in safe mode') and + not blivet_output.changed }}" + msg: "Unexpected behavior w/ existing data on specified disks" + + - name: Clean up + include_role: + name: storage + vars: + storage_pools: + - name: testpool1 + type: lvm + disks: "{{ unused_disks }}" + state: absent + + ignore_errors: yes From 55767dcddc8dcae9b3d25e96fff155f0ede4ae51 Mon Sep 17 00:00:00 2001 From: David Lehman Date: Wed, 25 Sep 2019 15:23:29 -0400 Subject: [PATCH 02/17] Improve accuracy of safe mode description. --- defaults/main.yml | 2 +- library/blivet.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/defaults/main.yml b/defaults/main.yml index 4b07b4e6..8306f35e 100644 --- a/defaults/main.yml +++ b/defaults/main.yml @@ -3,7 +3,7 @@ storage_provider: "blivet" storage_use_partitions: null storage_disklabel_type: null # leave unset to allow the role to select an appropriate label type -storage_safe_mode: false # do not remove devices as needed to make space for new volumes +storage_safe_mode: false # fail instead of implicitly/automatically removing devices or formatting storage_pool_defaults: state: "present" diff --git a/library/blivet.py b/library/blivet.py index bf5a4dec..592fd752 100644 --- a/library/blivet.py +++ b/library/blivet.py @@ -33,7 +33,7 @@ - disklabel type string (eg: 'gpt') to use, overriding the built-in logic in blivet safe_mode: description: - - boolean indicating that existing devices and file system should never be removed + - boolean indicating that we should fail rather than implicitly/automatically removing devices or formatting author: - David Lehman (dlehman@redhat.com) From 5d1645d19f605112efbadcd5559e09fb1c662aa0 Mon Sep 17 00:00:00 2001 From: David Lehman Date: Tue, 1 Oct 
2019 11:20:01 -0400 Subject: [PATCH 03/17] Do not protect resize ops in safe mode. --- library/blivet.py | 3 --- tests/tests_lvm_errors.yml | 4 +--- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/library/blivet.py b/library/blivet.py index 592fd752..577124a4 100644 --- a/library/blivet.py +++ b/library/blivet.py @@ -197,9 +197,6 @@ def _resize(self): raise BlivetAnsibleError("invalid size specification for volume '%s': '%s'" % (self._volume['name'], self._volume['size'])) if size and self._device.resizable and self._device.size != size: - if safe_mode: - raise BlivetAnsibleError("cannot resize existing volume '%s' in safe mode" % self._volume['name']) - if self._device.format.resizable: self._device.format.update_size_info() diff --git a/tests/tests_lvm_errors.yml b/tests/tests_lvm_errors.yml index 05a2128c..adcbb905 100644 --- a/tests/tests_lvm_errors.yml +++ b/tests/tests_lvm_errors.yml @@ -202,9 +202,7 @@ - name: Verify the output assert: - that: "{{ blivet_output.failed and - blivet_output.msg|regex_search('cannot resize existing volume.*in safe mode') and - not blivet_output.changed }}" + that: "{{ not blivet_output.failed and blivet_output.changed }}" msg: "Unexpected behavior w/ existing data on specified disks" - name: Try to create LVM pool on disks that already belong to an existing pool From 1392ad6a168a32a4c2c33b72f03cad851fec9d09 Mon Sep 17 00:00:00 2001 From: David Lehman Date: Tue, 1 Oct 2019 16:29:01 -0400 Subject: [PATCH 04/17] Enable safe mode by default. 
--- defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/defaults/main.yml b/defaults/main.yml index 8306f35e..476616b9 100644 --- a/defaults/main.yml +++ b/defaults/main.yml @@ -3,7 +3,7 @@ storage_provider: "blivet" storage_use_partitions: null storage_disklabel_type: null # leave unset to allow the role to select an appropriate label type -storage_safe_mode: false # fail instead of implicitly/automatically removing devices or formatting +storage_safe_mode: true # fail instead of implicitly/automatically removing devices or formatting storage_pool_defaults: state: "present" From efa0cdbce9f00c20d5f02bca392896c5a9a2532f Mon Sep 17 00:00:00 2001 From: David Lehman Date: Tue, 1 Oct 2019 16:29:12 -0400 Subject: [PATCH 05/17] Document safe mode. --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index c2debc97..f808adcd 100644 --- a/README.md +++ b/README.md @@ -73,6 +73,9 @@ The `mount_point` specifies the directory on which the file system will be mount ##### `mount_options` The `mount_options` specifies custom mount options as a string, e.g.: 'ro'. +#### `storage_safe_mode` +When true (the default), an error will occur instead of automatically removing existing devices and/or formatting. + Example Playbook ---------------- From 26496e8e87b32b399d14b3d0a7d9bd63e848819c Mon Sep 17 00:00:00 2001 From: David Lehman Date: Thu, 3 Oct 2019 13:54:36 -0400 Subject: [PATCH 06/17] Actually set safe_mode var in python. 
--- library/blivet.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/library/blivet.py b/library/blivet.py index 577124a4..a47e6329 100644 --- a/library/blivet.py +++ b/library/blivet.py @@ -706,6 +706,9 @@ def run_module(): global use_partitions use_partitions = module.params['use_partitions'] + global safe_mode + safe_mode = module.params['safe_mode'] + b = Blivet() b.reset() fstab = FSTab(b) From 7c9e6ee36c405b2c23b111c5c153f725f1b920b2 Mon Sep 17 00:00:00 2001 From: David Lehman Date: Thu, 3 Oct 2019 13:55:01 -0400 Subject: [PATCH 07/17] Update tests to adjust safe mode as needed. --- tests/tests_change_disk_fs.yml | 1 + tests/tests_change_fs.yml | 1 + tests/tests_change_fs_use_partitions.yml | 1 + tests/tests_create_disk_then_remove.yml | 1 + tests/tests_create_lvm_pool_then_remove.yml | 1 + tests/tests_create_partition_volume_then_remove.yml | 1 + tests/tests_disk_errors.yml | 1 + 7 files changed, 7 insertions(+) diff --git a/tests/tests_change_disk_fs.yml b/tests/tests_change_disk_fs.yml index b6aa80ba..f7962c6e 100644 --- a/tests/tests_change_disk_fs.yml +++ b/tests/tests_change_disk_fs.yml @@ -2,6 +2,7 @@ - hosts: all become: true vars: + storage_safe_mode: false mount_location: '/opt/test' volume_size: '5g' fs_type_after: "{{ 'ext3' if (ansible_distribution == 'RedHat' and ansible_distribution_major_version == '6') else 'ext4' }}" diff --git a/tests/tests_change_fs.yml b/tests/tests_change_fs.yml index cca23ebe..b88e7689 100644 --- a/tests/tests_change_fs.yml +++ b/tests/tests_change_fs.yml @@ -2,6 +2,7 @@ - hosts: all become: true vars: + storage_safe_mode: false mount_location: '/opt/test1' volume_size: '5g' fs_after: "{{ (ansible_distribution == 'RedHat' and ansible_distribution_major_version == '6') | ternary('ext4', 'xfs') }}" diff --git a/tests/tests_change_fs_use_partitions.yml b/tests/tests_change_fs_use_partitions.yml index e4aa76cd..eb93c116 100644 --- a/tests/tests_change_fs_use_partitions.yml +++ 
b/tests/tests_change_fs_use_partitions.yml @@ -2,6 +2,7 @@ - hosts: all become: true vars: + storage_safe_mode: false storage_use_partitions: true mount_location: '/opt/test1' volume_size: '5g' diff --git a/tests/tests_create_disk_then_remove.yml b/tests/tests_create_disk_then_remove.yml index b19ae352..c5290eb1 100644 --- a/tests/tests_create_disk_then_remove.yml +++ b/tests/tests_create_disk_then_remove.yml @@ -2,6 +2,7 @@ - hosts: all become: true vars: + storage_safe_mode: false mount_location: '/opt/test1' tasks: diff --git a/tests/tests_create_lvm_pool_then_remove.yml b/tests/tests_create_lvm_pool_then_remove.yml index 6b259396..f2c06fb9 100644 --- a/tests/tests_create_lvm_pool_then_remove.yml +++ b/tests/tests_create_lvm_pool_then_remove.yml @@ -2,6 +2,7 @@ - hosts: all become: true vars: + storage_safe_mode: false mount_location1: '/opt/test1' mount_location2: '/opt/test2' volume_group_size: '10g' diff --git a/tests/tests_create_partition_volume_then_remove.yml b/tests/tests_create_partition_volume_then_remove.yml index 40b3e620..01a7db7d 100644 --- a/tests/tests_create_partition_volume_then_remove.yml +++ b/tests/tests_create_partition_volume_then_remove.yml @@ -2,6 +2,7 @@ - hosts: all become: true vars: + storage_safe_mode: false mount_location: '/opt/test1' tasks: diff --git a/tests/tests_disk_errors.yml b/tests/tests_disk_errors.yml index 9f0b7ef4..06e2fb4d 100644 --- a/tests/tests_disk_errors.yml +++ b/tests/tests_disk_errors.yml @@ -82,6 +82,7 @@ include_role: name: storage vars: + storage_safe_mode: false storage_pools: - name: foo disks: "{{ unused_disks }}" From 8fac471c84009be19e502297322bc25fed7e9408 Mon Sep 17 00:00:00 2001 From: David Lehman Date: Thu, 3 Oct 2019 14:08:12 -0400 Subject: [PATCH 08/17] Special case for format destroy on unformatted device. 
--- library/blivet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/blivet.py b/library/blivet.py index a47e6329..d474bbf5 100644 --- a/library/blivet.py +++ b/library/blivet.py @@ -218,7 +218,7 @@ def _reformat(self): if self._device.format.type == fmt.type: return - if safe_mode: + if safe_mode and self._device.format.type is None and self._device.format.name != get_format(None).name: raise BlivetAnsibleError("cannot remove existing formatting on volume '%s' in safe mode" % self._volume['name']) if self._device.format.status: From f0617b45d5b8f27e15ddd21fd3f4e0a5cf5e5303 Mon Sep 17 00:00:00 2001 From: David Lehman Date: Fri, 4 Oct 2019 08:13:42 -0400 Subject: [PATCH 09/17] Fix logic of safe mode guard on reformat. A type of None means no formatting. An exception is when there is a blkid-reported type that blivet doesn't have special handling for, in which case type will be None but the name attribute will reflect the type reported by blkid. In this special case we treat the formatting as though it had a non-None type. --- library/blivet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/blivet.py b/library/blivet.py index d474bbf5..90f49cd6 100644 --- a/library/blivet.py +++ b/library/blivet.py @@ -218,7 +218,7 @@ def _reformat(self): if self._device.format.type == fmt.type: return - if safe_mode and self._device.format.type is None and self._device.format.name != get_format(None).name: + if safe_mode and (self._device.format.type is not None or self._device.format.name != get_format(None).name): raise BlivetAnsibleError("cannot remove existing formatting on volume '%s' in safe mode" % self._volume['name']) if self._device.format.status: From 3f6b84c63086c5a5ceacc7e7a2753847c1b03bc1 Mon Sep 17 00:00:00 2001 From: David Lehman Date: Tue, 15 Oct 2019 07:36:15 -0400 Subject: [PATCH 10/17] Fix key for partition pool class lookup. 
--- library/blivet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/blivet.py b/library/blivet.py index 90f49cd6..bd864ab7 100644 --- a/library/blivet.py +++ b/library/blivet.py @@ -537,7 +537,7 @@ def _create(self): _BLIVET_POOL_TYPES = { - "disk": BlivetPartitionPool, + "partition": BlivetPartitionPool, "lvm": BlivetLVMPool } From e66688db3288843a8c29b9d24d888e34c32e74cb Mon Sep 17 00:00:00 2001 From: David Lehman Date: Tue, 15 Oct 2019 09:02:27 -0400 Subject: [PATCH 11/17] Update partition pool type in tests. --- tests/tests_create_partition_volume_then_remove.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/tests_create_partition_volume_then_remove.yml b/tests/tests_create_partition_volume_then_remove.yml index 01a7db7d..ae589d3d 100644 --- a/tests/tests_create_partition_volume_then_remove.yml +++ b/tests/tests_create_partition_volume_then_remove.yml @@ -19,7 +19,7 @@ vars: storage_pools: - name: "{{ unused_disks[0] }}" - type: disk + type: partition disks: "{{ unused_disks }}" volumes: - name: test1 @@ -34,7 +34,7 @@ vars: storage_pools: - name: "{{ unused_disks[0] }}" - type: disk + type: partition disks: "{{ unused_disks }}" volumes: - name: test1 @@ -49,7 +49,7 @@ vars: storage_pools: - name: "{{ unused_disks[0] }}" - type: disk + type: partition disks: "{{ unused_disks }}" state: absent volumes: @@ -66,7 +66,7 @@ vars: storage_pools: - name: "{{ unused_disks[0] }}" - type: disk + type: partition disks: "{{ unused_disks }}" state: absent volumes: From 1e3f2c9dd9fb7095fe394ca83986ba4a4307370e Mon Sep 17 00:00:00 2001 From: Pavel Cahyna Date: Tue, 15 Oct 2019 13:40:01 +0200 Subject: [PATCH 12/17] Use correct array syntax for an invalid disk While both before and after it should trigger an error in the role, and therefore both are valid tests, a valid array with a nonexistent disk is more likely to be what was intended, and therefore preferable. 
(The former variant actually checked for bad syntax, not for handling of nonexistent disks.) --- tests/tests_disk_errors.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/tests_disk_errors.yml b/tests/tests_disk_errors.yml index 06e2fb4d..15391e99 100644 --- a/tests/tests_disk_errors.yml +++ b/tests/tests_disk_errors.yml @@ -22,7 +22,7 @@ storage_volumes: - name: test1 type: disk - disks: "['/dev/surelyidonotexist']" + disks: ['/dev/surelyidonotexist'] mount_point: "{{ mount_location }}" - name: Check the error output From 611553af0dc5b12a2916b4bd4f91ac7b2489fe53 Mon Sep 17 00:00:00 2001 From: Pavel Cahyna Date: Tue, 15 Oct 2019 13:44:46 +0200 Subject: [PATCH 13/17] Test that the role fails, not that it sets blivet_output.failed The latter would be a valid test as well, but a less important one, and currently it does not work properly. Do not use ignore_errors: yes on the test block, it makes all assert pointless. Use a rescue task instead. --- tests/tests_disk_errors.yml | 28 ++++++++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) diff --git a/tests/tests_disk_errors.yml b/tests/tests_disk_errors.yml index 15391e99..01513562 100644 --- a/tests/tests_disk_errors.yml +++ b/tests/tests_disk_errors.yml @@ -25,11 +25,31 @@ disks: ['/dev/surelyidonotexist'] mount_point: "{{ mount_location }}" - - name: Check the error output + - name: UNREACH + fail: + msg: "this should be unreachable" + + rescue: + - name: Check that we failed in the role assert: - that: "{{ blivet_output.failed }}" - msg: "Expected error message not found for missing disk" - ignore_errors: yes + that: + - ansible_failed_task.name != 'UNREACH' + msg: "Role has not failed when it should have" + vars: + # Ugh! needed to expand ansible_failed_task + storage_provider: blivet + + # the following does not work properly, + # blivet_output.failed is false. 
+ # - name: Show the error output + # debug: + # msg: "{{ blivet_output.failed }}" + + # - name: Check the error output + # assert: + # that: blivet_output.failed | bool + # msg: "Expected error message not found for missing disk" + - name: Test for correct handling of safe_mode block: From a597f0f6a5384fe442073d0bc6c1151af7002fa6 Mon Sep 17 00:00:00 2001 From: Pavel Cahyna Date: Tue, 15 Oct 2019 13:51:09 +0200 Subject: [PATCH 14/17] Create a file in the test fs to actually detect data loss --- tests/tests_disk_errors.yml | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/tests/tests_disk_errors.yml b/tests/tests_disk_errors.yml index 01513562..2d28dac3 100644 --- a/tests/tests_disk_errors.yml +++ b/tests/tests_disk_errors.yml @@ -3,6 +3,7 @@ become: true vars: mount_location: '/opt/test1' + testfile: "{{ mount_location }}/quux" tasks: - include_role: @@ -50,19 +51,24 @@ # that: blivet_output.failed | bool # msg: "Expected error message not found for missing disk" + - name: Create a file system on disk + include_role: + name: storage + vars: + storage_volumes: + - name: test1 + type: disk + fs_type: 'ext4' + disks: "{{ unused_disks }}" + mount_point: "{{ mount_location }}" + + - name: create a file + file: + path: "{{ testfile }}" + state: touch - name: Test for correct handling of safe_mode block: - - name: Create a file system on disk - include_role: - name: storage - vars: - storage_volumes: - - name: test1 - type: disk - fs_type: 'ext4' - disks: "{{ unused_disks }}" - - name: Try to replace the file system on disk in safe mode include_role: name: storage From 2fd8c334533efdb40e00e27ff25add1e9ab7d7b0 Mon Sep 17 00:00:00 2001 From: Pavel Cahyna Date: Tue, 15 Oct 2019 13:53:46 +0200 Subject: [PATCH 15/17] Do not use ignore_errors: yes in a test block - this makes all test asserts pointless. Instead catch the error using rescue. Actually verify that data have not been destroyed. 
--- tests/tests_disk_errors.yml | 39 +++++++++++++++++++++++++++++++------ 1 file changed, 33 insertions(+), 6 deletions(-) diff --git a/tests/tests_disk_errors.yml b/tests/tests_disk_errors.yml index 2d28dac3..e0309879 100644 --- a/tests/tests_disk_errors.yml +++ b/tests/tests_disk_errors.yml @@ -80,14 +80,41 @@ fs_type: 'ext3' disks: "{{ unused_disks }}" - - name: Verify the output + - name: UNREACH + fail: + msg: "this should be unreachable" + + rescue: + - name: Check that we failed in the role assert: - that: "{{ blivet_output.failed and - blivet_output.msg|regex_search('cannot remove existing formatting on volume.*in safe mode') and - not blivet_output.changed }}" - msg: "Unexpected behavior w/ existing data on specified disks" + that: + - ansible_failed_task.name != 'UNREACH' + msg: "Role has not failed when it should have" + vars: + # Ugh! needed to expand ansible_failed_task + storage_provider: blivet + + - name: Verify the output + assert: + that: "blivet_output.failed and + blivet_output.msg|regex_search('cannot remove existing formatting on volume.*in safe mode') and + not blivet_output.changed" + msg: "Unexpected behavior w/ existing data on specified disks" - - name: Try to create a partition pool on the disk already containing a file system in safe_mode + - name: stat the file + stat: + path: "{{ testfile }}" + register: stat_r + + - name: assert file presence + assert: + that: + stat_r.stat.isreg is defined and stat_r.stat.isreg + msg: "data lost!" + + - name: Test for correct handling of safe_mode + block: + - name: Try to create a LVM pool on the disk already containing a file system in safe_mode include_role: name: storage vars: From d2f3d97b05d133fd889a2b960dfd7509a823aac2 Mon Sep 17 00:00:00 2001 From: Pavel Cahyna Date: Tue, 15 Oct 2019 13:59:54 +0200 Subject: [PATCH 16/17] Remove last instance of ignore_errors: yes and check for errors properly. Verify again that data have not been lost. 
Do not use a pool type of partition, seems to be unimplemented. Create one volume in the pool, the role does not cope with empty pools. Note that even with those changes the role application fails - apparently it refuses to create a VG when there is already a filesystem, even with safe mode off. --- tests/tests_disk_errors.yml | 75 ++++++++++++++++++++++++------------- 1 file changed, 49 insertions(+), 26 deletions(-) diff --git a/tests/tests_disk_errors.yml b/tests/tests_disk_errors.yml index e0309879..71fcf0b2 100644 --- a/tests/tests_disk_errors.yml +++ b/tests/tests_disk_errors.yml @@ -124,36 +124,59 @@ disks: "{{ unused_disks }}" type: partition - - name: Verify the output - assert: - that: "{{ blivet_output.failed and - blivet_output.msg|regex_search('cannot remove existing formatting and/or devices on disk.*in safe mode') and - not blivet_output.changed }}" - msg: "Unexpected behavior w/ existing data on specified disks" + - name: UNREACH + fail: + msg: "this should be unreachable" - - name: Create a partition pool on the disk already containing a file system w/o safe_mode - include_role: - name: storage + rescue: + - name: Check that we failed in the role + assert: + that: + - ansible_failed_task.name != 'UNREACH' + msg: "Role has not failed when it should have" vars: - storage_safe_mode: false - storage_pools: - - name: foo - disks: "{{ unused_disks }}" - type: partition + # Ugh! 
needed to expand ansible_failed_task + storage_provider: blivet - name: Verify the output assert: - that: "{{ not blivet_output.failed }}" - msg: "failed to create partition pool over existing file system using default value of safe_mode" + that: "blivet_output.failed and + blivet_output.msg|regex_search('cannot remove existing formatting and/or devices on disk.*in safe mode') and + not blivet_output.changed" + msg: "Unexpected behavior w/ existing data on specified disks" - - name: Clean up - include_role: - name: storage - vars: - storage_volumes: - - name: test1 - type: disk - disks: "{{ unused_disks }}" - present: false + - name: stat the file + stat: + path: "{{ testfile }}" + register: stat_r + + - name: assert file presence + assert: + that: + stat_r.stat.isreg is defined and stat_r.stat.isreg + msg: "data lost!" + + - name: Create a LVM pool on the disk already containing a file system w/o safe_mode + include_role: + name: storage + vars: + storage_safe_mode: false + storage_pools: + - name: foo + disks: "{{ unused_disks }}" + type: partition - ignore_errors: yes + - name: Verify the output + assert: + that: not blivet_output.failed + msg: "failed to create LVM pool over existing file system using default value of safe_mode" + + - name: Clean up + include_role: + name: storage + vars: + storage_volumes: + - name: test1 + type: disk + disks: "{{ unused_disks }}" + present: false From 0494d06348f844e2fdadb96fa7f8d52d3b6c97d6 Mon Sep 17 00:00:00 2001 From: Pavel Cahyna Date: Tue, 15 Oct 2019 15:02:05 +0200 Subject: [PATCH 17/17] Turn off safe mode in cleanup --- tests/tests_disk_errors.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/tests_disk_errors.yml b/tests/tests_disk_errors.yml index 71fcf0b2..874bb235 100644 --- a/tests/tests_disk_errors.yml +++ b/tests/tests_disk_errors.yml @@ -175,6 +175,7 @@ include_role: name: storage vars: + storage_safe_mode: false storage_volumes: - name: test1 type: disk