diff --git a/README.md b/README.md
index c2debc97..f808adcd 100644
--- a/README.md
+++ b/README.md
@@ -73,6 +73,9 @@ The `mount_point` specifies the directory on which the file system will be mount
 ##### `mount_options`
 The `mount_options` specifies custom mount options as a string, e.g.: 'ro'.
 
+#### `storage_safe_mode`
+When true (the default), an error will occur instead of automatically removing existing devices and/or formatting.
+
 Example Playbook
 ----------------
 
diff --git a/defaults/main.yml b/defaults/main.yml
index 7b500e5e..476616b9 100644
--- a/defaults/main.yml
+++ b/defaults/main.yml
@@ -3,6 +3,7 @@
 storage_provider: "blivet"
 storage_use_partitions: null
 storage_disklabel_type: null # leave unset to allow the role to select an appropriate label type
+storage_safe_mode: true # fail instead of implicitly/automatically removing devices or formatting
 
 storage_pool_defaults:
   state: "present"
diff --git a/library/blivet.py b/library/blivet.py
index 5a408801..bd864ab7 100644
--- a/library/blivet.py
+++ b/library/blivet.py
@@ -31,6 +31,9 @@
     disklabel_type:
         description:
             - disklabel type string (eg: 'gpt') to use, overriding the built-in logic in blivet
+    safe_mode:
+        description:
+            - boolean indicating that we should fail rather than implicitly/automatically removing devices or formatting
 
 author:
     - David Lehman (dlehman@redhat.com)
@@ -116,6 +119,7 @@
 
 use_partitions = None # create partitions on pool backing device disks?
 disklabel_type = None # user-specified disklabel type
+safe_mode = None # do not remove any existing devices or formatting
 
 
 class BlivetAnsibleError(Exception):
@@ -214,6 +218,9 @@ def _reformat(self):
         if self._device.format.type == fmt.type:
             return
 
+        if safe_mode and (self._device.format.type is not None or self._device.format.name != get_format(None).name):
+            raise BlivetAnsibleError("cannot remove existing formatting on volume '%s' in safe mode" % self._volume['name'])
+
         if self._device.format.status:
             self._device.format.teardown()
         self._blivet.format_device(self._device, fmt)
@@ -428,8 +435,11 @@ def _create_members(self):
         """ Schedule actions as needed to ensure pool member devices exist.
""" members = list() for disk in self._disks: - if not disk.isleaf: - self._blivet.devicetree.recursive_remove(disk) + if not disk.isleaf or disk.format.type is not None: + if not safe_mode: + self._blivet.devicetree.recursive_remove(disk) + else: + raise BlivetAnsibleError("cannot remove existing formatting and/or devices on disk '%s' (pool '%s') in safe mode" % (disk.name, self._pool['name'])) if use_partitions: label = get_format("disklabel", device=disk.path) @@ -490,7 +500,10 @@ def _look_up_device(self): def _create(self): if self._device.format.type != "disklabel" or \ self._device.format.label_type != disklabel_type: - self._blivet.devicetree.recursive_remove(self._device, remove_device=False) + if not safe_mode: + self._blivet.devicetree.recursive_remove(self._device, remove_device=False) + else: + raise BlivetAnsibleError("cannot remove existing formatting and/or devices on disk '%s' (pool '%s') in safe mode" % (disk.name, self._pool['name'])) label = get_format("disklabel", device=self._device.path, label_type=disklabel_type) self._blivet.format_device(self._device, label) @@ -524,7 +537,7 @@ def _create(self): _BLIVET_POOL_TYPES = { - "disk": BlivetPartitionPool, + "partition": BlivetPartitionPool, "lvm": BlivetLVMPool } @@ -660,6 +673,7 @@ def run_module(): volumes=dict(type='list'), packages_only=dict(type='bool', required=False, default=False), disklabel_type=dict(type='str', required=False, default=None), + safe_mode=dict(type='bool', required=False, default=False), use_partitions=dict(type='bool', required=False, default=True)) # seed the result dict in the object @@ -692,6 +706,9 @@ def run_module(): global use_partitions use_partitions = module.params['use_partitions'] + global safe_mode + safe_mode = module.params['safe_mode'] + b = Blivet() b.reset() fstab = FSTab(b) diff --git a/tasks/main-blivet.yml b/tasks/main-blivet.yml index 061195c1..88a5a012 100644 --- a/tasks/main-blivet.yml +++ b/tasks/main-blivet.yml @@ -105,6 +105,7 @@ volumes: "{{ _storage_volumes }}" use_partitions: "{{ storage_use_partitions }}" disklabel_type: "{{ storage_disklabel_type }}" + safe_mode: "{{ storage_safe_mode }}" register: blivet_output - debug: diff --git a/tests/tests_change_disk_fs.yml b/tests/tests_change_disk_fs.yml index b6aa80ba..f7962c6e 100644 --- a/tests/tests_change_disk_fs.yml +++ b/tests/tests_change_disk_fs.yml @@ -2,6 +2,7 @@ - hosts: all become: true vars: + storage_safe_mode: false mount_location: '/opt/test' volume_size: '5g' fs_type_after: "{{ 'ext3' if (ansible_distribution == 'RedHat' and ansible_distribution_major_version == '6') else 'ext4' }}" diff --git a/tests/tests_change_fs.yml b/tests/tests_change_fs.yml index cca23ebe..b88e7689 100644 --- a/tests/tests_change_fs.yml +++ b/tests/tests_change_fs.yml @@ -2,6 +2,7 @@ - hosts: all become: true vars: + storage_safe_mode: false mount_location: '/opt/test1' volume_size: '5g' fs_after: "{{ (ansible_distribution == 'RedHat' and ansible_distribution_major_version == '6') | ternary('ext4', 'xfs') }}" diff --git a/tests/tests_change_fs_use_partitions.yml b/tests/tests_change_fs_use_partitions.yml index e4aa76cd..eb93c116 100644 --- a/tests/tests_change_fs_use_partitions.yml +++ b/tests/tests_change_fs_use_partitions.yml @@ -2,6 +2,7 @@ - hosts: all become: true vars: + storage_safe_mode: false storage_use_partitions: true mount_location: '/opt/test1' volume_size: '5g' diff --git a/tests/tests_create_disk_then_remove.yml b/tests/tests_create_disk_then_remove.yml index b19ae352..c5290eb1 100644 --- 
+++ b/tests/tests_create_disk_then_remove.yml
@@ -2,6 +2,7 @@
 - hosts: all
   become: true
   vars:
+    storage_safe_mode: false
     mount_location: '/opt/test1'
 
   tasks:
diff --git a/tests/tests_create_lvm_pool_then_remove.yml b/tests/tests_create_lvm_pool_then_remove.yml
index 6b259396..f2c06fb9 100644
--- a/tests/tests_create_lvm_pool_then_remove.yml
+++ b/tests/tests_create_lvm_pool_then_remove.yml
@@ -2,6 +2,7 @@
 - hosts: all
   become: true
   vars:
+    storage_safe_mode: false
     mount_location1: '/opt/test1'
     mount_location2: '/opt/test2'
     volume_group_size: '10g'
diff --git a/tests/tests_create_partition_volume_then_remove.yml b/tests/tests_create_partition_volume_then_remove.yml
index 40b3e620..ae589d3d 100644
--- a/tests/tests_create_partition_volume_then_remove.yml
+++ b/tests/tests_create_partition_volume_then_remove.yml
@@ -2,6 +2,7 @@
 - hosts: all
   become: true
   vars:
+    storage_safe_mode: false
     mount_location: '/opt/test1'
 
   tasks:
@@ -18,7 +19,7 @@
       vars:
         storage_pools:
           - name: "{{ unused_disks[0] }}"
-            type: disk
+            type: partition
             disks: "{{ unused_disks }}"
             volumes:
               - name: test1
@@ -33,7 +34,7 @@
       vars:
         storage_pools:
           - name: "{{ unused_disks[0] }}"
-            type: disk
+            type: partition
             disks: "{{ unused_disks }}"
             volumes:
               - name: test1
@@ -48,7 +49,7 @@
       vars:
         storage_pools:
           - name: "{{ unused_disks[0] }}"
-            type: disk
+            type: partition
             disks: "{{ unused_disks }}"
             state: absent
             volumes:
@@ -65,7 +66,7 @@
       vars:
         storage_pools:
           - name: "{{ unused_disks[0] }}"
-            type: disk
+            type: partition
             disks: "{{ unused_disks }}"
             state: absent
             volumes:
diff --git a/tests/tests_disk_errors.yml b/tests/tests_disk_errors.yml
index 36eec41e..874bb235 100644
--- a/tests/tests_disk_errors.yml
+++ b/tests/tests_disk_errors.yml
@@ -3,8 +3,17 @@
   become: true
   vars:
     mount_location: '/opt/test1'
+    testfile: "{{ mount_location }}/quux"
 
   tasks:
+    - include_role:
+        name: storage
+
+    - include_tasks: get_unused_disk.yml
+      vars:
+        min_size: "10g"
+        max_return: 1
+
     - name: Verify that the play fails with the expected error message
       block:
         - name: Create a disk volume mounted at "{{ mount_location }}"
@@ -14,11 +23,161 @@
            storage_volumes:
              - name: test1
                type: disk
-               disks: "['/dev/surelyidonotexist']"
+               disks: ['/dev/surelyidonotexist']
                mount_point: "{{ mount_location }}"
 
-      - name: Check the error output
+        - name: UNREACH
+          fail:
+            msg: "this should be unreachable"
+
+      rescue:
+        - name: Check that we failed in the role
+          assert:
+            that:
+              - ansible_failed_task.name != 'UNREACH'
+            msg: "Role has not failed when it should have"
+          vars:
+            # Ugh! needed to expand ansible_failed_task
+            storage_provider: blivet
+
+      # the following does not work properly,
+      # blivet_output.failed is false.
+      # - name: Show the error output
+      #   debug:
+      #     msg: "{{ blivet_output.failed }}"
+
+      # - name: Check the error output
+      #   assert:
+      #     that: blivet_output.failed | bool
+      #     msg: "Expected error message not found for missing disk"
+
+    - name: Create a file system on disk
+      include_role:
+        name: storage
+      vars:
+        storage_volumes:
+          - name: test1
+            type: disk
+            fs_type: 'ext4'
+            disks: "{{ unused_disks }}"
+            mount_point: "{{ mount_location }}"
+
+    - name: Create a file
+      file:
+        path: "{{ testfile }}"
+        state: touch
+
+    - name: Test for correct handling of safe_mode
+      block:
+        - name: Try to replace the file system on disk in safe mode
+          include_role:
+            name: storage
+          vars:
+            storage_safe_mode: true
+            storage_volumes:
+              - name: test1
+                type: disk
+                fs_type: 'ext3'
+                disks: "{{ unused_disks }}"
+
+        - name: UNREACH
+          fail:
+            msg: "this should be unreachable"
+
+      rescue:
+        - name: Check that we failed in the role
           assert:
-            that: "{{ blivet_output.failed }}"
-            msg: "Expected error message not found for missing disk"
-      ignore_errors: yes
+            that:
+              - ansible_failed_task.name != 'UNREACH'
+            msg: "Role has not failed when it should have"
+          vars:
+            # Ugh! needed to expand ansible_failed_task
+            storage_provider: blivet
+
+    - name: Verify the output
+      assert:
+        that: "blivet_output.failed and
+               blivet_output.msg|regex_search('cannot remove existing formatting on volume.*in safe mode') and
+               not blivet_output.changed"
+        msg: "Unexpected behavior w/ existing data on specified disks"
+
+    - name: Stat the file
+      stat:
+        path: "{{ testfile }}"
+      register: stat_r
+
+    - name: Assert file presence
+      assert:
+        that:
+          stat_r.stat.isreg is defined and stat_r.stat.isreg
+        msg: "data lost!"
+
+    - name: Test for correct handling of safe_mode
+      block:
+        - name: Try to create a partition pool on the disk already containing a file system in safe_mode
+          include_role:
+            name: storage
+          vars:
+            storage_safe_mode: true
+            storage_pools:
+              - name: foo
+                disks: "{{ unused_disks }}"
+                type: partition
+
+        - name: UNREACH
+          fail:
+            msg: "this should be unreachable"
+
+      rescue:
+        - name: Check that we failed in the role
+          assert:
+            that:
+              - ansible_failed_task.name != 'UNREACH'
+            msg: "Role has not failed when it should have"
+          vars:
+            # Ugh! needed to expand ansible_failed_task
+            storage_provider: blivet
+
+    - name: Verify the output
+      assert:
+        that: "blivet_output.failed and
+               blivet_output.msg|regex_search('cannot remove existing formatting and/or devices on disk.*in safe mode') and
+               not blivet_output.changed"
+        msg: "Unexpected behavior w/ existing data on specified disks"
+
+    - name: Stat the file
+      stat:
+        path: "{{ testfile }}"
+      register: stat_r
+
+    - name: Assert file presence
+      assert:
+        that:
+          stat_r.stat.isreg is defined and stat_r.stat.isreg
+        msg: "data lost!"
+
+    - name: Create a partition pool on the disk already containing a file system w/o safe_mode
+      include_role:
+        name: storage
+      vars:
+        storage_safe_mode: false
+        storage_pools:
+          - name: foo
+            disks: "{{ unused_disks }}"
+            type: partition
+
+    - name: Verify the output
+      assert:
+        that: not blivet_output.failed
+        msg: "failed to create partition pool over existing file system with safe_mode disabled"
+
+    - name: Clean up
+      include_role:
+        name: storage
+      vars:
+        storage_safe_mode: false
+        storage_volumes:
+          - name: test1
+            type: disk
+            disks: "{{ unused_disks }}"
+            state: absent
diff --git a/tests/tests_lvm_errors.yml b/tests/tests_lvm_errors.yml
index ab236744..adcbb905 100644
--- a/tests/tests_lvm_errors.yml
+++ b/tests/tests_lvm_errors.yml
@@ -149,3 +149,87 @@
                    not blivet_output.changed }}"
           msg: "Unexpected behavior w/ LVM volume defined outside of any pool"
         ignore_errors: yes
+
+    - name: Test for correct handling of safe_mode
+      block:
+        - name: Create a pool
+          include_role:
+            name: storage
+          vars:
+            storage_pools:
+              - name: testpool1
+                type: lvm
+                disks: "{{ unused_disks }}"
+                volumes:
+                  - name: testvol1
+                    fs_type: 'ext4'
+                    size: '1g'
+
+        - name: Try to replace the file system in safe mode
+          include_role:
+            name: storage
+          vars:
+            storage_safe_mode: true
+            storage_pools:
+              - name: testpool1
+                type: lvm
+                disks: "{{ unused_disks }}"
+                volumes:
+                  - name: testvol1
+                    fs_type: 'ext3'
+                    size: '1g'
+
+        - name: Verify the output
+          assert:
+            that: "{{ blivet_output.failed and
+                      blivet_output.msg|regex_search('cannot remove existing formatting on volume.*in safe mode') and
+                      not blivet_output.changed }}"
+            msg: "Unexpected behavior w/ existing data on specified disks"
+
+        - name: Try to resize in safe mode
+          include_role:
+            name: storage
+          vars:
+            storage_safe_mode: true
+            storage_pools:
+              - name: testpool1
+                type: lvm
+                disks: "{{ unused_disks }}"
+                volumes:
+                  - name: testvol1
+                    fs_type: 'ext4'
+                    size: '2g'
+
+        - name: Verify the output
+          assert:
+            that: "{{ not blivet_output.failed and blivet_output.changed }}"
+            msg: "Unexpected failure when resizing a volume in safe mode"
+
+        - name: Try to create an LVM pool on disks that already belong to an existing pool
+          include_role:
+            name: storage
+          vars:
+            storage_safe_mode: true
+            storage_pools:
+              - name: foo
+                disks: "{{ unused_disks }}"
+                type: lvm
+
+        - name: Verify the output
+          assert:
+            that: "{{ blivet_output.failed and
+                      blivet_output.msg|regex_search('cannot remove existing formatting and/or devices on disk.*in safe mode') and
+                      not blivet_output.changed }}"
+            msg: "Unexpected behavior w/ existing data on specified disks"
+
+        - name: Clean up
+          include_role:
+            name: storage
+          vars:
+            storage_pools:
+              - name: testpool1
+                type: lvm
+                disks: "{{ unused_disks }}"
+                state: absent
+
+      ignore_errors: yes
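
A minimal usage sketch for the new option (hypothetical host pattern and disk name; the role is assumed to be installed as `storage`, as in the tests above). Left at its default of true, `storage_safe_mode` makes the role fail rather than reformat a disk that already carries data; setting it to false, as here, explicitly permits the destructive operation:

    - hosts: all
      become: true
      roles:
        - role: storage
          storage_safe_mode: false   # override the default (true) to allow reformatting
          storage_volumes:
            - name: test1
              type: disk
              disks: ['/dev/sdb']    # hypothetical disk
              fs_type: ext4
              mount_point: /opt/test1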