Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Many fixes for the test in PR #43 #46

Closed
wants to merge 17 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -73,6 +73,9 @@ The `mount_point` specifies the directory on which the file system will be mount
##### `mount_options`
The `mount_options` specifies custom mount options as a string, e.g.: 'ro'.

#### `storage_safe_mode`
When true (the default), an error will occur instead of automatically removing existing devices and/or formatting.


Example Playbook
----------------
Expand Down
1 change: 1 addition & 0 deletions defaults/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
storage_provider: "blivet"
storage_use_partitions: null
storage_disklabel_type: null # leave unset to allow the role to select an appropriate label type
storage_safe_mode: true # fail instead of implicitly/automatically removing devices or formatting

storage_pool_defaults:
state: "present"
Expand Down
25 changes: 21 additions & 4 deletions library/blivet.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,9 @@
disklabel_type:
description:
- disklabel type string (eg: 'gpt') to use, overriding the built-in logic in blivet
safe_mode:
description:
- boolean indicating that we should fail rather than implicitly/automatically removing devices or formatting

author:
- David Lehman ([email protected])
Expand Down Expand Up @@ -116,6 +119,7 @@

use_partitions = None # create partitions on pool backing device disks?
disklabel_type = None # user-specified disklabel type
safe_mode = None # do not remove any existing devices or formatting


class BlivetAnsibleError(Exception):
Expand Down Expand Up @@ -214,6 +218,9 @@ def _reformat(self):
if self._device.format.type == fmt.type:
return

if safe_mode and (self._device.format.type is not None or self._device.format.name != get_format(None).name):
raise BlivetAnsibleError("cannot remove existing formatting on volume '%s' in safe mode" % self._volume['name'])

if self._device.format.status:
self._device.format.teardown()
self._blivet.format_device(self._device, fmt)
Expand Down Expand Up @@ -428,8 +435,11 @@ def _create_members(self):
""" Schedule actions as needed to ensure pool member devices exist. """
members = list()
for disk in self._disks:
if not disk.isleaf:
self._blivet.devicetree.recursive_remove(disk)
if not disk.isleaf or disk.format.type is not None:
if not safe_mode:
self._blivet.devicetree.recursive_remove(disk)
else:
raise BlivetAnsibleError("cannot remove existing formatting and/or devices on disk '%s' (pool '%s') in safe mode" % (disk.name, self._pool['name']))

if use_partitions:
label = get_format("disklabel", device=disk.path)
Expand Down Expand Up @@ -490,7 +500,10 @@ def _look_up_device(self):
def _create(self):
if self._device.format.type != "disklabel" or \
self._device.format.label_type != disklabel_type:
self._blivet.devicetree.recursive_remove(self._device, remove_device=False)
if not safe_mode:
self._blivet.devicetree.recursive_remove(self._device, remove_device=False)
else:
raise BlivetAnsibleError("cannot remove existing formatting and/or devices on disk '%s' (pool '%s') in safe mode" % (disk.name, self._pool['name']))

label = get_format("disklabel", device=self._device.path, label_type=disklabel_type)
self._blivet.format_device(self._device, label)
Expand Down Expand Up @@ -524,7 +537,7 @@ def _create(self):


_BLIVET_POOL_TYPES = {
"disk": BlivetPartitionPool,
"partition": BlivetPartitionPool,
"lvm": BlivetLVMPool
}

Expand Down Expand Up @@ -660,6 +673,7 @@ def run_module():
volumes=dict(type='list'),
packages_only=dict(type='bool', required=False, default=False),
disklabel_type=dict(type='str', required=False, default=None),
safe_mode=dict(type='bool', required=False, default=False),
use_partitions=dict(type='bool', required=False, default=True))

# seed the result dict in the object
Expand Down Expand Up @@ -692,6 +706,9 @@ def run_module():
global use_partitions
use_partitions = module.params['use_partitions']

global safe_mode
safe_mode = module.params['safe_mode']

b = Blivet()
b.reset()
fstab = FSTab(b)
Expand Down
1 change: 1 addition & 0 deletions tasks/main-blivet.yml
Original file line number Diff line number Diff line change
Expand Up @@ -105,6 +105,7 @@
volumes: "{{ _storage_volumes }}"
use_partitions: "{{ storage_use_partitions }}"
disklabel_type: "{{ storage_disklabel_type }}"
safe_mode: "{{ storage_safe_mode }}"
register: blivet_output

- debug:
Expand Down
1 change: 1 addition & 0 deletions tests/tests_change_disk_fs.yml
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
- hosts: all
become: true
vars:
storage_safe_mode: false
mount_location: '/opt/test'
volume_size: '5g'
fs_type_after: "{{ 'ext3' if (ansible_distribution == 'RedHat' and ansible_distribution_major_version == '6') else 'ext4' }}"
Expand Down
1 change: 1 addition & 0 deletions tests/tests_change_fs.yml
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
- hosts: all
become: true
vars:
storage_safe_mode: false
mount_location: '/opt/test1'
volume_size: '5g'
fs_after: "{{ (ansible_distribution == 'RedHat' and ansible_distribution_major_version == '6') | ternary('ext4', 'xfs') }}"
Expand Down
1 change: 1 addition & 0 deletions tests/tests_change_fs_use_partitions.yml
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
- hosts: all
become: true
vars:
storage_safe_mode: false
storage_use_partitions: true
mount_location: '/opt/test1'
volume_size: '5g'
Expand Down
1 change: 1 addition & 0 deletions tests/tests_create_disk_then_remove.yml
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
- hosts: all
become: true
vars:
storage_safe_mode: false
mount_location: '/opt/test1'

tasks:
Expand Down
1 change: 1 addition & 0 deletions tests/tests_create_lvm_pool_then_remove.yml
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
- hosts: all
become: true
vars:
storage_safe_mode: false
mount_location1: '/opt/test1'
mount_location2: '/opt/test2'
volume_group_size: '10g'
Expand Down
9 changes: 5 additions & 4 deletions tests/tests_create_partition_volume_then_remove.yml
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
- hosts: all
become: true
vars:
storage_safe_mode: false
mount_location: '/opt/test1'

tasks:
Expand All @@ -18,7 +19,7 @@
vars:
storage_pools:
- name: "{{ unused_disks[0] }}"
type: disk
type: partition
disks: "{{ unused_disks }}"
volumes:
- name: test1
Expand All @@ -33,7 +34,7 @@
vars:
storage_pools:
- name: "{{ unused_disks[0] }}"
type: disk
type: partition
disks: "{{ unused_disks }}"
volumes:
- name: test1
Expand All @@ -48,7 +49,7 @@
vars:
storage_pools:
- name: "{{ unused_disks[0] }}"
type: disk
type: partition
disks: "{{ unused_disks }}"
state: absent
volumes:
Expand All @@ -65,7 +66,7 @@
vars:
storage_pools:
- name: "{{ unused_disks[0] }}"
type: disk
type: partition
disks: "{{ unused_disks }}"
state: absent
volumes:
Expand Down
169 changes: 164 additions & 5 deletions tests/tests_disk_errors.yml
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,17 @@
become: true
vars:
mount_location: '/opt/test1'
testfile: "{{ mount_location }}/quux"

tasks:
- include_role:
name: storage

- include_tasks: get_unused_disk.yml
vars:
min_size: "10g"
max_return: 1

- name: Verify that the play fails with the expected error message
block:
- name: Create a disk volume mounted at "{{ mount_location }}"
Expand All @@ -14,11 +23,161 @@
storage_volumes:
- name: test1
type: disk
disks: "['/dev/surelyidonotexist']"
disks: ['/dev/surelyidonotexist']
mount_point: "{{ mount_location }}"

- name: Check the error output
- name: UNREACH
fail:
msg: "this should be unreachable"

rescue:
- name: Check that we failed in the role
assert:
that:
- ansible_failed_task.name != 'UNREACH'
msg: "Role has not failed when it should have"
vars:
# Ugh! needed to expand ansible_failed_task
storage_provider: blivet

# the following does not work properly,
# blivet_output.failed is false.
# - name: Show the error output
# debug:
# msg: "{{ blivet_output.failed }}"

# - name: Check the error output
# assert:
# that: blivet_output.failed | bool
# msg: "Expected error message not found for missing disk"

- name: Create a file system on disk
include_role:
name: storage
vars:
storage_volumes:
- name: test1
type: disk
fs_type: 'ext4'
disks: "{{ unused_disks }}"
mount_point: "{{ mount_location }}"

- name: create a file
file:
path: "{{ testfile }}"
state: touch

- name: Test for correct handling of safe_mode
block:
- name: Try to replace the file system on disk in safe mode
include_role:
name: storage
vars:
storage_safe_mode: true
storage_volumes:
- name: test1
type: disk
fs_type: 'ext3'
disks: "{{ unused_disks }}"

- name: UNREACH
fail:
msg: "this should be unreachable"

rescue:
# Rescue handler: confirm the failure came from the storage role itself,
# not from the UNREACH canary task that follows the role invocation.
# Fixed: the original mapping had duplicate `that:`/`msg:` keys (stale
# blivet_output assertion left behind by a merge) and `ignore_errors`
# nested inside the assert module args, where it is not a valid parameter.
# This now matches the equivalent rescue blocks used elsewhere in this file.
- name: Check that we failed in the role
  assert:
    that:
      - ansible_failed_task.name != 'UNREACH'
    msg: "Role has not failed when it should have"
  vars:
    # Ugh! needed to expand ansible_failed_task
    storage_provider: blivet

- name: Verify the output
assert:
that: "blivet_output.failed and
blivet_output.msg|regex_search('cannot remove existing formatting on volume.*in safe mode') and
not blivet_output.changed"
msg: "Unexpected behavior w/ existing data on specified disks"

- name: stat the file
stat:
path: "{{ testfile }}"
register: stat_r

- name: assert file presence
assert:
that:
stat_r.stat.isreg is defined and stat_r.stat.isreg
msg: "data lost!"

- name: Test for correct handling of safe_mode
block:
- name: Try to create a LVM pool on the disk already containing a file system in safe_mode
include_role:
name: storage
vars:
storage_safe_mode: true
storage_pools:
- name: foo
disks: "{{ unused_disks }}"
type: partition

- name: UNREACH
fail:
msg: "this should be unreachable"

rescue:
- name: Check that we failed in the role
assert:
that:
- ansible_failed_task.name != 'UNREACH'
msg: "Role has not failed when it should have"
vars:
# Ugh! needed to expand ansible_failed_task
storage_provider: blivet

- name: Verify the output
assert:
that: "blivet_output.failed and
blivet_output.msg|regex_search('cannot remove existing formatting and/or devices on disk.*in safe mode') and
not blivet_output.changed"
msg: "Unexpected behavior w/ existing data on specified disks"

- name: stat the file
stat:
path: "{{ testfile }}"
register: stat_r

- name: assert file presence
assert:
that:
stat_r.stat.isreg is defined and stat_r.stat.isreg
msg: "data lost!"

- name: Create a LVM pool on the disk already containing a file system w/o safe_mode
include_role:
name: storage
vars:
storage_safe_mode: false
storage_pools:
- name: foo
disks: "{{ unused_disks }}"
type: partition

- name: Verify the output
assert:
that: not blivet_output.failed
msg: "failed to create LVM pool over existing file system using default value of safe_mode"

- name: Clean up
include_role:
name: storage
vars:
storage_safe_mode: false
storage_volumes:
- name: test1
type: disk
disks: "{{ unused_disks }}"
present: false
Loading