From d7212c2c26e4b38b9a2ed3e6b9f49f9cfdc0f7c8 Mon Sep 17 00:00:00 2001
From: Emilien Macchi
Date: Tue, 29 Aug 2023 19:43:24 -0400
Subject: [PATCH] Workaround for Ceph issue

---
 playbooks/prepare_host.yaml          | 17 +++++++++++++++++
 playbooks/roles/ceph/tasks/main.yaml |  2 +-
 playbooks/vars/defaults.yaml         |  7 +++++++
 3 files changed, 25 insertions(+), 1 deletion(-)

diff --git a/playbooks/prepare_host.yaml b/playbooks/prepare_host.yaml
index 129f436..4e65afe 100644
--- a/playbooks/prepare_host.yaml
+++ b/playbooks/prepare_host.yaml
@@ -289,6 +289,23 @@
     - name: Wipe filesystem from disk # noqa no-changed-when
       ansible.builtin.command: wipefs -a "{{ item }}"
       loop: "{{ ceph_devices }}"
+    - name: Expose Ceph devices to LVM
+      when:
+        - ceph_devices_to_lvm
+      block:
+        - name: Ensure we have lvm2
+          ansible.builtin.package:
+            name: lvm2
+            state: present
+        - name: Create VG for ceph devices
+          community.general.lvg:
+            vg: vg_ceph
+            pvs: "{{ ceph_devices | join(',') }}"
+        - name: Create LV for ceph devices
+          community.general.lvol:
+            vg: vg_ceph
+            lv: data
+            size: 100%VG

 - name: Prepare kernel and reboot
   when:
diff --git a/playbooks/roles/ceph/tasks/main.yaml b/playbooks/roles/ceph/tasks/main.yaml
index d73312d..5cf1dac 100644
--- a/playbooks/roles/ceph/tasks/main.yaml
+++ b/playbooks/roles/ceph/tasks/main.yaml
@@ -43,7 +43,7 @@
       content: |
        data_devices:
          paths:
-        {% if ceph_devices is defined and ceph_devices | length > 0 %}
+        {% if ceph_devices is defined and (ceph_devices | length > 0) and (ceph_devices is not true) and (not ceph_devices_to_lvm | default(false)) %}
        {% for d in ceph_devices %}
         - {{ d }}
        {% endfor %}
diff --git a/playbooks/vars/defaults.yaml b/playbooks/vars/defaults.yaml
index e47a61a..d89a97c 100644
--- a/playbooks/vars/defaults.yaml
+++ b/playbooks/vars/defaults.yaml
@@ -137,6 +137,13 @@ ceph_enabled: true
 # ceph_devices:
 #   - /dev/disk/by-path/pci-0000:45:00.0-ata-3

+# It will create a LVM PV from all `ceph_devices` and then
+# create a VG and LV. Then the VG will be exposed to Ceph to
+# create the OSD on it.
+# This can be used to workaround hardware issues like
+# reported here: https://bugzilla.redhat.com/show_bug.cgi?id=2235819
+ceph_devices_to_lvm: false
+
 # Size of the loop device that will be
 # used for Ceph (in GB).
 ceph_loop_device_size: 100