From 25fa47ff4b8d1a94ad0fa8420e21a5020ab0cd36 Mon Sep 17 00:00:00 2001 From: Ondrej Famera Date: Fri, 26 Nov 2021 12:31:59 +0900 Subject: [PATCH 01/13] FIX: when running multiple plays within single playbook because the ansible groups are additive the run of multiple plays with different hosts within single playbook caused the latter playbooks to contain nodes from previous plays. Change here adds random cluster ID to mitigate this situation. Workaround: use separate playbooks for separate hosts. Thanks to @jesusserranost for reporting the issue and suggesting workaround! --- tasks/fence_kdump.yml | 2 +- tasks/fence_vmware_rest.yml | 2 +- tasks/fence_vmware_soap.yml | 2 +- tasks/fence_xvm.yml | 2 +- tasks/main.yml | 23 +++++++++++++++++------ 5 files changed, 21 insertions(+), 10 deletions(-) diff --git a/tasks/fence_kdump.yml b/tasks/fence_kdump.yml index 5ed37b3..fe7e897 100644 --- a/tasks/fence_kdump.yml +++ b/tasks/fence_kdump.yml @@ -33,6 +33,6 @@ resource_class: 'stonith' resource_type: 'fence_kdump' options: >- - pcmk_host_map="{% for item in groups['cluster_node_is_remote_False'] %}{{ hostvars[item][cluster_hostname_fact] }}:{{ hostvars[item]['vm_name'] }};{% endfor %}" + pcmk_host_map="{% for item in groups['cluster'+rand_id+'_node_is_remote_False'] %}{{ hostvars[item][cluster_hostname_fact] }}:{{ hostvars[item]['vm_name'] }};{% endfor %}" run_once: true when: cluster_configure_stonith_style is defined and cluster_configure_stonith_style == 'one-device-per-cluster' diff --git a/tasks/fence_vmware_rest.yml b/tasks/fence_vmware_rest.yml index 63b6593..080358a 100644 --- a/tasks/fence_vmware_rest.yml +++ b/tasks/fence_vmware_rest.yml @@ -62,7 +62,7 @@ resource_class: 'stonith' resource_type: 'fence_vmware_rest' options: >- - pcmk_host_map="{% for item in groups['cluster_node_is_remote_False'] %}{{ hostvars[item][cluster_hostname_fact] }}:{{ hostvars[item]['vm_name'] }};{% endfor %}" + pcmk_host_map="{% for item in 
groups['cluster'+rand_id+'_node_is_remote_False'] %}{{ hostvars[item][cluster_hostname_fact] }}:{{ hostvars[item]['vm_name'] }};{% endfor %}" ipaddr={{ fence_vmware_ipaddr }} login={{ fence_vmware_login }} passwd={{ fence_vmware_passwd }} diff --git a/tasks/fence_vmware_soap.yml b/tasks/fence_vmware_soap.yml index 492c5a2..dbadd0e 100644 --- a/tasks/fence_vmware_soap.yml +++ b/tasks/fence_vmware_soap.yml @@ -57,7 +57,7 @@ resource_class: 'stonith' resource_type: 'fence_vmware_soap' options: >- - pcmk_host_map="{% for item in groups['cluster_node_is_remote_False'] %}{{ hostvars[item][cluster_hostname_fact] }}:{{ hostvars[item]['vm_name'] }};{% endfor %}" + pcmk_host_map="{% for item in groups['cluster'+rand_id+'_node_is_remote_False'] %}{{ hostvars[item][cluster_hostname_fact] }}:{{ hostvars[item]['vm_name'] }};{% endfor %}" ipaddr={{ fence_vmware_ipaddr }} login={{ fence_vmware_login }} passwd={{ fence_vmware_passwd }} diff --git a/tasks/fence_xvm.yml b/tasks/fence_xvm.yml index 153ebd7..c1b0430 100644 --- a/tasks/fence_xvm.yml +++ b/tasks/fence_xvm.yml @@ -58,7 +58,7 @@ resource_class: 'stonith' resource_type: 'fence_xvm' options: >- - pcmk_host_map="{% for item in groups['cluster_node_is_remote_False'] %}{{ hostvars[item][cluster_hostname_fact] }}:{{ hostvars[item]['vm_name'] }};{% endfor %}" + pcmk_host_map="{% for item in groups['cluster'+rand_id+'_node_is_remote_False'] %}{{ hostvars[item][cluster_hostname_fact] }}:{{ hostvars[item]['vm_name'] }};{% endfor %}" op monitor interval=30s run_once: true when: cluster_configure_stonith_style is defined and cluster_configure_stonith_style == 'one-device-per-cluster' diff --git a/tasks/main.yml b/tasks/main.yml index 0eda4f5..5996955 100644 --- a/tasks/main.yml +++ b/tasks/main.yml @@ -1,12 +1,23 @@ --- +- name: Generate random ID for this cluster deployment + set_fact: + rand_id: "{{ 1024 | random }}" + run_once: true + changed_when: false + check_mode: false + # This ID is used for ansible group names grouping 
machines from this run of role. + # If this role is run multiple times in single playbook with different hosts this + # ID will distinguish these different hosts + # FIXME: this is workaround for inability to "remove things from groups" + - name: Add a group to the inventory for remote nodes group_by: - key: "cluster_node_is_remote_{{ cluster_node_is_remote | bool }}" + key: "cluster{{ rand_id }}_node_is_remote_{{ cluster_node_is_remote | bool }}" - name: Check if cluster consist of at least 2 nodes fail: msg: 'Cluster must have at least 2 full members' - when: groups['cluster_node_is_remote_False']|default([])|count() < 2 + when: groups['cluster'+rand_id+'_node_is_remote_False']|default([])|count() < 2 run_once: true - name: Check if cluster_user_pass is not default @@ -131,7 +142,7 @@ - name: Setup cluster pcs_cluster: node_list: >- - {% for item in groups['cluster_node_is_remote_False'] %} + {% for item in groups['cluster'+rand_id+'_node_is_remote_False'] %} {{ hostvars[item][cluster_hostname_fact] }}{% if hostvars[item].rrp_ip is defined %},{{ hostvars[item].rrp_ip }}{% endif %} {% endfor %} cluster_name: "{{ cluster_name }}" @@ -175,13 +186,13 @@ - cluster_node_is_remote | bool - (pcs_status.stdout | regex_search('\\b' ~ cluster_hostname ~ '\\s+\\(ocf::pacemaker:remote\\)') or '') | length == 0 vars: - delegate_host: "{{ hostvars[groups['cluster_node_is_remote_False'][0]].inventory_hostname }}" + delegate_host: "{{ hostvars[groups['cluster'+rand_id+'_node_is_remote_False'][0]].inventory_hostname }}" # NOTE: Without this, the host's ansible_host variable will not be # respected when using delegate_to. 
ansible_host: "{{ hostvars[delegate_host].ansible_host | default(delegate_host) }}" when: - - groups['cluster_node_is_remote_False'] is defined - - groups['cluster_node_is_remote_True']|default([])|count() > 0 + - groups['cluster'+rand_id+'_node_is_remote_False'] is defined + - groups['cluster'+rand_id+'_node_is_remote_True']|default([])|count() > 0 delegate_to: "{{ delegate_host }}" ### fencing setup From 13b7cd5320ae8470ab10f91cf56a435438276c1d Mon Sep 17 00:00:00 2001 From: Tiemen Ruiten Date: Mon, 24 Jan 2022 15:22:13 +0100 Subject: [PATCH 02/13] add support for Rocky Linux --- tasks/install_normal.yml | 2 +- tasks/main.yml | 4 ++-- tasks/rocky_repos.yml | 21 +++++++++++++++++++++ 3 files changed, 24 insertions(+), 3 deletions(-) create mode 100644 tasks/rocky_repos.yml diff --git a/tasks/install_normal.yml b/tasks/install_normal.yml index 09bdacb..032debf 100644 --- a/tasks/install_normal.yml +++ b/tasks/install_normal.yml @@ -4,7 +4,7 @@ name: 'libselinux-python' state: 'installed' when: - - not (ansible_distribution in ['RedHat','CentOS','AlmaLinux'] and ansible_distribution_major_version == '8') + - not (ansible_distribution in ['RedHat','CentOS','AlmaLinux','Rocky'] and ansible_distribution_major_version == '8') - not (ansible_distribution == 'Fedora' and ansible_distribution_major_version >= '31') - name: Install Pacemaker cluster packages to all nodes diff --git a/tasks/main.yml b/tasks/main.yml index 5996955..2c7312d 100644 --- a/tasks/main.yml +++ b/tasks/main.yml @@ -108,11 +108,11 @@ enabled: true state: 'started' -- name: Setup firewall for RHEL/CentOS/AlmaLinux systems +- name: Setup firewall for RHEL/CentOS/AlmaLinux/Rocky systems include_tasks: "firewall-el{{ ansible_distribution_major_version }}.yml" when: - cluster_firewall|bool - - ansible_distribution in ['RedHat','CentOS','AlmaLinux'] + - ansible_distribution in ['RedHat','CentOS','AlmaLinux','Rocky'] - name: Setup firewall for Fedora systems include_tasks: "firewall-fedora.yml" diff --git 
a/tasks/rocky_repos.yml b/tasks/rocky_repos.yml new file mode 100644 index 0000000..9d09e0b --- /dev/null +++ b/tasks/rocky_repos.yml @@ -0,0 +1,21 @@ +--- +- name: get list of active repositories + command: yum repolist + args: + warn: false + register: yum_repolist + changed_when: false + check_mode: false + +- name: enable highavailability repository (Rocky) + ini_file: + dest: '/etc/yum.repos.d/Rocky-HighAvailability.repo' + section: 'ha' + option: 'enabled' + value: '1' + create: 'no' + mode: '0644' + when: >- + 'HighAvailability' not in yum_repolist.stdout + and enable_repos | bool + and ansible_distribution_major_version in ['8'] From a1670baa976610a58b0a451ce402ddf5b1aa1596 Mon Sep 17 00:00:00 2001 From: Tiemen Ruiten Date: Tue, 25 Jan 2022 23:23:01 +0100 Subject: [PATCH 03/13] update README.md to reflect added support for Rocky Linux --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 0c7b741..3aeece9 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ ha-cluster-pacemaker ========= -Role for configuring and expanding basic pacemaker cluster on CentOS/RHEL 6/7/8, AlmaLinux 8, Fedora 31/32/33 and CentOS 8 Stream systems. +Role for configuring and expanding basic pacemaker cluster on CentOS/RHEL 6/7/8, AlmaLinux 8, Rocky Linux 8, Fedora 31/32/33 and CentOS 8 Stream systems. This role can configure following aspects of pacemaker cluster: - enable needed system repositories @@ -131,7 +131,7 @@ Role Variables cluster_configure_stonith_style: 'one-device-per-node' ``` - - (RHEL/CentOS/AlmaLinux) enable the repositories containing needed packages + - (RHEL/CentOS/AlmaLinux/Rocky) enable the repositories containing needed packages ``` enable_repos: true ``` From 00f709c5a6892fccb5039de43d9ac851244441a2 Mon Sep 17 00:00:00 2001 From: Ondrej Famera Date: Thu, 12 May 2022 23:00:23 +0900 Subject: [PATCH 04/13] Adjust repository configuration for CentOS Linux 8 in regards to EOL repositories. 
By default the HA repository on CentOS Linux 8.1-8.5 is configured by this role to point to baseurl of vault.centos.org Inspired by @SpitchAG in #23, Thank you! --- README.md | 2 ++ tasks/centos_repos.yml | 54 ++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 54 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 3aeece9..987b3f9 100644 --- a/README.md +++ b/README.md @@ -46,6 +46,8 @@ This role depend on role [ondrejhome.pcs-modules-2](https://github.com/OndrejHom Ansible version **2.9.10** and **2.9.11** will fail with error `"'hostvars' is undefined"` when trying to configure remote nodes. This applies only when there is at least one node with `cluster_node_is_remote=True`. **Avoid these Ansible versions** if you plan to configure remote nodes with this role. +On **CentOS Linux 8** you have to ensure that BaseOS and Appstream repositories are working properly. As the CentOS Linux 8 is in the End-Of-Life phase, this role will configure HA repository to point to vault.centos.org if repository configuration (`enable_repos: true`) is requested (it is by default). 
+ Role Variables -------------- diff --git a/tasks/centos_repos.yml b/tasks/centos_repos.yml index c7aee03..48847f6 100644 --- a/tasks/centos_repos.yml +++ b/tasks/centos_repos.yml @@ -7,7 +7,7 @@ changed_when: false check_mode: false -- name: enable highavailability repository (CentOS 8.1/8.2) +- name: EOL enable highavailability repository (CentOS 8.1, 8.2) ini_file: dest: '/etc/yum.repos.d/CentOS-HA.repo' section: 'HighAvailability' @@ -20,7 +20,7 @@ and enable_repos | bool and ansible_distribution_version in ['8.1', '8.2'] -- name: enable highavailability repository (CentOS 8.3+) +- name: EOL enable highavailability repository (CentOS 8.3, 8.4, 8.5) ini_file: dest: '/etc/yum.repos.d/CentOS-Linux-HighAvailability.repo' section: 'ha' @@ -34,6 +34,56 @@ and ansible_distribution_major_version in ['8'] and ansible_distribution_version not in ['8.0', '8.1', '8.2', '8'] +- name: EOL disable mirrorlist for CentOS Linux 8.1, 8.2 HA repository + ini_file: + dest: '/etc/yum.repos.d/CentOS-HA.repo' + section: 'HighAvailability' + option: 'mirrorlist' + create: 'no' + mode: '0644' + state: absent + when: >- + enable_repos | bool + and ansible_distribution_version in ['8.1', '8.2'] + +- name: EOL disable mirrorlist for CentOS Linux 8.3, 8.4, 8.5 HA repository + ini_file: + dest: '/etc/yum.repos.d/CentOS-Linux-HighAvailability.repo' + section: 'ha' + option: 'mirrorlist' + create: 'no' + mode: '0644' + state: absent + when: >- + enable_repos | bool + and ansible_distribution_version in ['8.3', '8.4', '8.5'] + +- name: EOL configure baseurl for CentOS Linux 8.1, 8.2 HA repository + ini_file: + dest: '/etc/yum.repos.d/CentOS-HA.repo' + section: 'HighAvailability' + option: 'baseurl' + value: 'http://vault.centos.org/$contentdir/$releasever/HighAvailability/$basearch/os/' + create: 'no' + mode: '0644' + state: present + when: >- + enable_repos | bool + and ansible_distribution_version in ['8.1', '8.2'] + +- name: EOL configure baseurl for CentOS Linux 8.3, 8.4, 8.5 HA repository 
+ ini_file: + dest: '/etc/yum.repos.d/CentOS-Linux-HighAvailability.repo' + section: 'ha' + option: 'baseurl' + value: 'http://vault.centos.org/$contentdir/$releasever/HighAvailability/$basearch/os/' + create: 'no' + mode: '0644' + state: present + when: >- + enable_repos | bool + and ansible_distribution_version in ['8.3', '8.4', '8.5'] + - name: enable highavailability repository (CentOS 8 Stream) ini_file: dest: '/etc/yum.repos.d/CentOS-Stream-HighAvailability.repo' From 2ecf0c091946e54fa562812fbbe96461720a8894 Mon Sep 17 00:00:00 2001 From: Ondrej Famera Date: Sat, 14 May 2022 16:07:01 +0900 Subject: [PATCH 05/13] add support for Fedora 34/35 --- README.md | 2 +- meta/main.yml | 2 ++ vars/fedora34.yml | 21 +++++++++++++++++++++ vars/fedora35.yml | 21 +++++++++++++++++++++ 4 files changed, 45 insertions(+), 1 deletion(-) create mode 100644 vars/fedora34.yml create mode 100644 vars/fedora35.yml diff --git a/README.md b/README.md index 987b3f9..c60e552 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ ha-cluster-pacemaker ========= -Role for configuring and expanding basic pacemaker cluster on CentOS/RHEL 6/7/8, AlmaLinux 8, Rocky Linux 8, Fedora 31/32/33 and CentOS 8 Stream systems. +Role for configuring and expanding basic pacemaker cluster on CentOS/RHEL 6/7/8, AlmaLinux 8, Rocky Linux 8, Fedora 31/32/33/34/35 and CentOS 8 Stream systems. 
This role can configure following aspects of pacemaker cluster: - enable needed system repositories diff --git a/meta/main.yml b/meta/main.yml index bafbd21..2ca28ad 100644 --- a/meta/main.yml +++ b/meta/main.yml @@ -15,6 +15,8 @@ galaxy_info: - 31 - 32 - 33 + - 34 + - 35 - name: Debian versions: - 'buster' diff --git a/vars/fedora34.yml b/vars/fedora34.yml new file mode 100644 index 0000000..2fa89e5 --- /dev/null +++ b/vars/fedora34.yml @@ -0,0 +1,21 @@ +--- +pacemaker_packages: + - pcs + - pacemaker +pacemaker_remote_packages: + - pcs + - pacemaker-remote +fence_xvm_packages: + - fence-virt +fence_kdump_packages: + - fence-agents-kdump + - kexec-tools +fence_vmware_soap_packages: + - fence-agents-vmware-soap + - python3-requests +fence_vmware_rest_packages: + - fence-agents-vmware-rest +firewall_packages: + - firewalld + +pcsd_configuration_file: /etc/sysconfig/pcsd diff --git a/vars/fedora35.yml b/vars/fedora35.yml new file mode 100644 index 0000000..2fa89e5 --- /dev/null +++ b/vars/fedora35.yml @@ -0,0 +1,21 @@ +--- +pacemaker_packages: + - pcs + - pacemaker +pacemaker_remote_packages: + - pcs + - pacemaker-remote +fence_xvm_packages: + - fence-virt +fence_kdump_packages: + - fence-agents-kdump + - kexec-tools +fence_vmware_soap_packages: + - fence-agents-vmware-soap + - python3-requests +fence_vmware_rest_packages: + - fence-agents-vmware-rest +firewall_packages: + - firewalld + +pcsd_configuration_file: /etc/sysconfig/pcsd From abea7af730dd1f67dd478e59ac524da5a271ce1c Mon Sep 17 00:00:00 2001 From: Ondrej Famera Date: Sat, 14 May 2022 16:21:40 +0900 Subject: [PATCH 06/13] ansible-lint 5.4.0 using ansible 2.12.3: fix non-fuctional warning reported by ansible-lint (indentations, spaces, etc.) 
--- defaults/main.yml | 25 ++++++++++++------------ meta/main.yml | 36 +++++++++++++++++------------------ tasks/fence_vmware_rest.yml | 2 +- tasks/fence_vmware_soap.yml | 2 +- tasks/install_local_media.yml | 2 +- tasks/install_normal.yml | 2 +- 6 files changed, 35 insertions(+), 34 deletions(-) diff --git a/defaults/main.yml b/defaults/main.yml index 75ef3ff..5d4238c 100644 --- a/defaults/main.yml +++ b/defaults/main.yml @@ -32,9 +32,10 @@ cluster_configure_fence_kdump: false # You must provide IP/hostname of vCenter/hypervisor and username/password that is able to start/stop VMs for this cluster cluster_configure_fence_vmware_soap: false cluster_configure_fence_vmware_rest: false -#fence_vmware_ipaddr: '' -#fence_vmware_login: '' -#fence_vmware_passwd: '' +# fence_vmware_ipaddr: '' +# fence_vmware_login: '' +# fence_vmware_passwd: '' + # by default we use encrypted configuration (ssl=1) without validating certificates (ssl_insecure=1) fence_vmware_options: 'ssl="1" ssl_insecure="1"' # NOTE: Only one of fence_vmware_soap/fence_vmware_rest can be configured as stonith devices share same name. 
@@ -42,13 +43,13 @@ fence_vmware_options: 'ssl="1" ssl_insecure="1"' # custom fence device configuration variable which allows you to define your own fence devices # for proper options check examples below # -#cluster_fence_config: -# fence_device_1: -# fence_type: 'fence_vmware_soap' -# fence_options: 'pcmk_host_map="fastvm-1:vm_name_on_hypevisor1" ipaddr="vcenter.hostname" login="root" passwd="testest" ssl="1" ssl_insecure="1" op monitor interval=30s' -# fence_device_2: -# fence_type: 'fence_xvm' -# fence_options: 'pcmk_host_map="fastvm-2:vm_name_n_hypervisor2" op monitor interval=30s' +# cluster_fence_config: +# fence_device_1: +# fence_type: 'fence_vmware_soap' +# fence_options: 'pcmk_host_map="fastvm-1:vm_name_on_hypevisor1" ipaddr="vcenter.hostname" login="root" passwd="testest" ssl="1" ssl_insecure="1" op monitor interval=30s' +# fence_device_2: +# fence_type: 'fence_xvm' +# fence_options: 'pcmk_host_map="fastvm-2:vm_name_n_hypervisor2" op monitor interval=30s' # How to map fence devices to cluster nodes? # by default for every cluster node a separate stonith devices is created ('one-device-per-node'). @@ -98,8 +99,8 @@ allow_cluster_expansion: false # from interface `ens8` use `cluster_net_iface: 'ens8'`. Interface must exists on all cluster nodes. cluster_net_iface: '' -#Redundant network interface. If specified the role will setup a corosync redundant ring using the default IPv4 from this interface. -#Interface must exist on all cluster nodes. +# Redundant network interface. If specified the role will setup a corosync redundant ring using the default IPv4 from this interface. +# Interface must exist on all cluster nodes. rrp_interface: '' # Whether to add hosts to /etc/hosts. 
diff --git a/meta/main.yml b/meta/main.yml index 2ca28ad..a8275f1 100644 --- a/meta/main.yml +++ b/meta/main.yml @@ -5,23 +5,23 @@ galaxy_info: license: GPLv3 min_ansible_version: 2.8 platforms: - - name: EL - versions: - - 6 - - 7 - - 8 - - name: Fedora - versions: - - 31 - - 32 - - 33 - - 34 - - 35 - - name: Debian - versions: - - 'buster' + - name: EL + versions: + - 6 + - 7 + - 8 + - name: Fedora + versions: + - 31 + - 32 + - 33 + - 34 + - 35 + - name: Debian + versions: + - 'buster' galaxy_tags: - - clustering - - pacemaker + - clustering + - pacemaker dependencies: - - { role: ondrejhome.pcs-modules-2 } + - {role: ondrejhome.pcs-modules-2} diff --git a/tasks/fence_vmware_rest.yml b/tasks/fence_vmware_rest.yml index 080358a..784cc3c 100644 --- a/tasks/fence_vmware_rest.yml +++ b/tasks/fence_vmware_rest.yml @@ -26,7 +26,7 @@ - name: Install fence_vmware_rest fencing agent on all nodes yum: - name: "{{ fence_vmware_rest_packages }}" + name: "{{ fence_vmware_rest_packages }}" state: 'installed' - name: Configure separate stonith devices per cluster node diff --git a/tasks/fence_vmware_soap.yml b/tasks/fence_vmware_soap.yml index dbadd0e..9a0a19e 100644 --- a/tasks/fence_vmware_soap.yml +++ b/tasks/fence_vmware_soap.yml @@ -21,7 +21,7 @@ - name: Install fence_vmware_soap fencing agent on all nodes yum: - name: "{{ fence_vmware_soap_packages }}" + name: "{{ fence_vmware_soap_packages }}" state: 'installed' - name: Configure separate stonith devices per cluster node diff --git a/tasks/install_local_media.yml b/tasks/install_local_media.yml index 73b51b7..2bee47d 100644 --- a/tasks/install_local_media.yml +++ b/tasks/install_local_media.yml @@ -39,7 +39,7 @@ - name: Install package(s) for fence_kdump yum: - name: "{{ fence_kdump_packages }}" + name: "{{ fence_kdump_packages }}" state: 'installed' disablerepo: '*' enablerepo: "c{{ ansible_distribution_major_version }}-media" diff --git a/tasks/install_normal.yml b/tasks/install_normal.yml index 032debf..7e0e594 100644 
--- a/tasks/install_normal.yml +++ b/tasks/install_normal.yml @@ -26,6 +26,6 @@ - name: Install package(s) for fence_kdump yum: - name: "{{ fence_kdump_packages }}" + name: "{{ fence_kdump_packages }}" state: 'installed' when: cluster_configure_fence_kdump|bool From 9577190c590f7b018f3512d2fc9c0c75abdb0b6d Mon Sep 17 00:00:00 2001 From: Ondrej Famera Date: Sat, 18 Jun 2022 10:29:46 +0900 Subject: [PATCH 07/13] ansible lint suggested changes to booleans --- tasks/cluster_constraint_colocation.yml | 2 +- tasks/cluster_constraint_location.yml | 2 +- tasks/cluster_constraint_order.yml | 2 +- tasks/cluster_property.yml | 2 +- tasks/cluster_resource.yml | 2 +- tasks/cluster_resource_defaults.yml | 2 +- tasks/debian10.yml | 4 ++-- tasks/main.yml | 2 +- 8 files changed, 9 insertions(+), 9 deletions(-) diff --git a/tasks/cluster_constraint_colocation.yml b/tasks/cluster_constraint_colocation.yml index 18ed32a..52ea67d 100644 --- a/tasks/cluster_constraint_colocation.yml +++ b/tasks/cluster_constraint_colocation.yml @@ -8,4 +8,4 @@ resource2_role: "{{ item.resource2_role | default(omit) }}" score: "{{ item.score | default(omit) }}" with_items: "{{ cluster_constraint_colocation }}" - run_once: True + run_once: true diff --git a/tasks/cluster_constraint_location.yml b/tasks/cluster_constraint_location.yml index 3a1da32..3a76c44 100644 --- a/tasks/cluster_constraint_location.yml +++ b/tasks/cluster_constraint_location.yml @@ -8,4 +8,4 @@ resource2_role: "{{ item.resource2_role | default(omit) }}" score: "{{ item.score | default(omit) }}" with_items: "{{ cluster_constraint_location }}" - run_once: True + run_once: true diff --git a/tasks/cluster_constraint_order.yml b/tasks/cluster_constraint_order.yml index 55d0d00..7525c4f 100644 --- a/tasks/cluster_constraint_order.yml +++ b/tasks/cluster_constraint_order.yml @@ -9,4 +9,4 @@ kind: "{{ item.kind | default(omit) }}" symmetrical: "{{ item.symmetrical | default(omit) }}" with_items: "{{ cluster_constraint_order }}" - run_once: 
True + run_once: true diff --git a/tasks/cluster_property.yml b/tasks/cluster_property.yml index 9549595..311da32 100644 --- a/tasks/cluster_property.yml +++ b/tasks/cluster_property.yml @@ -6,4 +6,4 @@ node: "{{ item.node | default(omit) }}" value: "{{ item.value | default(omit) }}" with_items: "{{ cluster_property }}" - run_once: True + run_once: true diff --git a/tasks/cluster_resource.yml b/tasks/cluster_resource.yml index 7a77915..e433ce3 100644 --- a/tasks/cluster_resource.yml +++ b/tasks/cluster_resource.yml @@ -9,4 +9,4 @@ force_resource_update: "{{ item.force_resource_update | default(omit) }}" child_name: "{{ item.child_name | default(omit) }}" with_items: "{{ cluster_resource }}" - run_once: True + run_once: true diff --git a/tasks/cluster_resource_defaults.yml b/tasks/cluster_resource_defaults.yml index 78419ed..1484071 100644 --- a/tasks/cluster_resource_defaults.yml +++ b/tasks/cluster_resource_defaults.yml @@ -6,4 +6,4 @@ defaults_type: "{{ item.defaults_type | default(omit) }}" value: "{{ item.value | default(omit) }}" with_items: "{{ cluster_resource_defaults }}" - run_once: True + run_once: true diff --git a/tasks/debian10.yml b/tasks/debian10.yml index 6dac9f7..7f9689e 100644 --- a/tasks/debian10.yml +++ b/tasks/debian10.yml @@ -7,8 +7,8 @@ - name: Check if Corosync configuration is default configuration command: '/usr/bin/dpkg --verify corosync' register: result - changed_when: False - check_mode: False + changed_when: false + check_mode: false - name: Destroy default configuration pcs_cluster: diff --git a/tasks/main.yml b/tasks/main.yml index 2c7312d..c17c6c1 100644 --- a/tasks/main.yml +++ b/tasks/main.yml @@ -40,7 +40,7 @@ - files: - "{{ ansible_distribution|lower }}{{ ansible_distribution_major_version }}.yml" - "{{ ansible_distribution|lower }}_repos.yml" - skip: yes + skip: true - name: Install packages from local media include_tasks: install_local_media.yml From 94705e98b32f1de87ed8fc952961c0168bfee426 Mon Sep 17 00:00:00 2001 From: 
Ondrej Famera Date: Sun, 19 Jun 2022 11:44:19 +0900 Subject: [PATCH 08/13] move redhat_repos.yml back into tasks/ --- redhat_repos.yml => tasks/redhat_repos.yml | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename redhat_repos.yml => tasks/redhat_repos.yml (100%) diff --git a/redhat_repos.yml b/tasks/redhat_repos.yml similarity index 100% rename from redhat_repos.yml rename to tasks/redhat_repos.yml From 498223774c9da9fc0d1da5af726ed2046071ac13 Mon Sep 17 00:00:00 2001 From: Ondrej Famera Date: Sun, 19 Jun 2022 17:32:13 +0900 Subject: [PATCH 09/13] Add support for some pcs-0.11 platforms (RHEL9, Fedora 36, AlmaLinux 9) --- README.md | 4 +++- meta/main.yml | 2 ++ tasks/almalinux_repos.yml | 13 +++++++++++++ tasks/firewall-el9.yml | 13 +++++++++++++ tasks/install_normal.yml | 1 + tasks/redhat_repos.yml | 8 ++++---- vars/fedora36.yml | 20 ++++++++++++++++++++ vars/redhat9.yml | 19 +++++++++++++++++++ 8 files changed, 75 insertions(+), 5 deletions(-) create mode 100644 tasks/firewall-el9.yml create mode 100644 vars/fedora36.yml create mode 100644 vars/redhat9.yml diff --git a/README.md b/README.md index c60e552..369d4d5 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ ha-cluster-pacemaker ========= -Role for configuring and expanding basic pacemaker cluster on CentOS/RHEL 6/7/8, AlmaLinux 8, Rocky Linux 8, Fedora 31/32/33/34/35 and CentOS 8 Stream systems. +Role for configuring and expanding basic pacemaker cluster on CentOS/RHEL 6/7/8/9, AlmaLinux 8/9, Rocky Linux 8, Fedora 31/32/33/34/35/36 and CentOS 8 Stream systems. This role can configure following aspects of pacemaker cluster: - enable needed system repositories @@ -48,6 +48,8 @@ Ansible version **2.9.10** and **2.9.11** will fail with error `"'hostvars' is u On **CentOS Linux 8** you have to ensure that BaseOS and Appstream repositories are working properly. 
As the CentOS Linux 8 is in the End-Of-Life phase, this role will configure HA repository to point to vault.centos.org if repository configuration (`enable_repos: true`) is requested (it is by default). +**pcs-0.11** version distributions (AlmaLinux 9, RHEL 9, Fedora 36) are supported only with ondrejhome.pcs-modules-2 version 27.0.0 or higher. + Role Variables -------------- diff --git a/meta/main.yml b/meta/main.yml index a8275f1..0dc1312 100644 --- a/meta/main.yml +++ b/meta/main.yml @@ -10,6 +10,7 @@ galaxy_info: - 6 - 7 - 8 + - 9 - name: Fedora versions: - 31 @@ -17,6 +18,7 @@ galaxy_info: - 33 - 34 - 35 + - 36 - name: Debian versions: - 'buster' diff --git a/tasks/almalinux_repos.yml b/tasks/almalinux_repos.yml index fee96b9..e0626f1 100644 --- a/tasks/almalinux_repos.yml +++ b/tasks/almalinux_repos.yml @@ -19,3 +19,16 @@ 'HighAvailability' not in yum_repolist.stdout and enable_repos | bool and ansible_distribution_major_version in ['8'] + +- name: enable highavailability repository (AlmaLinux 9) + ini_file: + dest: '/etc/yum.repos.d/almalinux-highavailability.repo' + section: 'highavailability' + option: 'enabled' + value: '1' + create: 'no' + mode: '0644' + when: >- + 'HighAvailability' not in yum_repolist.stdout + and enable_repos | bool + and ansible_distribution_major_version in ['9'] diff --git a/tasks/firewall-el9.yml b/tasks/firewall-el9.yml new file mode 100644 index 0000000..49ede1e --- /dev/null +++ b/tasks/firewall-el9.yml @@ -0,0 +1,13 @@ +--- +- name: Enable and start firewalld service + service: + name: 'firewalld' + enabled: true + state: 'started' + +- name: Enable 'high-availability' firewalld service + firewalld: + service: 'high-availability' + permanent: true + state: 'enabled' + immediate: true diff --git a/tasks/install_normal.yml b/tasks/install_normal.yml index 7e0e594..743ce4f 100644 --- a/tasks/install_normal.yml +++ b/tasks/install_normal.yml @@ -5,6 +5,7 @@ state: 'installed' when: - not (ansible_distribution in 
['RedHat','CentOS','AlmaLinux','Rocky'] and ansible_distribution_major_version == '8') + - not (ansible_distribution in ['RedHat','AlmaLinux'] and ansible_distribution_major_version == '9') - not (ansible_distribution == 'Fedora' and ansible_distribution_major_version >= '31') - name: Install Pacemaker cluster packages to all nodes diff --git a/tasks/redhat_repos.yml b/tasks/redhat_repos.yml index eaa13a3..2edc615 100644 --- a/tasks/redhat_repos.yml +++ b/tasks/redhat_repos.yml @@ -47,15 +47,15 @@ and enable_beta_repos | bool and ansible_distribution_major_version in ['6','7'] -- name: enable HA/RS repository (RHEL 8) +- name: enable HA/RS repository (RHEL 8/9) command: >- subscription-manager repos - --enable="rhel-8-for-x86_64-{{ (repos_type == 'rs') | ternary( 'resilientstorage', 'highavailability' ) }}-rpms" + --enable="rhel-{{ ansible_distribution_major_version }}-for-x86_64-{{ (repos_type == 'rs') | ternary( 'resilientstorage', 'highavailability' ) }}-rpms" when: >- - ['rhel-8-for-x86_64-',(repos_type == 'rs') | ternary( 'resilientstorage', 'highavailability' ),'-rpms'] | join + ['rhel-',ansible_distribution_major_version,'-for-x86_64-',(repos_type == 'rs') | ternary( 'resilientstorage', 'highavailability' ),'-rpms'] | join not in yum_repolist.stdout and enable_repos | bool - and ansible_distribution_major_version in ['8'] + and ansible_distribution_major_version in ['8','9'] - name: enable single custom repository command: subscription-manager repos --enable="{{ custom_repository }}" diff --git a/vars/fedora36.yml b/vars/fedora36.yml new file mode 100644 index 0000000..c452272 --- /dev/null +++ b/vars/fedora36.yml @@ -0,0 +1,20 @@ +--- +pacemaker_packages: + - pcs + - pacemaker +pacemaker_remote_packages: + - pcs + - pacemaker-remote +fence_xvm_packages: + - fence-virt +fence_kdump_packages: + - fence-agents-kdump + - kexec-tools +fence_vmware_soap_packages: + - fence-agents-vmware-soap +fence_vmware_rest_packages: + - fence-agents-vmware-rest 
+firewall_packages: + - firewalld + +pcsd_configuration_file: /etc/sysconfig/pcsd diff --git a/vars/redhat9.yml b/vars/redhat9.yml new file mode 100644 index 0000000..5f1065e --- /dev/null +++ b/vars/redhat9.yml @@ -0,0 +1,19 @@ +--- +pacemaker_packages: + - pcs +pacemaker_remote_packages: + - pcs + - pacemaker-remote +fence_xvm_packages: + - fence-virt +fence_kdump_packages: + - fence-agents-kdump + - kexec-tools +fence_vmware_soap_packages: + - fence-agents-vmware-soap +fence_vmware_rest_packages: + - fence-agents-vmware-rest +firewall_packages: + - firewalld + +pcsd_configuration_file: /etc/sysconfig/pcsd From 5d50da0e8891aee67dd17b2b822c131f833923db Mon Sep 17 00:00:00 2001 From: Ondrej Famera Date: Sun, 19 Jun 2022 17:40:46 +0900 Subject: [PATCH 10/13] Add basic support for Debian 11.x Bullsyeye --- README.md | 2 +- meta/main.yml | 1 + tasks/debian11.yml | 24 ++++++++++++++++++++++++ vars/debian11.yml | 12 ++++++++++++ 4 files changed, 38 insertions(+), 1 deletion(-) create mode 100644 tasks/debian11.yml create mode 100644 vars/debian11.yml diff --git a/README.md b/README.md index 369d4d5..0a1b3cb 100644 --- a/README.md +++ b/README.md @@ -42,7 +42,7 @@ This role depend on role [ondrejhome.pcs-modules-2](https://github.com/OndrejHom **CentOS 8 Stream** Tested with version 20201211 minimal usable ansible version is **2.9.16/2.10.4**. Version **2.8.18** was **not** working at time of testing. This is related to [Service is in unknown state #71528](https://github.com/ansible/ansible/issues/71528). -**Debian Buster** Tested with version 20210310 with ansible version **2.10**. Debian version does not include the stonith configuration and the firewall configuration. **Note:** This role went only through limited testing on Debian - not all features of this role were tested. +**Debian Buster** Tested with version 20210310 with ansible version **2.10** and **Debian Bullseye** Tested with version 20220326 with ansible version **2.12**. 
Debian part of this role does not include the stonith configuration and the firewall configuration. **Note:** This role went only through limited testing on Debian - not all features of this role were tested. Ansible version **2.9.10** and **2.9.11** will fail with error `"'hostvars' is undefined"` when trying to configure remote nodes. This applies only when there is at least one node with `cluster_node_is_remote=True`. **Avoid these Ansible versions** if you plan to configure remote nodes with this role. diff --git a/meta/main.yml b/meta/main.yml index 0dc1312..f7fdeb6 100644 --- a/meta/main.yml +++ b/meta/main.yml @@ -22,6 +22,7 @@ galaxy_info: - name: Debian versions: - 'buster' + - 'bullseye' galaxy_tags: - clustering - pacemaker diff --git a/tasks/debian11.yml b/tasks/debian11.yml new file mode 100644 index 0000000..44a30d6 --- /dev/null +++ b/tasks/debian11.yml @@ -0,0 +1,24 @@ +--- +- name: Install Pacemaker cluster packages to all nodes + apt: + name: "{{ cluster_node_is_remote | bool | ternary(pacemaker_remote_packages, pacemaker_packages) }}" + state: 'present' + cache_valid_time: 3600 + +- name: Install dependencies for pcs-modules-2 + apt: + name: 'python3-distutils' + state: 'present' + cache_valid_time: 3600 + when: ansible_distribution == 'Debian' and ansible_distribution_major_version == '11' + +- name: Check if Corosync configuration is default configuration + command: '/usr/bin/dpkg --verify corosync' + register: result + changed_when: false + check_mode: false + +- name: Destroy default configuration + pcs_cluster: + state: 'absent' + when: not result.stdout | regex_search(".* \/etc\/corosync\/corosync.conf$", multiline=True) diff --git a/vars/debian11.yml b/vars/debian11.yml new file mode 100644 index 0000000..698a1b6 --- /dev/null +++ b/vars/debian11.yml @@ -0,0 +1,12 @@ +--- +pacemaker_packages: + - pcs + - pacemaker +pacemaker_remote_packages: + - pcs + - pacemaker-remote + +cluster_configure_fence_xvm: false +cluster_firewall: false + 
+pcsd_configuration_file: /etc/default/pcsd From 990a8fd6b898dbf4f5fe03aae97d3ccc768b6f5b Mon Sep 17 00:00:00 2001 From: Ondrej Famera Date: Sun, 25 Sep 2022 10:07:23 +0900 Subject: [PATCH 11/13] fix: missing firewalld setup for EL9 in fence_xvm --- tasks/fence_xvm.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tasks/fence_xvm.yml b/tasks/fence_xvm.yml index c1b0430..2b79752 100644 --- a/tasks/fence_xvm.yml +++ b/tasks/fence_xvm.yml @@ -25,7 +25,7 @@ state: 'enabled' immediate: true when: >- - (ansible_distribution_major_version in [ "7", "8" ] or ansible_distribution == 'Fedora') + (ansible_distribution_major_version in [ "7", "8", "9" ] or ansible_distribution == 'Fedora') and cluster_firewall|bool - name: Configure separate stonith devices per cluster node From 2e93fe18c17fe24b531b71bb51e5727d8f0e069c Mon Sep 17 00:00:00 2001 From: Ondrej Famera Date: Thu, 24 Nov 2022 14:09:49 +0900 Subject: [PATCH 12/13] make kdump service name into variable so it can be overridden --- tasks/fence_kdump.yml | 2 +- vars/main.yml | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 vars/main.yml diff --git a/tasks/fence_kdump.yml b/tasks/fence_kdump.yml index fe7e897..38fb62b 100644 --- a/tasks/fence_kdump.yml +++ b/tasks/fence_kdump.yml @@ -1,7 +1,7 @@ --- - name: Enable kdump service service: - name: 'kdump' + name: "{{ kdump_service_name }}" state: 'started' enabled: true diff --git a/vars/main.yml b/vars/main.yml new file mode 100644 index 0000000..f7d2acb --- /dev/null +++ b/vars/main.yml @@ -0,0 +1,3 @@ +--- +# common values that can be overridden by other files in this directory +kdump_service_name: 'kdump' From e83ef873c755b6b5f1f4cd73220464039c9d8dbf Mon Sep 17 00:00:00 2001 From: Ondrej Famera Date: Thu, 24 Nov 2022 14:25:57 +0900 Subject: [PATCH 13/13] add support for configuring fence_kdump on Debian 10/11 systems --- tasks/debian10.yml | 8 ++++++++ tasks/debian11.yml | 7 +++++++ tasks/fence_kdump.yml | 3 ++- 
vars/debian10.yml | 3 +++ vars/debian11.yml | 3 +++ 5 files changed, 23 insertions(+), 1 deletion(-) diff --git a/tasks/debian10.yml b/tasks/debian10.yml index 7f9689e..cc8950e 100644 --- a/tasks/debian10.yml +++ b/tasks/debian10.yml @@ -3,6 +3,14 @@ apt: name: "{{ cluster_node_is_remote | bool | ternary(pacemaker_remote_packages, pacemaker_packages) }}" state: 'present' + cache_valid_time: 3600 + +- name: Install package(s) for fence_kdump + apt: + name: "{{ fence_kdump_packages }}" + state: 'present' + cache_valid_time: 3600 + when: cluster_configure_fence_kdump|bool - name: Check if Corosync configuration is default configuration command: '/usr/bin/dpkg --verify corosync' diff --git a/tasks/debian11.yml b/tasks/debian11.yml index 44a30d6..ac19cf8 100644 --- a/tasks/debian11.yml +++ b/tasks/debian11.yml @@ -12,6 +12,13 @@ cache_valid_time: 3600 when: ansible_distribution == 'Debian' and ansible_distribution_major_version == '11' +- name: Install package(s) for fence_kdump + apt: + name: "{{ fence_kdump_packages }}" + state: 'present' + cache_valid_time: 3600 + when: cluster_configure_fence_kdump|bool + - name: Check if Corosync configuration is default configuration command: '/usr/bin/dpkg --verify corosync' register: result diff --git a/tasks/fence_kdump.yml b/tasks/fence_kdump.yml index 38fb62b..80990fb 100644 --- a/tasks/fence_kdump.yml +++ b/tasks/fence_kdump.yml @@ -12,9 +12,10 @@ name: "fence-kdump-{{ hostvars[item][cluster_hostname_fact] }}" resource_class: 'stonith' resource_type: 'fence_kdump' - options: "pcmk_host_list={{ hostvars[item][cluster_hostname_fact] }}" + options: "pcmk_host_list={{ hostvars[item][cluster_hostname_fact] }} {% if ansible_distribution == 'Debian' %}pcmk_monitor_action=metadata{% endif %}" with_items: "{{ play_hosts }}" run_once: true + # FIXME: fence_kdump on Debian returns exit code 1 for 'monitor' op so we use 'metadata' as dummy replacement - name: create fence constraints pcs_constraint_location: diff --git 
a/vars/debian10.yml b/vars/debian10.yml index 698a1b6..94ae21b 100644 --- a/vars/debian10.yml +++ b/vars/debian10.yml @@ -5,7 +5,10 @@ pacemaker_packages: pacemaker_remote_packages: - pcs - pacemaker-remote +fence_kdump_packages: + - kdump-tools +kdump_service_name: 'kdump-tools' cluster_configure_fence_xvm: false cluster_firewall: false diff --git a/vars/debian11.yml b/vars/debian11.yml index 698a1b6..94ae21b 100644 --- a/vars/debian11.yml +++ b/vars/debian11.yml @@ -5,7 +5,10 @@ pacemaker_packages: pacemaker_remote_packages: - pcs - pacemaker-remote +fence_kdump_packages: + - kdump-tools +kdump_service_name: 'kdump-tools' cluster_configure_fence_xvm: false cluster_firewall: false