diff --git a/README.md b/README.md
index 0c7b741..0a1b3cb 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,7 @@
 ha-cluster-pacemaker
 =========

-Role for configuring and expanding basic pacemaker cluster on CentOS/RHEL 6/7/8, AlmaLinux 8, Fedora 31/32/33 and CentOS 8 Stream systems.
+Role for configuring and expanding a basic pacemaker cluster on CentOS/RHEL 6/7/8/9, AlmaLinux 8/9, Rocky Linux 8, Fedora 31/32/33/34/35/36 and CentOS 8 Stream systems.

 This role can configure following aspects of pacemaker cluster:
 - enable needed system repositories
@@ -42,10 +42,14 @@ This role depend on role [ondrejhome.pcs-modules-2](https://github.com/OndrejHom
 **CentOS 8 Stream** Tested with version 20201211 minimal usable ansible version is **2.9.16/2.10.4**. Version **2.8.18** was **not** working at time of testing. This is related to [Service is in unknown state #71528](https://github.com/ansible/ansible/issues/71528).

-**Debian Buster** Tested with version 20210310 with ansible version **2.10**. Debian version does not include the stonith configuration and the firewall configuration. **Note:** This role went only through limited testing on Debian - not all features of this role were tested.
+**Debian Buster** Tested with version 20210310 with ansible version **2.10**; **Debian Bullseye** tested with version 20220326 with ansible version **2.12**. The Debian part of this role does not include the stonith configuration and the firewall configuration. **Note:** This role went only through limited testing on Debian - not all features of this role were tested.

 Ansible version **2.9.10** and **2.9.11** will fail with error `"'hostvars' is undefined"` when trying to configure remote nodes. This applies only when there is at least one node with `cluster_node_is_remote=True`. **Avoid these Ansible versions** if you plan to configure remote nodes with this role.

+On **CentOS Linux 8** you have to ensure that the BaseOS and AppStream repositories are working properly. As CentOS Linux 8 is in the End-Of-Life phase, this role will configure the HA repository to point to vault.centos.org when repository configuration is requested (`enable_repos: true`, which is the default).
+
+Distributions shipping **pcs-0.11** (AlmaLinux 9, RHEL 9, Fedora 36) are supported only with ondrejhome.pcs-modules-2 version 27.0.0 or higher.
+
 Role Variables
 --------------

@@ -131,7 +135,7 @@ Role Variables
     cluster_configure_stonith_style: 'one-device-per-node'
     ```

- - (RHEL/CentOS/AlmaLinux) enable the repositories containing needed packages
+ - (RHEL/CentOS/AlmaLinux/Rocky) enable the repositories containing needed packages
     ```
     enable_repos: true
     ```
diff --git a/defaults/main.yml b/defaults/main.yml
index 75ef3ff..5d4238c 100644
--- a/defaults/main.yml
+++ b/defaults/main.yml
@@ -32,9 +32,10 @@ cluster_configure_fence_kdump: false
 # You must provide IP/hostname of vCenter/hypervisor and username/password that is able to start/stop VMs for this cluster
 cluster_configure_fence_vmware_soap: false
 cluster_configure_fence_vmware_rest: false
-#fence_vmware_ipaddr: ''
-#fence_vmware_login: ''
-#fence_vmware_passwd: ''
+# fence_vmware_ipaddr: ''
+# fence_vmware_login: ''
+# fence_vmware_passwd: ''
+
 # by default we use encrypted configuration (ssl=1) without validating certificates (ssl_insecure=1)
 fence_vmware_options: 'ssl="1" ssl_insecure="1"'
 # NOTE: Only one of fence_vmware_soap/fence_vmware_rest can be configured as stonith devices share same name.
@@ -42,13 +43,13 @@ fence_vmware_options: 'ssl="1" ssl_insecure="1"'
 # custom fence device configuration variable which allows you to define your own fence devices
 # for proper options check examples below
 #
-#cluster_fence_config:
-#  fence_device_1:
-#    fence_type: 'fence_vmware_soap'
-#    fence_options: 'pcmk_host_map="fastvm-1:vm_name_on_hypevisor1" ipaddr="vcenter.hostname" login="root" passwd="testest" ssl="1" ssl_insecure="1" op monitor interval=30s'
-#  fence_device_2:
-#    fence_type: 'fence_xvm'
-#    fence_options: 'pcmk_host_map="fastvm-2:vm_name_n_hypervisor2" op monitor interval=30s'
+# cluster_fence_config:
+#   fence_device_1:
+#     fence_type: 'fence_vmware_soap'
+#     fence_options: 'pcmk_host_map="fastvm-1:vm_name_on_hypevisor1" ipaddr="vcenter.hostname" login="root" passwd="testest" ssl="1" ssl_insecure="1" op monitor interval=30s'
+#   fence_device_2:
+#     fence_type: 'fence_xvm'
+#     fence_options: 'pcmk_host_map="fastvm-2:vm_name_n_hypervisor2" op monitor interval=30s'

 # How to map fence devices to cluster nodes?
 # by default for every cluster node a separate stonith devices is created ('one-device-per-node').
@@ -98,8 +99,8 @@ allow_cluster_expansion: false
 # from interface `ens8` use `cluster_net_iface: 'ens8'`. Interface must exists on all cluster nodes.
 cluster_net_iface: ''

-#Redundant network interface. If specified the role will setup a corosync redundant ring using the default IPv4 from this interface.
-#Interface must exist on all cluster nodes.
+# Redundant network interface. If specified the role will setup a corosync redundant ring using the default IPv4 from this interface.
+# Interface must exist on all cluster nodes.
 rrp_interface: ''

 # Whether to add hosts to /etc/hosts.
diff --git a/meta/main.yml b/meta/main.yml
index bafbd21..f7fdeb6 100644
--- a/meta/main.yml
+++ b/meta/main.yml
@@ -5,21 +5,26 @@ galaxy_info:
   license: GPLv3
   min_ansible_version: 2.8
   platforms:
-  - name: EL
-    versions:
-    - 6
-    - 7
-    - 8
-  - name: Fedora
-    versions:
-    - 31
-    - 32
-    - 33
-  - name: Debian
-    versions:
-    - 'buster'
+    - name: EL
+      versions:
+        - 6
+        - 7
+        - 8
+        - 9
+    - name: Fedora
+      versions:
+        - 31
+        - 32
+        - 33
+        - 34
+        - 35
+        - 36
+    - name: Debian
+      versions:
+        - 'buster'
+        - 'bullseye'
   galaxy_tags:
-  - clustering
-  - pacemaker
 dependencies:
-  - { role: ondrejhome.pcs-modules-2 }
+  - {role: ondrejhome.pcs-modules-2}
diff --git a/tasks/almalinux_repos.yml b/tasks/almalinux_repos.yml
index fee96b9..e0626f1 100644
--- a/tasks/almalinux_repos.yml
+++ b/tasks/almalinux_repos.yml
@@ -19,3 +19,16 @@
     'HighAvailability' not in yum_repolist.stdout
     and enable_repos | bool
     and ansible_distribution_major_version in ['8']
+
+- name: enable highavailability repository (AlmaLinux 9)
+  ini_file:
+    dest: '/etc/yum.repos.d/almalinux-highavailability.repo'
+    section: 'highavailability'
+    option: 'enabled'
+    value: '1'
+    create: 'no'
+    mode: '0644'
+  when: >-
+    'HighAvailability' not in yum_repolist.stdout
+    and enable_repos | bool
+    and ansible_distribution_major_version in ['9']
diff --git a/tasks/centos_repos.yml b/tasks/centos_repos.yml
index c7aee03..48847f6 100644
--- a/tasks/centos_repos.yml
+++ b/tasks/centos_repos.yml
@@ -7,7 +7,7 @@
   changed_when: false
   check_mode: false

-- name: enable highavailability repository (CentOS 8.1/8.2)
+- name: EOL enable highavailability repository (CentOS 8.1, 8.2)
   ini_file:
     dest: '/etc/yum.repos.d/CentOS-HA.repo'
     section: 'HighAvailability'
     option: 'enabled'
     value: '1'
     create: 'no'
     mode: '0644'
   when: >-
     'HighAvailability' not in yum_repolist.stdout
     and enable_repos | bool
     and ansible_distribution_version in ['8.1', '8.2']

-- name: enable highavailability repository (CentOS 8.3+)
+- name: EOL enable highavailability repository (CentOS 8.3, 8.4, 8.5)
   ini_file:
     dest: '/etc/yum.repos.d/CentOS-Linux-HighAvailability.repo'
     section: 'ha'
@@ -34,6 +34,56 @@
     and ansible_distribution_major_version in ['8']
     and ansible_distribution_version not in ['8.0', '8.1', '8.2', '8']

+- name: EOL disable mirrorlist for CentOS Linux 8.1, 8.2 HA repository
+  ini_file:
+    dest: '/etc/yum.repos.d/CentOS-HA.repo'
+    section: 'HighAvailability'
+    option: 'mirrorlist'
+    create: 'no'
+    mode: '0644'
+    state: absent
+  when: >-
+    enable_repos | bool
+    and ansible_distribution_version in ['8.1', '8.2']
+
+- name: EOL disable mirrorlist for CentOS Linux 8.3, 8.4, 8.5 HA repository
+  ini_file:
+    dest: '/etc/yum.repos.d/CentOS-Linux-HighAvailability.repo'
+    section: 'ha'
+    option: 'mirrorlist'
+    create: 'no'
+    mode: '0644'
+    state: absent
+  when: >-
+    enable_repos | bool
+    and ansible_distribution_version in ['8.3', '8.4', '8.5']
+
+- name: EOL configure baseurl for CentOS Linux 8.1, 8.2 HA repository
+  ini_file:
+    dest: '/etc/yum.repos.d/CentOS-HA.repo'
+    section: 'HighAvailability'
+    option: 'baseurl'
+    value: 'http://vault.centos.org/$contentdir/$releasever/HighAvailability/$basearch/os/'
+    create: 'no'
+    mode: '0644'
+    state: present
+  when: >-
+    enable_repos | bool
+    and ansible_distribution_version in ['8.1', '8.2']
+
+- name: EOL configure baseurl for CentOS Linux 8.3, 8.4, 8.5 HA repository
+  ini_file:
+    dest: '/etc/yum.repos.d/CentOS-Linux-HighAvailability.repo'
+    section: 'ha'
+    option: 'baseurl'
+    value: 'http://vault.centos.org/$contentdir/$releasever/HighAvailability/$basearch/os/'
+    create: 'no'
+    mode: '0644'
+    state: present
+  when: >-
+    enable_repos | bool
+    and ansible_distribution_version in ['8.3', '8.4', '8.5']
+
 - name: enable highavailability repository (CentOS 8 Stream)
   ini_file:
     dest: '/etc/yum.repos.d/CentOS-Stream-HighAvailability.repo'
diff --git a/tasks/cluster_constraint_colocation.yml b/tasks/cluster_constraint_colocation.yml
index 18ed32a..52ea67d 100644
--- a/tasks/cluster_constraint_colocation.yml
+++ b/tasks/cluster_constraint_colocation.yml
@@ -8,4 +8,4 @@
     resource2_role: "{{ item.resource2_role | default(omit) }}"
     score: "{{ item.score | default(omit) }}"
   with_items: "{{ cluster_constraint_colocation }}"
-  run_once: True
+  run_once: true
diff --git a/tasks/cluster_constraint_location.yml b/tasks/cluster_constraint_location.yml
index 3a1da32..3a76c44 100644
--- a/tasks/cluster_constraint_location.yml
+++ b/tasks/cluster_constraint_location.yml
@@ -8,4 +8,4 @@
     resource2_role: "{{ item.resource2_role | default(omit) }}"
     score: "{{ item.score | default(omit) }}"
   with_items: "{{ cluster_constraint_location }}"
-  run_once: True
+  run_once: true
diff --git a/tasks/cluster_constraint_order.yml b/tasks/cluster_constraint_order.yml
index 55d0d00..7525c4f 100644
--- a/tasks/cluster_constraint_order.yml
+++ b/tasks/cluster_constraint_order.yml
@@ -9,4 +9,4 @@
     kind: "{{ item.kind | default(omit) }}"
     symmetrical: "{{ item.symmetrical | default(omit) }}"
   with_items: "{{ cluster_constraint_order }}"
-  run_once: True
+  run_once: true
diff --git a/tasks/cluster_property.yml b/tasks/cluster_property.yml
index 9549595..311da32 100644
--- a/tasks/cluster_property.yml
+++ b/tasks/cluster_property.yml
@@ -6,4 +6,4 @@
     node: "{{ item.node | default(omit) }}"
     value: "{{ item.value | default(omit) }}"
   with_items: "{{ cluster_property }}"
-  run_once: True
+  run_once: true
diff --git a/tasks/cluster_resource.yml b/tasks/cluster_resource.yml
index 7a77915..e433ce3 100644
--- a/tasks/cluster_resource.yml
+++ b/tasks/cluster_resource.yml
@@ -9,4 +9,4 @@
     force_resource_update: "{{ item.force_resource_update | default(omit) }}"
     child_name: "{{ item.child_name | default(omit) }}"
   with_items: "{{ cluster_resource }}"
-  run_once: True
+  run_once: true
diff --git a/tasks/cluster_resource_defaults.yml b/tasks/cluster_resource_defaults.yml
index 78419ed..1484071 100644
--- a/tasks/cluster_resource_defaults.yml
+++ b/tasks/cluster_resource_defaults.yml
@@ -6,4 +6,4 @@
     defaults_type: "{{ item.defaults_type | default(omit) }}"
     value: "{{ item.value | default(omit) }}"
   with_items: "{{ cluster_resource_defaults }}"
-  run_once: True
+  run_once: true
diff --git a/tasks/debian10.yml b/tasks/debian10.yml
index 6dac9f7..cc8950e 100644
--- a/tasks/debian10.yml
+++ b/tasks/debian10.yml
@@ -3,12 +3,20 @@
   apt:
     name: "{{ cluster_node_is_remote | bool | ternary(pacemaker_remote_packages, pacemaker_packages) }}"
     state: 'present'
+    cache_valid_time: 3600
+
+- name: Install package(s) for fence_kdump
+  apt:
+    name: "{{ fence_kdump_packages }}"
+    state: 'present'
+    cache_valid_time: 3600
+  when: cluster_configure_fence_kdump|bool

 - name: Check if Corosync configuration is default configuration
   command: '/usr/bin/dpkg --verify corosync'
   register: result
-  changed_when: False
-  check_mode: False
+  changed_when: false
+  check_mode: false

 - name: Destroy default configuration
   pcs_cluster:
diff --git a/tasks/debian11.yml b/tasks/debian11.yml
new file mode 100644
index 0000000..ac19cf8
--- /dev/null
+++ b/tasks/debian11.yml
@@ -0,0 +1,31 @@
+---
+- name: Install Pacemaker cluster packages to all nodes
+  apt:
+    name: "{{ cluster_node_is_remote | bool | ternary(pacemaker_remote_packages, pacemaker_packages) }}"
+    state: 'present'
+    cache_valid_time: 3600
+
+- name: Install dependencies for pcs-modules-2
+  apt:
+    name: 'python3-distutils'
+    state: 'present'
+    cache_valid_time: 3600
+  when: ansible_distribution == 'Debian' and ansible_distribution_major_version == '11'
+
+- name: Install package(s) for fence_kdump
+  apt:
+    name: "{{ fence_kdump_packages }}"
+    state: 'present'
+    cache_valid_time: 3600
+  when: cluster_configure_fence_kdump|bool
+
+- name: Check if Corosync configuration is default configuration
+  command: '/usr/bin/dpkg --verify corosync'
+  register: result
+  changed_when: false
+  check_mode: false
+
+- name: Destroy default configuration
+  pcs_cluster:
+    state: 'absent'
+  when: not result.stdout | regex_search(".* \/etc\/corosync\/corosync.conf$", multiline=True)
diff --git a/tasks/fence_kdump.yml b/tasks/fence_kdump.yml
index 5ed37b3..80990fb 100644
--- a/tasks/fence_kdump.yml
+++ b/tasks/fence_kdump.yml
@@ -1,7 +1,7 @@
 ---
 - name: Enable kdump service
   service:
-    name: 'kdump'
+    name: "{{ kdump_service_name }}"
     state: 'started'
     enabled: true

@@ -12,9 +12,10 @@
     name: "fence-kdump-{{ hostvars[item][cluster_hostname_fact] }}"
     resource_class: 'stonith'
     resource_type: 'fence_kdump'
-    options: "pcmk_host_list={{ hostvars[item][cluster_hostname_fact] }}"
+    options: "pcmk_host_list={{ hostvars[item][cluster_hostname_fact] }} {% if ansible_distribution == 'Debian' %}pcmk_monitor_action=metadata{% endif %}"
   with_items: "{{ play_hosts }}"
   run_once: true
+  # FIXME: fence_kdump on Debian returns exit code 1 for 'monitor' op so we use 'metadata' as dummy replacement

 - name: create fence constraints
   pcs_constraint_location:
@@ -33,6 +34,6 @@
     resource_class: 'stonith'
     resource_type: 'fence_kdump'
     options: >-
-      pcmk_host_map="{% for item in groups['cluster_node_is_remote_False'] %}{{ hostvars[item][cluster_hostname_fact] }}:{{ hostvars[item]['vm_name'] }};{% endfor %}"
+      pcmk_host_map="{% for item in groups['cluster'+rand_id+'_node_is_remote_False'] %}{{ hostvars[item][cluster_hostname_fact] }}:{{ hostvars[item]['vm_name'] }};{% endfor %}"
   run_once: true
   when: cluster_configure_stonith_style is defined and cluster_configure_stonith_style == 'one-device-per-cluster'
diff --git a/tasks/fence_vmware_rest.yml b/tasks/fence_vmware_rest.yml
index 63b6593..784cc3c 100644
--- a/tasks/fence_vmware_rest.yml
+++ b/tasks/fence_vmware_rest.yml
@@ -26,7 +26,7 @@

 - name: Install fence_vmware_rest fencing agent on all nodes
   yum:
-    name: "{{ fence_vmware_rest_packages }}"
+    name: "{{ fence_vmware_rest_packages }}"
     state: 'installed'

 - name: Configure separate stonith devices per cluster node
@@ -62,7 +62,7 @@
     resource_class: 'stonith'
     resource_type: 'fence_vmware_rest'
     options: >-
-      pcmk_host_map="{% for item in groups['cluster_node_is_remote_False'] %}{{ hostvars[item][cluster_hostname_fact] }}:{{ hostvars[item]['vm_name'] }};{% endfor %}"
+      pcmk_host_map="{% for item in groups['cluster'+rand_id+'_node_is_remote_False'] %}{{ hostvars[item][cluster_hostname_fact] }}:{{ hostvars[item]['vm_name'] }};{% endfor %}"
       ipaddr={{ fence_vmware_ipaddr }}
       login={{ fence_vmware_login }}
       passwd={{ fence_vmware_passwd }}
diff --git a/tasks/fence_vmware_soap.yml b/tasks/fence_vmware_soap.yml
index 492c5a2..9a0a19e 100644
--- a/tasks/fence_vmware_soap.yml
+++ b/tasks/fence_vmware_soap.yml
@@ -21,7 +21,7 @@

 - name: Install fence_vmware_soap fencing agent on all nodes
   yum:
-    name: "{{ fence_vmware_soap_packages }}"
+    name: "{{ fence_vmware_soap_packages }}"
     state: 'installed'

 - name: Configure separate stonith devices per cluster node
@@ -57,7 +57,7 @@
     resource_class: 'stonith'
     resource_type: 'fence_vmware_soap'
     options: >-
-      pcmk_host_map="{% for item in groups['cluster_node_is_remote_False'] %}{{ hostvars[item][cluster_hostname_fact] }}:{{ hostvars[item]['vm_name'] }};{% endfor %}"
+      pcmk_host_map="{% for item in groups['cluster'+rand_id+'_node_is_remote_False'] %}{{ hostvars[item][cluster_hostname_fact] }}:{{ hostvars[item]['vm_name'] }};{% endfor %}"
       ipaddr={{ fence_vmware_ipaddr }}
       login={{ fence_vmware_login }}
       passwd={{ fence_vmware_passwd }}
diff --git a/tasks/fence_xvm.yml b/tasks/fence_xvm.yml
index 153ebd7..2b79752 100644
--- a/tasks/fence_xvm.yml
+++ b/tasks/fence_xvm.yml
@@ -25,7 +25,7 @@
     state: 'enabled'
     immediate: true
   when: >-
-    (ansible_distribution_major_version in [ "7", "8" ] or ansible_distribution == 'Fedora')
+    (ansible_distribution_major_version in [ "7", "8", "9" ] or ansible_distribution == 'Fedora')
    and cluster_firewall|bool

 - name: Configure separate stonith devices per cluster node
@@ -58,7 +58,7 @@
     resource_class: 'stonith'
     resource_type: 'fence_xvm'
     options: >-
-      pcmk_host_map="{% for item in groups['cluster_node_is_remote_False'] %}{{ hostvars[item][cluster_hostname_fact] }}:{{ hostvars[item]['vm_name'] }};{% endfor %}"
+      pcmk_host_map="{% for item in groups['cluster'+rand_id+'_node_is_remote_False'] %}{{ hostvars[item][cluster_hostname_fact] }}:{{ hostvars[item]['vm_name'] }};{% endfor %}"
       op monitor interval=30s
   run_once: true
   when: cluster_configure_stonith_style is defined and cluster_configure_stonith_style == 'one-device-per-cluster'
diff --git a/tasks/firewall-el9.yml b/tasks/firewall-el9.yml
new file mode 100644
index 0000000..49ede1e
--- /dev/null
+++ b/tasks/firewall-el9.yml
@@ -0,0 +1,13 @@
+---
+- name: Enable and start firewalld service
+  service:
+    name: 'firewalld'
+    enabled: true
+    state: 'started'
+
+- name: Enable 'high-availability' firewalld service
+  firewalld:
+    service: 'high-availability'
+    permanent: true
+    state: 'enabled'
+    immediate: true
diff --git a/tasks/install_local_media.yml b/tasks/install_local_media.yml
index 73b51b7..2bee47d 100644
--- a/tasks/install_local_media.yml
+++ b/tasks/install_local_media.yml
@@ -39,7 +39,7 @@

 - name: Install package(s) for fence_kdump
   yum:
-    name: "{{ fence_kdump_packages }}"
+    name: "{{ fence_kdump_packages }}"
     state: 'installed'
     disablerepo: '*'
     enablerepo: "c{{ ansible_distribution_major_version }}-media"
diff --git a/tasks/install_normal.yml b/tasks/install_normal.yml
index 09bdacb..743ce4f 100644
--- a/tasks/install_normal.yml
+++ b/tasks/install_normal.yml
@@ -4,7 +4,8 @@
     name: 'libselinux-python'
     state: 'installed'
   when:
-    - not (ansible_distribution in ['RedHat','CentOS','AlmaLinux'] and ansible_distribution_major_version == '8')
+    - not (ansible_distribution in ['RedHat','CentOS','AlmaLinux','Rocky'] and ansible_distribution_major_version == '8')
+    - not (ansible_distribution in ['RedHat','AlmaLinux'] and ansible_distribution_major_version == '9')
     - not (ansible_distribution == 'Fedora' and ansible_distribution_major_version >= '31')

 - name: Install Pacemaker cluster packages to all nodes
@@ -26,6 +27,6 @@

 - name: Install package(s) for fence_kdump
   yum:
-    name: "{{ fence_kdump_packages }}"
+    name: "{{ fence_kdump_packages }}"
     state: 'installed'
   when: cluster_configure_fence_kdump|bool
diff --git a/tasks/main.yml b/tasks/main.yml
index 1f55744..e1a0e81 100644
--- a/tasks/main.yml
+++ b/tasks/main.yml
@@ -1,12 +1,23 @@
 ---
+- name: Generate random ID for this cluster deployment
+  set_fact:
+    rand_id: "{{ 1024 | random }}"
+  run_once: true
+  changed_when: false
+  check_mode: false
+  # This ID is used for ansible group names grouping machines from this run of the role.
+  # If this role is run multiple times in a single playbook with different hosts, this
+  # ID will distinguish these different hosts.
+  # FIXME: this is a workaround for the inability to "remove things from groups"
+
 - name: Add a group to the inventory for remote nodes
   group_by:
-    key: "cluster_node_is_remote_{{ cluster_node_is_remote | bool }}"
+    key: "cluster{{ rand_id }}_node_is_remote_{{ cluster_node_is_remote | bool }}"

 - name: Check if cluster consist of at least 2 nodes
   fail:
     msg: 'Cluster must have at least 2 full members'
-  when: groups['cluster_node_is_remote_False']|default([])|count() < 2
+  when: groups['cluster'+rand_id+'_node_is_remote_False']|default([])|count() < 2
   run_once: true

 - name: Check if cluster_user_pass is not default
@@ -29,7 +40,7 @@
     - files:
         - "{{ ansible_os_family|lower }}{{ ansible_distribution_major_version }}.yml"
         - "{{ ansible_distribution|lower }}_repos.yml"
-      skip: yes
+      skip: true

 - name: Install packages from local media
   include_tasks: install_local_media.yml
@@ -97,11 +108,11 @@
     enabled: true
     state: 'started'

-- name: Setup firewall for RHEL/CentOS/AlmaLinux systems
+- name: Setup firewall for RHEL/CentOS/AlmaLinux/Rocky systems
   include_tasks: "firewall-el{{ ansible_distribution_major_version }}.yml"
   when:
     - cluster_firewall|bool
-    - ansible_distribution in ['RedHat','CentOS','AlmaLinux']
+    - ansible_distribution in ['RedHat','CentOS','AlmaLinux','Rocky']

 - name: Setup firewall for Fedora systems
   include_tasks: "firewall-fedora.yml"
@@ -131,7 +142,7 @@
 - name: Setup cluster
   pcs_cluster:
     node_list: >-
-      {% for item in groups['cluster_node_is_remote_False'] %}
+      {% for item in groups['cluster'+rand_id+'_node_is_remote_False'] %}
       {{ hostvars[item][cluster_hostname_fact] }}{% if hostvars[item].rrp_ip is defined %},{{ hostvars[item].rrp_ip }}{% endif %}
       {% endfor %}
     cluster_name: "{{ cluster_name }}"
@@ -175,13 +186,13 @@
     - cluster_node_is_remote | bool
     - (pcs_status.stdout | regex_search('\\b' ~ cluster_hostname ~ '\\s+\\(ocf::pacemaker:remote\\)') or '') | length == 0
   vars:
-    delegate_host: "{{ hostvars[groups['cluster_node_is_remote_False'][0]].inventory_hostname }}"
+    delegate_host: "{{ hostvars[groups['cluster'+rand_id+'_node_is_remote_False'][0]].inventory_hostname }}"
     # NOTE: Without this, the host's ansible_host variable will not be
     # respected when using delegate_to.
ansible_host: "{{ hostvars[delegate_host].ansible_host | default(delegate_host) }}" when: - - groups['cluster_node_is_remote_False'] is defined - - groups['cluster_node_is_remote_True']|default([])|count() > 0 + - groups['cluster'+rand_id+'_node_is_remote_False'] is defined + - groups['cluster'+rand_id+'_node_is_remote_True']|default([])|count() > 0 delegate_to: "{{ delegate_host }}" ### fencing setup diff --git a/redhat_repos.yml b/tasks/redhat_repos.yml similarity index 84% rename from redhat_repos.yml rename to tasks/redhat_repos.yml index eaa13a3..2edc615 100644 --- a/redhat_repos.yml +++ b/tasks/redhat_repos.yml @@ -47,15 +47,15 @@ and enable_beta_repos | bool and ansible_distribution_major_version in ['6','7'] -- name: enable HA/RS repository (RHEL 8) +- name: enable HA/RS repository (RHEL 8/9) command: >- subscription-manager repos - --enable="rhel-8-for-x86_64-{{ (repos_type == 'rs') | ternary( 'resilientstorage', 'highavailability' ) }}-rpms" + --enable="rhel-{{ ansible_distribution_major_version }}-for-x86_64-{{ (repos_type == 'rs') | ternary( 'resilientstorage', 'highavailability' ) }}-rpms" when: >- - ['rhel-8-for-x86_64-',(repos_type == 'rs') | ternary( 'resilientstorage', 'highavailability' ),'-rpms'] | join + ['rhel-',ansible_distribution_major_version,'-for-x86_64-',(repos_type == 'rs') | ternary( 'resilientstorage', 'highavailability' ),'-rpms'] | join not in yum_repolist.stdout and enable_repos | bool - and ansible_distribution_major_version in ['8'] + and ansible_distribution_major_version in ['8','9'] - name: enable single custom repository command: subscription-manager repos --enable="{{ custom_repository }}" diff --git a/tasks/rocky_repos.yml b/tasks/rocky_repos.yml new file mode 100644 index 0000000..9d09e0b --- /dev/null +++ b/tasks/rocky_repos.yml @@ -0,0 +1,21 @@ +--- +- name: get list of active repositories + command: yum repolist + args: + warn: false + register: yum_repolist + changed_when: false + check_mode: false + +- name: enable highavailability repository (Rocky) + ini_file: + dest: '/etc/yum.repos.d/Rocky-HighAvailability.repo' + section: 'ha' + option: 'enabled' + value: '1' + create: 'no' + mode: '0644' + when: >- + 'HighAvailability' not in yum_repolist.stdout + and enable_repos | bool + and ansible_distribution_major_version in ['8'] diff --git a/vars/debian10.yml b/vars/debian10.yml index 698a1b6..94ae21b 100644 --- a/vars/debian10.yml +++ b/vars/debian10.yml @@ -5,7 +5,10 @@ pacemaker_packages: pacemaker_remote_packages: - pcs - pacemaker-remote +fence_kdump_packages: + - kdump-tools +kdump_service_name: 'kdump-tools' cluster_configure_fence_xvm: false cluster_firewall: false diff --git a/vars/debian11.yml b/vars/debian11.yml new file mode 100644 index 0000000..94ae21b --- /dev/null +++ b/vars/debian11.yml @@ -0,0 +1,15 @@ +--- +pacemaker_packages: + - pcs + - pacemaker +pacemaker_remote_packages: + - pcs + - pacemaker-remote +fence_kdump_packages: + - kdump-tools + +kdump_service_name: 'kdump-tools' +cluster_configure_fence_xvm: false +cluster_firewall: false + +pcsd_configuration_file: /etc/default/pcsd diff --git a/vars/fedora34.yml b/vars/fedora34.yml new file mode 100644 index 0000000..2fa89e5 --- /dev/null +++ b/vars/fedora34.yml @@ -0,0 +1,21 @@ +--- +pacemaker_packages: + - pcs + - pacemaker +pacemaker_remote_packages: + - pcs + - pacemaker-remote +fence_xvm_packages: + - fence-virt +fence_kdump_packages: + - fence-agents-kdump + - kexec-tools +fence_vmware_soap_packages: + - fence-agents-vmware-soap + - python3-requests 
+fence_vmware_rest_packages:
+  - fence-agents-vmware-rest
+firewall_packages:
+  - firewalld
+
+pcsd_configuration_file: /etc/sysconfig/pcsd
diff --git a/vars/fedora35.yml b/vars/fedora35.yml
new file mode 100644
index 0000000..2fa89e5
--- /dev/null
+++ b/vars/fedora35.yml
@@ -0,0 +1,21 @@
+---
+pacemaker_packages:
+  - pcs
+  - pacemaker
+pacemaker_remote_packages:
+  - pcs
+  - pacemaker-remote
+fence_xvm_packages:
+  - fence-virt
+fence_kdump_packages:
+  - fence-agents-kdump
+  - kexec-tools
+fence_vmware_soap_packages:
+  - fence-agents-vmware-soap
+  - python3-requests
+fence_vmware_rest_packages:
+  - fence-agents-vmware-rest
+firewall_packages:
+  - firewalld
+
+pcsd_configuration_file: /etc/sysconfig/pcsd
diff --git a/vars/fedora36.yml b/vars/fedora36.yml
new file mode 100644
index 0000000..c452272
--- /dev/null
+++ b/vars/fedora36.yml
@@ -0,0 +1,20 @@
+---
+pacemaker_packages:
+  - pcs
+  - pacemaker
+pacemaker_remote_packages:
+  - pcs
+  - pacemaker-remote
+fence_xvm_packages:
+  - fence-virt
+fence_kdump_packages:
+  - fence-agents-kdump
+  - kexec-tools
+fence_vmware_soap_packages:
+  - fence-agents-vmware-soap
+fence_vmware_rest_packages:
+  - fence-agents-vmware-rest
+firewall_packages:
+  - firewalld
+
+pcsd_configuration_file: /etc/sysconfig/pcsd
diff --git a/vars/main.yml b/vars/main.yml
new file mode 100644
index 0000000..f7d2acb
--- /dev/null
+++ b/vars/main.yml
@@ -0,0 +1,3 @@
+---
+# common values that can be overridden by other files in this directory
+kdump_service_name: 'kdump'
diff --git a/vars/redhat9.yml b/vars/redhat9.yml
new file mode 100644
index 0000000..5f1065e
--- /dev/null
+++ b/vars/redhat9.yml
@@ -0,0 +1,19 @@
+---
+pacemaker_packages:
+  - pcs
+pacemaker_remote_packages:
+  - pcs
+  - pacemaker-remote
+fence_xvm_packages:
+  - fence-virt
+fence_kdump_packages:
+  - fence-agents-kdump
+  - kexec-tools
+fence_vmware_soap_packages:
+  - fence-agents-vmware-soap
+fence_vmware_rest_packages:
+  - fence-agents-vmware-rest
+firewall_packages:
+  - firewalld
+
+pcsd_configuration_file: /etc/sysconfig/pcsd
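
Usage note (not part of the patch above): a minimal sketch of a playbook that applies the role on one of the newly supported distributions. The play name, inventory group `cluster_nodes` and all values are placeholders; only the variable names (`cluster_name`, `cluster_user_pass`, `enable_repos`, `cluster_firewall`, `cluster_configure_fence_xvm`, `cluster_configure_fence_kdump`) come from the role's defaults and README shown in this diff, and the Galaxy role name `ondrejhome.ha-cluster-pacemaker` is assumed.

```yaml
---
# site.yml - illustrative only; adjust hosts and values for your environment
- name: Deploy a basic pacemaker cluster
  hosts: cluster_nodes
  become: true
  roles:
    - role: 'ondrejhome.ha-cluster-pacemaker'
      cluster_name: 'test-cluster'          # passed to pcs_cluster when the cluster is set up
      cluster_user_pass: 'change-me'        # role aborts if this is left at its default
      enable_repos: true                    # enable HA repos (vault.centos.org on EOL CentOS Linux 8)
      cluster_firewall: true                # includes firewall-el{7,8,9}.yml on RHEL-family systems
      cluster_configure_fence_xvm: false
      cluster_configure_fence_kdump: false
```

Running it against two or more full cluster members (the role fails with fewer) would exercise the new EL9/Rocky/Debian Bullseye code paths added in this changeset.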