From d530b2b5b6913c5f7cdb6404d90d52721f3e613c Mon Sep 17 00:00:00 2001
From: Veerabhadrarao Damisetti
Date: Tue, 3 Dec 2024 20:34:29 +0530
Subject: [PATCH] feat: Support for LPAR as compute nodes for HCP (#358)

Support for LPAR as compute nodes for HCP

1. Network types
   - OSA
   - RoCE
2. Disk types
   - FCP
   - DASD
   - NVMe

Updated documentation

---------

Signed-off-by: DAMISETTI-VEERABHADRARAO
---
 docs/run-the-playbooks-for-hcp.md             | 16 +++++-
 docs/set-variables-group-vars.md              | 20 ++++++++
 .../default/group_vars/hcp.yaml.template      | 40 ++++++++++++++-
 playbooks/create_hosted_cluster.yaml          | 10 ++--
 roles/boot_LPAR_hcp/main.yaml                 | 50 +++++++++++++++++++
 .../templates/inventory_template.j2           |  2 +-
 .../templates/ssh-key.exp.j2                  |  2 +-
 roles/download_rootfs_hcp/tasks/main.yaml     | 15 ++++++
 .../tasks/main.yaml                           | 12 ++---
 roles/setup_for_agents_hcp/tasks/main.yaml    |  2 +-
 .../templates/parm-file.parm.j2               |  6 ++-
 11 files changed, 157 insertions(+), 18 deletions(-)
 create mode 100644 roles/boot_LPAR_hcp/main.yaml

diff --git a/docs/run-the-playbooks-for-hcp.md b/docs/run-the-playbooks-for-hcp.md
index ba2c70ec..5967039c 100644
--- a/docs/run-the-playbooks-for-hcp.md
+++ b/docs/run-the-playbooks-for-hcp.md
@@ -9,8 +9,16 @@
 * If using dynamic IP for agents, make sure you have entries in DHCP Server for macaddresses you are using in installation to map to IPv4 addresses and along with this DHCP server should make your IPs to use nameserver which you have configured.
 
 ## Note:
-* As of now we are supporting only macvtap for Hosted Control Plane Agent based installation for KVM compute nodes.
-* Supported network modes for zVM : vswitch, OSA, RoCE, Hipersockets
+Supported configurations:
+* KVM compute nodes
+  * Network type: MacVTap (static IP / DHCP)
+  * Disk types: QCOW, DASD
+* z/VM compute nodes
+  * Network types: vSwitch, OSA, RoCE, HiperSockets
+  * Disk types: FCP, DASD
+* LPAR compute nodes (classical LPAR only)
+  * Network types: OSA, RoCE
+  * Disk types: FCP, DASD, NVMe
 
 ## Step-1: Setup Ansible Vault for Management Cluster Credentials
 ### Overview
@@ -31,6 +39,10 @@
 bastion_root_pw: ''
 api_server: ':'
 user_name: ''
 password: ''
+
+# HMC login credentials (required only if compute_node_type is LPAR)
+hmca_username: ''
+hmca_password: ''
 ```
 * You can edit the encrypted file using below command
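The vault workflow above maps directly onto the stock `ansible-vault` CLI. A minimal sketch, assuming the encrypted file lives at `playbooks/secrets.yaml` (the path is illustrative; use whatever file your inventory expects):

```
# Create the encrypted credentials file and paste in the YAML shown above
ansible-vault create playbooks/secrets.yaml

# Re-open it later, e.g. to add the hmca_* keys when switching to LPAR compute nodes
ansible-vault edit playbooks/secrets.yaml
```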
diff --git a/docs/set-variables-group-vars.md b/docs/set-variables-group-vars.md
index 5f7150ca..1e00da79 100644
--- a/docs/set-variables-group-vars.md
+++ b/docs/set-variables-group-vars.md
@@ -351,6 +351,26 @@
 **hcp.data_plane.zvm.interface.ip** | IP addresses for to be used for zVM nodes | 192.168.10.1
 **hcp.data_plane.zvm.nodes.dasd.disk_id** | Disk id for dasd disk to be used for zVM node | 4404
 **hcp.data_plane.zvm.nodes.lun** | Disk details of fcp disk to be used for zVM node | 0.0.4404
+**hcp.data_plane.lpar.network_mode** | Network mode for LPAR nodes. Supported modes: osa, RoCE | osa
+**hcp.data_plane.lpar.disk_type** | Disk type for LPAR nodes. Supported disk types: fcp, dasd, nvme | dasd
+**hcp.data_plane.lpar.subnetmask** | Subnet mask for compute nodes | 255.255.255.0
+**hcp.data_plane.lpar.gateway** | Gateway for compute nodes | 192.168.10.1
+**hcp.data_plane.lpar.nodes** | Set of parameters for LPAR nodes. Give the details of each LPAR node here |
+**hcp.data_plane.lpar.nodes.name** | Name of the LPAR | m1317002
+**hcp.data_plane.lpar.nodes.hmc_host** | Host name of the HMC that manages the LPAR | boem1317
+**hcp.data_plane.lpar.nodes.interface.ifname** | Network interface name for LPAR nodes | encbdf0
+**hcp.data_plane.lpar.nodes.interface.nettype** | Network type for LPAR node network connectivity | qeth
+**hcp.data_plane.lpar.nodes.interface.subchannels** | Subchannels for LPAR node interfaces | 0.0.bdf0,0.0.bdf1,0.0.bdf2
+**hcp.data_plane.lpar.nodes.interface.options** | Configuration options | layer2=1
+**hcp.data_plane.lpar.nodes.interface.ip** | IP address to be used for the LPAR node | 192.168.10.1
+**hcp.data_plane.lpar.nodes.dasd.disk_id** | Disk id of the dasd disk to be used for the LPAR node | 4404
+**hcp.data_plane.lpar.nodes.lun** | Disk details of the fcp disk to be used for the LPAR node | 0.0.4404
+**hcp.data_plane.lpar.nodes.live_disk.disk_type** | Live disk type for booting the LPAR | scsi
+**hcp.data_plane.lpar.nodes.live_disk.devicenr** | Device number of the live disk | 8001
+**hcp.data_plane.lpar.nodes.live_disk.lun** | LUN id of the live disk | 40xxxxxxxxxxxxx
+**hcp.data_plane.lpar.nodes.live_disk.wwpn** | WWPN of the live disk | 500507630xxxxxxx
+**hcp.data_plane.lpar.nodes.live_disk.password** | Password of the live disk | xxxxxx
+
 ## ZVM ( Optional )
 
 **Variable Name** | **Description** | **Example**

diff --git a/inventories/default/group_vars/hcp.yaml.template b/inventories/default/group_vars/hcp.yaml.template
index 593f5977..a019aab5 100644
--- a/inventories/default/group_vars/hcp.yaml.template
+++ b/inventories/default/group_vars/hcp.yaml.template
@@ -1,5 +1,5 @@
 hcp:
-  compute_node_type: # KVM or zVM
+  compute_node_type: # KVM, zVM or LPAR
 
   # Parameters for oc login
   mgmt_cluster_nameserver:
@@ -138,3 +138,41 @@
       paths:
         - wwpn:
           fcp:
+
+  lpar:
+    network_mode: # Supported modes: osa, RoCE
+    disk_type: # Supported disk types: fcp, dasd, nvme
+    subnetmask:
+    gateway:
+
+    nodes:
+      - name:
+        hmc_host:
+        interface:
+          ifname: encbdf0
+          nettype: qeth
+          subchannels: 0.0.bdf0,0.0.bdf1,0.0.bdf2
+          options: layer2=1
+          ip:
+
+        # Required if disk_type is dasd
+        dasd:
+          disk_id:
+
+        # Required if disk_type is fcp
+        lun:
+          - id:
+            paths:
+              - wwpn:
+                fcp:
+
+        # Live disk details
+        live_disk:
+          disk_type:
+          devicenr:
+          lun:
+          wwpn:
+          password:
+
+
+
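For orientation, here is a hedged example of the new `lpar` block filled in for a single FCP-backed node; every value below is an illustrative placeholder, not a shipped default. Note that `boot_LPAR_hcp` derives the CPC name from the portion of the node name before `lp`:

```yaml
hcp:
  compute_node_type: lpar
  data_plane:
    lpar:
      network_mode: osa            # osa or RoCE
      disk_type: fcp               # fcp, dasd or nvme
      subnetmask: 255.255.255.0
      gateway: 192.168.10.1
      nodes:
        - name: cpc1lp17           # "cpc1" is parsed out as the CPC name
          hmc_host: hmc.example.com
          interface:
            ifname: encbdf0
            nettype: qeth
            subchannels: 0.0.bdf0,0.0.bdf1,0.0.bdf2
            options: layer2=1
            ip: 192.168.10.5
          lun:
            - id: "0x4001400000000000"
              paths:
                - wwpn: "0x500507630a1b2c3d"
                  fcp: 0.0.8000
          live_disk:
            disk_type: scsi
            devicenr: 8001
            lun: 40xxxxxxxxxxxxx     # site-specific, left elided
            wwpn: 500507630xxxxxxx   # site-specific, left elided
            password: xxxxxx
```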
diff --git a/playbooks/create_hosted_cluster.yaml b/playbooks/create_hosted_cluster.yaml
index 3b439846..a0e66325 100644
--- a/playbooks/create_hosted_cluster.yaml
+++ b/playbooks/create_hosted_cluster.yaml
@@ -8,12 +8,12 @@
     - name: Setting host
      set_fact:
         host: 'kvm_host_hcp'
-      when: hcp.compute_node_type | lower != 'zvm'
+      when: hcp.compute_node_type | lower == 'kvm'
 
     - name: Install Prereqs on host
       import_role:
         name: install_prerequisites_host_hcp
-      when: hcp.compute_node_type | lower != 'zvm'
+      when: hcp.compute_node_type | lower == 'kvm'
 
     - name: Setting vars for storage pool configuration
       set_fact:
@@ -44,12 +44,12 @@
         device1: "{{ network_interface.stdout }}"
       env:
         vnet_name: macvtap
-      when: hcp.compute_node_type | lower != 'zvm'
+      when: hcp.compute_node_type | lower == 'kvm'
 
     - name: Creating macvtap network
       import_role:
         name: macvtap
-      when: hcp.compute_node_type | lower != 'zvm'
+      when: hcp.compute_node_type | lower == 'kvm'
 
 - name: Create bastion for hcp
   hosts: kvm_host_hcp
@@ -62,7 +62,7 @@
         name: create_bastion_hcp
       when:
         - hcp.bastion_params.create == true
-        - hcp.compute_node_type | lower != 'zvm'
+        - hcp.compute_node_type | lower == 'kvm'
 
 - name: Configuring Bastion
   hosts: bastion_hcp
diff --git a/roles/boot_LPAR_hcp/main.yaml b/roles/boot_LPAR_hcp/main.yaml
new file mode 100644
index 00000000..0d5808c5
--- /dev/null
+++ b/roles/boot_LPAR_hcp/main.yaml
@@ -0,0 +1,50 @@
+---
+- name: Creating agents
+  block:
+    - name: Getting script for booting
+      template:
+        src: "{{ playbook_dir }}/../roles/boot_LPAR_hcp/templates/boot_lpar.py"
+        dest: /root/ansible_workdir/boot_lpar.py
+
+    - name: Debug
+      debug:
+        msg: "Booting agent-{{ item }}"
+
+    - name: Booting LPAR
+      shell: |
+        python /root/ansible_workdir/boot_lpar.py \
+        --cpcname "{{ hcp.data_plane.lpar.nodes[item].name.split('lp')[0] }}" \
+        --lparname "{{ hcp.data_plane.lpar.nodes[item].name }}" \
+        --hmchost "{{ hcp.data_plane.lpar.nodes[item].hmc_host }}" \
+        --hmcuser "{{ hmca_username }}" \
+        --hmcpass "{{ hmca_password }}" \
+        --cpu "{{ hcp.data_plane.vcpus }}" \
+        --memory "{{ hcp.data_plane.memory }}" \
+        --kernel http://"{{ hcp.bastion_params.ip }}":8080/kernel.img \
+        --initrd http://"{{ hcp.bastion_params.ip }}":8080/initrd.img \
+        --livedisktype "{{ hcp.data_plane.lpar.nodes[item].live_disk.disk_type }}" \
+        --devicenr "{{ hcp.data_plane.lpar.nodes[item].live_disk.devicenr }}" \
+        --livedisklun "{{ hcp.data_plane.lpar.nodes[item].live_disk.lun }}" \
+        --livediskwwpn "{{ hcp.data_plane.lpar.nodes[item].live_disk.wwpn }}" \
+        --netset_ip "{{ hcp.data_plane.lpar.nodes[item].interface.ip }}" \
+        --netset_gateway "{{ hcp.data_plane.lpar.gateway }}" \
+        --netset_network_type "osa" \
+        --netset_network_device "{{ hcp.data_plane.lpar.nodes[item].interface.subchannels }}" \
+        --netset_password "{{ hcp.data_plane.lpar.nodes[item].live_disk.password }}" \
+        --netset_dns "{{ hcp.bastion_params.ip }}" \
+        --log_level DEBUG \
+        --cmdline "$(cat /root/ansible_workdir/agent-{{ item }}.parm)"
+
+    - name: Wait for the agent to come up
+      shell: oc get agents -n "{{ hcp.control_plane.clusters_namespace }}-{{ hcp.control_plane.hosted_cluster_name }}" --no-headers -o custom-columns=NAME:.metadata.name,APPROVED:.spec.approved | awk '$2 == "false"' | wc -l
+      register: agent_count
+      until: agent_count.stdout | int == 1
+      retries: 40
+      delay: 10
+
+    - name: Get the name of agent
+      shell: oc get agents -n {{ hcp.control_plane.clusters_namespace }}-{{ hcp.control_plane.hosted_cluster_name }} --no-headers -o custom-columns=NAME:.metadata.name,APPROVED:.spec.approved | awk '$2 == "false"'
+      register: agent_name
+
+    - name: Approve agents
+      shell: oc -n {{ hcp.control_plane.clusters_namespace }}-{{ hcp.control_plane.hosted_cluster_name }} patch agent {{ agent_name.stdout.split(' ')[0] }} -p '{"spec":{"approved":true,"hostname":"compute-{{ item }}.{{ hcp.control_plane.hosted_cluster_name }}.{{ hcp.control_plane.basedomain }}"}}' --type merge
\ No newline at end of file
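The tasks in `roles/boot_LPAR_hcp/main.yaml` consume an `item` index (the node number) but define no loop of their own, so the caller has to supply it. A minimal sketch of such wiring, assuming the file is pulled in with `include_tasks` in the same per-agent style as the other roles (the calling play is hypothetical):

```yaml
# Hypothetical caller: boot one LPAR per compute node; "item" is visible
# inside the included tasks as the current node index.
- name: Boot LPAR agents
  include_tasks: "{{ playbook_dir }}/../roles/boot_LPAR_hcp/main.yaml"
  loop: "{{ range(hcp.data_plane.compute_count | int) | list }}"
  when: hcp.compute_node_type | lower == 'lpar'
```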
password "{{ kvm_host_password }}" spawn ssh-copy-id -i {{ lookup('env', 'HOME') }}/.ssh/{{ hcp.ansible_key_name }} {{ hcp.bastion_params.host_user }}@{{ hcp.bastion_params.host }} expect "{{ hcp.bastion_params.host_user }}@{{ hcp.bastion_params.host }}'s password:" diff --git a/roles/download_rootfs_hcp/tasks/main.yaml b/roles/download_rootfs_hcp/tasks/main.yaml index 95c3090a..6c16ae2a 100644 --- a/roles/download_rootfs_hcp/tasks/main.yaml +++ b/roles/download_rootfs_hcp/tasks/main.yaml @@ -48,6 +48,21 @@ dest: /var/www/html/rootfs.img validate_certs: false +- name: Copy kernel.img to /var/www/html/ + ansible.builtin.copy: + src: /var/lib/libvirt/images/pxeboot/kernel.img + dest: /var/www/html/kernel.img + remote_src: true + when: hcp.compute_node_type | lower == 'lpar' + +- name: Copy initrd.img to /var/www/html/ + ansible.builtin.copy: + src: /var/lib/libvirt/images/pxeboot/initrd.img + dest: /var/www/html/initrd.img + remote_src: true + when: hcp.compute_node_type | lower == 'lpar' + + - name: restart httpd service: name: httpd.service diff --git a/roles/scale_nodepool_and_wait_for_compute_hcp/tasks/main.yaml b/roles/scale_nodepool_and_wait_for_compute_hcp/tasks/main.yaml index ca65ea3b..acdb1f68 100644 --- a/roles/scale_nodepool_and_wait_for_compute_hcp/tasks/main.yaml +++ b/roles/scale_nodepool_and_wait_for_compute_hcp/tasks/main.yaml @@ -9,28 +9,28 @@ until: agents.resources | length == {{ hcp.data_plane.compute_count }} retries: 30 delay: 10 - when: hcp.compute_node_type | lower != 'zvm' + when: hcp.compute_node_type | lower == 'kvm' - name: Get agent names command: oc get agents -n {{ hcp.control_plane.hosted_cluster_name }}-agents --no-headers register: agents_info - when: hcp.compute_node_type | lower != 'zvm' + when: hcp.compute_node_type | lower == 'kvm' - name: Create List for agents set_fact: agents: [] - when: hcp.compute_node_type | lower != 'zvm' + when: hcp.compute_node_type | lower == 'kvm' - name: Get a List of agents set_fact: agents: "{{ agents + [agents_info.stdout.split('\n')[item].split(' ')[0]] }}" loop: "{{ range(hcp.data_plane.compute_count|int) | list }}" - when: hcp.compute_node_type | lower != 'zvm' + when: hcp.compute_node_type | lower == 'kvm' - name: Patch Agents - shell: oc -n {{ hcp.control_plane.clusters_namespace }}-agents patch agent {{ agents[item] }} -p '{"spec":{"approved":true,"hostname":"compute-{{item}}.{{ hcp.control_plane.hosted_cluster_name }}.{{ hcp.control_plane.basedomain }}"}}' --type merge + shell: oc -n {{ hcp.control_plane.hosted_cluster_name }}-agents patch agent {{ agents[item] }} -p '{"spec":{"approved":true,"hostname":"compute-{{item}}.{{ hcp.control_plane.hosted_cluster_name }}.{{ hcp.control_plane.basedomain }}"}}' --type merge loop: "{{ range(hcp.data_plane.compute_count|int) | list }}" - when: hcp.compute_node_type | lower != 'zvm' + when: hcp.compute_node_type | lower == 'kvm' - name: Scale Nodepool command: oc -n {{ hcp.control_plane.clusters_namespace }} scale nodepool {{ hcp.control_plane.hosted_cluster_name }} --replicas {{ hcp.data_plane.compute_count }} diff --git a/roles/setup_for_agents_hcp/tasks/main.yaml b/roles/setup_for_agents_hcp/tasks/main.yaml index a83fad68..dfb96281 100644 --- a/roles/setup_for_agents_hcp/tasks/main.yaml +++ b/roles/setup_for_agents_hcp/tasks/main.yaml @@ -45,5 +45,5 @@ template: src: parm-file.parm.j2 dest: /root/ansible_workdir/agent-{{ item }}.parm - when: hcp.compute_node_type | lower == 'zvm' + when: hcp.compute_node_type | lower != 'kvm' loop: "{{ range(hcp.data_plane.compute_count | 
diff --git a/roles/scale_nodepool_and_wait_for_compute_hcp/tasks/main.yaml b/roles/scale_nodepool_and_wait_for_compute_hcp/tasks/main.yaml
index ca65ea3b..acdb1f68 100644
--- a/roles/scale_nodepool_and_wait_for_compute_hcp/tasks/main.yaml
+++ b/roles/scale_nodepool_and_wait_for_compute_hcp/tasks/main.yaml
@@ -9,28 +9,28 @@
   until: agents.resources | length == {{ hcp.data_plane.compute_count }}
   retries: 30
   delay: 10
-  when: hcp.compute_node_type | lower != 'zvm'
+  when: hcp.compute_node_type | lower == 'kvm'
 
 - name: Get agent names
   command: oc get agents -n {{ hcp.control_plane.hosted_cluster_name }}-agents --no-headers
   register: agents_info
-  when: hcp.compute_node_type | lower != 'zvm'
+  when: hcp.compute_node_type | lower == 'kvm'
 
 - name: Create List for agents
   set_fact:
     agents: []
-  when: hcp.compute_node_type | lower != 'zvm'
+  when: hcp.compute_node_type | lower == 'kvm'
 
 - name: Get a List of agents
   set_fact:
     agents: "{{ agents + [agents_info.stdout.split('\n')[item].split(' ')[0]] }}"
   loop: "{{ range(hcp.data_plane.compute_count|int) | list }}"
-  when: hcp.compute_node_type | lower != 'zvm'
+  when: hcp.compute_node_type | lower == 'kvm'
 
 - name: Patch Agents
-  shell: oc -n {{ hcp.control_plane.clusters_namespace }}-agents patch agent {{ agents[item] }} -p '{"spec":{"approved":true,"hostname":"compute-{{item}}.{{ hcp.control_plane.hosted_cluster_name }}.{{ hcp.control_plane.basedomain }}"}}' --type merge
+  shell: oc -n {{ hcp.control_plane.hosted_cluster_name }}-agents patch agent {{ agents[item] }} -p '{"spec":{"approved":true,"hostname":"compute-{{item}}.{{ hcp.control_plane.hosted_cluster_name }}.{{ hcp.control_plane.basedomain }}"}}' --type merge
   loop: "{{ range(hcp.data_plane.compute_count|int) | list }}"
-  when: hcp.compute_node_type | lower != 'zvm'
+  when: hcp.compute_node_type | lower == 'kvm'
 
 - name: Scale Nodepool
   command: oc -n {{ hcp.control_plane.clusters_namespace }} scale nodepool {{ hcp.control_plane.hosted_cluster_name }} --replicas {{ hcp.data_plane.compute_count }}
diff --git a/roles/setup_for_agents_hcp/tasks/main.yaml b/roles/setup_for_agents_hcp/tasks/main.yaml
index a83fad68..dfb96281 100644
--- a/roles/setup_for_agents_hcp/tasks/main.yaml
+++ b/roles/setup_for_agents_hcp/tasks/main.yaml
@@ -45,5 +45,5 @@
   template:
     src: parm-file.parm.j2
     dest: /root/ansible_workdir/agent-{{ item }}.parm
-  when: hcp.compute_node_type | lower == 'zvm'
+  when: hcp.compute_node_type | lower != 'kvm'
   loop: "{{ range(hcp.data_plane.compute_count | int) | list }}"
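While the nodepool scales up, agent approval can be followed with the same `oc` query the tasks above use; the namespace is hypothetical and follows this role's `<hosted_cluster_name>-agents` convention:

```
oc get agents -n hcp0-agents --no-headers \
  -o custom-columns=NAME:.metadata.name,APPROVED:.spec.approved
```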
diff --git a/roles/setup_for_agents_hcp/templates/parm-file.parm.j2 b/roles/setup_for_agents_hcp/templates/parm-file.parm.j2
index a271a951..24066fad 100644
--- a/roles/setup_for_agents_hcp/templates/parm-file.parm.j2
+++ b/roles/setup_for_agents_hcp/templates/parm-file.parm.j2
@@ -1 +1,5 @@
-rd.neednet=1 ai.ip_cfg_override=1 console=ttysclp0 coreos.live.rootfs_url=http://{{ hcp.bastion_params.ip }}:8080/rootfs.img ip={{ hcp.data_plane.zvm.nodes[item].interface.ip }}::{{ hcp.data_plane.zvm.gateway }}:{{ hcp.data_plane.zvm.subnetmask }}{% if hcp.data_plane.zvm.network_mode | lower != 'roce' %}::{{ hcp.data_plane.zvm.nodes[item].interface.ifname }}:none{% endif %} nameserver={{ hcp.bastion_params.ip }} zfcp.allow_lun_scan=0 {% if hcp.data_plane.zvm.network_mode | lower != 'roce' %}rd.znet={{ hcp.data_plane.zvm.nodes[item].interface.nettype }},{{ hcp.data_plane.zvm.nodes[item].interface.subchannels }},{{ hcp.data_plane.zvm.nodes[item].interface.options }}{% endif %} {% if hcp.data_plane.zvm.disk_type | lower != 'fcp' %}rd.dasd=0.0.{{ hcp.data_plane.zvm.nodes[item].dasd.disk_id }}{% else %}rd.zfcp={{ hcp.data_plane.zvm.nodes[item].lun[0].paths[0].fcp}},{{ hcp.data_plane.zvm.nodes[item].lun[0].paths[0].wwpn }},{{ hcp.data_plane.zvm.nodes[item].lun[0].id }} {% endif %} random.trust_cpu=on rd.luks.options=discard ignition.firstboot ignition.platform.id=metal console=tty1 console=ttyS1,115200n8 coreos.inst.persistent-kargs="console=tty1 console=ttyS1,115200n8"
\ No newline at end of file
+{% if hcp.compute_node_type | lower == 'zvm' %}
+rd.neednet=1 ai.ip_cfg_override=1 console=ttysclp0 coreos.live.rootfs_url=http://{{ hcp.bastion_params.ip }}:8080/rootfs.img ip={{ hcp.data_plane.zvm.nodes[item].interface.ip }}::{{ hcp.data_plane.zvm.gateway }}:{{ hcp.data_plane.zvm.subnetmask }}{% if hcp.data_plane.zvm.network_mode | lower != 'roce' %}::{{ hcp.data_plane.zvm.nodes[item].interface.ifname }}:none{% endif %} nameserver={{ hcp.bastion_params.ip }} zfcp.allow_lun_scan=0 {% if hcp.data_plane.zvm.network_mode | lower != 'roce' %}rd.znet={{ hcp.data_plane.zvm.nodes[item].interface.nettype }},{{ hcp.data_plane.zvm.nodes[item].interface.subchannels }},{{ hcp.data_plane.zvm.nodes[item].interface.options }}{% endif %} {% if hcp.data_plane.zvm.disk_type | lower != 'fcp' %}rd.dasd=0.0.{{ hcp.data_plane.zvm.nodes[item].dasd.disk_id }}{% else %}rd.zfcp={{ hcp.data_plane.zvm.nodes[item].lun[0].paths[0].fcp }},{{ hcp.data_plane.zvm.nodes[item].lun[0].paths[0].wwpn }},{{ hcp.data_plane.zvm.nodes[item].lun[0].id }} {% endif %} random.trust_cpu=on rd.luks.options=discard ignition.firstboot ignition.platform.id=metal console=tty1 console=ttyS1,115200n8 coreos.inst.persistent-kargs="console=tty1 console=ttyS1,115200n8"
+{% else %}
+rd.neednet=1 ai.ip_cfg_override=1 console=ttysclp0 coreos.live.rootfs_url=http://{{ hcp.bastion_params.ip }}:8080/rootfs.img ip={{ hcp.data_plane.lpar.nodes[item].interface.ip }}::{{ hcp.data_plane.lpar.gateway }}:{{ hcp.data_plane.lpar.subnetmask }}{% if hcp.data_plane.lpar.network_mode | lower != 'roce' %}::{{ hcp.data_plane.lpar.nodes[item].interface.ifname }}:none{% endif %} nameserver={{ hcp.bastion_params.ip }} zfcp.allow_lun_scan=0 {% if hcp.data_plane.lpar.network_mode | lower != 'roce' %}rd.znet={{ hcp.data_plane.lpar.nodes[item].interface.nettype }},{{ hcp.data_plane.lpar.nodes[item].interface.subchannels }},{{ hcp.data_plane.lpar.nodes[item].interface.options }}{% endif %} {% if hcp.data_plane.lpar.disk_type | lower != 'nvme' %}{% if hcp.data_plane.lpar.disk_type | lower != 'fcp' %}rd.dasd=0.0.{{ hcp.data_plane.lpar.nodes[item].dasd.disk_id }}{% else %}rd.zfcp={{ hcp.data_plane.lpar.nodes[item].lun[0].paths[0].fcp }},{{ hcp.data_plane.lpar.nodes[item].lun[0].paths[0].wwpn }},{{ hcp.data_plane.lpar.nodes[item].lun[0].id }} {% endif %} {% endif %} random.trust_cpu=on rd.luks.options=discard ignition.firstboot ignition.platform.id=metal console=tty1 console=ttyS1,115200n8 coreos.inst.persistent-kargs="console=tty1 console=ttyS1,115200n8"
+{% endif %}
\ No newline at end of file
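For reference, assuming a DASD-backed LPAR node with the sample values from the documentation table above (bastion IP 192.168.10.3 is illustrative), the LPAR branch of this template renders to a parm line like:

```
rd.neednet=1 ai.ip_cfg_override=1 console=ttysclp0 coreos.live.rootfs_url=http://192.168.10.3:8080/rootfs.img ip=192.168.10.5::192.168.10.1:255.255.255.0::encbdf0:none nameserver=192.168.10.3 zfcp.allow_lun_scan=0 rd.znet=qeth,0.0.bdf0,0.0.bdf1,0.0.bdf2,layer2=1 rd.dasd=0.0.4404 random.trust_cpu=on rd.luks.options=discard ignition.firstboot ignition.platform.id=metal console=tty1 console=ttyS1,115200n8 coreos.inst.persistent-kargs="console=tty1 console=ttyS1,115200n8"
```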