From f1a09012cdcd85b85edb366d19c5109ab19a3781 Mon Sep 17 00:00:00 2001
From: Veerabhadrarao Damisetti
Date: Mon, 25 Nov 2024 12:20:38 +0530
Subject: [PATCH] Enhancement: Updated documentation for HCP (#354)

Updated documentation for HCP
Updated parmfile template for zVM

---------

Signed-off-by: DAMISETTI-VEERABHADRARAO
---
 docs/run-the-playbooks-for-hcp.md                  | 16 ++++++++++------
 docs/set-variables-group-vars.md                   |  2 +-
 inventories/default/group_vars/hcp.yaml.template   |  2 +-
 roles/create_hcp_InfraEnv/tasks/main.yaml          |  2 +-
 .../delete_resources_bastion_hcp/tasks/main.yaml   |  3 +++
 .../templates/parm-file.parm.j2                    |  2 +-
 6 files changed, 17 insertions(+), 10 deletions(-)

diff --git a/docs/run-the-playbooks-for-hcp.md b/docs/run-the-playbooks-for-hcp.md
index 2f13aeab..ba2c70ec 100644
--- a/docs/run-the-playbooks-for-hcp.md
+++ b/docs/run-the-playbooks-for-hcp.md
@@ -26,9 +26,11 @@ ansible-vault create playbooks/secrets.yaml
 ```
 kvm_host_password: ''
 bastion_root_pw: ''
-api_server: ':'
-user_name: ''
-password: ''
+
+# Management cluster login credentials
+api_server: ':'
+user_name: ''
+password: ''
 ```
 
 * You can edit the encrypted file using the below command
@@ -39,7 +41,7 @@ ansible-vault edit playbooks/secrets.yaml
 ```
 
 ## Step-2: Initial Setup for Hosted Control Plane
 * Navigate to the [root folder of the cloned Git repository](https://github.com/IBM/Ansible-OpenShift-Provisioning) in your terminal (`ls` should show [ansible.cfg](https://github.com/IBM/Ansible-OpenShift-Provisioning/blob/main/ansible.cfg)).
-* Update variables as per the compute node type (zKVM /zVM) in [hcp.yaml](https://github.com/veera-damisetti/Ansible-OpenShift-Provisioning/blob/main/inventories/default/group_vars/hcp.yaml.template) ( hcp.yaml.template )before running the playbooks.
+* Update variables as per the compute node type (zKVM/zVM) in [hcp.yaml](https://github.com/IBM/Ansible-OpenShift-Provisioning/blob/main/inventories/default/group_vars/hcp.yaml.template) (hcp.yaml.template) before running the playbooks.
 * First playbook to be run is setup_for_hcp.yaml, which will create the inventory file for HCP and add the ssh key to the KVM host.
 * Run this shell command:
@@ -48,7 +50,9 @@
 ```
 ansible-playbook playbooks/setup_for_hcp.yaml --ask-vault-pass
 ```
 
 ## Step-3: Create Hosted Cluster
-* Run each part step-by-step by running one playbook at a time, or all at once using [hcp.yaml](https://github.com/veera-damisetti/Ansible-OpenShift-Provisioning/blob/main/playbooks/hcp.yaml).
+* Run each part step-by-step by running one playbook at a time, or all at once using [hcp.yaml](https://github.com/IBM/Ansible-OpenShift-Provisioning/blob/main/playbooks/hcp.yaml).
+    * If the bastion is already available (bastion_params.create = false), just give ip, user, and nameserver under the bastion_params section; the remaining parameters under bastion_params can be ignored.
+
 * Here's the full list of playbooks to be run in order; full descriptions of each can be found further down the page:
 * create_hosted_cluster.yaml ([code](https://github.com/IBM/Ansible-OpenShift-Provisioning/blob/main/playbooks/create_hosted_cluster.yaml))
 * create_agents_and_wait_for_install_complete.yaml ([code](https://github.com/IBM/Ansible-OpenShift-Provisioning/blob/main/playbooks/create_agents_and_wait_for_install_complete.yaml))
@@ -111,7 +115,7 @@ ansible-playbook playbooks/hcp.yaml --ask-vault-pass
 * Destroy the Hosted Control Plane and other resources created as part of installation
 
 ### Procedure
-* Run the playbook [destroy_cluster_hcp.yaml](https://github.com/veera-damisetti/Ansible-OpenShift-Provisioning/blob/main/playbooks/destroy_cluster_hcp.yaml) to destroy all the resources created while installation
+* Run the playbook [destroy_cluster_hcp.yaml](https://github.com/IBM/Ansible-OpenShift-Provisioning/blob/main/playbooks/destroy_cluster_hcp.yaml) to destroy all the resources created during installation
 ```
 ansible-playbook playbooks/destroy_cluster_hcp.yaml --ask-vault-pass
 ```
diff --git a/docs/set-variables-group-vars.md b/docs/set-variables-group-vars.md
index 6619a348..5f7150ca 100644
--- a/docs/set-variables-group-vars.md
+++ b/docs/set-variables-group-vars.md
@@ -350,7 +350,7 @@
 **hcp.data_plane.zvm.nodes.interface.options** | Configuration options | layer2=1
 **hcp.data_plane.zvm.interface.ip** | IP addresses to be used for zVM nodes | 192.168.10.1
 **hcp.data_plane.zvm.nodes.dasd.disk_id** | Disk id for dasd disk to be used for zVM node | 4404
-**hcp.data_plane.zvm.nodes.lun** | Disk details of fcp disk to be used for zVM node | 4404
+**hcp.data_plane.zvm.nodes.lun** | Disk details of fcp disk to be used for zVM node | 0.0.4404
 
 ## ZVM ( Optional )
 **Variable Name** | **Description** | **Example**
diff --git a/inventories/default/group_vars/hcp.yaml.template b/inventories/default/group_vars/hcp.yaml.template
index 2b68d436..593f5977 100644
--- a/inventories/default/group_vars/hcp.yaml.template
+++ b/inventories/default/group_vars/hcp.yaml.template
@@ -29,7 +29,7 @@ hcp:
 
   # Hosted Control Plane Parameters
   control_plane:
-    high_availabiliy: true
+    high_availability: true
     clusters_namespace:
     hosted_cluster_name:
     basedomain:
diff --git a/roles/create_hcp_InfraEnv/tasks/main.yaml b/roles/create_hcp_InfraEnv/tasks/main.yaml
index 934a4889..c555d4da 100644
--- a/roles/create_hcp_InfraEnv/tasks/main.yaml
+++ b/roles/create_hcp_InfraEnv/tasks/main.yaml
@@ -60,7 +60,7 @@
     --base-domain={{ hcp.control_plane.basedomain }}
     --api-server-address=api.{{ hcp.control_plane.hosted_cluster_name }}.{{ hcp.control_plane.basedomain }}
     --ssh-key ~/.ssh/{{ hcp.ansible_key_name }}.pub
-    {% if hcp.control_plane.high_availabiliy == false %}
+    {% if hcp.control_plane.high_availability == false %}
     --control-plane-availability-policy "SingleReplica"
     {% endif %}
     --infra-availability-policy "SingleReplica"
diff --git a/roles/delete_resources_bastion_hcp/tasks/main.yaml b/roles/delete_resources_bastion_hcp/tasks/main.yaml
index 292f5527..3128292f 100644
--- a/roles/delete_resources_bastion_hcp/tasks/main.yaml
+++ b/roles/delete_resources_bastion_hcp/tasks/main.yaml
@@ -6,6 +6,9 @@
 - name: Scale in Nodepool
   command: oc -n {{ hcp.control_plane.clusters_namespace }} scale nodepool {{ hcp.control_plane.hosted_cluster_name }} --replicas 0
 
+- name: Create Kubeconfig for Hosted Cluster
+  shell: hcp create kubeconfig --namespace {{ hcp.control_plane.clusters_namespace }} --name {{ hcp.control_plane.hosted_cluster_name }} > /root/ansible_workdir/hcp-kubeconfig
+
 - block:
     - name: Wait for Worker Nodes to Detach
       k8s_info:
diff --git a/roles/setup_for_agents_hcp/templates/parm-file.parm.j2 b/roles/setup_for_agents_hcp/templates/parm-file.parm.j2
index e9edc733..a271a951 100644
--- a/roles/setup_for_agents_hcp/templates/parm-file.parm.j2
+++ b/roles/setup_for_agents_hcp/templates/parm-file.parm.j2
@@ -1 +1 @@
-rd.neednet=1 ai.ip_cfg_override=1 console=ttysclp0 coreos.live.rootfs_url=http://{{ hcp.bastion_params.ip }}:8080/rootfs.img ip={{ hcp.data_plane.zvm.nodes[item].interface.ip }}::{{ hcp.data_plane.zvm.gateway }}:{{ hcp.data_plane.zvm.subnetmask }}{% if hcp.data_plane.zvm.network_mode | lower != 'roce' %}::{{ hcp.data_plane.zvm.nodes[item].interface.ifname }}:none{% endif %} nameserver={{ hcp.bastion_params.ip }} zfcp.allow_lun_scan=0 {% if hcp.data_plane.zvm.network_mode | lower != 'roce' %}rd.znet={{ hcp.data_plane.zvm.nodes[item].interface.nettype }},{{ hcp.data_plane.zvm.nodes[item].interface.subchannels }},{{ hcp.data_plane.zvm.nodes[item].interface.options }}{% endif %} {% if hcp.data_plane.zvm.disk_type | lower != 'fcp' %}rd.dasd=0.0.{{ hcp.data_plane.zvm.nodes[item].dasd.disk_id }}{% else %}rd.zfcp=0.0.{{ hcp.data_plane.zvm.nodes[item].lun[0].paths[0].fcp}},{{ hcp.data_plane.zvm.nodes[item].lun[0].paths[0].wwpn }},{{ hcp.data_plane.zvm.nodes[item].lun[0].id }} {% endif %} random.trust_cpu=on rd.luks.options=discard ignition.firstboot ignition.platform.id=metal console=tty1 console=ttyS1,115200n8 coreos.inst.persistent-kargs="console=tty1 console=ttyS1,115200n8"
\ No newline at end of file
+rd.neednet=1 ai.ip_cfg_override=1 console=ttysclp0 coreos.live.rootfs_url=http://{{ hcp.bastion_params.ip }}:8080/rootfs.img ip={{ hcp.data_plane.zvm.nodes[item].interface.ip }}::{{ hcp.data_plane.zvm.gateway }}:{{ hcp.data_plane.zvm.subnetmask }}{% if hcp.data_plane.zvm.network_mode | lower != 'roce' %}::{{ hcp.data_plane.zvm.nodes[item].interface.ifname }}:none{% endif %} nameserver={{ hcp.bastion_params.ip }} zfcp.allow_lun_scan=0 {% if hcp.data_plane.zvm.network_mode | lower != 'roce' %}rd.znet={{ hcp.data_plane.zvm.nodes[item].interface.nettype }},{{ hcp.data_plane.zvm.nodes[item].interface.subchannels }},{{ hcp.data_plane.zvm.nodes[item].interface.options }}{% endif %} {% if hcp.data_plane.zvm.disk_type | lower != 'fcp' %}rd.dasd=0.0.{{ hcp.data_plane.zvm.nodes[item].dasd.disk_id }}{% else %}rd.zfcp={{ hcp.data_plane.zvm.nodes[item].lun[0].paths[0].fcp}},{{ hcp.data_plane.zvm.nodes[item].lun[0].paths[0].wwpn }},{{ hcp.data_plane.zvm.nodes[item].lun[0].id }} {% endif %} random.trust_cpu=on rd.luks.options=discard ignition.firstboot ignition.platform.id=metal console=tty1 console=ttyS1,115200n8 coreos.inst.persistent-kargs="console=tty1 console=ttyS1,115200n8"
\ No newline at end of file
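
Context for the parmfile change above: the hardcoded `0.0.` prefix on `rd.zfcp` is dropped because the FCP device bus ID is now expected in full from the inventory, matching the updated `hcp.data_plane.zvm.nodes.lun` example of `0.0.4404` in the docs. A minimal sketch of a node disk definition that satisfies the template's `lun[0].paths[0].fcp` / `.wwpn` and `lun[0].id` lookups; the WWPN and LUN values are purely illustrative, not from a real setup:

```yaml
# Hypothetical FCP disk entry for one zVM data-plane node (illustrative values).
lun:
  - id: "0x4001400000000000"
    paths:
      - fcp: "0.0.4404"            # full device bus ID; the template no longer prepends 0.0.
        wwpn: "0x500507630400d1e3"
# With these values, the zfcp branch of parm-file.parm.j2 renders:
#   rd.zfcp=0.0.4404,0x500507630400d1e3,0x4001400000000000
```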