From dd2d6b07a885c6634afa92437b1562486f2771d4 Mon Sep 17 00:00:00 2001 From: Chin-Ya Huang Date: Thu, 29 Feb 2024 10:26:59 +0800 Subject: [PATCH] refactor(negative): adapt keywords to test longhorn/longhorn-7034 Signed-off-by: Chin-Ya Huang --- e2e/keywords/common.resource | 30 ++---- e2e/keywords/deployment.resource | 26 +++-- e2e/keywords/engine.resource | 1 - e2e/keywords/kubelet.resource | 15 --- e2e/keywords/longhorn.resource | 21 ++++ e2e/keywords/node.resource | 68 ------------- e2e/keywords/node_instance.resource | 34 +++++++ e2e/keywords/persistentvolumeclaim.resource | 15 +++ e2e/keywords/recurringjob.resource | 13 ++- e2e/keywords/statefulset.resource | 105 +++++++++----------- e2e/keywords/stress.resource | 20 ++-- e2e/keywords/volume.resource | 92 +++++++---------- e2e/keywords/workload.resource | 88 +++------------- 13 files changed, 213 insertions(+), 315 deletions(-) delete mode 100644 e2e/keywords/kubelet.resource create mode 100644 e2e/keywords/longhorn.resource delete mode 100644 e2e/keywords/node.resource create mode 100644 e2e/keywords/node_instance.resource create mode 100644 e2e/keywords/persistentvolumeclaim.resource diff --git a/e2e/keywords/common.resource b/e2e/keywords/common.resource index af101f0f5a..4b722f9bd5 100644 --- a/e2e/keywords/common.resource +++ b/e2e/keywords/common.resource @@ -2,39 +2,29 @@ Documentation Common keywords Library ../libs/keywords/common_keywords.py -Library ../libs/keywords/stress_keywords.py -Library ../libs/keywords/volume_keywords.py -Library ../libs/keywords/recurringjob_keywords.py Library ../libs/keywords/deployment_keywords.py +Library ../libs/keywords/network_keywords.py +Library ../libs/keywords/recurringjob_keywords.py Library ../libs/keywords/statefulset_keywords.py +Library ../libs/keywords/stress_keywords.py +Library ../libs/keywords/volume_keywords.py Library ../libs/keywords/workload_keywords.py -Library ../libs/keywords/network_keywords.py - - -*** Variables *** - *** Keywords *** Set test 
environment init_k8s_api_client init_node_exec ${SUITE NAME.rsplit('.')[1]} init_storageclasses - @{volume_list} = Create List - Set Test Variable ${volume_list} - @{deployment_list} = Create List - Set Test Variable ${deployment_list} - @{statefulset_list} = Create List - Set Test Variable ${statefulset_list} - @{persistentvolumeclaim_list} = Create List - Set Test Variable ${persistentvolumeclaim_list} + setup_control_plane_network_latency Cleanup test resources cleanup_control_plane_network_latency cleanup_node_exec cleanup_stress_helper - cleanup_recurringjobs ${volume_list} - cleanup_volumes ${volume_list} - cleanup_deployments ${deployment_list} - cleanup_statefulsets ${statefulset_list} + cleanup_recurringjobs + cleanup_deployments + cleanup_statefulsets + cleanup_persistentvolumeclaims + cleanup_volumes cleanup_storageclasses diff --git a/e2e/keywords/deployment.resource b/e2e/keywords/deployment.resource index c016abb30f..d2b6df8f5f 100644 --- a/e2e/keywords/deployment.resource +++ b/e2e/keywords/deployment.resource @@ -2,22 +2,20 @@ Documentation Deployment Keywords Library Collections +Library ../libs/keywords/common_keywords.py Library ../libs/keywords/deployment_keywords.py *** Keywords *** -Create deployment ${idx} with ${volume_type} volume - ${deployment_name} = create_deployment ${volume_type} - Insert Into List ${deployment_list} ${idx} ${deployment_name} +Create deployment ${deployment_id} with persistentvolumeclaim ${claim_id} + ${deployment_name} = generate_name_with_suffix deployment ${deployment_id} + ${claim_name} = generate_name_with_suffix claim ${claim_id} + create_deployment ${deployment_name} ${claim_name} -Create deployment ${idx} with ${volume_type} and ${option} volume - ${deployment_name} = create_deployment ${volume_type} ${option} - Insert Into List ${deployment_list} ${idx} ${deployment_name} +Check deployment ${deployment_id} works + ${deployment_name} = generate_name_with_suffix deployment ${deployment_id} + 
write_workload_pod_random_data ${deployment_name} 1024 random-data + check_workload_pod_data_checksum ${deployment_name} random-data -Keep writing data to deployment ${idx} - ${pod_name} = get_workload_pod_name ${deployment_list}[${idx}] - keep_writing_pod_data ${pod_name} - -Check deployment ${idx} works - ${pod_name} = get_workload_pod_name ${deployment_list}[${idx}] - ${pod_data_checksum} = write_pod_random_data ${pod_name} 1024 - check_pod_data_checksum ${pod_name} ${pod_data_checksum} +Wait for deployment ${deployment_id} pods stable + ${deployment_name} = generate_name_with_suffix deployment ${deployment_id} + wait_for_workload_pods_stable ${deployment_name} diff --git a/e2e/keywords/engine.resource b/e2e/keywords/engine.resource index 4fc3ba7826..cfb5c6d8f6 100644 --- a/e2e/keywords/engine.resource +++ b/e2e/keywords/engine.resource @@ -4,7 +4,6 @@ Documentation Longhorn engine related keywords Library ../libs/keywords/common_keywords.py Library ../libs/keywords/engine_keywords.py - *** Keywords *** Engine state should eventually be ${expected_engine_state} Run keyword And Continue On Failure diff --git a/e2e/keywords/kubelet.resource b/e2e/keywords/kubelet.resource deleted file mode 100644 index 4af606b794..0000000000 --- a/e2e/keywords/kubelet.resource +++ /dev/null @@ -1,15 +0,0 @@ -*** Settings *** -Documentation Kubelet keywords - -Library ../libs/keywords/kubelet_keywords.py -Library ../libs/keywords/workload_keywords.py -Library ../libs/keywords/volume_keywords.py - -*** Variables *** - - -*** Keywords *** -Stop volume node kubelet of statefulset ${idx} for ${stop_time_in_sec} seconds - ${volume_name} = get_workload_volume_name ${statefulset_list}[${idx}] - ${node_name} = get_replica_node_attached_to_volume ${volume_name} - restart_kubelet ${node_name} ${stop_time_in_sec} diff --git a/e2e/keywords/longhorn.resource b/e2e/keywords/longhorn.resource new file mode 100644 index 0000000000..c563bfe741 --- /dev/null +++ b/e2e/keywords/longhorn.resource @@ 
-0,0 +1,21 @@ +*** Settings *** +Documentation Longhorn Keywords + +Library ../libs/keywords/instancemanager_keywords.py +Library ../libs/keywords/workload_keywords.py + +*** Variables *** +@{longhorn_workloads} +... csi-attacher +... csi-provisioner +... csi-resizer +... csi-snapshotter +... longhorn-driver-deployer +... longhorn-csi-plugin +... longhorn-manager +... longhorn-ui + +*** Keywords *** +Wait for longhorn ready + wait_for_all_instance_manager_running + wait_for_workloads_pods_stable ${longhorn_workloads} longhorn-system diff --git a/e2e/keywords/node.resource b/e2e/keywords/node.resource deleted file mode 100644 index e14a1de19a..0000000000 --- a/e2e/keywords/node.resource +++ /dev/null @@ -1,68 +0,0 @@ -*** Settings *** -Documentation Physical Node Keywords - -Library ../libs/keywords/volume_keywords.py -Library ../libs/keywords/node_keywords.py -Library ../libs/keywords/workload_keywords.py -Library ../libs/keywords/network_keywords.py - -*** Keywords *** -During replica rebuilding, reboot volume node - reboot_volume_node ${volume_name} - -During replica rebuilding, reboot replica node - reboot_replica_node ${volume_name} - -Reboot volume ${idx} volume node - reboot_volume_node ${volume_list}[${idx}] - FOR ${item} IN @{volume_list} - wait for volume_attached ${item} - END - -Reboot volume ${idx} replica node - reboot_replica_node ${volume_list}[${idx}] - FOR ${item} IN @{volume_list} - wait for volume_attached ${item} - END - -Reboot node ${idx} - reboot_node_by_index ${idx} - -Restart all worker nodes - reboot_all_worker_nodes - -Reboot volume node of statefulset ${idx} - ${volume_name} = get_workload_volume_name ${statefulset_list}[${idx}] - ${node_name} = get_replica_node_attached_to_volume ${volume_name} - reboot_node_by_name ${node_name} - -Power off node ${idx} for ${power_off_time_in_min} mins - reboot_node_by_index ${idx} ${power_off_time_in_min} - -Power off all worker nodes for ${power_off_time_in_min} mins - reboot_all_worker_nodes 
${power_off_time_in_min} - -Power off volume node of statefulset ${idx} for ${power_off_time_in_min} mins - ${volume_name} = get_workload_volume_name ${statefulset_list}[${idx}] - ${node_name} = get_replica_node_attached_to_volume ${volume_name} - reboot_node_by_name ${node_name} ${power_off_time_in_min} - -Wait for longhorn ready - wait_for_all_instance_manager_running - FOR ${deployment} IN @{deployment_list} - wait_for_workload_pod_stable ${deployment} - END - FOR ${statefulset} IN @{statefulset_list} - wait_for_workload_pod_stable ${statefulset} - END - -Restart cluster - reboot_all_nodes - setup_control_plane_network_latency - wait_for_all_instance_manager_running - FOR ${deployment} IN @{deployment_list} - wait_for_workload_pod_stable ${deployment} - END - FOR ${statefulset} IN @{statefulset_list} - wait_for_workload_pod_stable ${statefulset} - END diff --git a/e2e/keywords/node_instance.resource b/e2e/keywords/node_instance.resource new file mode 100644 index 0000000000..692bf3c316 --- /dev/null +++ b/e2e/keywords/node_instance.resource @@ -0,0 +1,34 @@ +*** Settings *** +Documentation Physical Node Keywords + +Library ../libs/keywords/common_keywords.py +Library ../libs/keywords/instancemanager_keywords.py +Library ../libs/keywords/network_keywords.py +Library ../libs/keywords/node_instance_keywords.py +Library ../libs/keywords/volume_keywords.py +Library ../libs/keywords/workload_keywords.py + +*** Keywords *** +Reboot volume ${volume_id} volume node + ${volume_name} = generate_name_with_suffix volume ${volume_id} + reboot_volume_node ${volume_name} + +Reboot volume ${volume_id} replica node + ${volume_name} = generate_name_with_suffix volume ${volume_id} + reboot_replica_node ${volume_name} + +Reboot node ${idx} + reboot_node_by_index ${idx} + +Restart all worker nodes + reboot_all_worker_nodes + +Power off node ${idx} for ${power_off_time_in_min} mins + reboot_node_by_index ${idx} ${power_off_time_in_min} + +Power off all worker nodes for 
${power_off_time_in_min} mins + reboot_all_worker_nodes ${power_off_time_in_min} + +Restart cluster + reboot_all_nodes + setup_control_plane_network_latency diff --git a/e2e/keywords/persistentvolumeclaim.resource b/e2e/keywords/persistentvolumeclaim.resource new file mode 100644 index 0000000000..5257c478c9 --- /dev/null +++ b/e2e/keywords/persistentvolumeclaim.resource @@ -0,0 +1,15 @@ +*** Settings *** +Documentation PersistentVolumeClaim Keywords + +Library Collections +Library ../libs/keywords/common_keywords.py +Library ../libs/keywords/persistentvolumeclaim_keywords.py + +*** Keywords *** +Create persistentvolumeclaim ${claim_id} using ${volume_type} volume + ${claim_name} = generate_name_with_suffix claim ${claim_id} + create_persistentvolumeclaim ${claim_name} ${volume_type} + +Create persistentvolumeclaim ${claim_id} using ${volume_type} volume with ${option} storageclass + ${claim_name} = generate_name_with_suffix claim ${claim_id} + create_persistentvolumeclaim ${claim_name} ${volume_type} ${option} diff --git a/e2e/keywords/recurringjob.resource b/e2e/keywords/recurringjob.resource index fd53932380..2724a65c0b 100644 --- a/e2e/keywords/recurringjob.resource +++ b/e2e/keywords/recurringjob.resource @@ -2,12 +2,15 @@ Documentation Recurring Job Keywords Library Collections +Library ../libs/keywords/common_keywords.py Library ../libs/keywords/recurringjob_keywords.py *** Keywords *** -Create snapshot and backup recurringjob for volume ${idx} - create_snapshot_recurringjob_for_volume ${volume_list}[${idx}] - create_backup_recurringjob_for_volume ${volume_list}[${idx}] +Create snapshot and backup recurringjob for volume ${volume_id} + ${volume_name} = generate_name_with_suffix volume ${volume_id} + create_snapshot_recurringjob_for_volume ${volume_name} + create_backup_recurringjob_for_volume ${volume_name} -Check recurringjobs for volume ${idx} work - check_recurringjobs_work ${volume_list}[${idx}] +Check recurringjobs for volume ${volume_id} work + 
${volume_name} = generate_name_with_suffix volume ${volume_id} + check_recurringjobs_work ${volume_name} diff --git a/e2e/keywords/statefulset.resource b/e2e/keywords/statefulset.resource index d6c7efdaa5..e00317cf11 100644 --- a/e2e/keywords/statefulset.resource +++ b/e2e/keywords/statefulset.resource @@ -2,62 +2,55 @@ Documentation StatefulSet Keywords Library Collections +Library ../libs/keywords/common_keywords.py Library ../libs/keywords/statefulset_keywords.py *** Keywords *** -Create statefulset ${idx} with ${volume_type} volume - ${statefulset_name} = create_statefulset ${volume_type} - Insert Into List ${statefulset_list} ${idx} ${statefulset_name} - - ${volume_name} = get_workload_volume_name ${statefulset_name} - Insert Into List ${volume_list} ${idx} ${volume_name} - - ${pvc_name} = get_workload_pvc_name ${statefulset_name} - Insert Into List ${persistentvolumeclaim_list} ${idx} ${pvc_name} - -Create statefulset ${idx} with ${volume_type} and ${option} volume - ${statefulset_name} = create_statefulset ${volume_type} ${option} - Insert Into List ${statefulset_list} ${idx} ${statefulset_name} - -Scale down statefulset ${idx} to detach volume - ${statefulset} = get_statefulset ${statefulset_list}[${idx}] - ${scale_up_replica_count} = Set Variable ${statefulset.spec.replicas} - Set Test Variable ${scale_up_replica_count} - - scale_statefulset ${statefulset_list}[${idx}] 0 - wait_for_volume_detached ${volume_list}[${idx}] - -Scale up statefulset ${idx} to attach volume - scale_statefulset ${statefulset_list}[${idx}] ${scale_up_replica_count} - wait_for_volume_healthy ${volume_list}[${idx}] - wait_for_statefulset_replicas_ready ${statefulset_list}[${idx}] ${scale_up_replica_count} - -Expand statefulset ${idx} volume by ${size} MiB - ${expected_size} = expand_pvc_size_by_mib ${persistentvolumeclaim_list}[${idx}] ${size} - Set Test Variable ${expected_size} - -Write ${size} MB data to statefulset ${idx} - ${pod_name} = get_workload_pod_name 
${statefulset_list}[${idx}] - ${pod_data_checksum} = write_pod_random_data ${pod_name} ${size} - Insert Into List ${data_checksum_list} ${idx} ${pod_data_checksum} - -Keep writing data to statefulset ${idx} - ${pod_name} = get_workload_pod_name ${statefulset_list}[${idx}] - keep_writing_pod_data ${pod_name} - -Check statefulset ${idx} works - ${pod_name} = get_workload_pod_name ${statefulset_list}[${idx}] - ${pod_data_checksum} = write_pod_random_data ${pod_name} 1024 - check_pod_data_checksum ${pod_name} ${pod_data_checksum} - -Check statefulset ${idx} data is intact - ${pod_name} = get_workload_pod_name ${statefulset_list}[${idx}] - ${expected_data_checksum} = Get From List ${data_checksum_list} ${idx} - check_pod_data_checksum ${pod_name} ${expected_data_checksum} - -Wait for statefulset ${idx} volume size expanded - wait_for_volume_expand_to_size ${volume_list}[${idx}] ${expected_size} - -Wait for statefulset ${idx} stable - wait_for_workload_pod_stable ${statefulset_list}[${idx}] - +Create statefulset ${statefulset_id} using ${volume_type} volume + ${statefulset_name} = generate_name_with_suffix statefulset ${statefulset_id} + create_statefulset ${statefulset_name} ${volume_type} + +Create statefulset ${statefulset_id} using ${volume_type} volume with ${option} storageclass + ${statefulset_name} = generate_name_with_suffix statefulset ${statefulset_id} + create_statefulset ${statefulset_name} ${volume_type} ${option} + +Scale statefulset ${statefulset_id} to ${replicaset_size} + ${statefulset_name} = generate_name_with_suffix statefulset ${statefulset_id} + scale_statefulset ${statefulset_name} ${replicaset_size} + +Scale down statefulset ${statefulset_id} to detach volume + ${statefulset_name} = generate_name_with_suffix statefulset ${statefulset_id} + scale_statefulset_down ${statefulset_name} + +Scale up statefulset ${statefulset_id} to attach volume + ${statefulset_name} = generate_name_with_suffix statefulset ${statefulset_id} + scale_statefulset_up 
${statefulset_name} + +Expand statefulset ${statefulset_id} volume by ${size} MiB + ${statefulset_name} = generate_name_with_suffix statefulset ${statefulset_id} + expand_workload_claim_size_by_mib ${statefulset_name} ${size} + +Write ${size} MB data to file ${file_name} in statefulset ${statefulset_id} + ${statefulset_name} = generate_name_with_suffix statefulset ${statefulset_id} + write_workload_pod_random_data ${statefulset_name} ${size} ${file_name} + +Check statefulset ${statefulset_id} works + ${statefulset_name} = generate_name_with_suffix statefulset ${statefulset_id} + write_workload_pod_random_data ${statefulset_name} 1024 random-data + check_workload_pod_data_checksum ${statefulset_name} random-data + +Check statefulset ${statefulset_id} data in file ${file_name} is intact + ${statefulset_name} = generate_name_with_suffix statefulset ${statefulset_id} + check_workload_pod_data_checksum ${statefulset_name} ${file_name} + +Wait for statefulset ${statefulset_id} volume size expanded + ${statefulset_name} = generate_name_with_suffix statefulset ${statefulset_id} + wait_for_workload_claim_size_expanded ${statefulset_name} + +Wait for statefulset ${statefulset_id} volume detached + ${statefulset_name} = generate_name_with_suffix statefulset ${statefulset_id} + wait_for_workload_volume_detached ${statefulset_name} + +Wait for statefulset ${statefulset_id} pods stable + ${statefulset_name} = generate_name_with_suffix statefulset ${statefulset_id} + wait_for_workload_pods_stable ${statefulset_name} diff --git a/e2e/keywords/stress.resource b/e2e/keywords/stress.resource index d4a99230fc..a432b3bfd4 100644 --- a/e2e/keywords/stress.resource +++ b/e2e/keywords/stress.resource @@ -4,14 +4,22 @@ Documentation Stress Node Keywords Library ../libs/keywords/stress_keywords.py *** Keywords *** -Stress the CPU of all ${role} nodes +Stress CPU of all ${role} nodes stress_node_cpu_by_role ${role} -Stress the CPU of all volume nodes - stress_node_cpu_by_volumes 
${volume_list} +Stress CPU of node with volume ${volume_id} + ${volume_name} = generate_name_with_suffix volume ${volume_id} + stress_node_cpu_by_volume ${volume_name} -Stress the memory of all ${role} nodes +Stress CPU of volume nodes + stress_node_cpu_of_all_volumes + +Stress memory of all ${role} nodes stress_node_memory_by_role ${role} -Stress the memory of all volume nodes - stress_node_memory_by_volumes ${volume_list} +Stress memory of node with volume ${volume_id} + ${volume_name} = generate_name_with_suffix volume ${volume_id} + stress_node_memory_by_volume ${volume_name} + +Stress memory of volume nodes + stress_node_memory_of_all_volumes diff --git a/e2e/keywords/volume.resource b/e2e/keywords/volume.resource index d260593983..8d71880a20 100644 --- a/e2e/keywords/volume.resource +++ b/e2e/keywords/volume.resource @@ -2,80 +2,56 @@ Documentation Volume Keywords Library Collections -Library ../libs/keywords/volume_keywords.py Library ../libs/keywords/common_keywords.py +Library ../libs/keywords/volume_keywords.py *** Keywords *** -Create a volume with ${size} GB and ${replica_count} replicas - ${volume_name} = create_volume ${size} ${replica_count} - attach_volume ${volume_name} - Set Test Variable ${volume_name} - Append To List ${volume_list} ${volume_name} +Create volume ${volume_id} with ${size} GB and ${replica_count} replicas + ${volume_name} = generate_name_with_suffix volume ${volume_id} + create_volume ${volume_name} ${size} ${replica_count} -Create volume ${idx} with ${size} GB and ${replica_count} replicas - ${volume_name} = create_volume ${size} ${replica_count} - attach_volume ${volume_name} - Insert Into List ${volume_list} ${idx} ${volume_name} +Update volume ${volume_name} replica count to ${replica_count} + update_volume_spec ${volume_name} numberOfReplicas ${replica_count} -Attach volume to node +Attach volume ${volume_id} + ${volume_name} = generate_name_with_suffix volume ${volume_id} attach_volume ${volume_name} -Detach volume from node +Detach volume 
${volume_id} + ${volume_name} = generate_name_with_suffix volume ${volume_id} detach_volume ${volume_name} -Write data to the volume - ${volume_data_checksum} = write_volume_random_data ${volume_name} 2048 - Set Test Variable ${volume_data_checksum} - -Keep writing data to volume ${idx} - keep_writing_data ${volume_list}[${idx}] +Write data to volume ${volume_id} + ${volume_name} = generate_name_with_suffix volume ${volume_id} + write_volume_random_data ${volume_name} 2048 -Delete replica on replica node to trigger replica rebuilding - ${replica_node} = get_replica_node ${volume_name} - delete_replica ${volume_name} ${replica_node} - wait_for_replica_rebuilding_start ${volume_name} ${replica_node} +Keep writing data to volume ${volume_id} + ${volume_name} = generate_name_with_suffix volume ${volume_id} + keep_writing_data ${volume_name} -Delete replica on volume node to trigger replica rebuilding - ${volume_node} = get_replica_node_attached_to_volume ${volume_name} - delete_replica ${volume_name} ${volume_node} - wait_for_replica_rebuilding_start ${volume_name} ${volume_node} +Delete volume ${volume_id} replica on ${replica_locality} + ${volume_name} = generate_name_with_suffix volume ${volume_id} + delete_replica_on_node ${volume_name} ${replica_locality} -Delete replica ${replica_0} to trigger replica ${replica_0} rebuilding - delete_replica ${volume_name} ${replica_0} - wait_for_replica_rebuilding_start ${volume_name} ${replica_0} +Wait until volume ${volume_id} replica rebuilding started on ${replica_locality} + ${volume_name} = generate_name_with_suffix volume ${volume_id} + wait_for_replica_rebuilding_to_start_on_node ${volume_name} ${replica_locality} -During replica ${replica_0} rebuilding, delete replica ${replica_1} - wait_for_replica_rebuilding_start ${volume_name} ${replica_0} - delete_replica ${volume_name} ${replica_1} +Wait until volume ${volume_id} replica rebuilding completed on ${replica_locality} + ${volume_name} = generate_name_with_suffix 
volume ${volume_id} + wait_for_replica_rebuilding_to_complete_on_node ${volume_name} ${replica_locality} -Wait until replica ${replica_0} rebuilt, delete replica ${replica_2} - wait_for_replica_rebuilding_complete ${volume_name} ${replica_0} - delete_replica ${volume_name} ${replica_2} +Wait until volume ${volume_id} replicas rebuilding completed + ${volume_name} = generate_name_with_suffix volume ${volume_id} + wait_for_replica_rebuilding_to_complete ${volume_name} -Check data is intact - check_data_checksum ${volume_name} ${volume_data_checksum} +Check volume ${volume_id} data is intact + ${volume_name} = generate_name_with_suffix volume ${volume_id} + check_data_checksum ${volume_name} -Check volume ${idx} works - ${volume_data_checksum} = write_volume_random_data ${volume_list}[${idx}] 1024 +Check volume ${volume_id} works + ${volume_name} = generate_name_with_suffix volume ${volume_id} + ${volume_data_checksum} = write_volume_random_data ${volume_name} 1024 #TODO # write data to a file in the volume instead of writing /dev/longhorn/vol-name # so the data won't be overwritten and we can compare the checksum - -Wait until all replicas rebuilt - wait_for_replica_rebuilding_complete ${volume_name} 0 - wait_for_replica_rebuilding_complete ${volume_name} 1 - wait_for_replica_rebuilding_complete ${volume_name} 2 - -Wait until replica on volume node rebuilt - ${node_name} = get_replica_node_attached_to_volume ${volume_name} - wait_for_replica_rebuilding_start ${volume_name} ${node_name} - wait_for_replica_rebuilding_complete ${volume_name} ${node_name} - -Wait until replica on replica node rebuilt - ${node_name} = get_replica_node ${volume_name} - wait_for_replica_rebuilding_start ${volume_name} ${node_name} - wait_for_replica_rebuilding_complete ${volume_name} ${node_name} - -Wait for volume of statefulset ${idx} healthy - ${volume_name} = get_workload_volume_name ${statefulset_list}[${idx}] - wait_for_volume_healthy ${volume_name} diff --git 
a/e2e/keywords/workload.resource b/e2e/keywords/workload.resource index b8401c9437..85603180c3 100644 --- a/e2e/keywords/workload.resource +++ b/e2e/keywords/workload.resource @@ -2,82 +2,26 @@ Documentation Workload Keywords Library Collections +Library ../libs/keywords/common_keywords.py Library ../libs/keywords/workload_keywords.py *** Keywords *** -Create deployment ${idx} with ${volume_type} volume - ${deployment_name} = create_deployment ${volume_type} - Insert Into List ${deployment_list} ${idx} ${deployment_name} +Keep writing data to pod of ${workload_kind} ${workload_id} + ${workload_name} = generate_name_with_suffix ${workload_kind} ${workload_id} + keep_writing_workload_pod_data ${workload_name} -Create deployment ${idx} with ${volume_type} and ${option} volume - ${deployment_name} = create_deployment ${volume_type} ${option} - Insert Into List ${deployment_list} ${idx} ${deployment_name} +Power off volume node of ${workload_kind} ${workload_id} for ${duration} minutes + ${workload_name} = generate_name_with_suffix ${workload_kind} ${workload_id} + reboot_workload_volume_node ${workload_name} ${duration} -Create statefulset ${idx} with ${volume_type} volume - ${statefulset_name} = create_statefulset ${volume_type} - Insert Into List ${statefulset_list} ${idx} ${statefulset_name} +Reboot volume node of ${workload_kind} ${workload_id} + ${workload_name} = generate_name_with_suffix ${workload_kind} ${workload_id} + reboot_workload_volume_node ${workload_name} - ${volume_name} = get_workload_volume_name ${statefulset_name} - Insert Into List ${volume_list} ${idx} ${volume_name} - - ${pvc_name} = get_workload_pvc_name ${statefulset_name} - Insert Into List ${persistentvolumeclaim_list} ${idx} ${pvc_name} - -Create statefulset ${idx} with ${volume_type} and ${option} volume - ${statefulset_name} = create_statefulset ${volume_type} ${option} - Insert Into List ${statefulset_list} ${idx} ${statefulset_name} - -Scale down statefulset ${idx} to detach volume - 
${statefulset} = get_statefulset ${statefulset_list}[${idx}] - ${scale_up_replica_count} = Set Variable ${statefulset.spec.replicas} - Set Test Variable ${scale_up_replica_count} - - scale_statefulset ${statefulset_list}[${idx}] 0 - wait_for_volume_detached ${volume_list}[${idx}] - -Scale up statefulset ${idx} to attach volume - scale_statefulset ${statefulset_list}[${idx}] ${scale_up_replica_count} - wait_for_volume_healthy ${volume_list}[${idx}] - wait_for_statefulset_replicas_ready ${statefulset_list}[${idx}] ${scale_up_replica_count} - -Expand statefulset ${idx} volume by ${size} MiB - ${expected_size} = expand_pvc_size_by_mib ${persistentvolumeclaim_list}[${idx}] ${size} - Set Test Variable ${expected_size} - -Write ${size} MB data to statefulset ${idx} - ${pod_name} = get_workload_pod_name ${statefulset_list}[${idx}] - ${pod_data_checksum} = write_pod_random_data ${pod_name} ${size} - Insert Into List ${data_checksum_list} ${idx} ${pod_data_checksum} - -Keep writing data to deployment ${idx} - ${pod_name} = get_workload_pod_name ${deployment_list}[${idx}] - keep_writing_pod_data ${pod_name} - -Keep writing data to statefulset ${idx} - ${pod_name} = get_workload_pod_name ${statefulset_list}[${idx}] - keep_writing_pod_data ${pod_name} - -Check deployment ${idx} works - ${pod_name} = get_workload_pod_name ${deployment_list}[${idx}] - ${pod_data_checksum} = write_pod_random_data ${pod_name} 1024 - check_pod_data_checksum ${pod_name} ${pod_data_checksum} - -Check statefulset ${idx} works - ${pod_name} = get_workload_pod_name ${statefulset_list}[${idx}] - ${pod_data_checksum} = write_pod_random_data ${pod_name} 1024 - check_pod_data_checksum ${pod_name} ${pod_data_checksum} - -Check statefulset ${idx} data is intact - ${pod_name} = get_workload_pod_name ${statefulset_list}[${idx}] - ${expected_data_checksum} = Get From List ${data_checksum_list} ${idx} - check_pod_data_checksum ${pod_name} ${expected_data_checksum} - -Wait for statefulset ${idx} volume size 
expanded - wait_for_volume_expand_to_size ${volume_list}[${idx}] ${expected_size} - -Wait for statefulset ${idx} volume detached - wait_for_volume_detached ${volume_list}[${idx}] - -Wait for statefulset ${idx} stable - wait_for_workload_pod_stable ${statefulset_list}[${idx}] +Stop volume node kubelet of ${workload_kind} ${workload_id} for ${duration} seconds + ${workload_name} = generate_name_with_suffix ${workload_kind} ${workload_id} + restart_workload_kubelet ${workload_name} ${duration} +Wait for volume of ${workload_kind} ${workload_id} healthy + ${workload_name} = generate_name_with_suffix ${workload_kind} ${workload_id} + wait_for_workload_volume_healthy ${workload_name}