From 725357b73c9cc04b5e9d94fe439344a8493c0f96 Mon Sep 17 00:00:00 2001
From: Chin-Ya Huang
Date: Wed, 1 Nov 2023 15:56:55 +0800
Subject: [PATCH] test(negative): stress volume node CPU when volume is offline expanding

ref: 6982

Signed-off-by: Chin-Ya Huang
---
 e2e/keywords/node.resource             |  3 +++
 e2e/keywords/workload.resource         | 13 +++++++++++++
 e2e/libs/keywords/node_keywords.py     |  4 ++++
 e2e/libs/keywords/volume_keywords.py   |  3 +++
 e2e/libs/keywords/workload_keywords.py |  9 +++++++++
 e2e/libs/volume/crd.py                 |  3 ++-
 e2e/libs/volume/volume.py              |  3 +++
 e2e/libs/workload/workload.py          | 18 ++++++++++++++++++
 e2e/tests/stress_cpu.robot             | 18 ++++++++++++++++++
 9 files changed, 73 insertions(+), 1 deletion(-)

diff --git a/e2e/keywords/node.resource b/e2e/keywords/node.resource
index 38279e87e7..1ce6618428 100644
--- a/e2e/keywords/node.resource
+++ b/e2e/keywords/node.resource
@@ -67,6 +67,9 @@ Restart cluster
         wait_for_workload_pod_stable    ${statefulset}
     END
 
+Stress the CPU of all ${role} nodes
+    stress_node_cpu_by_role    ${role}
+
 Stress the CPU of all volume nodes
     stress_node_cpu_by_volumes    ${volume_list}
 
diff --git a/e2e/keywords/workload.resource b/e2e/keywords/workload.resource
index 47bd78909d..cef6b99489 100644
--- a/e2e/keywords/workload.resource
+++ b/e2e/keywords/workload.resource
@@ -20,6 +20,19 @@ Create statefulset ${idx} with ${volume_type} volume
     ${pvc_name} =    get_workload_pvc_name    ${statefulset_name}
     Insert Into List    ${persistentvolumeclaim_list}    ${idx}    ${pvc_name}
 
+Scale down statefulset ${idx} to detach volume
+    ${statefulset} =    get_statefulset    ${statefulset_list}[${idx}]
+    ${scale_up_replica_count} =    Set Variable    ${statefulset.spec.replicas}
+    Set Test Variable    ${scale_up_replica_count}
+
+    scale_statefulset    ${statefulset_list}[${idx}]    0
+    wait_for_volume_detached    ${volume_list}[${idx}]
+
+Scale up statefulset ${idx} to attach volume
+    scale_statefulset    ${statefulset_list}[${idx}]    ${scale_up_replica_count}
+    wait_for_volume_healthy    ${volume_list}[${idx}]
+    wait_for_statefulset_replicas_ready    ${statefulset_list}[${idx}]    ${scale_up_replica_count}
+
 Create deployment ${idx} with ${volume_type} and ${option} volume
     ${deployment_name} =    create_deployment    ${volume_type}    ${option}
     Insert Into List    ${deployment_list}    ${idx}    ${deployment_name}
diff --git a/e2e/libs/keywords/node_keywords.py b/e2e/libs/keywords/node_keywords.py
index df2a0a0f4a..048b84749d 100644
--- a/e2e/libs/keywords/node_keywords.py
+++ b/e2e/libs/keywords/node_keywords.py
@@ -3,6 +3,7 @@
 from node import Node
 from node import Stress
 from node.utility import get_node_by_index
+from node.utility import list_node_names_by_role
 from node.utility import list_node_names_by_volumes
 from utility.utility import wait_for_all_instance_manager_running
 
@@ -43,6 +44,9 @@ def wait_for_all_instance_manager_running(self):
     def cleanup_stress_helper(self):
         self.stress.cleanup()
 
+    def stress_node_cpu_by_role(self, role):
+        self.stress.cpu(list_node_names_by_role(role))
+
     def stress_node_cpu_by_volumes(self, volume_names):
         self.stress.cpu(list_node_names_by_volumes(volume_names))
 
diff --git a/e2e/libs/keywords/volume_keywords.py b/e2e/libs/keywords/volume_keywords.py
index 471d73b23a..647566bf56 100644
--- a/e2e/libs/keywords/volume_keywords.py
+++ b/e2e/libs/keywords/volume_keywords.py
@@ -96,6 +96,9 @@ def wait_for_replica_rebuilding_complete(self, volume_name, replica_node):
     def wait_for_volume_attached(self, volume_name):
         self.volume.wait_for_volume_attached(volume_name)
 
+    def wait_for_volume_detached(self, volume_name):
+        self.volume.wait_for_volume_detached(volume_name)
+
     def wait_for_volume_healthy(self, volume_name):
         self.volume.wait_for_volume_healthy(volume_name)
 
diff --git a/e2e/libs/keywords/workload_keywords.py b/e2e/libs/keywords/workload_keywords.py
index 50fbc330ed..6ebbc90996 100644
--- a/e2e/libs/keywords/workload_keywords.py
+++ b/e2e/libs/keywords/workload_keywords.py
@@ -22,6 +22,12 @@ def create_statefulset(self, volume_type="rwo", option=""):
         statefulset_name = create_statefulset(volume_type, option)
         return statefulset_name
 
+    def get_statefulset(self, statefulset_name):
+        return get_statefulset(statefulset_name)
+
+    def scale_statefulset(self, statefulset_name, replica_count):
+        return scale_statefulset(statefulset_name, replica_count)
+
     def get_workload_pod_name(self, workload_name):
         return get_workload_pod_names(workload_name)[0]
 
@@ -54,3 +60,6 @@ def cleanup_statefulsets(self, statefulset_names):
 
     def wait_for_workload_pod_stable(self, workload_name):
         return wait_for_workload_pod_stable(workload_name)
+
+    def wait_for_statefulset_replicas_ready(self, statefulset_name, expected_ready_count):
+        return wait_for_statefulset_replicas_ready(statefulset_name, expected_ready_count)
diff --git a/e2e/libs/volume/crd.py b/e2e/libs/volume/crd.py
index 2580fbf626..ef72439866 100644
--- a/e2e/libs/volume/crd.py
+++ b/e2e/libs/volume/crd.py
@@ -196,6 +196,7 @@ def wait_for_volume_robustness_not(self, volume_name, not_desired_state):
         assert self.get(volume_name)["status"]["robustness"] != not_desired_state
 
     def wait_for_volume_expand_to_size(self, volume_name, expected_size):
+        engine = None
         engine_operation = Engine()
         for i in range(self.retry_count):
             logging(f"Waiting for {volume_name} expand to {expected_size} ({i}) ...")
@@ -206,7 +207,7 @@
 
             time.sleep(self.retry_interval)
 
-        engine = engine_operation.get_engine_by_volume(self.get(volume_name))
+        assert engine is not None
         assert int(engine['status']['currentSize']) == expected_size
 
     def get_endpoint(self, volume_name):
diff --git a/e2e/libs/volume/volume.py b/e2e/libs/volume/volume.py
index 18b32742ef..28b1278a06 100644
--- a/e2e/libs/volume/volume.py
+++ b/e2e/libs/volume/volume.py
@@ -42,6 +42,9 @@ def wait_for_volume_attached(self, volume_name):
         self.volume.wait_for_volume_state(volume_name, "attached")
         self.volume.wait_for_volume_robustness_not(volume_name, "unknown")
 
+    def wait_for_volume_detached(self, volume_name):
+        self.volume.wait_for_volume_state(volume_name, "detached")
+
     def wait_for_volume_healthy(self, volume_name):
         self.volume.wait_for_volume_state(volume_name, "attached")
         self.volume.wait_for_volume_robustness(volume_name, "healthy")
diff --git a/e2e/libs/workload/workload.py b/e2e/libs/workload/workload.py
index eaf3274adb..752062bc7f 100644
--- a/e2e/libs/workload/workload.py
+++ b/e2e/libs/workload/workload.py
@@ -170,6 +170,24 @@ def delete_statefulset(name, namespace='default'):
         time.sleep(retry_interval)
     assert deleted
 
+def get_statefulset(name, namespace='default'):
+    api = client.AppsV1Api()
+    return api.read_namespaced_stateful_set(name=name, namespace=namespace)
+
+def scale_statefulset(name, replica_count, namespace='default'):
+    logging(f"Scaling statefulset {name} to {replica_count}")
+
+    apps_v1_api = client.AppsV1Api()
+
+    scale = client.V1Scale(
+        metadata=client.V1ObjectMeta(name=name, namespace=namespace),
+        spec=client.V1ScaleSpec(replicas=int(replica_count))
+    )
+    apps_v1_api.patch_namespaced_stateful_set_scale(name=name, namespace=namespace, body=scale)
+
+    statefulset = get_statefulset(name, namespace)
+    assert statefulset.spec.replicas == int(replica_count)
+
 def create_pvc(volume_type, option):
     filepath = "./templates/workload/pvc.yaml"
     with open(filepath, 'r') as f:
diff --git a/e2e/tests/stress_cpu.robot b/e2e/tests/stress_cpu.robot
index 08a7ef4ea0..e71cd817fd 100644
--- a/e2e/tests/stress_cpu.robot
+++ b/e2e/tests/stress_cpu.robot
@@ -54,3 +54,21 @@ Stress Volume Node CPU When Volume Is Online Expanding
         Then Wait for statefulset 0 volume size expanded
         And Check statefulset 0 data is intact
     END
+
+Stress Volume Node CPU When Volume Is Offline Expanding
+    @{data_checksum_list} =    Create List
+    Set Test Variable    ${data_checksum_list}
+
+    Given Create statefulset 0 with rwo volume
+    And Write 1024 MB data to statefulset 0
+
+    FOR    ${i}    IN RANGE    ${LOOP_COUNT}
+        And Scale down statefulset 0 to detach volume
+        And Stress the CPU of all worker nodes
+
+        When Expand statefulset 0 volume by 100 MiB
+
+        Then Wait for statefulset 0 volume size expanded
+        And Scale up statefulset 0 to attach volume
+        And Check statefulset 0 data is intact
+    END
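
Note: scale_statefulset() above goes through the StatefulSet scale subresource
(patch_namespaced_stateful_set_scale with a V1Scale body), the same endpoint
"kubectl scale" uses, rather than patching spec.replicas on the object itself.
A minimal sketch of driving the new helpers outside Robot Framework, assuming
e2e/libs is on PYTHONPATH, a reachable cluster, and a hypothetical StatefulSet
named "test-statefulset" in the default namespace:

    from kubernetes import config

    from workload.workload import get_statefulset, scale_statefulset

    # Authenticate the kubernetes client; the e2e suite normally runs against
    # a mounted kubeconfig, so load_kube_config() is assumed here.
    config.load_kube_config()

    # Scale to zero to detach the Longhorn volume, then restore one replica.
    scale_statefulset("test-statefulset", 0)
    assert get_statefulset("test-statefulset").spec.replicas == 0
    scale_statefulset("test-statefulset", 1)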
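
Note: the "Scale up statefulset ... to attach volume" keyword waits on
wait_for_statefulset_replicas_ready, which this patch calls but does not add,
so it is assumed to already exist in e2e/libs/workload/workload.py. A minimal
sketch of such a readiness poll, assuming the kubernetes Python client and
retry settings in the style of the module's other wait helpers (the
retry_count/retry_interval values here are placeholders):

    import time

    from kubernetes import client

    retry_count = 150     # placeholder; the real module defines its own values
    retry_interval = 1

    def wait_for_statefulset_replicas_ready(name, expected_ready_count, namespace='default'):
        api = client.AppsV1Api()
        for i in range(retry_count):
            statefulset = api.read_namespaced_stateful_set(name=name, namespace=namespace)
            # status.ready_replicas is None until the first pod reports Ready
            ready = statefulset.status.ready_replicas or 0
            if ready == int(expected_ready_count):
                return
            time.sleep(retry_interval)
        assert False, f"statefulset {name} did not reach {expected_ready_count} ready replicas"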