diff --git a/e2e/keywords/workload.resource b/e2e/keywords/workload.resource
index 6b71fdbca5..e3dce4a006 100644
--- a/e2e/keywords/workload.resource
+++ b/e2e/keywords/workload.resource
@@ -189,3 +189,14 @@ Check ${workload_kind} ${workload_id} pod is ${expect_state} on another node
 Delete Longhorn ${workload_kind} ${workload_name} pod on node ${node_id}
     ${node_name} =    get_node_by_index    ${node_id}
     delete_workload_pod_on_node    ${workload_name}    ${node_name}    longhorn-system
+
+Trim ${workload_kind} ${workload_id} volume should ${condition}
+    ${workload_name} =    generate_name_with_suffix    ${workload_kind}    ${workload_id}
+
+    IF    $condition == "fail"
+        trim_workload_volume_filesystem    ${workload_name}    is_expect_fail=True
+    ELSE IF    $condition == "pass"
+        trim_workload_volume_filesystem    ${workload_name}    is_expect_fail=False
+    ELSE
+        Fail    "Invalid condition value: ${condition}"
+    END
diff --git a/e2e/libs/keywords/workload_keywords.py b/e2e/libs/keywords/workload_keywords.py
index 6f9175bd7b..d27845d91a 100644
--- a/e2e/libs/keywords/workload_keywords.py
+++ b/e2e/libs/keywords/workload_keywords.py
@@ -192,3 +192,7 @@ def is_workloads_pods_has_annotations(self, workload_names, annotation_key, name
             if not is_workload_pods_has_annotations(workload_name, annotation_key, namespace=namespace, label_selector=label_selector):
                 return False
         return True
+
+    def trim_workload_volume_filesystem(self, workload_name, is_expect_fail=False):
+        volume_name = get_workload_volume_name(workload_name)
+        self.volume.trim_filesystem(volume_name, is_expect_fail=is_expect_fail)
diff --git a/e2e/libs/volume/crd.py b/e2e/libs/volume/crd.py
index eea996d79b..d8b69c2784 100644
--- a/e2e/libs/volume/crd.py
+++ b/e2e/libs/volume/crd.py
@@ -511,3 +511,6 @@ def validate_volume_setting(self, volume_name, setting_name, value):
         volume = self.get(volume_name)
         assert str(volume["spec"][setting_name]) == value, \
             f"Expected volume {volume_name} setting {setting_name} is {value}, but it's {str(volume['spec'][setting_name])}"
+
+    def trim_filesystem(self, volume_name, is_expect_fail=False):
+        return Rest(self).trim_filesystem(volume_name, is_expect_fail=is_expect_fail)
\ No newline at end of file
diff --git a/e2e/libs/volume/rest.py b/e2e/libs/volume/rest.py
index 502d8f64e7..9cce306a46 100644
--- a/e2e/libs/volume/rest.py
+++ b/e2e/libs/volume/rest.py
@@ -370,3 +370,20 @@ def wait_for_replica_ready_to_rw(self, volume_name):
                 break
             time.sleep(self.retry_interval)
         assert ready, f"Failed to get volume {volume_name} replicas ready: {replicas}"
+
+    def trim_filesystem(self, volume_name, is_expect_fail=False):
+        is_unexpected_pass = False
+        try:
+            self.get(volume_name).trimFilesystem(name=volume_name)
+
+            if is_expect_fail:
+                is_unexpected_pass = True
+
+        except Exception as e:
+            if is_expect_fail:
+                logging(f"Failed to trim filesystem: {e}")
+            else:
+                raise e
+
+        if is_unexpected_pass:
+            raise Exception(f"Expected volume {volume_name} trim filesystem to fail")
diff --git a/e2e/libs/volume/volume.py b/e2e/libs/volume/volume.py
index bbfb2832bf..a6f5da7a85 100644
--- a/e2e/libs/volume/volume.py
+++ b/e2e/libs/volume/volume.py
@@ -154,3 +154,6 @@ def wait_for_engine_image_upgrade_completed(self, volume_name, engine_image_name
     def validate_volume_setting(self, volume_name, setting_name, value):
         return self.volume.validate_volume_setting(volume_name, setting_name, value)
+
+    def trim_filesystem(self, volume_name, is_expect_fail=False):
+        return self.volume.trim_filesystem(volume_name, is_expect_fail=is_expect_fail)
diff --git a/e2e/tests/regression/test_v2.robot b/e2e/tests/regression/test_v2.robot
index 27c3831665..137d7eb7c3 100644
--- a/e2e/tests/regression/test_v2.robot
+++ b/e2e/tests/regression/test_v2.robot
@@ -11,6 +11,8 @@ Resource    ../keywords/workload.resource
 Resource    ../keywords/volume.resource
 Resource    ../keywords/setting.resource
 Resource    ../keywords/node.resource
+Resource    ../keywords/host.resource
+Resource    ../keywords/longhorn.resource
 
 Test Setup    Set test environment
 Test Teardown    Cleanup test resources
@@ -50,3 +52,23 @@ Degraded Volume Replica Rebuilding
         And Wait for deployment 0 pods stable
         Then Check deployment 0 data in file data.txt is intact
     END
+
+V2 Volume Should Block Trim When Volume Is Degraded
+    Given Set setting auto-salvage to true
+    And Create storageclass longhorn-test with    dataEngine=v2
+    And Create persistentvolumeclaim 0 using RWO volume with longhorn-test storageclass
+    And Create deployment 0 with persistentvolumeclaim 0
+
+    FOR    ${i}    IN RANGE    ${LOOP_COUNT}
+        And Keep writing data to pod of deployment 0
+
+        When Restart cluster
+        And Wait for longhorn ready
+        And Wait for volume of deployment 0 attached and degraded
+        Then Trim deployment 0 volume should fail
+
+        When Wait for workloads pods stable
+        ...    deployment 0
+        And Check deployment 0 works
+        Then Trim deployment 0 volume should pass
+    END
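
For reference, the is_expect_fail handling added to trim_filesystem in e2e/libs/volume/rest.py is an instance of a general "expected failure" pattern: run the action, record an unexpected pass, and only re-raise when a failure was not expected. A minimal standalone sketch of that pattern, where `action` is a hypothetical stand-in for the Longhorn client's trimFilesystem call:

    # Sketch only; `action` is any zero-argument callable, not a real Longhorn API.
    def run_expecting_failure(action, is_expect_fail=False):
        is_unexpected_pass = False
        try:
            action()
            # The action succeeded; flag it if we expected it to fail.
            if is_expect_fail:
                is_unexpected_pass = True
        except Exception as e:
            # Swallow the error only when failure was the expected outcome.
            if not is_expect_fail:
                raise
            print(f"Failed as expected: {e}")
        if is_unexpected_pass:
            raise Exception("Expected the action to fail, but it passed")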