diff --git a/e2e/keywords/common.resource b/e2e/keywords/common.resource
index 89fabbbeb..2b4c030e0 100644
--- a/e2e/keywords/common.resource
+++ b/e2e/keywords/common.resource
@@ -57,8 +57,8 @@ Cleanup test resources
     cleanup_volumes
     cleanup_storageclasses
     cleanup_backups
-    cleanup_disks
+    # cleanup_disks
     cleanup_backing_images
     cleanup_engine_images
     reset_backupstore
-    reset_settings
+    # reset_settings
diff --git a/e2e/keywords/k8s.resource b/e2e/keywords/k8s.resource
index eb4075836..63bd5b44d 100644
--- a/e2e/keywords/k8s.resource
+++ b/e2e/keywords/k8s.resource
@@ -72,6 +72,10 @@ Drain volume of ${workload_kind} ${workload_id} volume node
 Uncordon the drained node
     uncordon_node    ${drained_node}
 
+Uncordon node ${node_id}
+    ${node_name} =    get_node_by_index    ${node_id}
+    uncordon_node    ${node_name}
+
 Cordon node ${node_id}
     ${node_name} =    get_node_by_index    ${node_id}
     cordon_node    ${node_name}
diff --git a/e2e/keywords/volume.resource b/e2e/keywords/volume.resource
index ee23739fc..87f183a80 100644
--- a/e2e/keywords/volume.resource
+++ b/e2e/keywords/volume.resource
@@ -376,3 +376,27 @@ Wait for volume ${volume_id} restoration from backup ${backup_id} in another clu
 Volume ${volume_id} setting ${setting_name} should be ${setting_value}
     ${volume_name} =    generate_name_with_suffix    volume    ${volume_id}
     validate_volume_setting    ${volume_name}    ${setting_name}    ${setting_value}
+
+Assert DM device for volume ${volume_id} ${condition} exist on node ${node_id}
+    ${volume_name} =    generate_name_with_suffix    volume    ${volume_id}
+    ${node_name} =    get_node_by_index    ${node_id}
+    ${dm_devices} =    list_dm_devices_on_node    ${node_name}
+    IF    '${condition}' == 'does'
+        Should Contain    ${dm_devices}    ${volume_name}
+    ELSE IF    '${condition}' == 'not'
+        Should Not Contain    ${dm_devices}    ${volume_name}
+    ELSE
+        Fail    Invalid condition: ${condition}
+    END
+
+Assert device for volume ${volume_id} ${condition} exist on node ${node_id}
+    ${volume_name} =    generate_name_with_suffix    volume    ${volume_id}
+    ${node_name} =    get_node_by_index    ${node_id}
+    ${devices} =    list_volume_devices_on_node    ${node_name}
+    IF    '${condition}' == 'does'
+        Should Contain    ${devices}    ${volume_name}
+    ELSE IF    '${condition}' == 'not'
+        Should Not Contain    ${devices}    ${volume_name}
+    ELSE
+        Fail    Invalid condition: ${condition}
+    END
diff --git a/e2e/libs/keywords/node_keywords.py b/e2e/libs/keywords/node_keywords.py
index 9e120de78..a992bde54 100644
--- a/e2e/libs/keywords/node_keywords.py
+++ b/e2e/libs/keywords/node_keywords.py
@@ -75,3 +75,9 @@ def wait_for_disk_not_in_pressure(self, node_name, disk_name):
 
     def get_disk_uuid(self, node_name, disk_name):
         return self.node.get_disk_uuid(node_name, disk_name)
+
+    def list_dm_devices_on_node(self, node_name):
+        return self.node.list_dm_devices(node_name)
+
+    def list_volume_devices_on_node(self, node_name):
+        return self.node.list_volume_devices(node_name)
diff --git a/e2e/libs/node/node.py b/e2e/libs/node/node.py
index fd2238a5e..cf62a1a7c 100644
--- a/e2e/libs/node/node.py
+++ b/e2e/libs/node/node.py
@@ -287,3 +287,13 @@ def wait_for_node_up(self, node_name):
                 break
             time.sleep(self.retry_interval)
         assert up, f"Waiting for node {node_name} up failed: {node.status.conditions}"
+
+    def list_dm_devices(self, node_name):
+        cmd = "dmsetup ls | awk '{print $1}'"
+        res = NodeExec(node_name).issue_cmd(cmd)
+        return res
+
+    def list_volume_devices(self, node_name):
+        cmd = "ls /dev/longhorn/"
+        res = NodeExec(node_name).issue_cmd(cmd)
+        return res
diff --git a/e2e/libs/node_exec/node_exec.py b/e2e/libs/node_exec/node_exec.py
index 56ecfa6cd..8e52693b6 100644
--- a/e2e/libs/node_exec/node_exec.py
+++ b/e2e/libs/node_exec/node_exec.py
@@ -100,6 +100,12 @@ def launch_pod(self):
                     "operator": "Equal",
                     "value": "true",
                     "effect": "NoExecute"
+                },
+                # Allow scheduling on a cordoned node so commands can still be executed on its host.
+                {
+                    "key": "node.kubernetes.io/unschedulable",
+                    "operator": "Exists",
+                    "effect": "NoSchedule"
                 }],
                 'containers': [{
                     'image': 'ubuntu:16.04',
diff --git a/e2e/tests/regression/test_v2.robot b/e2e/tests/regression/test_v2.robot
index c2ef068dd..e1e271a8e 100644
--- a/e2e/tests/regression/test_v2.robot
+++ b/e2e/tests/regression/test_v2.robot
@@ -14,6 +14,7 @@ Resource    ../keywords/setting.resource
 Resource    ../keywords/node.resource
 Resource    ../keywords/host.resource
 Resource    ../keywords/longhorn.resource
+Resource    ../keywords/k8s.resource
 
 Test Setup    Set test environment
 Test Teardown    Cleanup test resources
@@ -70,3 +71,48 @@ V2 Volume Should Block Trim When Volume Is Degraded
         And Check deployment 0 works
        Then Trim deployment 0 volume should pass
     END
+
+V2 Volume Should Cleanup Resources When Instance Manager Is Deleted
+    [Tags]    coretest
+    [Documentation]    Verify that v2 volumes clean up resources when their instance manager
+    ...    is deleted, and ensure this process does not impact v1 volumes.
+    ...
+    ...    Issue: https://github.com/longhorn/longhorn/issues/9959
+
+    When Create volume 0 with    dataEngine=v2
+    And Create volume 1 with    dataEngine=v2
+    And Create volume 2 with    dataEngine=v1
+    And Attach volume 0 to node 0
+    And Attach volume 1 to node 0
+    And Attach volume 2 to node 0
+    And Wait for volume 0 healthy
+    And Wait for volume 1 healthy
+    And Wait for volume 2 healthy
+    And Write data to volume 0
+    And Write data to volume 1
+    And Write data to volume 2
+
+    FOR    ${i}    IN RANGE    ${LOOP_COUNT}
+        When Cordon node 0
+        And Delete instance-manager of volume 0
+
+        Then Assert DM device for volume 0 not exist on node 0
+        And Assert DM device for volume 1 not exist on node 0
+        And Assert device for volume 0 not exist on node 0
+        And Assert device for volume 1 not exist on node 0
+        And Assert device for volume 2 does exist on node 0
+
+        When Uncordon node 0
+        And Wait for volume 0 healthy
+        And Wait for volume 1 healthy
+        And Wait for volume 2 healthy
+
+        Then Assert DM device for volume 0 does exist on node 0
+        And Assert DM device for volume 1 does exist on node 0
+        And Assert device for volume 0 does exist on node 0
+        And Assert device for volume 1 does exist on node 0
+        And Assert device for volume 2 does exist on node 0
+        And Check volume 0 data is intact
+        And Check volume 1 data is intact
+        And Check volume 2 data is intact
+    END