diff --git a/e2e/keywords/common.resource b/e2e/keywords/common.resource
index 05f3bd0186..e38dfa5445 100644
--- a/e2e/keywords/common.resource
+++ b/e2e/keywords/common.resource
@@ -1,6 +1,7 @@
 *** Settings ***
 Documentation    Common keywords
 
+Library    Collections
 Library    OperatingSystem
 Library    ../libs/keywords/common_keywords.py
 Library    ../libs/keywords/deployment_keywords.py
@@ -38,7 +39,10 @@ Set test environment
     END
 
 Cleanup test resources
-    Run keyword And Ignore Error    power_on_node_by_name    ${powered_off_node}
+    FOR    ${powered_off_node}    IN    @{powered_off_nodes}
+        Run keyword And Ignore Error    power_on_node_by_name    ${powered_off_node}
+        Remove Values From List    ${powered_off_nodes}    ${powered_off_node}
+    END
     uncordon_all_nodes
     cleanup_control_plane_network_latency
     reset_node_schedule
diff --git a/e2e/keywords/host.resource b/e2e/keywords/host.resource
index e686aec164..07483b2132 100644
--- a/e2e/keywords/host.resource
+++ b/e2e/keywords/host.resource
@@ -1,6 +1,7 @@
 *** Settings ***
 Documentation    Physical Node Keywords
 
+Library    Collections
 Library    ../libs/keywords/common_keywords.py
 Library    ../libs/keywords/host_keywords.py
 Library    ../libs/keywords/network_keywords.py
@@ -34,11 +35,13 @@ Restart cluster
     reboot_all_nodes
     setup_control_plane_network_latency
 
-Power on off node
-    Run keyword And Ignore Error
-    ...    power_on_node_by_name    ${powered_off_node}
+Power on off nodes
+    FOR    ${powered_off_node}    IN    @{powered_off_nodes}
+        Run keyword And Ignore Error    power_on_node_by_name    ${powered_off_node}
+        Remove Values From List    ${powered_off_nodes}    ${powered_off_node}
+    END
 
 Power off node ${node_id}
     ${powered_off_node} =    get_node_by_index    ${node_id}
+    Append to list    ${powered_off_nodes}    ${powered_off_node}
     power_off_node_by_name    ${powered_off_node}
-    Set Test Variable    ${powered_off_node}
\ No newline at end of file
diff --git a/e2e/keywords/sharemanager.resource b/e2e/keywords/sharemanager.resource
index 3e8026de2d..c5d4d51745 100644
--- a/e2e/keywords/sharemanager.resource
+++ b/e2e/keywords/sharemanager.resource
@@ -26,6 +26,11 @@ Delete sharemanager pod of deployment ${deployment_id} and wait for recreation
     ${volume_name} =    get_workload_volume_name    ${deployment_name}
     delete_sharemanager_pod_and_wait_for_recreation    ${volume_name}
 
+Wait for sharemanager pod of deployment ${deployment_id} restart
+    ${deployment_name} =    generate_name_with_suffix    deployment    ${deployment_id}
+    ${volume_name} =    get_workload_volume_name    ${deployment_name}
+    wait_for_sharemanager_pod_restart    ${volume_name}
+
 Wait for sharemanager pod of deployment ${deployment_id} running
     ${deployment_name} =    generate_name_with_suffix    deployment    ${deployment_id}
     ${volume_name} =    get_workload_volume_name    ${deployment_name}
diff --git a/e2e/keywords/variables.resource b/e2e/keywords/variables.resource
new file mode 100644
index 0000000000..c213dcabc4
--- /dev/null
+++ b/e2e/keywords/variables.resource
@@ -0,0 +1,13 @@
+*** Settings ***
+Documentation    Global Variables
+
+*** Variables ***
+${LOOP_COUNT}    1
+${RETRY_COUNT}    300
+${RETRY_INTERVAL}    1
+${VOLUME_TYPE}    RWO
+${CONTROL_PLANE_NODE_NETWORK_LATENCY_IN_MS}    0
+${RWX_VOLUME_FAST_FAILOVER}    false
+${DATA_ENGINE}    v1
+
+@{powered_off_nodes}=
diff --git a/e2e/keywords/workload.resource b/e2e/keywords/workload.resource
index a81ed33b2f..18bae1c0e1 100644
--- a/e2e/keywords/workload.resource
+++ b/e2e/keywords/workload.resource
@@ -46,9 +46,18 @@ Power off volume node of ${workload_kind} ${workload_id}
     ${workload_name} =    generate_name_with_suffix    ${workload_kind}    ${workload_id}
     ${volume_name} =    get_workload_volume_name    ${workload_name}
     ${powered_off_node} =    get_volume_node    ${volume_name}
+    Append to list    ${powered_off_nodes}    ${powered_off_node}
     ${last_volume_node} =    get_volume_node    ${volume_name}
     power_off_volume_node    ${volume_name}
-    Set Test Variable    ${powered_off_node}
+    Set Test Variable    ${last_volume_node}
+
+Power off volume node of ${workload_kind} ${workload_id} without waiting
+    ${workload_name} =    generate_name_with_suffix    ${workload_kind}    ${workload_id}
+    ${volume_name} =    get_workload_volume_name    ${workload_name}
+    ${powered_off_node} =    get_volume_node    ${volume_name}
+    Append to list    ${powered_off_nodes}    ${powered_off_node}
+    ${last_volume_node} =    get_volume_node    ${volume_name}
+    power_off_volume_node    ${volume_name}    waiting=False
     Set Test Variable    ${last_volume_node}
 
 Reboot volume node of ${workload_kind} ${workload_id}
diff --git a/e2e/libs/host/aws.py b/e2e/libs/host/aws.py
index 2f2148b7f4..bcce99086d 100644
--- a/e2e/libs/host/aws.py
+++ b/e2e/libs/host/aws.py
@@ -68,14 +68,15 @@ def reboot_all_worker_nodes(self, shut_down_time_in_sec=NODE_REBOOT_DOWN_TIME_SE
         waiter.wait(InstanceIds=instance_ids)
         logging(f"Started instances")
 
-    def power_off_node(self, power_off_node_name):
+    def power_off_node(self, power_off_node_name, waiting=True):
         instance_ids = [self.mapping[power_off_node_name]]
         resp = self.aws_client.stop_instances(InstanceIds=instance_ids, Force=True)
         assert resp['ResponseMetadata']['HTTPStatusCode'] == 200, f"Failed to stop instances {instance_ids} response: {resp}"
         logging(f"Stopping instances {instance_ids}")
-        waiter = self.aws_client.get_waiter('instance_stopped')
-        waiter.wait(InstanceIds=instance_ids)
-        logging(f"Stopped instances")
+        if waiting:
+            waiter = self.aws_client.get_waiter('instance_stopped')
+            waiter.wait(InstanceIds=instance_ids)
+            logging(f"Stopped instances")
 
     def power_on_node(self, power_on_node_name):
         instance_ids = [self.mapping[power_on_node_name]]
diff --git a/e2e/libs/host/base.py b/e2e/libs/host/base.py
index c9a30bc463..323cc4867e 100644
--- a/e2e/libs/host/base.py
+++ b/e2e/libs/host/base.py
@@ -23,7 +23,7 @@ def reboot_all_worker_nodes(self, shut_down_time_in_sec):
         return NotImplemented
 
     @abstractmethod
-    def power_off_node(self, node_name):
+    def power_off_node(self, node_name, waiting):
         return NotImplemented
 
     @abstractmethod
diff --git a/e2e/libs/host/harvester.py b/e2e/libs/host/harvester.py
index 2a1e26a772..f856cd983d 100644
--- a/e2e/libs/host/harvester.py
+++ b/e2e/libs/host/harvester.py
@@ -53,7 +53,7 @@ def reboot_all_worker_nodes(self, shut_down_time_in_sec):
         for node_name in node_names:
             self.power_on_node(node_name)
 
-    def power_off_node(self, node_name):
+    def power_off_node(self, node_name, waiting=True):
         vm_id = self.mapping[node_name]
 
         url = f"{self.url}/{vm_id}"
@@ -68,6 +68,9 @@ def power_off_node(self, node_name):
             logging(f"Stopping vm failed with error {e}")
         logging(f"Stopping vm {vm_id}")
 
+        if not waiting:
+            return
+
         stopped = False
         for i in range(self.retry_count):
             logging(f"Waiting for vm {vm_id} stopped ... ({i})")
diff --git a/e2e/libs/keywords/host_keywords.py b/e2e/libs/keywords/host_keywords.py
index 99d6cc4a67..92a3aee5e7 100644
--- a/e2e/libs/keywords/host_keywords.py
+++ b/e2e/libs/keywords/host_keywords.py
@@ -46,10 +46,10 @@ def reboot_node_by_name(self, node_name, downtime_in_min=1):
         logging(f'Rebooting node {node_name} with downtime {reboot_down_time_sec} seconds')
         self.host.reboot_node(node_name, reboot_down_time_sec)
 
-    def power_off_volume_node(self, volume_name):
+    def power_off_volume_node(self, volume_name, waiting=True):
         node_id = self.volume_keywords.get_node_id_by_replica_locality(volume_name, "volume node")
-        logging(f'Power off volume {volume_name} node {node_id}')
-        self.host.power_off_node(node_id)
+        logging(f'Power off volume {volume_name} node {node_id} with waiting = {waiting}')
+        self.host.power_off_node(node_id, waiting)
 
     def power_on_node_by_name(self, node_name):
         self.host.power_on_node(node_name)
diff --git a/e2e/libs/keywords/sharemanager_keywords.py b/e2e/libs/keywords/sharemanager_keywords.py
index b541f5b26b..7819c60df5 100644
--- a/e2e/libs/keywords/sharemanager_keywords.py
+++ b/e2e/libs/keywords/sharemanager_keywords.py
@@ -67,12 +67,32 @@ def delete_sharemanager_pod_and_wait_for_recreation(self, name):
 
         assert False, f"sharemanager pod {sharemanager_pod_name} not recreated"
 
+    def wait_for_sharemanager_pod_restart(self, name):
+        sharemanager_pod_name = "share-manager-" + name
+        sharemanager_pod = get_pod(sharemanager_pod_name, "longhorn-system")
+        last_creation_time = sharemanager_pod.metadata.creation_timestamp
+
+        retry_count, retry_interval = get_retry_count_and_interval()
+        for i in range(retry_count):
+            logging(f"Waiting for sharemanager for volume {name} restart ... ({i})")
+            time.sleep(retry_interval)
+            sharemanager_pod = get_pod(sharemanager_pod_name, "longhorn-system")
+            if sharemanager_pod == None:
+                continue
+            creation_time = sharemanager_pod.metadata.creation_timestamp
+            logging(f"Getting new sharemanager which is created at {creation_time}, and old one is created at {last_creation_time}")
+            if creation_time > last_creation_time:
+                return
+
+        assert False, f"sharemanager pod {sharemanager_pod_name} isn't restarted"
+
     def wait_for_share_manager_pod_running(self, name):
         sharemanager_pod_name = "share-manager-" + name
         retry_count, retry_interval = get_retry_count_and_interval()
         for i in range(retry_count):
             sharemanager_pod = get_pod(sharemanager_pod_name, "longhorn-system")
+            logging(f"Waiting for sharemanager for volume {name} running, currently {sharemanager_pod.status.phase} ... ({i})")
             if sharemanager_pod.status.phase == "Running":
                 return
diff --git a/e2e/tests/negative/cluster_restart.robot b/e2e/tests/negative/cluster_restart.robot
index a4687830f3..02626de0c9 100644
--- a/e2e/tests/negative/cluster_restart.robot
+++ b/e2e/tests/negative/cluster_restart.robot
@@ -3,6 +3,7 @@ Documentation    Negative Test Cases
 
 Test Tags    negative    cluster
 
+Resource    ../keywords/variables.resource
 Resource    ../keywords/common.resource
 Resource    ../keywords/deployment.resource
 Resource    ../keywords/longhorn.resource
@@ -16,15 +17,6 @@ Resource    ../keywords/setting.resource
 Test Setup    Set test environment
 Test Teardown    Cleanup test resources
 
-*** Variables ***
-${LOOP_COUNT}    1
-${RETRY_COUNT}    300
-${RETRY_INTERVAL}    1
-${CONTROL_PLANE_NODE_NETWORK_LATENCY_IN_MS}    0
-${RWX_VOLUME_FAST_FAILOVER}    false
-${DATA_ENGINE}    v1
-
-
 *** Test Cases ***
 Restart Cluster While Workload Heavy Writing
     Given Set setting rwx-volume-fast-failover to ${RWX_VOLUME_FAST_FAILOVER}
diff --git a/e2e/tests/negative/component_resilience.robot b/e2e/tests/negative/component_resilience.robot
index fa45633760..3d959ed4db 100644
--- a/e2e/tests/negative/component_resilience.robot
+++ b/e2e/tests/negative/component_resilience.robot
@@ -3,6 +3,7 @@ Documentation    Negative Test Cases
 
 Test Tags    negative
 
+Resource    ../keywords/variables.resource
 Resource    ../keywords/common.resource
 Resource    ../keywords/volume.resource
 Resource    ../keywords/backing_image.resource
@@ -18,13 +19,6 @@ Resource    ../keywords/sharemanager.resource
 Test Setup    Set test environment
 Test Teardown    Cleanup test resources
 
-*** Variables ***
-${LOOP_COUNT}    1
-${RETRY_COUNT}    300
-${RETRY_INTERVAL}    1
-${RWX_VOLUME_FAST_FAILOVER}    false
-${DATA_ENGINE}    v1
-
 *** Keywords ***
 Delete instance-manager of volume ${volume_id} and wait for recover
     When Delete instance-manager of volume ${volume_id}
diff --git a/e2e/tests/negative/kubelet_restart.robot b/e2e/tests/negative/kubelet_restart.robot
index 116630dbf4..2d56a0db6e 100644
--- a/e2e/tests/negative/kubelet_restart.robot
+++ b/e2e/tests/negative/kubelet_restart.robot
@@ -3,6 +3,7 @@ Documentation    Negative Test Cases
 
 Test Tags    negative
 
+Resource    ../keywords/variables.resource
 Resource    ../keywords/common.resource
 Resource    ../keywords/storageclass.resource
 Resource    ../keywords/persistentvolumeclaim.resource
@@ -14,13 +15,6 @@ Resource    ../keywords/setting.resource
 Test Setup    Set test environment
 Test Teardown    Cleanup test resources
 
-*** Variables ***
-${LOOP_COUNT}    1
-${RETRY_COUNT}    300
-${RETRY_INTERVAL}    1
-${RWX_VOLUME_FAST_FAILOVER}    false
-${DATA_ENGINE}    v1
-
 *** Test Cases ***
 Restart Volume Node Kubelet While Workload Heavy Writing
     Given Set setting rwx-volume-fast-failover to ${RWX_VOLUME_FAST_FAILOVER}
diff --git a/e2e/tests/negative/network_disconnect.robot b/e2e/tests/negative/network_disconnect.robot
index 3e0b786c54..142492c914 100644
--- a/e2e/tests/negative/network_disconnect.robot
+++ b/e2e/tests/negative/network_disconnect.robot
@@ -3,6 +3,7 @@ Documentation    Negative Test Cases
 
 Test Tags    negative
 
+Resource    ../keywords/variables.resource
 Resource    ../keywords/volume.resource
 Resource    ../keywords/storageclass.resource
 Resource    ../keywords/statefulset.resource
@@ -14,14 +15,6 @@ Resource    ../keywords/setting.resource
 Test Setup    Set test environment
 Test Teardown    Cleanup test resources
 
-*** Variables ***
-${LOOP_COUNT}    1
-${LATENCY_IN_MS}    0
-${RETRY_COUNT}    300
-${RETRY_INTERVAL}    1
-${RWX_VOLUME_FAST_FAILOVER}    false
-${DATA_ENGINE}    v1
-
 *** Test Cases ***
 Disconnect Volume Node Network While Workload Heavy Writing
     Given Set setting rwx-volume-fast-failover to ${RWX_VOLUME_FAST_FAILOVER}
diff --git a/e2e/tests/negative/node_delete.robot b/e2e/tests/negative/node_delete.robot
index 146a47badd..8ffadb7815 100644
--- a/e2e/tests/negative/node_delete.robot
+++ b/e2e/tests/negative/node_delete.robot
@@ -3,6 +3,7 @@ Documentation    Negative Test Cases
 
 Test Tags    negative
 
+Resource    ../keywords/variables.resource
 Resource    ../keywords/common.resource
 Resource    ../keywords/host.resource
 Resource    ../keywords/storageclass.resource
@@ -15,13 +16,6 @@ Resource    ../keywords/setting.resource
 Test Setup    Set test environment
 Test Teardown    Cleanup test resources
 
-*** Variables ***
-${LOOP_COUNT}    1
-${RETRY_COUNT}    300
-${RETRY_INTERVAL}    1
-${RWX_VOLUME_FAST_FAILOVER}    false
-${DATA_ENGINE}    v1
-
 *** Test Cases ***
 Delete Volume Node While Replica Rebuilding
     Given Set setting node-down-pod-deletion-policy to do-nothing
diff --git a/e2e/tests/negative/node_drain.robot b/e2e/tests/negative/node_drain.robot
index d8551fcab8..bdd1d5c454 100644
--- a/e2e/tests/negative/node_drain.robot
+++ b/e2e/tests/negative/node_drain.robot
@@ -3,6 +3,7 @@ Documentation    Negative Test Cases
 
 Test Tags    negative
 
+Resource    ../keywords/variables.resource
 Resource    ../keywords/common.resource
 Resource    ../keywords/storageclass.resource
 Resource    ../keywords/persistentvolumeclaim.resource
@@ -18,13 +19,6 @@ Resource    ../keywords/node.resource
 Test Setup    Set test environment
 Test Teardown    Cleanup test resources
 
-*** Variables ***
-${LOOP_COUNT}    1
-${RETRY_COUNT}    300
-${RETRY_INTERVAL}    1
-${RWX_VOLUME_FAST_FAILOVER}    false
-${DATA_ENGINE}    v1
-
 *** Test Cases ***
 Force Drain Volume Node While Replica Rebuilding
    Given Set setting rwx-volume-fast-failover to ${RWX_VOLUME_FAST_FAILOVER}
diff --git a/e2e/tests/negative/node_reboot.robot b/e2e/tests/negative/node_reboot.robot
index 36de6fe7e5..676982a379 100644
--- a/e2e/tests/negative/node_reboot.robot
+++ b/e2e/tests/negative/node_reboot.robot
@@ -3,6 +3,8 @@ Documentation    Negative Test Cases
 
 Test Tags    negative
 
+Resource    ../keywords/variables.resource
+Resource    ../keywords/sharemanager.resource
 Resource    ../keywords/common.resource
 Resource    ../keywords/deployment.resource
 Resource    ../keywords/longhorn.resource
@@ -18,16 +20,41 @@ Resource    ../keywords/setting.resource
 Test Setup    Set test environment
 Test Teardown    Cleanup test resources
 
-*** Variables ***
-${LOOP_COUNT}    1
-${RETRY_COUNT}    300
-${RETRY_INTERVAL}    1
-${VOLUME_TYPE}    RWO
-${CONTROL_PLANE_NODE_NETWORK_LATENCY_IN_MS}    0
-${RWX_VOLUME_FAST_FAILOVER}    false
-${DATA_ENGINE}    v1
-
 *** Test Cases ***
+Shutdown Volume Node And Test Auto Reattach To A New Node
+    Given Set setting node-down-pod-deletion-policy to delete-both-statefulset-and-deployment-pod
+    And Create storageclass longhorn-test with    dataEngine=${DATA_ENGINE}
+    And Create persistentvolumeclaim 0 using RWO volume with longhorn-test storageclass
+    And Create persistentvolumeclaim 1 using RWX volume with longhorn-test storageclass
+
+    And Create deployment 0 with persistentvolumeclaim 0
+    And Create deployment 1 with persistentvolumeclaim 1
+
+    And Wait for volume of deployment 0 healthy
+    And Wait for volume of deployment 1 healthy
+
+    And Write 100 MB data to file data.bin in deployment 0
+    And Write 100 MB data to file data.bin in deployment 1
+
+    When Power off volume node of deployment 0 without waiting
+    And Power off volume node of deployment 1 without waiting
+
+    Then Wait for sharemanager pod of deployment 1 restart
+    And Wait for sharemanager pod of deployment 1 running
+
+    And Wait for volume of deployment 0 attached and degraded
+    And Wait for volume of deployment 1 attached and degraded
+
+    And Wait for workloads pods stable
+    ...    deployment 0    deployment 1
+
+    And Check deployment 0 data in file data.bin is intact
+    And Check deployment 1 data in file data.bin is intact
+    And Check deployment 0 works
+    And Check deployment 1 works
+
+    And Power on off nodes
+
 Reboot Node One By One While Workload Heavy Writing
     [Tags]    reboot
     Given Set setting rwx-volume-fast-failover to ${RWX_VOLUME_FAST_FAILOVER}
@@ -289,7 +316,7 @@ Single Replica Node Down Deletion Policy do-nothing With RWO Volume Replica Loca
     And Power off volume node of deployment 0
     And Wait for deployment 0 pod stuck in Terminating on the original node
 
-    When Power on off node
+    When Power on off nodes
     And Wait for deployment 0 pods stable
     Then Check deployment 0 data in file data is intact
 
@@ -307,7 +334,7 @@ Single Replica Node Down Deletion Policy do-nothing With RWO Volume Replica Loca
     And Power off volume node of deployment 0
     And Wait for deployment 0 pod stuck in Terminating on the original node
 
-    When Power on off node
+    When Power on off nodes
     And Wait for deployment 0 pods stable
     Then Check deployment 0 data in file data is intact
 
@@ -328,7 +355,7 @@ Single Replica Node Down Deletion Policy delete-deployment-pod With RWO Volume R
     And Wait for deployment 0 pods stable
     Then Check deployment 0 data in file data is intact
 
-    And Power on off node
+    And Power on off nodes
 
 Single Replica Node Down Deletion Policy delete-deployment-pod With RWO Volume Replica Locate On Volume Node
     Given Create storageclass longhorn-test with    dataEngine=${DATA_ENGINE}
@@ -345,7 +372,7 @@ Single Replica Node Down Deletion Policy delete-deployment-pod With RWO Volume R
     Then Wait for volume of deployment 0 faulted
     And Wait for deployment 0 pod stuck in ContainerCreating on another node
 
-    When Power on off node
+    When Power on off nodes
     And Wait for deployment 0 pods stable
     And Check deployment 0 pod is Running on the original node
     Then Check deployment 0 data in file data is intact
@@ -366,7 +393,7 @@ Single Replica Node Down Deletion Policy delete-both-statefulset-and-deployment-
     And Wait for statefulset 0 pods stable
     Then Check statefulset 0 data in file data is intact
 
-    And Power on off node
+    And Power on off nodes
 
 Single Replica Node Down Deletion Policy delete-both-statefulset-and-deployment-pod With RWO Volume Replica Locate On Volume Node
     Given Create storageclass longhorn-test with    dataEngine=${DATA_ENGINE}
@@ -382,7 +409,7 @@ Single Replica Node Down Deletion Policy delete-both-statefulset-and-deployment-
     Then Wait for volume of statefulset 0 faulted
     And Wait for statefulset 0 pod stuck in ContainerCreating on another node
 
-    When Power on off node
+    When Power on off nodes
     And Wait for statefulset 0 pods stable
     And Check statefulset 0 pod is Running on the original node
     Then Check statefulset 0 data in file data is intact
diff --git a/e2e/tests/negative/pull_backup_from_another_longhorn.robot b/e2e/tests/negative/pull_backup_from_another_longhorn.robot
index 819350ad68..5b2de7b8b8 100644
--- a/e2e/tests/negative/pull_backup_from_another_longhorn.robot
+++ b/e2e/tests/negative/pull_backup_from_another_longhorn.robot
@@ -3,6 +3,7 @@ Documentation    Uninstallation Checks
 
 Test Tags    negative
 
+Resource    ../keywords/variables.resource
 Resource    ../keywords/common.resource
 Resource    ../keywords/setting.resource
 Resource    ../keywords/volume.resource
@@ -18,12 +19,6 @@ Library    ../libs/keywords/setting_keywords.py
 Test Setup    Set test environment
 Test Teardown    Cleanup test resources
 
-*** Variables ***
-${LOOP_COUNT}    1
-${RETRY_COUNT}    300
-${RETRY_INTERVAL}    1
-${DATA_ENGINE}    v1
-
 *** Test Cases ***
 Pull backup created by another Longhorn system
     [Documentation]    Pull backup created by another Longhorn system
diff --git a/e2e/tests/negative/replica_rebuilding.robot b/e2e/tests/negative/replica_rebuilding.robot
index 167dac8691..1a6cbf9ed1 100644
--- a/e2e/tests/negative/replica_rebuilding.robot
+++ b/e2e/tests/negative/replica_rebuilding.robot
@@ -3,6 +3,7 @@ Documentation    Negative Test Cases
 
 Test Tags    negative
 
+Resource    ../keywords/variables.resource
 Resource    ../keywords/common.resource
 Resource    ../keywords/host.resource
 Resource    ../keywords/volume.resource
@@ -14,12 +15,6 @@ Resource    ../keywords/workload.resource
 Test Setup    Set test environment
 Test Teardown    Cleanup test resources
 
-*** Variables ***
-${LOOP_COUNT}    1
-${RETRY_COUNT}    300
-${RETRY_INTERVAL}    1
-${DATA_ENGINE}    v1
-
 *** Test Cases ***
 Delete Replica While Replica Rebuilding
     Given Create volume 0 with    size=2Gi    numberOfReplicas=3    dataEngine=${DATA_ENGINE}
diff --git a/e2e/tests/negative/stress_cpu.robot b/e2e/tests/negative/stress_cpu.robot
index b9d0a65836..68047dc40e 100644
--- a/e2e/tests/negative/stress_cpu.robot
+++ b/e2e/tests/negative/stress_cpu.robot
@@ -3,6 +3,7 @@ Documentation    Negative Test Cases
 
 Test Tags    negative
 
+Resource    ../keywords/variables.resource
 Resource    ../keywords/common.resource
 Resource    ../keywords/persistentvolumeclaim.resource
 Resource    ../keywords/statefulset.resource
@@ -13,12 +14,7 @@ Resource    ../keywords/workload.resource
 Test Setup    Set test environment
 Test Teardown    Cleanup test resources
 
-*** Variables ***
-${LOOP_COUNT}    1
-${RETRY_COUNT}    300
-${RETRY_INTERVAL}    1
 *** Test Cases ***
-
 Stress Volume Node CPU When Replica Is Rebuilding
     Given Create volume 0 with    size=5Gi    numberOfReplicas=3
     And Attach volume 0
diff --git a/e2e/tests/negative/stress_filesystem.robot b/e2e/tests/negative/stress_filesystem.robot
index 85ec54de9f..094aa6bf3c 100644
--- a/e2e/tests/negative/stress_filesystem.robot
+++ b/e2e/tests/negative/stress_filesystem.robot
@@ -3,6 +3,7 @@ Documentation    Negative Test Cases
 
 Test Tags    negative
 
+Resource    ../keywords/variables.resource
 Resource    ../keywords/common.resource
 Resource    ../keywords/persistentvolumeclaim.resource
 Resource    ../keywords/statefulset.resource
@@ -13,13 +14,7 @@ Resource    ../keywords/workload.resource
 Test Setup    Set test environment
 Test Teardown    Cleanup test resources
 
-*** Variables ***
-${LOOP_COUNT}    1
-${RETRY_COUNT}    300
-${RETRY_INTERVAL}    1
-
 *** Test Cases ***
-
 Stress Volume Node Filesystem When Replica Is Rebuilding
     Given Create volume 0 with    size=5Gi    numberOfReplicas=3
     And Attach volume 0
diff --git a/e2e/tests/negative/stress_memory.robot b/e2e/tests/negative/stress_memory.robot
index 6f3a5c6b90..f566610d76 100644
--- a/e2e/tests/negative/stress_memory.robot
+++ b/e2e/tests/negative/stress_memory.robot
@@ -3,6 +3,7 @@ Documentation    Negative Test Cases
 
 Test Tags    negative
 
+Resource    ../keywords/variables.resource
 Resource    ../keywords/common.resource
 Resource    ../keywords/persistentvolumeclaim.resource
 Resource    ../keywords/statefulset.resource
@@ -13,13 +14,7 @@ Resource    ../keywords/workload.resource
 Test Setup    Set test environment
 Test Teardown    Cleanup test resources
 
-*** Variables ***
-${LOOP_COUNT}    1
-${RETRY_COUNT}    300
-${RETRY_INTERVAL}    1
-
 *** Test Cases ***
-
 Stress Volume Node Memory When Replica Is Rebuilding
     Given Create volume 0 with    size=5Gi    numberOfReplicas=3
     And Attach volume 0
diff --git a/e2e/tests/negative/test_backup_listing.robot b/e2e/tests/negative/test_backup_listing.robot
index 6e0c921b25..58c2661aae 100644
--- a/e2e/tests/negative/test_backup_listing.robot
+++ b/e2e/tests/negative/test_backup_listing.robot
@@ -2,8 +2,9 @@
 Documentation    Test backup listing
 ...              https://longhorn.github.io/longhorn-tests/manual/pre-release/stress/backup-listing/
 
-Test Tags    manual
+Test Tags    manual    negative
 
+Resource    ../keywords/variables.resource
 Resource    ../keywords/common.resource
 Resource    ../keywords/deployment.resource
 Resource    ../keywords/workload.resource
@@ -22,9 +23,6 @@ Test Teardown    Cleanup test resources
 
 *** Variables ***
 ${LOOP_COUNT}    1001
-${RETRY_COUNT}    300
-${RETRY_INTERVAL}    1
-${DATA_ENGINE}    v1
 
 *** Keywords ***
 Verify backup ${backup_id} count for ${workload_kind} ${workload_id} volume
@@ -142,7 +140,7 @@ Backup listing with more than 1000 backups
     And Volume 1 data should same as deployment 0 volume
 
 Backup listing of volume bigger than 200 Gi
-    [Tags]    manual    longhorn-8355
+    [Tags]    manual    longhorn-8355    large-size
     [Documentation]    Test backup bigger than 200 Gi
     Given Create persistentvolumeclaim 0 using RWO volume
     And Create deployment 0 with persistentvolumeclaim 0
@@ -152,7 +150,8 @@ Backup listing of volume bigger than 200 Gi
     And Create deployment 1 with volume 1
     Then Get deployment 1 volume data in file data
     And Volume 1 data should same as deployment 0 volume
-    Then Create pod 2 mount 250 GB volume 2
+    Then Create volume 2 from deployment 0 volume random backup
+    And Create pod 2 mount 250 GB volume 2
     And Write 210 GB large data to file 0 in pod 2
     Then Volume 2 backup 0 should be able to create
     Then Delete pod 2 and volume 2
diff --git a/e2e/tests/negative/uninstallation_checks.robot b/e2e/tests/negative/uninstallation_checks.robot
index 0d35658e28..c5d83cf26f 100644
--- a/e2e/tests/negative/uninstallation_checks.robot
+++ b/e2e/tests/negative/uninstallation_checks.robot
@@ -3,6 +3,7 @@ Documentation    Uninstallation Checks
 
 Test Tags    negative
 
+Resource    ../keywords/variables.resource
 Resource    ../keywords/common.resource
 Resource    ../keywords/setting.resource
 Resource    ../keywords/volume.resource
@@ -18,11 +19,6 @@ Library    ../libs/keywords/setting_keywords.py
 Test Setup    Set test environment
 Test Teardown    Cleanup test resources
 
-*** Variables ***
-${LOOP_COUNT}    1
-${RETRY_COUNT}    300
-${RETRY_INTERVAL}    1
-
 *** Test Cases ***
 Uninstallation Checks
     [Documentation]    Uninstallation Checks
diff --git a/e2e/tests/regression/test_backing_image.robot b/e2e/tests/regression/test_backing_image.robot
index 7eb1564a86..618b48dee8 100644
--- a/e2e/tests/regression/test_backing_image.robot
+++ b/e2e/tests/regression/test_backing_image.robot
@@ -3,6 +3,7 @@ Documentation    Backing Image Test Cases
 
 Test Tags    regression    backing_image
 
+Resource    ../keywords/variables.resource
 Resource    ../keywords/common.resource
 Resource    ../keywords/volume.resource
 Resource    ../keywords/backing_image.resource
@@ -10,12 +11,6 @@ Resource    ../keywords/backing_image.resource
 Test Setup    Set test environment
 Test Teardown    Cleanup test resources
 
-*** Variables ***
-${LOOP_COUNT}    1
-${RETRY_COUNT}    300
-${RETRY_INTERVAL}    1
-${DATA_ENGINE}    v1
-
 *** Test Cases ***
 Test Backing Image Basic Operation
     [Tags]    coretest
diff --git a/e2e/tests/regression/test_backup.robot b/e2e/tests/regression/test_backup.robot
index 7a793d0131..0e2257aab1 100644
--- a/e2e/tests/regression/test_backup.robot
+++ b/e2e/tests/regression/test_backup.robot
@@ -3,6 +3,7 @@ Documentation    Backup Test Cases
 
 Test Tags    regression
 
+Resource    ../keywords/variables.resource
 Resource    ../keywords/common.resource
 Resource    ../keywords/setting.resource
 Resource    ../keywords/volume.resource
@@ -16,12 +17,6 @@ Resource    ../keywords/backupstore.resource
 Test Setup    Set test environment
 Test Teardown    Cleanup test resources
 
-*** Variables ***
-${LOOP_COUNT}    1
-${RETRY_COUNT}    300
-${RETRY_INTERVAL}    1
-${DATA_ENGINE}    v1
-
 *** Keywords ***
 Snapshot PV PVC could not be created on DR volume 1
     Create snapshot 0 of volume 1 will fail
diff --git a/e2e/tests/regression/test_basic.robot b/e2e/tests/regression/test_basic.robot
index b0791e30c9..c8fa7d5060 100644
--- a/e2e/tests/regression/test_basic.robot
+++ b/e2e/tests/regression/test_basic.robot
@@ -3,6 +3,7 @@ Documentation    Basic Test Cases
 
 Test Tags    regression
 
+Resource    ../keywords/variables.resource
 Resource    ../keywords/common.resource
 Resource    ../keywords/node.resource
 Resource    ../keywords/setting.resource
@@ -19,12 +20,6 @@ Resource    ../keywords/node.resource
 Test Setup    Set test environment
 Test Teardown    Cleanup test resources
 
-*** Variables ***
-${LOOP_COUNT}    1
-${RETRY_COUNT}    300
-${RETRY_INTERVAL}    1
-${DATA_ENGINE}    v1
-
 *** Keywords ***
 Create volume with invalid name should fail
     [Arguments]    ${invalid_volume_name}
diff --git a/e2e/tests/regression/test_engine_image.robot b/e2e/tests/regression/test_engine_image.robot
index ed0e2dbdd7..55b01b94eb 100644
--- a/e2e/tests/regression/test_engine_image.robot
+++ b/e2e/tests/regression/test_engine_image.robot
@@ -3,6 +3,7 @@ Documentation    Engine Image Test Cases
 
 Test Tags    regression    engine_image
 
+Resource    ../keywords/variables.resource
 Resource    ../keywords/common.resource
 Resource    ../keywords/volume.resource
 Resource    ../keywords/engine_image.resource
@@ -10,12 +11,6 @@ Resource    ../keywords/engine_image.resource
 Test Setup    Set test environment
 Test Teardown    Cleanup test resources
 
-*** Variables ***
-${LOOP_COUNT}    1
-${RETRY_COUNT}    300
-${RETRY_INTERVAL}    1
-${DATA_ENGINE}    v1
-
 *** Test Cases ***
 Test Replica Rebuilding After Engine Upgrade
     [Tags]    coretest
diff --git a/e2e/tests/regression/test_ha.robot b/e2e/tests/regression/test_ha.robot
index bb86936c57..a58818e3d9 100644
--- a/e2e/tests/regression/test_ha.robot
+++ b/e2e/tests/regression/test_ha.robot
@@ -3,6 +3,7 @@ Documentation    HA Test Cases
 
 Test Tags    regression
 
+Resource    ../keywords/variables.resource
 Resource    ../keywords/common.resource
 Resource    ../keywords/volume.resource
 Resource    ../keywords/setting.resource
@@ -14,11 +15,6 @@ Resource    ../keywords/statefulset.resource
 Test Setup    Set test environment
 Test Teardown    Cleanup test resources
 
-*** Variables ***
-${LOOP_COUNT}    1
-${RETRY_COUNT}    300
-${RETRY_INTERVAL}    1
-
 *** Test Cases ***
 Disrupt Data Plane Traffic For Less Than Long Engine Replica Timeout
     Given Set setting engine-replica-timeout to 15
diff --git a/e2e/tests/regression/test_migration.robot b/e2e/tests/regression/test_migration.robot
index c9ce064ebf..850f27082c 100644
--- a/e2e/tests/regression/test_migration.robot
+++ b/e2e/tests/regression/test_migration.robot
@@ -3,6 +3,7 @@ Documentation    Migration Test Cases
 
 Test Tags    regression
 
+Resource    ../keywords/variables.resource
 Resource    ../keywords/common.resource
 Resource    ../keywords/deployment.resource
 Resource    ../keywords/persistentvolumeclaim.resource
@@ -13,12 +14,6 @@ Resource    ../keywords/volume.resource
 Test Setup    Set test environment
 Test Teardown    Cleanup test resources
 
-*** Variables ***
-${LOOP_COUNT}    1
-${RETRY_COUNT}    300
-${RETRY_INTERVAL}    1
-${DATA_ENGINE}    v1
-
 *** Test Cases ***
 Test Migration Confirm
     [Tags]    coretest    migration
diff --git a/e2e/tests/regression/test_replica.robot b/e2e/tests/regression/test_replica.robot
index 1555d3e5bd..633b0b6320 100644
--- a/e2e/tests/regression/test_replica.robot
+++ b/e2e/tests/regression/test_replica.robot
@@ -3,6 +3,7 @@ Documentation    Replica Test Cases
 
 Test Tags    regression
 
+Resource    ../keywords/variables.resource
 Resource    ../keywords/common.resource
 Resource    ../keywords/volume.resource
 Resource    ../keywords/setting.resource
@@ -13,12 +14,6 @@ Resource    ../keywords/workload.resource
 Test Setup    Set test environment
 Test Teardown    Cleanup test resources
 
-*** Variables ***
-${LOOP_COUNT}    1
-${RETRY_COUNT}    300
-${RETRY_INTERVAL}    1
-${DATA_ENGINE}    v1
-
 *** Test Cases ***
 Test Replica Rebuilding Per Volume Limit
     [Tags]    coretest
diff --git a/e2e/tests/regression/test_scheduling.robot b/e2e/tests/regression/test_scheduling.robot
index b9520edd8a..b26c0b5339 100644
--- a/e2e/tests/regression/test_scheduling.robot
+++ b/e2e/tests/regression/test_scheduling.robot
@@ -3,6 +3,7 @@ Documentation    Scheduling Test Cases
 
 Test Tags    regression
 
+Resource    ../keywords/variables.resource
 Resource    ../keywords/common.resource
 Resource    ../keywords/volume.resource
 Resource    ../keywords/replica.resource
@@ -16,12 +17,6 @@ Resource    ../keywords/node.resource
 Test Setup    Set test environment
 Test Teardown    Cleanup test resources
 
-*** Variables ***
-${LOOP_COUNT}    1
-${RETRY_COUNT}    300
-${RETRY_INTERVAL}    1
-${DATA_ENGINE}    v1
-
 *** Test Cases ***
 Test Soft Anti Affinity Scheduling
     [Tags]    coretest
diff --git a/e2e/tests/regression/test_settings.robot b/e2e/tests/regression/test_settings.robot
index bb28480533..1d9a7093e7 100644
--- a/e2e/tests/regression/test_settings.robot
+++ b/e2e/tests/regression/test_settings.robot
@@ -3,6 +3,7 @@ Documentation    Settings Test Cases
 
 Test Tags    regression
 
+Resource    ../keywords/variables.resource
 Resource    ../keywords/common.resource
 Resource    ../keywords/volume.resource
 Resource    ../keywords/setting.resource
@@ -15,12 +16,6 @@ Resource    ../keywords/sharemanager.resource
 Test Setup    Set test environment
 Test Teardown    Cleanup test resources
 
-*** Variables ***
-${LOOP_COUNT}    1
-${RETRY_COUNT}    300
-${RETRY_INTERVAL}    1
-${DATA_ENGINE}    v1
-
 *** Test Cases ***
 Test Setting Concurrent Rebuild Limit
     [Documentation]    Test if setting Concurrent Replica Rebuild Per Node Limit works correctly.
diff --git a/e2e/tests/regression/test_v2.robot b/e2e/tests/regression/test_v2.robot
index 6554876994..c2ef068dd1 100644
--- a/e2e/tests/regression/test_v2.robot
+++ b/e2e/tests/regression/test_v2.robot
@@ -3,6 +3,7 @@ Documentation    v2 Data Engine Test Cases
 
 Test Tags    regression    v2
 
+Resource    ../keywords/variables.resource
 Resource    ../keywords/common.resource
 Resource    ../keywords/storageclass.resource
 Resource    ../keywords/persistentvolumeclaim.resource
@@ -17,11 +18,6 @@ Resource    ../keywords/longhorn.resource
 Test Setup    Set test environment
 Test Teardown    Cleanup test resources
 
-*** Variables ***
-${LOOP_COUNT}    1
-${RETRY_COUNT}    300
-${RETRY_INTERVAL}    1
-
 *** Test Cases ***
 Test V2 Volume Basic
     [Tags]    coretest
diff --git a/e2e/tests/regression/test_volume.robot b/e2e/tests/regression/test_volume.robot
index c55aa49586..c06df66dd1 100644
--- a/e2e/tests/regression/test_volume.robot
+++ b/e2e/tests/regression/test_volume.robot
@@ -3,6 +3,7 @@ Documentation    Volume Test Cases
 
 Test Tags    regression
 
+Resource    ../keywords/variables.resource
 Resource    ../keywords/common.resource
 Resource    ../keywords/deployment.resource
 Resource    ../keywords/longhorn.resource
@@ -15,12 +16,6 @@ Resource    ../keywords/volume.resource
 Test Setup    Set test environment
 Test Teardown    Cleanup test resources
 
-*** Variables ***
-${LOOP_COUNT}    1
-${RETRY_COUNT}    300
-${RETRY_INTERVAL}    1
-${DATA_ENGINE}    v1
-
 *** Keywords ***
 Create volume with invalid name should fail
     [Arguments]    ${invalid_volume_name}