diff --git a/e2e/run.sh b/e2e/run.sh
index 0fb575afe5..5c45f86c79 100755
--- a/e2e/run.sh
+++ b/e2e/run.sh
@@ -1,3 +1,3 @@
 #!/bin/bash

-robot -P ./libs -d /tmp/test-report "$@" ./tests
\ No newline at end of file
+robot -x junit.xml -P ./libs -d /tmp/test-report "$@" ./tests
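With `-x junit.xml`, Robot Framework emits an xUnit-compatible report next to its usual output.xml, log.html, and report.html. Because run.sh forwards extra arguments to robot via "$@", the suites regrouped below can still be selected by tag; a hypothetical invocation (paths assume the e2e directory):

    cd e2e
    # Run only the negative suites (now grouped under ./tests/negative) and
    # produce /tmp/test-report/junit.xml alongside Robot's standard reports.
    ./run.sh --include negative
    # Tag patterns can be AND-combined, e.g. only the cluster-restart cases:
    ./run.sh --include negativeANDcluster
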
diff --git a/e2e/tests/cluster_restart.robot b/e2e/tests/negative/cluster_restart.robot
similarity index 99%
rename from e2e/tests/cluster_restart.robot
rename to e2e/tests/negative/cluster_restart.robot
index 3463201253..a4687830f3 100644
--- a/e2e/tests/cluster_restart.robot
+++ b/e2e/tests/negative/cluster_restart.robot
@@ -1,7 +1,7 @@
 *** Settings ***
 Documentation    Negative Test Cases

-Test Tags    negative
+Test Tags    negative    cluster

 Resource    ../keywords/common.resource
 Resource    ../keywords/deployment.resource
diff --git a/e2e/tests/kubelet_restart.robot b/e2e/tests/negative/kubelet_restart.robot
similarity index 100%
rename from e2e/tests/kubelet_restart.robot
rename to e2e/tests/negative/kubelet_restart.robot
diff --git a/e2e/tests/test_cases/node_disconnection_test.robot b/e2e/tests/negative/network_disconnect.robot
similarity index 77%
rename from e2e/tests/test_cases/node_disconnection_test.robot
rename to e2e/tests/negative/network_disconnect.robot
index af448a2e14..7cab8c6226 100644
--- a/e2e/tests/test_cases/node_disconnection_test.robot
+++ b/e2e/tests/negative/network_disconnect.robot
@@ -1,27 +1,60 @@
 *** Settings ***
-Documentation    Node disconnection test
-...              https://github.com/longhorn/longhorn/issues/1545
+Documentation    Negative Test Cases

-Test Tags    manual_test_case
+Test Tags    negative

-Resource    ../keywords/common.resource
-Resource    ../keywords/storageclass.resource
 Resource    ../keywords/volume.resource
-Resource    ../keywords/setting.resource
-Resource    ../keywords/network.resource
+Resource    ../keywords/storageclass.resource
 Resource    ../keywords/statefulset.resource
 Resource    ../keywords/workload.resource
+Resource    ../keywords/common.resource
+Resource    ../keywords/network.resource
+Resource    ../keywords/setting.resource

 Test Setup    Set test environment
 Test Teardown    Cleanup test resources

 *** Variables ***
-${LOOP_COUNT}    3
+${LOOP_COUNT}    1
+${LATENCY_IN_MS}    0
 ${RETRY_COUNT}    300
 ${RETRY_INTERVAL}    1
+${RWX_VOLUME_FAST_FAILOVER}    false
 ${DATA_ENGINE}    v1

 *** Test Cases ***
+Disconnect Volume Node Network While Workload Heavy Writing
+    Given Set setting rwx-volume-fast-failover to ${RWX_VOLUME_FAST_FAILOVER}
+    And Create storageclass longhorn-test with    dataEngine=${DATA_ENGINE}
+    And Create statefulset 0 using RWO volume with longhorn-test storageclass
+    And Create statefulset 1 using RWX volume with longhorn-test storageclass
+    FOR    ${i}    IN RANGE    ${LOOP_COUNT}
+        And Keep writing data to pod of statefulset 0
+        And Keep writing data to pod of statefulset 1
+        When Disconnect volume nodes network for 20 seconds    statefulset 0    statefulset 1
+        And Wait for volume of statefulset 0 healthy
+        And Wait for volume of statefulset 1 healthy
+        And Wait for workloads pods stable    statefulset 0    statefulset 1
+        Then Check statefulset 0 works
+        And Check statefulset 1 works
+    END
+
+Disconnect Volume Node Network For More Than Pod Eviction Timeout While Workload Heavy Writing
+    Given Set setting rwx-volume-fast-failover to ${RWX_VOLUME_FAST_FAILOVER}
+    And Create storageclass longhorn-test with    dataEngine=${DATA_ENGINE}
+    And Create statefulset 0 using RWO volume with longhorn-test storageclass
+    And Create statefulset 1 using RWX volume with longhorn-test storageclass
+    FOR    ${i}    IN RANGE    ${LOOP_COUNT}
+        And Keep writing data to pod of statefulset 0
+        And Keep writing data to pod of statefulset 1
+        When Disconnect volume nodes network for 360 seconds    statefulset 0    statefulset 1
+        And Wait for volume of statefulset 0 healthy
+        And Wait for volume of statefulset 1 healthy
+        And Wait for workloads pods stable    statefulset 0    statefulset 1
+        Then Check statefulset 0 works
+        And Check statefulset 1 works
+    END
+
 Node Disconnect And Keep Data Writing And No Replica On The Disconnected Node
     [Documentation]    -- Manual test plan --
     ...    Disable auto-salvage.
diff --git a/e2e/tests/node_delete.robot b/e2e/tests/negative/node_delete.robot
similarity index 100%
rename from e2e/tests/node_delete.robot
rename to e2e/tests/negative/node_delete.robot
diff --git a/e2e/tests/node_drain.robot b/e2e/tests/negative/node_drain.robot
similarity index 100%
rename from e2e/tests/node_drain.robot
rename to e2e/tests/negative/node_drain.robot
diff --git a/e2e/tests/node_reboot.robot b/e2e/tests/negative/node_reboot.robot
similarity index 64%
rename from e2e/tests/node_reboot.robot
rename to e2e/tests/negative/node_reboot.robot
index 2377f2668e..f86d9b88da 100644
--- a/e2e/tests/node_reboot.robot
+++ b/e2e/tests/negative/node_reboot.robot
@@ -69,7 +69,7 @@ Reboot Node One By One While Workload Heavy Writing
         And Check statefulset 2 works
     END

-Power Off Node One By Once For More Than Pod Eviction Timeout While Workload Heavy Writing
+Power Off Node One By One For More Than Pod Eviction Timeout While Workload Heavy Writing
     [Tags]    reboot
     Given Set setting rwx-volume-fast-failover to ${RWX_VOLUME_FAST_FAILOVER}
     And Create storageclass strict-local with    numberOfReplicas=1    dataLocality=strict-local
@@ -255,6 +255,142 @@ Reboot Volume Node While Heavy Writing And Recurring Jobs Exist
         And Check volume 2 works
     END

+Physical Node Reboot With Attached Deployment
+    Given Create storageclass longhorn-test with    dataEngine=${DATA_ENGINE}
+    And Create persistentvolumeclaim 0 using ${VOLUME_TYPE} volume with longhorn-test storageclass
+    And Create deployment 0 with persistentvolumeclaim 0
+    And Write 100 MB data to file data in deployment 0
+
+    And Reboot volume node of deployment 0
+    And Wait for deployment 0 pods stable
+    Then Check deployment 0 data in file data is intact
+
+Physical Node Reboot With Attached Statefulset
+    Given Create storageclass longhorn-test with    dataEngine=${DATA_ENGINE}
+    And Create statefulset 0 using ${VOLUME_TYPE} volume with longhorn-test storageclass
+    And Write 100 MB data to file data in statefulset 0
+
+    And Reboot volume node of statefulset 0
+    And Wait for statefulset 0 pods stable
+    Then Check statefulset 0 data in file data is intact
+
+Single Replica Node Down Deletion Policy do-nothing With RWO Volume Replica Locate On Replica Node
+    Given Create storageclass longhorn-test with    dataEngine=${DATA_ENGINE}
+    And Set setting node-down-pod-deletion-policy to do-nothing
+    When Create persistentvolumeclaim 0 using RWO volume with longhorn-test storageclass
+    And Create deployment 0 with persistentvolumeclaim 0
+    And Wait for volume of deployment 0 healthy
+    And Write 100 MB data to file data in deployment 0
+
+    # Delete replicas to have the volume with its only replica located on different nodes.
+    And Update volume of deployment 0 replica count to 1
+    And Delete replica of deployment 0 volume on replica node
+    And Delete replica of deployment 0 volume on volume node
+    And Power off volume node of deployment 0
+    Then Wait for volume of deployment 0 stuck in state attaching
+    And Wait for deployment 0 pod stuck in Terminating on the original node
+
+    When Power on off node
+    And Wait for deployment 0 pods stable
+    And Check deployment 0 pod is Running on another node
+    Then Check deployment 0 data in file data is intact
+
+Single Replica Node Down Deletion Policy do-nothing With RWO Volume Replica Locate On Volume Node
+    Given Create storageclass longhorn-test with    dataEngine=${DATA_ENGINE}
+    And Set setting node-down-pod-deletion-policy to do-nothing
+    When Create persistentvolumeclaim 0 using RWO volume with longhorn-test storageclass
+    And Create deployment 0 with persistentvolumeclaim 0
+    And Wait for volume of deployment 0 healthy
+    And Write 100 MB data to file data in deployment 0
+
+    # Delete replicas to have the volume with its only replica located on the same node.
+    And Update volume of deployment 0 replica count to 1
+    And Delete replica of deployment 0 volume on all replica node
+    And Power off volume node of deployment 0
+    Then Wait for volume of deployment 0 faulted
+    And Wait for deployment 0 pod stuck in Terminating on the original node
+
+    When Power on off node
+    And Wait for deployment 0 pods stable
+    And Check deployment 0 pod is Running on the original node
+    Then Check deployment 0 data in file data is intact
+
+Single Replica Node Down Deletion Policy delete-deployment-pod With RWO Volume Replica Locate On Replica Node
+    Given Create storageclass longhorn-test with    dataEngine=${DATA_ENGINE}
+    And Set setting node-down-pod-deletion-policy to delete-deployment-pod
+    When Create persistentvolumeclaim 0 using RWO volume with longhorn-test storageclass
+    And Create deployment 0 with persistentvolumeclaim 0
+    And Wait for volume of deployment 0 healthy
+    And Write 100 MB data to file data in deployment 0
+
+    # Delete replicas to have the volume with its only replica located on different nodes.
+    And Update volume of deployment 0 replica count to 1
+    And Delete replica of deployment 0 volume on replica node
+    And Delete replica of deployment 0 volume on volume node
+    And Power off volume node of deployment 0
+    Then Wait for volume of deployment 0 attaching
+
+    And Wait for deployment 0 pods stable
+    Then Check deployment 0 data in file data is intact
+    And Power on off node
+
+Single Replica Node Down Deletion Policy delete-deployment-pod With RWO Volume Replica Locate On Volume Node
+    Given Create storageclass longhorn-test with    dataEngine=${DATA_ENGINE}
+    And Set setting node-down-pod-deletion-policy to delete-deployment-pod
+    When Create persistentvolumeclaim 0 using RWO volume with longhorn-test storageclass
+    And Create deployment 0 with persistentvolumeclaim 0
+    And Wait for volume of deployment 0 healthy
+    And Write 100 MB data to file data in deployment 0
+
+    # Delete replicas to have the volume with its only replica located on the same node.
+    And Update volume of deployment 0 replica count to 1
+    And Delete replica of deployment 0 volume on all replica node
+    And Power off volume node of deployment 0
+    Then Wait for volume of deployment 0 faulted
+    And Wait for deployment 0 pod stuck in ContainerCreating on another node
+
+    When Power on off node
+    And Wait for deployment 0 pods stable
+    And Check deployment 0 pod is Running on the original node
+    Then Check deployment 0 data in file data is intact
+
+Single Replica Node Down Deletion Policy delete-both-statefulset-and-deployment-pod With RWO Volume Replica Locate On Replica Node
+    Given Create storageclass longhorn-test with    dataEngine=${DATA_ENGINE}
+    And Set setting node-down-pod-deletion-policy to delete-both-statefulset-and-deployment-pod
+    When Create statefulset 0 using RWO volume with longhorn-test storageclass
+    And Wait for volume of statefulset 0 healthy
+    And Write 100 MB data to file data in statefulset 0
+
+    # Delete replicas to have the volume with its only replica located on different nodes.
+    And Update volume of statefulset 0 replica count to 1
+    And Delete replica of statefulset 0 volume on replica node
+    And Delete replica of statefulset 0 volume on volume node
+    And Power off volume node of statefulset 0
+    Then Wait for volume of statefulset 0 attaching
+
+    And Wait for statefulset 0 pods stable
+    Then Check statefulset 0 data in file data is intact
+    And Power on off node
+
+Single Replica Node Down Deletion Policy delete-both-statefulset-and-deployment-pod With RWO Volume Replica Locate On Volume Node
+    Given Create storageclass longhorn-test with    dataEngine=${DATA_ENGINE}
+    And Set setting node-down-pod-deletion-policy to delete-both-statefulset-and-deployment-pod
+    When Create statefulset 0 using RWO volume with longhorn-test storageclass
+    And Wait for volume of statefulset 0 healthy
+    And Write 100 MB data to file data in statefulset 0
+
+    # Delete replicas to have the volume with its only replica located on the same node.
+    And Update volume of statefulset 0 replica count to 1
+    And Delete replica of statefulset 0 volume on all replica node
+    And Power off volume node of statefulset 0
+    Then Wait for volume of statefulset 0 faulted
+    And Wait for statefulset 0 pod stuck in ContainerCreating on another node
+
+    When Power on off node
+    And Wait for statefulset 0 pods stable
+    And Check statefulset 0 pod is Running on the original node
+    Then Check statefulset 0 data in file data is intact
+
 Reboot Replica Node While Heavy Writing And Recurring Jobs Exist
     [Tags]    recurring_job
     Given Create volume 0 with    size=2Gi    numberOfReplicas=1    dataEngine=${DATA_ENGINE}
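The new single-replica tests drive the node-down-pod-deletion-policy setting through the "Set setting" keyword. For manual reproduction outside the suite, the same toggle can be applied directly to the Longhorn Setting custom resource; a sketch, assuming the default longhorn-system namespace and the Setting CRD's top-level value field:

    # Hypothetical manual equivalent of "Set setting node-down-pod-deletion-policy to ...":
    kubectl -n longhorn-system patch settings.longhorn.io node-down-pod-deletion-policy \
        --type merge -p '{"value": "delete-deployment-pod"}'
    # Verify the current value before running the corresponding test case.
    kubectl -n longhorn-system get settings.longhorn.io node-down-pod-deletion-policy \
        -o jsonpath='{.value}'
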
diff --git a/e2e/tests/replica_rebuilding.robot b/e2e/tests/negative/replica_rebuilding.robot
similarity index 100%
rename from e2e/tests/replica_rebuilding.robot
rename to e2e/tests/negative/replica_rebuilding.robot
diff --git a/e2e/tests/stress_cpu.robot b/e2e/tests/negative/stress_cpu.robot
similarity index 100%
rename from e2e/tests/stress_cpu.robot
rename to e2e/tests/negative/stress_cpu.robot
diff --git a/e2e/tests/stress_filesystem.robot b/e2e/tests/negative/stress_filesystem.robot
similarity index 100%
rename from e2e/tests/stress_filesystem.robot
rename to e2e/tests/negative/stress_filesystem.robot
diff --git a/e2e/tests/stress_memory.robot b/e2e/tests/negative/stress_memory.robot
similarity index 100%
rename from e2e/tests/stress_memory.robot
rename to e2e/tests/negative/stress_memory.robot
diff --git a/e2e/tests/network_disconnect.robot b/e2e/tests/network_disconnect.robot
deleted file mode 100644
index 51c12ed0af..0000000000
--- a/e2e/tests/network_disconnect.robot
+++ /dev/null
@@ -1,56 +0,0 @@
-*** Settings ***
-Documentation    Negative Test Cases
-
-Test Tags    negative
-
-Resource    ../keywords/volume.resource
-Resource    ../keywords/storageclass.resource
-Resource    ../keywords/statefulset.resource
-Resource    ../keywords/workload.resource
-Resource    ../keywords/common.resource
-Resource    ../keywords/network.resource
-Resource    ../keywords/setting.resource
-
-Test Setup    Set test environment
-Test Teardown    Cleanup test resources
-
-*** Variables ***
-${LOOP_COUNT}    1
-${LATENCY_IN_MS}    0
-${RETRY_COUNT}    300
-${RETRY_INTERVAL}    1
-${RWX_VOLUME_FAST_FAILOVER}    false
-${DATA_ENGINE}    v1
-
-*** Test Cases ***
-Disconnect Volume Node Network While Workload Heavy Writing
-    Given Set setting rwx-volume-fast-failover to ${RWX_VOLUME_FAST_FAILOVER}
-    And Create storageclass longhorn-test with    dataEngine=${DATA_ENGINE}
-    And Create statefulset 0 using RWO volume with longhorn-test storageclass
-    And Create statefulset 1 using RWX volume with longhorn-test storageclass
-    FOR    ${i}    IN RANGE    ${LOOP_COUNT}
-        And Keep writing data to pod of statefulset 0
-        And Keep writing data to pod of statefulset 1
-        When Disconnect volume nodes network for 20 seconds    statefulset 0    statefulset 1
-        And Wait for volume of statefulset 0 healthy
-        And Wait for volume of statefulset 1 healthy
-        And Wait for workloads pods stable    statefulset 0    statefulset 1
-        Then Check statefulset 0 works
-        And Check statefulset 1 works
-    END
-
-Disconnect Volume Node Network For More Than Pod Eviction Timeout While Workload Heavy Writing
-    Given Set setting rwx-volume-fast-failover to ${RWX_VOLUME_FAST_FAILOVER}
-    And Create storageclass longhorn-test with    dataEngine=${DATA_ENGINE}
-    And Create statefulset 0 using RWO volume with longhorn-test storageclass
-    And Create statefulset 1 using RWX volume with longhorn-test storageclass
-    FOR    ${i}    IN RANGE    ${LOOP_COUNT}
-        And Keep writing data to pod of statefulset 0
-        And Keep writing data to pod of statefulset 1
-        When Disconnect volume nodes network for 360 seconds    statefulset 0    statefulset 1
-        And Wait for volume of statefulset 0 healthy
-        And Wait for volume of statefulset 1 healthy
-        And Wait for workloads pods stable    statefulset 0    statefulset 1
-        Then Check statefulset 0 works
-        And Check statefulset 1 works
-    END
\ No newline at end of file
diff --git a/e2e/tests/regression/test_basic.robot b/e2e/tests/regression/test_basic.robot
index db2a35e79f..a381497149 100644
--- a/e2e/tests/regression/test_basic.robot
+++ b/e2e/tests/regression/test_basic.robot
@@ -95,3 +95,48 @@ Test Snapshot
     And Validate snapshot 2 is not in volume 0 snapshot list

     And Check volume 0 data is data 1
+
+Replica Rebuilding
+    [Documentation]    -- Manual test plan --
+    ...    1. Create and attach a volume.
+    ...    2. Write a large amount of data to the volume.
+    ...    3. Disable disk scheduling and the node scheduling for one replica.
+    ...    4. Crash the replica process. Verify
+    ...       - the corresponding replica is not in a running state.
+    ...       - the volume robustness stays Degraded.
+    ...    5. Enable the disk scheduling. Verify nothing changes.
+    ...    6. Enable the node scheduling. Verify
+    ...       - the failed replica is reused by Longhorn.
+    ...       - the data content is correct after rebuilding.
+    ...       - volume r/w works fine.
+    ...
+    ...    == Not implemented ==
+    ...    7. Directly delete one replica via the UI. Verify
+    ...       - a new replica is replenished immediately.
+    ...       - the rebuilding progress in the UI page looks good.
+    ...       - the data content is correct after rebuilding.
+    ...       - volume r/w works fine.
+    When Create volume 0 with    size=10Gi    numberOfReplicas=3    dataEngine=${DATA_ENGINE}
+    And Attach volume 0 to node 0
+    And Wait for volume 0 healthy
+
+    And Write 1 GB data to volume 0
+
+    And Disable node 1 scheduling
+    And Disable node 1 default disk
+
+    And Crash volume 0 replica process on node 1
+    Then Wait volume 0 replica on node 1 stopped
+    And Wait for volume 0 degraded
+
+    And Enable node 1 default disk
+    Then Check volume 0 replica on node 1 kept in stopped
+    And Check for volume 0 kept in degraded
+
+    And Enable node 1 scheduling
+    Then Wait until volume 0 replica rebuilding started on node 1
+    And Wait for volume 0 healthy
+    And Check volume 0 crashed replica reused on node 1
+
+    And Check volume 0 data is intact
+    And Check volume 0 works
diff --git a/e2e/tests/v2/test_volume.robot b/e2e/tests/regression/test_v2.robot
similarity index 100%
rename from e2e/tests/v2/test_volume.robot
rename to e2e/tests/regression/test_v2.robot
diff --git a/e2e/tests/test_cases/physical_node_reboot.robot b/e2e/tests/test_cases/physical_node_reboot.robot
deleted file mode 100644
index 63fa685772..0000000000
--- a/e2e/tests/test_cases/physical_node_reboot.robot
+++ /dev/null
@@ -1,43 +0,0 @@
-*** Settings ***
-Documentation    Physical node reboot
-
-Test Tags    manual_test_case
-
-Resource    ../keywords/common.resource
-Resource    ../keywords/storageclass.resource
-Resource    ../keywords/deployment.resource
-Resource    ../keywords/persistentvolumeclaim.resource
-Resource    ../keywords/statefulset.resource
-Resource    ../keywords/volume.resource
-Resource    ../keywords/workload.resource
-Resource    ../keywords/host.resource
-
-Test Setup    Set test environment
-Test Teardown    Cleanup test resources
-
-*** Variables ***
-${LOOP_COUNT}    1
-${RETRY_COUNT}    300
-${RETRY_INTERVAL}    1
-${VOLUME_TYPE}    RWO
-${DATA_ENGINE}    v1
-
-*** Test Cases ***
-Physical Node Reboot With Attached Deployment
-    Given Create storageclass longhorn-test with    dataEngine=${DATA_ENGINE}
-    And Create persistentvolumeclaim 0 using ${VOLUME_TYPE} volume with longhorn-test storageclass
-    And Create deployment 0 with persistentvolumeclaim 0
-    And Write 100 MB data to file data in deployment 0
-
-    And Reboot volume node of deployment 0
-    And Wait for deployment 0 pods stable
-    Then Check deployment 0 data in file data is intact
-
-Physical Node Reboot With Attached Statefulset
-    Given Create storageclass longhorn-test with    dataEngine=${DATA_ENGINE}
-    And Create statefulset 0 using ${VOLUME_TYPE} volume with longhorn-test storageclass
-    And Write 100 MB data to file data in statefulset 0
-
-    And Reboot volume node of statefulset 0
-    And Wait for statefulset 0 pods stable
-    Then Check statefulset 0 data in file data is intact
diff --git a/e2e/tests/test_cases/replica_rebuilding.robot b/e2e/tests/test_cases/replica_rebuilding.robot
deleted file mode 100644
index a737c716d0..0000000000
--- a/e2e/tests/test_cases/replica_rebuilding.robot
+++ /dev/null
@@ -1,65 +0,0 @@
-*** Settings ***
-Documentation    Replica Rebuilding
-
-Test Tags    manual_test_case
-
-Resource    ../keywords/common.resource
-Resource    ../keywords/storageclass.resource
-Resource    ../keywords/deployment.resource
-Resource    ../keywords/volume.resource
-Resource    ../keywords/node.resource
-
-Test Setup    Set test environment
-Test Teardown    Cleanup test resources
-
-*** Variables ***
-${LOOP_COUNT}    30
-${RETRY_COUNT}    300
-${RETRY_INTERVAL}    1
-${DATA_ENGINE}    v1
-
-*** Test Cases ***
-Replica Rebuilding
-    [Documentation]    -- Manual test plan --
-    ...    1. Create and attach a volume.
-    ...    2. Write a large amount of data to the volume.
-    ...    3. Disable disk scheduling and the node scheduling for one replica.
-    ...    4. Crash the replica progress. Verify
-    ...       - the corresponding replica in not running state.
-    ...       - the volume will keep robustness Degraded.
-    ...    5. Enable the disk scheduling. Verify nothing changes.
-    ...    6. Enable the node scheduling. Verify.
-    ...       - the failed replica is reused by Longhorn.
-    ...       - the data content is correct after rebuilding.
-    ...       - volume r/w works fine.
-    ...
-    ...    == Not implemented ==
-    ...    7. Direct delete one replica via UI. Verify
-    ...       - a new replica will be replenished immediately.
-    ...       - the rebuilding progress in UI page looks good.
-    ...       - the data content is correct after rebuilding.
-    ...       - volume r/w works fine.
-    When Create volume 0 with    size=10Gi    numberOfReplicas=3    dataEngine=${DATA_ENGINE}
-    And Attach volume 0 to node 0
-    And Wait for volume 0 healthy
-
-    And Write 1 GB data to volume 0
-
-    And Disable node 1 scheduling
-    And Disable node 1 default disk
-
-    And Crash volume 0 replica process on node 1
-    Then Wait volume 0 replica on node 1 stopped
-    And Wait for volume 0 degraded
-
-    And Enable node 1 default disk
-    Then Check volume 0 replica on node 1 kept in stopped
-    And Check for volume 0 kept in degraded
-
-    And Enable node 1 scheduling
-    Then Wait until volume 0 replica rebuilding started on node 1
-    And Wait for volume 0 healthy
-    And Check volume 0 crashed replica reused on node 1
-
-    And Check volume 0 data is intact
-    And Check volume 0 works
diff --git a/e2e/tests/test_cases/single_replica_node_down.robot b/e2e/tests/test_cases/single_replica_node_down.robot
deleted file mode 100644
index 26a31942d9..0000000000
--- a/e2e/tests/test_cases/single_replica_node_down.robot
+++ /dev/null
@@ -1,142 +0,0 @@
-*** Settings ***
-Documentation    Single replica node down
-
-Test Tags    manual_test_case
-
-Resource    ../keywords/common.resource
-Resource    ../keywords/storageclass.resource
-Resource    ../keywords/deployment.resource
-Resource    ../keywords/persistentvolumeclaim.resource
-Resource    ../keywords/recurringjob.resource
-Resource    ../keywords/statefulset.resource
-Resource    ../keywords/volume.resource
-Resource    ../keywords/setting.resource
-Resource    ../keywords/workload.resource
-Resource    ../keywords/host.resource
-
-Test Setup    Set test environment
-Test Teardown    Cleanup test resources include off nodes
-
-*** Variables ***
-${LOOP_COUNT}    1
-${RETRY_COUNT}    300
-${RETRY_INTERVAL}    1
-${DATA_ENGINE}    v1
-
-*** Test Cases ***
-Single Replica Node Down Deletion Policy do-nothing With RWO Volume Replica Locate On Replica Node
-    Given Create storageclass longhorn-test with    dataEngine=${DATA_ENGINE}
-    And Set setting node-down-pod-deletion-policy to do-nothing
-    When Create persistentvolumeclaim 0 using RWO volume with longhorn-test storageclass
-    And Create deployment 0 with persistentvolumeclaim 0
-    And Wait for volume of deployment 0 healthy
-    And Write 100 MB data to file data in deployment 0
-
-    # Delete replicas to have the volume with its only replica located on different nodes.
-    And Update volume of deployment 0 replica count to 1
-    And Delete replica of deployment 0 volume on replica node
-    And Delete replica of deployment 0 volume on volume node
-    And Power off volume node of deployment 0
-    Then Wait for volume of deployment 0 stuck in state attaching
-    And Wait for deployment 0 pod stuck in Terminating on the original node
-
-    When Power on off node
-    And Wait for deployment 0 pods stable
-    And Check deployment 0 pod is Running on another node
-    Then Check deployment 0 data in file data is intact
-
-Single Replica Node Down Deletion Policy do-nothing With RWO Volume Replica Locate On Volume Node
-    Given Create storageclass longhorn-test with    dataEngine=${DATA_ENGINE}
-    And Set setting node-down-pod-deletion-policy to do-nothing
-    When Create persistentvolumeclaim 0 using RWO volume with longhorn-test storageclass
-    And Create deployment 0 with persistentvolumeclaim 0
-    And Wait for volume of deployment 0 healthy
-    And Write 100 MB data to file data in deployment 0
-
-    # Delete replicas to have the volume with its only replica located on the same node.
-    And Update volume of deployment 0 replica count to 1
-    And Delete replica of deployment 0 volume on all replica node
-    And Power off volume node of deployment 0
-    Then Wait for volume of deployment 0 faulted
-    And Wait for deployment 0 pod stuck in Terminating on the original node
-
-    When Power on off node
-    And Wait for deployment 0 pods stable
-    And Check deployment 0 pod is Running on the original node
-    Then Check deployment 0 data in file data is intact
-
-Single Replica Node Down Deletion Policy delete-deployment-pod With RWO Volume Replica Locate On Replica Node
-    Given Create storageclass longhorn-test with    dataEngine=${DATA_ENGINE}
-    And Set setting node-down-pod-deletion-policy to delete-deployment-pod
-    When Create persistentvolumeclaim 0 using RWO volume with longhorn-test storageclass
-    And Create deployment 0 with persistentvolumeclaim 0
-    And Wait for volume of deployment 0 healthy
-    And Write 100 MB data to file data in deployment 0
-
-    # Delete replicas to have the volume with its only replica located on different nodes.
-    And Update volume of deployment 0 replica count to 1
-    And Delete replica of deployment 0 volume on replica node
-    And Delete replica of deployment 0 volume on volume node
-    And Power off volume node of deployment 0
-    Then Wait for volume of deployment 0 attaching
-
-    And Wait for deployment 0 pods stable
-    Then Check deployment 0 data in file data is intact
-    And Power on off node
-
-Single Replica Node Down Deletion Policy delete-deployment-pod With RWO Volume Replica Locate On Volume Node
-    Given Create storageclass longhorn-test with    dataEngine=${DATA_ENGINE}
-    And Set setting node-down-pod-deletion-policy to delete-deployment-pod
-    When Create persistentvolumeclaim 0 using RWO volume with longhorn-test storageclass
-    And Create deployment 0 with persistentvolumeclaim 0
-    And Wait for volume of deployment 0 healthy
-    And Write 100 MB data to file data in deployment 0
-
-    # Delete replicas to have the volume with its only replica located on the same node
-    And Update volume of deployment 0 replica count to 1
-    And Delete replica of deployment 0 volume on all replica node
-    And Power off volume node of deployment 0
-    Then Wait for volume of deployment 0 faulted
-    And Wait for deployment 0 pod stuck in ContainerCreating on another node
-
-    When Power on off node
-    And Wait for deployment 0 pods stable
-    And Check deployment 0 pod is Running on the original node
-    Then Check deployment 0 data in file data is intact
-
-Single Replica Node Down Deletion Policy delete-both-statefulset-and-deployment-pod With RWO Volume Replica Locate On Replica Node
-    Given Create storageclass longhorn-test with    dataEngine=${DATA_ENGINE}
-    And Set setting node-down-pod-deletion-policy to delete-both-statefulset-and-deployment-pod
-    When Create statefulset 0 using RWO volume with longhorn-test storageclass
-    And Wait for volume of statefulset 0 healthy
-    And Write 100 MB data to file data in statefulset 0
-
-    # Delete replicas to have the volume with its only replica located on different nodes.
-    And Update volume of statefulset 0 replica count to 1
-    And Delete replica of statefulset 0 volume on replica node
-    And Delete replica of statefulset 0 volume on volume node
-    And Power off volume node of statefulset 0
-    Then Wait for volume of statefulset 0 attaching
-
-    And Wait for statefulset 0 pods stable
-    Then Check statefulset 0 data in file data is intact
-    And Power on off node
-
-Single Replica Node Down Deletion Policy delete-both-statefulset-and-deployment-pod With RWO Volume Replica Locate On Volume Node
-    Given Create storageclass longhorn-test with    dataEngine=${DATA_ENGINE}
-    And Set setting node-down-pod-deletion-policy to delete-both-statefulset-and-deployment-pod
-    When Create statefulset 0 using RWO volume with longhorn-test storageclass
-    And Wait for volume of statefulset 0 healthy
-    And Write 100 MB data to file data in statefulset 0
-
-    # Delete replicas to have the volume with its only replica located on the same.
-    And Update volume of statefulset 0 replica count to 1
-    And Delete replica of statefulset 0 volume on all replica node
-    And Power off volume node of statefulset 0
-    Then Wait for volume of statefulset 0 faulted
-    And Wait for statefulset 0 pod stuck in ContainerCreating on another node
-
-    When Power on off node
-    And Wait for statefulset 0 pods stable
-    And Check statefulset 0 pod is Running on the original node
-    Then Check statefulset 0 data in file data is intact
diff --git a/pipelines/e2e/Jenkinsfile b/pipelines/e2e/Jenkinsfile
index 037b7220b5..46708bd449 100644
--- a/pipelines/e2e/Jenkinsfile
+++ b/pipelines/e2e/Jenkinsfile
@@ -159,6 +159,7 @@ node {
         stage ('report generation') {
             sh "docker cp ${JOB_BASE_NAME}-${BUILD_NUMBER}:${WORKSPACE}/log.html ."
             sh "docker cp ${JOB_BASE_NAME}-${BUILD_NUMBER}:${WORKSPACE}/output.xml ."
+            sh "docker cp ${JOB_BASE_NAME}-${BUILD_NUMBER}:${WORKSPACE}/junit.xml ."
             sh "docker cp ${JOB_BASE_NAME}-${BUILD_NUMBER}:${WORKSPACE}/report.html ."

             if(params.LONGHORN_UPGRADE_TEST && params.LONGHORN_TRANSIENT_VERSION) {
diff --git a/pipelines/utilities/run_longhorn_e2e_test.sh b/pipelines/utilities/run_longhorn_e2e_test.sh
index 0809e40fc0..7decee706f 100755
--- a/pipelines/utilities/run_longhorn_e2e_test.sh
+++ b/pipelines/utilities/run_longhorn_e2e_test.sh
@@ -71,6 +71,7 @@ run_longhorn_e2e_test(){

     kubectl cp ${LONGHORN_TEST_POD_NAME}:/tmp/test-report/log.html "log.html" -c longhorn-test-report
     kubectl cp ${LONGHORN_TEST_POD_NAME}:/tmp/test-report/output.xml "output.xml" -c longhorn-test-report
+    kubectl cp ${LONGHORN_TEST_POD_NAME}:/tmp/test-report/junit.xml "junit.xml" -c longhorn-test-report
     kubectl cp ${LONGHORN_TEST_POD_NAME}:/tmp/test-report/report.html "report.html" -c longhorn-test-report
 }

@@ -110,5 +111,6 @@ run_longhorn_e2e_test_out_of_cluster(){

     cp /tmp/test-report/log.html "${WORKSPACE}/log.html"
     cp /tmp/test-report/output.xml "${WORKSPACE}/output.xml"
+    cp /tmp/test-report/junit.xml "${WORKSPACE}/junit.xml"
     cp /tmp/test-report/report.html "${WORKSPACE}/report.html"
 }
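Taken together, run.sh now produces the xUnit report and both pipeline paths archive it next to the existing Robot reports. A minimal local sketch for validating the artifacts before a CI publisher consumes them (file locations as used above; the xmllint call is just one convenient well-formedness check):

    # Confirm every report the pipelines copy actually exists after a run...
    for f in log.html output.xml junit.xml report.html; do
        [ -f "/tmp/test-report/${f}" ] || { echo "missing ${f}" >&2; exit 1; }
    done
    # ...and that the new xUnit file is well-formed XML before CI consumes it.
    xmllint --noout /tmp/test-report/junit.xml && echo "junit.xml OK"
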