From 082fe6aec7068712bf31a233d2d9cc84931e8712 Mon Sep 17 00:00:00 2001 From: sipriyaa Date: Fri, 19 Jul 2024 15:07:15 +0530 Subject: [PATCH] supervisor snapshot code changes --- tests/e2e/csi_snapshot_basic.go | 2972 ++++++++++++------------- tests/e2e/csi_snapshot_file_volume.go | 43 +- tests/e2e/csi_snapshot_negative.go | 31 +- tests/e2e/csi_snapshot_utils.go | 113 +- tests/e2e/e2e_common.go | 6 +- tests/e2e/raw_block_volume.go | 52 +- tests/e2e/snapshot_vmservice_vm.go | 829 +++++++ tests/e2e/tkgs_ha.go | 8 +- tests/e2e/util.go | 2 +- tests/e2e/vmservice_utils.go | 16 +- 10 files changed, 2504 insertions(+), 1568 deletions(-) create mode 100644 tests/e2e/snapshot_vmservice_vm.go diff --git a/tests/e2e/csi_snapshot_basic.go b/tests/e2e/csi_snapshot_basic.go index 8ac4849255..ed39805a75 100644 --- a/tests/e2e/csi_snapshot_basic.go +++ b/tests/e2e/csi_snapshot_basic.go @@ -74,34 +74,53 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { labels_ns map[string]string isVcRebooted bool vcAddress string + labelsMap map[string]string + scName string + volHandle string + deleteFCDRequired bool ) ginkgo.BeforeEach(func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + bootstrap() client = f.ClientSet namespace = getNamespaceToRunTests(f) scParameters = make(map[string]string) + + // reading shared datastore URL and shared storage policy + storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) datastoreURL = GetAndExpectStringEnvVar(envSharedDatastoreURL) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + + // fetching node list and checking node status nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") if !(len(nodeList.Items) > 0) { framework.Failf("Unable to find ready and schedulable Node") } + // delete nginx service + service, err := client.CoreV1().Services(namespace).Get(ctx, servicename, metav1.GetOptions{}) + if err == nil && service != nil { + deleteService(namespace, client, service) + } + + // reading vc credentials vcAddress = e2eVSphere.Config.Global.VCenterHostname + ":" + sshdPort + // Get snapshot client using the rest config - if !guestCluster { + if vanillaCluster || supervisorCluster { restConfig = getRestConfigClient() snapc, err = snapclient.NewForConfig(restConfig) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } else { + } else if guestCluster { guestClusterRestConfig = getRestConfigClientForGuestCluster(guestClusterRestConfig) snapc, err = snapclient.NewForConfig(guestClusterRestConfig) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } + // reading fullsync wait time if os.Getenv(envPandoraSyncWaitTime) != "" { pandoraSyncWaitTime, err = strconv.Atoi(os.Getenv(envPandoraSyncWaitTime)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -109,6 +128,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { pandoraSyncWaitTime = defaultPandoraSyncWaitTime } + // reading operation scale value if os.Getenv("VOLUME_OPS_SCALE") != "" { volumeOpsScale, err = strconv.Atoi(os.Getenv(envVolumeOperationsScale)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -119,6 +139,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { } framework.Logf("VOLUME_OPS_SCALE is set to %v", volumeOpsScale) + // creates a new k8s client from a given kubeConfig file controllerClusterConfig := os.Getenv(contollerClusterKubeConfig) c = client if controllerClusterConfig != "" { @@ -128,12 
+149,34 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { c = remoteC } + // required for pod creation + labels_ns = map[string]string{} + labels_ns[admissionapi.EnforceLevelLabel] = string(admissionapi.LevelPrivileged) + labels_ns["e2e-framework"] = f.BaseName + + //setting map values + labelsMap = make(map[string]string) + labelsMap["app"] = "test" + + // reading sc parameters required for storage class + if vanillaCluster { + scParameters[scParamDatastoreURL] = datastoreURL + scName = "" + } else if guestCluster { + scParameters[svStorageClassName] = storagePolicyName + scName = "" + } else if supervisorCluster { + profileID := e2eVSphere.GetSpbmPolicyID(storagePolicyName) + scParameters[scParamStoragePolicyID] = profileID + scName = storagePolicyName + // setting resource quota for storage policy tagged to supervisor namespace + //setStoragePolicyQuota(ctx, restConfig, storagePolicyName, namespace, rqLimit) + } + if guestCluster { - storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) svcClient, svNamespace := getSvcClientAndNamespace() setResourceQuota(svcClient, svNamespace, rqLimit) - datastoreURL = GetAndExpectStringEnvVar(envSharedDatastoreURL) var datacenters []string ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -157,9 +200,13 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { } } - labels_ns = map[string]string{} - labels_ns[admissionapi.EnforceLevelLabel] = string(admissionapi.LevelPrivileged) - labels_ns["e2e-framework"] = f.BaseName + // reading fullsync wait time + if os.Getenv(envPandoraSyncWaitTime) != "" { + pandoraSyncWaitTime, err = strconv.Atoi(os.Getenv(envPandoraSyncWaitTime)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + pandoraSyncWaitTime = defaultPandoraSyncWaitTime + } }) ginkgo.AfterEach(func() { @@ -190,6 +237,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { }) /* + Testcase-1 Create/Delete snapshot via k8s API using PVC (Dynamic Provisioning) 1. Create a storage class (eg: vsan default) and create a pvc using this sc @@ -203,32 +251,28 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 9. Query the snapshot from CNS side using volume id - should pass and return the snapshot entry 10. Delete the above snapshot from k8s side using kubectl delete, run a get and ensure it is removed 11. Also ensure that the VolumeSnapshotContent is deleted along with the - volume snapshot as the policy is delete + volume snapshot as the policy is delete 12. Query the snapshot from CNS side - should return 0 entries 13. 
Cleanup: Delete PVC, SC (validate they are removed) */ - ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] Verify snapshot dynamic provisioning "+ - "workflow", ginkgo.Label(p0, block, tkg, vanilla, snapshot, stable), func() { + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] [supervisor-snapshot] TC1 Verify snapshot dynamic provisioning "+ + "workflow", ginkgo.Label(p0, block, tkg, vanilla, wcp, snapshot, stable), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - var volHandle string - if vanillaCluster { - scParameters[scParamDatastoreURL] = datastoreURL - } else if guestCluster { - scParameters[svStorageClassName] = storagePolicyName - } - - ginkgo.By("Create storage class and PVC") - storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, "") + ginkgo.By("Create storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, scName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }() - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + ginkgo.By("Create PVC") + pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", diskSize, storageclass, true) volHandle = persistentVolumes[0].Spec.CSI.VolumeHandle if guestCluster { @@ -255,7 +299,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Create a dynamic volume snapshot") volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -282,20 +326,21 @@ }) /* + Testcase-2 Create/Delete snapshot via k8s API using VolumeSnapshotContent (Pre-Provisioned Snapshots) 1. Create a storage class (eg: vsan default) and create a pvc using this sc 2. The volumesnapshotclass is set to delete 3. Create a VolumeSnapshotContent using snapshot-handle - a. get snapshotHandle by referring to an existing volume snapshot - b. this snapshot will be created dynamically, and the snapshot-content that is - created by that will be referred to get the snapshotHandle + a. get snapshotHandle by referring to an existing volume snapshot + b. this snapshot will be created dynamically, and the snapshot-content that is + created by that will be referred to get the snapshotHandle 4. Create a volume snapshot using source set to volumeSnapshotContentName above - 5. Ensure the snapshot is created, verify using get VolumeSnapshot + 5. Ensure the snapshot is created, verify using get VolumeSnapshot 6. Verify the restoreSize on the snapshot and the snapshotcontent is set to same as that of the pvcSize 7. Delete the above snapshot, run a get from k8s side and ensure it's removed 8. Run QuerySnapshot from CNS side, the backend snapshot should be deleted 9. 
Also ensure that the VolumeSnapshotContent is deleted along with the - volume snapshot as the policy is delete + volume snapshot as the policy is delete 10. Cleanup the pvc */ ginkgo.It("[block-vanilla-snapshot] Verify snapshot static provisioning through K8s "+ @@ -310,7 +355,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { var snapshotContentCreated = false ginkgo.By("Create storage class and PVC") - scParameters[scParamDatastoreURL] = datastoreURL storageclass, pvclaim, err = createPVCAndStorageClass(ctx, client, namespace, nil, scParameters, diskSize, nil, "", false, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -442,8 +486,8 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) snapshotContentCreated2 = false }) - /* + Testcase-3 Create pre-provisioned snapshot using backend snapshot (create via CNS) 1. Create a storage-class and create a pvc using this sc 2. Call CNS CreateSnapshot API on the above volume @@ -455,7 +499,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 8. Delete the above snapshot, run a get from k8s side and ensure its removed 9. Run QuerySnapshot from CNS side, the backend snapshot should be deleted 10. Also ensure that the VolumeSnapshotContent is deleted along with the - volume snapshot as the policy is delete + volume snapshot as the policy is delete 11. The snapshot that was created via CNS in step-2 should be deleted as part of k8s snapshot delete 12. Delete the pvc */ @@ -470,7 +514,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { var err error ginkgo.By("Create storage class and PVC") - scParameters[scParamDatastoreURL] = datastoreURL storageclass, pvclaim, err = createPVCAndStorageClass(ctx, client, namespace, nil, scParameters, diskSize, nil, "", false, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -583,13 +626,14 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { }) /* + Testcase-4 Create/Delete snapshot via k8s API using VolumeSnapshotContent (Pre-Provisioned Snapshots) 1. Create a storage class (eg: vsan default) and create a pvc using this sc - 2. The volumesnapshotclass is set to delete + 2. The volumesnapshotclass is set to retain 3. Create a VolumeSnapshotContent using snapshot-handle with deletion policy Retain - a. get snapshotHandle by referring to an existing volume snapshot - b. this snapshot will be created dynamically, and the snapshot-content that is - created by that will be referred to get the snapshotHandle + a. get snapshotHandle by referring to an existing volume snapshot + b. this snapshot will be created dynamically, and the snapshot-content that is + created by that will be referred to get the snapshotHandle 4. Create a volume snapshot using source set to volumeSnapshotContentName above 5. Ensure the snapshot is created, verify using get VolumeSnapshot 6. 
Verify the restoreSize on the snapshot and the snapshotcontent is set to same as that of the pvcSize @@ -613,7 +657,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { var snapshotContentCreated = false ginkgo.By("Create storage class and PVC") - scParameters[scParamDatastoreURL] = datastoreURL storageclass, pvclaim, err = createPVCAndStorageClass(ctx, client, namespace, nil, scParameters, diskSize, nil, "", false, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -771,6 +814,7 @@ }) /* + Testcase-4 Create/Delete volume snapshot with snapshot-class retention set to Retain 1. Create a storage class and create a pvc 2. Create a VolumeSnapshot class with deletionPolicy to retain 3. Create a volume-snapshot using the pvc created in step-1 4. Use the VolumeSnapshot as source to create a new volume - should fail 5. Delete the VolumeSnapshot, then the VSC should still remain (due to retention) 6. Verify the restore size and other fields (snapshotClass, retentionPolicy) 7. Run cleanup - delete the snapshot, snapshotcontent, pvc and sc */ ginkgo.It("[block-vanilla-snapshot] Verify snapshot static provisioning with deletion "+ "policy Retain", ginkgo.Label(p0, block, vanilla, snapshot, stable), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() var storageclass *storagev1.StorageClass var pvclaim *v1.PersistentVolumeClaim var err error var snapshotContentCreated = false var contentName string ginkgo.By("Create storage class and PVC") - scParameters[scParamDatastoreURL] = datastoreURL storageclass, pvclaim, err = createPVCAndStorageClass(ctx, client, namespace, nil, scParameters, diskSize, nil, "", false, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -917,14 +960,15 @@ }) /* + Testcase-5 Volume restore using snapshot (a) dynamic snapshot (b) pre-provisioned snapshot 1. Create a sc, a pvc and attach the pvc to a pod, write a file 2. Create pre-provisioned and dynamically provisioned snapshots using this pvc 3. Create new volumes (pvcFromPreProvSS and pvcFromDynamicSS) using these - snapshots as source, use the same sc + snapshots as source, use the same sc 4. Ensure the pvc gets provisioned and is Bound 5. Attach the pvc to a pod and ensure data from snapshot is available - (file that was written in step.1 should be available) + (file that was written in step.1 should be available) 6. And also write new data to the restored volumes and it should succeed 7. Delete the snapshots and pvcs/pods created in steps 1,2,3 8. Continue to write new data to the restore volumes and it should succeed */ ginkgo.It("[block-vanilla-snapshot] Volume restore using dynamic and pre-provisioned "+ "snapshot", ginkgo.Label(p0, block, vanilla, snapshot, stable), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() var storageclass *storagev1.StorageClass var pvclaim *v1.PersistentVolumeClaim var err error var snapshotContentCreated = false ginkgo.By("Create storage class and PVC") - scParameters[scParamDatastoreURL] = datastoreURL storageclass, pvclaim, err = createPVCAndStorageClass(ctx, client, namespace, nil, scParameters, diskSize, nil, "", false, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1254,7 +1297,6 @@ }) /* - Snapshot creation and restore workflow verification with xfs filesystem 1. Create a storage class with fstype set to XFS and create a pvc using this sc 2. Create a pod which uses above PVC 3. Create snapshot on the pvc, restore it to a new pvc and attach to a new pod 4. Verify the file created in step-2 is available on the restored volume 5. Write new data on the restored volume and verify it succeeds 6. Delete the snapshots, pvcs and pods created above 7. Query the snapshot from CNS side - it shouldn't be available 17. 
Delete SC and VolumeSnapshotClass */ - ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] Volume snapshot creation and restoration workflow "+ + + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] [supervisor-snapshot] TC16 Volume snapshot creation and restoration workflow "+ "with xfs filesystem", ginkgo.Label(p0, block, vanilla, tkg, snapshot, stable), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - if vanillaCluster { - scParameters[scParamDatastoreURL] = datastoreURL - } else if guestCluster { - scParameters[svStorageClassName] = storagePolicyName - } - scParameters[scParamFsType] = xfsFSType - ginkgo.By("Create storage class with xfs filesystem and create PVC") - storageclass, err := createStorageClass(client, scParameters, nil, "", "", true, "") + ginkgo.By("Create storage class with xfs filesystem") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", true, scName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, - nil, "", diskSize, storageclass, true) - volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle + ginkgo.By("Create PVC") + pvclaim, pvs := createPVCAndQueryVolumeInCNS(ctx, client, namespace, + labelsMap, "", diskSize, storageclass, true) + volHandle := pvs[0].Spec.CSI.VolumeHandle if guestCluster { volHandle = getVolumeIDFromSupervisorCluster(volHandle) } gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) - defer func() { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - defer func() { err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1313,48 +1349,41 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - // Verify using CNS Query API if VolumeID retrieved from PV is present. 
- ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volHandle)) - queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volHandle) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(queryResult.Volumes).ShouldNot(gomega.BeEmpty()) - gomega.Expect(queryResult.Volumes[0].VolumeId.Id).To(gomega.Equal(volHandle)) - - // Create a Pod to use this PVC, and verify volume has been attached ginkgo.By("Creating pod to attach PV to the node") pod, err := createPod(ctx, client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false, execCommand) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { - // Delete POD ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace)) err = fpod.DeletePodWithWait(ctx, client, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - // Verify volume is attached to the node var vmUUID string + var exists bool nodeName := pod.Spec.NodeName if vanillaCluster { - vmUUID = getNodeUUID(ctx, client, pod.Spec.NodeName) + vmUUID = getNodeUUID(ctx, client, nodeName) } else if guestCluster { - vmUUID, err = getVMUUIDFromNodeName(pod.Spec.NodeName) + vmUUID, err = getVMUUIDFromNodeName(nodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else if supervisorCluster { + annotations := pod.Annotations + vmUUID, exists = annotations[vmUUIDLabel] + gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("Pod doesn't have %s annotation", vmUUIDLabel)) + _, err := e2eVSphere.getVMByUUID(ctx, vmUUID) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volHandle, nodeName)) - isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, volHandle, vmUUID) + isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, pvs[0].Spec.CSI.VolumeHandle, vmUUID) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") - // Verify filesystem used to mount volume inside pod is xfs ginkgo.By("Verify that filesystem type is xfs as expected") _, err = e2eoutput.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/cat", "/mnt/volume1/fstype"}, xfsFSType, time.Minute) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - // Create file1.txt at mountpath inside pod ginkgo.By(fmt.Sprintf("Creating file file1.txt at mountpath inside pod: %v", pod.Name)) data1 := "This file file1.txt is written by Pod1" filePath1 := "/mnt/volume1/file1.txt" @@ -1372,7 +1401,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Create a dynamic volume snapshot") volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -1400,7 +1429,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { volHandle2 = getVolumeIDFromSupervisorCluster(volHandle2) } gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) - defer func() { err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim2.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1414,9 +1442,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { pod2, err := createPod(ctx, client, namespace, nodeSelector, []*v1.PersistentVolumeClaim{pvclaim2}, 
false, execCommand) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { - // Delete POD ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod2.Name, namespace)) err = fpod.DeletePodWithWait(ctx, client, pod2) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1427,9 +1453,15 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { gomega.Expect(nodeName == nodeName2).To(gomega.BeTrue(), "Pod is not scheduled on expected node") if vanillaCluster { - vmUUID = getNodeUUID(ctx, client, pod2.Spec.NodeName) + vmUUID = getNodeUUID(ctx, client, nodeName2) } else if guestCluster { - vmUUID, err = getVMUUIDFromNodeName(pod2.Spec.NodeName) + vmUUID, err = getVMUUIDFromNodeName(nodeName2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else if supervisorCluster { + annotations := pod2.Annotations + vmUUID, exists = annotations[vmUUIDLabel] + gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("Pod doesn't have %s annotation", vmUUIDLabel)) + _, err := e2eVSphere.getVMByUUID(ctx, vmUUID) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -1444,6 +1476,7 @@ xfsFSType, time.Minute) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + time.Sleep(2 * time.Minute) // Ensure that file1.txt is available as expected on the restored PVC ginkgo.By("Verify that file1.txt data is available as part of snapshot") output := readFileFromPod(namespace, pod2.Name, filePath1) @@ -1467,43 +1500,41 @@ }) /* + Testcase-6 Volume restore using snapshot on a different storageclass 1. Create a sc with thin-provisioned spbm policy, create a pvc and attach the pvc to a pod 2. Create a dynamically provisioned snapshots using this pvc 3. create another sc pointing to a different spbm policy (say thick) 4. Run a restore workflow by giving a different storageclass in the pvc spec 5. the new storageclass would point to a thick provisioned spbm policy, - while the source pvc was created using a thin provisioned policy 6. 
cleanup spbm policies, sc's, pvc's */ - ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] Volume restore using snapshot on a different "+ - "storageclass", ginkgo.Label(p0, block, vanilla, snapshot, tkg, stable), func() { + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] [supervisor-snapshot] TC3 Volume "+ + "restore using snapshot on a different "+ + "storageclass", ginkgo.Label(p0, block, vanilla, wcp, snapshot, tkg, stable), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - if vanillaCluster { - scParameters[scParamDatastoreURL] = datastoreURL - } else if guestCluster { - scParameters[svStorageClassName] = storagePolicyName - } - - ginkgo.By("Create storage class and PVC") - storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, "") + ginkgo.By("Create storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, scName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }() - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + ginkgo.By("Create PVC") + pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", diskSize, storageclass, true) volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle if guestCluster { volHandle = getVolumeIDFromSupervisorCluster(volHandle) } gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) - defer func() { err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1524,7 +1555,7 @@ ginkgo.By("Create a dynamic volume snapshot") volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -1545,8 +1576,6 @@ }() scParameters1 := make(map[string]string) - scParameters1[scParamStoragePolicyName] = "Management Storage Policy - Regular" - curtime := time.Now().Unix() randomValue := rand.Int() val := strconv.FormatInt(int64(randomValue), 10) @@ -1555,22 +1584,37 @@ scName := "snapshot" + curtimestring + val var storageclass1 *storagev1.StorageClass + ginkgo.By("Create a new storage class") if vanillaCluster { + scParameters1[scParamStoragePolicyName] = "Management Storage Policy - Regular" storageclass1, err = createStorageClass(client, scParameters1, nil, "", "", false, scName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass1.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() } else if guestCluster { scName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores2) storageclass1, err = client.StorageV1().StorageClasses().Get(ctx, 
scName, metav1.GetOptions{}) + } else if supervisorCluster { + storagePolicyName2 := GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores2) + profileID := e2eVSphere.GetSpbmPolicyID(storagePolicyName2) + scParameters1[scParamStoragePolicyID] = profileID + scName = storagePolicyName2 + storageclass1, err = client.StorageV1().StorageClasses().Get(ctx, scName, metav1.GetOptions{}) + + // setting resource quota for storage policy tagged to supervisor namespace + setStoragePolicyQuota(ctx, restConfig, storagePolicyName2, namespace, rqLimit) } - pvclaim2, persistentVolumes2, _ := verifyVolumeRestoreOperation(ctx, client, - namespace, storageclass1, volumeSnapshot, diskSize, false) - volHandle2 := persistentVolumes2[0].Spec.CSI.VolumeHandle + ginkgo.By("Restore a pvc using a dynamic volume snapshot created above but with a different storage class") + pvclaim2, pvs2, _ := verifyVolumeRestoreOperation(ctx, client, namespace, storageclass1, + volumeSnapshot, diskSize, false) + volHandle2 := pvs2[0].Spec.CSI.VolumeHandle if guestCluster { volHandle2 = getVolumeIDFromSupervisorCluster(volHandle2) } gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) - defer func() { err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim2.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1637,17 +1681,9 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { }() if vanillaCluster { - scParameters[scParamDatastoreURL] = datastoreURL - } else if guestCluster { - scParameters[svStorageClassName] = storagePolicyName - } - - if vanillaCluster { - ginkgo.By("Create storage class and PVC") - scParameters[scParamDatastoreURL] = datastoreURL - storageclass, err = createStorageClass(client, scParameters, nil, "", "", false, "") + ginkgo.By("Create storage class") + storageclass, err = createStorageClass(client, scParameters, nil, "", "", false, scName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1661,7 +1697,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { } ginkgo.By("Create PVC") - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, newNamespaceName, nil, "", + pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, newNamespaceName, labelsMap, "", diskSize, storageclass, true) volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle if guestCluster { @@ -1687,7 +1723,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Create/Get volume snapshot class") volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, claimPolicy) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { if vanillaCluster { ginkgo.By("Delete volume snapshot class") @@ -1699,7 +1734,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Create a volume snapshot") volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, newNamespaceName, snapc, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, newNamespaceName, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, true) framework.Logf("Volume snapshot name is : %s", volumeSnapshot.Name) snapshotContentName := snapshotContent.Name @@ -1839,13 +1874,13 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { volHandle2 := 
persistentvolumes2[0].Spec.CSI.VolumeHandle gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) - /*defer func() { + defer func() { framework.Logf("Deleting restored PVC") err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim2.Name, namespace2Name) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle2) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }()*/ + }() if vanillaCluster { ginkgo.By("Delete volume snapshot created above") err = snapc.SnapshotV1().VolumeSnapshots(namespace2Name).Delete(ctx, volumeSnapshot.Name, metav1.DeleteOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -1882,6 +1917,7 @@ }) /* + Testcase-12 Delete a non-existent snapshot 1. Create pvc and volumesnapshot using the pvc 2. Verify that a snapshot using CNS querySnapshots API 3. Delete the snapshot from the backend (CNS side) 4. Try deleting the volumesnapshot from k8s side 5. Delete should fail with appropriate error as the backend snapshot is missing 6. Delete would return a pass from CSI side (this is expected because CSI is - designed to return success even though it cannot find a snapshot in the backend) + designed to return success even though it cannot find a snapshot in the backend) */ - ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] Delete a non-existent snapshot", ginkgo.Label(p0, block, - vanilla, snapshot, tkg, negative), func() { + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] [supervisor-snapshot] TC5 Delete a non-existent snapshot", ginkgo.Label(p0, block, + vanilla, wcp, snapshot, tkg, negative), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - if vanillaCluster { - scParameters[scParamDatastoreURL] = datastoreURL - } else if guestCluster { - scParameters[svStorageClassName] = storagePolicyName - } - - ginkgo.By("Create storage class and PVC") - storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, "") + ginkgo.By("Create storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, scName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }() - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + ginkgo.By("Create PVC") + pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", diskSize, storageclass, true) volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle if guestCluster { volHandle = getVolumeIDFromSupervisorCluster(volHandle) } gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) - defer func() { err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1938,7 +1970,7 @@ ginkgo.By("Create a dynamic volume snapshot") volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -1967,38 +1999,35 @@ var _ = 
ginkgo.Describe("Volume Snapshot Basic Test", func() { snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }) /* + Testcase-14 Create snapshots using default VolumeSnapshotClass 1. Create a VolumeSnapshotClass and set it as default 2. Create a snapshot without providing the snapshotClass input and - ensure the default class is picked and honored for snapshot creation + ensure the default class is picked and honored for snapshot creation 3. Validate the fields after snapshot creation succeeds (snapshotClass, retentionPolicy) */ - ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] Create snapshots using default "+ - "VolumeSnapshotClass", ginkgo.Label(p0, block, vanilla, snapshot, tkg), func() { + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot][supervisor-snapshot] TC7Create snapshots using default "+ + "VolumeSnapshotClass", ginkgo.Label(p0, block, vanilla, snapshot, wcp, tkg), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() var volumeSnapshotClass *snapV1.VolumeSnapshotClass - if vanillaCluster { - scParameters[scParamDatastoreURL] = datastoreURL - } else if guestCluster { - scParameters[svStorageClassName] = storagePolicyName - } - - ginkgo.By("Create storage class and PVC") - storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, "") + ginkgo.By("Create storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, scName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }() - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + ginkgo.By("Create PVC") + pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", diskSize, storageclass, true) volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle if guestCluster { @@ -2022,7 +2051,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { volumeSnapshotClass, err = snapc.SnapshotV1().VolumeSnapshotClasses().Create(ctx, vscSpec, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } else if guestCluster { + } else { restConfig = getRestConfigClient() snapc, err = snapclient.NewForConfig(restConfig) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -2052,8 +2081,9 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { snapc, err = snapclient.NewForConfig(guestClusterRestConfig) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } + volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshotWithoutSnapClass(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -2080,34 +2110,39 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { }) /* + Testcase-16 Create Volume from snapshot with different size (high and low) 1. 
Restore operation (or creation of volume from snapshot) should pass - only if the volume size is same as that of the snapshot being used + only if the volume size is same as that of the snapshot being used 2. If a different size (high or low) is provided the pvc creation should fail with error 3. Verify the error 4. Create with exact size and ensure it succeeds */ - ginkgo.It("[block-vanilla-snapshot][tkg-snapshot] Create Volume from snapshot with "+ - "different size", ginkgo.Label(p1, block, vanilla, snapshot, tkg, stable, negative), func() { + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] [supervisor-snapshot] TC8 Create Volume from snapshot with "+ + "different size", ginkgo.Label(p1, block, vanilla, snapshot, tkg, wcp, stable, negative), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - if vanillaCluster { - scParameters[scParamDatastoreURL] = datastoreURL - } else if guestCluster { - scParameters[svStorageClassName] = storagePolicyName - } - - ginkgo.By("Create storage class and PVC") - storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, "") + ginkgo.By("Create storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", true, scName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + + if !vanillaCluster { + var allowExpansion = true + storageclass.AllowVolumeExpansion = &allowExpansion + storageclass, err = client.StorageV1().StorageClasses().Update(ctx, storageclass, metav1.UpdateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + defer func() { + if vanillaCluster { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }() - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + ginkgo.By("Create PVC") + pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", diskSize, storageclass, true) volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle if guestCluster { @@ -2134,7 +2169,7 @@ ginkgo.By("Create a dynamic volume snapshot") volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -2154,8 +2189,8 @@ } }() - ginkgo.By("Create PVC using the higher size") - pvcSpec := getPersistentVolumeClaimSpecWithDatasource(namespace, defaultrqLimit, storageclass, nil, + ginkgo.By("Create PVC from dynamic volume snapshot but with a different higher size") + pvcSpec := getPersistentVolumeClaimSpecWithDatasource(namespace, defaultrqLimit, storageclass, labelsMap, v1.ReadWriteOnce, volumeSnapshot.Name, snapshotapigroup) pvclaim2, err := fpv.CreatePVC(ctx, client, namespace, pvcSpec) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -2199,45 +2234,34 @@ }) /* - Snapshot workflow for statefulsets - 1. 
Create a statefulset with 3 replicas using a storageclass with volumeBindingMode set to Immediate - 2. Wait for pvcs to be in Bound state - 3. Wait for pods to be in Running state - 4. Create snapshot on 3rd replica's pvc (pvc as source) - 5. Scale down the statefulset to 2 - 6. Delete the pvc on which snapshot was created - 7. PVC delete succeeds but PV delete will fail as there is snapshot - expected - 8. Create a new PVC with same name (using the snapshot from step-4 as source) - verify a new PV is created - 9. Scale up the statefulset to 3 - 10. Verify if the new pod attaches to the PV created in step-8 - 11. Cleanup the sts and the snapshot + pv that was left behind in step-7 + Testcase-17 + Snapshot workflow for statefulsets + 1. Create a statefulset with 3 replicas using a storageclass with volumeBindingMode set to Immediate + 2. Wait for pvcs to be in Bound state + 3. Wait for pods to be in Running state + 4. Create snapshot on 3rd replica's pvc (pvc as source) + 5. Scale down the statefulset to 2 + 6. Delete the pvc on which snapshot was created + 7. PVC delete succeeds but PV delete will fail as there is snapshot - expected + 8. Create a new PVC with same name (using the snapshot from step-4 as source) - verify a new PV is created + 9. Scale up the statefulset to 3 + 10. Verify if the new pod attaches to the PV created in step-8 + 11. Cleanup the sts and the snapshot + pv that was left behind in step-7 */ - ginkgo.It("[block-vanilla-snapshot][tkg-snapshot] Snapshot workflow for statefulsets", ginkgo.Label(p0, block, - vanilla, snapshot, tkg), func() { + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] [supervisor-snapshot] TC9 Snapshot workflow for statefulsets", ginkgo.Label(p0, block, + vanilla, snapshot, wcp, tkg), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - curtime := time.Now().Unix() - randomValue := rand.Int() - val := strconv.FormatInt(int64(randomValue), 10) - val = string(val[1:3]) - curtimestring := strconv.FormatInt(curtime, 10) - scName := "nginx-sc-default-" + curtimestring + val - var snapshotId1, snapshotId2 string - var snapshotContentCreated, snapshotCreated bool - - if guestCluster { - scParameters[svStorageClassName] = storagePolicyName - } - - ginkgo.By("Create storage class and PVC") - scSpec := getVSphereStorageClassSpec(scName, scParameters, nil, "", "", false) - sc, err := client.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{}) + ginkgo.By("Create storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, scName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }() ginkgo.By("Creating service") @@ -2246,11 +2270,12 @@ deleteService(namespace, client, service) }() + ginkgo.By("Creating Statefulset") statefulset := GetStatefulSetFromManifest(namespace) ginkgo.By("Creating statefulset") - statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. 
- Spec.StorageClassName = &scName - *statefulset.Spec.Replicas = 2 + if !vanillaCluster { + statefulset.Spec.VolumeClaimTemplates[0].Spec.StorageClassName = &scName + } CreateStatefulSet(namespace, statefulset, client) replicas := *(statefulset.Spec.Replicas) defer func() { @@ -2268,16 +2293,18 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { "Number of Pods in the statefulset should match with number of replicas") framework.Logf("Fetching pod 1, pvc1 and pv1 details") + // pod1 details pod1, err := client.CoreV1().Pods(namespace).Get(ctx, ssPodsBeforeScaleDown.Items[0].Name, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // pvc1 details pvc1 := pod1.Spec.Volumes[0].PersistentVolumeClaim - pvclaim1, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvc1.ClaimName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // pv1 details pv1 := getPvFromClaim(client, statefulset.Namespace, pvc1.ClaimName) volHandle1 := pv1.Spec.CSI.VolumeHandle gomega.Expect(volHandle1).NotTo(gomega.BeEmpty()) @@ -2293,16 +2320,18 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { } framework.Logf("Fetching pod 2, pvc2 and pv2 details") + // pod2 details pod2, err := client.CoreV1().Pods(namespace).Get(ctx, ssPodsBeforeScaleDown.Items[1].Name, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // pvc2 details pvc2 := pod2.Spec.Volumes[0].PersistentVolumeClaim - pvclaim2, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvc2.ClaimName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // pv2 details pv2 := getPvFromClaim(client, statefulset.Namespace, pvc2.ClaimName) volHandle2 := pv2.Spec.CSI.VolumeHandle gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) @@ -2329,275 +2358,135 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { }() ginkgo.By("Create a volume snapshot - 1") - volumeSnapshot1, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx, - getVolumeSnapshotSpec(namespace, volumeSnapshotClass.Name, pvclaim1.Name), metav1.CreateOptions{}) + diskSize := "1Gi" + volumeSnapshot1, snapshotContent1, snapshotCreated1, + snapshotContentCreated1, snapshotId1, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvclaim1, volHandle1, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("Volume snapshot name is : %s", volumeSnapshot1.Name) defer func() { - if snapshotContentCreated { - framework.Logf("Deleting volume snapshot content") - err := snapc.SnapshotV1().VolumeSnapshotContents().Delete(ctx, - *volumeSnapshot1.Status.BoundVolumeSnapshotContentName, metav1.DeleteOptions{}) + if snapshotContentCreated1 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent1, snapc, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - if snapshotCreated { + if snapshotCreated1 { framework.Logf("Deleting volume snapshot") - err := snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, volumeSnapshot1.Name, - metav1.DeleteOptions{}) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot1.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot1.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() - ginkgo.By("Verify volume snapshot is created") - volumeSnapshot1, err = 
waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, volumeSnapshot1.Name) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotCreated = true - gomega.Expect(volumeSnapshot1.Status.RestoreSize.Cmp(resource.MustParse("1Gi"))).To(gomega.BeZero()) - - ginkgo.By("Verify volume snapshot content is created") - snapshotContent1, err := snapc.SnapshotV1().VolumeSnapshotContents().Get(ctx, - *volumeSnapshot1.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotContentCreated = true - gomega.Expect(*snapshotContent1.Status.ReadyToUse).To(gomega.BeTrue()) - - framework.Logf("Get volume snapshot ID from snapshot handle") - if vanillaCluster { - snapshothandle1 := *snapshotContent1.Status.SnapshotHandle - snapshotId1 = strings.Split(snapshothandle1, "+")[1] - - ginkgo.By("Query CNS and check the volume snapshot entry") - err = verifySnapshotIsCreatedInCNS(volHandle1, snapshotId1) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - } else if guestCluster { - snapshothandle1 := *snapshotContent1.Status.SnapshotHandle - snapshotId1, _, _, err = getSnapshotHandleFromSupervisorCluster(ctx, - snapshothandle1) - } - ginkgo.By("Create a volume snapshot - 2") - volumeSnapshot2, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx, - getVolumeSnapshotSpec(namespace, volumeSnapshotClass.Name, pvclaim2.Name), metav1.CreateOptions{}) + volumeSnapshot2, snapshotContent2, snapshotCreated2, + snapshotContentCreated2, snapshotId2, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvclaim2, volHandle2, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("Volume snapshot name is : %s", volumeSnapshot2.Name) - - var snapshotCreated2 bool - var snapshotContentCreated2 bool defer func() { if snapshotContentCreated2 { - framework.Logf("Deleting volume snapshot content") - err := snapc.SnapshotV1().VolumeSnapshotContents().Delete(ctx, - *volumeSnapshot2.Status.BoundVolumeSnapshotContentName, metav1.DeleteOptions{}) + err = deleteVolumeSnapshotContent(ctx, snapshotContent2, snapc, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } if snapshotCreated2 { framework.Logf("Deleting volume snapshot") - err := snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, volumeSnapshot2.Name, - metav1.DeleteOptions{}) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot2.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot2.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() - ginkgo.By("Verify volume snapshot is created") - volumeSnapshot2, err = waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, volumeSnapshot2.Name) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotCreated2 = true - gomega.Expect(volumeSnapshot2.Status.RestoreSize.Cmp(resource.MustParse("1Gi"))).To(gomega.BeZero()) + ginkgo.By(fmt.Sprintf("Scaling down statefulsets to number of Replica: %v", replicas-1)) + _, scaledownErr := fss.Scale(ctx, client, statefulset, replicas-1) + gomega.Expect(scaledownErr).NotTo(gomega.HaveOccurred()) + fss.WaitForStatusReadyReplicas(ctx, client, statefulset, replicas-1) + ssPodsAfterScaleDown := fss.GetPodList(ctx, client, statefulset) + gomega.Expect(ssPodsAfterScaleDown.Items).NotTo(gomega.BeEmpty(), + fmt.Sprintf("Unable to get list of 
Pods from the Statefulset: %v", statefulset.Name)) + gomega.Expect(len(ssPodsAfterScaleDown.Items) == int(replicas-1)).To(gomega.BeTrue(), + "Number of Pods in the statefulset should match with number of replicas") + replicas -= 1 - ginkgo.By("Verify volume snapshot content is created") - snapshotContent2, err := snapc.SnapshotV1().VolumeSnapshotContents().Get(ctx, - *volumeSnapshot2.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotContentCreated2 = true - gomega.Expect(*snapshotContent2.Status.ReadyToUse).To(gomega.BeTrue()) - - framework.Logf("Get volume snapshot ID from snapshot handle") - if vanillaCluster { - snapshothandle2 := *snapshotContent2.Status.SnapshotHandle - snapshotId2 = strings.Split(snapshothandle2, "+")[1] - - ginkgo.By("Query CNS and check the volume snapshot entry") - err = verifySnapshotIsCreatedInCNS(volHandle2, snapshotId2) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - } else if guestCluster { - snapshothandle2 := *snapshotContent2.Status.SnapshotHandle - snapshotId2, _, _, err = getSnapshotHandleFromSupervisorCluster(ctx, - snapshothandle2) - } - - ginkgo.By(fmt.Sprintf("Scaling down statefulsets to number of Replica: %v", replicas-1)) - _, scaledownErr := fss.Scale(ctx, client, statefulset, replicas-1) - gomega.Expect(scaledownErr).NotTo(gomega.HaveOccurred()) - fss.WaitForStatusReadyReplicas(ctx, client, statefulset, replicas-1) - ssPodsAfterScaleDown := fss.GetPodList(ctx, client, statefulset) - gomega.Expect(ssPodsAfterScaleDown.Items).NotTo(gomega.BeEmpty(), - fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name)) - gomega.Expect(len(ssPodsAfterScaleDown.Items) == int(replicas-1)).To(gomega.BeTrue(), - "Number of Pods in the statefulset should match with number of replicas") - replicas -= 1 - - var pvcToDelete *v1.PersistentVolumeClaim - var snapshotToBeDeleted *snapV1.VolumeSnapshot + /* We cannot delete a pvc if snapshot is attached to a volume, csi will not allow + for volume deletion and it will fail with below error + Error from server (Deleting volume with snapshots is not allowed): admission webhook "validation.csi.vsphere.vmware.com" + denied the request: Deleting volume with snapshots is not allowed */ - // var volId string - // Find the missing pod and check if the cnsvolumemetadata is deleted or not - if ssPodsAfterScaleDown.Items[0].Name == pod1.Name { - pvcToDelete = pvclaim2 - snapshotToBeDeleted = volumeSnapshot2 - - } else { - pvcToDelete = pvclaim1 - snapshotToBeDeleted = volumeSnapshot1 + ginkgo.By("Restoring a snapshot-1 to create a new volume and attach it to a new Pod") + pvclaim3, persistentVolumes3, pod3 := verifyVolumeRestoreOperation(ctx, client, + namespace, storageclass, volumeSnapshot1, diskSize, true) + volHandle3 := persistentVolumes3[0].Spec.CSI.VolumeHandle + if guestCluster { + volHandle3 = getVolumeIDFromSupervisorCluster(volHandle3) } - - ginkgo.By("Delete volume snapshot and verify the snapshot content is deleted") - err = snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, - snapshotToBeDeleted.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - framework.Logf("Wait till the volume snapshot content is deleted") - err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, - *snapshotToBeDeleted.Status.BoundVolumeSnapshotContentName) - if err != nil { + gomega.Expect(volHandle3).NotTo(gomega.BeEmpty()) + defer func() { + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace 
%s", pod3.Name, namespace)) + err = fpod.DeletePodWithWait(ctx, client, pod3) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - - err = fpv.DeletePersistentVolumeClaim(ctx, client, pvcToDelete.Name, namespace) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow CNS to sync with pandora", pandoraSyncWaitTime)) - time.Sleep(time.Duration(pandoraSyncWaitTime) * time.Second) - ginkgo.By("Create a new PVC") - pvcSpec := getPersistentVolumeClaimSpecWithStorageClass(namespace, "1Gi", sc, nil, v1.ReadWriteOnce) + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim3.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle3) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() - pvcSpec.Name = pvcToDelete.Name - pvclaim3, err := fpv.CreatePVC(ctx, client, namespace, pvcSpec) + ginkgo.By("Delete dynamic volume snapshot-1") + snapshotCreated1, snapshotContentCreated1, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot1, pandoraSyncWaitTime, volHandle1, snapshotId1) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By("Expecting the volume to bound") - newPV, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim3}, - framework.ClaimProvisionTimeout) + ginkgo.By("Delete dynamic volume snapshot-2") + snapshotCreated2, snapshotContentCreated2, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot2, pandoraSyncWaitTime, volHandle2, snapshotId2) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - volHandleOfNewPV := newPV[0].Spec.CSI.VolumeHandle - gomega.Expect(volHandleOfNewPV).NotTo(gomega.BeEmpty()) - - replicas += 1 - ginkgo.By(fmt.Sprintf("Scaling up statefulsets to number of Replica: %v", replicas)) - _, scaleupErr := fss.Scale(ctx, client, statefulset, replicas) - gomega.Expect(scaleupErr).NotTo(gomega.HaveOccurred()) - time.Sleep(5 * time.Minute) - fss.WaitForStatusReplicas(ctx, client, statefulset, replicas) - fss.WaitForStatusReadyReplicas(ctx, client, statefulset, replicas) - - ginkgo.By("Delete volume snapshot 1 and verify the snapshot content is deleted") - err = snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, - volumeSnapshot1.Name, metav1.DeleteOptions{}) - if err != nil { - if !apierrors.IsNotFound(err) { - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - } - snapshotCreated = false - - ginkgo.By("Delete volume snapshot 2 and verify the snapshot content is deleted") - err = snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, - volumeSnapshot2.Name, metav1.DeleteOptions{}) - if err != nil { - if !apierrors.IsNotFound(err) { - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - } - snapshotCreated2 = false - - framework.Logf("Wait till the volume snapshot content is deleted") - err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, - *volumeSnapshot1.Status.BoundVolumeSnapshotContentName) - if err != nil { - if !apierrors.IsNotFound(err) { - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - } - snapshotContentCreated = false - - framework.Logf("Wait till the volume snapshot content 1 is deleted") - err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, - *volumeSnapshot2.Status.BoundVolumeSnapshotContentName) - if err != nil { - if !apierrors.IsNotFound(err) { - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - } - snapshotContentCreated2 = false - - if !guestCluster { - ginkgo.By("Verify snapshot 1 entry is deleted from CNS") - err = 
-			if err != nil {
-				if !apierrors.IsNotFound(err) {
-					gomega.Expect(err).NotTo(gomega.HaveOccurred())
-				}
-			}
-
-			ginkgo.By("Verify snapshot 2 entry is deleted from CNS")
-			err = verifySnapshotIsDeletedInCNS(volHandle2, snapshotId2)
-			if err != nil {
-				if !apierrors.IsNotFound(err) {
-					gomega.Expect(err).NotTo(gomega.HaveOccurred())
-				}
-			}
-		}
	})

	/*
+	   Testcase-7
	   Volume deletion with existing snapshots
	   1. Create a sc, and a pvc using this sc
	   2. Create a dynamic snapshot using above pvc as source
	   3. Delete this pvc, expect the pvc to be deleted successfully
	   4. Underlying pv should not be deleted and should have a valid error
-	      calling out that the volume has active snapshots
-	      (note: the storageclass here is set to Delete retentionPolicy)
+	      calling out that the volume has active snapshots
+	      (note: the storageclass here is set to Delete retentionPolicy)
	   5. Expect VolumeFailedDelete error with an appropriate err-msg
	   6. Run cleanup - delete the snapshots and then delete pv
	*/
-	ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] Volume deletion with existing snapshots", ginkgo.Label(p0,
-		block, vanilla, snapshot, tkg, stable, negative), func() {
+	ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot][supervisor-snapshot] Volume deletion with existing snapshots",
+		ginkgo.Label(p0, block, vanilla, snapshot, tkg, wcp, stable, negative), func() {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

-		if vanillaCluster {
-			scParameters[scParamDatastoreURL] = datastoreURL
-		} else if guestCluster {
-			scParameters[svStorageClassName] = storagePolicyName
-		}
-
-		ginkgo.By("Create storage class and PVC")
-		storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, "")
+		ginkgo.By("Create Storage class")
+		storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, scName)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		defer func() {
-			err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
-			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			if vanillaCluster {
+				err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
+				gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			}
		}()

-		pvclaim, persistentvolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "",
+		ginkgo.By("Create PVC")
+		pvclaim, persistentvolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "",
			diskSize, storageclass, true)
		volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle
		if guestCluster {
			volHandle = getVolumeIDFromSupervisorCluster(volHandle)
		}
		gomega.Expect(volHandle).NotTo(gomega.BeEmpty())
-
		defer func() {
			err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace)
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
-			err = fpv.DeletePersistentVolume(ctx, client, persistentvolumes[0].Name)
-			gomega.Expect(err).NotTo(gomega.HaveOccurred())
			err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle)
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
		}()

@@ -2615,7 +2504,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {

		ginkgo.By("Create a dynamic volume snapshot")
		volumeSnapshot, snapshotContent, snapshotCreated,
-			snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass,
+			snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass,
			pvclaim, volHandle, diskSize, true)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
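+		// The helper above now returns seven values; the sixth is discarded with a
+		// blank identifier. A sketch of the assumed widened signature, inferred only
+		// from the call sites in this file (verify against tests/e2e/csi_snapshot_utils.go;
+		// the extra string return is presumably a CNS snapshot handle, and the final
+		// bool parameter name is illustrative):
+		//
+		//   func createDynamicVolumeSnapshot(ctx context.Context, namespace string,
+		//       snapc *snapclient.Clientset, volumeSnapshotClass *snapV1.VolumeSnapshotClass,
+		//       pvclaim *v1.PersistentVolumeClaim, volHandle string, diskSize string,
+		//       performCnsQueryVolumeSnapshot bool) (*snapV1.VolumeSnapshot,
+		//       *snapV1.VolumeSnapshotContent, bool, bool, string, string, error)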
		defer func() {
@@ -2648,22 +2537,15 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {
			volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())

-		ginkgo.By("Delete PV")
+		ginkgo.By("Delete PVC")
		err = fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-
		err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-
-		err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle)
-		if err != nil {
-			if !apierrors.IsNotFound(err) {
-				gomega.Expect(err).NotTo(gomega.HaveOccurred())
-			}
-		}
	})

	/*
+	   Testcase-8
	   Create a pre-provisioned snapshot using VolumeSnapshotContent as source
	   (use VSC which is auto-created by a dynamic provisioned snapshot)
	   1. create a sc, and pvc using this sc
@@ -2679,29 +2561,24 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

-		if vanillaCluster {
-			scParameters[scParamDatastoreURL] = datastoreURL
-		} else if guestCluster {
-			scParameters[svStorageClassName] = storagePolicyName
-		}
-
-		ginkgo.By("Create storage class and PVC")
-		storageclass, err := createStorageClass(client, scParameters, nil, "", "", true, "")
+		ginkgo.By("Create storage class")
+		storageclass, err := createStorageClass(client, scParameters, nil, "", "", true, scName)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-
		defer func() {
-			err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
-			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			if vanillaCluster {
+				err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
+				gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			}
		}()

+		ginkgo.By("Create PVC")
		pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace,
-			nil, "", diskSize, storageclass, true)
+			labelsMap, "", diskSize, storageclass, true)
		volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle
		if guestCluster {
			volHandle = getVolumeIDFromSupervisorCluster(volHandle)
		}
		gomega.Expect(volHandle).NotTo(gomega.BeEmpty())
-
		defer func() {
			err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace)
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
@@ -2721,7 +2598,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {

		ginkgo.By("Create a dynamic volume snapshot")
		volumeSnapshot, snapshotContent, snapshotCreated,
-			snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass,
+			snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass,
			pvclaim, volHandle, diskSize, true)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		defer func() {
@@ -2765,17 +2642,18 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {
	})

	/*
-	   Pre-provisioned snapshot using incorrect/non-existing static snapshot
-	   1. Create a sc, and pvc using this sc
-	   2. Create a snapshot for this pvc (use CreateSnapshot CNS API)
-	   3. Create a VolumeSnapshotContent CR using above snapshot-id, by passing the snapshotHandle
-	   4. Create a VolumeSnapshot using above content as source
-	   5. VolumeSnapshot and VolumeSnapshotContent should be created successfully and readToUse set to True
-	   6. Delete the snapshot created in step-2 (use deleteSnapshots CNS API)
-	   7. VolumeSnapshot and VolumeSnapshotContent will still have readyToUse set to True
-	   8. Restore: Create a volume using above pre-provisioned snapshot k8s object
-	      (note the snapshotHandle its pointing to has been deleted)
-	   9. Volume Create should fail with an appropriate error on k8s side
+	   Testcase-9
+	   Pre-provisioned snapshot using incorrect/non-existing static snapshot
+	   1. Create a sc, and pvc using this sc
+	   2. Create a snapshot for this pvc (use CreateSnapshot CNS API)
+	   3. Create a VolumeSnapshotContent CR using above snapshot-id, by passing the snapshotHandle
+	   4. Create a VolumeSnapshot using above content as source
+	   5. VolumeSnapshot and VolumeSnapshotContent should be created successfully and readyToUse set to True
+	   6. Delete the snapshot created in step-2 (use deleteSnapshots CNS API)
+	   7. VolumeSnapshot and VolumeSnapshotContent will still have readyToUse set to True
+	   8. Restore: Create a volume using above pre-provisioned snapshot k8s object
+	      (note the snapshotHandle it's pointing to has been deleted)
+	   9. Volume Create should fail with an appropriate error on k8s side
	*/
	ginkgo.It("[block-vanilla-snapshot] Pre-provisioned snapshot using incorrect/non-existing "+
		"static snapshot", ginkgo.Label(p0, block, vanilla, snapshot, negative), func() {
@@ -2788,7 +2666,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {
		var err error

		ginkgo.By("Create storage class and PVC")
-		scParameters[scParamDatastoreURL] = datastoreURL
		storageclass, pvclaim, err = createPVCAndStorageClass(ctx, client,
			namespace, nil, scParameters, diskSize, nil, "", false, "")
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
@@ -2924,12 +2801,13 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {
	})

	/*
+	   Testcase-10
	   Create a volume from a snapshot that is still not ready-to-use
	   1. Create a pre-provisioned snapshot pointing to a VolumeSnapshotContent
-	      which is still not provisioned (or does not exist)
+	      which is still not provisioned (or does not exist)
	   2. The snapshot will have status.readyToUse: false and snapshot is in Pending state
	   3. Create a volume using the above snapshot as source and ensure the provisioning fails with error:
-	      ProvisioningFailed | snapshot <> not bound
+	      ProvisioningFailed | snapshot <> not bound
	   4. pvc is stuck in Pending
	   5. Once the VolumeSnapshotContent is created, snapshot should have status.readyToUse: true
	   6. The volume should now get provisioned successfully
@@ -2947,7 +2825,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {
		var err error

		ginkgo.By("Create storage class and PVC")
-		scParameters[scParamDatastoreURL] = datastoreURL
		storageclass, pvclaim, err = createPVCAndStorageClass(ctx, client,
			namespace, nil, scParameters, diskSize, nil, "", false, "")
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
@@ -3080,46 +2957,43 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {
	})

	/*
-	   Snapshot workflow for deployments
-	   1. Create a deployment with 1 replicas (dep-1)
-	   2. Write a file on each replica's pvc
-	   3. Create snapshots on all pvcs of the deployments
-	   4. Create another deployment (dep-2) such that the pvc's of this deployment
-	      points to the pvc whose source is the snapshot created in step.3
-	   5. Delete dep-1
-	   6. The deployment should succeed and should have the file that was created in step.2
-	   7. Cleanup dep-1 pv snapshots and pvs, delete dep-2
+	   Testcase-18
+	   Snapshot workflow for deployments
+	   1. Create a deployment with 1 replica (dep-1)
+	   2. Write a file on each replica's pvc
+	   3. Create snapshots on all pvcs of the deployments
+	   4. Create another deployment (dep-2) such that the pvc's of this deployment
+	      points to the pvc whose source is the snapshot created in step.3
+	   5. Delete dep-1
+	   6. The deployment should succeed and should have the file that was created in step.2
+	   7. Cleanup dep-1 pv snapshots and pvs, delete dep-2
	*/
-	ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] Snapshot workflow for deployments", ginkgo.Label(p0,
-		block, vanilla, tkg, snapshot, stable), func() {
+
+	ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot][supervisor-snapshot] Snapshot workflow for deployments",
+		ginkgo.Label(p0, block, vanilla, tkg, snapshot, wcp, stable), func() {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

-		if vanillaCluster {
-			scParameters[scParamDatastoreURL] = datastoreURL
-		} else if guestCluster {
-			scParameters[svStorageClassName] = storagePolicyName
-		}
-
-		ginkgo.By("Create storage class and PVC")
+		ginkgo.By("Create storage class")
		storageclass, err := createStorageClass(client, scParameters, nil,
-			"", "", true, "")
+			"", "", true, scName)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-
		defer func() {
-			err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
-			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			if vanillaCluster {
+				err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
+				gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			}
		}()

-		pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "",
+		ginkgo.By("Create PVC")
+		pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "",
			diskSize, storageclass, true)
		volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle
		if guestCluster {
			volHandle = getVolumeIDFromSupervisorCluster(volHandle)
		}
		gomega.Expect(volHandle).NotTo(gomega.BeEmpty())
-
		defer func() {
			err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace)
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
@@ -3127,9 +3001,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
		}()

-		labelsMap := make(map[string]string)
-		labelsMap["app"] = "test"
-
		ginkgo.By("Creating a Deployment using pvc1")
		dep, err := createDeployment(ctx, client, 1, labelsMap, nil, namespace,
			[]*v1.PersistentVolumeClaim{pvclaim}, execRWXCommandPod1, false, busyBoxImageOnGcr)
@@ -3159,7 +3030,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {

		ginkgo.By("Create a dynamic volume snapshot")
		volumeSnapshot, snapshotContent, snapshotCreated,
-			snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass,
+			snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass,
			pvclaim, volHandle, diskSize, true)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		defer func() {
@@ -3184,7 +3055,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {
			v1.ReadWriteOnce, volumeSnapshot.Name, snapshotapigroup)
		pvclaim2, err := fpv.CreatePVC(ctx, client, namespace, pvcSpec)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-
		persistentvolume2, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim2},
			framework.ClaimProvisionTimeout)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
@@ -3205,7 +3075,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {

		ginkgo.By("Creating a new deployment from the restored pvc")
		dep2, err := createDeployment(ctx, client, 1, labelsMap2, nil, namespace,
-			[]*v1.PersistentVolumeClaim{pvclaim2}, "", false, busyBoxImageOnGcr)
+			[]*v1.PersistentVolumeClaim{pvclaim2}, execRWXCommandPod2, false, busyBoxImageOnGcr)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		defer func() {
			ginkgo.By("Delete Deployment-2")
@@ -3222,7 +3092,11 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {

		ginkgo.By("Verify the volume is accessible and Read/write is possible")
		output := readFileFromPod(namespace, pod2.Name, filePathPod1)
		gomega.Expect(strings.Contains(output, "Hello message from Pod1")).NotTo(gomega.BeFalse())
+
+		writeDataOnFileFromPod(namespace, pod2.Name, filePathPod1, "Hello message from Pod2")
+		output = readFileFromPod(namespace, pod2.Name, filePathPod1)
+		gomega.Expect(strings.Contains(output, "Hello message from Pod2")).NotTo(gomega.BeFalse())

		ginkgo.By("Delete dynamic volume snapshot")
		snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace,
@@ -3231,6 +3105,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {
	})

	/*
+	   Testcase-24
	   Volume resize of a volume having snapshots
	   1. Create a pvc (say size=2GB)
	   2. Resize this volume (to size=4GB) - should succeed - offline resize
	   3. Create a few snapshots for this volume - should succeed
	   4. Resize the volume (to say size=6GB) - this operation should fail and verify the error returned
	   5. Delete the snapshots
	   6. Run resize and it should succeed
	   7. Cleanup the pvc
	*/
-	ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] Volume offline resize of a volume "+
-		"having snapshots", ginkgo.Label(p0, block, vanilla, tkg, snapshot, stable, negative), func() {
+	ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot][supervisor-snapshot] Volume offline resize of a volume "+
+		"having snapshots", ginkgo.Label(p0, block, vanilla, tkg, snapshot, stable, wcp, negative), func() {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

-		if vanillaCluster {
-			scParameters[scParamDatastoreURL] = datastoreURL
-		} else if guestCluster {
-			scParameters[svStorageClassName] = storagePolicyName
-		}
+		ginkgo.By("Create storage class")
+		storageclass, err := createStorageClass(client, scParameters, nil, "", "", true, scName)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())

-		ginkgo.By("Create storage class and PVC")
-		storageclass, err := createStorageClass(client, scParameters, nil, "", "", true, "")
+		var allowExpansion = true
+		storageclass.AllowVolumeExpansion = &allowExpansion
+		storageclass, err = client.StorageV1().StorageClasses().Update(ctx, storageclass, metav1.UpdateOptions{})
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		defer func() {
-			err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
-			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			if vanillaCluster {
+				err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
+				gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			}
		}()

-		pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "",
+		ginkgo.By("Create PVC")
+		pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "",
			diskSize, storageclass, true)
		volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle
		if guestCluster {
			volHandle = getVolumeIDFromSupervisorCluster(volHandle)
		}
		gomega.Expect(volHandle).NotTo(gomega.BeEmpty())
-
		defer func() {
			err = fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace)
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
@@ -3277,7 +3153,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {
			}
		}()

-		ginkgo.By("Expanding current pvc")
+		ginkgo.By("Expanding the current pvc")
		currentPvcSize := pvclaim.Spec.Resources.Requests[v1.ResourceStorage]
		newSize := currentPvcSize.DeepCopy()
		newSize.Add(resource.MustParse("4Gi"))
@@ -3286,7 +3162,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {
		pvclaim, err = expandPVCSize(pvclaim, newSize, client)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		gomega.Expect(pvclaim).NotTo(gomega.BeNil())
-
		pvcSize := pvclaim.Spec.Resources.Requests[v1.ResourceStorage]
		if pvcSize.Cmp(newSize) != 0 {
			framework.Failf("error updating pvc size %q", pvclaim.Name)
@@ -3304,7 +3179,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {
		ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volHandle))
		queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volHandle)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-
		if len(queryResult.Volumes) == 0 {
			err = fmt.Errorf("queryCNSVolumeWithResult returned no volume")
		}
@@ -3331,7 +3205,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {

		ginkgo.By("Create a dynamic volume snapshot")
		volumeSnapshot, snapshotContent, snapshotCreated,
-			snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass,
+			snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass,
			pvclaim, volHandle, newDiskSize, true)
		defer func() {
			if snapshotContentCreated {
@@ -3406,44 +3280,46 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {
	})

	/*
-	   Volume online resize, and snapshots
-	   1. Create a pvc and attach the pvc to a pod
-	   2. Resize the volume (to 4GB frm 2GB) - should succeed - online resize
-	   3. Create a few snapshots for this volume - should succeed
-	   4. Resize the volume (to say size=6GB) - this operation should fail and verify the error returned
-	   5. Delete the snapshots
-	   6. Run resize and it should succeed
-	   7. Cleanup the pvc
+	   Testcase-25
+	   Volume online resize, and snapshots
+	   1. Create a pvc and attach the pvc to a pod
+	   2. Resize the volume (to 4GB from 2GB) - should succeed - online resize
+	   3. Create a few snapshots for this volume - should succeed
+	   4. Resize the volume (to say size=6GB) - this operation should fail and verify the error returned
+	   5. Delete the snapshots
+	   6. Run resize and it should succeed
+	   7. Cleanup the pvc
	*/
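+	/* A sketch of the step-4 expectation above (an illustration under assumptions,
+	   not part of this change): while a snapshot exists, the PVC edit itself is
+	   admitted, but the controller-side expansion is expected to fail, so a
+	   follow-up wait on the resize should error out. Helper names are the ones
+	   used elsewhere in this file:
+
+	       currentPvcSize := pvclaim.Spec.Resources.Requests[v1.ResourceStorage]
+	       newSize := currentPvcSize.DeepCopy()
+	       newSize.Add(resource.MustParse("6Gi"))
+	       pvclaim, err = expandPVCSize(pvclaim, newSize, client) // PVC edit accepted
+	       gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	       _, err = waitForFSResize(pvclaim, client) // expansion should not complete
+	       gomega.Expect(err).To(gomega.HaveOccurred())
+	*/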
-	ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] Volume online resize of a volume having "+
+	ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot][supervisor-snapshot] Volume online resize of a volume having "+
		"snapshots", ginkgo.Label(p0, block, vanilla, tkg, snapshot, stable, negative), func() {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

-		if vanillaCluster {
-			scParameters[scParamDatastoreURL] = datastoreURL
-		} else if guestCluster {
-			scParameters[svStorageClassName] = storagePolicyName
-		}
-
-		ginkgo.By("Create storage class and PVC")
-		storageclass, err := createStorageClass(client, scParameters, nil, "", "", true, "")
+		ginkgo.By("Create storage class")
+		storageclass, err := createStorageClass(client, scParameters, nil, "", "", true, scName)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		defer func() {
-			err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
-			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			if vanillaCluster {
+				err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
+				gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			}
		}()

-		pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "",
+		ginkgo.By("Create PVC")
+		pvclaim, pvs := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "",
			diskSize, storageclass, true)
-		volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle
+		volHandle := pvs[0].Spec.CSI.VolumeHandle
		if guestCluster {
			volHandle = getVolumeIDFromSupervisorCluster(volHandle)
		}
		gomega.Expect(volHandle).NotTo(gomega.BeEmpty())
-
		defer func() {
			err = fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace)
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
@@ -3457,30 +3333,38 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {
		pod, err := createPod(ctx, client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false,
			execRWXCommandPod1)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		defer func() {
-			// Delete POD
			ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace))
			err = fpod.DeletePodWithWait(ctx, client, pod)
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
		}()

		var vmUUID string
-		nodeName := pod.Spec.NodeName
+		var exists bool

		if vanillaCluster {
			vmUUID = getNodeUUID(ctx, client, pod.Spec.NodeName)
		} else if guestCluster {
			vmUUID, err = getVMUUIDFromNodeName(pod.Spec.NodeName)
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		} else if supervisorCluster {
+			annotations := pod.Annotations
+			vmUUID, exists = annotations[vmUUIDLabel]
+			gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("Pod doesn't have %s annotation", vmUUIDLabel))
+			_, err := e2eVSphere.getVMByUUID(ctx, vmUUID)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
		}
-
-		ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volHandle, nodeName))
-		isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, volHandle, vmUUID)
+		isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, pvs[0].Spec.CSI.VolumeHandle, vmUUID)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node")

-		// Modify PVC spec to trigger volume expansion
+		if !guestCluster {
+			ginkgo.By("Verify volume metadata for deployment pod, pvc and pv")
+			err = waitAndVerifyCnsVolumeMetadata(ctx, volHandle, pvclaim, pvs[0], pod)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		}
+
+		ginkgo.By("Modify the PVC spec to enable online volume expansion when no snapshot exists for this PVC")
		currentPvcSize := pvclaim.Spec.Resources.Requests[v1.ResourceStorage]
		newSize := currentPvcSize.DeepCopy()
		newSize.Add(resource.MustParse("4Gi"))
@@ -3525,7 +3409,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {

		ginkgo.By("Create a dynamic volume snapshot")
		volumeSnapshot, snapshotContent, snapshotCreated,
-			snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass,
+			snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass,
			pvclaim, volHandle, newDiskSize, true)
		defer func() {
			if snapshotContentCreated {
@@ -3544,7 +3428,8 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {
			}
		}()

-		// Modify PVC spec to trigger volume expansion
+		ginkgo.By("Modify the PVC spec to enable online volume expansion when " +
+			"snapshot exists for this PVC, volume expansion should fail")
		currentPvcSize = claims.Spec.Resources.Requests[v1.ResourceStorage]
		newSize = currentPvcSize.DeepCopy()
		newSize.Add(resource.MustParse("6Gi"))
@@ -3622,47 +3507,44 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {
	})

	/*
-	   Snapshot restore while the Host is Down
-
-	   1. Create a sc (vsan-default) and create a pvc using this sc
-	   2. Check the host on which the pvc is placed
-	   3. Create snapshots for this pvc
-	   4. verify snapshots are created successfully and bound
-	   5. bring the host on which the pvc was placed down
-	   6. use the snapshot as source to create a new volume and
-	      it should succeed and pvc should come to Bound state
-	   7. bring the host back up
-	   8. cleanup the snapshots, restore-pvc and source-pvc
+	   Testcase-35
+	   Snapshot restore while the Host is Down
+
+	   1. Create a sc (vsan-default) and create a pvc using this sc
+	   2. Check the host on which the pvc is placed
+	   3. Create snapshots for this pvc
+	   4. verify snapshots are created successfully and bound
+	   5. bring the host on which the pvc was placed down
+	   6. use the snapshot as source to create a new volume and
+	      it should succeed and pvc should come to Bound state
+	   7. bring the host back up
+	   8. cleanup the snapshots, restore-pvc and source-pvc
	*/
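+	/* The disruptive window in this testcase is bracketed by the hostd stop/start
+	   helpers used in the test body below; a sketch of the intended pattern
+	   (helper names as referenced in this test, everything else illustrative):
+
+	       stopHostDOnHost(ctx, hostIP)
+	       defer startHostDOnHost(ctx, hostIP) // always bring the host back, even on failure
+	*/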
-	ginkgo.It("[block-vanilla-snapshot] Snapshot restore while the Host "+
+
+	ginkgo.It("[block-vanilla-snapshot][supervisor-snapshot] Snapshot restore while the Host "+
		"is Down", ginkgo.Label(p0, block, vanilla, snapshot, disruptive), func() {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
-		var storageclass *storagev1.StorageClass
-		var pvclaim *v1.PersistentVolumeClaim
-		var pvclaims []*v1.PersistentVolumeClaim
-		var err error
-		var snapshotCreated = false

-		ginkgo.By("Create storage class and PVC")
-		scParameters[scParamDatastoreURL] = datastoreURL
-		storageclass, pvclaim, err = createPVCAndStorageClass(ctx, client,
-			namespace, nil, scParameters, diskSize, nil, "", false, "")
+		ginkgo.By("Create storage class")
+		storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, scName)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		defer func() {
-			err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
-			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			if vanillaCluster {
+				err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
+				gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			}
		}()

-		ginkgo.By("Expect claim to provision volume successfully")
-		pvclaims = append(pvclaims, pvclaim)
-		persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-		volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle
+		ginkgo.By("Create PVC")
+		pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "",
+			diskSize, storageclass, true)
+		volHandle = persistentVolumes[0].Spec.CSI.VolumeHandle
+		if guestCluster {
+			volHandle = getVolumeIDFromSupervisorCluster(volHandle)
+		}
		gomega.Expect(volHandle).NotTo(gomega.BeEmpty())
-
		defer func() {
			err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace)
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
@@ -3670,61 +3552,42 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
		}()

-		// Verify using CNS Query API if VolumeID retrieved from PV is present.
-		ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volHandle))
-		queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volHandle)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-		gomega.Expect(queryResult.Volumes).ShouldNot(gomega.BeEmpty())
-		gomega.Expect(queryResult.Volumes[0].VolumeId.Id).To(gomega.Equal(volHandle))

		ginkgo.By("Create volume snapshot class")
-		volumeSnapshotClass, err := snapc.SnapshotV1().VolumeSnapshotClasses().Create(ctx,
-			getVolumeSnapshotClassSpec(snapV1.DeletionPolicy("Delete"), nil), metav1.CreateOptions{})
+		volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		defer func() {
-			err := snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx,
-				volumeSnapshotClass.Name, metav1.DeleteOptions{})
-			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			if vanillaCluster {
+				err = snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name,
+					metav1.DeleteOptions{})
+				gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			}
		}()

-		ginkgo.By("Create a volume snapshot")
-		snapshot1, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx,
-			getVolumeSnapshotSpec(namespace, volumeSnapshotClass.Name, pvclaim.Name), metav1.CreateOptions{})
+		ginkgo.By("Create a dynamic volume snapshot")
+		volumeSnapshot, snapshotContent, snapshotCreated,
+			snapshotContentCreated, _, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass,
+			pvclaim, volHandle, diskSize, true)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-		framework.Logf("Volume snapshot name is : %s", snapshot1.Name)
		defer func() {
+			if snapshotContentCreated {
+				err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime)
+				gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			}
+			if snapshotCreated {
				framework.Logf("Deleting volume snapshot")
-				deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, snapshot1.Name, pandoraSyncWaitTime)
+				deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime)
+
+				framework.Logf("Wait till the volume snapshot is deleted")
+				err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc,
+					*volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime)
+				gomega.Expect(err).NotTo(gomega.HaveOccurred())
			}
		}()

-		ginkgo.By("Verify volume snapshot is Ready to use")
-		snapshot1_updated, err := waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, snapshot1.Name)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-		snapshotCreated = true
-		gomega.Expect(snapshot1_updated.Status.RestoreSize.Cmp(resource.MustParse(diskSize))).To(gomega.BeZero())
-
-		ginkgo.By("Verify volume snapshot content is created")
-		snapshotContent1, err := snapc.SnapshotV1().VolumeSnapshotContents().Get(ctx,
-			*snapshot1_updated.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{})
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-
-		gomega.Expect(*snapshotContent1.Status.ReadyToUse).To(gomega.BeTrue())
-
-		framework.Logf("Get volume snapshot ID from snapshot handle")
-		snapshothandle := *snapshotContent1.Status.SnapshotHandle
-		snapshotId := strings.Split(snapshothandle, "+")[1]
-
-		ginkgo.By("Query CNS and check the volume snapshot entry")
-		err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())

		ginkgo.By("Identify the host on which the PV resides")
-		framework.Logf("pvName %v", persistentvolumes[0].Name)
-		vsanObjuuid := VsanObjIndentities(ctx, &e2eVSphere, persistentvolumes[0].Name)
+		framework.Logf("pvName %v", persistentVolumes[0].Name)
+		vsanObjuuid := VsanObjIndentities(ctx, &e2eVSphere, persistentVolumes[0].Name)
		framework.Logf("vsanObjuuid %v", vsanObjuuid)
		gomega.Expect(vsanObjuuid).NotTo(gomega.BeNil())
@@ -3738,7 +3601,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {

		ginkgo.By("Stop hostd service on the host on which the PV is present")
		stopHostDOnHost(ctx, hostIP)
-
		defer func() {
			ginkgo.By("Start hostd service on the host on which the PV is present")
			startHostDOnHost(ctx, hostIP)
@@ -3746,7 +3608,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {
		ginkgo.By("Create PVC from snapshot")
		pvcSpec := getPersistentVolumeClaimSpecWithDatasource(namespace, diskSize, storageclass, nil,
-			v1.ReadWriteOnce, snapshot1.Name, snapshotapigroup)
+			v1.ReadWriteOnce, volumeSnapshot.Name, snapshotapigroup)
		pvclaim2, err := fpv.CreatePVC(ctx, client, namespace, pvcSpec)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
@@ -3755,8 +3617,10 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {
			[]*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionTimeout)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		volHandle2 := persistentvolumes2[0].Spec.CSI.VolumeHandle
+		if guestCluster {
+			volHandle2 = getVolumeIDFromSupervisorCluster(volHandle2)
+		}
		gomega.Expect(volHandle2).NotTo(gomega.BeEmpty())
-
		defer func() {
			err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim2.Name, namespace)
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
@@ -3769,6 +3633,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {
	})

	/*
+	   Testcase-34
	   VC reboot with deployment pvcs having snapshot
	   1. Create a sc and create 30 pvc's using this sc
	   2. Create a deployment using 3 replicas and pvc's pointing to above
	   8. Ensure the data written in step-4 is intact
	   9. Delete both deployments and the pvcs
	*/
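+	/* After the VC reboot the test below waits on each snapshot with
+	   waitForVolumeSnapshotReadyToUse. A sketch of what such a poll is assumed
+	   to do (the actual helper lives in this suite and may differ; snapshotName
+	   is a placeholder):
+
+	       err := wait.PollUntilContextTimeout(ctx, poll, pollTimeout, true,
+	           func(ctx context.Context) (bool, error) {
+	               snap, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Get(ctx,
+	                   snapshotName, metav1.GetOptions{})
+	               if err != nil {
+	                   return false, err
+	               }
+	               return snap.Status != nil && snap.Status.ReadyToUse != nil &&
+	                   *snap.Status.ReadyToUse, nil
+	           })
+	*/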
-	ginkgo.It("[block-vanilla-snapshot][tkg-snapshot] VC reboot with deployment pvcs "+
+	ginkgo.It("[block-vanilla-snapshot][tkg-snapshot][supervisor-snapshot] VC reboot with deployment pvcs "+
		"having snapshot", ginkgo.Label(p0, block, vanilla, tkg, snapshot, disruptive, flaky), func() {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
-		var pvclaim *v1.PersistentVolumeClaim
		var pvclaims []*v1.PersistentVolumeClaim
		var restoredpvclaims []*v1.PersistentVolumeClaim
		var volumesnapshots []*snapV1.VolumeSnapshot
		var volumesnapshotsReadytoUse []*snapV1.VolumeSnapshot
+		var snapshotContents []*snapV1.VolumeSnapshotContent

-		ginkgo.By("Create storage class and PVC")
-		if vanillaCluster {
-			scParameters[scParamDatastoreURL] = datastoreURL
-		} else if guestCluster {
-			scParameters[svStorageClassName] = storagePolicyName
-		}
-
-		curtime := time.Now().Unix()
-		randomValue := rand.Int()
-		val := strconv.FormatInt(int64(randomValue), 10)
-		val = string(val[1:3])
-		curtimestring := strconv.FormatInt(curtime, 10)
-		scName := "snapshot" + curtimestring + val
+		ginkgo.By("Create storage class")
		storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, scName)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		defer func() {
-			err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
-			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			if vanillaCluster {
+				err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
+				gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			}
		}()

		ginkgo.By("Creating PVCs using the Storage Class")
-		framework.Logf("VOLUME_OPS_SCALE is set to %v", 5)
		for i := 0; i < 5; i++ {
			framework.Logf("Creating pvc%v", i)
			accessMode := v1.ReadWriteOnce
-			pvclaim, err = createPVC(ctx, client, namespace, nil, "", storageclass, accessMode)
+			pvclaim, err := createPVC(ctx, client, namespace, nil, "", storageclass, accessMode)
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
			pvclaims = append(pvclaims, pvclaim)
		}

		ginkgo.By("Expect claim to provision volume successfully")
-		persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout)
+		_, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-		volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle
-		gomega.Expect(volHandle).NotTo(gomega.BeEmpty())
		defer func() {
-			for _, claim := range pvclaims {
-				err := fpv.DeletePersistentVolumeClaim(ctx, client, claim.Name, namespace)
+			for _, pvclaim := range pvclaims {
+				pv := getPvFromClaim(client, pvclaim.Namespace, pvclaim.Name)
+				volHandle = pv.Spec.CSI.VolumeHandle
+				if guestCluster {
+					volHandle = getVolumeIDFromSupervisorCluster(volHandle)
+				}
+				err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace)
+				gomega.Expect(err).NotTo(gomega.HaveOccurred())
+				err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle)
				gomega.Expect(err).NotTo(gomega.HaveOccurred())
			}
		}()

		labelsMap := make(map[string]string)
-		labelsMap["app"] = "test"
-		ginkgo.By("Creating a Deployment using pvc1")
+		labelsMap["app1"] = "test1"

+		ginkgo.By("Creating a Deployment with replica count 1 using pvcs")
		dep, err := createDeployment(ctx, client, 1, labelsMap, nil, namespace,
			pvclaims, execRWXCommandPod1, false, busyBoxImageOnGcr)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		defer func() {
			ginkgo.By("Delete Deployment")
			err := client.AppsV1().Deployments(namespace).Delete(ctx, dep.Name, metav1.DeleteOptions{})
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
		}()

-		ginkgo.By("Wait for deployment pods to be up and running")
+		ginkgo.By("Wait for deployment pod to be up and running")
		pods, err := fdep.GetPodsForDeployment(ctx, client, dep)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		pod := pods.Items[0]
@@ -3889,11 +3747,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {
			// After reboot.
			bootstrap()
-
-			framework.Logf("Deleting volume snapshot")
-			for _, snapshot := range volumesnapshots {
-				deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, snapshot.Name, pandoraSyncWaitTime)
-			}
		}()

		ginkgo.By("Rebooting VC")
@@ -3940,9 +3793,25 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {
				*snaps.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{})
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
			gomega.Expect(*snapshotContent.Status.ReadyToUse).To(gomega.BeTrue())
+			snapshotContents = append(snapshotContents, snapshotContent)
		}
+		defer func() {
+			framework.Logf("Deleting volume snapshot")
+			for i := 0; i < 5; i++ {
+				err = deleteVolumeSnapshotContent(ctx, snapshotContents[i], snapc, pandoraSyncWaitTime)
+				gomega.Expect(err).NotTo(gomega.HaveOccurred())

-		ginkgo.By("Create a PVC using the snapshot created above")
+				framework.Logf("Deleting volume snapshot")
+				deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumesnapshots[i].Name, pandoraSyncWaitTime)
+
+				framework.Logf("Wait till the volume snapshot is deleted")
+				err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc,
+					*volumesnapshots[i].Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime)
+				gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			}
+		}()
+
+		ginkgo.By("Create PVCs using the snapshot created above")
		for _, snapshot := range volumesnapshots {
			pvcSpec := getPersistentVolumeClaimSpecWithDatasource(namespace, diskSize, storageclass, nil,
				v1.ReadWriteOnce, snapshot.Name, snapshotapigroup)
@@ -3960,8 +3829,15 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {

		defer func() {
			for _, restoredpvc := range restoredpvclaims {
+				pv := getPvFromClaim(client, restoredpvc.Namespace, restoredpvc.Name)
+				volHandle = pv.Spec.CSI.VolumeHandle
+				if guestCluster {
+					volHandle = getVolumeIDFromSupervisorCluster(volHandle)
+				}
				err := fpv.DeletePersistentVolumeClaim(ctx, client, restoredpvc.Name, namespace)
				gomega.Expect(err).NotTo(gomega.HaveOccurred())
+				err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle)
+				gomega.Expect(err).NotTo(gomega.HaveOccurred())
			}
		}()

@@ -3981,9 +3857,8 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {
		labelsMap2["app2"] = "test2"

		dep2, err := createDeployment(ctx, client, 1, labelsMap2, nil, namespace,
-			restoredpvclaims, "", false, busyBoxImageOnGcr)
+			restoredpvclaims, execCommand, false, busyBoxImageOnGcr)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		defer func() {
			ginkgo.By("Delete Deployment-2")
			err := client.AppsV1().Deployments(namespace).Delete(ctx, dep2.Name, metav1.DeleteOptions{})
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
		}()
@@ -4003,6 +3878,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {
	})

	/*
+	   Testcase-32
	   VC password reset during snapshot creation
	   1. Create a sc and pvc using this sc
	   2. Create a volume snapshot using pvc as source
	   8. Delete snapshot
	   9. Cleanup pvc/sc
	*/
-	ginkgo.It("[block-vanilla-snapshot] VC password reset during snapshot creation", ginkgo.Label(p1, block,
+
+	ginkgo.It("[block-vanilla-snapshot][supervisor-snapshot] VC password reset during snapshot creation", ginkgo.Label(p1, block,
		vanilla, snapshot, disruptive), func() {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
-		var storageclass *storagev1.StorageClass
-		var pvclaim *v1.PersistentVolumeClaim
-		var pvclaims []*v1.PersistentVolumeClaim
-		var err error
-		var snapshotCreated = false
-		var snapshot3Created = false
-		nimbusGeneratedVcPwd := GetAndExpectStringEnvVar(nimbusVcPwd)

-		ginkgo.By("Create storage class and PVC")
-		scParameters[scParamDatastoreURL] = datastoreURL
-		storageclass, pvclaim, err = createPVCAndStorageClass(ctx, client,
-			namespace, nil, scParameters, diskSize, nil, "", false, "")
+		ginkgo.By("Create storage class")
+		storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, scName)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		defer func() {
-			err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
-			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			if vanillaCluster {
+				err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
+				gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			}
		}()

-		ginkgo.By("Expect claim to provision volume successfully")
-		pvclaims = append(pvclaims, pvclaim)
-		persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, (2 * framework.ClaimProvisionTimeout))
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-		volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle
+		ginkgo.By("Create PVC")
+		pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "",
+			diskSize, storageclass, true)
+		volHandle = persistentVolumes[0].Spec.CSI.VolumeHandle
+		if guestCluster {
+			volHandle = getVolumeIDFromSupervisorCluster(volHandle)
+		}
		gomega.Expect(volHandle).NotTo(gomega.BeEmpty())
-
		defer func() {
			err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace)
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
@@ -4052,170 +3922,164 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
		}()

-		// Verify using CNS Query API if VolumeID retrieved from PV is present.
-		ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volHandle))
-		queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volHandle)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-		gomega.Expect(queryResult.Volumes).ShouldNot(gomega.BeEmpty())
-		gomega.Expect(queryResult.Volumes[0].VolumeId.Id).To(gomega.Equal(volHandle))

		ginkgo.By("Create volume snapshot class")
-		volumeSnapshotClass, err := snapc.SnapshotV1().VolumeSnapshotClasses().Create(ctx,
-			getVolumeSnapshotClassSpec(snapV1.DeletionPolicy("Delete"), nil), metav1.CreateOptions{})
+		volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		defer func() {
-			err := snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx,
-				volumeSnapshotClass.Name, metav1.DeleteOptions{})
-			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			if vanillaCluster {
+				err = snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name,
+					metav1.DeleteOptions{})
+				gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			}
		}()

-		ginkgo.By("Create a volume snapshot")
-		snapshot1, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx,
-			getVolumeSnapshotSpec(namespace, volumeSnapshotClass.Name, pvclaim.Name), metav1.CreateOptions{})
+		ginkgo.By("Create a dynamic volume snapshot")
+		volumeSnapshot1, snapshotContent1, snapshotCreated1,
+			snapshotContentCreated1, _, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass,
+			pvclaim, volHandle, diskSize, true)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-		framework.Logf("Volume snapshot name is : %s", snapshot1.Name)
		defer func() {
-			if snapshotCreated {
-				framework.Logf("Deleting volume snapshot")
-				deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, snapshot1.Name, pandoraSyncWaitTime)
+			if snapshotContentCreated1 {
+				err = deleteVolumeSnapshotContent(ctx, snapshotContent1, snapc, pandoraSyncWaitTime)
+				gomega.Expect(err).NotTo(gomega.HaveOccurred())
			}
-		}()

-		ginkgo.By("Verify volume snapshot is Ready to use")
-		snapshot1_updated, err := waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, snapshot1.Name)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-		snapshotCreated = true
-		gomega.Expect(snapshot1_updated.Status.RestoreSize.Cmp(resource.MustParse(diskSize))).To(gomega.BeZero())
+			if snapshotCreated1 {
+				framework.Logf("Deleting volume snapshot")
+				deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot1.Name, pandoraSyncWaitTime)

-		ginkgo.By("Verify volume snapshot content is created")
-		snapshotContent1, err := snapc.SnapshotV1().VolumeSnapshotContents().Get(ctx,
-			*snapshot1_updated.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{})
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-		gomega.Expect(*snapshotContent1.Status.ReadyToUse).To(gomega.BeTrue())
+				framework.Logf("Wait till the volume snapshot is deleted")
+				err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc,
+					*volumeSnapshot1.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime)
+				gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			}
+		}()

-		framework.Logf("Get volume snapshot ID from snapshot handle")
-		snapshothandle := *snapshotContent1.Status.SnapshotHandle
-		snapshotId := strings.Split(snapshothandle, "+")[1]
+		ginkgo.By("Fetching the username and password of the current vcenter session from secret")
+		var newPassword, originalConf, username, nimbusGeneratedVcPwd, originalPassword string
+		var vsphereCfg e2eTestConfig
+		var secret *v1.Secret
+		var originalVCPasswordChanged bool

-		ginkgo.By("Query CNS and check the volume snapshot entry")
-		err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		if vanillaCluster {
+			secret, err = c.CoreV1().Secrets(csiSystemNamespace).Get(ctx, configSecret, metav1.GetOptions{})
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())

-		ginkgo.By("Fetching the username and password of the current vcenter session from secret")
-		secret, err := c.CoreV1().Secrets(csiSystemNamespace).Get(ctx, configSecret, metav1.GetOptions{})
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			originalConf = string(secret.Data[vSphereCSIConf])
+			vsphereCfg, err = readConfigFromSecretString(originalConf)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())

-		originalConf := string(secret.Data[vSphereCSIConf])
-		vsphereCfg, err := readConfigFromSecretString(originalConf)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			ginkgo.By(fmt.Sprintln("Changing password on the vCenter host"))
+			nimbusGeneratedVcPwd = GetAndExpectStringEnvVar(nimbusVcPwd)
+			username = vsphereCfg.Global.User
+			originalPassword = vsphereCfg.Global.Password
+			newPassword = e2eTestPassword
+			ginkgo.By(fmt.Sprintf("Original password %s, new password %s", originalPassword, newPassword))
+			err = invokeVCenterChangePassword(ctx, username, nimbusGeneratedVcPwd, newPassword, vcAddress, clientIndex)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			originalVCPasswordChanged = true
+			defer func() {
+				if originalVCPasswordChanged {
+					ginkgo.By("Reverting the password change")
+					err = invokeVCenterChangePassword(ctx, username, nimbusGeneratedVcPwd, originalPassword,
+						vcAddress, clientIndex)
+					gomega.Expect(err).NotTo(gomega.HaveOccurred())
+				}
+			}()
+		} else if supervisorCluster {
+			ginkgo.By("Perform password rotation on the supervisor")
+			csiNamespace := GetAndExpectStringEnvVar(envCSINamespace)
+			passwordRotated, err := performPasswordRotationOnSupervisor(client, ctx, csiNamespace, vcAddress)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			gomega.Expect(passwordRotated).To(gomega.BeTrue())
+		}

-		ginkgo.By(fmt.Sprintln("Changing password on the vCenter host"))
-		username := vsphereCfg.Global.User
-		originalPassword := vsphereCfg.Global.Password
-		newPassword := e2eTestPassword
-		ginkgo.By(fmt.Sprintf("Original password %s, new password %s", originalPassword, newPassword))
-		err = invokeVCenterChangePassword(ctx, username, nimbusGeneratedVcPwd, newPassword, vcAddress, clientIndex)
+		ginkgo.By("Create a volume snapshot-2 and verify volume snapshot-2 " +
+			"creation succeeds with previous csi session")
+		volumeSnapshot2, snapshotContent2, snapshotCreated2,
+			snapshotContentCreated2, _, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass,
+			pvclaim, volHandle, diskSize, true)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-		originalVCPasswordChanged := true
		defer func() {
-			if originalVCPasswordChanged {
-				ginkgo.By("Reverting the password change")
-				err = invokeVCenterChangePassword(ctx, username, nimbusGeneratedVcPwd, originalPassword,
-					vcAddress, clientIndex)
+			if snapshotContentCreated2 {
+				err = deleteVolumeSnapshotContent(ctx, snapshotContent2, snapc, pandoraSyncWaitTime)
				gomega.Expect(err).NotTo(gomega.HaveOccurred())
			}
-		}()

-		ginkgo.By("Create a volume snapshot2")
-		snapshot2, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx,
-			getVolumeSnapshotSpec(namespace, volumeSnapshotClass.Name, pvclaim.Name), metav1.CreateOptions{})
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-		snapshot2Created := true
+			if snapshotCreated2 {
+				framework.Logf("Deleting volume snapshot")
+				deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot2.Name, pandoraSyncWaitTime)

-		defer func() {
-			if snapshot2Created {
-				deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, snapshot2.Name, pandoraSyncWaitTime)
+				framework.Logf("Wait till the volume snapshot is deleted")
+				err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc,
+					*volumeSnapshot2.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime)
+				gomega.Expect(err).NotTo(gomega.HaveOccurred())
			}
		}()

-		ginkgo.By("Verify volume snapshot 2 is creation succeeds with previous csi session")
-		snapshot2, err = waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, snapshot2.Name)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-		framework.Logf("Snapshot details is %+v", snapshot2)
-
-		ginkgo.By("Modifying the password in the secret")
-		vsphereCfg.Global.Password = newPassword
-		modifiedConf, err := writeConfigToSecretString(vsphereCfg)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-
-		ginkgo.By("Updating the secret to reflect the new password")
-		secret.Data[vSphereCSIConf] = []byte(modifiedConf)
-		_, err = c.CoreV1().Secrets(csiSystemNamespace).Update(ctx, secret, metav1.UpdateOptions{})
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-
-		defer func() {
-			ginkgo.By("Reverting the secret change back to reflect the original password")
-			currentSecret, err := c.CoreV1().Secrets(csiSystemNamespace).Get(ctx, configSecret, metav1.GetOptions{})
+		if vanillaCluster {
+			ginkgo.By("Modifying the password in the secret")
+			vsphereCfg.Global.Password = newPassword
+			modifiedConf, err := writeConfigToSecretString(vsphereCfg)
			gomega.Expect(err).NotTo(gomega.HaveOccurred())

-			currentSecret.Data[vSphereCSIConf] = []byte(originalConf)
-			_, err = c.CoreV1().Secrets(csiSystemNamespace).Update(ctx, currentSecret, metav1.UpdateOptions{})
+			ginkgo.By("Updating the secret to reflect the new password")
+			secret.Data[vSphereCSIConf] = []byte(modifiedConf)
+			_, err = c.CoreV1().Secrets(csiSystemNamespace).Update(ctx, secret, metav1.UpdateOptions{})
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
-		}()

-		ginkgo.By("Create a volume snapshot3")
-		snapshot3, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx,
-			getVolumeSnapshotSpec(namespace, volumeSnapshotClass.Name, pvclaim.Name), metav1.CreateOptions{})
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-		framework.Logf("Volume snapshot name is : %s", snapshot3.Name)
+			defer func() {
+				ginkgo.By("Reverting the secret change back to reflect the original password")
+				currentSecret, err := c.CoreV1().Secrets(csiSystemNamespace).Get(ctx, configSecret, metav1.GetOptions{})
+				gomega.Expect(err).NotTo(gomega.HaveOccurred())

-		defer func() {
-			if snapshot3Created {
-				framework.Logf("Deleting volume snapshot")
-				deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, snapshot3.Name, pandoraSyncWaitTime)
-			}
-		}()
+				currentSecret.Data[vSphereCSIConf] = []byte(originalConf)
+				_, err = c.CoreV1().Secrets(csiSystemNamespace).Update(ctx, currentSecret, metav1.UpdateOptions{})
+				gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			}()
+		}

-		ginkgo.By("Verify volume snapshot is Ready to use")
-		snapshot3_updated, err := waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, snapshot3.Name)
+		ginkgo.By("Create a volume snapshot-3")
+		volumeSnapshot3, snapshotContent3, snapshotCreated3,
+			snapshotContentCreated3, _, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass,
+			pvclaim, volHandle, diskSize, true)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-		snapshot3Created = true
-		gomega.Expect(snapshot3_updated.Status.RestoreSize.Cmp(resource.MustParse(diskSize))).To(gomega.BeZero())
-
-		ginkgo.By("Verify volume snapshot content 3 is created")
-		snapshotContent3, err := snapc.SnapshotV1().VolumeSnapshotContents().Get(ctx,
-			*snapshot3_updated.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{})
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-		gomega.Expect(*snapshotContent3.Status.ReadyToUse).To(gomega.BeTrue())
+		defer func() {
+			if snapshotContentCreated3 {
+				err = deleteVolumeSnapshotContent(ctx, snapshotContent3, snapc, pandoraSyncWaitTime)
+				gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			}

-		framework.Logf("Get volume snapshot ID from snapshot handle")
-		snapshothandle3 := *snapshotContent3.Status.SnapshotHandle
-		snapshotId3 := strings.Split(snapshothandle3, "+")[1]
+			if snapshotCreated3 {
+				framework.Logf("Deleting volume snapshot")
+				deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot3.Name, pandoraSyncWaitTime)

-		ginkgo.By("Query CNS and check the volume snapshot entry")
-		err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId3)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+				framework.Logf("Wait till the volume snapshot is deleted")
+				err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc,
+					*volumeSnapshot3.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime)
+				gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			}
		}()

-		ginkgo.By("Reverting the password change")
-		err = invokeVCenterChangePassword(ctx, username, nimbusGeneratedVcPwd, originalPassword,
-			vcAddress, clientIndex)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-		originalVCPasswordChanged = false
+		if vanillaCluster {
+			ginkgo.By("Reverting the password change")
+			err = invokeVCenterChangePassword(ctx, username, nimbusGeneratedVcPwd, originalPassword,
+				vcAddress, clientIndex)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			originalVCPasswordChanged = false

-		ginkgo.By("Reverting the secret change back to reflect the original password")
-		currentSecret, err := c.CoreV1().Secrets(csiSystemNamespace).Get(ctx, configSecret, metav1.GetOptions{})
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			ginkgo.By("Reverting the secret change back to reflect the original password")
+			currentSecret, err := c.CoreV1().Secrets(csiSystemNamespace).Get(ctx, configSecret, metav1.GetOptions{})
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())

-		currentSecret.Data[vSphereCSIConf] = []byte(originalConf)
-		_, err = c.CoreV1().Secrets(csiSystemNamespace).Update(ctx, currentSecret, metav1.UpdateOptions{})
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			currentSecret.Data[vSphereCSIConf] = []byte(originalConf)
+			_, err = c.CoreV1().Secrets(csiSystemNamespace).Update(ctx, currentSecret, metav1.UpdateOptions{})
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		}

		ginkgo.By("Create PVC from snapshot")
		pvcSpec := getPersistentVolumeClaimSpecWithDatasource(namespace, diskSize, storageclass, nil,
-			v1.ReadWriteOnce, snapshot1.Name, snapshotapigroup)
+			v1.ReadWriteOnce, volumeSnapshot1.Name, snapshotapigroup)
		pvclaim2, err := fpv.CreatePVC(ctx, client, namespace, pvcSpec)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())

@@ -4225,7 +4089,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
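+		// Optional hardening (a sketch under assumptions, not part of this change):
+		// before using the restored volume one could also confirm its metadata
+		// reached CNS, reusing the helper seen earlier in this file (assuming the
+		// helper tolerates a nil pod argument):
+		//
+		//   err = waitAndVerifyCnsVolumeMetadata(ctx,
+		//       persistentvolumes2[0].Spec.CSI.VolumeHandle, pvclaim2, persistentvolumes2[0], nil)
+		//   gomega.Expect(err).NotTo(gomega.HaveOccurred())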
volHandle2 := persistentvolumes2[0].Spec.CSI.VolumeHandle gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) - defer func() { err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim2.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -4235,6 +4098,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { }) /* + Testcase-33 Multi-master and snapshot workflow 1. Create a PVC. 2. Create some dynamic volume snapshots. @@ -4245,13 +4109,15 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 7. Verify pvcs all are in Bound state. 8. Cleanup all the snapshots and the pvc. */ - ginkgo.It("[block-vanilla-snapshot][tkg-snapshot] Multi-master and snapshot workflow", ginkgo.Label(p1, block, + ginkgo.It("[block-vanilla-snapshot][tkg-snapshot][supervisor-snapshot] Multi-master and snapshot workflow", ginkgo.Label(p1, block, vanilla, tkg, snapshot), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() + var storageclass *storagev1.StorageClass var pvclaim *v1.PersistentVolumeClaim + var pvclaims []*v1.PersistentVolumeClaim var err error var snapshotContentCreated = false var sshClientConfig, sshWcpConfig *ssh.ClientConfig @@ -4263,31 +4129,29 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { snapshotOpsScale = 5 } - ginkgo.By("Create storage class and PVC") + ginkgo.By("Create storage class") if vanillaCluster { - scParameters[scParamDatastoreURL] = datastoreURL - storageclass, err = createStorageClass(client, scParameters, nil, "", "", false, "") + storageclass, err = createStorageClass(client, scParameters, nil, "", "", false, scName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } else if guestCluster { + defer func() { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + } else { storageclass, err = client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } } - defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }() - - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + ginkgo.By("Create PVC") + pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", diskSize, storageclass, true) volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle if guestCluster { volHandle = getVolumeIDFromSupervisorCluster(volHandle) } gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) - defer func() { ginkgo.By("In defer function, deleting PVC") err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) @@ -4299,7 +4163,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Create volume snapshot class") volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { ginkgo.By("In defer function deleting volume snapshot class") err := snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, @@ -4307,9 +4170,18 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() if vanillaCluster { nimbusGeneratedK8sVmPwd := GetAndExpectStringEnvVar(nimbusK8sVmPwd) sshClientConfig = &ssh.ClientConfig{ User: rootUser, Auth: []ssh.AuthMethod{ @@ -4317,17 +4189,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { }, HostKeyCallback: ssh.InsecureIgnoreHostKey(), } /* Get current leader Csi-Controller-Pod where CSI Snapshotter is running and find the master node IP where this Csi-Controller-Pod is running */ ginkgo.By("Get current leader Csi-Controller-Pod name where csi-snapshotter is running and " + "find the master node IP where this Csi-Controller-Pod is running") csiControllerPod, k8sMasterIP, err = getK8sMasterNodeIPWhereContainerLeaderIsRunning(ctx, c, sshClientConfig, snapshotterContainerName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("csi-snapshotter leader is in Pod %s "+ "which is running on master node %s", csiControllerPod, k8sMasterIP) - } else if guestCluster { + } else if supervisorCluster || guestCluster { svcMasterIp = GetAndExpectStringEnvVar(svcMasterIP) svcMasterPwd = GetAndExpectStringEnvVar(svcMasterPassword) framework.Logf("svc master ip: %s", svcMasterIp) @@ -4338,6 +4200,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { }, HostKeyCallback: ssh.InsecureIgnoreHostKey(), } + framework.Logf("sshwcpConfig: %v", sshWcpConfig) csiControllerPod, k8sMasterIP, err = getK8sMasterNodeIPWhereContainerLeaderIsRunning(ctx, client, sshWcpConfig, snapshotterContainerName) @@ -4377,7 +4240,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Kill container CSI-Snapshotter on the master node where elected leader " + "csi controller pod is running") - if vanillaCluster { + if vanillaCluster || supervisorCluster { /* Delete elected leader CSI-Controller-Pod where csi-snapshotter is running */ csipods, err := client.CoreV1().Pods(csiSystemNamespace).List(ctx, metav1.ListOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -4387,7 +4250,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { err = fpod.WaitForPodsRunningReady(ctx, client, csiSystemNamespace, int32(csipods.Size()), 0, pollTimeoutShort*2) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } else if guestCluster { + } else { err = execStopContainerOnGc(sshWcpConfig, svcMasterIp, snapshotterContainerName, k8sMasterIP, svcNamespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -4413,7 +4276,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { volumeSnapshotContents = append(volumeSnapshotContents, snapshotContent) framework.Logf("Get volume snapshot ID from snapshot handle") - snapshotId, err := getVolumeSnapshotIdFromSnapshotHandle(ctx, snapshotContent) + snapshotId, _, err := getVolumeSnapshotIdFromSnapshotHandle(ctx, snapshotContent) gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("snapshot Id: %s", snapshotId) @@ -4432,7 +4295,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("csi-snapshotter leader is in Pod 
%s "+ "which is running on master node %s", csiControllerPod, k8sMasterIP) - } else if guestCluster { + } else { framework.Logf("sshwcpConfig: %v", sshWcpConfig) csiControllerPod, k8sMasterIP, err = getK8sMasterNodeIPWhereContainerLeaderIsRunning(ctx, client, sshWcpConfig, snapshotterContainerName) @@ -4473,17 +4336,26 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle2 := persistentvolumes2[0].Spec.CSI.VolumeHandle gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) - - ginkgo.By("Deleting PVC2") - err = fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim2.Name, namespace) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle2) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pvclaims = append(pvclaims, pvclaim2) } + defer func() { + for _, pvclaim := range pvclaims { + pv := getPvFromClaim(client, pvclaim.Namespace, pvclaim.Name) + volHandle = pv.Spec.CSI.VolumeHandle + if guestCluster { + volHandle = getVolumeIDFromSupervisorCluster(volHandle) + } + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + for i := 0; i < snapshotOpsScale; i++ { framework.Logf("Get volume snapshot ID from snapshot handle") - snapshotId, err := getVolumeSnapshotIdFromSnapshotHandle(ctx, volumeSnapshotContents[i]) + snapshotId, _, err := getVolumeSnapshotIdFromSnapshotHandle(ctx, volumeSnapshotContents[i]) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volumeSnapshot, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Get(ctx, @@ -4517,7 +4389,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { var snapshotNames []string ginkgo.By("Create storage class and PVC") - scParameters[scParamDatastoreURL] = datastoreURL storageclass, pvclaim, err = createPVCAndStorageClass(ctx, client, namespace, nil, scParameters, diskSize, nil, "", false, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -4716,41 +4587,33 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { }) /* - Volume snapshot creation when resize is in progress - 1. Create a pvc and resize the pvc (say from 2GB to 4GB) - 2. While the resize operation is in progress, create a snapshot on this volume - 3. Expected behavior: resize operation should succeed and the - snapshot creation should succeed after resize completes + Testcase-26 + Volume snapshot creation when resize is in progress + 1. Create a pvc and resize the pvc (say from 2GB to 4GB) + 2. While the resize operation is in progress, create a snapshot on this volume + 3. 
Expected behavior: resize operation should succeed and the + snapshot creation should succeed after resize completes */ - ginkgo.It("[block-vanilla-snapshot][tkg-snapshot] Volume snapshot creation when "+ - "resize is in progress", ginkgo.Label(p1, block, vanilla, snapshot, tkg, stable), func() { + + ginkgo.It("[block-vanilla-snapshot][tkg-snapshot][supervisor-snapshot] Volume snapshot creation when "+ + "resize is in progress", ginkgo.Label(p1, block, vanilla, snapshot, tkg, stable, wcp), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - var volumeSnapshot *snapV1.VolumeSnapshot - var snapshotContent *snapV1.VolumeSnapshotContent - var snapshotCreated, snapshotContentCreated bool - var snapshotId string - var err error - - if vanillaCluster { - scParameters[scParamDatastoreURL] = datastoreURL - } else if guestCluster { - scParameters[svStorageClassName] = storagePolicyName - } - - ginkgo.By("Create storage class and PVC") - storageclass, err := createStorageClass(client, scParameters, nil, "", "", true, "") + ginkgo.By("Create storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, scName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }() - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + ginkgo.By("Create PVC") + pvclaim, pvs := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", diskSize, storageclass, true) - volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle + volHandle = pvs[0].Spec.CSI.VolumeHandle if guestCluster { volHandle = getVolumeIDFromSupervisorCluster(volHandle) } @@ -4773,25 +4636,28 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { }() var vmUUID string - nodeName := pod.Spec.NodeName + var exists bool if vanillaCluster { vmUUID = getNodeUUID(ctx, client, pod.Spec.NodeName) } else if guestCluster { vmUUID, err = getVMUUIDFromNodeName(pod.Spec.NodeName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else if supervisorCluster { + annotations := pod.Annotations + vmUUID, exists = annotations[vmUUIDLabel] + gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("Pod doesn't have %s annotation", vmUUIDLabel)) + _, err := e2eVSphere.getVMByUUID(ctx, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - - ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volHandle, nodeName)) - isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, volHandle, vmUUID) + isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, pvs[0].Spec.CSI.VolumeHandle, vmUUID) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") - // Modify PVC spec to trigger volume expansion + ginkgo.By("Modify PVC spec to trigger volume expansion") currentPvcSize := pvclaim.Spec.Resources.Requests[v1.ResourceStorage] newSize := currentPvcSize.DeepCopy() newSize.Add(resource.MustParse("4Gi")) - newDiskSize := "6Gi" framework.Logf("currentPvcSize %v, newSize %v", currentPvcSize, newSize) claims, err := expandPVCSize(pvclaim, newSize, client) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -4809,17 +4675,10 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { }() ginkgo.By("Create a dynamic volume snapshot") - 
if vanillaCluster { - volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, snapshotId, err = createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, - pvclaim, volHandle, diskSize, true) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } else if guestCluster { - volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, snapshotId, err = createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, - pvclaim, volHandle, newDiskSize, true) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, _, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvclaim, volHandle, diskSize, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { if snapshotContentCreated { err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) @@ -4837,9 +4696,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { } }() - ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow CNS to sync with pandora", pandoraSyncWaitTime)) - time.Sleep(time.Duration(pandoraSyncWaitTime) * time.Second) - ginkgo.By("Waiting for file system resize to finish") claims, err = waitForFSResize(pvclaim, client) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -4862,21 +4718,17 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { queryResult.Volumes[0].BackingObjectDetails.(*cnstypes.CnsBlockBackingDetails).CapacityInMb) } gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - ginkgo.By("Delete dynamic volume snapshot") - snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, - volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* - Volume provision and snapshot creation on VVOL Datastore - 1. Create a SC for VVOL datastore and provision a PVC - 2. Create Snapshot class and take a snapshot of the volume - 3. Cleanup of snapshot, pvc and sc + Testcase-13 + Volume provision and snapshot creation on VVOL Datastore + 1. Create a SC for VVOL datastore and provision a PVC + 2. Create Snapshot class and take a snapshot of the volume + 3. Cleanup of snapshot, pvc and sc */ - ginkgo.It("[block-vanilla-snapshot][tkg-snapshot]Volume provision and "+ - "snapshot creation/restore on VVOL Datastore", ginkgo.Label(p0, block, vanilla, snapshot, tkg), func() { + ginkgo.It("[block-vanilla-snapshot][tkg-snapshot][supervisor-snapshot] Volume provision and "+ + "snapshot creation/restore on VVOL Datastore", ginkgo.Label(p0, block, vanilla, snapshot, tkg, wcp), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() invokeSnapshotOperationsOnSharedDatastore(client, ctx, namespace, scParameters, snapc, "VVOL", @@ -4885,54 +4737,72 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { }) /* - Volume provision and snapshot creation on VMFS Datastore - 1. Create a SC for VMFS datastore and provision a PVC - 2. Create Snapshot class and take a snapshot of the volume - 3. Cleanup of snapshot, pvc and sc + Testcase-13 + Volume provision and snapshot creation on VMFS Datastore + 1. Create a SC for VMFS datastore and provision a PVC + 2. Create Snapshot class and take a snapshot of the volume + 3. 
Cleanup of snapshot, pvc and sc */ - ginkgo.It("[block-vanilla-snapshot][tkg-snapshot] Volume provision and "+ - "snapshot creation/restore on VMFS Datastore", ginkgo.Label(p0, block, vanilla, snapshot, tkg), func() { + ginkgo.It("[block-vanilla-snapshot][tkg-snapshot][supervisor-snapshot] Volume provision and "+ + "snapshot creation/restore on VMFS Datastore", ginkgo.Label(p0, block, vanilla, snapshot, tkg, wcp), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() invokeSnapshotOperationsOnSharedDatastore(client, ctx, namespace, scParameters, snapc, "VMFS", pandoraSyncWaitTime) }) /* - Volume provision and snapshot creation on NFS Datastore - 1. Create a SC for VMFS datastore and provision a PVC - 2. Create Snapshot class and take a snapshot of the volume - 3. Cleanup of snapshot, pvc and sc + Testcase-13 + Volume provision and snapshot creation on NFS Datastore + 1. Create a SC for NFS datastore and provision a PVC + 2. Create Snapshot class and take a snapshot of the volume + 3. Cleanup of snapshot, pvc and sc */ - ginkgo.It("[block-vanilla-snapshot][tkg-snapshot] Volume provision and "+ - "snapshot creation/restore on NFS Datastore", ginkgo.Label(p0, block, vanilla, snapshot, tkg), func() { + ginkgo.It("[block-vanilla-snapshot][tkg-snapshot][supervisor-snapshot] Volume provision and "+ + "snapshot creation/restore on NFS Datastore", ginkgo.Label(p0, block, vanilla, snapshot, tkg, wcp), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() invokeSnapshotOperationsOnSharedDatastore(client, ctx, namespace, scParameters, snapc, "NFS", pandoraSyncWaitTime) }) /* - Volume provision and snapshot creation on VSAN2 Datastore - 1. Create a SC for VMFS datastore and provision a PVC - 2. Create Snapshot class and take a snapshot of the volume - 3. Cleanup of snapshot, pvc and sc + Testcase-13 + Volume provision and snapshot creation on VSAN2 Datastore + 1. Create a SC for VSAN2 datastore and provision a PVC + 2. Create Snapshot class and take a snapshot of the volume + 3. Cleanup of snapshot, pvc and sc + */ + ginkgo.It("[tkg-snapshot][supervisor-snapshot] Volume provision and snapshot creation/restore on "+ "VSAN2 Datastore", ginkgo.Label(p0, snapshot, tkg, newTest, wcp), func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + invokeSnapshotOperationsOnSharedDatastore(client, ctx, namespace, scParameters, snapc, "VSAN2", pandoraSyncWaitTime) + }) + + /* + Testcase-13 + Volume provision and snapshot creation on VSANDIRECT Datastore + 1. Create a SC for vsan direct datastore and provision a PVC + 2. Create Snapshot class and take a snapshot of the volume + 3. Cleanup of snapshot, pvc and sc */ - ginkgo.It("[tkg-snapshot] Volume provision and snapshot creation/restore on "+ - "VSAN2 Datastore", ginkgo.Label(p0, snapshot, tkg, newTest), func() { + ginkgo.It("[tkg-snapshot][supervisor-snapshot] Volume provision and snapshot creation/restore on "+ + "vsan direct Datastore", ginkgo.Label(p0, snapshot, tkg, newTest, wcp), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - invokeSnapshotOperationsOnSharedDatastore(client, ctx, namespace, scParameters, snapc, "VSAN", pandoraSyncWaitTime) + invokeSnapshotOperationsOnSharedDatastore(client, ctx, namespace, scParameters, snapc, "VSANDIRECT", pandoraSyncWaitTime) }) /* - Scale-up creation of snapshots across multiple volumes - - 1. Create a few pvcs (around 25) - 2. Trigger parallel snapshot create calls on all pvcs - 3. 
Trigger parallel snapshot delete calls on all pvcs - 4. All calls in (2) and (3) should succeed since these are - triggered via k8s API (might take longer time) - 5. Trigger create/delete calls and ensure there are no stale entries left behind - 6. Create multiple volumes from the same snapshot + Testcase-29 + Scale-up creation of snapshots across multiple volumes + + 1. Create a few pvcs (around 25) + 2. Trigger parallel snapshot create calls on all pvcs + 3. Trigger parallel snapshot delete calls on all pvcs + 4. All calls in (2) and (3) should succeed since these are + triggered via k8s API (might take longer time) + 5. Trigger create/delete calls and ensure there are no stale entries left behind + 6. Create multiple volumes from the same snapshot */ /* @@ -4945,38 +4815,27 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 4. Volume restore 5. snapshot create/delete workflow */ - ginkgo.It("[block-vanilla-snapshot][tkg-snapshot] Scale-up creation of snapshots across multiple "+ + ginkgo.It("[block-vanilla-snapshot][tkg-snapshot] [supervisor-snapshot] Scale-up creation of snapshots across multiple "+ "volumes", ginkgo.Label(p0, block, vanilla, snapshot, tkg, stable), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - var storageclass *storagev1.StorageClass volumesnapshots := make([]*snapV1.VolumeSnapshot, volumeOpsScale) snapshotContents := make([]*snapV1.VolumeSnapshotContent, volumeOpsScale) pvclaims := make([]*v1.PersistentVolumeClaim, volumeOpsScale) pvclaims2 := make([]*v1.PersistentVolumeClaim, volumeOpsScale) var persistentvolumes []*v1.PersistentVolume - var err error - - ginkgo.By("Create storage class and PVC") - if vanillaCluster { - scParameters[scParamDatastoreURL] = datastoreURL - } else if guestCluster { - scParameters[svStorageClassName] = storagePolicyName - } + var volHandles []string - curtime := time.Now().Unix() - randomValue := rand.Int() - val := strconv.FormatInt(int64(randomValue), 10) - val = string(val[1:3]) - curtimestring := strconv.FormatInt(curtime, 10) - scName := "snapshot-scale" + curtimestring + val - storageclass, err = createStorageClass(client, scParameters, nil, "", "", false, scName) + ginkgo.By("Create storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, scName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }() ginkgo.By("Create volume snapshot class") @@ -4990,7 +4849,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { } }() - ginkgo.By("Creating PVCs using the Storage Class") + ginkgo.By("Create PVCs") framework.Logf("VOLUME_OPS_SCALE is set to %v", volumeOpsScale) for i := 0; i < volumeOpsScale; i++ { framework.Logf("Creating pvc%v", i) @@ -4999,10 +4858,27 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { } ginkgo.By("Waiting for all claims to be in bound state") - persistentvolumes, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, + _, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for i := 0; i < volumeOpsScale; i++ { + pv := getPvFromClaim(client, 
pvclaims[i].Namespace, pvclaims[i].Name) + volHandle = pv.Spec.CSI.VolumeHandle + if guestCluster { + volHandle = getVolumeIDFromSupervisorCluster(volHandle) + } + volHandles = append(volHandles, volHandle) + } + defer func() { + for i := 0; i < volumeOpsScale; i++ { + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaims[i].Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandles[i]) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + ginkgo.By("Create a volume snapshot") framework.Logf("VOLUME_OPS_SCALE is set to %v", volumeOpsScale) for i := 0; i < volumeOpsScale; i++ { @@ -5026,6 +4902,21 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { gomega.Expect(*snapshotContents[i].Status.ReadyToUse).To(gomega.BeTrue()) } + defer func() { + for i := 0; i < volumeOpsScale; i++ { + err = deleteVolumeSnapshotContent(ctx, snapshotContents[i], snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumesnapshots[i].Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumesnapshots[i].Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + ginkgo.By("Create Multiple PVC from one snapshot") for i := 0; i < volumeOpsScale; i++ { pvcSpec := getPersistentVolumeClaimSpecWithDatasource(namespace, diskSize, storageclass, nil, @@ -5038,18 +4929,8 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Wait for the PVC to be bound") _, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims2, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - for i := 0; i < volumeOpsScale; i++ { - framework.Logf("Deleting volume snapshot") - deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumesnapshots[i].Name, pandoraSyncWaitTime) - - err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, snapshotContents[i].ObjectMeta.Name) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - - // TODO: Add a logic to check for the no orphan volumes defer func() { - for _, claim := range pvclaims { + for _, claim := range pvclaims2 { err := fpv.DeletePersistentVolumeClaim(ctx, client, claim.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -5065,6 +4946,15 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { "kubernetes", volumeID)) } }() + + ginkgo.By("Delete dynamic volume snapshot") + for i := 0; i < volumeOpsScale; i++ { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumesnapshots[i].Name, pandoraSyncWaitTime) + + err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, snapshotContents[i].ObjectMeta.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }) /* Create/Delete snapshot via k8s API using VolumeSnapshotContent (Pre-Provisioned Snapshots) @@ -5097,22 +4987,16 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - if vanillaCluster { - scParameters[scParamDatastoreURL] = datastoreURL - } else if guestCluster { - scParameters[svStorageClassName] = storagePolicyName - } - - ginkgo.By("Create storage class and PVC") - storageclass, err := createStorageClass(client, 
scParameters, nil, "", "", true, "") + ginkgo.By("Create storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", true, scName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + ginkgo.By("Create PVC") + pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", diskSize, storageclass, true) volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle if guestCluster { @@ -5133,7 +5017,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Create a dynamic volume snapshot") volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, dynamicSnapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, + snapshotContentCreated, dynamicSnapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, true) defer func() { if snapshotCreated { @@ -5218,14 +5102,8 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { defer cancel() var staticSnapshotCreated, staticSnapshotContentCreated bool - if vanillaCluster { - scParameters[scParamDatastoreURL] = datastoreURL - } else if guestCluster { - scParameters[svStorageClassName] = storagePolicyName - } - - ginkgo.By("Create storage class and PVC") - storageclass, err := createStorageClass(client, scParameters, nil, "", "", true, "") + ginkgo.By("Create storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", true, scName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -5233,7 +5111,8 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + ginkgo.By("Create PVC") + pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", diskSize, storageclass, true) volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle if guestCluster { @@ -5254,7 +5133,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Create a dynamic volume snapshot") volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -5350,7 +5229,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Take a snapshot of restored PVC created from dynamic snapshot") volumeSnapshot3, _, snapshotCreated3, - snapshotContentCreated3, snapshotId3, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + snapshotContentCreated3, snapshotId3, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim2, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -5391,17 +5270,17 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) - /* - Pre-provisioned snapshot using incorrect/non-existing 
static snapshot - 1. Create a sc, and pvc using this sc - 2. Create a snapshot for this pvc - 3. Create a VolumeSnapshotContent CR using above snapshot-id, by passing the snapshotHandle - 4. Create a VolumeSnapshot using above content as source - 5. VolumeSnapshot and VolumeSnapshotContent should be created successfully and readToUse set to True - 6. Delete the snapshot created in step-4 - 7. Restore: Create a volume using above pre-provisioned snapshot k8s object - (note the snapshotHandle its pointing to has been deleted) - 8. Volume Create should fail with an appropriate error on k8s side + /* Testcase-9 + Pre-provisioned snapshot using incorrect/non-existing static snapshot + 1. Create a sc, and pvc using this sc + 2. Create a snapshot for this pvc + 3. Create a VolumeSnapshotContent CR using above snapshot-id, by passing the snapshotHandle + 4. Create a VolumeSnapshot using above content as source + 5. VolumeSnapshot and VolumeSnapshotContent should be created successfully and readyToUse set to True + 6. Delete the snapshot created in step-4 + 7. Restore: Create a volume using above pre-provisioned snapshot k8s object + (note the snapshotHandle it's pointing to has been deleted) + 8. Volume Create should fail with an appropriate error on k8s side */ ginkgo.It("[tkg-snapshot] Restore volume using non-existing static snapshot", ginkgo.Label(p0, snapshot, tkg, newTest), func() { @@ -5410,22 +5289,16 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { defer cancel() var staticSnapshotCreated, staticSnapshotContentCreated bool - if vanillaCluster { - scParameters[scParamDatastoreURL] = datastoreURL - } else if guestCluster { - scParameters[svStorageClassName] = storagePolicyName - } - - ginkgo.By("Create storage class and PVC") - storageclass, err := createStorageClass(client, scParameters, nil, "", "", true, "") + ginkgo.By("Create storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", true, scName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + ginkgo.By("Create PVC") + pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", diskSize, storageclass, true) volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle if guestCluster { volHandle = getVolumeIDFromSupervisorCluster(volHandle) } @@ -5446,7 +5319,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Create a dynamic volume snapshot") volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + snapshotContentCreated, _, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -5526,17 +5399,18 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { }) /* - Create a volume from a snapshot that is still not ready-to-use - 1. Create a pre-provisioned snapshot pointing to a VolumeSnapshotContent - which is still not provisioned (or does not exist) - 2. The snapshot will have status.readyToUse: false and snapshot is in Pending state - 3. Create a volume using the above snapshot as source and ensure the provisioning fails with error: - ProvisioningFailed | snapshot <> not bound - 4. 
pvc is stuck in Pending - 5. Once the VolumeSnapshotContent is created, snapshot should have status.readyToUse: true - 6. The volume should now get provisioned successfully - 7. Validate the pvc is Bound - 8. Cleanup the snapshot and pvc + Testcase-10 + Create a volume from a snapshot that is still not ready-to-use + 1. Create a pre-provisioned snapshot pointing to a VolumeSnapshotContent + which is still not provisioned (or does not exist) + 2. The snapshot will have status.readyToUse: false and snapshot is in Pending state + 3. Create a volume using the above snapshot as source and ensure the provisioning fails with error: + ProvisioningFailed | snapshot <> not bound + 4. pvc is stuck in Pending + 5. Once the VolumeSnapshotContent is created, snapshot should have status.readyToUse: true + 6. The volume should now get provisioned successfully + 7. Validate the pvc is Bound + 8. Cleanup the snapshot and pvc */ ginkgo.It("[tkg-snapshot] Restore volume from a static snapshot that is still not "+ "ready-to-use", ginkgo.Label(p0, snapshot, tkg, newTest), func() { @@ -5544,22 +5418,16 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - if vanillaCluster { - scParameters[scParamDatastoreURL] = datastoreURL - } else if guestCluster { - scParameters[svStorageClassName] = storagePolicyName - } - - ginkgo.By("Create storage class and PVC") - storageclass, err := createStorageClass(client, scParameters, nil, "", "", true, "") + ginkgo.By("Create storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", true, scName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + ginkgo.By("Create PVC") + pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", diskSize, storageclass, true) volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle if guestCluster { @@ -5580,7 +5448,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Create a dynamic volume snapshot") volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + snapshotContentCreated, _, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -5704,65 +5572,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) - /* - Create snapshot on Supervisor cluster - - Create a Storage Class, and a PVC using this SC in Supervisor cluster. - Create a dynamic snapshot in supervisor using above PVC as source - Snapshot creation should fail with appropriate error message. 
- Cleanup the snapshots, PVC and SC - */ - - ginkgo.It("[tkg-snapshot] [stretched-svc] Verify Snapshot creation should fail on supervisor "+ - "cluster", ginkgo.Label(p0, snapshot, tkg, newTest, negative), func() { - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - if stretchedSVC { - zonalPolicy := GetAndExpectStringEnvVar(envZonalStoragePolicyName) - scParameters[svStorageClassName] = zonalPolicy - storagePolicyName = zonalPolicy - } - - storageclass, err := svcClient.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - pvclaim, err := fpv.CreatePVC(ctx, svcClient, svcNamespace, - getPersistentVolumeClaimSpecWithStorageClass(svcNamespace, diskSize, storageclass, nil, v1.ReadWriteOnce)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - ginkgo.By("Expect claim to provision volume successfully") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, svcClient, - []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") - volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle - gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) - defer func() { - err = fpv.DeletePersistentVolumeClaim(ctx, svcClient, pvclaim.Name, pvclaim.Namespace) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }() - - ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volHandle)) - queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volHandle) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(queryResult.Volumes).ShouldNot(gomega.BeEmpty()) - gomega.Expect(queryResult.Volumes[0].VolumeId.Id).To(gomega.Equal(volHandle)) - - ginkgo.By("Create volume snapshot class") - volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - ginkgo.By("Expected snapshot creation failure on supervisor cluster") - _, err = snapc.SnapshotV1().VolumeSnapshots(svcNamespace).Create(ctx, - getVolumeSnapshotSpec(svcNamespace, volumeSnapshotClass.Name, pvclaim.Name), metav1.CreateOptions{}) - if err != nil { - framework.Logf(err.Error()) - } - }) - /* Perform online resize on restored volume 1. Create a Storage Class, a PVC and attach the PVC to a Pod, write a file @@ -5775,40 +5584,28 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 8. Verify snapshot size. It should be same as that of restored volume size. 9. Run cleanup: Delete snapshots, restored-volumes, pods. 
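		(Editor's aside, not part of the patch: step 4's online expansion is exercised
		through this file's existing helper, along the lines of
		verifyOnlineVolumeExpansionOnGc(client, namespace, svcPVCName2, volHandle, pvclaim2, pod2, f),
		which raises the PVC's requested storage while the pod stays attached and then
		waits for the in-use filesystem resize to finish; the argument names match the
		restored PVC and pod created in the test body below.)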
*/ - ginkgo.It("[tkg-snapshot] Perform online resize on restored volume", ginkgo.Label(p0, + ginkgo.It("[tkg-snapshot][supervisor-snapshot] Perform online resize on restored volume", ginkgo.Label(p0, snapshot, tkg, newTest, stable), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - var storageclass *storagev1.StorageClass - var pvclaim *v1.PersistentVolumeClaim - var err error - var snapshotContentCreated = false - var snapshotCreated = false - if vanillaCluster { - scParameters[scParamDatastoreURL] = datastoreURL - } else if guestCluster { - scParameters[svStorageClassName] = storagePolicyName - } - - ginkgo.By("Create storage class and PVC") - storageclass, err = createStorageClass(client, scParameters, nil, "", "", true, "") + ginkgo.By("Create storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", true, scName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + ginkgo.By("Create PVC") + pvclaim, pvs := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", diskSize, storageclass, true) - volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle + volHandle := pvs[0].Spec.CSI.VolumeHandle if guestCluster { volHandle = getVolumeIDFromSupervisorCluster(volHandle) } gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) - defer func() { ginkgo.By(fmt.Sprintf("Deleting the pvc %s in namespace %s", pvclaim.Name, namespace)) err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) @@ -5817,10 +5614,50 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() + ginkgo.By("Create Pod") + pod, err := createPod(ctx, client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false, + execRWXCommandPod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace)) + err = fpod.DeletePodWithWait(ctx, client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + var vmUUID string + var exists bool + + if vanillaCluster { + vmUUID = getNodeUUID(ctx, client, pod.Spec.NodeName) + } else if guestCluster { + vmUUID, err = getVMUUIDFromNodeName(pod.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else if supervisorCluster { + annotations := pod.Annotations + vmUUID, exists = annotations[vmUUIDLabel] + gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("Pod doesn't have %s annotation", vmUUIDLabel)) + _, err := e2eVSphere.getVMByUUID(ctx, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, pvs[0].Spec.CSI.VolumeHandle, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") + + ginkgo.By("Verify the volume is accessible and Read/write is possible") + cmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", + "cat /mnt/volume1/Pod.html "} + output := e2ekubectl.RunKubectlOrDie(namespace, cmd...) 
+ gomega.Expect(strings.Contains(output, "Hello message from Pod")).NotTo(gomega.BeFalse()) + + writeCmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", + "echo 'Hello message from test into Pod' > /mnt/volume1/Pod.html"} + e2ekubectl.RunKubectlOrDie(namespace, writeCmd...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) + gomega.Expect(strings.Contains(output, "Hello message from test into Pod")).NotTo(gomega.BeFalse()) + ginkgo.By("Create/Get volume snapshot class") volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { if vanillaCluster { err := snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, @@ -5831,9 +5668,8 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Create a volume snapshot") volumeSnapshot, _, snapshotCreated, - snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, true) - defer func() { if snapshotContentCreated { framework.Logf("Deleting volume snapshot content") @@ -5852,7 +5688,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { }() ginkgo.By("Create PVC from Snapshot and verify restore volume operations") - pvclaim2, persistentVolumes2, pod := verifyVolumeRestoreOperation(ctx, client, + pvclaim2, persistentVolumes2, pod2 := verifyVolumeRestoreOperation(ctx, client, namespace, storageclass, volumeSnapshot, diskSize, true) volHandle2 := persistentVolumes2[0].Spec.CSI.VolumeHandle svcPVCName2 := persistentVolumes2[0].Spec.CSI.VolumeHandle if guestCluster { volHandle2 = getVolumeIDFromSupervisorCluster(volHandle2) } gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) - defer func() { - // Delete POD - ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace)) + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod2.Name, namespace)) - err = fpod.DeletePodWithWait(ctx, client, pod) + err = fpod.DeletePodWithWait(ctx, client, pod2) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -5875,19 +5709,15 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { }() ginkgo.By("Perform online resize on the restored volume and make sure resize should go fine") - verifyOnlineVolumeExpansionOnGc(client, namespace, svcPVCName2, volHandle, pvclaim2, pod, f) + verifyOnlineVolumeExpansionOnGc(client, namespace, svcPVCName2, volHandle, pvclaim2, pod2, f) ginkgo.By("Create a volume snapshot from restored volume") - volumeSnapshotFromRestoreVol, snapshotContentFromRestoreVol, snapshotCreated, - snapshotContentCreated, snapshotIdFromRestoreVol, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, + volumeSnapshotFromRestoreVol, snapshotContentFromRestoreVol, snapshotCreatedFromRestoreVol, + snapshotContentCreatedFromRestoreVol, snapshotIdFromRestoreVol, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim2, volHandle2, "3Gi", true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - framework.Logf("Volume snapshot name is : %s", volumeSnapshotFromRestoreVol.Name) - snapshotCreated = true - defer func() { - if snapshotContentCreated { + if snapshotContentCreatedFromRestoreVol { framework.Logf("Deleting volume snapshot content") gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = 
deleteVolumeSnapshotContent(ctx, snapshotContentFromRestoreVol, @@ -5895,24 +5725,24 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - if snapshotCreated { + if snapshotCreatedFromRestoreVol { framework.Logf("Deleting volume snapshot") deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshotFromRestoreVol.Name, pandoraSyncWaitTime) } }() framework.Logf("Deleting volume snapshot") - snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, - volumeSnapshotFromRestoreVol, pandoraSyncWaitTime, volHandle2, snapshotIdFromRestoreVol) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + snapshotCreatedFromRestoreVol, snapshotContentCreatedFromRestoreVol, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshotFromRestoreVol, pandoraSyncWaitTime, volHandle2, snapshotIdFromRestoreVol) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* + Testcase-15 Offline relocation of FCD with snapshots 1. Create a Storage Class, and a PVC. 2. Ensure the Volume-snapshot and VolumeSnapshotContent is created and Bound @@ -5922,47 +5752,48 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 6. Create new volume using this snapshot as source, use the same SC and attach it to a Pod. 7. Run cleanup: Delete snapshots, restored-volumes, pods. */ - ginkgo.It("[tkg-snapshot] Offline relocation of FCD with snapshots", ginkgo.Label(p0, snapshot, tkg, + + ginkgo.It("[tkg-snapshot][supervisor-snapshot] Offline relocation of FCD with snapshots", ginkgo.Label(p0, snapshot, tkg, newTest, stable), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - var storageclass *storagev1.StorageClass - var pvclaim *v1.PersistentVolumeClaim - var err error - var snapshotContentCreated, snapshotCreated bool + var datastoreUrls []string - sharedvmfsURL := os.Getenv(envSharedVMFSDatastoreURL) - if sharedvmfsURL == "" { - ginkgo.Skip(fmt.Sprintf("Env %v is missing", envSharedVMFSDatastoreURL)) + // read nfs datastore url + nfsDatastoreUrl := os.Getenv(envSharedNFSDatastoreURL) + if nfsDatastoreUrl == "" { + ginkgo.Skip(fmt.Sprintf("Env %v is missing", envSharedNFSDatastoreURL)) } + // read vsan datastore url sharedVsanDatastoreURL := os.Getenv(envSharedDatastoreURL) if sharedVsanDatastoreURL == "" { ginkgo.Skip(fmt.Sprintf("Env %v is missing", envSharedDatastoreURL)) } - datastoreUrls = append(datastoreUrls, sharedvmfsURL, sharedVsanDatastoreURL) + datastoreUrls = append(datastoreUrls, nfsDatastoreUrl, sharedVsanDatastoreURL) - storagePolicyName = os.Getenv(envStoragePolicyNameForVsanVmfsDatastores) + // read storage policy where vsan and nfs datastires are tagged + storagePolicyName = os.Getenv(envStoragePolicyNameForVsanNfsDatastores) if storagePolicyName == "" { - ginkgo.Skip(fmt.Sprintf("Env %v is missing", envStoragePolicyNameForVsanVmfsDatastores)) + ginkgo.Skip(fmt.Sprintf("Env %v is missing", envStoragePolicyNameForVsanNfsDatastores)) } - ginkgo.By("Create storage class and PVC") - storageclass, err = client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) + ginkgo.By("Create storage class") + storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + ginkgo.By("Create PVC") + pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", diskSize, storageclass, true) volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle if guestCluster { volHandle = getVolumeIDFromSupervisorCluster(volHandle) } gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) - defer func() { ginkgo.By(fmt.Sprintf("Deleting the pvc %s in namespace %s", pvclaim.Name, namespace)) err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) @@ -5974,7 +5805,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Create/Get volume snapshot class") volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { if vanillaCluster { err := snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, @@ -5985,10 +5815,9 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Create a volume snapshot") volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, true) framework.Logf("Volume snapshot name is : %s", volumeSnapshot.Name) - defer func() { if snapshotContentCreated { framework.Logf("Deleting volume snapshot content") @@ -6029,9 +5858,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { volHandle2 = getVolumeIDFromSupervisorCluster(volHandle2) } gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) - defer func() { - // Delete POD ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace)) err = fpod.DeletePodWithWait(ctx, client, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -6043,11 +5870,33 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - framework.Logf("Deleting volume snapshot") + ginkgo.By("Create a volume snapshot from the restored pvc") + volumeSnapshot2, snapshotContent2, snapshotCreated2, + snapshotContentCreated2, snapshotId2, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvclaim2, volHandle2, diskSize, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("Volume snapshot name is : %s", volumeSnapshot2.Name) + defer func() { + if snapshotContentCreated2 { + framework.Logf("Deleting volume snapshot content") + err = deleteVolumeSnapshotContent(ctx, snapshotContent2, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated2 { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot2.Name, pandoraSyncWaitTime) + } + }() + + framework.Logf("Deleting volume snapshot-1") snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("Deleting volume snapshot-2") + snapshotCreated2, snapshotContentCreated2, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot2, pandoraSyncWaitTime, volHandle2, snapshotId2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* @@ -6076,12 +5925,14 @@ var _ 
= ginkgo.Describe("Volume Snapshot Basic Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Error creating k8s client with %v: %v", newGcKubconfigPath, err)) - ginkgo.By("Create storage class and PVC") + ginkgo.By("Create storage class") storageclass, err = client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + + ginkgo.By("Create PVC") + pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", diskSize, storageclass, true) volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle if guestCluster { @@ -6111,7 +5962,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Create a volume snapshot") volumeSnapshot, _, snapshotCreated, - snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Volume snapshot name is : %s", volumeSnapshot.Name) @@ -6185,29 +6036,30 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 5. Restore PVC creation should fail and be stuck in Pending state with appropriate error message. 6. Perform Cleanup. */ - ginkgo.It("[tkg-snapshot] Volume mode conversion", ginkgo.Label(p0, snapshot, tkg, newTest, stable, + ginkgo.It("[tkg-snapshot][supervisor-snapshot] Volume mode conversion", ginkgo.Label(p0, snapshot, tkg, newTest, stable, negative), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - var storageclass *storagev1.StorageClass - var pvclaim *v1.PersistentVolumeClaim - var err error - var snapshotContentCreated, snapshotCreated bool - ginkgo.By("Create storage class and PVC") - storageclass, err = client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) - if !apierrors.IsNotFound(err) { + var pvclaims []*v1.PersistentVolumeClaim + + ginkgo.By("Create storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, scName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + }() + + ginkgo.By("Create PVC") + pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", diskSize, storageclass, true) volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle if guestCluster { volHandle = getVolumeIDFromSupervisorCluster(volHandle) } gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) - defer func() { ginkgo.By(fmt.Sprintf("Deleting the pvc %s in namespace %s", pvclaim.Name, namespace)) err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) @@ -6219,7 +6071,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Create/Get volume snapshot class") volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { if vanillaCluster { err := 
snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, @@ -6230,11 +6081,10 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Create a volume snapshot") volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Volume snapshot name is : %s", volumeSnapshot.Name) - defer func() { if snapshotContentCreated { framework.Logf("Deleting volume snapshot content") @@ -6267,148 +6117,123 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { framework.Logf("Expected failure message: %+q", expectedErrMsg) err = waitForEvent(ctx, client, namespace, expectedErrMsg, pvclaim2.Name) gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Expected error : %q", expectedErrMsg)) - - ginkgo.By(fmt.Sprintf("Deleting the pvc %s in namespace %s", pvclaim2.Name, namespace)) - err = fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim2.Name, namespace) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - err = waitForPvcToBeDeleted(ctx, client, pvclaim2.Name, namespace) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - framework.Logf("Deleting pending PVCs from SVC namespace") - pvcList := getAllPVCFromNamespace(svcClient, svcNamespace) - for _, pvc := range pvcList.Items { - if pvc.Status.Phase == v1.ClaimPending { - framework.ExpectNoError(fpv.DeletePersistentVolumeClaim(ctx, svcClient, pvc.Name, svcNamespace), - "Failed to delete PVC", pvc.Name) + defer func() { + for _, pvclaim := range pvclaims { + pv := getPvFromClaim(client, pvclaim.Namespace, pvclaim.Name) + volHandle = pv.Spec.CSI.VolumeHandle + if guestCluster { + volHandle = getVolumeIDFromSupervisorCluster(volHandle) + } + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + if guestCluster { + framework.Logf("Deleting pending PVCs from SVC namespace") + pvcList := getAllPVCFromNamespace(svcClient, svcNamespace) + for _, pvc := range pvcList.Items { + if pvc.Status.Phase == v1.ClaimPending { + framework.ExpectNoError(fpv.DeletePersistentVolumeClaim(ctx, svcClient, pvc.Name, svcNamespace), + "Failed to delete PVC", pvc.Name) + } } } + + ginkgo.By("Delete dynamic volume snapshot") snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* - Volume snapshot creation on a file-share volume - Create a file-share pvc - Try creating a snapshot on this pvc - Should fail with an appropriate error - */ - - ginkgo.It("[tkg-snapshot] Volume snapshot creation on a file-share volume on a guest "+ - "cluster", ginkgo.Label(p1, snapshot, tkg, newTest, negative, stable), func() { - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - scParameters[svStorageClassName] = storagePolicyName - - ginkgo.By("Create storage class and PVC") - storageclass, pvclaim, err := createPVCAndStorageClass(ctx, client, - namespace, nil, scParameters, diskSize, nil, "", false, v1.ReadWriteMany) - 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { - err = client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }() - - ginkgo.By("Expect claim to fail as invalid storage policy is specified in Storage Class") - err = fpv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, - client, pvclaim.Namespace, pvclaim.Name, framework.Poll, framework.ClaimProvisionTimeout) - gomega.Expect(err).To(gomega.HaveOccurred()) - expectedErrMsg := "no datastores found to create file volume, vsan file service may be disabled" - err = waitForEvent(ctx, client, namespace, expectedErrMsg, pvclaim.Name) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Expected error : %q", expectedErrMsg)) - }) - - /* - PVC/Pod → Snapshot → RestoreVolume/Pod → Snapshot → Restore Vol again/Pod - - Create a Storage Class, a PVC and attach the PVC to a Pod, write a file - Create dynamically provisioned snapshots using this PVC - Create new volume using this snapshots as source, use the same SC - Ensure the PVC gets provisioned and is Bound - Attach the PVC to a Pod and ensure data from snapshot is available (file that was written in step.1 - should be available) - And also write new data to the restored volumes and it should succeed - Take a snapshot of restore volume created in step #3. - Create new volume using the snapshot as source use the same SC created in step #7 - Ensure the PVC gets provisioned and is Bound - Attach the PVC to a Pod and ensure data from snapshot is available (file that was written in - step.1 and step 5 should be available) - And also write new data to the restored volumes and it should succeed - Run cleanup: Delete snapshots, restored-volumes, pods. + PVC/Pod → Snapshot → RestoreVolume/Pod → Snapshot → Restore Vol again/Pod + + Create a Storage Class, a PVC and attach the PVC to a Pod, write a file + Create dynamically provisioned snapshots using this PVC + Create new volume using these snapshots as source, use the same SC + Ensure the PVC gets provisioned and is Bound + Attach the PVC to a Pod and ensure data from snapshot is available (file that was written in step #1 + should be available) + And also write new data to the restored volumes and it should succeed + Take a snapshot of the restored volume created in step #3. + Create new volume using the snapshot as source, using the same SC created in step #7 + Ensure the PVC gets provisioned and is Bound + Attach the PVC to a Pod and ensure data from snapshot is available (file that was written in + step #1 and step #5 should be available) + And also write new data to the restored volumes and it should succeed + Run cleanup: Delete snapshots, restored-volumes, pods.
*/ - ginkgo.It("[tkg-snapshot] Create restore volume snapshot in consistent order", ginkgo.Label(p0, snapshot, + ginkgo.It("[tkg-snapshot][supervisor-snapshot] Create restore volume snapshot in consistent order", ginkgo.Label(p0, snapshot, tkg, newTest, stable), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - if vanillaCluster { - scParameters[scParamDatastoreURL] = datastoreURL - } else if guestCluster { - scParameters[svStorageClassName] = storagePolicyName - } - - ginkgo.By("Create storage class and PVC") - storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, "") + ginkgo.By("Create storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, scName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + ginkgo.By("Create PVC") + pvclaim1, pvs1 := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", diskSize, storageclass, true) - volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle + volHandle1 := pvs1[0].Spec.CSI.VolumeHandle if guestCluster { - volHandle = getVolumeIDFromSupervisorCluster(volHandle) + volHandle1 = getVolumeIDFromSupervisorCluster(volHandle1) } - gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + gomega.Expect(volHandle1).NotTo(gomega.BeEmpty()) defer func() { - err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim1.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle1) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By("Create Pod") - pod, err := createPod(ctx, client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false, + pod1, err := createPod(ctx, client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim1}, false, execRWXCommandPod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace)) - err = fpod.DeletePodWithWait(ctx, client, pod) + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod1.Name, namespace)) + err = fpod.DeletePodWithWait(ctx, client, pod1) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() var vmUUID string - nodeName := pod.Spec.NodeName + var exists bool if vanillaCluster { - vmUUID = getNodeUUID(ctx, client, pod.Spec.NodeName) + vmUUID = getNodeUUID(ctx, client, pod1.Spec.NodeName) } else if guestCluster { - vmUUID, err = getVMUUIDFromNodeName(pod.Spec.NodeName) + vmUUID, err = getVMUUIDFromNodeName(pod1.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else if supervisorCluster { + annotations := pod1.Annotations + vmUUID, exists = annotations[vmUUIDLabel] + gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("Pod doesn't have %s annotation", vmUUIDLabel)) + _, err := e2eVSphere.getVMByUUID(ctx, vmUUID) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - - ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volHandle, nodeName)) - isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, volHandle, vmUUID) + isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, pvs1[0].Spec.CSI.VolumeHandle, vmUUID)
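// A minimal sketch (illustrative only, not part of this change) of how the three-way vmUUID
// lookup above could be factored out, assuming the suite globals vanillaCluster/guestCluster/
// supervisorCluster and the vmUUIDLabel pod annotation behave as they do elsewhere in this file:
//
//	podVMUUID := func(pod *v1.Pod) (string, error) {
//		if vanillaCluster {
//			return getNodeUUID(ctx, client, pod.Spec.NodeName), nil
//		}
//		if guestCluster {
//			return getVMUUIDFromNodeName(pod.Spec.NodeName)
//		}
//		// supervisor: the PodVM UUID is stamped on the pod as an annotation
//		uuid, ok := pod.Annotations[vmUUIDLabel]
//		if !ok {
//			return "", fmt.Errorf("pod %s lacks %s annotation", pod.Name, vmUUIDLabel)
//		}
//		return uuid, nil
//	}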
gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") ginkgo.By("Verify the volume is accessible and Read/write is possible") - cmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", + cmd := []string{"exec", pod1.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "cat /mnt/volume1/Pod.html "} output := e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, "Hello message from Pod")).NotTo(gomega.BeFalse()) - wrtiecmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", + wrtiecmd := []string{"exec", pod1.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "echo 'Hello message from test into Pod' > /mnt/volume1/Pod.html"} e2ekubectl.RunKubectlOrDie(namespace, wrtiecmd...) output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) @@ -6426,166 +6251,95 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { }() ginkgo.By("Create a dynamic volume snapshot") - volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, - pvclaim, volHandle, diskSize, true) + volumeSnapshot1, snapshotContent1, snapshotCreated1, + snapshotContentCreated1, snapshotId1, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvclaim1, volHandle1, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - if snapshotContentCreated { - err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) + if snapshotContentCreated1 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent1, snapc, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - if snapshotCreated { + if snapshotCreated1 { framework.Logf("Deleting volume snapshot") - deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot1.Name, pandoraSyncWaitTime) framework.Logf("Wait till the volume snapshot is deleted") err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, - *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + *volumeSnapshot1.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() ginkgo.By("Restore volume from snapshot created above") - pvcSpec := getPersistentVolumeClaimSpecWithDatasource(namespace, diskSize, storageclass, nil, - v1.ReadWriteOnce, volumeSnapshot.Name, snapshotapigroup) - pvclaim1, err := fpv.CreatePVC(ctx, client, namespace, pvcSpec) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - persistentvolume1, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim1}, - framework.ClaimProvisionTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - volHandle1 := persistentvolume1[0].Spec.CSI.VolumeHandle - gomega.Expect(volHandle1).NotTo(gomega.BeEmpty()) + pvclaim2, pvs2, pod2 := verifyVolumeRestoreOperation(ctx, client, + namespace, storageclass, volumeSnapshot1, diskSize, true) + volHandle2 := pvs2[0].Spec.CSI.VolumeHandle if guestCluster { - volHandle1 = getVolumeIDFromSupervisorCluster(volHandle1) + volHandle2 = getVolumeIDFromSupervisorCluster(volHandle2) } + gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) defer func() { - err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim1.Name, 
namespace) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle1) + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod2.Name, namespace)) + err = fpod.DeletePodWithWait(ctx, client, pod2) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }() - ginkgo.By("Create Pod") - pod1, err := createPod(ctx, client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim1}, false, - execRWXCommandPod1) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { - ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod1.Name, namespace)) - err = fpod.DeletePodWithWait(ctx, client, pod1) + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim2.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }() - - if vanillaCluster { - vmUUID = getNodeUUID(ctx, client, pod1.Spec.NodeName) - } else if guestCluster { - vmUUID, err = getVMUUIDFromNodeName(pod1.Spec.NodeName) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle2) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - - ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volHandle1, pod1.Spec.NodeName)) - isDiskAttached, err = e2eVSphere.isVolumeAttachedToVM(client, volHandle1, vmUUID) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") - - ginkgo.By("Verify the volume is accessible and Read/write is possible") - cmd = []string{"exec", pod1.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", - "cat /mnt/volume1/Pod1.html "} - output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) - gomega.Expect(strings.Contains(output, "Hello message from Pod1")).NotTo(gomega.BeFalse()) - - wrtiecmd = []string{"exec", pod1.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", - "echo 'Hello message from test into Pod1' > /mnt/volume1/Pod1.html"} - e2ekubectl.RunKubectlOrDie(namespace, wrtiecmd...) - output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) 
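// The removed kubectl-exec verification above is replaced by verifyVolumeRestoreOperation,
// which appears to subsume these checks. A minimal sketch of the same read/write check using
// the suite's readFileFromPod/writeDataOnFileFromPod helpers (assuming filePathPod1 points at
// the Pod1.html file on the mounted volume, as elsewhere in this suite):
//
//	output := readFileFromPod(namespace, pod2.Name, filePathPod1)
//	gomega.Expect(output).To(gomega.ContainSubstring("Hello message from Pod1"))
//	writeDataOnFileFromPod(namespace, pod2.Name, filePathPod1, "Hello message from test into Pod1")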
- gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) + }() ginkgo.By("Create a dynamic volume snapshot") - volumeSnapshot1, snapshotContent1, snapshotCreated1, - snapshotContentCreated1, snapshotId1, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, - pvclaim1, volHandle1, diskSize, true) + volumeSnapshot2, snapshotContent2, snapshotCreated2, + snapshotContentCreated2, snapshotId2, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvclaim2, volHandle2, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - if snapshotContentCreated1 { - err = deleteVolumeSnapshotContent(ctx, snapshotContent1, snapc, pandoraSyncWaitTime) + if snapshotContentCreated2 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent2, snapc, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - if snapshotCreated1 { + if snapshotCreated2 { framework.Logf("Deleting volume snapshot") - deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot1.Name, pandoraSyncWaitTime) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot2.Name, pandoraSyncWaitTime) framework.Logf("Wait till the volume snapshot is deleted") err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, - *volumeSnapshot1.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + *volumeSnapshot2.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() ginkgo.By("Restore volume from snapshot created above") - pvcSpec = getPersistentVolumeClaimSpecWithDatasource(namespace, diskSize, storageclass, nil, - v1.ReadWriteOnce, volumeSnapshot1.Name, snapshotapigroup) - pvclaim2, err := fpv.CreatePVC(ctx, client, namespace, pvcSpec) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - persistentvolume2, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim2}, - framework.ClaimProvisionTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - volHandle2 := persistentvolume2[0].Spec.CSI.VolumeHandle - gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) + pvclaim3, pvs3, pod3 := verifyVolumeRestoreOperation(ctx, client, + namespace, storageclass, volumeSnapshot2, diskSize, true) + volHandle3 := pvs3[0].Spec.CSI.VolumeHandle if guestCluster { - volHandle2 = getVolumeIDFromSupervisorCluster(volHandle2) + volHandle3 = getVolumeIDFromSupervisorCluster(volHandle3) } + gomega.Expect(volHandle3).NotTo(gomega.BeEmpty()) defer func() { - err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim2.Name, namespace) + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod3.Name, namespace)) + err = fpod.DeletePodWithWait(ctx, client, pod3) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle2) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }() - ginkgo.By("Create Pod") - pod2, err := createPod(ctx, client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim2}, false, - execRWXCommandPod2) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { - ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod2.Name, namespace)) - err = fpod.DeletePodWithWait(ctx, client, pod2) + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim3.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }() - - if vanillaCluster { - vmUUID = getNodeUUID(ctx, client, pod2.Spec.NodeName) - } else if 
guestCluster { - vmUUID, err = getVMUUIDFromNodeName(pod2.Spec.NodeName) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle3) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - - ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volHandle2, pod2.Spec.NodeName)) - isDiskAttached, err = e2eVSphere.isVolumeAttachedToVM(client, volHandle2, vmUUID) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") - - ginkgo.By("Verify the volume is accessible and Read/write is possible") - cmd = []string{"exec", pod2.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", - "cat /mnt/volume1/Pod2.html "} - output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) - gomega.Expect(strings.Contains(output, "Hello message from Pod2")).NotTo(gomega.BeFalse()) - - wrtiecmd = []string{"exec", pod2.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", - "echo 'Hello message from test into Pod2' > /mnt/volume1/Pod2.html"} - e2ekubectl.RunKubectlOrDie(namespace, wrtiecmd...) - output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) - gomega.Expect(strings.Contains(output, "Hello message from test into Pod2")).NotTo(gomega.BeFalse()) - - ginkgo.By("Delete dynamic volume snapshot") - snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, - volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() ginkgo.By("Delete dynamic volume snapshot") snapshotCreated1, snapshotContentCreated1, err = deleteVolumeSnapshot(ctx, snapc, namespace, volumeSnapshot1, pandoraSyncWaitTime, volHandle1, snapshotId1) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + snapshotCreated2, snapshotContentCreated2, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot2, pandoraSyncWaitTime, volHandle2, snapshotId2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* @@ -6599,35 +6353,28 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 7. Verify the older data. It should be intact and write new data. 8. Perform cleanup. 
*/ - ginkgo.It("[tkg-snapshot] Detach volume with snapshot", ginkgo.Label(p1, snapshot, tkg, newTest, + ginkgo.It("[tkg-snapshot][supervisor-snapshot] Detach volume with snapshot", ginkgo.Label(p1, snapshot, tkg, newTest, stable), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - var storageclass *storagev1.StorageClass - var pvclaim *v1.PersistentVolumeClaim - var err error - var snapshotContentCreated = false - var snapshotCreated = false - var vmUUID string - scParameters[svStorageClassName] = storagePolicyName - - ginkgo.By("Get storage class") - storageclass, err = client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) - if !apierrors.IsNotFound(err) { + ginkgo.By("Create storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, scName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } + }() ginkgo.By("Create PVC") - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + pvclaim, pvs := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", diskSize, storageclass, true) - volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle + volHandle := pvs[0].Spec.CSI.VolumeHandle if guestCluster { volHandle = getVolumeIDFromSupervisorCluster(volHandle) } gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) - defer func() { err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -6639,21 +6386,26 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { pod, err := createPod(ctx, client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false, execRWXCommandPod1) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - nodeName := pod.Spec.NodeName + + var vmUUID string + var exists bool + if vanillaCluster { vmUUID = getNodeUUID(ctx, client, pod.Spec.NodeName) } else if guestCluster { vmUUID, err = getVMUUIDFromNodeName(pod.Spec.NodeName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else if supervisorCluster { + annotations := pod.Annotations + vmUUID, exists = annotations[vmUUIDLabel] + gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("Pod doesn't have %s annotation", vmUUIDLabel)) + _, err := e2eVSphere.getVMByUUID(ctx, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - - ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volHandle, nodeName)) - isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, volHandle, vmUUID) + isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, pvs[0].Spec.CSI.VolumeHandle, vmUUID) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") - defer func() { - // Delete POD ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace)) err = fpod.DeletePodWithWait(ctx, client, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -6662,7 +6414,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Create/Get volume snapshot class") volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { if vanillaCluster { err := snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, @@ 
-6673,9 +6424,8 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Create a dynamic volume snapshot") volumeSnapshot, _, snapshotCreated, - snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, true) - defer func() { if snapshotContentCreated { framework.Logf("Deleting volume snapshot content") @@ -6703,9 +6453,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { volHandle2 = getVolumeIDFromSupervisorCluster(volHandle2) } gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) - defer func() { - // Delete POD ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod2.Name, namespace)) err = fpod.DeletePodWithWait(ctx, client, pod2) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -6771,7 +6519,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { fcdID, err := e2eVSphere.createFCDwithValidProfileID(ctx, "staticfcd"+curtimestring, profileID, diskSizeInMb, defaultDatastore.Reference()) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - // deleteFCDRequired = false + deleteFCDRequired = false ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow newly created FCD:%s to sync with pandora", pandoraSyncWaitTime, fcdID)) time.Sleep(time.Duration(pandoraSyncWaitTime) * time.Second) @@ -6828,7 +6576,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Create/Get volume snapshot class") volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { if vanillaCluster { err := snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, @@ -6996,19 +6743,14 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { var persistentvolumes []*v1.PersistentVolume var err error - ginkgo.By("Create storage class and PVC") - if vanillaCluster { - scParameters[scParamDatastoreURL] = datastoreURL - } else if guestCluster { - scParameters[svStorageClassName] = storagePolicyName - } - curtime := time.Now().Unix() randomValue := rand.Int() val := strconv.FormatInt(int64(randomValue), 10) val = string(val[1:3]) curtimestring := strconv.FormatInt(curtime, 10) scName := "snapshot-scale" + curtimestring + val + + ginkgo.By("Create storage class") storageclass, err = createStorageClass(client, scParameters, nil, "", "", false, scName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -7147,59 +6889,50 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { }) /* + Testcase-22 Max Snapshots per volume on GC 1. Create a sc, a pvc and ensure the pvc gets provisioned and is Bound 2. Create 33 dynamically provisioned snapshots using this pvc 3. Create new volumes (pvcFromPreProvSS and pvcFromDynamicSS) using these - snapshots as source, use the same sc + snapshots as source, use the same sc 4. Ensure the pvc gets provisioned and is Bound 5. Attach the pvc to a pod and ensure data from snapshot is available - (file that was written in step.1 should be available) + (file that was written in step.1 should be available) 6. And also write new data to the restored volumes and it should succeed 7. Delete the snapshots and pvcs/pods created in steps 1,2,3 8. Continue to write new data to the restore volumes and it should succeed 9. Create new snapshots on restore volume and verify it succeeds 10. 
Run cleanup: Delete snapshots, restored-volumes, pods */ - ginkgo.It("[tkg-snapshot] Max Snapshots per volume on GC", ginkgo.Label( + ginkgo.It("[tkg-snapshot][supervisor-snapshot] Max Snapshots per volume on WCP and GC", ginkgo.Label( p1, snapshot, tkg, newTest), func() { + ctx, cancel := context.WithCancel(context.Background()) defer cancel() - var storageclass *storagev1.StorageClass - var pvclaim *v1.PersistentVolumeClaim - var pvclaims []*v1.PersistentVolumeClaim - var err error - var snapshotContentCreated = false - var volumeSnapshots []*snapV1.VolumeSnapshot - var volumeSnapshotContents []*snapV1.VolumeSnapshotContent - if vanillaCluster { - scParameters[scParamDatastoreURL] = datastoreURL - } + var volumeSnapshots []*snapV1.VolumeSnapshot + var snapshotIds []string + snapDeleted := false + noOfSnapshotToCreate := 33 - ginkgo.By("Get storage class and create PVC") - storageclass, err = client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) - if !apierrors.IsNotFound(err) { - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - pvclaim, err = createPVC(ctx, client, namespace, nil, "", storageclass, "") + ginkgo.By("Create storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, scName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }() - ginkgo.By("Expect claim to provision volume successfully") - pvclaims = append(pvclaims, pvclaim) - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle + ginkgo.By("Create PVC") + pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, v1.ReadWriteOnce, + diskSize, storageclass, true) + volHandle = persistentVolumes[0].Spec.CSI.VolumeHandle if guestCluster { volHandle = getVolumeIDFromSupervisorCluster(volHandle) } gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) - defer func() { err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -7207,17 +6940,9 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - // Verify using CNS Query API if VolumeID retrieved from PV is present.
- ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volHandle)) - queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volHandle) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(queryResult.Volumes).ShouldNot(gomega.BeEmpty()) - gomega.Expect(queryResult.Volumes[0].VolumeId.Id).To(gomega.Equal(volHandle)) - ginkgo.By("Create/Get volume snapshot class") volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { if vanillaCluster { err := snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, @@ -7226,76 +6951,34 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { } }() - for i := 0; i < 33; i++ { - ginkgo.By("Create a volume snapshot") - framework.Logf("Creating snapshot no: %d", i+1) - volumeSnapshot, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx, - getVolumeSnapshotSpec(namespace, volumeSnapshotClass.Name, pvclaim.Name), metav1.CreateOptions{}) + for i := 0; i < noOfSnapshotToCreate; i++ { + ginkgo.By(fmt.Sprintf("Creating snapshot no: %d for pvc %s", i+1, pvclaim.Name)) + volumeSnapshot, _, _, _, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvclaim, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("Volume snapshot name is : %s", volumeSnapshot.Name) - snapshotCreated := true - - defer func() { - if snapshotContentCreated { - framework.Logf("Deleting volume snapshot content") - deleteVolumeSnapshotContentWithPandoraWait(ctx, snapc, - *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) - - framework.Logf("Wait till the volume snapshot is deleted") - err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, *volumeSnapshot.Status.BoundVolumeSnapshotContentName) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - if snapshotCreated { - framework.Logf("Deleting volume snapshot") - deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) - } - }() - - ginkgo.By("Verify volume snapshot is created") - volumeSnapshot, err = waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, volumeSnapshot.Name) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(volumeSnapshot.Status.RestoreSize.Cmp(resource.MustParse(diskSize))).To(gomega.BeZero()) - framework.Logf("VolumeSnapshot Name: %s", volumeSnapshot.Name) volumeSnapshots = append(volumeSnapshots, volumeSnapshot) - - ginkgo.By("Verify volume snapshot content is created") - snapshotContent, err := snapc.SnapshotV1().VolumeSnapshotContents().Get(ctx, - *volumeSnapshot.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotContentCreated = true - gomega.Expect(*snapshotContent.Status.ReadyToUse).To(gomega.BeTrue()) - framework.Logf("VolumeSnapshotContent Name: %s", snapshotContent.Name) - volumeSnapshotContents = append(volumeSnapshotContents, snapshotContent) - - framework.Logf("Get volume snapshot ID from snapshot handle") - snapshotId, err := getVolumeSnapshotIdFromSnapshotHandle(ctx, snapshotContent) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("snapshot Id: %s", snapshotId) - - ginkgo.By("Query CNS and check the volume snapshot entry") - err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - + snapshotIds = append(snapshotIds, snapshotId) } - 
for i := 0; i < 33; i++ { - framework.Logf("Get volume snapshot ID from snapshot handle") - snapshotId, err := getVolumeSnapshotIdFromSnapshotHandle(ctx, volumeSnapshotContents[i]) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By("Deleted volume snapshot is created above") - framework.Logf("VolumeSnapshot Name to be deleted: %s", volumeSnapshots[i].Name) - deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshots[i].Name, pandoraSyncWaitTime) - - framework.Logf("Wait till the volume snapshot is deleted") - framework.Logf("VolumeSnapshotContent Name to be deleted: %s", volumeSnapshotContents[i].Name) - err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, volumeSnapshotContents[i].ObjectMeta.Name) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if !snapDeleted { + for i := 0; i < noOfSnapshotToCreate; i++ { + ginkgo.By("Delete dynamic volume snapshot") + _, _, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshots[i], pandoraSyncWaitTime, volHandle, snapshotIds[i]) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + } + }() - ginkgo.By("Verify snapshot entry is deleted from CNS") - err = verifySnapshotIsDeletedInCNS(volHandle, snapshotId) + for i := 0; i < noOfSnapshotToCreate; i++ { + ginkgo.By("Delete dynamic volume snapshot") + _, _, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshots[i], pandoraSyncWaitTime, volHandle, snapshotIds[i]) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } + snapDeleted = true }) /* @@ -7325,12 +7008,14 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { namespace1, err := framework.CreateTestingNS(ctx, f.BaseName, client, labels_ns) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Error creating namespace on GC1") - ginkgo.By("Create storage class and PVC on GC1") + ginkgo.By("Create storage class on GC1") storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace1.Name, nil, "", + + ginkgo.By("Create PVC on GC1") + pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace1.Name, labelsMap, "", diskSize, storageclass, true) volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle if guestCluster { @@ -7358,7 +7043,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Create a dynamic volume snapshot on GC1") volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, dynamicSnapshotId, err := createDynamicVolumeSnapshot(ctx, namespace1.Name, snapc, + snapshotContentCreated, dynamicSnapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace1.Name, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, true) defer func() { if snapshotCreated { @@ -7484,6 +7169,300 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { volumeSnapshot, pandoraSyncWaitTime, volHandle, dynamicSnapshotId) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) + + /* + Testcase-2 + Volume restore using dynamic snapshot + 1. Create a PVC using the storage class (storage policy) tagged to the supervisor namespace + 2. Wait for PVC to reach the Bound state and verify CNS metadata for this created volume. + 3. Create a pod and attach it to the volume created in step #1. + 4. Wait for Pod to reach a running-ready state and write data into the volume. + 5. 
Get VolumeSnapshotClass "volumesnapshotclass-delete" from supervisor cluster + 6. Create a volume snapshot using the above snapshot class (step #5) and PVC (step #1) as source. + 7. Ensure the snapshot is created, verify using get + 8. Also, verify that VolumeSnapshotContent is auto-created, verify the references to + PVC and volume snapshot on this object + 9. Verify that the VolumeSnapshot has ready-to-use set to True + 10. Verify that the Restore Size set on the snapshot is the same as that of the source volume size + 11. Query the snapshot from the CNS side using volume ID - should pass and return the snapshot entry + 12. Create a new volume using the snapshot created in step #6 and use the same SC. + 13. Ensure the PVC gets provisioned and is Bound + 14. Create a pod and attach it to a restored volume. + 15. Wait for Pod to reach ready running state. + 16. Ensure that the data from a snapshot is available (the file that was written in step #1 should be available) + 17. Also, write new data to the restored volumes and it should succeed + 18. Perform cleanup: Delete Snapshot, Pod, PVC. + */ + + ginkgo.It("[supervisor-snapshot] Volume restore using a "+ "dynamic snapshot", ginkgo.Label(p0, wcp, snapshot, block, stable), func() { + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ginkgo.By("Create storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, scName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create PVC") + pvclaim, pvs := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", + diskSize, storageclass, true) + volHandle = pvs[0].Spec.CSI.VolumeHandle + if guestCluster { + volHandle = getVolumeIDFromSupervisorCluster(volHandle) + } + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + defer func() { + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create a Pod using the volume created above and write data into the volume") + pod, err := createPod(ctx, client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false, + execRWXCommandPod1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace)) + err = fpod.DeletePodWithWait(ctx, client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", + pvs[0].Spec.CSI.VolumeHandle, pod.Spec.NodeName)) + + var vmUUID string + var exists bool + + if vanillaCluster { + vmUUID = getNodeUUID(ctx, client, pod.Spec.NodeName) + } else if guestCluster { + vmUUID, err = getVMUUIDFromNodeName(pod.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else if supervisorCluster { + annotations := pod.Annotations + vmUUID, exists = annotations[vmUUIDLabel] + gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("Pod doesn't have %s annotation", vmUUIDLabel)) + _, err := e2eVSphere.getVMByUUID(ctx, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, pvs[0].Spec.CSI.VolumeHandle, vmUUID)
gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") + + ginkgo.By("Verify the volume is accessible and Read/write is possible") + output := readFileFromPod(namespace, pod.Name, filePathPod1) + gomega.Expect(strings.Contains(output, "Hello message from Pod1")).NotTo(gomega.BeFalse()) + + writeDataOnFileFromPod(namespace, pod.Name, filePathPod1, "Hello message from test into Pod1") + output = readFileFromPod(namespace, pod.Name, filePathPod1) + gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) + + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if vanillaCluster { + err = snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, + metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Create a dynamic volume snapshot") + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvclaim, volHandle, diskSize, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + if !guestCluster { + ginkgo.By("Verify volume metadata for deployment pod, pvc and pv") + err = waitAndVerifyCnsVolumeMetadata(ctx, volHandle, pvclaim, pvs[0], pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + ginkgo.By("Create or restore a volume using the dynamically created volume snapshot") + pvclaim2, pvs2, pod2 := verifyVolumeRestoreOperation(ctx, client, namespace, storageclass, + volumeSnapshot, diskSize, true) + volHandle2 := pvs2[0].Spec.CSI.VolumeHandle + if guestCluster { + volHandle2 = getVolumeIDFromSupervisorCluster(volHandle2) + } + defer func() { + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim2.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + defer func() { + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod2.Name, namespace)) + err = fpod.DeletePodWithWait(ctx, client, pod2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + if !guestCluster { + ginkgo.By("Verify volume metadata for deployment pod, pvc and pv") + err = waitAndVerifyCnsVolumeMetadata(ctx, volHandle2, pvclaim2, pvs2[0], pod2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* + This test verifies the static provisioning 
workflow with snapshot on supervisor cluster + + Test Steps: + 1. Create CNS volume and note the volumeID. + 2. Create Resource quota. + 3. Create CNS register volume with above created VolumeID. + 4. Verify created PV, PVC and check the bidirectional reference. + 5. Create Pod with above created PVC. + 6. Verify volume is attached to the node and volume is accessible in the pod. + 7. Delete POD. + 8. Delete PVC. + 9. Verify PV is deleted automatically. + 10. Verify volume ID is deleted automatically. + 11. Verify CRD deleted automatically. + */ + + ginkgo.It("[supervisor-snapshot] Verify static provisioning workflow with snapshot", ginkgo.Label(p0, block, wcp), func() { + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + curtime := time.Now().Unix() + curtimestring := strconv.FormatInt(curtime, 10) + pvcName := "cns-pvc-" + curtimestring + framework.Logf("pvc name: %s", pvcName) + + restConfig, storageclass, profileID := staticProvisioningPreSetUpUtil(ctx, f, client, storagePolicyName) + framework.Logf("Storage class: %s", storageclass.Name) + + ginkgo.By("Creating FCD (CNS Volume)") + fcdID, err := e2eVSphere.createFCDwithValidProfileID(ctx, + "staticfcd"+curtimestring, profileID, diskSizeInMb, defaultDatastore.Reference()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteFCDRequired = true + defer func() { + if deleteFCDRequired { + ginkgo.By(fmt.Sprintf("Deleting FCD: %s", fcdID)) + err := e2eVSphere.deleteFCD(ctx, fcdID, defaultDatastore.Reference()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow newly created FCD:%s to sync with pandora", + pandoraSyncWaitTime, fcdID)) + time.Sleep(time.Duration(pandoraSyncWaitTime) * time.Second) + + ginkgo.By("Create CNS register volume with above created FCD") + cnsRegisterVolume := getCNSRegisterVolumeSpec(ctx, namespace, fcdID, "", pvcName, v1.ReadWriteOnce) + err = createCNSRegisterVolume(ctx, restConfig, cnsRegisterVolume) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(waitForCNSRegisterVolumeToGetCreated(ctx, restConfig, + namespace, cnsRegisterVolume, poll, supervisorClusterOperationsTimeout)) + cnsRegisterVolumeName := cnsRegisterVolume.GetName() + framework.Logf("CNS register volume name : %s", cnsRegisterVolumeName) + + ginkgo.By("Verify created PV, PVC and check the bidirectional reference") + pvc, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvcName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pv := getPvFromClaim(client, namespace, pvcName) + volHandle = pv.Spec.CSI.VolumeHandle + verifyBidirectionalReferenceOfPVandPVC(ctx, client, pvc, pv, fcdID) + + ginkgo.By("Creating pod") + pod, err := createPod(ctx, client, namespace, nil, []*v1.PersistentVolumeClaim{pvc}, false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + podName := pod.GetName() + framework.Logf("podName : %s", podName) + + ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", + pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName)) + var vmUUID string + var exists bool + annotations := pod.Annotations + vmUUID, exists = annotations[vmUUIDLabel] + gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("Pod doesn't have %s annotation", vmUUIDLabel)) + _, err = e2eVSphere.getVMByUUID(ctx, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) +
gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if vanillaCluster { + err = snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, + metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Create a dynamic volume snapshot") + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvc, volHandle, diskSize, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Deleting the pod") + err = fpod.DeletePodWithWait(ctx, client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By(fmt.Sprintf("Verify volume: %s is detached from PodVM with vmUUID: %s", + pv.Spec.CSI.VolumeHandle, vmUUID)) + _, err = e2eVSphere.getVMByUUIDWithWait(ctx, vmUUID, supervisorClusterOperationsTimeout) + gomega.Expect(err).To(gomega.HaveOccurred(), + fmt.Sprintf("PodVM with vmUUID: %s still exists. So volume: %s is not detached from the PodVM", + vmUUID, pv.Spec.CSI.VolumeHandle)) + defer func() { + testCleanUpUtil(ctx, restConfig, client, cnsRegisterVolume, namespace, pvc.Name, pv.Name) + }() + + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) }) // invokeSnapshotOperationsOnSharedDatastore is a wrapper method which invokes creation of volume snapshot @@ -7517,6 +7496,26 @@ func invokeSnapshotOperationsOnSharedDatastore(client clientset.Interface, ctx c } else { storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForNfsDatastores) } + } else if sharedDatastoreType == "VSAN2" { + if vanillaCluster { + sharedDatastoreURL = os.Getenv(envSharedVsan2DatastoreURL) + if sharedDatastoreURL == "" { + ginkgo.Skip("Skipping the test because SHARED_VSAN2_DATASTORE_URL is not set. " + + "This may be because the testbed does not have a shared VSAN2 datastore.") + } + } else { + storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForVsan2Datastore) + } + } else if sharedDatastoreType == "VSANDIRECT" { + if vanillaCluster { + sharedDatastoreURL = os.Getenv(envSharedVsanDirectDatastoreURL) + if sharedDatastoreURL == "" { + ginkgo.Skip("Skipping the test because SHARED_VSANDIRECT_DATASTORE_URL is not set. 
" + + "This may be due to testbed is not having shared VSANDIRECT datastore.") + } + } else { + storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForVsanDirectDatastore) + } } else if sharedDatastoreType == "VVOL" { if vanillaCluster { sharedDatastoreURL = os.Getenv(envSharedVVOLDatastoreURL) @@ -7541,16 +7540,14 @@ func invokeSnapshotOperationsOnSharedDatastore(client clientset.Interface, ctx c if vanillaCluster { ginkgo.By("Create storage class") - scParameters[scParamDatastoreURL] = sharedDatastoreURL storageclass, err = createStorageClass(client, scParameters, nil, "", "", false, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() } else { - ginkgo.By("Get storage class and create PVC") + ginkgo.By("Get storage class") storageclass, err = client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -7564,7 +7561,6 @@ func invokeSnapshotOperationsOnSharedDatastore(client clientset.Interface, ctx c volHandle = getVolumeIDFromSupervisorCluster(volHandle) } gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) - defer func() { err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -7575,7 +7571,6 @@ func invokeSnapshotOperationsOnSharedDatastore(client clientset.Interface, ctx c ginkgo.By("Create/Get volume snapshot class") volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { if vanillaCluster { err := snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, @@ -7586,10 +7581,9 @@ func invokeSnapshotOperationsOnSharedDatastore(client clientset.Interface, ctx c ginkgo.By("Create a dynamic volume snapshot") volumeSnapshot, _, snapshotCreated, - snapshotContentCreated, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, + snapshotContentCreated, _, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, true) framework.Logf("Volume snapshot name is : %s", volumeSnapshot.Name) - defer func() { if snapshotContentCreated { framework.Logf("Deleting volume snapshot content") @@ -7607,6 +7601,7 @@ func invokeSnapshotOperationsOnSharedDatastore(client clientset.Interface, ctx c } }() + ginkgo.By("Create pvc using a dynamic volume snapshot") pvclaim2, persistentVolumes2, pod2 := verifyVolumeRestoreOperation(ctx, client, namespace, storageclass, volumeSnapshot, diskSize, true) volHandle2 := persistentVolumes2[0].Spec.CSI.VolumeHandle @@ -7616,7 +7611,6 @@ func invokeSnapshotOperationsOnSharedDatastore(client clientset.Interface, ctx c gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) defer func() { - // Delete POD ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod2.Name, namespace)) err = fpod.DeletePodWithWait(ctx, client, pod2) gomega.Expect(err).NotTo(gomega.HaveOccurred()) diff --git a/tests/e2e/csi_snapshot_file_volume.go b/tests/e2e/csi_snapshot_file_volume.go index 54d4c37ee1..f8be051529 100644 --- a/tests/e2e/csi_snapshot_file_volume.go +++ b/tests/e2e/csi_snapshot_file_volume.go @@ -31,7 +31,6 @@ import ( fnodes "k8s.io/kubernetes/test/e2e/framework/node" fpv "k8s.io/kubernetes/test/e2e/framework/pv" - snapV1 
"github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" snapclient "github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned" ) @@ -101,7 +100,6 @@ var _ = ginkgo.Describe("[file-vanilla-snapshot] Volume Snapshot file volume Tes storageclass, pvclaim, err = createPVCAndStorageClass(ctx, client, namespace, nil, scParameters, diskSize, nil, "", false, v1.ReadWriteMany) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -113,7 +111,6 @@ var _ = ginkgo.Describe("[file-vanilla-snapshot] Volume Snapshot file volume Tes gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) - defer func() { err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -122,32 +119,34 @@ var _ = ginkgo.Describe("[file-vanilla-snapshot] Volume Snapshot file volume Tes }() ginkgo.By("Create volume snapshot class") - volumeSnapshotClass, err := snapc.SnapshotV1().VolumeSnapshotClasses().Create(ctx, - getVolumeSnapshotClassSpec(snapV1.DeletionPolicy("Delete"), nil), metav1.CreateOptions{}) + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if vanillaCluster { + err = snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, + metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + ginkgo.By("Create a dynamic volume snapshot") + volumeSnapshot, snapshotContent, _, + _, _, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvclaim, volHandle, diskSize, true) + // snapshot is not supported on file volume, expecting an error to occur + gomega.Expect(err).To(gomega.HaveOccurred()) defer func() { - err := snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, metav1.DeleteOptions{}) + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }() - ginkgo.By("Create a volume snapshot") - volumeSnapshot, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx, - getVolumeSnapshotSpec(namespace, volumeSnapshotClass.Name, pvclaim.Name), metav1.CreateOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("Volume snapshot name is : %s", volumeSnapshot.Name) - snapshotCreated := true + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) - defer func() { - if snapshotCreated { - framework.Logf("Deleting volume snapshot") - deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) - } + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - - ginkgo.By("Verify volume snapshot is created") - volumeSnapshot, err = waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, volumeSnapshot.Name) - gomega.Expect(err).To(gomega.HaveOccurred()) } else { ginkgo.Skip("Block volume setup, so skip file volume provision") 
} diff --git a/tests/e2e/csi_snapshot_negative.go b/tests/e2e/csi_snapshot_negative.go index 932e90d115..5cac1ba226 100644 --- a/tests/e2e/csi_snapshot_negative.go +++ b/tests/e2e/csi_snapshot_negative.go @@ -237,18 +237,19 @@ var _ = ginkgo.Describe("[block-snapshot-negative] Volume Snapshot Fault-Injecti volumeSnapshot, err = waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, volumeSnapshot.Name) gomega.Expect(err).To(gomega.HaveOccurred()) }) - /* + + /* Testcase-20 Snapshot lifecycle ops with fault-injection 1. Create Snapshot (Pre-provisioned and dynamic) 2. Delete Snapshot 3. Create Volume from Snapshot 4. During 1a, 1b and 1c run the following fault events and ensure the operator - eventually succeeds and there is no functional impact + eventually succeeds and there is no functional impact 5. vSphere side service restarts: vpxd, sps, vsan-health, host-restart 6. k8s side: csi pod restarts with improved_idempotency enabled as well - as run a scenario with improved_idempotency disabled + as run a scenario with improved_idempotency disabled */ - ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] create volume snapshot when "+ + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot][supervisor-snapshot] create volume snapshot when "+ "hostd goes down", ginkgo.Label(p0, block, vanilla, tkg, snapshot, disruptive), func() { serviceName = hostdServiceName @@ -256,7 +257,7 @@ var _ = ginkgo.Describe("[block-snapshot-negative] Volume Snapshot Fault-Injecti csiNamespace, fullSyncWaitTime, isServiceStopped, true, csiReplicas, pandoraSyncWaitTime) }) - ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] create volume snapshot when CSI "+ + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] create volume snapshot when CSI "+ "restarts", ginkgo.Label(p0, block, vanilla, tkg, snapshot, disruptive), func() { serviceName = "CSI" @@ -264,7 +265,7 @@ var _ = ginkgo.Describe("[block-snapshot-negative] Volume Snapshot Fault-Injecti csiNamespace, fullSyncWaitTime, isServiceStopped, true, csiReplicas, pandoraSyncWaitTime) }) - ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] create volume snapshot when VPXD "+ + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot][supervisor-snapshot] create volume snapshot when VPXD "+ "goes down", ginkgo.Label(p0, block, vanilla, tkg, snapshot, disruptive), func() { serviceName = vpxdServiceName @@ -272,7 +273,7 @@ var _ = ginkgo.Describe("[block-snapshot-negative] Volume Snapshot Fault-Injecti csiNamespace, fullSyncWaitTime, isServiceStopped, false, csiReplicas, pandoraSyncWaitTime) }) - ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] create volume snapshot when CNS goes "+ + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot][supervisor-snapshot] create volume snapshot when CNS goes "+ "down", ginkgo.Label(p0, block, vanilla, tkg, snapshot, disruptive), func() { serviceName = vsanhealthServiceName @@ -280,7 +281,7 @@ var _ = ginkgo.Describe("[block-snapshot-negative] Volume Snapshot Fault-Injecti csiNamespace, fullSyncWaitTime, isServiceStopped, false, csiReplicas, pandoraSyncWaitTime) }) - ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] create volume snapshot when SPS "+ + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot][supervisor-snapshot] create volume snapshot when SPS "+ "goes down", ginkgo.Label(p0, block, vanilla, tkg, snapshot, disruptive), func() { serviceName = spsServiceName @@ -288,7 +289,7 @@ var _ = ginkgo.Describe("[block-snapshot-negative] Volume Snapshot Fault-Injecti csiNamespace, fullSyncWaitTime, isServiceStopped, true, csiReplicas, pandoraSyncWaitTime) })
- ginkgo.It("[tkg-snapshot] create volume snapshot when SVC CSI restarts", ginkgo.Label(p0, + ginkgo.It("[tkg-snapshot] [supervisor-snapshot] create volume snapshot when SVC CSI restarts", ginkgo.Label(p0, tkg, snapshot, disruptive, newTest), func() { serviceName = "WCP CSI" @@ -318,7 +319,6 @@ func snapshotOperationWhileServiceDown(serviceName string, namespace string, scParameters[scParamDatastoreURL] = datastoreURL storageclass, err = createStorageClass(client, scParameters, nil, "", "", false, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -339,7 +339,6 @@ func snapshotOperationWhileServiceDown(serviceName string, namespace string, volHandle = getVolumeIDFromSupervisorCluster(volHandle) } gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) - defer func() { err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -347,9 +346,16 @@ func snapshotOperationWhileServiceDown(serviceName string, namespace string, gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - ginkgo.By("Create/Get volume snapshot class") + ginkgo.By("Create volume snapshot class") volumeSnapshotClass, err = createVolumeSnapshotClass(ctx, snapc, deletionPolicy) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if vanillaCluster { + err = snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, + metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() ginkgo.By("Create a volume snapshot") snapshot, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx, @@ -357,7 +363,6 @@ func snapshotOperationWhileServiceDown(serviceName string, namespace string, gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Volume snapshot name is : %s", snapshot.Name) snapshotCreated := true - defer func() { if snapshotCreated { framework.Logf("Deleting volume snapshot") diff --git a/tests/e2e/csi_snapshot_utils.go b/tests/e2e/csi_snapshot_utils.go index aa86422563..02c0707f7d 100644 --- a/tests/e2e/csi_snapshot_utils.go +++ b/tests/e2e/csi_snapshot_utils.go @@ -335,20 +335,21 @@ func deleteVolumeSnapshot(ctx context.Context, snapc *snapclient.Clientset, name // getVolumeSnapshotIdFromSnapshotHandle fetches VolumeSnapshotId From SnapshotHandle func getVolumeSnapshotIdFromSnapshotHandle(ctx context.Context, - snapshotContent *snapV1.VolumeSnapshotContent) (string, error) { + snapshotContent *snapV1.VolumeSnapshotContent) (string, string, error) { var snapshotID string + var snapshotHandle string var err error - if vanillaCluster { - snapshotHandle := *snapshotContent.Status.SnapshotHandle + if vanillaCluster || supervisorCluster { + snapshotHandle = *snapshotContent.Status.SnapshotHandle + snapshotID = strings.Split(snapshotHandle, "+")[1] } else if guestCluster { - snapshotHandle := *snapshotContent.Status.SnapshotHandle + snapshotHandle = *snapshotContent.Status.SnapshotHandle snapshotID, _, _, err = getSnapshotHandleFromSupervisorCluster(ctx, snapshotHandle) if err != nil { - return "", err + return snapshotID, snapshotHandle, err } } - return snapshotID, nil + return snapshotID, snapshotHandle, nil }
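getVolumeSnapshotIdFromSnapshotHandle now returns the raw handle alongside the snapshot ID. On vanilla and supervisor clusters the CNS snapshot handle packs the volume ID and the snapshot ID into a single "+"-separated string, which is why the helper splits on "+". A minimal standalone sketch of that parsing, assuming only the format visible above (the IDs below are made up):

```go
// Sketch of the "+"-separated CNS snapshot handle format split by the helper above.
package main

import (
	"fmt"
	"strings"
)

// splitCnsSnapshotHandle returns the volume ID and snapshot ID encoded in a
// CNS snapshot handle, or an error if the handle is not "+"-separated.
func splitCnsSnapshotHandle(handle string) (volumeID, snapshotID string, err error) {
	parts := strings.Split(handle, "+")
	if len(parts) != 2 {
		return "", "", fmt.Errorf("unexpected snapshot handle %q", handle)
	}
	return parts[0], parts[1], nil
}

func main() {
	// hypothetical handle for illustration only
	volID, snapID, err := splitCnsSnapshotHandle(
		"e6d4ab33-1111-2222-3333-444455556666+9bd7fa9f-aaaa-bbbb-cccc-ddddeeee0000")
	if err != nil {
		panic(err)
	}
	fmt.Println("volume:", volID, "snapshot:", snapID)
}
```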
// createVolumeSnapshotClass creates VSC for a Vanilla cluster and @@ -399,53 +400,53 @@ func createDynamicVolumeSnapshot(ctx context.Context, namespace string, snapc *snapclient.Clientset, volumeSnapshotClass *snapV1.VolumeSnapshotClass, pvclaim *v1.PersistentVolumeClaim, volHandle string, diskSize string, performCnsQueryVolumeSnapshot bool) (*snapV1.VolumeSnapshot, - *snapV1.VolumeSnapshotContent, bool, bool, string, error) { + *snapV1.VolumeSnapshotContent, bool, bool, string, string, error) { volumeSnapshot, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx, getVolumeSnapshotSpec(namespace, volumeSnapshotClass.Name, pvclaim.Name), metav1.CreateOptions{}) if err != nil { - return nil, nil, false, false, "", err + return volumeSnapshot, nil, false, false, "", "", err } framework.Logf("Volume snapshot name is : %s", volumeSnapshot.Name) ginkgo.By("Verify volume snapshot is created") volumeSnapshot, err = waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, volumeSnapshot.Name) if err != nil { - return nil, nil, false, false, "", err + return volumeSnapshot, nil, false, false, "", "", err } snapshotCreated := true if volumeSnapshot.Status.RestoreSize.Cmp(resource.MustParse(diskSize)) != 0 { - return nil, nil, false, false, "", fmt.Errorf("unexpected restore size") + return volumeSnapshot, nil, false, false, "", "", fmt.Errorf("unexpected restore size") } ginkgo.By("Verify volume snapshot content is created") snapshotContent, err := snapc.SnapshotV1().VolumeSnapshotContents().Get(ctx, *volumeSnapshot.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{}) if err != nil { - return nil, nil, false, false, "", err + return volumeSnapshot, snapshotContent, false, false, "", "", err } snapshotContentCreated := true snapshotContent, err = waitForVolumeSnapshotContentReadyToUse(*snapc, ctx, snapshotContent.Name) if err != nil { - return nil, nil, false, false, "", fmt.Errorf("volume snapshot content is not ready to use") + return volumeSnapshot, snapshotContent, false, false, "", "", fmt.Errorf("volume snapshot content is not ready to use") } framework.Logf("Get volume snapshot ID from snapshot handle") - snapshotId, err := getVolumeSnapshotIdFromSnapshotHandle(ctx, snapshotContent) + snapshotId, snapshotHandle, err := getVolumeSnapshotIdFromSnapshotHandle(ctx, snapshotContent) if err != nil { - return nil, nil, false, false, "", err + return volumeSnapshot, snapshotContent, false, false, snapshotId, snapshotHandle, err } if performCnsQueryVolumeSnapshot { ginkgo.By("Query CNS and check the volume snapshot entry") err = waitForCNSSnapshotToBeCreated(volHandle, snapshotId) if err != nil { - return nil, nil, false, false, snapshotId, err + return volumeSnapshot, snapshotContent, false, false, snapshotId, snapshotHandle, err } } - return volumeSnapshot, snapshotContent, snapshotCreated, snapshotContentCreated, snapshotId, nil + return volumeSnapshot, snapshotContent, snapshotCreated, snapshotContentCreated, snapshotId, snapshotHandle, nil } // getPersistentVolumeClaimSpecWithDatasource return the PersistentVolumeClaim @@ -636,6 +637,7 @@ func verifyVolumeRestoreOperation(ctx context.Context, client clientset.Interfac gomega.Expect(err).NotTo(gomega.HaveOccurred()) var vmUUID string + var exists bool nodeName := pod.Spec.NodeName if vanillaCluster { @@ -643,6 +645,12 @@ func verifyVolumeRestoreOperation(ctx context.Context, client clientset.Interfac } else if guestCluster { vmUUID, err = getVMUUIDFromNodeName(pod.Spec.NodeName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else if supervisorCluster { + annotations := pod.Annotations + vmUUID, exists = annotations[vmUUIDLabel] + gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("Pod doesn't have %s annotation", vmUUIDLabel)) + _,
err := e2eVSphere.getVMByUUID(ctx, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volHandle2, nodeName)) @@ -748,3 +756,76 @@ func getRestConfigClientForGuestCluster2(guestClusterRestConfig *rest.Config) *r } return guestClusterRestConfig } + +// createDynamicVolumeSnapshotWithoutSnapClass creates a dynamic volume snapshot for a volume without specifying a volume snapshot class +func createDynamicVolumeSnapshotWithoutSnapClass(ctx context.Context, namespace string, + snapc *snapclient.Clientset, volumeSnapshotClass *snapV1.VolumeSnapshotClass, + pvclaim *v1.PersistentVolumeClaim, volHandle string, diskSize string, + performCnsQueryVolumeSnapshot bool) (*snapV1.VolumeSnapshot, + *snapV1.VolumeSnapshotContent, bool, bool, string, string, error) { + + volumeSnapshot, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx, + getVolumeSnapshotSpecWithoutSC(namespace, pvclaim.Name), metav1.CreateOptions{}) + if err != nil { + return volumeSnapshot, nil, false, false, "", "", err + } + framework.Logf("Volume snapshot name is : %s", volumeSnapshot.Name) + + ginkgo.By("Verify volume snapshot is created") + volumeSnapshot, err = waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, volumeSnapshot.Name) + if err != nil { + return volumeSnapshot, nil, false, false, "", "", err + } + + snapshotCreated := true + if volumeSnapshot.Status.RestoreSize.Cmp(resource.MustParse(diskSize)) != 0 { + return volumeSnapshot, nil, false, false, "", "", fmt.Errorf("unexpected restore size") + } + + ginkgo.By("Verify volume snapshot content is created") + snapshotContent, err := snapc.SnapshotV1().VolumeSnapshotContents().Get(ctx, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{}) + if err != nil { + return volumeSnapshot, snapshotContent, false, false, "", "", err + } + snapshotContentCreated := true + snapshotContent, err = waitForVolumeSnapshotContentReadyToUse(*snapc, ctx, snapshotContent.Name) + if err != nil { + return volumeSnapshot, snapshotContent, false, false, "", "", fmt.Errorf("volume snapshot content is not ready to use") + } + + framework.Logf("Get volume snapshot ID from snapshot handle") + snapshotId, snapshotHandle, err := getVolumeSnapshotIdFromSnapshotHandle(ctx, snapshotContent) + if err != nil { + return volumeSnapshot, snapshotContent, false, false, snapshotId, snapshotHandle, err + } + + if performCnsQueryVolumeSnapshot { + ginkgo.By("Query CNS and check the volume snapshot entry") + err = waitForCNSSnapshotToBeCreated(volHandle, snapshotId) + if err != nil { + return volumeSnapshot, snapshotContent, false, false, snapshotId, snapshotHandle, err + } + } + + return volumeSnapshot, snapshotContent, snapshotCreated, snapshotContentCreated, snapshotId, snapshotHandle, nil +} + +// getVolumeSnapshotSpecWithoutSC returns a spec for the volume snapshot without using snapshot class +func getVolumeSnapshotSpecWithoutSC(namespace string, pvcName string) *snapV1.VolumeSnapshot { + var volumesnapshotSpec = &snapV1.VolumeSnapshot{ + TypeMeta: metav1.TypeMeta{ + Kind: "VolumeSnapshot", + }, + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "snapshot-", + Namespace: namespace, + }, + Spec: snapV1.VolumeSnapshotSpec{ + Source: snapV1.VolumeSnapshotSource{ + PersistentVolumeClaimName: &pvcName, + }, + }, + } + return volumesnapshotSpec +} diff --git a/tests/e2e/e2e_common.go b/tests/e2e/e2e_common.go index 237ae0bccc..e166d44cf2 100644 --- a/tests/e2e/e2e_common.go +++ b/tests/e2e/e2e_common.go @@ -89,6 +89,8 @@ const ( envSharedNFSDatastoreURL = 
"SHARED_NFS_DATASTORE_URL" envSharedVMFSDatastoreURL = "SHARED_VMFS_DATASTORE_URL" envSharedVMFSDatastore2URL = "SHARED_VMFS_DATASTORE2_URL" + envSharedVsan2DatastoreURL = "SHARED_VSAN2_DATASTORE_URL" + envSharedVsanDirectDatastoreURL = "SHARED_VSANDIRECT_DATASTORE_URL" envVMClass = "VM_CLASS" envVsanDirectSetup = "USE_VSAN_DIRECT_DATASTORE_IN_WCP" envVsanDDatastoreURL = "SHARED_VSAND_DATASTORE_URL" @@ -96,11 +98,13 @@ const ( envStoragePolicyNameForNonSharedDatastores = "STORAGE_POLICY_FOR_NONSHARED_DATASTORES" envStoragePolicyNameForSharedDatastores = "STORAGE_POLICY_FOR_SHARED_DATASTORES" envStoragePolicyNameForHCIRemoteDatastores = "STORAGE_POLICY_FOR_HCI_REMOTE_DS" - envStoragePolicyNameForVsanVmfsDatastores = "STORAGE_POLICY_FOR_VSAN_VMFS_DATASTORES" + envStoragePolicyNameForVsanNfsDatastores = "STORAGE_POLICY_FOR_VSAN_NFS_DATASTORES" envStoragePolicyNameForSharedDatastores2 = "STORAGE_POLICY_FOR_SHARED_DATASTORES_2" envStoragePolicyNameForVmfsDatastores = "STORAGE_POLICY_FOR_VMFS_DATASTORES" envStoragePolicyNameForNfsDatastores = "STORAGE_POLICY_FOR_NFS_DATASTORES" envStoragePolicyNameForVvolDatastores = "STORAGE_POLICY_FOR_VVOL_DATASTORES" + envStoragePolicyNameForVsan2Datastore = "STORAGE_POLICY_FOR_VSAN2_DATASTORE" + envStoragePolicyNameForVsanDirectDatastore = "STORAGE_POLICY_FOR_VSANDIRECT_DATASTORE" envStoragePolicyNameFromInaccessibleZone = "STORAGE_POLICY_FROM_INACCESSIBLE_ZONE" envStoragePolicyNameWithThickProvision = "STORAGE_POLICY_WITH_THICK_PROVISIONING" envSupervisorClusterNamespace = "SVC_NAMESPACE" diff --git a/tests/e2e/raw_block_volume.go b/tests/e2e/raw_block_volume.go index 9bd2c7acd9..f71b4e9b3f 100644 --- a/tests/e2e/raw_block_volume.go +++ b/tests/e2e/raw_block_volume.go @@ -76,6 +76,7 @@ var _ = ginkgo.Describe("raw block volume support", func() { snapc *snapclient.Clientset restConfig *restclient.Config guestClusterRestConfig *restclient.Config + scName string ) ginkgo.BeforeEach(func() { @@ -1095,27 +1096,34 @@ var _ = ginkgo.Describe("raw block volume support", func() { 15. Query the snapshot from CNS side - should return 0 entries 16. 
Cleanup: Delete PVC, SC (validate they are removed) */ - ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] Verify snapshot dynamic provisioning workflow with "+ + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot][supervisor-snapshot] Verify snapshot dynamic provisioning workflow with "+ "raw block volume", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() if vanillaCluster { scParameters[scParamDatastoreURL] = datastoreURL + scName = "" } else if guestCluster { scParameters[svStorageClassName] = storagePolicyName + scName = "" + } else if supervisorCluster { + profileID := e2eVSphere.GetSpbmPolicyID(storagePolicyName) + scParameters[scParamStoragePolicyID] = profileID + scName = storagePolicyName + setStoragePolicyQuota(ctx, restConfig, storagePolicyName, namespace, rqLimit) } ginkgo.By("Create storage class") - sc, err := createStorageClass(client, scParameters, nil, "", "", false, "") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, scName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By("Creating source raw block PVC") - pvcspec := getPersistentVolumeClaimSpecWithStorageClass(namespace, "", sc, nil, "") + pvcspec := getPersistentVolumeClaimSpecWithStorageClass(namespace, "", storageclass, nil, "") pvcspec.Spec.VolumeMode = &rawBlockVolumeMode pvc1, err := fpv.CreatePVC(ctx, client, namespace, pvcspec) gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Failed to create pvc with err: %v", err)) @@ -1129,7 +1137,6 @@ var _ = ginkgo.Describe("raw block volume support", func() { if guestCluster { volumeID = getVolumeIDFromSupervisorCluster(volumeID) } - defer func() { err := fpv.DeletePersistentVolumeClaim(ctx, client, pvc1.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1148,24 +1155,28 @@ var _ = ginkgo.Describe("raw block volume support", func() { pod1, err := createPod(ctx, client, namespace, nil, []*corev1.PersistentVolumeClaim{pvc1}, false, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { - // Delete POD ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod1.Name, namespace)) err = fpod.DeletePodWithWait(ctx, client, pod1) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() var vmUUID string - nodeName := pod1.Spec.NodeName + var exists bool + if vanillaCluster { vmUUID = getNodeUUID(ctx, client, pod1.Spec.NodeName) } else if guestCluster { vmUUID, err = getVMUUIDFromNodeName(pod1.Spec.NodeName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else if supervisorCluster { + annotations := pod1.Annotations + vmUUID, exists = annotations[vmUUIDLabel] + gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("Pod doesn't have %s annotation", vmUUIDLabel)) + _, err := e2eVSphere.getVMByUUID(ctx, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volumeID, nodeName)) - isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, volumeID, vmUUID) + isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, pvs[0].Spec.CSI.VolumeHandle, vmUUID) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") @@ -1203,7 +1214,7 @@ var _ = 
ginkgo.Describe("raw block volume support", func() { ginkgo.By("Create a dynamic volume snapshot") volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvc1, volumeID, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -1226,7 +1237,7 @@ var _ = ginkgo.Describe("raw block volume support", func() { // Restore volumeSnapshot to another PVC ginkgo.By("Restore volume snapshot to another raw block PVC") - restorePvcSpec := getPersistentVolumeClaimSpecWithDatasource(namespace, diskSize, sc, nil, + restorePvcSpec := getPersistentVolumeClaimSpecWithDatasource(namespace, diskSize, storageclass, nil, corev1.ReadWriteOnce, volumeSnapshot.Name, snapshotapigroup) restorePvcSpec.Spec.VolumeMode = &rawBlockVolumeMode restoredPvc, err := fpv.CreatePVC(ctx, client, namespace, restorePvcSpec) @@ -1256,28 +1267,31 @@ var _ = ginkgo.Describe("raw block volume support", func() { "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) - nodeName = pod2.Spec.NodeName if vanillaCluster { vmUUID = getNodeUUID(ctx, client, pod2.Spec.NodeName) } else if guestCluster { vmUUID, err = getVMUUIDFromNodeName(pod2.Spec.NodeName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else if supervisorCluster { + annotations := pod2.Annotations + vmUUID, exists = annotations[vmUUIDLabel] + gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("Pod doesn't have %s annotation", vmUUIDLabel)) + _, err := e2eVSphere.getVMByUUID(ctx, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volumeID2, nodeName)) - isDiskAttached, err = e2eVSphere.isVolumeAttachedToVM(client, volumeID2, vmUUID) + isDiskAttached, err = e2eVSphere.isVolumeAttachedToVM(client, restoredPvs[0].Spec.CSI.VolumeHandle, vmUUID) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") defer func() { - // Delete POD ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod2.Name, namespace)) err = fpod.DeletePodWithWait(ctx, client, pod2) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volumeID2, nodeName)) - isDiskDetached, err := e2eVSphere.waitForVolumeDetachedFromNode(client, volumeID2, nodeName) + ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volumeID2, pod2.Spec.NodeName)) + isDiskDetached, err := e2eVSphere.waitForVolumeDetachedFromNode(client, volumeID2, pod2.Spec.NodeName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(isDiskDetached).To(gomega.BeTrue(), - fmt.Sprintf("Volume %q is not detached from the node %q", volumeID2, nodeName)) + fmt.Sprintf("Volume %q is not detached from the node %q", volumeID2, pod2.Spec.NodeName)) if guestCluster { ginkgo.By("Waiting for 30 seconds to allow CnsNodeVMAttachment controller to reconcile resource") time.Sleep(waitTimeForCNSNodeVMAttachmentReconciler) diff --git a/tests/e2e/snapshot_vmservice_vm.go b/tests/e2e/snapshot_vmservice_vm.go new file mode 100644 index 0000000000..720c772ad1 --- /dev/null +++ b/tests/e2e/snapshot_vmservice_vm.go @@ -0,0 +1,829 @@ +/* +Copyright 2023 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "context" + "fmt" + "os" + "strconv" + "strings" + "time" + + snapclient "github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + "github.com/vmware/govmomi/vim25/types" + + vmopv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + clientset "k8s.io/client-go/kubernetes" + restclient "k8s.io/client-go/rest" + "k8s.io/kubernetes/test/e2e/framework" + fnodes "k8s.io/kubernetes/test/e2e/framework/node" + fpv "k8s.io/kubernetes/test/e2e/framework/pv" + admissionapi "k8s.io/pod-security-admission/api" + ctlrclient "sigs.k8s.io/controller-runtime/pkg/client" + + cnsop "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator" +) + +var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { + + f := framework.NewDefaultFramework("vmsvc") + f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged + f.SkipNamespaceCreation = true // tests will create their own namespaces + var ( + client clientset.Interface + namespace string + datastoreURL string + storagePolicyName string + storageClassName string + storageProfileId string + vcRestSessionId string + vmi string + vmClass string + vmopC ctlrclient.Client + cnsopC ctlrclient.Client + isVsanHealthServiceStopped bool + isSPSserviceStopped bool + vcAddress string + restConfig *restclient.Config + snapc *snapclient.Clientset + pandoraSyncWaitTime int + err error + dsRef types.ManagedObjectReference + labelsMap map[string]string + ) + + ginkgo.BeforeEach(func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // client connection + client = f.ClientSet + bootstrap() + + // fetch the testbed type for executing testcases + topologyFeature := os.Getenv(topologyFeature) + + // fetching nodes and reading storage policy name + if topologyFeature != topologyTkgHaName { + nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) + framework.ExpectNoError(err, "Unable to find ready and schedulable Node") + if !(len(nodeList.Items) > 0) { + framework.Failf("Unable to find ready and schedulable Node") + } + storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) + } else { + storagePolicyName = GetAndExpectStringEnvVar(envZonalStoragePolicyName) + } + + // fetching vc ip and creating vc session + vcAddress = e2eVSphere.Config.Global.VCenterHostname + ":" + sshdPort + vcRestSessionId = createVcSession4RestApis(ctx) + + // reading storage class name for wcp setup "wcpglobal_storage_profile" + storageClassName = strings.ReplaceAll(storagePolicyName, "_", "-") // since this is a wcp setup + + // fetching shared datastore url + datastoreURL = GetAndExpectStringEnvVar(envSharedDatastoreURL) + + // reading datastore moref reference + dsRef = getDsMoRefFromURL(ctx, datastoreURL) + framework.Logf("dsmoId: %v", dsRef.Value)
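The dsRef assignment above intentionally uses = rather than :=. Inside the BeforeEach closure, := would declare a fresh local and shadow the dsRef in the suite's var block, so specs that later read it (for example the FCD creation in Testcase-2 below) would see a zero-valued ManagedObjectReference. A minimal standalone sketch of the pitfall:

```go
// Sketch of how := inside a closure shadows an outer variable.
package main

import "fmt"

type moRef struct{ Value string }

var dsRef moRef // analogous to the var-block dsRef in the suite

func setup() {
	dsRef := moRef{Value: "datastore-42"} // := declares a new local, shadowing the outer dsRef
	_ = dsRef
}

func main() {
	setup()
	// prints "" — the outer dsRef was never assigned
	fmt.Printf("outer dsRef after setup: %q\n", dsRef.Value)
}
```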
+ + // reading storage profile id of "wcpglobal_storage_profile" + storageProfileId = e2eVSphere.GetSpbmPolicyID(storagePolicyName) + + // creating/reading content library "https://wp-content-pstg.broadcom.com/vmsvc/lib.json" + contentLibId := createAndOrGetContentlibId4Url(vcRestSessionId, GetAndExpectStringEnvVar(envContentLibraryUrl), + dsRef.Value, GetAndExpectStringEnvVar(envContentLibraryUrlSslThumbprint)) + + // creating test wcp namespace + framework.Logf("Create a WCP namespace for the test") + + // reading vm class required for vm creation + vmClass = os.Getenv(envVMClass) + if vmClass == "" { + vmClass = vmClassBestEffortSmall + } + + // creating wcp test namespace and setting vmclass, content library, storage class fields in test ns + namespace = createTestWcpNs( + vcRestSessionId, storageProfileId, vmClass, contentLibId, getSvcId(vcRestSessionId)) + + // creating vm scheme + vmopScheme := runtime.NewScheme() + gomega.Expect(vmopv1.AddToScheme(vmopScheme)).Should(gomega.Succeed()) + vmopC, err = ctlrclient.New(f.ClientConfig(), ctlrclient.Options{Scheme: vmopScheme}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + cnsOpScheme := runtime.NewScheme() + gomega.Expect(cnsop.AddToScheme(cnsOpScheme)).Should(gomega.Succeed()) + cnsopC, err = ctlrclient.New(f.ClientConfig(), ctlrclient.Options{Scheme: cnsOpScheme}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // reading vm image name "ubuntu-2004-cloud-init-21.4-kube-v1.20.10" + vmImageName := GetAndExpectStringEnvVar(envVmsvcVmImageName) + + // listing the vm images available and added in test ns + framework.Logf("Waiting for virtual machine image list to be available in namespace '%s' for image '%s'", + namespace, vmImageName) + vmi = waitNGetVmiForImageName(ctx, vmopC, namespace, vmImageName) + gomega.Expect(vmi).NotTo(gomega.BeEmpty()) + + // Get snapshot client using the rest config + restConfig = getRestConfigClient() + snapc, err = snapclient.NewForConfig(restConfig) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // reading full sync wait time + if os.Getenv(envPandoraSyncWaitTime) != "" { + pandoraSyncWaitTime, err = strconv.Atoi(os.Getenv(envPandoraSyncWaitTime)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + pandoraSyncWaitTime = defaultPandoraSyncWaitTime + } + + //setting map values + labelsMap = make(map[string]string) + labelsMap["app"] = "test" + + }) + + ginkgo.AfterEach(func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + if isVsanHealthServiceStopped { + ginkgo.By(fmt.Sprintf("Starting %v on the vCenter host", vsanhealthServiceName)) + startVCServiceWait4VPs(ctx, vcAddress, vsanhealthServiceName, &isVsanHealthServiceStopped) + } + + if isSPSserviceStopped { + ginkgo.By(fmt.Sprintf("Starting %v on the vCenter host", spsServiceName)) + startVCServiceWait4VPs(ctx, vcAddress, spsServiceName, &isSPSserviceStopped) + } + dumpSvcNsEventsOnTestFailure(client, namespace) + delTestWcpNs(vcRestSessionId, namespace) + gomega.Expect(waitForNamespaceToGetDeleted(ctx, client, namespace, poll, pollTimeout)).To(gomega.Succeed()) + }) + + /* + Testcase-1 + Dynamic PVC → VM → Snapshot + Steps: + 1. Create a PVC using the storage class (storage policy) tagged to the supervisor namespace + 2. Wait for PVC to reach the Bound state. + 3. Create a VM service VM using the PVC created in step #1 + 4. Wait for the VM service to be up and in the powered-on state. + 5. Once the VM is up, verify that the volume is accessible inside the VM + 6. Write some data into the volume.
+ 7. Get VolumeSnapshotClass "volumesnapshotclass-delete" from supervisor cluster + 8. Create a volume snapshot for the PVC created in step #1. + 9. Snapshot Verification: Execute and verify the steps mentioned in the Create snapshot mandatory checks + 10. Verify CNS metadata for a PVC + 11. Cleanup: Execute and verify the steps mentioned in the Delete snapshot mandatory checks + */ + + ginkgo.It("VM1", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ginkgo.By("Create a storageclass") + storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create a PVC") + pvc, err := createPVC(ctx, client, namespace, labelsMap, diskSize, storageclass, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Waiting for all claims to be in bound state") + pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pv := pvs[0] + volHandle := pv.Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + defer func() { + ginkgo.By("Delete PVCs") + err = fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Waiting for CNS volumes to be deleted") + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating VM bootstrap data") + // creating a secret for vm credentials + secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace) + defer func() { + ginkgo.By("Deleting VM bootstrap data") + err := client.CoreV1().Secrets(namespace).Delete(ctx, secretName, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating VM") + vm := createVmServiceVmWithPvcs( + ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc}, vmi, storageClassName, secretName) + defer func() { + ginkgo.By("Deleting VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating loadbalancing service for ssh with the VM") + vmlbsvc := createService4Vm(ctx, vmopC, namespace, vm.Name) + defer func() { + ginkgo.By("Deleting loadbalancing service for ssh with the VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachineService{ObjectMeta: metav1.ObjectMeta{ + Name: vmlbsvc.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Wait for VM to come up and get an IP") + vmIp, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait and verify PVCs are attached to the VM") + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm, + []*v1.PersistentVolumeClaim{pvc})).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify PVCs are accessible to the VM") + ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity") + vm, err = getVmsvcVM(ctx, vmopC, vm.Namespace, vm.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for i, vol := range vm.Status.Volumes { + volFolder := formatNVerifyPvcIsAccessible(vol.DiskUuid, i+1, vmIp) + verifyDataIntegrityOnVmDisk(vmIp, volFolder) + } + + ginkgo.By("Create volume snapshot class") + 
volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if vanillaCluster { + err = snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, + metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Create a dynamic volume snapshot") + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvc, volHandle, diskSize, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* + Testcase-2 + Static PVC → VM → Snapshot + Steps: + 1. Create FCD + 2. Create a static PV and PVC using cns register volume API (see the static PV sketch below) + 3. Wait for PV and PVC to reach the Bound state. + 4. Create a VM service VM using the PVC created in step #2 + 5. Wait for the VM service to be up and in the powered-on state. + 6. Once the VM is up, verify that the volume is accessible inside the VM + 7. Write some data into the volume. + 8. Get VolumeSnapshotClass "volumesnapshotclass-delete" from supervisor cluster + 9. Create a volume snapshot for the PVC created in step #2. + 10. Snapshot Verification: Execute and verify the steps mentioned in the Create snapshot mandatory checks + 11. Verify CNS metadata for a PVC + 12. Cleanup: Execute and verify the steps mentioned in the Delete snapshot mandatory checks + */
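Testcase-2 builds its static volume by pointing a PV at an existing FCD; in the spec below, getPersistentVolumeSpec supplies that object. The following is a rough standalone sketch of such a statically provisioned vSphere CSI PV, not the repo's helper; the size, fstype, and FCD ID are placeholders:

```go
// Hedged sketch of a static PV that hands an existing FCD to the vSphere CSI driver.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func staticPvForFcd(fcdID string) *corev1.PersistentVolume {
	return &corev1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "static-pv-",
			Labels:       map[string]string{"fcd-id": fcdID}, // used to match the static PVC later
		},
		Spec: corev1.PersistentVolumeSpec{
			Capacity: corev1.ResourceList{
				corev1.ResourceStorage: resource.MustParse("2Gi"), // placeholder size
			},
			AccessModes:                   []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			PersistentVolumeReclaimPolicy: corev1.PersistentVolumeReclaimDelete,
			PersistentVolumeSource: corev1.PersistentVolumeSource{
				CSI: &corev1.CSIPersistentVolumeSource{
					Driver:       "csi.vsphere.vmware.com",
					VolumeHandle: fcdID, // the FCD ID doubles as the CNS volume handle
					FSType:       "ext4",
				},
			},
		},
	}
}

func main() {
	pv := staticPvForFcd("f37f344b-1111-2222-3333-444455556666") // hypothetical FCD ID
	fmt.Println(pv.Spec.CSI.Driver, pv.Spec.CSI.VolumeHandle)
}
```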
+ + ginkgo.It("VM2", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ginkgo.By("Creating FCD Disk") + fcdID, err := e2eVSphere.createFCD(ctx, fcdName, diskSizeInMb, dsRef.Reference()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow newly created FCD:%s to sync with pandora", + pandoraSyncWaitTime, fcdID)) + time.Sleep(time.Duration(pandoraSyncWaitTime) * time.Second) + + ginkgo.By(fmt.Sprintf("Creating the PV with the fcdID %s", fcdID)) + staticPVLabels := make(map[string]string) + staticPVLabels["fcd-id"] = fcdID + staticPv := getPersistentVolumeSpec(fcdID, v1.PersistentVolumeReclaimDelete, nil, ext4FSType) + staticPv, err = client.CoreV1().PersistentVolumes().Create(ctx, staticPv, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volHandle := staticPv.Spec.CSI.VolumeHandle + + err = e2eVSphere.waitForCNSVolumeToBeCreated(staticPv.Spec.CSI.VolumeHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Creating a static PVC") + staticPvc := getPersistentVolumeClaimSpec(namespace, staticPVLabels, staticPv.Name) + staticPvc, err = client.CoreV1().PersistentVolumeClaims(namespace).Create( + ctx, staticPvc, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Creating VM bootstrap data") + secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace) + defer func() { + ginkgo.By("Deleting VM bootstrap data") + err := client.CoreV1().Secrets(namespace).Delete(ctx, secretName, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + ginkgo.By("Creating VM") + vm := createVmServiceVmWithPvcs( + ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{staticPvc}, vmi, storageClassName, secretName) + defer func() { + ginkgo.By("Deleting VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating loadbalancing service for ssh with the VM") + vmlbsvc := createService4Vm(ctx, vmopC, namespace, vm.Name) + defer func() { + ginkgo.By("Deleting loadbalancing service for ssh with the VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachineService{ObjectMeta: metav1.ObjectMeta{ + Name: vmlbsvc.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Wait for VM to come up and get an IP") + vmIp, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait and verify PVCs are attached to the VM") + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm, + []*v1.PersistentVolumeClaim{staticPvc})).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify PVCs are accessible to the VM") + ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity") + vm, err = getVmsvcVM(ctx, vmopC, vm.Namespace, vm.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for i, vol := range vm.Status.Volumes { + volFolder := formatNVerifyPvcIsAccessible(vol.DiskUuid, i+1, vmIp) + verifyDataIntegrityOnVmDisk(vmIp, volFolder) + } + + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if vanillaCluster { + err = snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, + metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Create a dynamic volume snapshot") + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + staticPvc, volHandle, diskSize, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* + Testcase-3 + Dynamic PVC  → VM → Snapshot → RestoreVol → VM + Steps: + 1. Create a dynamic PVC using the storage class (storage policy) tagged to the supervisor namespace + 2. Wait for dynamic PVC to reach the Bound state. + 3. Create a VM service VM using dynamic PVC. + 4. Wait for the VM service to be up and in the powered-on state. + 5. Once the VM is up, verify that the volume is accessible inside the VM + 6. Write some IO to the CSI volumes, read it back from them and verify the data integrity + 7. Get VolumeSnapshotClass "volumesnapshotclass-delete" from supervisor cluster + 8. Create a volume snapshot for the dynamic PVC created in step #1 + 9. Snapshot Verification: Execute and verify the steps mentioned in the Create snapshot mandatory checks + 10. Verify CNS metadata for a PVC + 11. Create a new PVC from the snapshot created in step #8 (see the restore PVC sketch below). + 12. Wait for PVC to reach the Bound state. + 13. Create a VM service VM using the PVC created in step #11 + 14. Wait for the VM service to be up and in the powered-on state. + 15. Once the VM is up, verify that the volume is accessible inside the VM + 16. Verify reading/writing data in the volume. + 17. Cleanup: Execute and verify the steps mentioned in the Delete snapshot mandatory checks + */
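Testcase-3 restores a volume by creating a PVC whose dataSource points at the VolumeSnapshot; verifyVolumeRestoreOperation drives that through a helper like getPersistentVolumeClaimSpecWithDatasource. A hedged sketch of the shape of such a restore PVC follows; the names and size are placeholders, and on client-go older than v0.29 the Resources field is corev1.ResourceRequirements instead:

```go
// Hedged sketch of a PVC that restores from a named VolumeSnapshot.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func restorePvcSpec(namespace, snapshotName, storageClassName, size string) *corev1.PersistentVolumeClaim {
	apiGroup := "snapshot.storage.k8s.io" // snapshotapigroup in the e2e code
	return &corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{GenerateName: "restored-pvc-", Namespace: namespace},
		Spec: corev1.PersistentVolumeClaimSpec{
			AccessModes:      []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			StorageClassName: &storageClassName,
			Resources: corev1.VolumeResourceRequirements{ // ResourceRequirements pre-v0.29
				Requests: corev1.ResourceList{corev1.ResourceStorage: resource.MustParse(size)},
			},
			// dataSource is what turns a plain PVC into a snapshot restore
			DataSource: &corev1.TypedLocalObjectReference{
				APIGroup: &apiGroup,
				Kind:     "VolumeSnapshot",
				Name:     snapshotName,
			},
		},
	}
}

func main() {
	pvc := restorePvcSpec("test-ns", "snapshot-1", "wcpglobal-storage-profile", "2Gi")
	fmt.Println(pvc.Spec.DataSource.Kind, "->", pvc.Spec.DataSource.Name)
}
```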
+ + ginkgo.It("VM3", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ginkgo.By("Create a storageclass") + storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create a PVC") + pvc, err := createPVC(ctx, client, namespace, labelsMap, "", storageclass, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Waiting for all claims to be in bound state") + pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pv := pvs[0] + volHandle := pv.Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + defer func() { + ginkgo.By("Delete PVCs") + err = fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Waiting for CNS volumes to be deleted") + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating VM bootstrap data") + // creating a secret for vm credentials + secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace) + defer func() { + ginkgo.By("Deleting VM bootstrap data") + err := client.CoreV1().Secrets(namespace).Delete(ctx, secretName, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating VM") + vm := createVmServiceVmWithPvcs( + ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc}, vmi, storageClassName, secretName) + defer func() { + ginkgo.By("Deleting VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating loadbalancing service for ssh with the VM") + vmlbsvc := createService4Vm(ctx, vmopC, namespace, vm.Name) + defer func() { + ginkgo.By("Deleting loadbalancing service for ssh with the VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachineService{ObjectMeta: metav1.ObjectMeta{ + Name: vmlbsvc.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Wait for VM to come up and get an IP") + vmIp, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait and verify PVCs are attached to the VM") + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm, + []*v1.PersistentVolumeClaim{pvc})).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify PVCs are accessible to the VM") + ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity") + vm, err = getVmsvcVM(ctx, vmopC, vm.Namespace, vm.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for i, vol := range vm.Status.Volumes { + volFolder := formatNVerifyPvcIsAccessible(vol.DiskUuid, i+1, vmIp) + verifyDataIntegrityOnVmDisk(vmIp, volFolder) + } + + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if vanillaCluster { + err = snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, + metav1.DeleteOptions{}) + 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Create a dynamic volume snapshot") + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvc, volHandle, diskSize, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Create a volume from a snapshot") + pvc2, pv2, _ := verifyVolumeRestoreOperation(ctx, client, namespace, storageclass, volumeSnapshot, diskSize, false) + volHandle2 := pv2[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) + + ginkgo.By("Creating VM") + vm2 := createVmServiceVmWithPvcs( + ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc2}, vmi, storageClassName, secretName) + defer func() { + ginkgo.By("Deleting VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm2.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Wait for VM to come up and get an IP") + vmIp2, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm2.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait and verify PVCs are attached to the VM") + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm2, + []*v1.PersistentVolumeClaim{pvc2})).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify PVCs are accessible to the VM") + ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity") + vm2, err = getVmsvcVM(ctx, vmopC, vm2.Namespace, vm2.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for i, vol := range vm2.Status.Volumes { + volFolder := formatNVerifyPvcIsAccessible(vol.DiskUuid, i+1, vmIp2) + verifyDataIntegrityOnVmDisk(vmIp2, volFolder) + } + + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + })
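createVmServiceVmWithPvcs, used throughout these specs, attaches the claims to the VM through the vm-operator API. The sketch below approximates that wiring under the vmopv1 v1alpha1 types; it is not the suite's actual helper, and the class name and all other values are placeholder assumptions:

```go
// Hedged sketch of attaching PVCs to a VM Service VM via vm-operator v1alpha1.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	vmopv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha1"
)

func vmWithPvcs(namespace, imageName, storageClassName string, pvcNames []string) *vmopv1.VirtualMachine {
	vols := make([]vmopv1.VirtualMachineVolume, 0, len(pvcNames))
	for _, name := range pvcNames {
		vols = append(vols, vmopv1.VirtualMachineVolume{
			Name: name, // volume name inside the VM spec
			PersistentVolumeClaim: &vmopv1.PersistentVolumeClaimVolumeSource{
				PersistentVolumeClaimVolumeSource: corev1.PersistentVolumeClaimVolumeSource{
					ClaimName: name, // the PVC to attach
				},
			},
		})
	}
	return &vmopv1.VirtualMachine{
		ObjectMeta: metav1.ObjectMeta{GenerateName: "vmsvc-vm-", Namespace: namespace},
		Spec: vmopv1.VirtualMachineSpec{
			ImageName:    imageName,           // e.g. the ubuntu cloud image used above
			ClassName:    "best-effort-small", // VM class; placeholder
			StorageClass: storageClassName,
			PowerState:   vmopv1.VirtualMachinePoweredOn,
			Volumes:      vols,
		},
	}
}

func main() {
	vm := vmWithPvcs("test-ns", "ubuntu-2004-cloud-init-21.4-kube-v1.20.10",
		"wcpglobal-storage-profile", []string{"pvc-1", "pvc-2"})
	fmt.Println(len(vm.Spec.Volumes), "volumes attached")
}
```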
+ + /* + Testcase-4 + Dynamic PVC → VM → Snapshot1 and Snapshot2 → PVC1 and PVC2 → VM + Steps: + 1. Create a PVC using the storage class (storage policy) tagged to the supervisor namespace + 2. Wait for PVC to reach the Bound state. + 3. Create a VM service VM using the PVC created in step #1 + 4. Wait for the VM service to be up and in the powered-on state. + 5. Once the VM is up, verify that the volume is accessible inside the VM + 6. Write some IO to the CSI volumes, read it back from them and verify the data integrity + 7. Get VolumeSnapshotClass "volumesnapshotclass-delete" from supervisor cluster + 8. Take 2 snapshots (snapshot-1, snapshot-2) for the PVC created in step #1. + 9. Snapshot Verification: Execute and verify the steps mentioned in the Create snapshot mandatory checks + 10. Verify CNS metadata for a PVC created in step #1 + 11. Create PVC-1 from Snapshot-1, PVC-2 from Snapshot-2 + 12. Wait for PVCs to reach the Bound state. + 13. Create a VM service VM using pvc-1 and pvc-2 created in step #11 + 14. Wait for the VM service to be up and in the powered-on state. + 15. Once the VM is up, verify that the volume is accessible inside the VM + 16. Write some IO to the CSI volumes, read it back from them and verify the data integrity + 17. Cleanup: Execute and verify the steps mentioned in the Delete snapshot mandatory checks + */ + + ginkgo.It("VM4", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ginkgo.By("Create a storageclass") + storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create a PVC") + pvc, err := createPVC(ctx, client, namespace, labelsMap, "", storageclass, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Waiting for all claims to be in bound state") + pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pv := pvs[0] + volHandle := pv.Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + defer func() { + ginkgo.By("Delete PVCs") + err = fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Waiting for CNS volumes to be deleted") + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating VM bootstrap data") + // creating a secret for vm credentials + secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace) + defer func() { + ginkgo.By("Deleting VM bootstrap data") + err := client.CoreV1().Secrets(namespace).Delete(ctx, secretName, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating VM") + vm := createVmServiceVmWithPvcs( + ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc}, vmi, storageClassName, secretName) + defer func() { + ginkgo.By("Deleting VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating loadbalancing service for ssh with the VM") + vmlbsvc := createService4Vm(ctx, vmopC, namespace, vm.Name) + defer func() { + ginkgo.By("Deleting loadbalancing service for ssh with the VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachineService{ObjectMeta: metav1.ObjectMeta{ + Name: vmlbsvc.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Wait for VM to come up and get an IP") + vmIp, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait and verify PVCs are attached to the VM") + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm, + []*v1.PersistentVolumeClaim{pvc})).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify PVCs are accessible to the VM") + ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity") + vm, err = getVmsvcVM(ctx, vmopC, vm.Namespace, vm.Name) // refresh vm info + 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for i, vol := range vm.Status.Volumes { + volFolder := formatNVerifyPvcIsAccessible(vol.DiskUuid, i+1, vmIp) + verifyDataIntegrityOnVmDisk(vmIp, volFolder) + } + + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if vanillaCluster { + err = snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, + metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Create a dynamic volume snapshot-1 for the volume") + volumeSnapshot1, snapshotContent1, snapshotCreated1, + snapshotContentCreated1, snapshotId1, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvc, volHandle, diskSize, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated1 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent1, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated1 { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot1.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot1.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Create a dynamic volume snapshot-2 for the volume") + volumeSnapshot2, snapshotContent2, snapshotCreated2, + snapshotContentCreated2, snapshotId2, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvc, volHandle, diskSize, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated2 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent2, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated2 { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot2.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot2.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Create a volume from a snapshot") + pvc2, pv2, _ := verifyVolumeRestoreOperation(ctx, client, namespace, storageclass, volumeSnapshot1, diskSize, false) + volHandle2 := pv2[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) + + ginkgo.By("Create a volume from a snapshot") + pvc3, pv3, _ := verifyVolumeRestoreOperation(ctx, client, namespace, storageclass, volumeSnapshot2, diskSize, false) + volHandle3 := pv3[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle3).NotTo(gomega.BeEmpty()) + + ginkgo.By("Creating VM") + vm2 := createVmServiceVmWithPvcs( + ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc2, pvc3}, vmi, storageClassName, secretName) + defer func() { + ginkgo.By("Deleting VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm2.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Wait for VM to come up and get an IP") + vmIp2, err := 
waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm2.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait and verify PVCs are attached to the VM") + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm2, + []*v1.PersistentVolumeClaim{pvc2, pvc3})).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify PVCs are accessible to the VM") + ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity") + vm2, err = getVmsvcVM(ctx, vmopC, vm2.Namespace, vm2.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for i, vol := range vm2.Status.Volumes { + volFolder := formatNVerifyPvcIsAccessible(vol.DiskUuid, i+1, vmIp2) + verifyDataIntegrityOnVmDisk(vmIp2, volFolder) + } + + ginkgo.By("Delete dynamic volume snapshot-1") + snapshotCreated1, snapshotContentCreated1, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot1, pandoraSyncWaitTime, volHandle, snapshotId1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Delete dynamic volume snapshot-2") + snapshotCreated2, snapshotContentCreated2, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot2, pandoraSyncWaitTime, volHandle, snapshotId2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) +}) diff --git a/tests/e2e/tkgs_ha.go b/tests/e2e/tkgs_ha.go index d9e4d6363d..53ba7b055c 100644 --- a/tests/e2e/tkgs_ha.go +++ b/tests/e2e/tkgs_ha.go @@ -266,7 +266,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { ginkgo.By("Create a dynamic volume snapshot") volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, dynamicSnapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, + snapshotContentCreated, dynamicSnapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -875,7 +875,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { ginkgo.By("Create a dynamic volume snapshot") volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, dynamicSnapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, + snapshotContentCreated, dynamicSnapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, staticPvc, volumeID, diskSize, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1044,7 +1044,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { ginkgo.By("Create a dynamic volume snapshot") volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + snapshotContentCreated, _, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, "1Gi", false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -1351,7 +1351,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { ginkgo.By("Create a dynamic volume snapshot") volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + snapshotContentCreated, _, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { diff --git a/tests/e2e/util.go b/tests/e2e/util.go index 05c47d02de..a0e2b0095b 100644 --- a/tests/e2e/util.go +++ 
b/tests/e2e/util.go @@ -5719,7 +5719,7 @@ func (o replicaSetsByCreationTimestampDate) Less(i, j int) bool { return o[i].CreationTimestamp.Before(&o[j].CreationTimestamp) } -// createKubernetesClientFromConfig creaates a newk8s client from given +// createKubernetesClientFromConfig creates a new k8s client from a given // kubeConfig file. func createKubernetesClientFromConfig(kubeConfigPath string) (clientset.Interface, error) { diff --git a/tests/e2e/vmservice_utils.go b/tests/e2e/vmservice_utils.go index 8427b9b6ca..944f383c04 100644 --- a/tests/e2e/vmservice_utils.go +++ b/tests/e2e/vmservice_utils.go @@ -332,7 +332,7 @@ func waitNgetVmsvcVM(ctx context.Context, c ctlrclient.Client, namespace string, // waitNgetVmsvcVmIp wait and fetch the primary IP of the vm in give ns func waitNgetVmsvcVmIp(ctx context.Context, c ctlrclient.Client, namespace string, name string) (string, error) { ip := "" - err := wait.PollUntilContextTimeout(ctx, poll*10, pollTimeout*2, true, + err := wait.PollUntilContextTimeout(ctx, poll*10, pollTimeout*4, true, func(ctx context.Context) (bool, error) { vm, err := getVmsvcVM(ctx, c, namespace, name) if err != nil { @@ -367,6 +367,7 @@ users: } secret, err := client.CoreV1().Secrets(namespace).Create(ctx, &secretSpec, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("created bootstrap secret %s in namespace %s", secret.Name, namespace) return secret.Name } @@ -504,18 +505,25 @@ func waitNverifyPvcsAreAttachedToVmsvcVm(ctx context.Context, vmopC ctlrclient.C // permissions under the mount point func formatNVerifyPvcIsAccessible(diskUuid string, mountIndex int, vmIp string) string { p := "/dev/disk/by-id/wwn-0x" + strings.ReplaceAll(strings.ToLower(diskUuid), "-", "") + framework.Logf("disk by-id path: %s", p) results := execSshOnVmThroughGatewayVm(vmIp, []string{"ls -l /dev/disk/by-id/", "ls -l " + p}) + framework.Logf("ls results: %v", results) dev := "/dev/" + strings.TrimSpace(strings.Split(results[1].Stdout, "/")[6]) + framework.Logf("dev: %s", dev) gomega.Expect(dev).ShouldNot(gomega.Equal("/dev/")) framework.Logf("Found %s dev for disk with uuid %s", dev, diskUuid) partitionDev := dev + "1" - _ = execSshOnVmThroughGatewayVm(vmIp, []string{"sudo parted --script " + dev + " mklabel gpt", + framework.Logf("partition dev: %s", partitionDev) + res := execSshOnVmThroughGatewayVm(vmIp, []string{"sudo parted --script " + dev + " mklabel gpt", "sudo parted --script -a optimal " + dev + " mkpart primary 0% 100%", "lsblk -l", "sudo mkfs.ext4 " + partitionDev}) + framework.Logf("partitioning results: %v", res) volMountPath := "/mnt/volume" + strconv.Itoa(mountIndex) + framework.Logf("volume mount path: %s", volMountPath) volFolder := volMountPath + "/data" + framework.Logf("volume folder: %s", volFolder) results = execSshOnVmThroughGatewayVm(vmIp, []string{ "sudo mkdir -p " + volMountPath, "sudo mount " + partitionDev + " " + volMountPath, @@ -525,6 +533,7 @@ func formatNVerifyPvcIsAccessible(diskUuid string, mountIndex int, vmIp string) "grep -c ext4 " + volFolder + "/fstype", "sync", }) + framework.Logf("mount results: %v", results) gomega.Expect(strings.TrimSpace(results[5].Stdout)).To(gomega.Equal("1")) return volFolder } @@ -542,7 +551,8 @@ func verifyDataIntegrityOnVmDisk(vmIp, volFolder string) { // execSshOnVmThroughGatewayVm executes cmd(s) on VM via gateway(bastion) host and returns the result(s) func execSshOnVmThroughGatewayVm(vmIp string, cmds []string) []fssh.Result { results := []fssh.Result{} - + framework.Logf("running %v on vm %s via gateway vm", cmds, vmIp) gatewayClient, sshClient := getSshClientForVmThroughGatewayVm(vmIp) defer sshClient.Close() defer gatewayClient.Close()
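formatNVerifyPvcIsAccessible locates the attached disk inside the guest by deriving a stable by-id symlink from the volume's disk UUID, as the p assignment above shows. A tiny standalone sketch of that derivation (the UUID below is made up):

```go
// Sketch of mapping a CNS disk UUID to its /dev/disk/by-id/wwn-0x<hex> symlink.
package main

import (
	"fmt"
	"strings"
)

func wwnPathForDiskUuid(diskUuid string) string {
	// lower-case the UUID and strip dashes to get the WWN hex string
	return "/dev/disk/by-id/wwn-0x" + strings.ReplaceAll(strings.ToLower(diskUuid), "-", "")
}

func main() {
	// hypothetical disk UUID for illustration
	uuid := "6000C29B-64BD-4A9D-A4C2-1B2D3E4F5A6B"
	fmt.Println(wwnPathForDiskUuid(uuid))
	// -> /dev/disk/by-id/wwn-0x6000c29b64bd4a9da4c21b2d3e4f5a6b
}
```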