From 864701710bf55bec856e24c6ddb37ea7441836d6 Mon Sep 17 00:00:00 2001 From: sipriyaa Date: Tue, 24 Sep 2024 15:50:14 +0530 Subject: [PATCH] snapshot support on supervisor --- tests/e2e/csi_snapshot_basic.go | 2837 ++++++++------ tests/e2e/csi_snapshot_negative.go | 25 +- tests/e2e/csi_snapshot_utils.go | 173 +- tests/e2e/e2e_common.go | 9 +- tests/e2e/multi_vc.go | 6 +- tests/e2e/multi_vc_preferential_topology.go | 2 +- tests/e2e/preferential_topology.go | 14 +- tests/e2e/preferential_topology_disruptive.go | 6 +- tests/e2e/preferential_topology_snapshot.go | 10 +- tests/e2e/raw_block_volume.go | 4 +- tests/e2e/snapshot_stretched_supervisor.go | 1158 ++++++ tests/e2e/snapshot_vmservice_vm.go | 3395 +++++++++++++++++ tests/e2e/tkgs_ha.go | 16 +- tests/e2e/topology_multi_replica.go | 12 +- tests/e2e/topology_snapshot.go | 2 +- tests/e2e/util.go | 9 +- tests/e2e/vmservice_utils.go | 119 +- ...olume_provisioning_with_level5_topology.go | 12 +- 18 files changed, 6443 insertions(+), 1366 deletions(-) create mode 100644 tests/e2e/snapshot_stretched_supervisor.go create mode 100644 tests/e2e/snapshot_vmservice_vm.go diff --git a/tests/e2e/csi_snapshot_basic.go b/tests/e2e/csi_snapshot_basic.go index 8ac4849255..1628023b4f 100644 --- a/tests/e2e/csi_snapshot_basic.go +++ b/tests/e2e/csi_snapshot_basic.go @@ -74,34 +74,55 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { labels_ns map[string]string isVcRebooted bool vcAddress string + labelsMap map[string]string + scName string + volHandle string ) ginkgo.BeforeEach(func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + bootstrap() client = f.ClientSet namespace = getNamespaceToRunTests(f) scParameters = make(map[string]string) + + // reading shared datastoreurl and shared storage policy + storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) datastoreURL = GetAndExpectStringEnvVar(envSharedDatastoreURL) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + + // fetching node list and checking node status nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") if !(len(nodeList.Items) > 0) { framework.Failf("Unable to find ready and schedulable Node") } + // delete nginx service + service, err := client.CoreV1().Services(namespace).Get(ctx, servicename, metav1.GetOptions{}) + if err == nil && service != nil { + deleteService(namespace, client, service) + } + + // reading vc credentials vcAddress = e2eVSphere.Config.Global.VCenterHostname + ":" + sshdPort + // Get snapshot client using the rest config - if !guestCluster { + if vanillaCluster || supervisorCluster { restConfig = getRestConfigClient() snapc, err = snapclient.NewForConfig(restConfig) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } else { + + //setting resource quota for storage policy tagged to supervisor namespace + setStoragePolicyQuota(ctx, restConfig, storagePolicyName, namespace, rqLimit) + } else if guestCluster { guestClusterRestConfig = getRestConfigClientForGuestCluster(guestClusterRestConfig) snapc, err = snapclient.NewForConfig(guestClusterRestConfig) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } + // reading fullsync wait time if os.Getenv(envPandoraSyncWaitTime) != "" { pandoraSyncWaitTime, err = strconv.Atoi(os.Getenv(envPandoraSyncWaitTime)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -109,6 +130,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 
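The reworked BeforeEach above now selects the snapshot client and namespace quota per cluster flavor. For supervisor (WCP) runs the relevant branch reduces to the following minimal sketch, built only from calls that appear in this patch; getRestConfigClient and setStoragePolicyQuota are test-suite helpers whose exact behavior is assumed here, not defined by this hunk:

    // supervisor (WCP) flavor: use the supervisor cluster's rest config directly
    restConfig = getRestConfigClient()               // rest config for the supervisor cluster (assumed)
    snapc, err = snapclient.NewForConfig(restConfig) // external-snapshotter clientset
    gomega.Expect(err).NotTo(gomega.HaveOccurred())
    // raise the namespace resource quota for the storage class tagged to this policy (assumed semantics)
    setStoragePolicyQuota(ctx, restConfig, storagePolicyName, namespace, rqLimit)

Vanilla runs take the same snapclient path without the quota call, and guest-cluster runs build the client from getRestConfigClientForGuestCluster instead, as the hunk above shows.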
pandoraSyncWaitTime = defaultPandoraSyncWaitTime } + // reading operation scale value if os.Getenv("VOLUME_OPS_SCALE") != "" { volumeOpsScale, err = strconv.Atoi(os.Getenv(envVolumeOperationsScale)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -119,6 +141,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { } framework.Logf("VOLUME_OPS_SCALE is set to %v", volumeOpsScale) + //creates a newk8s client from a given kubeConfig file controllerClusterConfig := os.Getenv(contollerClusterKubeConfig) c = client if controllerClusterConfig != "" { @@ -128,38 +151,55 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { c = remoteC } - if guestCluster { - storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) - svcClient, svNamespace := getSvcClientAndNamespace() - setResourceQuota(svcClient, svNamespace, rqLimit) + // required for pod creation + labels_ns = map[string]string{} + labels_ns[admissionapi.EnforceLevelLabel] = string(admissionapi.LevelPrivileged) + labels_ns["e2e-framework"] = f.BaseName - datastoreURL = GetAndExpectStringEnvVar(envSharedDatastoreURL) - var datacenters []string - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - finder := find.NewFinder(e2eVSphere.Client.Client, false) - cfg, err := getConfig() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - dcList := strings.Split(cfg.Global.Datacenters, ",") - for _, dc := range dcList { - dcName := strings.TrimSpace(dc) - if dcName != "" { - datacenters = append(datacenters, dcName) - } - } + //setting map values + labelsMap = make(map[string]string) + labelsMap["app"] = "test" - for _, dc := range datacenters { - defaultDatacenter, err = finder.Datacenter(ctx, dc) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - finder.SetDatacenter(defaultDatacenter) - defaultDatastore, err = getDatastoreByURL(ctx, datastoreURL, defaultDatacenter) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // reading sc parameters required for storage class + if vanillaCluster { + scParameters[scParamDatastoreURL] = datastoreURL + scName = "" + } else if guestCluster { + scParameters[svStorageClassName] = storagePolicyName + scName = "" + } else if supervisorCluster { + profileID := e2eVSphere.GetSpbmPolicyID(storagePolicyName) + scParameters[scParamStoragePolicyID] = profileID + scName = storagePolicyName + } + + var datacenters []string + finder := find.NewFinder(e2eVSphere.Client.Client, false) + cfg, err := getConfig() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + dcList := strings.Split(cfg.Global.Datacenters, ",") + for _, dc := range dcList { + dcName := strings.TrimSpace(dc) + if dcName != "" { + datacenters = append(datacenters, dcName) } } - labels_ns = map[string]string{} - labels_ns[admissionapi.EnforceLevelLabel] = string(admissionapi.LevelPrivileged) - labels_ns["e2e-framework"] = f.BaseName + for _, dc := range datacenters { + defaultDatacenter, err = finder.Datacenter(ctx, dc) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + finder.SetDatacenter(defaultDatacenter) + defaultDatastore, err = getDatastoreByURL(ctx, datastoreURL, defaultDatacenter) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + // reading fullsync wait time + if os.Getenv(envPandoraSyncWaitTime) != "" { + pandoraSyncWaitTime, err = strconv.Atoi(os.Getenv(envPandoraSyncWaitTime)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + pandoraSyncWaitTime = defaultPandoraSyncWaitTime + } }) ginkgo.AfterEach(func() { @@ -207,29 +247,27 @@ var _ = 
ginkgo.Describe("Volume Snapshot Basic Test", func() { 12. Query the snapshot from CNS side - should return 0 entries 13. Cleanup: Delete PVC, SC (validate they are removed) */ - ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] Verify snapshot dynamic provisioning "+ - "workflow", ginkgo.Label(p0, block, tkg, vanilla, snapshot, stable), func() { + + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] [supervisor-snapshot] TC1Verify snapshot dynamic provisioning "+ + "workflow", ginkgo.Label(p0, block, tkg, vanilla, wcp, snapshot, stable), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - var volHandle string - if vanillaCluster { - scParameters[scParamDatastoreURL] = datastoreURL - } else if guestCluster { - scParameters[svStorageClassName] = storagePolicyName - } - - ginkgo.By("Create storage class and PVC") - storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, "") + ginkgo.By("Create storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, scName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }() - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + ginkgo.By("Create PVC") + pvclaim, persistentVolumes, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", diskSize, storageclass, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle = persistentVolumes[0].Spec.CSI.VolumeHandle if guestCluster { volHandle = getVolumeIDFromSupervisorCluster(volHandle) @@ -255,7 +293,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Create a dynamic volume snapshot") volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -277,7 +315,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Delete dynamic volume snapshot") snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, - volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -1292,8 +1330,9 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { storageclass, err := createStorageClass(client, scParameters, nil, "", "", true, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, + pvclaim, persistentVolumes, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", diskSize, storageclass, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle if guestCluster { volHandle = getVolumeIDFromSupervisorCluster(volHandle) @@ -1372,7 +1411,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Create a dynamic volume 
snapshot") volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -1462,7 +1501,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Delete dynamic volume snapshot") snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, - volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -1473,37 +1512,35 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 3. create another sc pointing to a different spbm policy (say thick) 4. Run a restore workflow by giving a different storageclass in the pvc spec 5. the new storageclass would point to a thick provisioned spbm plocy, - while the source pvc was created usig thin provisioned psp-operatorlicy + while the source pvc was created usig thin provisioned psp-operatorlicy 6. cleanup spbm policies, sc's, pvc's */ - ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] Volume restore using snapshot on a different "+ - "storageclass", ginkgo.Label(p0, block, vanilla, snapshot, tkg, stable), func() { + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] [supervisor-snapshot] TC3Volume "+ + "restore using snapshot on a different "+ + "storageclass", ginkgo.Label(p0, block, vanilla, wcp, snapshot, tkg, stable), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - if vanillaCluster { - scParameters[scParamDatastoreURL] = datastoreURL - } else if guestCluster { - scParameters[svStorageClassName] = storagePolicyName - } - - ginkgo.By("Create storage class and PVC") - storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, "") + ginkgo.By("Create storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, scName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }() - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + ginkgo.By("Create PVC") + pvclaim, persistentVolumes, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", diskSize, storageclass, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle if guestCluster { volHandle = getVolumeIDFromSupervisorCluster(volHandle) } gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) - defer func() { err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1524,7 +1561,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Create a dynamic volume snapshot") volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + snapshotContentCreated, 
snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -1545,8 +1582,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { }() scParameters1 := make(map[string]string) - scParameters1[scParamStoragePolicyName] = "Management Storage Policy - Regular" - curtime := time.Now().Unix() randomValue := rand.Int() val := strconv.FormatInt(int64(randomValue), 10) @@ -1555,22 +1590,37 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { scName := "snapshot" + curtimestring + val var storageclass1 *storagev1.StorageClass + ginkgo.By("Create a new storage class") if vanillaCluster { + scParameters1[scParamStoragePolicyName] = "Management Storage Policy - Regular" storageclass1, err = createStorageClass(client, scParameters1, nil, "", "", false, scName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass1.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() } else if guestCluster { scName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores2) storageclass1, err = client.StorageV1().StorageClasses().Get(ctx, scName, metav1.GetOptions{}) + } else if supervisorCluster { + storagePolicyName2 := GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores2) + profileID := e2eVSphere.GetSpbmPolicyID(storagePolicyName2) + scParameters1[scParamStoragePolicyID] = profileID + scName = storagePolicyName2 + storageclass1, err = client.StorageV1().StorageClasses().Get(ctx, scName, metav1.GetOptions{}) + + // setting resource quota for storage policy tagged to supervisor namespace + setStoragePolicyQuota(ctx, restConfig, storagePolicyName2, namespace, rqLimit) } - pvclaim2, persistentVolumes2, _ := verifyVolumeRestoreOperation(ctx, client, - namespace, storageclass1, volumeSnapshot, diskSize, false) - volHandle2 := persistentVolumes2[0].Spec.CSI.VolumeHandle + ginkgo.By("Restore a pvc using a dynamic volume snapshot created above but with a different storage class") + pvclaim2, pvs2, _ := verifyVolumeRestoreOperation(ctx, client, namespace, storageclass1, + volumeSnapshot, diskSize, false) + volHandle2 := pvs2[0].Spec.CSI.VolumeHandle if guestCluster { volHandle2 = getVolumeIDFromSupervisorCluster(volHandle2) } gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) - defer func() { err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim2.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1580,7 +1630,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Delete dynamic volume snapshot") snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, - volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -1661,8 +1711,9 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { } ginkgo.By("Create PVC") - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, newNamespaceName, nil, "", + pvclaim, persistentVolumes, err := createPVCAndQueryVolumeInCNS(ctx, client, newNamespaceName, nil, "", diskSize, storageclass, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle if guestCluster { volHandle = getVolumeIDFromSupervisorCluster(volHandle) @@ 
-1699,7 +1750,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Create a volume snapshot") volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, newNamespaceName, snapc, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, newNamespaceName, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, true) framework.Logf("Volume snapshot name is : %s", volumeSnapshot.Name) snapshotContentName := snapshotContent.Name @@ -1875,7 +1926,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { } else if guestCluster { framework.Logf("Deleting volume snapshot 2: %s", volumeSnapshot2.Name) snapshotCreated2, _, err = deleteVolumeSnapshot(ctx, snapc, namespace, - volumeSnapshot2, pandoraSyncWaitTime, volHandle, staticSnapshotId) + volumeSnapshot2, pandoraSyncWaitTime, volHandle, staticSnapshotId, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -1891,34 +1942,31 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 6. Delete would return a pass from CSI side (this is expected because CSI is designed to return success even though it cannot find a snapshot in the backend) */ - ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] Delete a non-existent snapshot", ginkgo.Label(p0, block, - vanilla, snapshot, tkg, negative), func() { + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot][supervisor-snapshot] TC5Delete a non-existent "+ + "snapshot", ginkgo.Label(p0, block, vanilla, wcp, snapshot, tkg, negative), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - if vanillaCluster { - scParameters[scParamDatastoreURL] = datastoreURL - } else if guestCluster { - scParameters[svStorageClassName] = storagePolicyName - } - - ginkgo.By("Create storage class and PVC") - storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, "") + ginkgo.By("Create storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, scName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }() - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + ginkgo.By("Create pvc") + pvclaim, persistentVolumes, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", diskSize, storageclass, true) volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle + gomega.Expect(err).NotTo(gomega.HaveOccurred()) if guestCluster { volHandle = getVolumeIDFromSupervisorCluster(volHandle) } gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) - defer func() { err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1938,7 +1986,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Create a dynamic volume snapshot") volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, 
diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -1965,9 +2013,8 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Delete dynamic volume snapshot") snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, - volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }) /* @@ -1977,30 +2024,29 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ensure the default class is picked and honored for snapshot creation 3. Validate the fields after snapshot creation succeeds (snapshotClass, retentionPolicy) */ - ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] Create snapshots using default "+ - "VolumeSnapshotClass", ginkgo.Label(p0, block, vanilla, snapshot, tkg), func() { + + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot][supervisor-snapshot] TC7Create snapshots using default "+ + "VolumeSnapshotClass", ginkgo.Label(p0, block, vanilla, snapshot, wcp, tkg), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() var volumeSnapshotClass *snapV1.VolumeSnapshotClass - if vanillaCluster { - scParameters[scParamDatastoreURL] = datastoreURL - } else if guestCluster { - scParameters[svStorageClassName] = storagePolicyName - } - - ginkgo.By("Create storage class and PVC") - storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, "") + ginkgo.By("Create storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, scName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }() - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + ginkgo.By("Create PVC") + pvclaim, persistentVolumes, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", diskSize, storageclass, true) volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle + gomega.Expect(err).NotTo(gomega.HaveOccurred()) if guestCluster { volHandle = getVolumeIDFromSupervisorCluster(volHandle) } @@ -2022,7 +2068,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { volumeSnapshotClass, err = snapc.SnapshotV1().VolumeSnapshotClasses().Create(ctx, vscSpec, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } else if guestCluster { + } else { restConfig = getRestConfigClient() snapc, err = snapclient.NewForConfig(restConfig) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -2052,9 +2098,10 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { snapc, err = snapclient.NewForConfig(guestClusterRestConfig) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } + volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, - pvclaim, volHandle, diskSize, true) + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshotWithoutSnapClass(ctx, namespace, snapc, + volumeSnapshotClass, pvclaim, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { if 
snapshotContentCreated { @@ -2075,7 +2122,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Delete dynamic volume snapshot") snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, - volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -2087,28 +2134,37 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 3. Verify the error 4. Create with exact size and ensure it succeeds */ - ginkgo.It("[block-vanilla-snapshot][tkg-snapshot] Create Volume from snapshot with "+ - "different size", ginkgo.Label(p1, block, vanilla, snapshot, tkg, stable, negative), func() { + + ginkgo.It("[block-vanilla-snapshot][tkg-snapshot][supervisor-snapshot] TC8Create Volume from snapshot with "+ + "different size", ginkgo.Label(p1, block, vanilla, snapshot, tkg, wcp, stable, negative), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - if vanillaCluster { - scParameters[scParamDatastoreURL] = datastoreURL - } else if guestCluster { - scParameters[svStorageClassName] = storagePolicyName - } - - ginkgo.By("Create storage class and PVC") - storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, "") + ginkgo.By("Create storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", true, scName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + if !vanillaCluster { + var allowExpansion = true + if storageclass.AllowVolumeExpansion == nil || *storageclass.AllowVolumeExpansion != allowExpansion { + storageclass.AllowVolumeExpansion = &allowExpansion + storageclass.Parameters = scParameters + storageclass, err = client.StorageV1().StorageClasses().Update(ctx, storageclass, metav1.UpdateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + } defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }() - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + ginkgo.By("Create PVC") + pvclaim, persistentVolumes, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", diskSize, storageclass, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle if guestCluster { volHandle = getVolumeIDFromSupervisorCluster(volHandle) @@ -2134,7 +2190,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Create a dynamic volume snapshot") volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -2154,8 +2210,8 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { } }() - ginkgo.By("Create PVC using the higher size") - pvcSpec := getPersistentVolumeClaimSpecWithDatasource(namespace, defaultrqLimit, storageclass, nil, + ginkgo.By("Create PVC from dynamic volume snapshot but with a 
different higher size") + pvcSpec := getPersistentVolumeClaimSpecWithDatasource(namespace, defaultrqLimit, storageclass, labelsMap, v1.ReadWriteOnce, volumeSnapshot.Name, snapshotapigroup) pvclaim2, err := fpv.CreatePVC(ctx, client, namespace, pvcSpec) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -2194,7 +2250,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Delete dynamic volume snapshot") snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, - volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -2212,32 +2268,21 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 10. Verify if the new pod attaches to the PV created in step-8 11. Cleanup the sts and the snapshot + pv that was left behind in step-7 */ - ginkgo.It("[block-vanilla-snapshot][tkg-snapshot] Snapshot workflow for statefulsets", ginkgo.Label(p0, block, - vanilla, snapshot, tkg), func() { + + ginkgo.It("[block-vanilla-snapshot][tkg-snapshot][supervisor-snapshot] TC9Snapshot workflow for "+ + "statefulsets", ginkgo.Label(p0, block, vanilla, snapshot, wcp, tkg), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - curtime := time.Now().Unix() - randomValue := rand.Int() - val := strconv.FormatInt(int64(randomValue), 10) - val = string(val[1:3]) - curtimestring := strconv.FormatInt(curtime, 10) - scName := "nginx-sc-default-" + curtimestring + val - var snapshotId1, snapshotId2 string - var snapshotContentCreated, snapshotCreated bool - - if guestCluster { - scParameters[svStorageClassName] = storagePolicyName - } - - ginkgo.By("Create storage class and PVC") - scSpec := getVSphereStorageClassSpec(scName, scParameters, nil, "", "", false) - sc, err := client.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{}) + ginkgo.By("Create storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, scName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }() ginkgo.By("Creating service") @@ -2246,11 +2291,12 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { deleteService(namespace, client, service) }() + ginkgo.By("Creating Statefulset") statefulset := GetStatefulSetFromManifest(namespace) ginkgo.By("Creating statefulset") - statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. 
- Spec.StorageClassName = &scName - *statefulset.Spec.Replicas = 2 + if !vanillaCluster { + statefulset.Spec.VolumeClaimTemplates[0].Spec.StorageClassName = &scName + } CreateStatefulSet(namespace, statefulset, client) replicas := *(statefulset.Spec.Replicas) defer func() { @@ -2268,16 +2314,18 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { "Number of Pods in the statefulset should match with number of replicas") framework.Logf("Fetching pod 1, pvc1 and pv1 details") + // pod1 details pod1, err := client.CoreV1().Pods(namespace).Get(ctx, ssPodsBeforeScaleDown.Items[0].Name, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // pvc1 details pvc1 := pod1.Spec.Volumes[0].PersistentVolumeClaim - pvclaim1, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvc1.ClaimName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // pv1 details pv1 := getPvFromClaim(client, statefulset.Namespace, pvc1.ClaimName) volHandle1 := pv1.Spec.CSI.VolumeHandle gomega.Expect(volHandle1).NotTo(gomega.BeEmpty()) @@ -2293,16 +2341,18 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { } framework.Logf("Fetching pod 2, pvc2 and pv2 details") + // pod2 details pod2, err := client.CoreV1().Pods(namespace).Get(ctx, ssPodsBeforeScaleDown.Items[1].Name, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // pvc2 details pvc2 := pod2.Spec.Volumes[0].PersistentVolumeClaim - pvclaim2, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvc2.ClaimName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // pv2 details pv2 := getPvFromClaim(client, statefulset.Namespace, pvc2.ClaimName) volHandle2 := pv2.Spec.CSI.VolumeHandle gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) @@ -2329,106 +2379,50 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { }() ginkgo.By("Create a volume snapshot - 1") - volumeSnapshot1, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx, - getVolumeSnapshotSpec(namespace, volumeSnapshotClass.Name, pvclaim1.Name), metav1.CreateOptions{}) + diskSize := "1Gi" + volumeSnapshot1, snapshotContent1, snapshotCreated1, + snapshotContentCreated1, snapshotId1, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, + volumeSnapshotClass, pvclaim1, volHandle1, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("Volume snapshot name is : %s", volumeSnapshot1.Name) defer func() { - if snapshotContentCreated { - framework.Logf("Deleting volume snapshot content") - err := snapc.SnapshotV1().VolumeSnapshotContents().Delete(ctx, - *volumeSnapshot1.Status.BoundVolumeSnapshotContentName, metav1.DeleteOptions{}) + if snapshotContentCreated1 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent1, snapc, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - if snapshotCreated { + if snapshotCreated1 { framework.Logf("Deleting volume snapshot") - err := snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, volumeSnapshot1.Name, - metav1.DeleteOptions{}) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot1.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot1.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() - ginkgo.By("Verify volume snapshot is created") - volumeSnapshot1, err = 
waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, volumeSnapshot1.Name) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotCreated = true - gomega.Expect(volumeSnapshot1.Status.RestoreSize.Cmp(resource.MustParse("1Gi"))).To(gomega.BeZero()) - - ginkgo.By("Verify volume snapshot content is created") - snapshotContent1, err := snapc.SnapshotV1().VolumeSnapshotContents().Get(ctx, - *volumeSnapshot1.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotContentCreated = true - gomega.Expect(*snapshotContent1.Status.ReadyToUse).To(gomega.BeTrue()) - - framework.Logf("Get volume snapshot ID from snapshot handle") - if vanillaCluster { - snapshothandle1 := *snapshotContent1.Status.SnapshotHandle - snapshotId1 = strings.Split(snapshothandle1, "+")[1] - - ginkgo.By("Query CNS and check the volume snapshot entry") - err = verifySnapshotIsCreatedInCNS(volHandle1, snapshotId1) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - } else if guestCluster { - snapshothandle1 := *snapshotContent1.Status.SnapshotHandle - snapshotId1, _, _, err = getSnapshotHandleFromSupervisorCluster(ctx, - snapshothandle1) - } - ginkgo.By("Create a volume snapshot - 2") - volumeSnapshot2, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx, - getVolumeSnapshotSpec(namespace, volumeSnapshotClass.Name, pvclaim2.Name), metav1.CreateOptions{}) + volumeSnapshot2, snapshotContent2, snapshotCreated2, + snapshotContentCreated2, snapshotId2, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, + volumeSnapshotClass, pvclaim2, volHandle2, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("Volume snapshot name is : %s", volumeSnapshot2.Name) - - var snapshotCreated2 bool - var snapshotContentCreated2 bool defer func() { if snapshotContentCreated2 { - framework.Logf("Deleting volume snapshot content") - err := snapc.SnapshotV1().VolumeSnapshotContents().Delete(ctx, - *volumeSnapshot2.Status.BoundVolumeSnapshotContentName, metav1.DeleteOptions{}) + err = deleteVolumeSnapshotContent(ctx, snapshotContent2, snapc, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } if snapshotCreated2 { framework.Logf("Deleting volume snapshot") - err := snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, volumeSnapshot2.Name, - metav1.DeleteOptions{}) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot2.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot2.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() - ginkgo.By("Verify volume snapshot is created") - volumeSnapshot2, err = waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, volumeSnapshot2.Name) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotCreated2 = true - gomega.Expect(volumeSnapshot2.Status.RestoreSize.Cmp(resource.MustParse("1Gi"))).To(gomega.BeZero()) - - ginkgo.By("Verify volume snapshot content is created") - snapshotContent2, err := snapc.SnapshotV1().VolumeSnapshotContents().Get(ctx, - *volumeSnapshot2.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotContentCreated2 = true - gomega.Expect(*snapshotContent2.Status.ReadyToUse).To(gomega.BeTrue()) - - framework.Logf("Get volume snapshot ID from snapshot handle") - if vanillaCluster { - 
snapshothandle2 := *snapshotContent2.Status.SnapshotHandle - snapshotId2 = strings.Split(snapshothandle2, "+")[1] - - ginkgo.By("Query CNS and check the volume snapshot entry") - err = verifySnapshotIsCreatedInCNS(volHandle2, snapshotId2) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - } else if guestCluster { - snapshothandle2 := *snapshotContent2.Status.SnapshotHandle - snapshotId2, _, _, err = getSnapshotHandleFromSupervisorCluster(ctx, - snapshothandle2) - } - ginkgo.By(fmt.Sprintf("Scaling down statefulsets to number of Replica: %v", replicas-1)) _, scaledownErr := fss.Scale(ctx, client, statefulset, replicas-1) gomega.Expect(scaledownErr).NotTo(gomega.HaveOccurred()) @@ -2438,120 +2432,41 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name)) gomega.Expect(len(ssPodsAfterScaleDown.Items) == int(replicas-1)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas") - replicas -= 1 - - var pvcToDelete *v1.PersistentVolumeClaim - var snapshotToBeDeleted *snapV1.VolumeSnapshot - // var volId string - // Find the missing pod and check if the cnsvolumemetadata is deleted or not - if ssPodsAfterScaleDown.Items[0].Name == pod1.Name { - pvcToDelete = pvclaim2 - snapshotToBeDeleted = volumeSnapshot2 + /* We cannot delete a pvc if snapshot is attached to a volume, csi will not allow + for volume deletion and it will fail with below error + Error from server (Deleting volume with snapshots is not allowed): + admission webhook "validation.csi.vsphere.vmware.com" + denied the request: Deleting volume with snapshots is not allowed */ - } else { - pvcToDelete = pvclaim1 - snapshotToBeDeleted = volumeSnapshot1 + ginkgo.By("Restoring a snapshot-1 to create a new volume and attach it to a new Pod") + pvclaim3, persistentVolumes3, pod3 := verifyVolumeRestoreOperation(ctx, client, + namespace, storageclass, volumeSnapshot1, diskSize, true) + volHandle3 := persistentVolumes3[0].Spec.CSI.VolumeHandle + if guestCluster { + volHandle3 = getVolumeIDFromSupervisorCluster(volHandle3) } + gomega.Expect(volHandle3).NotTo(gomega.BeEmpty()) + defer func() { + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod3.Name, namespace)) + err = fpod.DeletePodWithWait(ctx, client, pod3) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By("Delete volume snapshot and verify the snapshot content is deleted") - err = snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, - snapshotToBeDeleted.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - framework.Logf("Wait till the volume snapshot content is deleted") - err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, - *snapshotToBeDeleted.Status.BoundVolumeSnapshotContentName) - if err != nil { + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim3.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle3) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() - err = fpv.DeletePersistentVolumeClaim(ctx, client, pvcToDelete.Name, namespace) + ginkgo.By("Delete dynamic volume snapshot-1") + snapshotCreated1, snapshotContentCreated1, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot1, pandoraSyncWaitTime, volHandle1, snapshotId1, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow CNS to sync with pandora", pandoraSyncWaitTime)) - 
time.Sleep(time.Duration(pandoraSyncWaitTime) * time.Second) - - ginkgo.By("Create a new PVC") - pvcSpec := getPersistentVolumeClaimSpecWithStorageClass(namespace, "1Gi", sc, nil, v1.ReadWriteOnce) - - pvcSpec.Name = pvcToDelete.Name - pvclaim3, err := fpv.CreatePVC(ctx, client, namespace, pvcSpec) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - ginkgo.By("Expecting the volume to bound") - newPV, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim3}, - framework.ClaimProvisionTimeout) + ginkgo.By("Delete dynamic volume snapshot-2") + snapshotCreated2, snapshotContentCreated2, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot2, pandoraSyncWaitTime, volHandle2, snapshotId2, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - volHandleOfNewPV := newPV[0].Spec.CSI.VolumeHandle - gomega.Expect(volHandleOfNewPV).NotTo(gomega.BeEmpty()) - - replicas += 1 - ginkgo.By(fmt.Sprintf("Scaling up statefulsets to number of Replica: %v", replicas)) - _, scaleupErr := fss.Scale(ctx, client, statefulset, replicas) - gomega.Expect(scaleupErr).NotTo(gomega.HaveOccurred()) - time.Sleep(5 * time.Minute) - fss.WaitForStatusReplicas(ctx, client, statefulset, replicas) - fss.WaitForStatusReadyReplicas(ctx, client, statefulset, replicas) - - ginkgo.By("Delete volume snapshot 1 and verify the snapshot content is deleted") - err = snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, - volumeSnapshot1.Name, metav1.DeleteOptions{}) - if err != nil { - if !apierrors.IsNotFound(err) { - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - } - snapshotCreated = false - - ginkgo.By("Delete volume snapshot 2 and verify the snapshot content is deleted") - err = snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, - volumeSnapshot2.Name, metav1.DeleteOptions{}) - if err != nil { - if !apierrors.IsNotFound(err) { - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - } - snapshotCreated2 = false - - framework.Logf("Wait till the volume snapshot content is deleted") - err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, - *volumeSnapshot1.Status.BoundVolumeSnapshotContentName) - if err != nil { - if !apierrors.IsNotFound(err) { - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - } - snapshotContentCreated = false - - framework.Logf("Wait till the volume snapshot content 1 is deleted") - err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, - *volumeSnapshot2.Status.BoundVolumeSnapshotContentName) - if err != nil { - if !apierrors.IsNotFound(err) { - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - } - snapshotContentCreated2 = false - - if !guestCluster { - ginkgo.By("Verify snapshot 1 entry is deleted from CNS") - err = verifySnapshotIsDeletedInCNS(volHandle1, snapshotId1) - if err != nil { - if !apierrors.IsNotFound(err) { - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - } - - ginkgo.By("Verify snapshot 2 entry is deleted from CNS") - err = verifySnapshotIsDeletedInCNS(volHandle2, snapshotId2) - if err != nil { - if !apierrors.IsNotFound(err) { - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - } - } }) /* @@ -2565,39 +2480,34 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 5. Expect VolumeFailedDelete error with an appropriate err-msg 6. 
Run cleanup - delete the snapshots and then delete pv */ - ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] Volume deletion with existing snapshots", ginkgo.Label(p0, - block, vanilla, snapshot, tkg, stable, negative), func() { + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot][supervisor-snapshot] TC4Volume deletion "+ + "with existing snapshots", ginkgo.Label(p0, block, vanilla, snapshot, tkg, wcp, stable, negative), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - if vanillaCluster { - scParameters[scParamDatastoreURL] = datastoreURL - } else if guestCluster { - scParameters[svStorageClassName] = storagePolicyName - } - - ginkgo.By("Create storage class and PVC") - storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, "") + ginkgo.By("Create Storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, scName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }() - pvclaim, persistentvolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + ginkgo.By("Create PVC") + pvclaim, persistentvolumes, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", diskSize, storageclass, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle if guestCluster { volHandle = getVolumeIDFromSupervisorCluster(volHandle) } gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) - defer func() { err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = fpv.DeletePersistentVolume(ctx, client, persistentvolumes[0].Name) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -2615,7 +2525,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Create a dynamic volume snapshot") volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -2645,22 +2555,14 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Delete dynamic volume snapshot") snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, - volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By("Delete PV") + ginkgo.By("Delete PVC") err = fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) - if err != nil { - if !apierrors.IsNotFound(err) { - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - 
} - } }) /* @@ -2694,8 +2596,9 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, + pvclaim, persistentVolumes, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", diskSize, storageclass, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle if guestCluster { volHandle = getVolumeIDFromSupervisorCluster(volHandle) @@ -2721,7 +2624,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Create a dynamic volume snapshot") volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -2760,7 +2663,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Delete dynamic volume snapshot") snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, - volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -3090,36 +2993,33 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 6. The deployment should succeed and should have the file that was created in step.2 7. Cleanup dep-1 pv snapshots and pvs, delete dep-2 */ - ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] Snapshot workflow for deployments", ginkgo.Label(p0, - block, vanilla, tkg, snapshot, stable), func() { + + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot][supervisor-snapshot] TC10Snapshot workflow for "+ + "deployments", ginkgo.Label(p0, block, vanilla, tkg, snapshot, wcp, stable), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - if vanillaCluster { - scParameters[scParamDatastoreURL] = datastoreURL - } else if guestCluster { - scParameters[svStorageClassName] = storagePolicyName - } - - ginkgo.By("Create storage class and PVC") + ginkgo.By("Create storage class") storageclass, err := createStorageClass(client, scParameters, nil, - "", "", true, "") + "", "", true, scName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }() - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + ginkgo.By("Create PVC") + pvclaim, persistentVolumes, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", diskSize, storageclass, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle if guestCluster { volHandle = getVolumeIDFromSupervisorCluster(volHandle) } gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) - defer func() { err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -3127,9 +3027,6 @@ var _ = ginkgo.Describe("Volume 
Snapshot Basic Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - labelsMap := make(map[string]string) - labelsMap["app"] = "test" - ginkgo.By("Creating a Deployment using pvc1") dep, err := createDeployment(ctx, client, 1, labelsMap, nil, namespace, []*v1.PersistentVolumeClaim{pvclaim}, execRWXCommandPod1, false, busyBoxImageOnGcr) @@ -3159,7 +3056,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Create a dynamic volume snapshot") volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -3184,7 +3081,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { v1.ReadWriteOnce, volumeSnapshot.Name, snapshotapigroup) pvclaim2, err := fpv.CreatePVC(ctx, client, namespace, pvcSpec) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - persistentvolume2, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -3205,7 +3101,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Creating a new deployment from the restored pvc") dep2, err := createDeployment(ctx, client, 1, labelsMap2, nil, namespace, - []*v1.PersistentVolumeClaim{pvclaim2}, "", false, busyBoxImageOnGcr) + []*v1.PersistentVolumeClaim{pvclaim2}, execRWXCommandPod2, false, busyBoxImageOnGcr) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { ginkgo.By("Delete Deployment-2") @@ -3224,9 +3120,13 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { output := readFileFromPod(namespace, pod2.Name, filePathPod1) gomega.Expect(strings.Contains(output, "Hello message from Pod1")).NotTo(gomega.BeFalse()) + writeDataOnFileFromPod(namespace, pod2.Name, filePathPod1, "Hello message from Pod2") + output = readFileFromPod(namespace, pod2.Name, filePathPod1) + gomega.Expect(strings.Contains(output, "Hello message from Pod2")).NotTo(gomega.BeFalse()) + ginkgo.By("Delete dynamic volume snapshot") snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, - volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -3240,34 +3140,40 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 6. Run resize and it should succeed 7. 
Cleanup the pvc */ - ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] Volume offline resize of a volume "+ - "having snapshots", ginkgo.Label(p0, block, vanilla, tkg, snapshot, stable, negative), func() { + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot][supervisor-snapshot] TC12Volume offline resize of a volume "+ + "having snapshots", ginkgo.Label(p0, block, vanilla, tkg, snapshot, stable, wcp, negative), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - if vanillaCluster { - scParameters[scParamDatastoreURL] = datastoreURL - } else if guestCluster { - scParameters[svStorageClassName] = storagePolicyName + ginkgo.By("Create storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", true, scName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if !vanillaCluster { + var allowExpansion = true + if storageclass.AllowVolumeExpansion == nil || *storageclass.AllowVolumeExpansion != allowExpansion { + storageclass.AllowVolumeExpansion = &allowExpansion + storageclass.Parameters = scParameters + storageclass, err = client.StorageV1().StorageClasses().Update(ctx, storageclass, metav1.UpdateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } } - - ginkgo.By("Create storage class and PVC") - storageclass, err := createStorageClass(client, scParameters, nil, "", "", true, "") - gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }() - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + ginkgo.By("Create PVC") + pvclaim, persistentVolumes, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", diskSize, storageclass, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) if guestCluster { volHandle = getVolumeIDFromSupervisorCluster(volHandle) } - gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) - defer func() { err = fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -3277,7 +3183,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { } }() - ginkgo.By("Expanding current pvc") + ginkgo.By("Expanding the current pvc") currentPvcSize := pvclaim.Spec.Resources.Requests[v1.ResourceStorage] newSize := currentPvcSize.DeepCopy() newSize.Add(resource.MustParse("4Gi")) @@ -3286,7 +3192,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { pvclaim, err = expandPVCSize(pvclaim, newSize, client) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(pvclaim).NotTo(gomega.BeNil()) - pvcSize := pvclaim.Spec.Resources.Requests[v1.ResourceStorage] if pvcSize.Cmp(newSize) != 0 { framework.Failf("error updating pvc size %q", pvclaim.Name) @@ -3304,7 +3209,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volHandle)) queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volHandle) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - if len(queryResult.Volumes) == 0 { err = fmt.Errorf("queryCNSVolumeWithResult returned no volume") 
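Across the hunks above, three test helpers change shape: createPVCAndQueryVolumeInCNS now surfaces an error as its third return value, createDynamicVolumeSnapshot returns one extra value ahead of the error (discarded with _ at every call site in this file), and deleteVolumeSnapshot takes a new trailing bool. The calling pattern, inferred purely from these call sites (the meaning of the extra return value and of the trailing bool are assumptions, not confirmed by this patch):

    // create a PVC and wait for its CNS entry; the error is now returned to the caller
    pvclaim, pvs, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "",
        diskSize, storageclass, true)
    gomega.Expect(err).NotTo(gomega.HaveOccurred())
    volHandle := pvs[0].Spec.CSI.VolumeHandle

    // take a dynamic snapshot; the sixth return value (likely a snapshot handle, assumption) is unused here
    volumeSnapshot, snapshotContent, snapshotCreated,
        snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc,
        volumeSnapshotClass, pvclaim, volHandle, diskSize, true)
    gomega.Expect(err).NotTo(gomega.HaveOccurred())

    // delete the snapshot; the new trailing bool is passed as true at every call site in this file (semantics assumed)
    snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace,
        volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId, true)
    gomega.Expect(err).NotTo(gomega.HaveOccurred())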
} @@ -3331,7 +3235,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Create a dynamic volume snapshot") volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, newDiskSize, true) defer func() { if snapshotContentCreated { @@ -3361,7 +3265,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Delete dynamic volume snapshot") snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, - volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Expanding current pvc after deleting volume snapshot") @@ -3415,35 +3319,41 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 6. Run resize and it should succeed 7. Cleanup the pvc */ - ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] Volume online resize of a volume having "+ + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot][supervisor-snapshot] TC13Volume online resize of a volume having "+ "snapshots", ginkgo.Label(p0, block, vanilla, tkg, snapshot, stable, negative), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - if vanillaCluster { - scParameters[scParamDatastoreURL] = datastoreURL - } else if guestCluster { - scParameters[svStorageClassName] = storagePolicyName - } - - ginkgo.By("Create storage class and PVC") - storageclass, err := createStorageClass(client, scParameters, nil, "", "", true, "") + ginkgo.By("Create storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", true, scName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if !vanillaCluster { + var allowExpansion = true + if storageclass.AllowVolumeExpansion == nil || *storageclass.AllowVolumeExpansion != allowExpansion { + storageclass.AllowVolumeExpansion = &allowExpansion + storageclass.Parameters = scParameters + storageclass, err = client.StorageV1().StorageClasses().Update(ctx, storageclass, metav1.UpdateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + } defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }() - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + ginkgo.By("Create PVC") + pvclaim, pvs, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", diskSize, storageclass, true) - volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volHandle := pvs[0].Spec.CSI.VolumeHandle if guestCluster { volHandle = getVolumeIDFromSupervisorCluster(volHandle) } gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) - defer func() { err = fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -3457,30 +3367,38 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { pod, err := createPod(ctx, client, namespace, nil, 
[]*v1.PersistentVolumeClaim{pvclaim}, false, execRWXCommandPod1) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { - // Delete POD ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace)) err = fpod.DeletePodWithWait(ctx, client, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() var vmUUID string - nodeName := pod.Spec.NodeName + var exists bool if vanillaCluster { vmUUID = getNodeUUID(ctx, client, pod.Spec.NodeName) } else if guestCluster { vmUUID, err = getVMUUIDFromNodeName(pod.Spec.NodeName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else if supervisorCluster { + annotations := pod.Annotations + vmUUID, exists = annotations[vmUUIDLabel] + gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("Pod doesn't have %s annotation", vmUUIDLabel)) + _, err := e2eVSphere.getVMByUUID(ctx, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - - ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volHandle, nodeName)) - isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, volHandle, vmUUID) + isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, pvs[0].Spec.CSI.VolumeHandle, vmUUID) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") - // Modify PVC spec to trigger volume expansion + if !guestCluster { + ginkgo.By("Verify volume metadata for deployment pod, pvc and pv") + err = waitAndVerifyCnsVolumeMetadata(ctx, volHandle, pvclaim, pvs[0], pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + ginkgo.By("Modify the PVC spec to enable online volume expansion when no snapshot exists for this PVC") currentPvcSize := pvclaim.Spec.Resources.Requests[v1.ResourceStorage] newSize := currentPvcSize.DeepCopy() newSize.Add(resource.MustParse("4Gi")) @@ -3525,7 +3443,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Create a dynamic volume snapshot") volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, newDiskSize, true) defer func() { if snapshotContentCreated { @@ -3544,7 +3462,8 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { } }() - // Modify PVC spec to trigger volume expansion + ginkgo.By("Modify the PVC spec to enable online volume expansion when " + + "snapshot exists for this PVC, volume expansion should fail") currentPvcSize = claims.Spec.Resources.Requests[v1.ResourceStorage] newSize = currentPvcSize.DeepCopy() newSize.Add(resource.MustParse("6Gi")) @@ -3554,7 +3473,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Delete dynamic volume snapshot") snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, - volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Waiting for file system resize to finish") @@ -3634,35 +3553,32 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 7. bring the host back up 8. 
cleanup the snapshots, restore-pvc and source-pvc */ - ginkgo.It("[block-vanilla-snapshot] Snapshot restore while the Host "+ + + ginkgo.It("[block-vanilla-snapshot][supervisor-snapshot] TC27Snapshot restore while the Host "+ "is Down", ginkgo.Label(p0, block, vanilla, snapshot, disruptive), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - var storageclass *storagev1.StorageClass - var pvclaim *v1.PersistentVolumeClaim - var pvclaims []*v1.PersistentVolumeClaim - var err error - var snapshotCreated = false - ginkgo.By("Create storage class and PVC") - scParameters[scParamDatastoreURL] = datastoreURL - storageclass, pvclaim, err = createPVCAndStorageClass(ctx, client, - namespace, nil, scParameters, diskSize, nil, "", false, "") + ginkgo.By("Create storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, scName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }() - ginkgo.By("Expect claim to provision volume successfully") - pvclaims = append(pvclaims, pvclaim) - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) + ginkgo.By("Create PVC") + pvclaim, persistentVolumes, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + diskSize, storageclass, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle + volHandle = persistentVolumes[0].Spec.CSI.VolumeHandle + if guestCluster { + volHandle = getVolumeIDFromSupervisorCluster(volHandle) + } gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) - defer func() { err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -3670,61 +3586,42 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - // Verify using CNS Query API if VolumeID retrieved from PV is present. 
- ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volHandle)) - queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volHandle) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(queryResult.Volumes).ShouldNot(gomega.BeEmpty()) - gomega.Expect(queryResult.Volumes[0].VolumeId.Id).To(gomega.Equal(volHandle)) - ginkgo.By("Create volume snapshot class") - volumeSnapshotClass, err := snapc.SnapshotV1().VolumeSnapshotClasses().Create(ctx, - getVolumeSnapshotClassSpec(snapV1.DeletionPolicy("Delete"), nil), metav1.CreateOptions{}) + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { - err := snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, - volumeSnapshotClass.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err = snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, + metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }() - ginkgo.By("Create a volume snapshot") - snapshot1, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx, - getVolumeSnapshotSpec(namespace, volumeSnapshotClass.Name, pvclaim.Name), metav1.CreateOptions{}) + ginkgo.By("Create a dynamic volume snapshot") + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvclaim, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("Volume snapshot name is : %s", snapshot1.Name) - defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + if snapshotCreated { framework.Logf("Deleting volume snapshot") - deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, snapshot1.Name, pandoraSyncWaitTime) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() - ginkgo.By("Verify volume snapshot is Ready to use") - snapshot1_updated, err := waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, snapshot1.Name) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotCreated = true - gomega.Expect(snapshot1_updated.Status.RestoreSize.Cmp(resource.MustParse(diskSize))).To(gomega.BeZero()) - - ginkgo.By("Verify volume snapshot content is created") - snapshotContent1, err := snapc.SnapshotV1().VolumeSnapshotContents().Get(ctx, - *snapshot1_updated.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - gomega.Expect(*snapshotContent1.Status.ReadyToUse).To(gomega.BeTrue()) - - framework.Logf("Get volume snapshot ID from snapshot handle") - snapshothandle := *snapshotContent1.Status.SnapshotHandle - snapshotId := strings.Split(snapshothandle, "+")[1] - - ginkgo.By("Query CNS and check the volume snapshot entry") - err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By("Identify the host on which the PV resides") - framework.Logf("pvName %v", persistentvolumes[0].Name) - 
vsanObjuuid := VsanObjIndentities(ctx, &e2eVSphere, persistentvolumes[0].Name) + framework.Logf("pvName %v", persistentVolumes[0].Name) + vsanObjuuid := VsanObjIndentities(ctx, &e2eVSphere, persistentVolumes[0].Name) framework.Logf("vsanObjuuid %v", vsanObjuuid) gomega.Expect(vsanObjuuid).NotTo(gomega.BeNil()) @@ -3738,7 +3635,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Stop hostd service on the host on which the PV is present") stopHostDOnHost(ctx, hostIP) - defer func() { ginkgo.By("Start hostd service on the host on which the PV is present") startHostDOnHost(ctx, hostIP) @@ -3746,7 +3642,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Create PVC from snapshot") pvcSpec := getPersistentVolumeClaimSpecWithDatasource(namespace, diskSize, storageclass, nil, - v1.ReadWriteOnce, snapshot1.Name, snapshotapigroup) + v1.ReadWriteOnce, volumeSnapshot.Name, snapshotapigroup) pvclaim2, err := fpv.CreatePVC(ctx, client, namespace, pvcSpec) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -3755,8 +3651,10 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { []*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle2 := persistentvolumes2[0].Spec.CSI.VolumeHandle + if guestCluster { + volHandle2 = getVolumeIDFromSupervisorCluster(volHandle2) + } gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) - defer func() { err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim2.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -3766,6 +3664,11 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Start hostd service on the host on which the PV is present") startHostDOnHost(ctx, hostIP) + + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* @@ -3780,76 +3683,70 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 8. Ensure the data written in step-4 is intanct 9. Delete both deployments and. 
the pvcs */ - ginkgo.It("[block-vanilla-snapshot][tkg-snapshot] VC reboot with deployment pvcs "+ + + ginkgo.It("[block-vanilla-snapshot][tkg-snapshot][supervisor-snapshot] TC25VC reboot with deployment pvcs "+ "having snapshot", ginkgo.Label(p0, block, vanilla, tkg, snapshot, disruptive, flaky), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - var pvclaim *v1.PersistentVolumeClaim var pvclaims []*v1.PersistentVolumeClaim var restoredpvclaims []*v1.PersistentVolumeClaim var volumesnapshots []*snapV1.VolumeSnapshot var volumesnapshotsReadytoUse []*snapV1.VolumeSnapshot + var snapshotContents []*snapV1.VolumeSnapshotContent - ginkgo.By("Create storage class and PVC") - if vanillaCluster { - scParameters[scParamDatastoreURL] = datastoreURL - } else if guestCluster { - scParameters[svStorageClassName] = storagePolicyName - } - - curtime := time.Now().Unix() - randomValue := rand.Int() - val := strconv.FormatInt(int64(randomValue), 10) - val = string(val[1:3]) - curtimestring := strconv.FormatInt(curtime, 10) - scName := "snapshot" + curtimestring + val + ginkgo.By("Create storage class") storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, scName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }() ginkgo.By("Creating PVCs using the Storage Class") - framework.Logf("VOLUME_OPS_SCALE is set to %v", 5) for i := 0; i < 5; i++ { framework.Logf("Creating pvc%v", i) accessMode := v1.ReadWriteOnce - pvclaim, err = createPVC(ctx, client, namespace, nil, "", storageclass, accessMode) + pvclaim, err := createPVC(ctx, client, namespace, nil, "", storageclass, accessMode) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pvclaims = append(pvclaims, pvclaim) } ginkgo.By("Expect claim to provision volume successfully") - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) + _, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle - gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) defer func() { - for _, claim := range pvclaims { - err := fpv.DeletePersistentVolumeClaim(ctx, client, claim.Name, namespace) + for _, pvclaim := range pvclaims { + pv := getPvFromClaim(client, pvclaim.Namespace, pvclaim.Name) + volHandle = pv.Spec.CSI.VolumeHandle + if guestCluster { + volHandle = getVolumeIDFromSupervisorCluster(volHandle) + } + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() labelsMap := make(map[string]string) - labelsMap["app"] = "test" - ginkgo.By("Creating a Deployment using pvc1") + labelsMap["app1"] = "test1" + ginkgo.By("Creating a Deployment with replica count 1 using pvcs") dep, err := createDeployment(ctx, client, 1, labelsMap, nil, namespace, pvclaims, execRWXCommandPod1, false, busyBoxImageOnGcr) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { ginkgo.By("Delete Deployment") err := 
client.AppsV1().Deployments(namespace).Delete(ctx, dep.Name, metav1.DeleteOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - ginkgo.By("Wait for deployment pods to be up and running") + ginkgo.By("Wait for deployment pod to be up and running") pods, err := fdep.GetPodsForDeployment(ctx, client, dep) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pod := pods.Items[0] @@ -3879,36 +3776,33 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { defer func() { ginkgo.By("Rebooting VC") err = invokeVCenterReboot(ctx, vcAddress) - isVcRebooted = true + //isVcRebooted = true gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = waitForHostToBeUp(e2eVSphere.Config.Global.VCenterHostname) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Done with reboot") - essentialServices := []string{spsServiceName, vsanhealthServiceName, vpxdServiceName} - checkVcenterServicesRunning(ctx, vcAddress, essentialServices) + // essentialServices := []string{spsServiceName, vsanhealthServiceName, vpxdServiceName} + // checkVcenterServicesRunning(ctx, vcAddress, essentialServices) // After reboot. bootstrap() - - framework.Logf("Deleting volume snapshot") - for _, snapshot := range volumesnapshots { - deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, snapshot.Name, pandoraSyncWaitTime) - } }() ginkgo.By("Rebooting VC") err = invokeVCenterReboot(ctx, vcAddress) - isVcRebooted = true + //isVcRebooted = true gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = waitForHostToBeUp(e2eVSphere.Config.Global.VCenterHostname) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Done with reboot") - essentialServices := []string{spsServiceName, vsanhealthServiceName, vpxdServiceName} - checkVcenterServicesRunning(ctx, vcAddress, essentialServices) + // essentialServices := []string{spsServiceName, vsanhealthServiceName, vpxdServiceName} + // checkVcenterServicesRunning(ctx, vcAddress, essentialServices) // After reboot. 
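// bootstrap() re-establishes the suite's vCenter session after the reboot; the fixed
// sleep added below stands in for the explicit checkVcenterServicesRunning readiness
// check that is now commented out (descriptive note inferred from the surrounding hunk).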
bootstrap() + time.Sleep(15 * time.Minute) + fullSyncWaitTime := 0 if os.Getenv(envFullSyncWaitTime) != "" { @@ -3940,9 +3834,25 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { *snaps.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(*snapshotContent.Status.ReadyToUse).To(gomega.BeTrue()) + snapshotContents = append(snapshotContents, snapshotContent) } + defer func() { + framework.Logf("Deleting volume snapshot") + for i := 0; i < 5; i++ { + err = deleteVolumeSnapshotContent(ctx, snapshotContents[i], snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By("Create a PVC using the snapshot created above") + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumesnapshots[i].Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumesnapshots[i].Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Create PVCs using the snapshot created above") for _, snapshot := range volumesnapshots { pvcSpec := getPersistentVolumeClaimSpecWithDatasource(namespace, diskSize, storageclass, nil, v1.ReadWriteOnce, snapshot.Name, snapshotapigroup) @@ -3960,19 +3870,26 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { defer func() { for _, restoredpvc := range restoredpvclaims { + pv := getPvFromClaim(client, restoredpvc.Namespace, restoredpvc.Name) + volHandle = pv.Spec.CSI.VolumeHandle + if guestCluster { + volHandle = getVolumeIDFromSupervisorCluster(volHandle) + } err := fpv.DeletePersistentVolumeClaim(ctx, client, restoredpvc.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() ginkgo.By("Rebooting VC") err = invokeVCenterReboot(ctx, vcAddress) - isVcRebooted = true + //isVcRebooted = true gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = waitForHostToBeUp(e2eVSphere.Config.Global.VCenterHostname) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Done with reboot") - checkVcenterServicesRunning(ctx, vcAddress, essentialServices) + //checkVcenterServicesRunning(ctx, vcAddress, essentialServices) // After reboot. bootstrap() @@ -3981,9 +3898,8 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { labelsMap2["app2"] = "test2" dep2, err := createDeployment(ctx, client, 1, labelsMap2, nil, namespace, - restoredpvclaims, "", false, busyBoxImageOnGcr) + restoredpvclaims, execCommand, false, busyBoxImageOnGcr) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { ginkgo.By("Delete Deployment-2") err := client.AppsV1().Deployments(namespace).Delete(ctx, dep2.Name, metav1.DeleteOptions{}) @@ -4014,37 +3930,31 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 8. Delete snapshot 9. 
Cleanup pvc/sc */ - ginkgo.It("[block-vanilla-snapshot] VC password reset during snapshot creation", ginkgo.Label(p1, block, - vanilla, snapshot, disruptive), func() { + ginkgo.It("[block-vanilla-snapshot][supervisor-snapshot] TC15VC password reset during "+ + "snapshot creation", ginkgo.Label(p1, block, vanilla, snapshot, disruptive), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - var storageclass *storagev1.StorageClass - var pvclaim *v1.PersistentVolumeClaim - var pvclaims []*v1.PersistentVolumeClaim - var err error - var snapshotCreated = false - var snapshot3Created = false - nimbusGeneratedVcPwd := GetAndExpectStringEnvVar(nimbusVcPwd) - ginkgo.By("Create storage class and PVC") - scParameters[scParamDatastoreURL] = datastoreURL - storageclass, pvclaim, err = createPVCAndStorageClass(ctx, client, - namespace, nil, scParameters, diskSize, nil, "", false, "") + ginkgo.By("Create storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, scName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }() - ginkgo.By("Expect claim to provision volume successfully") - pvclaims = append(pvclaims, pvclaim) - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, (2 * framework.ClaimProvisionTimeout)) + ginkgo.By("Create PVC") + pvclaim, persistentVolumes, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + diskSize, storageclass, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle + volHandle = persistentVolumes[0].Spec.CSI.VolumeHandle + if guestCluster { + volHandle = getVolumeIDFromSupervisorCluster(volHandle) + } gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) - defer func() { err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -4052,170 +3962,164 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - // Verify using CNS Query API if VolumeID retrieved from PV is present. 
- ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volHandle)) - queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volHandle) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(queryResult.Volumes).ShouldNot(gomega.BeEmpty()) - gomega.Expect(queryResult.Volumes[0].VolumeId.Id).To(gomega.Equal(volHandle)) - ginkgo.By("Create volume snapshot class") - volumeSnapshotClass, err := snapc.SnapshotV1().VolumeSnapshotClasses().Create(ctx, - getVolumeSnapshotClassSpec(snapV1.DeletionPolicy("Delete"), nil), metav1.CreateOptions{}) + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { - err := snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, - volumeSnapshotClass.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err = snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, + metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }() - ginkgo.By("Create a volume snapshot") - snapshot1, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx, - getVolumeSnapshotSpec(namespace, volumeSnapshotClass.Name, pvclaim.Name), metav1.CreateOptions{}) + ginkgo.By("Create a dynamic volume snapshot") + volumeSnapshot1, snapshotContent1, snapshotCreated1, + snapshotContentCreated1, snapshotId1, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, + volumeSnapshotClass, pvclaim, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("Volume snapshot name is : %s", snapshot1.Name) - defer func() { - if snapshotCreated { - framework.Logf("Deleting volume snapshot") - deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, snapshot1.Name, pandoraSyncWaitTime) + if snapshotContentCreated1 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent1, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - }() - ginkgo.By("Verify volume snapshot is Ready to use") - snapshot1_updated, err := waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, snapshot1.Name) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotCreated = true - gomega.Expect(snapshot1_updated.Status.RestoreSize.Cmp(resource.MustParse(diskSize))).To(gomega.BeZero()) + if snapshotCreated1 { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot1.Name, pandoraSyncWaitTime) - ginkgo.By("Verify volume snapshot content is created") - snapshotContent1, err := snapc.SnapshotV1().VolumeSnapshotContents().Get(ctx, - *snapshot1_updated.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(*snapshotContent1.Status.ReadyToUse).To(gomega.BeTrue()) + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot1.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() - framework.Logf("Get volume snapshot ID from snapshot handle") - snapshothandle := *snapshotContent1.Status.SnapshotHandle - snapshotId := strings.Split(snapshothandle, "+")[1] + ginkgo.By("Fetching the username and password of the current vcenter session from secret") + var newPassword, originalConf, username, nimbusGeneratedVcPwd, originalPassword string + var vsphereCfg 
e2eTestConfig + var secret *v1.Secret + var originalVCPasswordChanged bool - ginkgo.By("Query CNS and check the volume snapshot entry") - err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + secret, err = c.CoreV1().Secrets(csiSystemNamespace).Get(ctx, configSecret, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By("Fetching the username and password of the current vcenter session from secret") - secret, err := c.CoreV1().Secrets(csiSystemNamespace).Get(ctx, configSecret, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + originalConf = string(secret.Data[vSphereCSIConf]) + vsphereCfg, err = readConfigFromSecretString(originalConf) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - originalConf := string(secret.Data[vSphereCSIConf]) - vsphereCfg, err := readConfigFromSecretString(originalConf) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By(fmt.Sprintln("Changing password on the vCenter host")) + nimbusGeneratedVcPwd = GetAndExpectStringEnvVar(nimbusVcPwd) + username = vsphereCfg.Global.User + originalPassword = vsphereCfg.Global.Password + newPassword = e2eTestPassword + ginkgo.By(fmt.Sprintf("Original password %s, new password %s", originalPassword, newPassword)) + err = invokeVCenterChangePassword(ctx, username, nimbusGeneratedVcPwd, newPassword, vcAddress, clientIndex) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + originalVCPasswordChanged = true + defer func() { + if originalVCPasswordChanged { + ginkgo.By("Reverting the password change") + err = invokeVCenterChangePassword(ctx, username, nimbusGeneratedVcPwd, originalPassword, + vcAddress, clientIndex) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + } else if supervisorCluster { + ginkgo.By("Perform password rotation on the supervisor") + csiNamespace := GetAndExpectStringEnvVar(envCSINamespace) + passwordRotated, err := performPasswordRotationOnSupervisor(client, ctx, csiNamespace, vcAddress) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(passwordRotated).To(gomega.BeTrue()) + } - ginkgo.By(fmt.Sprintln("Changing password on the vCenter host")) - username := vsphereCfg.Global.User - originalPassword := vsphereCfg.Global.Password - newPassword := e2eTestPassword - ginkgo.By(fmt.Sprintf("Original password %s, new password %s", originalPassword, newPassword)) - err = invokeVCenterChangePassword(ctx, username, nimbusGeneratedVcPwd, newPassword, vcAddress, clientIndex) + ginkgo.By("Create a volume snapshot-2 and verify volume snapshot-2 " + + "creation succeeds with previous csi session") + volumeSnapshot2, snapshotContent2, snapshotCreated2, + snapshotContentCreated2, snapshotId2, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, + volumeSnapshotClass, pvclaim, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - originalVCPasswordChanged := true - defer func() { - if originalVCPasswordChanged { - ginkgo.By("Reverting the password change") - err = invokeVCenterChangePassword(ctx, username, nimbusGeneratedVcPwd, originalPassword, - vcAddress, clientIndex) + if snapshotContentCreated2 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent2, snapc, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - }() - ginkgo.By("Create a volume snapshot2") - snapshot2, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx, - getVolumeSnapshotSpec(namespace, volumeSnapshotClass.Name, pvclaim.Name), 
metav1.CreateOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshot2Created := true + if snapshotCreated2 { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot2.Name, pandoraSyncWaitTime) - defer func() { - if snapshot2Created { - deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, snapshot2.Name, pandoraSyncWaitTime) + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot2.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() - ginkgo.By("Verify volume snapshot 2 is creation succeeds with previous csi session") - snapshot2, err = waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, snapshot2.Name) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("Snapshot details is %+v", snapshot2) - - ginkgo.By("Modifying the password in the secret") - vsphereCfg.Global.Password = newPassword - modifiedConf, err := writeConfigToSecretString(vsphereCfg) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - ginkgo.By("Updating the secret to reflect the new password") - secret.Data[vSphereCSIConf] = []byte(modifiedConf) - _, err = c.CoreV1().Secrets(csiSystemNamespace).Update(ctx, secret, metav1.UpdateOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - defer func() { - ginkgo.By("Reverting the secret change back to reflect the original password") - currentSecret, err := c.CoreV1().Secrets(csiSystemNamespace).Get(ctx, configSecret, metav1.GetOptions{}) + if vanillaCluster { + ginkgo.By("Modifying the password in the secret") + vsphereCfg.Global.Password = newPassword + modifiedConf, err := writeConfigToSecretString(vsphereCfg) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - currentSecret.Data[vSphereCSIConf] = []byte(originalConf) - _, err = c.CoreV1().Secrets(csiSystemNamespace).Update(ctx, currentSecret, metav1.UpdateOptions{}) + ginkgo.By("Updating the secret to reflect the new password") + secret.Data[vSphereCSIConf] = []byte(modifiedConf) + _, err = c.CoreV1().Secrets(csiSystemNamespace).Update(ctx, secret, metav1.UpdateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }() - - ginkgo.By("Create a volume snapshot3") - snapshot3, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx, - getVolumeSnapshotSpec(namespace, volumeSnapshotClass.Name, pvclaim.Name), metav1.CreateOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("Volume snapshot name is : %s", snapshot3.Name) - defer func() { - if snapshot3Created { - framework.Logf("Deleting volume snapshot") - deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, snapshot3.Name, pandoraSyncWaitTime) - } - }() + defer func() { + ginkgo.By("Reverting the secret change back to reflect the original password") + currentSecret, err := c.CoreV1().Secrets(csiSystemNamespace).Get(ctx, configSecret, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By("Verify volume snapshot is Ready to use") - snapshot3_updated, err := waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, snapshot3.Name) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshot3Created = true - gomega.Expect(snapshot3_updated.Status.RestoreSize.Cmp(resource.MustParse(diskSize))).To(gomega.BeZero()) + currentSecret.Data[vSphereCSIConf] = []byte(originalConf) + _, err = c.CoreV1().Secrets(csiSystemNamespace).Update(ctx, currentSecret, 
metav1.UpdateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + } - ginkgo.By("Verify volume snapshot content 3 is created") - snapshotContent3, err := snapc.SnapshotV1().VolumeSnapshotContents().Get(ctx, - *snapshot3_updated.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{}) + ginkgo.By("Create a volume snapshot-3") + volumeSnapshot3, snapshotContent3, snapshotCreated3, + snapshotContentCreated3, snapshotId3, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, + volumeSnapshotClass, pvclaim, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(*snapshotContent3.Status.ReadyToUse).To(gomega.BeTrue()) + defer func() { + if snapshotContentCreated3 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent3, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } - framework.Logf("Get volume snapshot ID from snapshot handle") - snapshothandle3 := *snapshotContent3.Status.SnapshotHandle - snapshotId3 := strings.Split(snapshothandle3, "+")[1] + if snapshotCreated3 { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot3.Name, pandoraSyncWaitTime) - ginkgo.By("Query CNS and check the volume snapshot entry") - err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId3) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot3.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() - ginkgo.By("Reverting the password change") - err = invokeVCenterChangePassword(ctx, username, nimbusGeneratedVcPwd, originalPassword, - vcAddress, clientIndex) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - originalVCPasswordChanged = false + if vanillaCluster { + ginkgo.By("Reverting the password change") + err = invokeVCenterChangePassword(ctx, username, nimbusGeneratedVcPwd, originalPassword, + vcAddress, clientIndex) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + originalVCPasswordChanged = false - ginkgo.By("Reverting the secret change back to reflect the original password") - currentSecret, err := c.CoreV1().Secrets(csiSystemNamespace).Get(ctx, configSecret, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Reverting the secret change back to reflect the original password") + currentSecret, err := c.CoreV1().Secrets(csiSystemNamespace).Get(ctx, configSecret, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - currentSecret.Data[vSphereCSIConf] = []byte(originalConf) - _, err = c.CoreV1().Secrets(csiSystemNamespace).Update(ctx, currentSecret, metav1.UpdateOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + currentSecret.Data[vSphereCSIConf] = []byte(originalConf) + _, err = c.CoreV1().Secrets(csiSystemNamespace).Update(ctx, currentSecret, metav1.UpdateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } ginkgo.By("Create PVC from snapshot") pvcSpec := getPersistentVolumeClaimSpecWithDatasource(namespace, diskSize, storageclass, nil, - v1.ReadWriteOnce, snapshot1.Name, snapshotapigroup) + v1.ReadWriteOnce, volumeSnapshot1.Name, snapshotapigroup) pvclaim2, err := fpv.CreatePVC(ctx, client, namespace, pvcSpec) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -4225,13 +4129,27 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle2 := persistentvolumes2[0].Spec.CSI.VolumeHandle gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) - defer func() { err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim2.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle2) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() + + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated1, snapshotContentCreated1, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot1, pandoraSyncWaitTime, volHandle, snapshotId1, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated2, snapshotContentCreated2, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot2, pandoraSyncWaitTime, volHandle, snapshotId2, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated3, snapshotContentCreated3, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot3, pandoraSyncWaitTime, volHandle, snapshotId3, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* @@ -4245,13 +4163,16 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 7. Verify pvcs all are in Bound state. 8. Cleanup all the snapshots and the pvc. */ - ginkgo.It("[block-vanilla-snapshot][tkg-snapshot] Multi-master and snapshot workflow", ginkgo.Label(p1, block, - vanilla, tkg, snapshot), func() { + + ginkgo.It("[block-vanilla-snapshot][tkg-snapshot][supervisor-snapshot] TC26Multi-master and "+ + "snapshot workflow", ginkgo.Label(p1, block, vanilla, tkg, snapshot), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() + var storageclass *storagev1.StorageClass var pvclaim *v1.PersistentVolumeClaim + var pvclaims []*v1.PersistentVolumeClaim var err error var snapshotContentCreated = false var sshClientConfig, sshWcpConfig *ssh.ClientConfig @@ -4263,33 +4184,31 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { snapshotOpsScale = 5 } - ginkgo.By("Create storage class and PVC") + ginkgo.By("Create storage class") if vanillaCluster { - scParameters[scParamDatastoreURL] = datastoreURL - storageclass, err = createStorageClass(client, scParameters, nil, "", "", false, "") + storageclass, err = createStorageClass(client, scParameters, nil, "", "", false, scName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } else if guestCluster { + defer func() { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + } else { storageclass, err = client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } } - defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }() - - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + ginkgo.By("Create PVC") + pvclaim, persistentVolumes, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", diskSize, storageclass, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle if guestCluster { volHandle = getVolumeIDFromSupervisorCluster(volHandle) } 
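// At this point volHandle holds the CNS volume ID: on guest (TKG) clusters the PV's
// handle refers to the supervisor PVC, so getVolumeIDFromSupervisorCluster above maps it
// to the underlying CNS volume before any CNS queries run (descriptive note, based on how
// this helper is used throughout these tests).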
gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) - defer func() { - ginkgo.By("In defer function, deleting PVC") err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) @@ -4299,17 +4218,16 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Create volume snapshot class") volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { - ginkgo.By("In defer function deleting volume snapshot class") - err := snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, - volumeSnapshotClass.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err := snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, + volumeSnapshotClass.Name, metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }() if vanillaCluster { nimbusGeneratedK8sVmPwd := GetAndExpectStringEnvVar(nimbusK8sVmPwd) - sshClientConfig = &ssh.ClientConfig{ User: rootUser, Auth: []ssh.AuthMethod{ @@ -4317,17 +4235,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { }, HostKeyCallback: ssh.InsecureIgnoreHostKey(), } - - /* Get current leader Csi-Controller-Pod where CSI Snapshotter is running and " + - find the master node IP where this Csi-Controller-Pod is running */ - ginkgo.By("Get current leader Csi-Controller-Pod name where csi-snapshotter is running and " + - "find the master node IP where this Csi-Controller-Pod is running") - csiControllerPod, k8sMasterIP, err = getK8sMasterNodeIPWhereContainerLeaderIsRunning(ctx, - c, sshClientConfig, snapshotterContainerName) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("csi-snapshotter leader is in Pod %s "+ - "which is running on master node %s", csiControllerPod, k8sMasterIP) - } else if guestCluster { + } else if supervisorCluster || guestCluster { svcMasterIp = GetAndExpectStringEnvVar(svcMasterIP) svcMasterPwd = GetAndExpectStringEnvVar(svcMasterPassword) framework.Logf("svc master ip: %s", svcMasterIp) @@ -4338,6 +4246,19 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { }, HostKeyCallback: ssh.InsecureIgnoreHostKey(), } + } + + /* Get current leader Csi-Controller-Pod where CSI Snapshotter is running and " + + find the master node IP where this Csi-Controller-Pod is running */ + if vanillaCluster { + ginkgo.By("Get current leader Csi-Controller-Pod name where csi-snapshotter is running and " + + "find the master node IP where this Csi-Controller-Pod is running") + csiControllerPod, k8sMasterIP, err = getK8sMasterNodeIPWhereContainerLeaderIsRunning(ctx, + c, sshClientConfig, snapshotterContainerName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("csi-snapshotter leader is in Pod %s "+ + "which is running on master node %s", csiControllerPod, k8sMasterIP) + } else { framework.Logf("sshwcpConfig: %v", sshWcpConfig) csiControllerPod, k8sMasterIP, err = getK8sMasterNodeIPWhereContainerLeaderIsRunning(ctx, client, sshWcpConfig, snapshotterContainerName) @@ -4377,7 +4298,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Kill container CSI-Snapshotter on the master node where elected leader " + "csi controller pod is running") - if vanillaCluster { + if vanillaCluster || supervisorCluster { /* Delete elected leader CSI-Controller-Pod where csi-snapshotter is running */ csipods, err := 
client.CoreV1().Pods(csiSystemNamespace).List(ctx, metav1.ListOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -4387,7 +4308,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { err = fpod.WaitForPodsRunningReady(ctx, client, csiSystemNamespace, int32(csipods.Size()), 0, pollTimeoutShort*2) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } else if guestCluster { + } else { err = execStopContainerOnGc(sshWcpConfig, svcMasterIp, snapshotterContainerName, k8sMasterIP, svcNamespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -4413,7 +4334,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { volumeSnapshotContents = append(volumeSnapshotContents, snapshotContent) framework.Logf("Get volume snapshot ID from snapshot handle") - snapshotId, err := getVolumeSnapshotIdFromSnapshotHandle(ctx, snapshotContent) + snapshotId, _, err := getVolumeSnapshotIdFromSnapshotHandle(ctx, snapshotContent) gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("snapshot Id: %s", snapshotId) @@ -4432,7 +4353,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("csi-snapshotter leader is in Pod %s "+ "which is running on master node %s", csiControllerPod, k8sMasterIP) - } else if guestCluster { + } else { framework.Logf("sshwcpConfig: %v", sshWcpConfig) csiControllerPod, k8sMasterIP, err = getK8sMasterNodeIPWhereContainerLeaderIsRunning(ctx, client, sshWcpConfig, snapshotterContainerName) @@ -4473,17 +4394,26 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle2 := persistentvolumes2[0].Spec.CSI.VolumeHandle gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) - - ginkgo.By("Deleting PVC2") - err = fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim2.Name, namespace) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle2) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pvclaims = append(pvclaims, pvclaim2) } + defer func() { + for _, pvclaim := range pvclaims { + pv := getPvFromClaim(client, pvclaim.Namespace, pvclaim.Name) + volHandle = pv.Spec.CSI.VolumeHandle + if guestCluster { + volHandle = getVolumeIDFromSupervisorCluster(volHandle) + } + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + for i := 0; i < snapshotOpsScale; i++ { framework.Logf("Get volume snapshot ID from snapshot handle") - snapshotId, err := getVolumeSnapshotIdFromSnapshotHandle(ctx, volumeSnapshotContents[i]) + snapshotId, _, err := getVolumeSnapshotIdFromSnapshotHandle(ctx, volumeSnapshotContents[i]) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volumeSnapshot, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Get(ctx, @@ -4491,7 +4421,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Delete volume snapshot") _, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, - volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }) @@ -4722,39 +4652,40 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 3. 
Expected behavior: resize operation should succeed and the snapshot creation should succeed after resize completes */ - ginkgo.It("[block-vanilla-snapshot][tkg-snapshot] Volume snapshot creation when "+ - "resize is in progress", ginkgo.Label(p1, block, vanilla, snapshot, tkg, stable), func() { + ginkgo.It("TC14Volume snapshot creation when resize is in progress", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - var volumeSnapshot *snapV1.VolumeSnapshot - var snapshotContent *snapV1.VolumeSnapshotContent - var snapshotCreated, snapshotContentCreated bool - var snapshotId string - var err error + ginkgo.By("Create storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, scName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - if vanillaCluster { - scParameters[scParamDatastoreURL] = datastoreURL - } else if guestCluster { - scParameters[svStorageClassName] = storagePolicyName + if !vanillaCluster { + var allowExpansion = true + if storageclass.AllowVolumeExpansion == nil || *storageclass.AllowVolumeExpansion != allowExpansion { + storageclass.AllowVolumeExpansion = &allowExpansion + storageclass.Parameters = scParameters + storageclass, err = client.StorageV1().StorageClasses().Update(ctx, storageclass, metav1.UpdateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } } - - ginkgo.By("Create storage class and PVC") - storageclass, err := createStorageClass(client, scParameters, nil, "", "", true, "") - gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }() - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + ginkgo.By("Create PVC") + pvclaim, pvs, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", diskSize, storageclass, true) - volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volHandle = pvs[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) if guestCluster { volHandle = getVolumeIDFromSupervisorCluster(volHandle) } - gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) defer func() { err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -4773,21 +4704,25 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { }() var vmUUID string - nodeName := pod.Spec.NodeName + var exists bool if vanillaCluster { vmUUID = getNodeUUID(ctx, client, pod.Spec.NodeName) } else if guestCluster { vmUUID, err = getVMUUIDFromNodeName(pod.Spec.NodeName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else if supervisorCluster { + annotations := pod.Annotations + vmUUID, exists = annotations[vmUUIDLabel] + gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("Pod doesn't have %s annotation", vmUUIDLabel)) + _, err := e2eVSphere.getVMByUUID(ctx, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - - ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volHandle, nodeName)) - isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, volHandle, vmUUID) + isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, 
pvs[0].Spec.CSI.VolumeHandle, vmUUID) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") - // Modify PVC spec to trigger volume expansion + ginkgo.By("Modify PVC spec to trigger volume expansion") currentPvcSize := pvclaim.Spec.Resources.Requests[v1.ResourceStorage] newSize := currentPvcSize.DeepCopy() newSize.Add(resource.MustParse("4Gi")) @@ -4809,17 +4744,9 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { }() ginkgo.By("Create a dynamic volume snapshot") - if vanillaCluster { - volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, snapshotId, err = createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, - pvclaim, volHandle, diskSize, true) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } else if guestCluster { - volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, snapshotId, err = createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, - pvclaim, volHandle, newDiskSize, true) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, snapshotId, _, _ := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvclaim, volHandle, newDiskSize, true) defer func() { if snapshotContentCreated { err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) @@ -4837,9 +4764,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { } }() - ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow CNS to sync with pandora", pandoraSyncWaitTime)) - time.Sleep(time.Duration(pandoraSyncWaitTime) * time.Second) - ginkgo.By("Waiting for file system resize to finish") claims, err = waitForFSResize(pvclaim, client) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -4865,7 +4789,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Delete dynamic volume snapshot") snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, - volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -5112,8 +5036,9 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + pvclaim, persistentVolumes, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", diskSize, storageclass, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle if guestCluster { volHandle = getVolumeIDFromSupervisorCluster(volHandle) @@ -5133,7 +5058,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Create a dynamic volume snapshot") volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, dynamicSnapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, + snapshotContentCreated, dynamicSnapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, true) defer func() { if snapshotCreated { @@ -5186,12 +5111,12 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Delete pre-provisioned snapshot") staticSnapshotCreated, staticSnapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, - 
staticSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + staticSnapshot, pandoraSyncWaitTime, volHandle, snapshotId, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Delete dynamic volume snapshot") snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, - volumeSnapshot, pandoraSyncWaitTime, volHandle, dynamicSnapshotId) + volumeSnapshot, pandoraSyncWaitTime, volHandle, dynamicSnapshotId, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -5233,8 +5158,9 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + pvclaim, persistentVolumes, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", diskSize, storageclass, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle if guestCluster { volHandle = getVolumeIDFromSupervisorCluster(volHandle) @@ -5254,7 +5180,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Create a dynamic volume snapshot") volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -5350,8 +5276,8 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Take a snapshot of restored PVC created from dynamic snapshot") volumeSnapshot3, _, snapshotCreated3, - snapshotContentCreated3, snapshotId3, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, - pvclaim2, volHandle, diskSize, true) + snapshotContentCreated3, snapshotId3, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, + volumeSnapshotClass, pvclaim2, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { if snapshotContentCreated3 { @@ -5377,17 +5303,17 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Delete dynamic volume snapshot") snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, - volumeSnapshot3, pandoraSyncWaitTime, volHandle2, snapshotId3) + volumeSnapshot3, pandoraSyncWaitTime, volHandle2, snapshotId3, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Delete pre-provisioned snapshot") staticSnapshotCreated, staticSnapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, - staticSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + staticSnapshot, pandoraSyncWaitTime, volHandle, snapshotId, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Delete dynamic volume snapshot") snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, - volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -5425,8 +5351,9 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + pvclaim, persistentVolumes, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", 
diskSize, storageclass, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle if guestCluster { volHandle = getVolumeIDFromSupervisorCluster(volHandle) @@ -5446,7 +5373,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Create a dynamic volume snapshot") volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + snapshotContentCreated, _, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -5498,7 +5425,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Delete static volume snapshot") staticSnapshotCreated, staticSnapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, - staticSnapshot, pandoraSyncWaitTime, volHandle, staticSnapshotId) + staticSnapshot, pandoraSyncWaitTime, volHandle, staticSnapshotId, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create PVC using the snapshot deleted") @@ -5559,8 +5486,9 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + pvclaim, persistentVolumes, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", diskSize, storageclass, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle if guestCluster { volHandle = getVolumeIDFromSupervisorCluster(volHandle) @@ -5580,7 +5508,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Create a dynamic volume snapshot") volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + snapshotContentCreated, _, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -5674,7 +5602,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Delete pre-provisioned snapshot") staticSnapshotCreated, staticSnapshotContentCreated, err := deleteVolumeSnapshot(ctx, snapc, namespace, - staticSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + staticSnapshot, pandoraSyncWaitTime, volHandle, snapshotId, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { if staticSnapshotCreated { @@ -5700,7 +5628,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Delete dynamic volume snapshot") snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, - volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -5775,41 +5703,35 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 8. Verify snapshot size. It should be same as that of restored volume size. 9. Run cleanup: Delete snapshots, restored-volumes, pods. 
*/ - ginkgo.It("[tkg-snapshot] Perform online resize on restored volume", ginkgo.Label(p0, - snapshot, tkg, newTest, stable), func() { + ginkgo.It("[tkg-snapshot][supervisor-snapshot] TC21Perform online resize on restored volume", ginkgo.Label(p0, + snapshot, tkg, newTest, stable), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - var storageclass *storagev1.StorageClass - var pvclaim *v1.PersistentVolumeClaim - var err error - var snapshotContentCreated = false - var snapshotCreated = false - - if vanillaCluster { - scParameters[scParamDatastoreURL] = datastoreURL - } else if guestCluster { - scParameters[svStorageClassName] = storagePolicyName - } - ginkgo.By("Create storage class and PVC") - storageclass, err = createStorageClass(client, scParameters, nil, "", "", true, "") + ginkgo.By("Create storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", true, scName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }() - - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + ginkgo.By("Create PVC") + pvclaim, pvs, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", diskSize, storageclass, true) - volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volHandle := pvs[0].Spec.CSI.VolumeHandle if guestCluster { volHandle = getVolumeIDFromSupervisorCluster(volHandle) } gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + ginkgo.By("Create Pod") + pod, err := createPod(ctx, client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false, + execRWXCommandPod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace)) + err = fpod.DeletePodWithWait(ctx, client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By(fmt.Sprintf("Deleting the pvc %s in namespace %s", pvclaim.Name, namespace)) err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -5817,23 +5739,47 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() + var vmUUID string + var exists bool + + if guestCluster { + vmUUID, err = getVMUUIDFromNodeName(pod.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else if supervisorCluster { + annotations := pod.Annotations + vmUUID, exists = annotations[vmUUIDLabel] + gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("Pod doesn't have %s annotation", vmUUIDLabel)) + _, err := e2eVSphere.getVMByUUID(ctx, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, pvs[0].Spec.CSI.VolumeHandle, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") + + ginkgo.By("Verify the volume is accessible and Read/write is possible") + cmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", + "cat /mnt/volume1/Pod.html "} + output := e2ekubectl.RunKubectlOrDie(namespace, cmd...) 
+ gomega.Expect(strings.Contains(output, "Hello message from Pod")).NotTo(gomega.BeFalse()) + + wrtiecmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", + "echo 'Hello message from test into Pod' > /mnt/volume1/Pod.html"} + e2ekubectl.RunKubectlOrDie(namespace, wrtiecmd...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) + gomega.Expect(strings.Contains(output, "Hello message from test into Pod")).NotTo(gomega.BeFalse()) + + wrtiecmd2 := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", + "echo 'fsync' "} + e2ekubectl.RunKubectlOrDie(namespace, wrtiecmd2...) + ginkgo.By("Create/Get volume snapshot class") volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { - if vanillaCluster { - err := snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, - metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - }() - ginkgo.By("Create a volume snapshot") volumeSnapshot, _, snapshotCreated, - snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, true) - defer func() { if snapshotContentCreated { framework.Logf("Deleting volume snapshot content") @@ -5852,7 +5798,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { }() ginkgo.By("Create PVC from Snapshot and verify restore volume operations") - pvclaim2, persistentVolumes2, pod := verifyVolumeRestoreOperation(ctx, client, + pvclaim2, persistentVolumes2, pod2 := verifyVolumeRestoreOperation(ctx, client, namespace, storageclass, volumeSnapshot, diskSize, true) volHandle2 := persistentVolumes2[0].Spec.CSI.VolumeHandle svcPVCName2 := persistentVolumes2[0].Spec.CSI.VolumeHandle @@ -5860,34 +5806,67 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { volHandle2 = getVolumeIDFromSupervisorCluster(volHandle2) } gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) - defer func() { - // Delete POD - ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace)) - err = fpod.DeletePodWithWait(ctx, client, pod) + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod2.Name, namespace)) + err = fpod.DeletePodWithWait(ctx, client, pod2) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("Deleting the pvc %s in namespace %s", pvclaim2.Name, namespace)) - err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim2.Name, namespace) + err = fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim2.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle2) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By("Perform online resize on the restored volume and make sure resize should go fine") - verifyOnlineVolumeExpansionOnGc(client, namespace, svcPVCName2, volHandle, pvclaim2, pod, f) + var newDiskSize string + if guestCluster { + verifyOnlineVolumeExpansionOnGc(client, namespace, svcPVCName2, volHandle, pvclaim2, pod2, f) + } else if supervisorCluster { + ginkgo.By("Expanding current pvc after deleting volume snapshot") + currentPvcSize := pvclaim2.Spec.Resources.Requests[v1.ResourceStorage] + newSize := currentPvcSize.DeepCopy() + newSize.Add(resource.MustParse("4Gi")) + newDiskSize = "6Gi" + 
framework.Logf("currentPvcSize %v, newSize %v", currentPvcSize, newSize) + pvclaim2, err = expandPVCSize(pvclaim2, newSize, client) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(pvclaim2).NotTo(gomega.BeNil()) - ginkgo.By("Create a volume snapshot from restored volume") - volumeSnapshotFromRestoreVol, snapshotContentFromRestoreVol, snapshotCreated, - snapshotContentCreated, snapshotIdFromRestoreVol, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, - volumeSnapshotClass, pvclaim2, volHandle2, "3Gi", true) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pvcSize := pvclaim2.Spec.Resources.Requests[v1.ResourceStorage] + if pvcSize.Cmp(newSize) != 0 { + framework.Failf("error updating pvc size %q", pvclaim2.Name) + } - framework.Logf("Volume snapshot name is : %s", volumeSnapshotFromRestoreVol.Name) - snapshotCreated = true + ginkgo.By("Waiting for controller volume resize to finish") + err = waitForPvResizeForGivenPvc(pvclaim2, client, totalResizeWaitPeriod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volHandle2)) + queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volHandle2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + if len(queryResult.Volumes) == 0 { + err = fmt.Errorf("queryCNSVolumeWithResult returned no volume") + } + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Verifying disk size requested in volume expansion is honored") + newSizeInMb := int64(6144) + if queryResult.Volumes[0].BackingObjectDetails.(*cnstypes.CnsBlockBackingDetails).CapacityInMb != + newSizeInMb { + err = fmt.Errorf("got wrong disk size after volume expansion +%v ", + queryResult.Volumes[0].BackingObjectDetails.(*cnstypes.CnsBlockBackingDetails).CapacityInMb) + } + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + ginkgo.By("Create a snapshot for the restored volume") + volumeSnapshotFromRestoreVol, snapshotContentFromRestoreVol, snapshotCreatedFromRestoreVol, + snapshotContentCreatedFromRestoreVol, snapshotIdFromRestoreVol, _, err := createDynamicVolumeSnapshot(ctx, + namespace, snapc, volumeSnapshotClass, pvclaim2, volHandle2, newDiskSize, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - if snapshotContentCreated { + if snapshotContentCreatedFromRestoreVol { framework.Logf("Deleting volume snapshot content") gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = deleteVolumeSnapshotContent(ctx, snapshotContentFromRestoreVol, @@ -5895,7 +5874,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - if snapshotCreated { + if snapshotCreatedFromRestoreVol { framework.Logf("Deleting volume snapshot") deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshotFromRestoreVol.Name, pandoraSyncWaitTime) } @@ -5903,13 +5882,12 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { framework.Logf("Deleting volume snapshot") snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, - volumeSnapshotFromRestoreVol, pandoraSyncWaitTime, volHandle2, snapshotIdFromRestoreVol) + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, - volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + snapshotCreatedFromRestoreVol, snapshotContentCreatedFromRestoreVol, err = 
deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshotFromRestoreVol, pandoraSyncWaitTime, volHandle2, snapshotIdFromRestoreVol, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }) /* @@ -5922,47 +5900,49 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 6. Create new volume using this snapshot as source, use the same SC and attach it to a Pod. 7. Run cleanup: Delete snapshots, restored-volumes, pods. */ - ginkgo.It("[tkg-snapshot] Offline relocation of FCD with snapshots", ginkgo.Label(p0, snapshot, tkg, - newTest, stable), func() { + + ginkgo.It("[tkg-snapshot][supervisor-snapshot] TC22Offline relocation of FCD "+ + "with snapshots", ginkgo.Label(p0, snapshot, tkg, newTest, stable), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - var storageclass *storagev1.StorageClass - var pvclaim *v1.PersistentVolumeClaim - var err error - var snapshotContentCreated, snapshotCreated bool + var datastoreUrls []string - sharedvmfsURL := os.Getenv(envSharedVMFSDatastoreURL) - if sharedvmfsURL == "" { - ginkgo.Skip(fmt.Sprintf("Env %v is missing", envSharedVMFSDatastoreURL)) + // read nfs datastore url + nfsDatastoreUrl := os.Getenv(envSharedNFSDatastoreURL) + if nfsDatastoreUrl == "" { + ginkgo.Skip(fmt.Sprintf("Env %v is missing", envSharedNFSDatastoreURL)) } + // read vsan datastore url sharedVsanDatastoreURL := os.Getenv(envSharedDatastoreURL) if sharedVsanDatastoreURL == "" { ginkgo.Skip(fmt.Sprintf("Env %v is missing", envSharedDatastoreURL)) } - datastoreUrls = append(datastoreUrls, sharedvmfsURL, sharedVsanDatastoreURL) + datastoreUrls = append(datastoreUrls, nfsDatastoreUrl, sharedVsanDatastoreURL) - storagePolicyName = os.Getenv(envStoragePolicyNameForVsanVmfsDatastores) + // read storage policy where vsan and nfs datastires are tagged + storagePolicyName = os.Getenv(envStoragePolicyNameForVsanNfsDatastores) if storagePolicyName == "" { - ginkgo.Skip(fmt.Sprintf("Env %v is missing", envStoragePolicyNameForVsanVmfsDatastores)) + ginkgo.Skip(fmt.Sprintf("Env %v is missing", envStoragePolicyNameForVsanNfsDatastores)) } - ginkgo.By("Create storage class and PVC") - storageclass, err = client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) + ginkgo.By("Create storage class") + storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + ginkgo.By("Create PVC") + pvclaim, persistentVolumes, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", diskSize, storageclass, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle if guestCluster { volHandle = getVolumeIDFromSupervisorCluster(volHandle) } gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) - defer func() { ginkgo.By(fmt.Sprintf("Deleting the pvc %s in namespace %s", pvclaim.Name, namespace)) err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) @@ -5975,20 +5955,11 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { - if vanillaCluster { - err := snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, - metav1.DeleteOptions{}) - 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - }() - ginkgo.By("Create a volume snapshot") volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, true) framework.Logf("Volume snapshot name is : %s", volumeSnapshot.Name) - defer func() { if snapshotContentCreated { framework.Logf("Deleting volume snapshot content") @@ -6029,9 +6000,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { volHandle2 = getVolumeIDFromSupervisorCluster(volHandle2) } gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) - defer func() { - // Delete POD ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace)) err = fpod.DeletePodWithWait(ctx, client, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -6043,11 +6012,33 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - framework.Logf("Deleting volume snapshot") + ginkgo.By("Create a volume snapshot from the restored pvc") + volumeSnapshot2, snapshotContent2, snapshotCreated2, + snapshotContentCreated2, _, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvclaim2, volHandle2, diskSize, true) + framework.Logf("Volume snapshot name is : %s", volumeSnapshot.Name) + defer func() { + if snapshotContentCreated2 { + framework.Logf("Deleting volume snapshot content") + err = deleteVolumeSnapshotContent(ctx, snapshotContent2, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated2 { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot2.Name, pandoraSyncWaitTime) + } + }() + + framework.Logf("Deleting volume snapshot-1") snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, - volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("Deleting volume snapshot-2") + snapshotCreated2, snapshotContentCreated2, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot2, pandoraSyncWaitTime, volHandle2, snapshotId, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* @@ -6081,8 +6072,9 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + pvclaim, persistentVolumes, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", diskSize, storageclass, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle if guestCluster { volHandle = getVolumeIDFromSupervisorCluster(volHandle) @@ -6111,7 +6103,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Create a volume snapshot") volumeSnapshot, _, snapshotCreated, - snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, true) 
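// Throughout this patch the snapshot helpers change shape: createDynamicVolumeSnapshot now
// returns seven values (the sixth is discarded as `_` at every call site in this file) and
// deleteVolumeSnapshot takes an extra trailing bool, passed as `true` here. A minimal usage
// sketch built only from the call sites visible in this file; the exact signatures live in
// the shared snapshot utilities and are not reproduced here.
//
//	volumeSnapshot, snapshotContent, snapshotCreated,
//		snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx,
//		namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, true)
//	gomega.Expect(err).NotTo(gomega.HaveOccurred())
//
//	snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace,
//		volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId, true)
//	gomega.Expect(err).NotTo(gomega.HaveOccurred())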
gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Volume snapshot name is : %s", volumeSnapshot.Name) @@ -6171,7 +6163,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Delete snapshot") snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, - volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -6185,29 +6177,27 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 5. Restore PVC creation should fail and be stuck in Pending state with appropriate error message. 6. Perform Cleanup. */ - ginkgo.It("[tkg-snapshot] Volume mode conversion", ginkgo.Label(p0, snapshot, tkg, newTest, stable, - negative), func() { + ginkgo.It("[tkg-snapshot][supervisor-snapshot] TC18Volume mode "+ + "conversion", ginkgo.Label(p0, snapshot, tkg, newTest, stable, negative), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - var storageclass *storagev1.StorageClass - var pvclaim *v1.PersistentVolumeClaim - var err error - var snapshotContentCreated, snapshotCreated bool - ginkgo.By("Create storage class and PVC") - storageclass, err = client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) - if !apierrors.IsNotFound(err) { - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + var pvclaims []*v1.PersistentVolumeClaim + + ginkgo.By("Create storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, scName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create PVC") + pvclaim, persistentVolumes, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", diskSize, storageclass, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) if guestCluster { volHandle = getVolumeIDFromSupervisorCluster(volHandle) } - gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) - defer func() { ginkgo.By(fmt.Sprintf("Deleting the pvc %s in namespace %s", pvclaim.Name, namespace)) err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) @@ -6216,25 +6206,16 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - ginkgo.By("Create/Get volume snapshot class") + ginkgo.By("Get volume snapshot class") volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { - if vanillaCluster { - err := snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, - metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - }() - ginkgo.By("Create a volume snapshot") volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Volume snapshot name is : %s", volumeSnapshot.Name) - defer func() { if snapshotContentCreated { framework.Logf("Deleting volume snapshot content") @@ -6248,18 
+6229,19 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { } }() + ginkgo.By("Creating a PVC from a snapshot but with different access mode") accessModes := []v1.PersistentVolumeAccessMode{v1.ReadWriteMany, v1.ReadOnlyMany} - for _, accessMode := range accessModes { ginkgo.By(fmt.Sprintf("Create PVC from snapshot with %s access mode", accessMode)) pvcSpec := getPersistentVolumeClaimSpecWithDatasource(namespace, diskSize, storageclass, nil, accessMode, volumeSnapshot.Name, snapshotapigroup) pvclaim2, err := fpv.CreatePVC(ctx, client, namespace, pvcSpec) + pvclaims = append(pvclaims, pvclaim2) gomega.Expect(err).NotTo(gomega.HaveOccurred()) _, err = fpv.WaitForPVClaimBoundPhase(ctx, client, - []*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionTimeout) + []*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionShortTimeout) framework.Logf("Error from creating pvc with %s accessmode is : %s", accessMode, err.Error()) gomega.Expect(err).To(gomega.HaveOccurred()) @@ -6267,26 +6249,28 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { framework.Logf("Expected failure message: %+q", expectedErrMsg) err = waitForEvent(ctx, client, namespace, expectedErrMsg, pvclaim2.Name) gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Expected error : %q", expectedErrMsg)) + } - ginkgo.By(fmt.Sprintf("Deleting the pvc %s in namespace %s", pvclaim2.Name, namespace)) - err = fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim2.Name, namespace) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - err = waitForPvcToBeDeleted(ctx, client, pvclaim2.Name, namespace) + ginkgo.By("Deleting a PVC which is stuck in Pending state") + for _, pvclaim := range pvclaims { + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - framework.Logf("Deleting pending PVCs from SVC namespace") - pvcList := getAllPVCFromNamespace(svcClient, svcNamespace) - for _, pvc := range pvcList.Items { - if pvc.Status.Phase == v1.ClaimPending { - framework.ExpectNoError(fpv.DeletePersistentVolumeClaim(ctx, svcClient, pvc.Name, svcNamespace), - "Failed to delete PVC", pvc.Name) + if guestCluster { + framework.Logf("Deleting pending PVCs from SVC namespace") + pvcList := getAllPVCFromNamespace(svcClient, svcNamespace) + for _, pvc := range pvcList.Items { + if pvc.Status.Phase == v1.ClaimPending { + framework.ExpectNoError(fpv.DeletePersistentVolumeClaim(ctx, svcClient, pvc.Name, svcNamespace), + "Failed to delete PVC", pvc.Name) + } } } + + ginkgo.By("Delete dynamic volume snapshot") snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, - volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -6343,119 +6327,21 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { Run cleanup: Delete snapshots, restored-volumes, pods. 
*/ - ginkgo.It("[tkg-snapshot] Create restore volume snapshot in consistent order", ginkgo.Label(p0, snapshot, - tkg, newTest, stable), func() { + ginkgo.It("[tkg-snapshot][supervisor-snapshot] TC19Create restore volume snapshot "+ + "in consistent order", ginkgo.Label(p0, snapshot, tkg, newTest, stable), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - if vanillaCluster { - scParameters[scParamDatastoreURL] = datastoreURL - } else if guestCluster { - scParameters[svStorageClassName] = storagePolicyName - } - - ginkgo.By("Create storage class and PVC") - storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, "") + ginkgo.By("Create storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, scName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }() - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + ginkgo.By("Create PVC") + pvclaim1, pvs1, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", diskSize, storageclass, true) - volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle - if guestCluster { - volHandle = getVolumeIDFromSupervisorCluster(volHandle) - } - gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) - defer func() { - err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }() - - ginkgo.By("Create Pod") - pod, err := createPod(ctx, client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false, - execRWXCommandPod) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { - ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace)) - err = fpod.DeletePodWithWait(ctx, client, pod) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }() - - var vmUUID string - nodeName := pod.Spec.NodeName - - if vanillaCluster { - vmUUID = getNodeUUID(ctx, client, pod.Spec.NodeName) - } else if guestCluster { - vmUUID, err = getVMUUIDFromNodeName(pod.Spec.NodeName) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - - ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volHandle, nodeName)) - isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, volHandle, vmUUID) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") - - ginkgo.By("Verify the volume is accessible and Read/write is possible") - cmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", - "cat /mnt/volume1/Pod.html "} - output := e2ekubectl.RunKubectlOrDie(namespace, cmd...) - gomega.Expect(strings.Contains(output, "Hello message from Pod")).NotTo(gomega.BeFalse()) - - wrtiecmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", - "echo 'Hello message from test into Pod' > /mnt/volume1/Pod.html"} - e2ekubectl.RunKubectlOrDie(namespace, wrtiecmd...) - output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) 
- gomega.Expect(strings.Contains(output, "Hello message from test into Pod")).NotTo(gomega.BeFalse()) - - ginkgo.By("Create volume snapshot class") - volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { - if vanillaCluster { - err = snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, - metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - }() - - ginkgo.By("Create a dynamic volume snapshot") - volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, - pvclaim, volHandle, diskSize, true) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { - if snapshotContentCreated { - err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - - if snapshotCreated { - framework.Logf("Deleting volume snapshot") - deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) - - framework.Logf("Wait till the volume snapshot is deleted") - err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, - *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - }() - - ginkgo.By("Restore volume from snapshot created above") - pvcSpec := getPersistentVolumeClaimSpecWithDatasource(namespace, diskSize, storageclass, nil, - v1.ReadWriteOnce, volumeSnapshot.Name, snapshotapigroup) - pvclaim1, err := fpv.CreatePVC(ctx, client, namespace, pvcSpec) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - persistentvolume1, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim1}, - framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - volHandle1 := persistentvolume1[0].Spec.CSI.VolumeHandle + volHandle1 := pvs1[0].Spec.CSI.VolumeHandle gomega.Expect(volHandle1).NotTo(gomega.BeEmpty()) if guestCluster { volHandle1 = getVolumeIDFromSupervisorCluster(volHandle1) @@ -6477,34 +6363,41 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() + var vmUUID string + var exists bool + if vanillaCluster { vmUUID = getNodeUUID(ctx, client, pod1.Spec.NodeName) } else if guestCluster { vmUUID, err = getVMUUIDFromNodeName(pod1.Spec.NodeName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else if supervisorCluster { + annotations := pod1.Annotations + vmUUID, exists = annotations[vmUUIDLabel] + gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("Pod doesn't have %s annotation", vmUUIDLabel)) + _, err := e2eVSphere.getVMByUUID(ctx, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - - ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volHandle1, pod1.Spec.NodeName)) - isDiskAttached, err = e2eVSphere.isVolumeAttachedToVM(client, volHandle1, vmUUID) + isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, pvs1[0].Spec.CSI.VolumeHandle, vmUUID) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") ginkgo.By("Verify the volume is accessible and Read/write is possible") - cmd = []string{"exec", pod1.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", - "cat /mnt/volume1/Pod1.html "} - output = 
e2ekubectl.RunKubectlOrDie(namespace, cmd...) + output := readFileFromPod(namespace, pod1.Name, filePathPod1) gomega.Expect(strings.Contains(output, "Hello message from Pod1")).NotTo(gomega.BeFalse()) - wrtiecmd = []string{"exec", pod1.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", - "echo 'Hello message from test into Pod1' > /mnt/volume1/Pod1.html"} - e2ekubectl.RunKubectlOrDie(namespace, wrtiecmd...) - output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) + writeDataOnFileFromPod(namespace, pod1.Name, filePathPod1, "Hello message from test into Pod1") + output = readFileFromPod(namespace, pod1.Name, filePathPod1) gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Create a dynamic volume snapshot") volumeSnapshot1, snapshotContent1, snapshotCreated1, - snapshotContentCreated1, snapshotId1, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, - pvclaim1, volHandle1, diskSize, true) + snapshotContentCreated1, snapshotId1, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, + volumeSnapshotClass, pvclaim1, volHandle1, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { if snapshotContentCreated1 { @@ -6524,67 +6417,72 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { }() ginkgo.By("Restore volume from snapshot created above") - pvcSpec = getPersistentVolumeClaimSpecWithDatasource(namespace, diskSize, storageclass, nil, - v1.ReadWriteOnce, volumeSnapshot1.Name, snapshotapigroup) - pvclaim2, err := fpv.CreatePVC(ctx, client, namespace, pvcSpec) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - persistentvolume2, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim2}, - framework.ClaimProvisionTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - volHandle2 := persistentvolume2[0].Spec.CSI.VolumeHandle + pvclaim2, pvs2, pod2 := verifyVolumeRestoreOperation(ctx, client, + namespace, storageclass, volumeSnapshot1, diskSize, true) + volHandle2 := pvs2[0].Spec.CSI.VolumeHandle gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) if guestCluster { volHandle2 = getVolumeIDFromSupervisorCluster(volHandle2) } defer func() { + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod2.Name, namespace)) + err = fpod.DeletePodWithWait(ctx, client, pod2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim2.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle2) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - ginkgo.By("Create Pod") - pod2, err := createPod(ctx, client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim2}, false, - execRWXCommandPod2) + ginkgo.By("Create a dynamic volume snapshot") + volumeSnapshot2, snapshotContent2, snapshotCreated2, + snapshotContentCreated2, snapshotId2, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, + volumeSnapshotClass, pvclaim2, volHandle2, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod2.Name, namespace)) - err = fpod.DeletePodWithWait(ctx, client, pod2) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }() + if snapshotContentCreated2 { + err = 
deleteVolumeSnapshotContent(ctx, snapshotContent2, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } - if vanillaCluster { - vmUUID = getNodeUUID(ctx, client, pod2.Spec.NodeName) - } else if guestCluster { - vmUUID, err = getVMUUIDFromNodeName(pod2.Spec.NodeName) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } + if snapshotCreated2 { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot2.Name, pandoraSyncWaitTime) - ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volHandle2, pod2.Spec.NodeName)) - isDiskAttached, err = e2eVSphere.isVolumeAttachedToVM(client, volHandle2, vmUUID) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot2.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() - ginkgo.By("Verify the volume is accessible and Read/write is possible") - cmd = []string{"exec", pod2.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", - "cat /mnt/volume1/Pod2.html "} - output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) - gomega.Expect(strings.Contains(output, "Hello message from Pod2")).NotTo(gomega.BeFalse()) + ginkgo.By("Restore volume from snapshot created above") + pvclaim3, pvs3, pod3 := verifyVolumeRestoreOperation(ctx, client, + namespace, storageclass, volumeSnapshot2, diskSize, true) + volHandle3 := pvs3[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle3).NotTo(gomega.BeEmpty()) + if guestCluster { + volHandle3 = getVolumeIDFromSupervisorCluster(volHandle3) + } + defer func() { + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod3.Name, namespace)) + err = fpod.DeletePodWithWait(ctx, client, pod3) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - wrtiecmd = []string{"exec", pod2.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", - "echo 'Hello message from test into Pod2' > /mnt/volume1/Pod2.html"} - e2ekubectl.RunKubectlOrDie(namespace, wrtiecmd...) - output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) 
- gomega.Expect(strings.Contains(output, "Hello message from test into Pod2")).NotTo(gomega.BeFalse()) + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim3.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle3) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() ginkgo.By("Delete dynamic volume snapshot") - snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, - volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + snapshotCreated1, snapshotContentCreated1, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot1, pandoraSyncWaitTime, volHandle1, snapshotId1, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By("Delete dynamic volume snapshot") - snapshotCreated1, snapshotContentCreated1, err = deleteVolumeSnapshot(ctx, snapc, namespace, - volumeSnapshot1, pandoraSyncWaitTime, volHandle1, snapshotId1) + snapshotCreated2, snapshotContentCreated2, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot2, pandoraSyncWaitTime, volHandle2, snapshotId2, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -6599,35 +6497,26 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 7. Verify the older data. It should be intact and write new data. 8. Perform cleanup. */ - ginkgo.It("[tkg-snapshot] Detach volume with snapshot", ginkgo.Label(p1, snapshot, tkg, newTest, - stable), func() { + + ginkgo.It("[tkg-snapshot][supervisor-snapshot] TC28Detach volume with "+ + "snapshot", ginkgo.Label(p1, snapshot, tkg, newTest, stable), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - var storageclass *storagev1.StorageClass - var pvclaim *v1.PersistentVolumeClaim - var err error - var snapshotContentCreated = false - var snapshotCreated = false - var vmUUID string - scParameters[svStorageClassName] = storagePolicyName - - ginkgo.By("Get storage class") - storageclass, err = client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) - if !apierrors.IsNotFound(err) { - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } + ginkgo.By("Create storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, scName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create PVC") - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + pvclaim, pvs, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", diskSize, storageclass, true) - volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volHandle := pvs[0].Spec.CSI.VolumeHandle if guestCluster { volHandle = getVolumeIDFromSupervisorCluster(volHandle) } gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) - defer func() { err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -6639,21 +6528,25 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { pod, err := createPod(ctx, client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false, execRWXCommandPod1) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - nodeName := pod.Spec.NodeName - if vanillaCluster { - vmUUID = getNodeUUID(ctx, client, pod.Spec.NodeName) - } else if guestCluster { + + var vmUUID string + var exists bool + + if guestCluster { vmUUID, err = getVMUUIDFromNodeName(pod.Spec.NodeName) 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else if supervisorCluster { + annotations := pod.Annotations + vmUUID, exists = annotations[vmUUIDLabel] + gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("Pod doesn't have %s annotation", vmUUIDLabel)) + _, err := e2eVSphere.getVMByUUID(ctx, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volHandle, nodeName)) - isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, volHandle, vmUUID) + isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, pvs[0].Spec.CSI.VolumeHandle, vmUUID) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") - defer func() { - // Delete POD ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace)) err = fpod.DeletePodWithWait(ctx, client, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -6663,19 +6556,10 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { - if vanillaCluster { - err := snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, - metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - }() - ginkgo.By("Create a dynamic volume snapshot") volumeSnapshot, _, snapshotCreated, - snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, true) - defer func() { if snapshotContentCreated { framework.Logf("Deleting volume snapshot content") @@ -6703,9 +6587,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { volHandle2 = getVolumeIDFromSupervisorCluster(volHandle2) } gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) - defer func() { - // Delete POD ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod2.Name, namespace)) err = fpod.DeletePodWithWait(ctx, client, pod2) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -6718,24 +6600,25 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Delete Dynamic snapshot") snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, - volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }) - // This test verifies the static provisioning workflow in guest cluster. - // - // Test Steps: - // 1. Create FCD with valid storage policy on gc-svc. - // 2. Create Resource quota. - // 3. Create CNS register volume with above created FCD on SVC. - // 4. verify PV, PVC got created , check the bidirectional reference on svc. - // 5. On GC create a PV by pointing volume handle got created by static - // provisioning on gc-svc (in step 4). - // 6. On GC create a PVC pointing to above created PV. - // 7. Wait for PV , PVC to get bound. - // 8. Create POD, verify the status. - // 9. Delete all the above created PV, PVC and resource quota. + /* + This test verifies the static provisioning workflow in guest cluster. + + Test Steps: + 1. Create FCD with valid storage policy on gc-svc. + 2. Create Resource quota. + 3. Create CNS register volume with above created FCD on SVC. + 4. 
verify PV, PVC got created , check the bidirectional reference on svc. + 5. On GC create a PV by pointing volume handle got created by static + provisioning on gc-svc (in step 4). + 6. On GC create a PVC pointing to above created PV. + 7. Wait for PV , PVC to get bound. + 8. Create POD, verify the status. + 9. Delete all the above created PV, PVC and resource quota. + */ ginkgo.It("[tkg-snapshot] Provisioning of static volume on guest cluster using FCD with snapshot "+ "creation", ginkgo.Label(p0, snapshot, tkg, newTest, stable), func() { @@ -6982,7 +6865,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { b) snapshot create/delete workflow c) Restart services */ - ginkgo.It("[tkg-snapshot] Scale up snapshot creation by increasing the volume counts and "+ + ginkgo.It("[tkg-snapshot][supervisor-snapshot] Tc23Scale up snapshot creation by increasing the volume counts and "+ "in between restart services", ginkgo.Label(p1, snapshot, tkg, newTest), func() { ctx, cancel := context.WithCancel(context.Background()) @@ -6994,38 +6877,14 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { pvclaims := make([]*v1.PersistentVolumeClaim, volumeOpsScale) pvclaims2 := make([]*v1.PersistentVolumeClaim, volumeOpsScale) var persistentvolumes []*v1.PersistentVolume - var err error - ginkgo.By("Create storage class and PVC") - if vanillaCluster { - scParameters[scParamDatastoreURL] = datastoreURL - } else if guestCluster { - scParameters[svStorageClassName] = storagePolicyName - } - - curtime := time.Now().Unix() - randomValue := rand.Int() - val := strconv.FormatInt(int64(randomValue), 10) - val = string(val[1:3]) - curtimestring := strconv.FormatInt(curtime, 10) - scName := "snapshot-scale" + curtimestring + val - storageclass, err = createStorageClass(client, scParameters, nil, "", "", false, scName) + ginkgo.By("Create storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, scName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }() ginkgo.By("Create volume snapshot class") volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { - if vanillaCluster { - err = snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, - metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - }() ginkgo.By("Creating PVCs using the Storage Class") framework.Logf("VOLUME_OPS_SCALE is set to %v", volumeOpsScale) @@ -7089,22 +6948,24 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Rebooting VC") err = invokeVCenterReboot(ctx, vcAddress) - isVcRebooted = true + //isVcRebooted = true gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = waitForHostToBeUp(e2eVSphere.Config.Global.VCenterHostname) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Done with reboot") - var essentialServices []string - if vanillaCluster { - essentialServices = []string{spsServiceName, vsanhealthServiceName, vpxdServiceName} - } else if guestCluster { - essentialServices = []string{spsServiceName, vsanhealthServiceName, vpxdServiceName, wcpServiceName} - } - checkVcenterServicesRunning(ctx, vcAddress, essentialServices) + // var essentialServices []string + // if vanillaCluster { + // essentialServices = []string{spsServiceName, 
vsanhealthServiceName, vpxdServiceName} + // } else if guestCluster { + // essentialServices = []string{spsServiceName, vsanhealthServiceName, vpxdServiceName, wcpServiceName} + // } + // checkVcenterServicesRunning(ctx, vcAddress, essentialServices) // After reboot bootstrap() + time.Sleep(15 * time.Minute) + fullSyncWaitTime := 0 if os.Getenv(envFullSyncWaitTime) != "" { @@ -7161,45 +7022,37 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 9. Create new snapshots on restore volume and verify it succeeds 10. Run cleanup: Delete snapshots, restored-volumes, pods */ - ginkgo.It("[tkg-snapshot] Max Snapshots per volume on GC", ginkgo.Label( + + ginkgo.It("[tkg-snapshot][supervisor-snapshot] TC11Max Snapshots per volume on wcp and gc", ginkgo.Label( p1, snapshot, tkg, newTest), func() { + ctx, cancel := context.WithCancel(context.Background()) defer cancel() - var storageclass *storagev1.StorageClass - var pvclaim *v1.PersistentVolumeClaim - var pvclaims []*v1.PersistentVolumeClaim - var err error - var snapshotContentCreated = false - var volumeSnapshots []*snapV1.VolumeSnapshot - var volumeSnapshotContents []*snapV1.VolumeSnapshotContent - if vanillaCluster { - scParameters[scParamDatastoreURL] = datastoreURL - } + var volumeSnapshots []*snapV1.VolumeSnapshot + var snapshotIds []string + snapDeleted := false + noOfSnapshotToCreate := 33 - ginkgo.By("Get storage class and create PVC") - storageclass, err = client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) - if !apierrors.IsNotFound(err) { - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - pvclaim, err = createPVC(ctx, client, namespace, nil, "", storageclass, "") + ginkgo.By("Create storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, scName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }() - ginkgo.By("Expect claim to provision volume successfully") - pvclaims = append(pvclaims, pvclaim) - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, framework.ClaimProvisionTimeout) + ginkgo.By("Create PVC") + pvclaim, persistentVolumes, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, v1.ReadWriteOnce, + diskSize, storageclass, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle + volHandle = persistentVolumes[0].Spec.CSI.VolumeHandle if guestCluster { volHandle = getVolumeIDFromSupervisorCluster(volHandle) } gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) - defer func() { err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -7207,17 +7060,9 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - // Verify using CNS Query API if VolumeID retrieved from PV is present. 
- ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volHandle)) - queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volHandle) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(queryResult.Volumes).ShouldNot(gomega.BeEmpty()) - gomega.Expect(queryResult.Volumes[0].VolumeId.Id).To(gomega.Equal(volHandle)) - ginkgo.By("Create/Get volume snapshot class") volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { if vanillaCluster { err := snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, @@ -7226,76 +7071,34 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { } }() - for i := 0; i < 33; i++ { - ginkgo.By("Create a volume snapshot") - framework.Logf("Creating snapshot no: %d", i+1) - volumeSnapshot, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx, - getVolumeSnapshotSpec(namespace, volumeSnapshotClass.Name, pvclaim.Name), metav1.CreateOptions{}) + for i := 0; i < noOfSnapshotToCreate; i++ { + ginkgo.By(fmt.Sprintf("Creating snapshot no: %d for pvc %s", i+1, pvclaim.Name)) + volumeSnapshot, _, _, _, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, + volumeSnapshotClass, pvclaim, volHandle, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("Volume snapshot name is : %s", volumeSnapshot.Name) - snapshotCreated := true - defer func() { - if snapshotContentCreated { - framework.Logf("Deleting volume snapshot content") - deleteVolumeSnapshotContentWithPandoraWait(ctx, snapc, - *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) - - framework.Logf("Wait till the volume snapshot is deleted") - err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, *volumeSnapshot.Status.BoundVolumeSnapshotContentName) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - - if snapshotCreated { - framework.Logf("Deleting volume snapshot") - deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) - } - }() - - ginkgo.By("Verify volume snapshot is created") - volumeSnapshot, err = waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, volumeSnapshot.Name) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(volumeSnapshot.Status.RestoreSize.Cmp(resource.MustParse(diskSize))).To(gomega.BeZero()) - framework.Logf("VolumeSnapshot Name: %s", volumeSnapshot.Name) volumeSnapshots = append(volumeSnapshots, volumeSnapshot) - - ginkgo.By("Verify volume snapshot content is created") - snapshotContent, err := snapc.SnapshotV1().VolumeSnapshotContents().Get(ctx, - *volumeSnapshot.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotContentCreated = true - gomega.Expect(*snapshotContent.Status.ReadyToUse).To(gomega.BeTrue()) - framework.Logf("VolumeSnapshotContent Name: %s", snapshotContent.Name) - volumeSnapshotContents = append(volumeSnapshotContents, snapshotContent) - - framework.Logf("Get volume snapshot ID from snapshot handle") - snapshotId, err := getVolumeSnapshotIdFromSnapshotHandle(ctx, snapshotContent) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("snapshot Id: %s", snapshotId) - - ginkgo.By("Query CNS and check the volume snapshot entry") - err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - + snapshotIds = append(snapshotIds, snapshotId) } - 
for i := 0; i < 33; i++ { - framework.Logf("Get volume snapshot ID from snapshot handle") - snapshotId, err := getVolumeSnapshotIdFromSnapshotHandle(ctx, volumeSnapshotContents[i]) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By("Deleted volume snapshot is created above") - framework.Logf("VolumeSnapshot Name to be deleted: %s", volumeSnapshots[i].Name) - deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshots[i].Name, pandoraSyncWaitTime) - - framework.Logf("Wait till the volume snapshot is deleted") - framework.Logf("VolumeSnapshotContent Name to be deleted: %s", volumeSnapshotContents[i].Name) - err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, volumeSnapshotContents[i].ObjectMeta.Name) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if !snapDeleted { + for i := 0; i < noOfSnapshotToCreate; i++ { + ginkgo.By("Delete dynamic volume snapshot") + _, _, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshots[i], pandoraSyncWaitTime, volHandle, snapshotIds[i], true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + } + }() - ginkgo.By("Verify snapshot entry is deleted from CNS") - err = verifySnapshotIsDeletedInCNS(volHandle, snapshotId) + for i := 0; i < noOfSnapshotToCreate; i++ { + ginkgo.By("Delete dynamic volume snapshot") + _, _, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshots[i], pandoraSyncWaitTime, volHandle, snapshotIds[i], true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } + snapDeleted = true }) /* @@ -7330,8 +7133,9 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace1.Name, nil, "", + pvclaim, persistentVolumes, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace1.Name, nil, "", diskSize, storageclass, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle if guestCluster { volHandle = getVolumeIDFromSupervisorCluster(volHandle) @@ -7358,7 +7162,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Create a dynamic volume snapshot on GC1") volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, dynamicSnapshotId, err := createDynamicVolumeSnapshot(ctx, namespace1.Name, snapc, + snapshotContentCreated, dynamicSnapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace1.Name, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, true) defer func() { if snapshotCreated { @@ -7476,12 +7280,530 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { ginkgo.By("Delete pre-provisioned snapshot from GC2") staticSnapshotCreated, staticSnapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc1, namespace2.Name, - staticSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + staticSnapshot, pandoraSyncWaitTime, volHandle, snapshotId, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Delete snapshot entries from GC1 in case left") snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace1.Name, - volumeSnapshot, pandoraSyncWaitTime, volHandle, dynamicSnapshotId) + volumeSnapshot, pandoraSyncWaitTime, volHandle, dynamicSnapshotId, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* + Testcase-2 + Volume restore using dynamic snapshot + 1. 
Create a PVC using the storage class (storage policy) tagged to the supervisor namespace + 2. Wait for PVC to reach the Bound state and verify CNS metadata for this created volume. + 3. Create a pod and attach it to the volume created in step #1. + 4. Wait for Pod to reach a running-ready state and write data into the volume. + 5. Get VolumeSnapshotClass "volumesnapshotclass-delete" from supervisor cluster + 6. Create a volume snapshot using the above snapshot class (step #5) and PVC (step #1) as source. + 7. Ensure the snapshot is created, verify using get + 8. Also, verify that VolumeSnapshotContent is auto-created, verify the references to + PVC and volume snapshot on this object + 9. Verify that the VolumeSnapshot has ready-to-use set to True + 10. Verify that the Restore Size set on the snapshot is the same as that of the source volume size + 11. Query the snapshot from the CNS side using volume ID - should pass and return the snapshot entry + 12. Create a new volume using the snapshot created in step #6 and use the same SC. + 13. Ensure the PVC gets provisioned and is Bound + 14. Create a pod and attach it to a restored volume. + 15. Wait for Pod to reach ready running state. + 16. Ensure that the data from a snapshot is available (the file that was written in step #1 should be available) + 17. Also, write new data to the restored volumes and it should succeed + 18. Perform cleanup: Delete Snapshot, Pod, PVC. + */ + + ginkgo.It("[supervisor-snapshot] TC2Volume restore using a "+ + "dynamic snapshot", ginkgo.Label(p0, wcp, snapshot, block, stable), func() { + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ginkgo.By("Create storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, scName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create PVC") + pvclaim, pvs, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", + diskSize, storageclass, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volHandle = pvs[0].Spec.CSI.VolumeHandle + if guestCluster { + volHandle = getVolumeIDFromSupervisorCluster(volHandle) + } + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + defer func() { + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create a Pod using the volume created above and write data into the volume") + pod, err := createPod(ctx, client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false, + execRWXCommandPod1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace)) + err = fpod.DeletePodWithWait(ctx, client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", + pvs[0].Spec.CSI.VolumeHandle, pod.Spec.NodeName)) + + var vmUUID string + var exists bool + + if vanillaCluster { + vmUUID = getNodeUUID(ctx, client, pod.Spec.NodeName) + } else if guestCluster { + vmUUID, err = getVMUUIDFromNodeName(pod.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else if supervisorCluster { + 
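+ // On the supervisor cluster the pod runs as a PodVM, so its VM UUID is read from the pod's vmUUIDLabel annotation and the VM is looked up in vCenter before verifying the disk attachment below.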
annotations := pod.Annotations + vmUUID, exists = annotations[vmUUIDLabel] + gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("Pod doesn't have %s annotation", vmUUIDLabel)) + _, err := e2eVSphere.getVMByUUID(ctx, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, pvs[0].Spec.CSI.VolumeHandle, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") + + ginkgo.By("Verify the volume is accessible and Read/write is possible") + output := readFileFromPod(namespace, pod.Name, filePathPod1) + gomega.Expect(strings.Contains(output, "Hello message from Pod1")).NotTo(gomega.BeFalse()) + + writeDataOnFileFromPod(namespace, pod.Name, filePathPod1, "Hello message from test into Pod1") + output = readFileFromPod(namespace, pod.Name, filePathPod1) + gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) + + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if vanillaCluster { + err = snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, + metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Create a dynamic volume snapshot") + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvclaim, volHandle, diskSize, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + if !guestCluster { + ginkgo.By("Verify volume metadata for deployment pod, pvc and pv") + err = waitAndVerifyCnsVolumeMetadata(ctx, volHandle, pvclaim, pvs[0], pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + ginkgo.By("Create or restore a volume using the dynamically created volume snapshot") + pvclaim2, pvs2, pod2 := verifyVolumeRestoreOperation(ctx, client, namespace, storageclass, + volumeSnapshot, diskSize, true) + volHandle2 := pvs2[0].Spec.CSI.VolumeHandle + if guestCluster { + volHandle2 = getVolumeIDFromSupervisorCluster(volHandle2) + } + defer func() { + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim2.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + defer func() { + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod2.Name, namespace)) + err = fpod.DeletePodWithWait(ctx, client, pod2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + if !guestCluster { + ginkgo.By("Verify volume metadata for deployment pod, pvc and pv") + err = waitAndVerifyCnsVolumeMetadata(ctx, volHandle2, 
pvclaim2, pvs2[0], pod2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* + This test verifies the static provisioning workflow with snapshot on supervisor cluster + + Test Steps: + 1. Create CNS volume and note the volumeID. + 2. Create Resource quota. + 3. Create CNS register volume with above created VolumeID. + 4. Verify created PV, PVC and check the bidirectional reference. + 5. Create Pod with above created PVC. + 6. Verify volume is attached to the node and volume is accessible in the pod. + 7. Delete Pod. + 8. Delete PVC. + 9. Verify PV is deleted automatically. + 10. Verify volume ID is deleted automatically. + 11. Verify CRD is deleted automatically. + */ + + ginkgo.It("[supervisor-snapshot] TC20Verify static provisioning workflow "+ + "with snapshot", ginkgo.Label(p0, block, wcp), func() { + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + curtime := time.Now().Unix() + curtimestring := strconv.FormatInt(curtime, 10) + pvcName := "cns-pvc-" + curtimestring + framework.Logf("pvc name: %s", pvcName) + + restConfig, _, profileID := staticProvisioningPreSetUpUtil(ctx, f, client, storagePolicyName) + + ginkgo.By("Creating FCD (CNS Volume)") + fcdID, err := e2eVSphere.createFCDwithValidProfileID(ctx, "staticfcd"+curtimestring, + profileID, diskSizeInMb, defaultDatastore.Reference()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow newly created FCD:%s to sync with pandora", + pandoraSyncWaitTime, fcdID)) + time.Sleep(time.Duration(pandoraSyncWaitTime) * time.Second) + + ginkgo.By("Create CNS register volume with above created FCD") + cnsRegisterVolume := getCNSRegisterVolumeSpec(ctx, namespace, fcdID, "", pvcName, v1.ReadWriteOnce) + err = createCNSRegisterVolume(ctx, restConfig, cnsRegisterVolume) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(waitForCNSRegisterVolumeToGetCreated(ctx, restConfig, + namespace, cnsRegisterVolume, poll, supervisorClusterOperationsTimeout)) + cnsRegisterVolumeName := cnsRegisterVolume.GetName() + framework.Logf("CNS register volume name : %s", cnsRegisterVolumeName) + + ginkgo.By("Verify created PV, PVC and check the bidirectional reference") + pvc, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvcName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pv := getPvFromClaim(client, namespace, pvcName) + volHandle = pv.Spec.CSI.VolumeHandle + verifyBidirectionalReferenceOfPVandPVC(ctx, client, pvc, pv, fcdID) + + ginkgo.By("Creating pod") + pod, err := createPod(ctx, client, namespace, nil, []*v1.PersistentVolumeClaim{pvc}, false, execRWXCommandPod1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + podName := pod.GetName() + framework.Logf("podName: %s", podName) + + ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", + pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName)) + var vmUUID string + var exists bool + + annotations := pod.Annotations + vmUUID, exists = annotations[vmUUIDLabel] + gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("Pod doesn't have %s annotation", vmUUIDLabel)) + _, err = e2eVSphere.getVMByUUID(ctx, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + isDiskAttached, err :=
e2eVSphere.isVolumeAttachedToVM(client, pv.Spec.CSI.VolumeHandle, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") + + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create a dynamic volume snapshot") + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvc, volHandle, diskSize, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Deleting the pod") + err = fpod.DeletePodWithWait(ctx, client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By(fmt.Sprintf("Verify volume: %s is detached from PodVM with vmUUID: %s", + pv.Spec.CSI.VolumeHandle, vmUUID)) + _, err = e2eVSphere.getVMByUUIDWithWait(ctx, vmUUID, supervisorClusterOperationsTimeout) + gomega.Expect(err).To(gomega.HaveOccurred(), + fmt.Sprintf("PodVM with vmUUID: %s still exists. So volume: %s is not detached from the PodVM", + vmUUID, pv.Spec.CSI.VolumeHandle)) + defer func() { + testCleanUpUtil(ctx, restConfig, client, cnsRegisterVolume, namespace, pvc.Name, pv.Name) + }() + + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* + VolumeSnapshotClass with Delete policy and PV with Retain policy + + Test Steps: + ========== + 1. Read Storage Class "shared-ds-policy-1571". + 2. Create a dynamic PVC using the above Storage Class. + 3. Wait for the PVC/PV to reach the Bound state. + 4. Create a standalone Pod and attach it to the above PVC. + 5. Wait for the Pod to reach the Running state. + 6. Write some data inside the volume through the Pod. + 7. Take a volume snapshot. + 8. Verify the snapshot is created successfully. + 9. Edit the PV reclaim policy from "Delete" to "Retain". + 10. Delete the Pod and verify the deletion succeeds. + 11. Delete the PVC; it should get deleted successfully. + 12. Verify the PV status changes from "Bound" to "Released". + 13. Create a static volume using the released PV. + 14. Wait for static PVC and PV to reach Bound state. + 15. Attach a new Pod to the static PVC. + 16. Write some data through the newly attached Pod. + 17. Take a volume snapshot. + 18. Perform cleanup.
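+ + Note: because the PV reclaim policy is switched to Retain in step 9, deleting the PVC in step 11 only moves the PV to Released while the underlying volume is preserved, which is what allows the same volume to be re-registered as a static PV/PVC pair in steps 13-14.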
+ */ + + ginkgo.It("[supervisor-snapshot] TC30", ginkgo.Label(p0, block, tkg, vanilla, wcp, snapshot, stable), func() { + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ginkgo.By("Create storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, scName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create PVC") + pvclaim, pvs, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", + diskSize, storageclass, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volHandle = pvs[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + defer func() { + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create a Pod using the volume created above and write data into the volume") + pod, err := createPod(ctx, client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false, + execRWXCommandPod1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace)) + err = fpod.DeletePodWithWait(ctx, client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", + pvs[0].Spec.CSI.VolumeHandle, pod.Spec.NodeName)) + + var vmUUID string + var exists bool + + annotations := pod.Annotations + vmUUID, exists = annotations[vmUUIDLabel] + gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("Pod doesn't have %s annotation", vmUUIDLabel)) + _, err = e2eVSphere.getVMByUUID(ctx, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, pvs[0].Spec.CSI.VolumeHandle, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") + + ginkgo.By("Verify the volume is accessible and Read/write is possible") + output := readFileFromPod(namespace, pod.Name, filePathPod1) + gomega.Expect(strings.Contains(output, "Hello message from Pod1")).NotTo(gomega.BeFalse()) + + writeDataOnFileFromPod(namespace, pod.Name, filePathPod1, "Hello message from test into Pod1") + output = readFileFromPod(namespace, pod.Name, filePathPod1) + gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) + + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if vanillaCluster { + err = snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, + metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Create a dynamic volume snapshot") + volumeSnapshot1, snapshotContent1, snapshotCreated1, + snapshotContentCreated1, snapshotId1, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, + volumeSnapshotClass, pvclaim, volHandle, diskSize, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated1 { + err = 
deleteVolumeSnapshotContent(ctx, snapshotContent1, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated1 { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot1.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot1.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Verify volume metadata for deployment pod, pvc and pv") + err = waitAndVerifyCnsVolumeMetadata(ctx, volHandle, pvclaim, pvs[0], pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Edit PV reclaim policy from Delete to Retain") + pvs[0].Spec.PersistentVolumeReclaimPolicy = v1.PersistentVolumeReclaimRetain + pv, err := client.CoreV1().PersistentVolumes().Update(ctx, pvs[0], metav1.UpdateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volumeId := pv.Spec.CSI.VolumeHandle + + ginkgo.By("Delete the PVC") + err = client.CoreV1().PersistentVolumeClaims(namespace).Delete(ctx, pvclaim.Name, + *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("PVC %s is deleted successfully", pvclaim.Name) + + // Verify PV exists and is in Released status + ginkgo.By("Check PV exists and is released") + pv, err = waitForPvToBeReleased(ctx, client, pv.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("PV status after deleting PVC: %s", pv.Status.Phase) + + // Remove claim from PV and check its status. + ginkgo.By("Remove claimRef from PV") + pv.Spec.ClaimRef = nil + pv, err = client.CoreV1().PersistentVolumes().Update(ctx, pv, metav1.UpdateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("PV status after removing claim: %s", pv.Status.Phase) + + staticPVLabels := make(map[string]string) + staticPVLabels["fcd-id"] = volumeId + + ginkgo.By("Creating static PV and PVC from the volume ID") + staticpv := getPersistentVolumeSpec(volumeId, v1.PersistentVolumeReclaimDelete, staticPVLabels, ext4FSType) + staticpv, err = client.CoreV1().PersistentVolumes().Create(ctx, staticpv, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + staticVolumeHandle := staticpv.Spec.CSI.VolumeHandle + + ginkgo.By("Creating the PVC") + staticpvc := getPersistentVolumeClaimSpec(namespace, staticPVLabels, staticpv.Name) + staticpvc, err = client.CoreV1().PersistentVolumeClaims(namespace).Create(ctx, staticpvc, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Wait for PV and PVC to Bind.
+ framework.ExpectNoError(fpv.WaitOnPVandPVC(ctx, client, f.Timeouts, namespace, staticpv, staticpvc)) + + ginkgo.By("Create a Pod using the volume created above and write data into the volume") + pod1, err := createPod(ctx, client, namespace, nil, []*v1.PersistentVolumeClaim{staticpvc}, false, + execRWXCommandPod1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod1.Name, namespace)) + err = fpod.DeletePodWithWait(ctx, client, pod1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", + staticpv.Spec.CSI.VolumeHandle, pod1.Spec.NodeName)) + + annotations = pod1.Annotations + vmUUID, exists = annotations[vmUUIDLabel] + gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("Pod doesn't have %s annotation", vmUUIDLabel)) + _, err = e2eVSphere.getVMByUUID(ctx, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + isDiskAttached, err = e2eVSphere.isVolumeAttachedToVM(client, staticpv.Spec.CSI.VolumeHandle, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") + + ginkgo.By("Verify the volume is accessible and Read/write is possible") + output = readFileFromPod(namespace, pod1.Name, filePathPod1) + gomega.Expect(strings.Contains(output, "Hello message from Pod1")).NotTo(gomega.BeFalse()) + + writeDataOnFileFromPod(namespace, pod1.Name, filePathPod1, "Hello message from test into Pod1") + output = readFileFromPod(namespace, pod1.Name, filePathPod1) + gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) + + ginkgo.By("Create a dynamic volume snapshot") + volumeSnapshot2, snapshotContent2, snapshotCreated2, + snapshotContentCreated2, snapshotId2, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, + volumeSnapshotClass, staticpvc, staticVolumeHandle, diskSize, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated2 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent2, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated2 { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot2.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot2.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Verify volume metadata for deployment pod, pvc and pv") + err = waitAndVerifyCnsVolumeMetadata(ctx, staticVolumeHandle, staticpvc, staticpv, pod1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated1, snapshotContentCreated1, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot1, pandoraSyncWaitTime, volHandle, snapshotId1, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated2, snapshotContentCreated2, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot2, pandoraSyncWaitTime, staticVolumeHandle, snapshotId2, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) }) @@ -7557,8 +7879,9 @@ func invokeSnapshotOperationsOnSharedDatastore(client clientset.Interface, ctx c } 
} ginkgo.By("Create PVC") - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + pvclaim, persistentVolumes, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", diskSize, storageclass, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle if guestCluster { volHandle = getVolumeIDFromSupervisorCluster(volHandle) @@ -7586,7 +7909,7 @@ func invokeSnapshotOperationsOnSharedDatastore(client clientset.Interface, ctx c ginkgo.By("Create a dynamic volume snapshot") volumeSnapshot, _, snapshotCreated, - snapshotContentCreated, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, + snapshotContentCreated, _, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, true) framework.Logf("Volume snapshot name is : %s", volumeSnapshot.Name) @@ -7629,6 +7952,6 @@ func invokeSnapshotOperationsOnSharedDatastore(client clientset.Interface, ctx c ginkgo.By("Delete Dynamic snapshot") snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, - volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } diff --git a/tests/e2e/csi_snapshot_negative.go b/tests/e2e/csi_snapshot_negative.go index 932e90d115..0dbfdb577c 100644 --- a/tests/e2e/csi_snapshot_negative.go +++ b/tests/e2e/csi_snapshot_negative.go @@ -100,7 +100,7 @@ var _ = ginkgo.Describe("[block-snapshot-negative] Volume Snapshot Fault-Injecti } csiNamespace = GetAndExpectStringEnvVar(envCSINamespace) - if guestCluster { + if guestCluster || supervisorCluster { svcClient, svNamespace := getSvcClientAndNamespace() setResourceQuota(svcClient, svNamespace, rqLimit) @@ -248,7 +248,7 @@ var _ = ginkgo.Describe("[block-snapshot-negative] Volume Snapshot Fault-Injecti 6. 
k8s side: csi pod restarts with improved_idempotency enabled as well as run a scenario with improved_idempotency disabled */ - ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] create volume snapshot when "+ + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot][supervisor-snapshot] TC24create volume snapshot when "+ "hostd goes down", ginkgo.Label(p0, block, vanilla, tkg, snapshot, disruptive), func() { serviceName = hostdServiceName @@ -256,7 +256,7 @@ var _ = ginkgo.Describe("[block-snapshot-negative] Volume Snapshot Fault-Injecti csiNamespace, fullSyncWaitTime, isServiceStopped, true, csiReplicas, pandoraSyncWaitTime) }) - ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] create volume snapshot when CSI "+ + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot][supervisor-snapshot] TC241create volume snapshot when CSI "+ "restarts", ginkgo.Label(p0, block, vanilla, tkg, snapshot, disruptive), func() { serviceName = "CSI" @@ -264,7 +264,7 @@ var _ = ginkgo.Describe("[block-snapshot-negative] Volume Snapshot Fault-Injecti csiNamespace, fullSyncWaitTime, isServiceStopped, true, csiReplicas, pandoraSyncWaitTime) }) - ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] create volume snapshot when VPXD "+ + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] TC242create volume snapshot when VPXD "+ "goes down", ginkgo.Label(p0, block, vanilla, tkg, snapshot, disruptive), func() { serviceName = vpxdServiceName @@ -272,7 +272,7 @@ var _ = ginkgo.Describe("[block-snapshot-negative] Volume Snapshot Fault-Injecti csiNamespace, fullSyncWaitTime, isServiceStopped, false, csiReplicas, pandoraSyncWaitTime) }) - ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] create volume snapshot when CNS goes "+ + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] TC243create volume snapshot when CNS goes "+ "down", ginkgo.Label(p0, block, vanilla, tkg, snapshot, disruptive), func() { serviceName = vsanhealthServiceName @@ -280,7 +280,7 @@ var _ = ginkgo.Describe("[block-snapshot-negative] Volume Snapshot Fault-Injecti csiNamespace, fullSyncWaitTime, isServiceStopped, false, csiReplicas, pandoraSyncWaitTime) }) - ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] create volume snapshot when SPS "+ + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] TC244create volume snapshot when SPS "+ "goes down", ginkgo.Label(p0, block, vanilla, tkg, snapshot, disruptive), func() { serviceName = spsServiceName @@ -288,7 +288,7 @@ var _ = ginkgo.Describe("[block-snapshot-negative] Volume Snapshot Fault-Injecti csiNamespace, fullSyncWaitTime, isServiceStopped, true, csiReplicas, pandoraSyncWaitTime) }) - ginkgo.It("[tkg-snapshot] create volume snapshot when SVC CSI restarts", ginkgo.Label(p0, + ginkgo.It("[tkg-snapshot] TC245create volume snapshot when SVC CSI restarts", ginkgo.Label(p0, tkg, snapshot, disruptive, newTest), func() { serviceName = "WCP CSI" @@ -332,8 +332,9 @@ func snapshotOperationWhileServiceDown(serviceName string, namespace string, } ginkgo.By("Create PVC") - pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + pvclaim, persistentVolumes, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", diskSize, storageclass, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle if guestCluster { volHandle = getVolumeIDFromSupervisorCluster(volHandle) @@ -350,6 +351,13 @@ func snapshotOperationWhileServiceDown(serviceName string, namespace string, ginkgo.By("Create/Get volume snapshot class") 
volumeSnapshotClass, err = createVolumeSnapshotClass(ctx, snapc, deletionPolicy) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if vanillaCluster { + err = snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, + metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() ginkgo.By("Create a volume snapshot") snapshot, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx, @@ -447,6 +455,7 @@ func snapshotOperationWhileServiceDown(serviceName string, namespace string, gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow full sync finish", fullSyncWaitTime)) + time.Sleep(time.Duration(fullSyncWaitTime) * time.Second) } else { diff --git a/tests/e2e/csi_snapshot_utils.go b/tests/e2e/csi_snapshot_utils.go index aa86422563..353a9e1256 100644 --- a/tests/e2e/csi_snapshot_utils.go +++ b/tests/e2e/csi_snapshot_utils.go @@ -301,7 +301,7 @@ func getRestConfigClientForGuestCluster(guestClusterRestConfig *rest.Config) *re // deleteVolumeSnapshot deletes volume snapshot from K8s side and CNS side func deleteVolumeSnapshot(ctx context.Context, snapc *snapclient.Clientset, namespace string, volumeSnapshot *snapV1.VolumeSnapshot, pandoraSyncWaitTime int, - volHandle string, snapshotID string) (bool, bool, error) { + volHandle string, snapshotID string, performCnsQueryVolumeSnapshot bool) (bool, bool, error) { var err error framework.Logf("Delete volume snapshot and verify the snapshot content is deleted") @@ -315,16 +315,18 @@ func deleteVolumeSnapshot(ctx context.Context, snapc *snapclient.Clientset, name } snapshotContentCreated := false - framework.Logf("Verify snapshot entry %v is deleted from CNS for volume %v", snapshotID, volHandle) - err = waitForCNSSnapshotToBeDeleted(volHandle, snapshotID) - if err != nil { - return snapshotCreated, snapshotContentCreated, err - } + if performCnsQueryVolumeSnapshot { + framework.Logf("Verify snapshot entry %v is deleted from CNS for volume %v", snapshotID, volHandle) + err = waitForCNSSnapshotToBeDeleted(volHandle, snapshotID) + if err != nil { + return snapshotCreated, snapshotContentCreated, err + } - framework.Logf("Verify snapshot entry is deleted from CNS") - err = verifySnapshotIsDeletedInCNS(volHandle, snapshotID) - if err != nil { - return snapshotCreated, snapshotContentCreated, err + framework.Logf("Verify snapshot entry is deleted from CNS") + err = verifySnapshotIsDeletedInCNS(volHandle, snapshotID) + if err != nil { + return snapshotCreated, snapshotContentCreated, err + } } framework.Logf("Deleting volume snapshot again to check 'Not found' error") @@ -335,20 +337,21 @@ func deleteVolumeSnapshot(ctx context.Context, snapc *snapclient.Clientset, name // getVolumeSnapshotIdFromSnapshotHandle fetches VolumeSnapshotId From SnapshotHandle func getVolumeSnapshotIdFromSnapshotHandle(ctx context.Context, - snapshotContent *snapV1.VolumeSnapshotContent) (string, error) { + snapshotContent *snapV1.VolumeSnapshotContent) (string, string, error) { var snapshotID string + var snapshotHandle string var err error - if vanillaCluster { - snapshotHandle := *snapshotContent.Status.SnapshotHandle + if vanillaCluster || supervisorCluster { + snapshotHandle = *snapshotContent.Status.SnapshotHandle snapshotID = strings.Split(snapshotHandle, "+")[1] } else if guestCluster { - snapshotHandle := *snapshotContent.Status.SnapshotHandle + snapshotHandle = *snapshotContent.Status.SnapshotHandle snapshotID, _, _, err = 
getSnapshotHandleFromSupervisorCluster(ctx, snapshotHandle) if err != nil { - return "", err + return snapshotID, snapshotHandle, err } } - return snapshotID, nil + return snapshotID, snapshotHandle, nil } // createVolumeSnapshotClass creates VSC for a Vanilla cluster and @@ -399,53 +402,54 @@ func createDynamicVolumeSnapshot(ctx context.Context, namespace string, snapc *snapclient.Clientset, volumeSnapshotClass *snapV1.VolumeSnapshotClass, pvclaim *v1.PersistentVolumeClaim, volHandle string, diskSize string, performCnsQueryVolumeSnapshot bool) (*snapV1.VolumeSnapshot, - *snapV1.VolumeSnapshotContent, bool, bool, string, error) { + *snapV1.VolumeSnapshotContent, bool, bool, string, string, error) { volumeSnapshot, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx, getVolumeSnapshotSpec(namespace, volumeSnapshotClass.Name, pvclaim.Name), metav1.CreateOptions{}) if err != nil { - return nil, nil, false, false, "", err + return volumeSnapshot, nil, false, false, "", "", err } framework.Logf("Volume snapshot name is : %s", volumeSnapshot.Name) ginkgo.By("Verify volume snapshot is created") volumeSnapshot, err = waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, volumeSnapshot.Name) if err != nil { - return nil, nil, false, false, "", err + return volumeSnapshot, nil, false, false, "", "", err } snapshotCreated := true if volumeSnapshot.Status.RestoreSize.Cmp(resource.MustParse(diskSize)) != 0 { - return nil, nil, false, false, "", fmt.Errorf("unexpected restore size") + return volumeSnapshot, nil, false, false, "", "", fmt.Errorf("unexpected restore size") } ginkgo.By("Verify volume snapshot content is created") snapshotContent, err := snapc.SnapshotV1().VolumeSnapshotContents().Get(ctx, *volumeSnapshot.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{}) if err != nil { - return nil, nil, false, false, "", err + return volumeSnapshot, snapshotContent, false, false, "", "", err } snapshotContentCreated := true snapshotContent, err = waitForVolumeSnapshotContentReadyToUse(*snapc, ctx, snapshotContent.Name) if err != nil { - return nil, nil, false, false, "", fmt.Errorf("volume snapshot content is not ready to use") + return volumeSnapshot, snapshotContent, false, false, "", "", + fmt.Errorf("volume snapshot content is not ready to use") } framework.Logf("Get volume snapshot ID from snapshot handle") - snapshotId, err := getVolumeSnapshotIdFromSnapshotHandle(ctx, snapshotContent) + snapshotId, snapshotHandle, err := getVolumeSnapshotIdFromSnapshotHandle(ctx, snapshotContent) if err != nil { - return nil, nil, false, false, "", err + return volumeSnapshot, snapshotContent, false, false, snapshotId, "", err } if performCnsQueryVolumeSnapshot { ginkgo.By("Query CNS and check the volume snapshot entry") err = waitForCNSSnapshotToBeCreated(volHandle, snapshotId) if err != nil { - return nil, nil, false, false, snapshotId, err + return volumeSnapshot, snapshotContent, false, false, snapshotId, "", err } } - return volumeSnapshot, snapshotContent, snapshotCreated, snapshotContentCreated, snapshotId, nil + return volumeSnapshot, snapshotContent, snapshotCreated, snapshotContentCreated, snapshotId, snapshotHandle, nil } // getPersistentVolumeClaimSpecWithDatasource return the PersistentVolumeClaim @@ -636,6 +640,7 @@ func verifyVolumeRestoreOperation(ctx context.Context, client clientset.Interfac gomega.Expect(err).NotTo(gomega.HaveOccurred()) var vmUUID string + var exists bool nodeName := pod.Spec.NodeName if vanillaCluster { @@ -643,6 +648,12 @@ func 
verifyVolumeRestoreOperation(ctx context.Context, client clientset.Interfac } else if guestCluster { vmUUID, err = getVMUUIDFromNodeName(pod.Spec.NodeName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else if supervisorCluster { + annotations := pod.Annotations + vmUUID, exists = annotations[vmUUIDLabel] + gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("Pod doesn't have %s annotation", vmUUIDLabel)) + _, err := e2eVSphere.getVMByUUID(ctx, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volHandle2, nodeName)) @@ -657,7 +668,7 @@ func verifyVolumeRestoreOperation(ctx context.Context, client clientset.Interfac gomega.Expect(strings.Contains(output, "Hello message from Pod1")).NotTo(gomega.BeFalse()) wrtiecmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", - "echo 'Hello message from test into Pod1' > /mnt/volume1/Pod1.html"} + "echo 'Hello message from test into Pod1' >> /mnt/volume1/Pod1.html"} e2ekubectl.RunKubectlOrDie(namespace, wrtiecmd...) output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) @@ -671,29 +682,43 @@ func verifyVolumeRestoreOperation(ctx context.Context, client clientset.Interfac func createPVCAndQueryVolumeInCNS(ctx context.Context, client clientset.Interface, namespace string, pvclaimLabels map[string]string, accessMode v1.PersistentVolumeAccessMode, ds string, storageclass *storagev1.StorageClass, - verifyCNSVolume bool) (*v1.PersistentVolumeClaim, []*v1.PersistentVolume) { + verifyCNSVolume bool) (*v1.PersistentVolumeClaim, []*v1.PersistentVolume, error) { + + // Create PVC pvclaim, err := createPVC(ctx, client, namespace, pvclaimLabels, ds, storageclass, accessMode) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if err != nil { + return pvclaim, nil, fmt.Errorf("failed to create PVC: %w", err) + } - ginkgo.By("Expect claim to provision volume successfully") + // Wait for PVC to be bound to a PV persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout*2) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if err != nil { + return pvclaim, persistentvolumes, fmt.Errorf("failed to wait for PVC to bind to a PV: %w", err) + } + + // Get VolumeHandle from the PV volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle if guestCluster { volHandle = getVolumeIDFromSupervisorCluster(volHandle) } - gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + if volHandle == "" { + return pvclaim, persistentvolumes, fmt.Errorf("volume handle is empty") + } + // Verify the volume in CNS if required if verifyCNSVolume { - // Verify using CNS Query API if VolumeID retrieved from PV is present. 
ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volHandle)) queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volHandle) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(queryResult.Volumes).ShouldNot(gomega.BeEmpty()) - gomega.Expect(queryResult.Volumes[0].VolumeId.Id).To(gomega.Equal(volHandle)) + if err != nil { + return pvclaim, persistentvolumes, fmt.Errorf("failed to query CNS volume: %w", err) + } + if len(queryResult.Volumes) == 0 || queryResult.Volumes[0].VolumeId.Id != volHandle { + return pvclaim, persistentvolumes, fmt.Errorf("CNS query returned unexpected result") + } } - return pvclaim, persistentvolumes + + return pvclaim, persistentvolumes, nil } // waitForVolumeSnapshotContentReadyToUse waits for the volume's snapshot content to be in ReadyToUse @@ -748,3 +773,77 @@ func getRestConfigClientForGuestCluster2(guestClusterRestConfig *rest.Config) *r } return guestClusterRestConfig } + +// createDynamicVolumeSnapshot util creates dynamic volume snapshot for a volume +func createDynamicVolumeSnapshotWithoutSnapClass(ctx context.Context, namespace string, + snapc *snapclient.Clientset, volumeSnapshotClass *snapV1.VolumeSnapshotClass, + pvclaim *v1.PersistentVolumeClaim, volHandle string, diskSize string, + performCnsQueryVolumeSnapshot bool) (*snapV1.VolumeSnapshot, + *snapV1.VolumeSnapshotContent, bool, bool, string, string, error) { + + volumeSnapshot, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx, + getVolumeSnapshotSpecWithoutSC(namespace, pvclaim.Name), metav1.CreateOptions{}) + if err != nil { + return volumeSnapshot, nil, false, false, "", "", err + } + framework.Logf("Volume snapshot name is : %s", volumeSnapshot.Name) + + ginkgo.By("Verify volume snapshot is created") + volumeSnapshot, err = waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, volumeSnapshot.Name) + if err != nil { + return volumeSnapshot, nil, false, false, "", "", err + } + + snapshotCreated := true + if volumeSnapshot.Status.RestoreSize.Cmp(resource.MustParse(diskSize)) != 0 { + return volumeSnapshot, nil, false, false, "", "", fmt.Errorf("unexpected restore size") + } + + ginkgo.By("Verify volume snapshot content is created") + snapshotContent, err := snapc.SnapshotV1().VolumeSnapshotContents().Get(ctx, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{}) + if err != nil { + return volumeSnapshot, snapshotContent, false, false, "", "", err + } + snapshotContentCreated := true + snapshotContent, err = waitForVolumeSnapshotContentReadyToUse(*snapc, ctx, snapshotContent.Name) + if err != nil { + return volumeSnapshot, snapshotContent, false, false, "", "", + fmt.Errorf("volume snapshot content is not ready to use") + } + + framework.Logf("Get volume snapshot ID from snapshot handle") + snapshotId, snapshotHandle, err := getVolumeSnapshotIdFromSnapshotHandle(ctx, snapshotContent) + if err != nil { + return volumeSnapshot, snapshotContent, false, false, snapshotId, "", err + } + + if performCnsQueryVolumeSnapshot { + ginkgo.By("Query CNS and check the volume snapshot entry") + err = waitForCNSSnapshotToBeCreated(volHandle, snapshotId) + if err != nil { + return volumeSnapshot, snapshotContent, false, false, snapshotId, "", err + } + } + + return volumeSnapshot, snapshotContent, snapshotCreated, snapshotContentCreated, snapshotId, snapshotHandle, nil +} + +// getVolumeSnapshotSpecWithoutSC returns a spec for the volume snapshot without using snapshot class +func getVolumeSnapshotSpecWithoutSC(namespace string, 
pvcName string) *snapV1.VolumeSnapshot { + var volumesnapshotSpec = &snapV1.VolumeSnapshot{ + TypeMeta: metav1.TypeMeta{ + Kind: "VolumeSnapshot", + }, + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "snapshot-", + Namespace: namespace, + }, + Spec: snapV1.VolumeSnapshotSpec{ + Source: snapV1.VolumeSnapshotSource{ + PersistentVolumeClaimName: &pvcName, + }, + }, + } + return volumesnapshotSpec +} diff --git a/tests/e2e/e2e_common.go b/tests/e2e/e2e_common.go index 5e5b7650db..acab02f44e 100644 --- a/tests/e2e/e2e_common.go +++ b/tests/e2e/e2e_common.go @@ -250,10 +250,11 @@ const ( vsphereClusterIdConfigMapName = "vsphere-csi-cluster-id" authAPI = "https://console.cloud.vmware.com/csp/gateway/am/api/auth" + "/api-tokens/authorize" - storagePolicyQuota = "-storagepolicyquota" - podVMOnStretchedSupervisor = "stretched-svc" - stretchedSVCTopologyLevels = 1 - envZonalStoragePolicyName2 = "ZONAL2_STORAGECLASS" + storagePolicyQuota = "-storagepolicyquota" + podVMOnStretchedSupervisor = "stretched-svc" + stretchedSVCTopologyLevels = 1 + envZonalStoragePolicyName2 = "ZONAL2_STORAGECLASS" + envStoragePolicyNameForVsanNfsDatastores = "STORAGE_POLICY_FOR_VSAN_NFS_DATASTORES" ) /* diff --git a/tests/e2e/multi_vc.go b/tests/e2e/multi_vc.go index 71c0854c71..043dbdbea7 100644 --- a/tests/e2e/multi_vc.go +++ b/tests/e2e/multi_vc.go @@ -937,7 +937,7 @@ var _ = ginkgo.Describe("[multivc-positive] MultiVc-Topology-Positive", func() { ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") for i := 0; i < len(podList); i++ { err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, podList[i], - namespace, allowedTopologies) + allowedTopologies) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -1373,7 +1373,7 @@ var _ = ginkgo.Describe("[multivc-positive] MultiVc-Topology-Positive", func() { ginkgo.By("Verify PV node affinity and that the PODS are running " + "on appropriate node as specified in the allowed topologies of SC") err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, - namespace, allowedTopologies) + allowedTopologies) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -1983,7 +1983,7 @@ var _ = ginkgo.Describe("[multivc-positive] MultiVc-Topology-Positive", func() { ginkgo.By("Verify pv and pod node affinity") err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, - namespace, allowedTopologies) + allowedTopologies) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) diff --git a/tests/e2e/multi_vc_preferential_topology.go b/tests/e2e/multi_vc_preferential_topology.go index bf1d653cfa..fac762562c 100644 --- a/tests/e2e/multi_vc_preferential_topology.go +++ b/tests/e2e/multi_vc_preferential_topology.go @@ -630,7 +630,7 @@ var _ = ginkgo.Describe("[multivc-preferential] MultiVc-Preferential", func() { ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, - namespace, allowedTopologies) + allowedTopologies) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) diff --git a/tests/e2e/preferential_topology.go b/tests/e2e/preferential_topology.go index 8f9f1710bd..b4f41005de 100644 --- a/tests/e2e/preferential_topology.go +++ b/tests/e2e/preferential_topology.go @@ -374,7 +374,7 @@ var _ = ginkgo.Describe("[preferential-positive] Preferential-Topology-Positive" ginkgo.By("Verify PV node affinity and that the PODS are running 
on appropriate " + "node as specified in the allowed topologies of SC") - err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, namespace, + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, allowedTopologyForRack1) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -1173,7 +1173,7 @@ var _ = ginkgo.Describe("[preferential-positive] Preferential-Topology-Positive" ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate " + "node as specified in the allowed topologies of SC") - err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, namespace, + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, allowedTopologyForRack1) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1348,7 +1348,7 @@ var _ = ginkgo.Describe("[preferential-positive] Preferential-Topology-Positive" ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate " + "node as specified in the allowed topologies of SC") - err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, namespace, + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, allowedTopologyForRack3) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -2022,7 +2022,7 @@ var _ = ginkgo.Describe("[preferential-positive] Preferential-Topology-Positive" ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate " + "node as specified in the allowed topologies of SC") - err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, namespace, + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, allowedTopologyForRack2) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -2278,7 +2278,7 @@ var _ = ginkgo.Describe("[preferential-positive] Preferential-Topology-Positive" ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate " + "node as specified in the allowed topologies of SC") - err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, namespace, + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, allowedTopologies) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -2398,7 +2398,7 @@ var _ = ginkgo.Describe("[preferential-positive] Preferential-Topology-Positive" ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate " + "node as specified in the allowed topologies of SC") - err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, namespace, + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, allowedTopologyForRack2) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -2638,7 +2638,7 @@ var _ = ginkgo.Describe("[preferential-positive] Preferential-Topology-Positive" ginkgo.By("Verify pv and pod node affinity details for pv-1/pod-1 and pv-2/pod-2") for i := 0; i < len(podList); i++ { err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, podList[i], - namespace, allowedTopologyForRack2) + allowedTopologyForRack2) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }) diff --git a/tests/e2e/preferential_topology_disruptive.go b/tests/e2e/preferential_topology_disruptive.go index 9762a38400..065ea7ad2f 100644 --- a/tests/e2e/preferential_topology_disruptive.go +++ b/tests/e2e/preferential_topology_disruptive.go @@ -634,7 +634,7 @@ var _ = 
ginkgo.Describe("[preferential-disruptive] Preferential-Topology-Disrupt ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate " + "node as specified in the allowed topologies of SC") - err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, namespace, + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, allowedTopologyForRack2) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -867,7 +867,7 @@ var _ = ginkgo.Describe("[preferential-disruptive] Preferential-Topology-Disrupt ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate " + "node as specified in the allowed topologies of SC") - err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, namespace, + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, allowedTopologyForRack1) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1098,7 +1098,7 @@ var _ = ginkgo.Describe("[preferential-disruptive] Preferential-Topology-Disrupt ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate " + "node as specified in the allowed topologies of SC") - err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, namespace, + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, allowedTopologyForRack2) gomega.Expect(err).NotTo(gomega.HaveOccurred()) diff --git a/tests/e2e/preferential_topology_snapshot.go b/tests/e2e/preferential_topology_snapshot.go index ae56ee5757..b31da8f31c 100644 --- a/tests/e2e/preferential_topology_snapshot.go +++ b/tests/e2e/preferential_topology_snapshot.go @@ -339,7 +339,7 @@ var _ = ginkgo.Describe("[preferential-snapshot] Preferential-Topology-Snapshot" ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, namespace, + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, allowedTopologyForRack1) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -687,7 +687,7 @@ var _ = ginkgo.Describe("[preferential-snapshot] Preferential-Topology-Snapshot" "appropriate node as specified in the allowed topologies of SC") for i := 0; i < len(podList); i++ { err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, podList[i], - namespace, allowedTopologies) + allowedTopologies) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -746,7 +746,7 @@ var _ = ginkgo.Describe("[preferential-snapshot] Preferential-Topology-Snapshot" ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod3, namespace, + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod3, allowedTopologies) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -841,7 +841,7 @@ var _ = ginkgo.Describe("[preferential-snapshot] Preferential-Topology-Snapshot" ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod4, namespace, + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod4, allowedTopologies) 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -894,7 +894,7 @@ var _ = ginkgo.Describe("[preferential-snapshot] Preferential-Topology-Snapshot" ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod5, namespace, + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod5, allowedTopologies) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) diff --git a/tests/e2e/raw_block_volume.go b/tests/e2e/raw_block_volume.go index 9bd2c7acd9..6e698933de 100644 --- a/tests/e2e/raw_block_volume.go +++ b/tests/e2e/raw_block_volume.go @@ -1203,7 +1203,7 @@ var _ = ginkgo.Describe("raw block volume support", func() { ginkgo.By("Create a dynamic volume snapshot") volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvc1, volumeID, diskSize, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -1301,7 +1301,7 @@ var _ = ginkgo.Describe("raw block volume support", func() { ginkgo.By("Delete dyanmic volume snapshot") snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, - volumeSnapshot, pandoraSyncWaitTime, volumeID, snapshotId) + volumeSnapshot, pandoraSyncWaitTime, volumeID, snapshotId, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) }) diff --git a/tests/e2e/snapshot_stretched_supervisor.go b/tests/e2e/snapshot_stretched_supervisor.go new file mode 100644 index 0000000000..49f7364008 --- /dev/null +++ b/tests/e2e/snapshot_stretched_supervisor.go @@ -0,0 +1,1158 @@ +/* +Copyright 2024 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package e2e + +import ( + "context" + "fmt" + "os" + "strconv" + "strings" + "time" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + cnstypes "github.com/vmware/govmomi/cns/types" + "github.com/vmware/govmomi/find" + "github.com/vmware/govmomi/object" + + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientset "k8s.io/client-go/kubernetes" + restclient "k8s.io/client-go/rest" + "k8s.io/kubernetes/test/e2e/framework" + fnodes "k8s.io/kubernetes/test/e2e/framework/node" + fpod "k8s.io/kubernetes/test/e2e/framework/pod" + fpv "k8s.io/kubernetes/test/e2e/framework/pv" + fss "k8s.io/kubernetes/test/e2e/framework/statefulset" + admissionapi "k8s.io/pod-security-admission/api" + + snapclient "github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" +) + +var _ = ginkgo.Describe("Stretched-Supervisor-Snapshot", func() { + f := framework.NewDefaultFramework("volume-snapshot") + f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged + var ( + client clientset.Interface + namespace string + scParameters map[string]string + pandoraSyncWaitTime int + restConfig *restclient.Config + snapc *snapclient.Clientset + labels_ns map[string]string + labelsMap map[string]string + zonalPolicy string + zonalWffcPolicy string + allowedTopologies []v1.TopologySelectorLabelRequirement + defaultDatastore *object.Datastore + datacenters []string + defaultDatacenter *object.Datacenter + datastoreURL string + ) + + ginkgo.BeforeEach(func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + bootstrap() + client = f.ClientSet + namespace = getNamespaceToRunTests(f) + + // parameters set for storage policy + scParameters = make(map[string]string) + + // fetching node list and checking node status + nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) + framework.ExpectNoError(err, "Unable to find ready and schedulable Node") + if !(len(nodeList.Items) > 0) { + framework.Failf("Unable to find ready and schedulable Node") + } + + // delete nginx service + service, err := client.CoreV1().Services(namespace).Get(ctx, servicename, metav1.GetOptions{}) + if err == nil && service != nil { + deleteService(namespace, client, service) + } + + // Get snapshot client using the rest config + restConfig = getRestConfigClient() + snapc, err = snapclient.NewForConfig(restConfig) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // reading fullsync wait time + if os.Getenv(envPandoraSyncWaitTime) != "" { + pandoraSyncWaitTime, err = strconv.Atoi(os.Getenv(envPandoraSyncWaitTime)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + pandoraSyncWaitTime = defaultPandoraSyncWaitTime + } + + // required for pod creation + labels_ns = map[string]string{} + labels_ns[admissionapi.EnforceLevelLabel] = string(admissionapi.LevelPrivileged) + labels_ns["e2e-framework"] = f.BaseName + + //setting map values + labelsMap = make(map[string]string) + labelsMap["app"] = "test" + + // reading topology labels + topologyHaMap := GetAndExpectStringEnvVar(topologyHaMap) + _, categories := createTopologyMapLevel5(topologyHaMap) + allowedTopologies = createAllowedTopolgies(topologyHaMap) + allowedTopologyHAMap := createAllowedTopologiesMap(allowedTopologies) + framework.Logf("Topology map: %v, categories: %v", allowedTopologyHAMap, categories) + + // reading shared datastore url + 
datastoreURL = GetAndExpectStringEnvVar(envSharedDatastoreURL) + + // reading policies + zonalPolicy = GetAndExpectStringEnvVar(envZonalStoragePolicyName) + if zonalPolicy == "" { + ginkgo.Fail(envZonalStoragePolicyName + " env variable not set") + } + zonalWffcPolicy = GetAndExpectStringEnvVar(envZonalWffcStoragePolicyName) + if zonalWffcPolicy == "" { + ginkgo.Fail(envZonalWffcStoragePolicyName + " env variable not set") + } + framework.Logf("zonal policy: %s and zonal wffc policy: %s", zonalPolicy, zonalWffcPolicy) + + // fetching default datastore + finder := find.NewFinder(e2eVSphere.Client.Client, false) + cfg, err := getConfig() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + dcList := strings.Split(cfg.Global.Datacenters, ",") + for _, dc := range dcList { + dcName := strings.TrimSpace(dc) + if dcName != "" { + datacenters = append(datacenters, dcName) + } + } + for _, dc := range datacenters { + defaultDatacenter, err = finder.Datacenter(ctx, dc) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + finder.SetDatacenter(defaultDatacenter) + defaultDatastore, err = getDatastoreByURL(ctx, datastoreURL, defaultDatacenter) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }) + + ginkgo.AfterEach(func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ginkgo.By(fmt.Sprintf("Deleting all statefulsets in namespace: %v", namespace)) + fss.DeleteAllStatefulSets(ctx, client, namespace) + ginkgo.By(fmt.Sprintf("Deleting service nginx in namespace: %v", namespace)) + err := client.CoreV1().Services(namespace).Delete(ctx, servicename, *metav1.NewDeleteOptions(0)) + if !apierrors.IsNotFound(err) { + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + if supervisorCluster { + dumpSvcNsEventsOnTestFailure(client, namespace) + } + + service, err := client.CoreV1().Services(namespace).Get(ctx, servicename, metav1.GetOptions{}) + if err == nil && service != nil { + deleteService(namespace, client, service) + } + }) + + /* + Testcase-1 + ZonalPolicy → immediateBindingMode + Workflow Path: PVC → Pod → Snapshot → RestoreVol → Pod + + 1. SVC should list two storage classes + a) ZonalPolicy-immediateBindingMode + b) ZonalPolicy-lateBinding (WFFC) + 2. Create PVC using the storage class (ZonalPolicy-immediateBindingMode) + 3. Wait for PVC to reach the Bound state. + 4. Create a Pod using the PVC created in step #13 + 5. Wait for Pod to reach the Running state. Write data into the volume. + 6. Describe PV and verify the node affinity details should show up as Ex: topology.kubernetes.io/zone in [zone-2] + 7. Make sure Pod is scheduled on appropriate nodes preset in the availability zone + 8. Get VolumeSnapshotClass "volumesnapshotclass-delete" from supervisor cluster + 9. Create a volume snapshot for the PVC created in step #3. + 10. Snapshot Verification: Execute and verify the steps mentioned in the Create snapshot mandatory checks + 11. Create a PVC using the volume snapshot in step #10. + 12. Wait for PVC to reach the Bound state. + 13. Create a new Pod and attach it to the volume created in step #12. + 14. Wait for Pod to reach the Running state. Verify reading/writing data into the volume + 15. Describe PV and verify the node affinity details should show up. + 16. Make sure Pod is scheduled on appropriate nodes preset in the availability zone + 17. 
Cleanup: Execute and verify the steps mentioned in the Delete snapshot mandatory checks + */ + + ginkgo.It("Snapshot worklow verification with immediate binding mode", ginkgo.Label(p0, wcp, block, newTest), func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + scParameters[svStorageClassName] = zonalPolicy + + ginkgo.By("Create storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, zonalPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create PVC") + pvclaim, pvs, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", + diskSize, storageclass, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volHandle := pvs[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + + ginkgo.By("Create a Pod using the volume created above and write data into the volume") + pod, err := createPod(ctx, client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false, + execRWXCommandPod1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace)) + err = fpod.DeletePodWithWait(ctx, client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", + pvs[0].Spec.CSI.VolumeHandle, pod.Spec.NodeName)) + annotations := pod.Annotations + vmUUID, exists := annotations[vmUUIDLabel] + gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("Pod doesn't have %s annotation", vmUUIDLabel)) + _, err = e2eVSphere.getVMByUUID(ctx, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, pvs[0].Spec.CSI.VolumeHandle, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") + + ginkgo.By("Verify the volume is accessible and Read/write is possible") + output := readFileFromPod(namespace, pod.Name, filePathPod1) + gomega.Expect(strings.Contains(output, "Hello message from Pod1")).NotTo(gomega.BeFalse()) + + writeDataOnFileFromPod(namespace, pod.Name, filePathPod1, "Hello message from test into Pod1") + output = readFileFromPod(namespace, pod.Name, filePathPod1) + gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) + + ginkgo.By("Get volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create volume snapshot") + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvclaim, volHandle, diskSize, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the 
volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Verify volume metadata for deployment pod, pvc and pv") + err = waitAndVerifyCnsVolumeMetadata(ctx, volHandle, pvclaim, pvs[0], pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify pv and node affinity") + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, allowedTopologies) + + ginkgo.By("Restore snapshot to create a new volume") + pvclaim2, pvs2, pod2 := verifyVolumeRestoreOperation(ctx, client, namespace, storageclass, + volumeSnapshot, diskSize, true) + volHandle2 := pvs2[0].Spec.CSI.VolumeHandle + defer func() { + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod2.Name, namespace)) + err = fpod.DeletePodWithWait(ctx, client, pod2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim2.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Verify volume metadata for newly created volume and workload") + err = waitAndVerifyCnsVolumeMetadata(ctx, volHandle2, pvclaim2, pvs2[0], pod2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify pv and node affinity for newly created workload") + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod2, allowedTopologies) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Delete volume snapshot") + snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* + Testcase-2 + ZonalPolicy → Late Binding + Workflow Path: PVC → Pod → Snapshot → RestoreVol → Pod + + Note: Restore snapshot is not supported with Late Binding mode + + 1. SVC should list two storage classes + a) ZonalPolicy-immediateBindingMode + b) ZonalPolicy-lateBinding (WFFC) + 2. Create PVC using the storage class (ZonalPolicy-lateBinding). + 3. Create a Pod using the PVC created above. + 4. Verify PVC reaches the Bound state. + 5. Wait for Pod to reach the Running state. Write data into the volume. + 6. Describe PV and verify the node affinity details should show up as Ex: topology.kubernetes.io/zone in [zone-2] + 7. Make sure Pod is scheduled on appropriate nodes preset in the availability zone + 8. Get VolumeSnapshotClass "volumesnapshotclass-delete" from supervisor cluster. + 9. Create a volume snapshot for the PVC created in step #2 + 10. Snapshot Verification: Execute and verify the steps mentioned in the Create snapshot mandatory checks + 11. Create a PVC using the volume snapshot created in step #9. + 12. Wait for PVC to reach the Bound state. + 13. Create a new Pod and attach it to the volume created in step #11. + 14. Wait for Pod to reach the Running state. Verify reading/writing data into the volume + 15. Describe PV and verify the node affinity details should show up as Ex: topology.kubernetes.io/zone in [zone-2] + 16. Make sure Pod is scheduled on appropriate nodes preset in the availability zone + 17. 
Cleanup: Execute and verify the steps mentioned in the Delete snapshot mandatory checks + */ + + ginkgo.It("SS2", ginkgo.Label(p0, wcp, core), func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + scParameters[svStorageClassName] = zonalWffcPolicy + + ginkgo.By("Create storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, zonalWffcPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create PVC") + _, pvclaims, err := createStorageClassWithMultiplePVCs(client, namespace, labelsMap, + scParameters, diskSize, nil, "", false, "", "", storageclass, 1, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create a Pod using the volume created above and write data into the volume") + pod, err := createPod(ctx, client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaims[0]}, false, + execRWXCommandPod1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify PVC Bound state and CNS side verification") + pvs, err := checkVolumeStateAndPerformCnsVerification(ctx, client, pvclaims, "", "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volHandle := pvs[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + defer func() { + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace)) + err = fpod.DeletePodWithWait(ctx, client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaims[0].Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", + pvs[0].Spec.CSI.VolumeHandle, pod.Spec.NodeName)) + annotations := pod.Annotations + vmUUID, exists := annotations[vmUUIDLabel] + gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("Pod doesn't have %s annotation", vmUUIDLabel)) + _, err = e2eVSphere.getVMByUUID(ctx, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, pvs[0].Spec.CSI.VolumeHandle, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") + + ginkgo.By("Verify the volume is accessible and Read/write is possible") + output := readFileFromPod(namespace, pod.Name, filePathPod1) + gomega.Expect(strings.Contains(output, "Hello message from Pod1")).NotTo(gomega.BeFalse()) + + writeDataOnFileFromPod(namespace, pod.Name, filePathPod1, "Hello message from test into Pod1") + output = readFileFromPod(namespace, pod.Name, filePathPod1) + gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) + + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create a dynamic volume snapshot") + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvclaims[0], volHandle, diskSize, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) 
+ } + + if snapshotCreated { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Verify volume metadata for deployment pod, pvc and pv") + err = waitAndVerifyCnsVolumeMetadata(ctx, volHandle, pvclaims[0], pvs[0], pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify pv and node affinity") + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, allowedTopologies) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create PVC from snapshot") + pvcSpec := getPersistentVolumeClaimSpecWithDatasource(namespace, diskSize, storageclass, nil, + v1.ReadWriteOnce, volumeSnapshot.Name, snapshotapigroup) + pvclaim2, err := fpv.CreatePVC(ctx, client, namespace, pvcSpec) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Creating pod to attach PV to the node") + pod2, err := createPod(ctx, client, namespace, nil, + []*v1.PersistentVolumeClaim{pvclaim2}, false, execRWXCommandPod1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + pvs2, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + []*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volHandle2 := pvs2[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) + + nodeName := pod2.Spec.NodeName + annotations = pod2.Annotations + vmUUID, exists = annotations[vmUUIDLabel] + gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("Pod doesn't have %s annotation", vmUUIDLabel)) + _, err = e2eVSphere.getVMByUUID(ctx, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volHandle2, nodeName)) + isDiskAttached, err = e2eVSphere.isVolumeAttachedToVM(client, volHandle2, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") + + ginkgo.By("Verify the volume is accessible and Read/write is possible") + cmd := []string{"exec", pod2.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", + "cat /mnt/volume1/Pod1.html "} + output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) + gomega.Expect(strings.Contains(output, "Hello message from Pod1")).NotTo(gomega.BeFalse()) + + wrtiecmd := []string{"exec", pod2.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", + "echo 'Hello message from test into Pod1' > /mnt/volume1/Pod1.html"} + e2ekubectl.RunKubectlOrDie(namespace, wrtiecmd...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) 
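As an aside on the restore path exercised above: the claim built by getPersistentVolumeClaimSpecWithDatasource is an ordinary PVC whose dataSource points at the VolumeSnapshot. A minimal sketch of that shape, assuming snapshotapigroup resolves to the VolumeSnapshot API group ("snapshot.storage.k8s.io"); the claim name and the omitted size request are illustrative only:

    // Illustrative shape of a PVC restored from a snapshot; the real helper also
    // fills in the storage request from diskSize.
    apiGroup := snapshotapigroup
    restoreClaim := &v1.PersistentVolumeClaim{
        ObjectMeta: metav1.ObjectMeta{GenerateName: "restore-pvc-", Namespace: namespace},
        Spec: v1.PersistentVolumeClaimSpec{
            StorageClassName: &storageclass.Name,
            AccessModes:      []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
            DataSource: &v1.TypedLocalObjectReference{
                APIGroup: &apiGroup,
                Kind:     "VolumeSnapshot",
                Name:     volumeSnapshot.Name,
            },
        },
    }
    _ = restoreClaim // in the test itself the spec is handed to fpv.CreatePVC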
+ gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) + defer func() { + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod2.Name, namespace)) + err = fpod.DeletePodWithWait(ctx, client, pod2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim2.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Verify volume metadata for deployment pod, pvc and pv") + err = waitAndVerifyCnsVolumeMetadata(ctx, volHandle2, pvclaim2, pvs2[0], pod2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify pv and node affinity") + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod2, allowedTopologies) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* + Testcase-3 + ZonalPolicy → Late Binding + Workflow Path: Statefulset + (with node affinity set and with 3 replicas) → Snapshot of all 3 PVCs → Restore all 3 snapshots -> 3 deployments + + 1. SVC should list two storage classes + a) ZonalPolicy-immediateBindingMode + b) ZonalPolicy-lateBinding (WFFC) + 2. Create a Statefulset with node affinity rule set to any particular zone (zone-1) with replica + count set to 3 using the storage class (ZonalPolicy-lateBinding). + 3. Wait for PVC to reach the Bound state and Pods to reach the Running state. + 4. Write some data to volume + 5. Describe PV and verify the node affinity details should show up as topology.kubernetes.io/zone in [zone-1] + 6. Make sure Pod is scheduled on appropriate nodes preset in the availability zone + 7. Get VolumeSnapshotClass "volumesnapshotclass-delete" from supervisor cluster. + 8. Create 3 volume snapshots (vols-1, vols-2, vols-3) for all 3 statefulset PVCs created above. + 9. Snapshot Verification: Execute and verify the steps mentioned in the Create snapshot mandatory checks + 10. Create 3 new PVCs using the volume snapshot created in step #8 + 11. Wait for all new PVCs to reach the Bound state. + 12. Describe PV and verify the node affinity details should show up as Ex: topology.kubernetes.io/zone in [zone-1] + 13. 
Cleanup: Execute and verify the steps mentioned in the Delete snapshot mandatory checks + */ + + ginkgo.It("SS3", ginkgo.Label(p0, vanilla, block, wcp, core), func() { + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + scParameters[svStorageClassName] = zonalPolicy + + ginkgo.By("Create storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, zonalPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Creating StorageClass for Statefulset") + stsReplicas := 3 + depReplica := 1 + + ginkgo.By("Creating service") + service := CreateService(namespace, client) + defer func() { + deleteService(namespace, client, service) + }() + + framework.Logf("Create StatefulSet") + statefulset := createCustomisedStatefulSets(ctx, client, namespace, true, + int32(stsReplicas), true, allowedTopologies, true, true, + zonalPolicy, "", storageclass, zonalPolicy) + defer func() { + fss.DeleteAllStatefulSets(ctx, client, namespace) + }() + + framework.Logf("Verify PV node affinity and that the PODS are running on appropriate node") + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, + namespace, allowedTopologies, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), + fmt.Sprintf("error verifying PV node affinity and POD node details: %v", err)) + + ssPodsBeforeScaleDown := fss.GetPodList(ctx, client, statefulset) + + framework.Logf("Fetching pod 1, pvc1 and pv1 details") + pod1, err := client.CoreV1().Pods(namespace).Get(ctx, + ssPodsBeforeScaleDown.Items[0].Name, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + pvc1 := pod1.Spec.Volumes[0].PersistentVolumeClaim + pvclaim1, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, + pvc1.ClaimName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + pv1 := getPvFromClaim(client, statefulset.Namespace, pvc1.ClaimName) + volHandle1 := pv1.Spec.CSI.VolumeHandle + gomega.Expect(volHandle1).NotTo(gomega.BeEmpty()) + + framework.Logf("Fetching pod 2, pvc2 and pv2 details") + pod2, err := client.CoreV1().Pods(namespace).Get(ctx, + ssPodsBeforeScaleDown.Items[1].Name, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + pvc2 := pod2.Spec.Volumes[0].PersistentVolumeClaim + pvclaim2, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, + pvc2.ClaimName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + pv2 := getPvFromClaim(client, statefulset.Namespace, pvc2.ClaimName) + volHandle2 := pv2.Spec.CSI.VolumeHandle + gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) + + framework.Logf("Fetching pod3, pvc3 and pv3 details") + pod3, err := client.CoreV1().Pods(namespace).Get(ctx, + ssPodsBeforeScaleDown.Items[2].Name, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + pvc3 := pod3.Spec.Volumes[0].PersistentVolumeClaim + pvclaim3, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, + pvc3.ClaimName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + pv3 := getPvFromClaim(client, statefulset.Namespace, pvc3.ClaimName) + volHandle3 := pv3.Spec.CSI.VolumeHandle + gomega.Expect(volHandle3).NotTo(gomega.BeEmpty()) + + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create volume snapshot-1 for pvc-1") + volumeSnapshot1, 
snapshotContent1, snapshotCreated1, + snapshotContentCreated1, snapshotId1, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, + volumeSnapshotClass, pvclaim1, volHandle1, "1Gi", true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated1 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent1, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated1 { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot1.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot1.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Create volume snapshot-2 for pvc-2") + volumeSnapshot2, snapshotContent2, snapshotCreated2, + snapshotContentCreated2, snapshotId2, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, + volumeSnapshotClass, pvclaim2, volHandle2, "1Gi", true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated2 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent2, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated2 { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot2.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot2.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Create volume snapshot-3 for pvc-3") + volumeSnapshot3, snapshotContent3, snapshotCreated3, + snapshotContentCreated3, snapshotId3, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, + volumeSnapshotClass, pvclaim3, volHandle3, "1Gi", true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated3 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent3, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated3 { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot3.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot3.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Create restorevol1 from snapshot1") + pvcSpec := getPersistentVolumeClaimSpecWithDatasource(namespace, "1Gi", storageclass, nil, + v1.ReadWriteOnce, volumeSnapshot1.Name, snapshotapigroup) + restoreVol1, err := fpv.CreatePVC(ctx, client, namespace, pvcSpec) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + restorepv1, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + []*v1.PersistentVolumeClaim{restoreVol1}, framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + restoreVolHandle1 := restorepv1[0].Spec.CSI.VolumeHandle + gomega.Expect(restoreVolHandle1).NotTo(gomega.BeEmpty()) + + ginkgo.By("Create restorevol2 from snapshot2") + pvcSpec = getPersistentVolumeClaimSpecWithDatasource(namespace, "1Gi", 
storageclass, nil, + v1.ReadWriteOnce, volumeSnapshot2.Name, snapshotapigroup) + restoreVol2, err := fpv.CreatePVC(ctx, client, namespace, pvcSpec) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + restorepv2, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + []*v1.PersistentVolumeClaim{restoreVol2}, framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + restoreVolHandle2 := restorepv2[0].Spec.CSI.VolumeHandle + gomega.Expect(restoreVolHandle2).NotTo(gomega.BeEmpty()) + + ginkgo.By("Create restorevol3 from snapshot3") + pvcSpec = getPersistentVolumeClaimSpecWithDatasource(namespace, "1Gi", storageclass, nil, + v1.ReadWriteOnce, volumeSnapshot3.Name, snapshotapigroup) + restoreVol3, err := fpv.CreatePVC(ctx, client, namespace, pvcSpec) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + restorepv3, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + []*v1.PersistentVolumeClaim{restoreVol3}, framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + restoreVolHandle3 := restorepv3[0].Spec.CSI.VolumeHandle + gomega.Expect(restoreVolHandle3).NotTo(gomega.BeEmpty()) + + ginkgo.By("Attach Deployment1 to restorevol1") + deployment1, err := createDeployment(ctx, client, int32(depReplica), labelsMap, + nil, namespace, []*v1.PersistentVolumeClaim{restoreVol1}, execRWXCommandPod1, false, busyBoxImageOnGcr) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + framework.Logf("Delete deployment set") + err := client.AppsV1().Deployments(namespace).Delete(ctx, deployment1.Name, + *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Attach Deployment2 to restorevol2") + deployment2, err := createDeployment(ctx, client, int32(depReplica), labelsMap, + nil, namespace, []*v1.PersistentVolumeClaim{restoreVol2}, execRWXCommandPod1, false, busyBoxImageOnGcr) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + framework.Logf("Delete deployment set") + err := client.AppsV1().Deployments(namespace).Delete(ctx, deployment2.Name, + *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Attach Deployment3 to restorevol3") + deployment3, err := createDeployment(ctx, client, int32(depReplica), labelsMap, + nil, namespace, []*v1.PersistentVolumeClaim{restoreVol3}, execRWXCommandPod1, false, busyBoxImageOnGcr) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + framework.Logf("Delete deployment set") + err := client.AppsV1().Deployments(namespace).Delete(ctx, deployment3.Name, + *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Verify pv and node affinity") + err = verifyPVnodeAffinityAndPODnodedetailsForDeploymentSetsLevel5(ctx, client, deployment1, + namespace, allowedTopologies, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = verifyPVnodeAffinityAndPODnodedetailsForDeploymentSetsLevel5(ctx, client, deployment2, + namespace, allowedTopologies, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = verifyPVnodeAffinityAndPODnodedetailsForDeploymentSetsLevel5(ctx, client, deployment3, + namespace, allowedTopologies, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Delete volume snapshot-1") + snapshotCreated1, snapshotContentCreated1, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot1, pandoraSyncWaitTime, volHandle1, snapshotId1, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Delete 
volume snapshot-2") + snapshotCreated2, snapshotContentCreated2, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot2, pandoraSyncWaitTime, volHandle2, snapshotId2, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Delete volume snapshot-3") + snapshotCreated3, snapshotContentCreated3, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot3, pandoraSyncWaitTime, volHandle3, snapshotId3, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* + Testcase-4 + ZonalPolicy → immediateBindingMode + Workflow Path: PVC → Offline Expansion → Snapshot → Pod → delete Snapshot -> Online Expansion → Snapshot + + 1. SVC should list two storage classes + a) ZonalPolicy-immediateBindingMode + b) ZonalPolicy-lateBinding (WFFC) + 2. Create PVC using the storage class (ZonalPolicy-immediateBindingMode). + 3. Wait for PVC to reach the Bound state. + 4. Describe PV and verify the node affinity details + 5. [Offine expansion] Edit PVC and expand the size + 6. Get VolumeSnapshotClass "volumesnapshotclass-delete" from supervisor cluster. + 7. Create a volume snapshot for the PVC created in step #2 + 8. Snapshot Verification: Execute and verify the steps mentioned in the Create snapshot mandatory checks + 9. Create a POD using the above PVC and make sure after the POD reaches a running + state volume expansion is honoured on the PVC + 10. [Online expansion] Use the same PVC and expand volume again + 11. Volume expansion should be successful + 12. Get VolumeSnapshotClass "volumesnapshotclass-delete" from supervisor cluster. + 13. Create a volume snapshot for the PVC created in step #2 + 14. Snapshot Verification: Execute and verify the steps mentioned in the Create snapshot mandatory checks + 15. Cleanup: Execute and verify the steps mentioned in the Delete snapshot mandatory checks + */ + + ginkgo.It("SS4", ginkgo.Label(p0, wcp, core), func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + scParameters[svStorageClassName] = zonalPolicy + + ginkgo.By("Create storage class") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, zonalPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create PVC") + pvclaim, persistentVolumes, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", + diskSize, storageclass, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + defer func() { + err = fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + // Modify PVC spec to trigger volume expansion + // We expand the PVC while no pod is using it to ensure offline expansion + ginkgo.By("Expanding current pvc") + currentPvcSize := pvclaim.Spec.Resources.Requests[v1.ResourceStorage] + newSize := currentPvcSize.DeepCopy() + newSize.Add(resource.MustParse("1Gi")) + newDiskSize := "3Gi" + framework.Logf("currentPvcSize %v, newSize %v", currentPvcSize, newSize) + pvclaim, err = expandPVCSize(pvclaim, newSize, client) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(pvclaim).NotTo(gomega.BeNil()) + + pvcSize := pvclaim.Spec.Resources.Requests[v1.ResourceStorage] + if pvcSize.Cmp(newSize) != 0 { + framework.Failf("error updating pvc size %q", pvclaim.Name) + } + + 
ginkgo.By("Waiting for controller volume resize to finish") + err = waitForPvResizeForGivenPvc(pvclaim, client, totalResizeWaitPeriod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Checking for conditions on pvc") + pvclaim, err = waitForPVCToReachFileSystemResizePendingCondition(client, namespace, pvclaim.Name, pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volHandle)) + queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + if len(queryResult.Volumes) == 0 { + err = fmt.Errorf("queryCNSVolumeWithResult returned no volume") + } + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Verifying disk size requested in volume expansion is honored") + newSizeInMb := int64(3072) + if queryResult.Volumes[0].BackingObjectDetails.(*cnstypes.CnsBlockBackingDetails).CapacityInMb != newSizeInMb { + err = fmt.Errorf("got wrong disk size after volume expansion") + } + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create volume snapshot-1 for pvc-1") + volumeSnapshot1, snapshotContent1, snapshotCreated1, + snapshotContentCreated1, snapshotId1, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, + volumeSnapshotClass, pvclaim, volHandle, newDiskSize, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated1 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent1, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated1 { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot1.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot1.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Creating pod to attach PV to the node") + pod, err := createPod(ctx, client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false, execRWXCommandPod1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + ginkgo.By("Deleting the pod") + err = fpod.DeletePodWithWait(ctx, client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volHandle, pod.Spec.NodeName)) + annotations := pod.Annotations + vmUUID, exists := annotations[vmUUIDLabel] + gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("Pod doesn't have %s annotation", vmUUIDLabel)) + + framework.Logf("VMUUID : %s", vmUUID) + isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, volHandle, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") + + ginkgo.By("Verify the volume is accessible and Read/write is possible") + output := readFileFromPod(namespace, pod.Name, filePathPod1) + gomega.Expect(strings.Contains(output, "Hello message from Pod1")).NotTo(gomega.BeFalse()) + + writeDataOnFileFromPod(namespace, pod.Name, filePathPod1, "Hello message from test into Pod1") + output = readFileFromPod(namespace, 
pod.Name, filePathPod1) + gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) + + ginkgo.By("Waiting for file system resize to finish") + pvclaim, err = waitForFSResize(pvclaim, client) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + pvcConditions := pvclaim.Status.Conditions + expectEqual(len(pvcConditions), 0, "pvc should not have conditions") + ginkgo.By("Verify filesystem size for mount point /mnt/volume1") + fsSize, err := getFSSizeMb(f, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("File system size after expansion : %s, before expansion: %s", fsSize, diskSizeInMb) + if fsSize < diskSizeInMb { + framework.Failf("error updating filesystem size for %q. Resulting filesystem size is %d", pvclaim.Name, fsSize) + } + ginkgo.By("File system resize finished successfully") + + ginkgo.By("Verify volume metadata for deployment pod, pvc and pv") + err = waitAndVerifyCnsVolumeMetadata(ctx, volHandle, pvclaim, persistentVolumes[0], pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify pv and node affinity") + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, allowedTopologies) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Expanding current pvc before deleting volume snapshot") + currentPvcSize = pvclaim.Spec.Resources.Requests[v1.ResourceStorage] + newSize = currentPvcSize.DeepCopy() + newSize.Add(resource.MustParse("1Gi")) + framework.Logf("currentPvcSize %v, newSize %v", currentPvcSize, newSize) + _, err = expandPVCSize(pvclaim, newSize, client) + ginkgo.By("Snapshot webhook does not allow volume expansion on PVC") + gomega.Expect(err).To(gomega.HaveOccurred()) + + ginkgo.By("Delete volume snapshot") + snapshotCreated1, snapshotContentCreated1, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot1, pandoraSyncWaitTime, volHandle, snapshotId1, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Expanding current pvc after deleting volume snapshot") + currentPvcSize = pvclaim.Spec.Resources.Requests[v1.ResourceStorage] + newSize = currentPvcSize.DeepCopy() + newSize.Add(resource.MustParse("1Gi")) + newDiskSize = "4Gi" + framework.Logf("currentPvcSize %v, newSize %v", currentPvcSize, newSize) + pvclaim, err = expandPVCSize(pvclaim, newSize, client) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(pvclaim).NotTo(gomega.BeNil()) + + pvcSize = pvclaim.Spec.Resources.Requests[v1.ResourceStorage] + if pvcSize.Cmp(newSize) != 0 { + framework.Failf("error updating pvc size %q", pvclaim.Name) + } + + ginkgo.By("Waiting for controller volume resize to finish") + err = waitForPvResizeForGivenPvc(pvclaim, client, totalResizeWaitPeriod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Checking for conditions on pvc") + pvclaim, err = waitForPVCToReachFileSystemResizePendingCondition(client, + namespace, pvclaim.Name, pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volHandle)) + queryResult, err = e2eVSphere.queryCNSVolumeWithResult(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + if len(queryResult.Volumes) == 0 { + err = fmt.Errorf("queryCNSVolumeWithResult returned no volume") + } + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Verifying disk size requested in volume expansion is honored") + newSizeInMb = int64(4096) + if 
queryResult.Volumes[0].BackingObjectDetails.(*cnstypes.CnsBlockBackingDetails).CapacityInMb != + newSizeInMb { + err = fmt.Errorf("got wrong disk size after volume expansion +%v ", + queryResult.Volumes[0].BackingObjectDetails.(*cnstypes.CnsBlockBackingDetails).CapacityInMb) + } + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create volume snapshot-2 for pvc-1") + volumeSnapshot2, snapshotContent2, snapshotCreated2, + snapshotContentCreated2, snapshotId2, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, + volumeSnapshotClass, pvclaim, volHandle, newDiskSize, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated2 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent2, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated2 { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot2.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot2.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Delete volume snapshot-2") + snapshotCreated2, snapshotContentCreated2, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot2, pandoraSyncWaitTime, volHandle, snapshotId2, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* + Testcase-6 + ZonalPolicy → immediateBindingMode + Static volume provisioning + + Workflow Path: Static PVC → Pod → Snapshot + + Steps: + 1. Create FCD with valid zonal policy. Make sure the storage policy has sufficient quota and note the FCD ID + 2. Note the "reserved" and "used" StoragePolicyQuota + 3. Call CNSRegisterVolume API by specifying VolumeID, AccessMode set to "ReadWriteOnce” and PVC Name + 4. Wait for some time to get the status of CRD Verify the CRD status should be successful. + 5. CNS operator creates PV and PVC. + 6. Verify Bidirectional reference between PV and PVC - validate volumeName, + storage class, PVC name, namespace and the size. + 7. Verify PV and PVC’s are bound + 8. Verify that the "reserved" quota should decrease and the "used" quota should increase in StoragePolicyQuota + 9. Verify node affinity on PV. + 10. Invoke CNS query API, to validate volume is registered in CNS and volume shows the PV PVC information + 11. Validate health annotation is added on the PVC + 12. Create a Pod using the PVC created above. + 13. Wait for Pod to reach running state. + 14. Get VolumeSnapshotClass "volumesnapshotclass-delete" from supervisor cluster. + 15. Create a volume snapshot for the static PVC. + 16. Snapshot Verification: Execute and verify the steps mentioned in the Create snapshot mandatory checks + 17. 
Cleanup: Execute and verify the steps mentioned in the Delete snapshot mandatory checks + */ + ginkgo.It("ss6", ginkgo.Label(p0, block, wcp), func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + curtime := time.Now().Unix() + curtimestring := strconv.FormatInt(curtime, 10) + pvcName := "cns-pvc-" + curtimestring + framework.Logf("pvc name :%s", pvcName) + + restConfig, _, profileID := staticProvisioningPreSetUpUtil(ctx, f, client, zonalPolicy) + + ginkgo.By("Creating FCD (CNS Volume)") + fcdID, err := e2eVSphere.createFCDwithValidProfileID(ctx, "staticfcd"+curtimestring, + profileID, diskSizeInMb, defaultDatastore.Reference()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow newly created FCD:%s to sync with pandora", + pandoraSyncWaitTime, fcdID)) + time.Sleep(time.Duration(pandoraSyncWaitTime) * time.Second) + + ginkgo.By("Create CNS register volume with above created FCD") + cnsRegisterVolume := getCNSRegisterVolumeSpec(ctx, namespace, fcdID, "", pvcName, v1.ReadWriteOnce) + err = createCNSRegisterVolume(ctx, restConfig, cnsRegisterVolume) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(waitForCNSRegisterVolumeToGetCreated(ctx, restConfig, + namespace, cnsRegisterVolume, poll, supervisorClusterOperationsTimeout)) + cnsRegisterVolumeName := cnsRegisterVolume.GetName() + framework.Logf("CNS register volume name : %s", cnsRegisterVolumeName) + + ginkgo.By(" verify created PV, PVC and check the bidirectional reference") + pvc, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvcName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pv := getPvFromClaim(client, namespace, pvcName) + volHandle := pv.Spec.CSI.VolumeHandle + verifyBidirectionalReferenceOfPVandPVC(ctx, client, pvc, pv, fcdID) + + ginkgo.By("Creating pod") + pod, err := createPod(ctx, client, namespace, nil, []*v1.PersistentVolumeClaim{pvc}, false, execRWXCommandPod1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + podName := pod.GetName + framework.Logf("podName : %s", podName) + + ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", + pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName)) + var vmUUID string + var exists bool + + annotations := pod.Annotations + vmUUID, exists = annotations[vmUUIDLabel] + gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("Pod doesn't have %s annotation", vmUUIDLabel)) + _, err = e2eVSphere.getVMByUUID(ctx, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, pv.Spec.CSI.VolumeHandle, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") + + ginkgo.By("Verify the volume is accessible and Read/write is possible") + output := readFileFromPod(namespace, pod.Name, filePathPod1) + gomega.Expect(strings.Contains(output, "Hello message from Pod1")).NotTo(gomega.BeFalse()) + + writeDataOnFileFromPod(namespace, pod.Name, filePathPod1, "Hello message from test into Pod1") + output = readFileFromPod(namespace, pod.Name, filePathPod1) + gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) + + ginkgo.By("Verify volume metadata for deployment pod, pvc and pv") + err = waitAndVerifyCnsVolumeMetadata(ctx, volHandle, pvc, pv, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify pv and node affinity") 
+ err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, allowedTopologies) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create a dynamic volume snapshot") + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvc, volHandle, diskSize, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Deleting the pod") + err = fpod.DeletePodWithWait(ctx, client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By(fmt.Sprintf("Verify volume: %s is detached from PodVM with vmUUID: %s", + pv.Spec.CSI.VolumeHandle, vmUUID)) + _, err = e2eVSphere.getVMByUUIDWithWait(ctx, vmUUID, supervisorClusterOperationsTimeout) + gomega.Expect(err).To(gomega.HaveOccurred(), + fmt.Sprintf("PodVM with vmUUID: %s still exists. So volume: %s is not detached from the PodVM", + vmUUID, pv.Spec.CSI.VolumeHandle)) + defer func() { + testCleanUpUtil(ctx, restConfig, client, cnsRegisterVolume, namespace, pvc.Name, pv.Name) + }() + + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + }) +}) diff --git a/tests/e2e/snapshot_vmservice_vm.go b/tests/e2e/snapshot_vmservice_vm.go new file mode 100644 index 0000000000..dbd9dc3916 --- /dev/null +++ b/tests/e2e/snapshot_vmservice_vm.go @@ -0,0 +1,3395 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package e2e + +import ( + "context" + "fmt" + "os" + "strconv" + "strings" + "time" + + snapV1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" + snapclient "github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + cnstypes "github.com/vmware/govmomi/cns/types" + "github.com/vmware/govmomi/vim25/types" + + vmopv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + clientset "k8s.io/client-go/kubernetes" + restclient "k8s.io/client-go/rest" + "k8s.io/kubernetes/test/e2e/framework" + fnodes "k8s.io/kubernetes/test/e2e/framework/node" + fpod "k8s.io/kubernetes/test/e2e/framework/pod" + fpv "k8s.io/kubernetes/test/e2e/framework/pv" + admissionapi "k8s.io/pod-security-admission/api" + ctlrclient "sigs.k8s.io/controller-runtime/pkg/client" + + cnsop "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator" +) + +var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { + + f := framework.NewDefaultFramework("vmsvc") + f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged + f.SkipNamespaceCreation = true // tests will create their own namespaces + var ( + client clientset.Interface + namespace string + datastoreURL string + storagePolicyName string + storageClassName string + storageProfileId string + vcRestSessionId string + vmi string + vmClass string + vmopC ctlrclient.Client + cnsopC ctlrclient.Client + isVsanHealthServiceStopped bool + isSPSserviceStopped bool + vcAddress string + restConfig *restclient.Config + snapc *snapclient.Clientset + pandoraSyncWaitTime int + err error + dsRef types.ManagedObjectReference + labelsMap map[string]string + ) + + ginkgo.BeforeEach(func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // client connection + client = f.ClientSet + bootstrap() + + // fetch the testbed type for executing testcases + topologyFeature := os.Getenv(topologyFeature) + + // fetching nodes and reading storage policy name + if topologyFeature != topologyTkgHaName { + nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) + framework.ExpectNoError(err, "Unable to find ready and schedulable Node") + if !(len(nodeList.Items) > 0) { + framework.Failf("Unable to find ready and schedulable Node") + } + storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) + } else { + storagePolicyName = GetAndExpectStringEnvVar(envZonalStoragePolicyName) + } + + // fetching vc ip and creating creating vc session + vcAddress = e2eVSphere.Config.Global.VCenterHostname + ":" + sshdPort + vcRestSessionId = createVcSession4RestApis(ctx) + + // reading storage class name for wcp setup "wcpglobal_storage_profile" + storageClassName = strings.ReplaceAll(storagePolicyName, "_", "-") // since this is a wcp setup + + // fetching shared datastore url + datastoreURL = GetAndExpectStringEnvVar(envSharedDatastoreURL) + + // reading datastore morf reference + dsRef = getDsMoRefFromURL(ctx, datastoreURL) + framework.Logf("dsmoId: %v", dsRef.Value) + + // reading storage profile id of "wcpglobal_storage_profile" + storageProfileId = e2eVSphere.GetSpbmPolicyID(storagePolicyName) + + /* creating/reading content library + "https://wp-content-pstg.broadcom.com/vmsvc/lib.json" */ + contentLibId := createAndOrGetContentlibId4Url(vcRestSessionId, 
GetAndExpectStringEnvVar(envContentLibraryUrl),
+ dsRef.Value, GetAndExpectStringEnvVar(envContentLibraryUrlSslThumbprint))
+
+ /*
+ [ ~ ]# kubectl get vmclass -n csi-vmsvcns-2227
+ NAME CPU MEMORY
+ best-effort-small 2 4Gi
+
+ [ ~ ]# kubectl get vmclass best-effort-small -n csi-vmsvcns-2227 -o jsonpath='{.spec}' | jq
+ */
+ vmClass = os.Getenv(envVMClass)
+ if vmClass == "" {
+ vmClass = vmClassBestEffortSmall
+ }
+
+ framework.Logf("Create a WCP namespace for the test")
+ // creating wcp test namespace and setting vmclass, content library and storage class fields in test ns
+ namespace = createTestWcpNs(
+ vcRestSessionId, storageProfileId, vmClass, contentLibId, getSvcId(vcRestSessionId))
+
+ // Get snapshot client using the rest config
+ restConfig = getRestConfigClient()
+ snapc, err = snapclient.NewForConfig(restConfig)
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+ // setting resource quota for storage policy tagged to supervisor namespace
+ //setStoragePolicyQuota(ctx, restConfig, storagePolicyName, namespace, rqLimitScaleTest)
+
+ // creating vm scheme
+ vmopScheme := runtime.NewScheme()
+ gomega.Expect(vmopv1.AddToScheme(vmopScheme)).Should(gomega.Succeed())
+ vmopC, err = ctlrclient.New(f.ClientConfig(), ctlrclient.Options{Scheme: vmopScheme})
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+ cnsOpScheme := runtime.NewScheme()
+ gomega.Expect(cnsop.AddToScheme(cnsOpScheme)).Should(gomega.Succeed())
+ cnsopC, err = ctlrclient.New(f.ClientConfig(), ctlrclient.Options{Scheme: cnsOpScheme})
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+ /*
+ *** reading vm image name "ubuntu-2004-cloud-init-21.4-kube-v1.20.10" ***
+
+ [ ~ ]# kubectl get vmimage -o wide -n csi-vmsvcns-2227 | grep ubuntu-2004-cloud-init-21.4-kube-v1.20.10
+ vmi-819319608e5ba43d1 ubuntu-2004-cloud-init-21.4-kube-v1.20.10 OVF kube-v1.20.10 ubuntu64Guest
+
+ [ ~ ]# kubectl get vmimage vmi-819319608e5ba43d1 -n csi-vmsvcns-2227 -o jsonpath='{.spec}' | jq
+ */
+ vmImageName := GetAndExpectStringEnvVar(envVmsvcVmImageName)
+ framework.Logf("Waiting for virtual machine image list to be available in namespace '%s' for image '%s'",
+ namespace, vmImageName)
+ vmi = waitNGetVmiForImageName(ctx, vmopC, namespace, vmImageName)
+ gomega.Expect(vmi).NotTo(gomega.BeEmpty())
+
+ // reading full sync wait time
+ if os.Getenv(envPandoraSyncWaitTime) != "" {
+ pandoraSyncWaitTime, err = strconv.Atoi(os.Getenv(envPandoraSyncWaitTime))
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ } else {
+ pandoraSyncWaitTime = defaultPandoraSyncWaitTime
+ }
+
+ //setting map values
+ labelsMap = make(map[string]string)
+ labelsMap["app"] = "test"
+ })
+
+ ginkgo.AfterEach(func() {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ if isVsanHealthServiceStopped {
+ ginkgo.By(fmt.Sprintf("Starting %v on the vCenter host", vsanhealthServiceName))
+ startVCServiceWait4VPs(ctx, vcAddress, vsanhealthServiceName, &isVsanHealthServiceStopped)
+ }
+
+ if isSPSserviceStopped {
+ ginkgo.By(fmt.Sprintf("Starting %v on the vCenter host", spsServiceName))
+ startVCServiceWait4VPs(ctx, vcAddress, spsServiceName, &isSPSserviceStopped)
+ }
+
+ dumpSvcNsEventsOnTestFailure(client, namespace)
+ delTestWcpNs(vcRestSessionId, namespace)
+ gomega.Expect(waitForNamespaceToGetDeleted(ctx, client, namespace, poll, pollTimeout)).To(gomega.Succeed())
+ })
+
+ /*
+ Testcase-1
+ Dynamic PVC → VM → Snapshot
+ Steps:
+ 1. Create a PVC using the storage class (storage policy) tagged to the supervisor namespace
+ 2.
Wait for PVC to reach the Bound state. + 3. Create a VM service VM using the PVC created in step #1 + 4. Wait for the VM service to be up and in the powered-on state. + 5. Once the VM is up, verify that the volume is accessible inside the VM + 6. Write some data into the volume. + 7. Get VolumeSnapshotClass "volumesnapshotclass-delete" from supervisor cluster + 8. Create a volume snapshot for the PVC created in step #1. + 9. Snapshot Verification: Execute and verify the steps mentioned in the Create snapshot mandatory checks + 10. Verify CNS metadata for a PVC + 11. Cleanup: Execute and verify the steps mentioned in the Delete snapshot mandatory checks + */ + + ginkgo.It("VMTC1Creating snapshot of vm service vm attachd to a dynamic volume", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ginkgo.By("Create a storageclass") + storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create PVC") + pvc, pvs, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", + diskSize, storageclass, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volHandle := pvs[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + defer func() { + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating VM bootstrap data") + /* + [ ~ ]# kubectl get secret -n csi-vmsvcns-2227 + NAME TYPE DATA AGE + vm-bootstrap-data Opaque 1 72s + + [ ~ ]# kubectl get secret -n csi-vmsvcns-2227 -o yaml + */ + secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace) + defer func() { + ginkgo.By("Deleting VM bootstrap data") + err := client.CoreV1().Secrets(namespace).Delete(ctx, secretName, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating VM") + /* + [ ~ ]# kubectl get vm -o wide -n csi-vmsvcns-2227 + NAME POWER-STATE CLASS IMAGE PRIMARY-IP4 AGE + csi-test-vm-2668 PoweredOn best-effort-small vmi-819319608e5ba43d1 94s + + [ ~ ]# kubectl describe vm -n csi-vmsvcns-2227 + */ + vm := createVmServiceVmWithPvcs( + ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc}, vmi, storageClassName, secretName) + defer func() { + ginkgo.By("Deleting VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating loadbalancing service for ssh with the VM") + /* + [ ~ ]# kubectl get network -n csi-vmsvcns-2227 + NAME AGE + primary 21m + + [ ~ ]# kubectl get network -n csi-vmsvcns-2227 -o jsonpath='{.items[0].spec}' | jq + + [ ~ ]# kubectl get vmservice -n csi-vmsvcns-2227 + NAME TYPE AGE + csi-test-vm-2668-svc LoadBalancer 2m17s + root@4203ec75780f15c3cd295b6bad330232 [ ~ ]# + + [ ~ ]# kubectl get svc -n csi-vmsvcns-2227 + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + csi-test-vm-2668-svc LoadBalancer 172.24.176.180 192.168.130.7 22/TCP 2m32s + root@4203ec75780f15c3cd295b6bad330232 [ ~ ]# + + [ ~ ]# kubectl get endpoints -n csi-vmsvcns-2227 + NAME ENDPOINTS AGE + csi-test-vm-2668-svc 2m40s + root@4203ec75780f15c3cd295b6bad330232 [ ~ ]# + */ + vmlbsvc := createService4Vm(ctx, vmopC, namespace, vm.Name) + defer func() { + 
ginkgo.By("Deleting loadbalancing service for ssh with the VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachineService{ObjectMeta: metav1.ObjectMeta{ + Name: vmlbsvc.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Wait for VM to come up and get an IP") + vmIp, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait and verify PVCs are attached to the VM") + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm, + []*v1.PersistentVolumeClaim{pvc})).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify PVCs are accessible to the VM") + /* + [ ~ ]# kubectl get cnsnodevmattachment -n csi-vmsvcns-2227 -o yaml + */ + ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity") + vm, err = getVmsvcVM(ctx, vmopC, vm.Namespace, vm.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for i, vol := range vm.Status.Volumes { + volFolder := formatNVerifyPvcIsAccessible(vol.DiskUuid, i+1, vmIp) + verifyDataIntegrityOnVmDisk(vmIp, volFolder) + } + + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create a dynamic volume snapshot") + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvc, volHandle, diskSize, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* + Testcase-2 + Static PVC → VM → Snapshot + Steps: + 1. Create FCD + 2. Create a static PV and PVC using cns register volume API + 3. Wait for PV and PVC to reach the Bound state. + 4. Create a VM service VM using the PVC created in step #2 + 5. Wait for the VM service to be up and in the powered-on state. + 6. Once the VM is up, verify that the volume is accessible inside the VM + 7. Write some data into the volume. + 8. Get VolumeSnapshotClass "volumesnapshotclass-delete" from supervisor cluster + 9. Create a volume snapshot for the PVC created in step #1. + 10. Snapshot Verification: Execute and verify the steps mentioned in the Create snapshot mandatory checks + 11. Verify CNS metadata for a PVC + 12. 
Cleanup: Execute and verify the steps mentioned in the Delete snapshot mandatory checks
+ */
+
+ ginkgo.It("VMM2", func() {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ var fcdID string
+ curr_time := time.Now().Unix()
+ curTimeString := strconv.FormatInt(curr_time, 10)
+ pvcName := "cns-pvc-" + curTimeString
+ framework.Logf("pvc name: %s", pvcName)
+
+ ginkgo.By("Creating FCD (CNS Volume)")
+ fcdID, err := e2eVSphere.createFCDwithValidProfileID(ctx,
+ "staticfcd"+curTimeString, storageProfileId, diskSizeInMb, dsRef.Reference())
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+ ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow newly created FCD:%s to sync with pandora",
+ pandoraSyncWaitTime, fcdID))
+ time.Sleep(time.Duration(pandoraSyncWaitTime) * time.Second)
+
+ ginkgo.By("Create CNS register volume with above created FCD")
+ cnsRegisterVolume := getCNSRegisterVolumeSpec(ctx, namespace, fcdID, "", pvcName, v1.ReadWriteOnce)
+ err = createCNSRegisterVolume(ctx, restConfig, cnsRegisterVolume)
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(waitForCNSRegisterVolumeToGetCreated(ctx, restConfig,
+ namespace, cnsRegisterVolume, poll, supervisorClusterOperationsTimeout))
+ cnsRegisterVolumeName := cnsRegisterVolume.GetName()
+ framework.Logf("CNS register volume name : %s", cnsRegisterVolumeName)
+
+ ginkgo.By("Verify created PV, PVC and check the bidirectional reference")
+ pvc, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvcName, metav1.GetOptions{})
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ pv := getPvFromClaim(client, namespace, pvcName)
+ volHandle := pv.Spec.CSI.VolumeHandle
+ verifyBidirectionalReferenceOfPVandPVC(ctx, client, pvc, pv, fcdID)
+
+ ginkgo.By("Creating VM bootstrap data")
+ secretName :=
createBootstrapSecretForVmsvcVms(ctx, client, namespace) + defer func() { + ginkgo.By("Deleting VM bootstrap data") + err := client.CoreV1().Secrets(namespace).Delete(ctx, secretName, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + ginkgo.By("Creating VM") + vm := createVmServiceVmWithPvcs( + ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc}, vmi, storageClassName, secretName) + defer func() { + ginkgo.By("Deleting VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating loadbalancing service for ssh with the VM") + vmlbsvc := createService4Vm(ctx, vmopC, namespace, vm.Name) + defer func() { + ginkgo.By("Deleting loadbalancing service for ssh with the VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachineService{ObjectMeta: metav1.ObjectMeta{ + Name: vmlbsvc.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Wait for VM to come up and get an IP") + vmIp, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait and verify PVCs are attached to the VM") + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm, + []*v1.PersistentVolumeClaim{pvc})).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify PVCs are accessible to the VM") + ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity") + vm, err = getVmsvcVM(ctx, vmopC, vm.Namespace, vm.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for i, vol := range vm.Status.Volumes { + volFolder := formatNVerifyPvcIsAccessible(vol.DiskUuid, i+1, vmIp) + verifyDataIntegrityOnVmDisk(vmIp, volFolder) + } + + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create a dynamic volume snapshot") + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvc, volHandle, diskSize, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* + Testcase-3 + Dynamic PVC  → VM → Snapshot → RestoreVol → VM + Steps: + 1. Create a dynamic PVC using the storage class (storage policy) tagged to the supervisor namespace + 2. Wait for dynamic PVC to reach the Bound state. + 3. 
Create a VM service VM using dynamic PVC. + 4. Wait for the VM service to be up and in the powered-on state. + 5. Once the VM is up, verify that the volume is accessible inside the VM + 6. Write some IO to the CSI volumes, read it back from them and verify the data integrity + 7. Get VolumeSnapshotClass "volumesnapshotclass-delete" from supervisor cluster + 8. Create a volume snapshot for the dynamic PVC created in step #1 + 9. Snapshot Verification: Execute and verify the steps mentioned in the Create snapshot mandatory checks + 10. Verify CNS metadata for a PVC + 11. Create a new PVC from the snapshot created in step #11. + 12. Wait for PVC to reach the Bound state. + 13. Create a VM service VM using the PVC created in step #14 + Wait for the VM service to be up and in the powered-on state. + Once the VM is up, verify that the volume is accessible inside the VM + Verify reading/writing data in the volume. + Cleanup: Execute and verify the steps mentioned in the Delete snapshot mandatory checks + */ + + ginkgo.It("VMM3", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ginkgo.By("Create a storageclass") + storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create PVC") + pvc, pvs, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", + diskSize, storageclass, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volHandle := pvs[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + defer func() { + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating VM bootstrap data") + secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace) + defer func() { + ginkgo.By("Deleting VM bootstrap data") + err := client.CoreV1().Secrets(namespace).Delete(ctx, secretName, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating VM") + vm := createVmServiceVmWithPvcs( + ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc}, vmi, storageClassName, secretName) + defer func() { + ginkgo.By("Deleting VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating loadbalancing service for ssh with the VM") + vmlbsvc := createService4Vm(ctx, vmopC, namespace, vm.Name) + defer func() { + ginkgo.By("Deleting loadbalancing service for ssh with the VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachineService{ObjectMeta: metav1.ObjectMeta{ + Name: vmlbsvc.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Wait for VM to come up and get an IP") + vmIp, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait and verify PVCs are attached to the VM") + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm, + []*v1.PersistentVolumeClaim{pvc})).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify PVCs are accessible to the VM") + ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity") + vm, err = 
getVmsvcVM(ctx, vmopC, vm.Namespace, vm.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for i, vol := range vm.Status.Volumes { + volFolder := formatNVerifyPvcIsAccessible(vol.DiskUuid, i+1, vmIp) + verifyDataIntegrityOnVmDisk(vmIp, volFolder) + } + + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create a dynamic volume snapshot") + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvc, volHandle, diskSize, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Create a volume from a snapshot") + pvc2, pv2, _ := verifyVolumeRestoreOperation(ctx, client, namespace, storageclass, volumeSnapshot, diskSize, false) + volHandle2 := pv2[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) + defer func() { + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvc2.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating VM") + vm2 := createVmServiceVmWithPvcs( + ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc2}, vmi, storageClassName, secretName) + defer func() { + ginkgo.By("Deleting VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm2.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating loadbalancing service for ssh with the VM") + vmlbsvc2 := createService4Vm(ctx, vmopC, namespace, vm2.Name) + defer func() { + ginkgo.By("Deleting loadbalancing service for ssh with the VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachineService{ObjectMeta: metav1.ObjectMeta{ + Name: vmlbsvc2.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Wait for VM to come up and get an IP") + vmIp2, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm2.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait and verify PVCs are attached to the VM") + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm2, + []*v1.PersistentVolumeClaim{pvc2})).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify PVCs are accessible to the VM") + ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity") + vm2, err = getVmsvcVM(ctx, vmopC, vm2.Namespace, vm2.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for i, vol := range vm2.Status.Volumes { + volFolder := formatNVerifyPvcIsAccessible(vol.DiskUuid, i+1, vmIp2) + verifyDataIntegrityOnVmDisk(vmIp2, volFolder) 
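+ // assumption based on the helper names: formatNVerifyPvcIsAccessible mounts the disk for index i+1
+ // inside the guest and returns its mount folder, while verifyDataIntegrityOnVmDisk performs a
+ // write/read round trip on that folder over ssh (vmIp2) to confirm the volume is usable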
+ } + + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* + Testcase-4 + Dynamic PVC → VM → Snapshot1 and Snapshot2 → PVC1 and PVC2 → VM + Steps: + 1. Create a PVC using the storage class (storage policy) tagged to the supervisor namespace + 2. Wait for PVC to reach the Bound state. + 3. Create a VM service VM using the PVC created in step #1 + 4. Wait for the VM service to be up and in the powered-on state. + 5. Once the VM is up, verify that the volume is accessible inside the VM + 6. Write some IO to the CSI volumes, read it back from them and verify the data integrity + 7. Get VolumeSnapshotClass "volumesnapshotclass-delete" from supervisor cluster + 8. Take 2 snapshots (snapshot-1, snapshot-2) for the PVC created in step #1. + 9. Snapshot Verification: Execute and verify the steps mentioned in the Create snapshot mandatory checks + 10. Verify CNS metadata for a PVC created in step #1 + 11. Create PVC-1 from Snapshot-1, PVC-2 from Snapshot-2 + 12. Wait for PVCs to reach the Bound state. + 13. Create a VM service VM using pvc-1 and pvc-2 created in step #11 + 14. Wait for the VM service to be up and in the powered-on state. + 15. Once the VM is up, verify that the volume is accessible inside the VM + 16. Write some IO to the CSI volumes, read it back from them and verify the data integrity + 17. Cleanup: Execute and verify the steps mentioned in the Delete snapshot mandatory checks + */ + + ginkgo.It("VMM4", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ginkgo.By("Create a storageclass") + storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create PVC") + pvc, pvs, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", + diskSize, storageclass, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volHandle := pvs[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + defer func() { + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating VM bootstrap data") + secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace) + defer func() { + ginkgo.By("Deleting VM bootstrap data") + err := client.CoreV1().Secrets(namespace).Delete(ctx, secretName, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating VM") + vm := createVmServiceVmWithPvcs( + ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc}, vmi, storageClassName, secretName) + defer func() { + ginkgo.By("Deleting VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating loadbalancing service for ssh with the VM") + vmlbsvc := createService4Vm(ctx, vmopC, namespace, vm.Name) + defer func() { + ginkgo.By("Deleting loadbalancing service for ssh with the VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachineService{ObjectMeta: metav1.ObjectMeta{ + Name: vmlbsvc.Name, + Namespace: namespace, + }}) 
+ gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Wait for VM to come up and get an IP") + vmIp, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait and verify PVCs are attached to the VM") + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm, + []*v1.PersistentVolumeClaim{pvc})).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify PVCs are accessible to the VM") + ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity") + vm, err = getVmsvcVM(ctx, vmopC, vm.Namespace, vm.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for i, vol := range vm.Status.Volumes { + volFolder := formatNVerifyPvcIsAccessible(vol.DiskUuid, i+1, vmIp) + verifyDataIntegrityOnVmDisk(vmIp, volFolder) + } + + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create a dynamic volume snapshot-1 for the volume") + volumeSnapshot1, snapshotContent1, snapshotCreated1, + snapshotContentCreated1, snapshotId1, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, + volumeSnapshotClass, pvc, volHandle, diskSize, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated1 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent1, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated1 { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot1.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot1.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Create a dynamic volume snapshot-2 for the volume") + volumeSnapshot2, snapshotContent2, snapshotCreated2, + snapshotContentCreated2, snapshotId2, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, + volumeSnapshotClass, pvc, volHandle, diskSize, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated2 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent2, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated2 { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot2.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot2.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Create a volume from a snapshot") + pvc2, pv2, _ := verifyVolumeRestoreOperation(ctx, client, namespace, storageclass, volumeSnapshot1, diskSize, false) + volHandle2 := pv2[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) + + ginkgo.By("Create a volume from a snapshot") + pvc3, pv3, _ := verifyVolumeRestoreOperation(ctx, client, namespace, storageclass, volumeSnapshot2, diskSize, false) + volHandle3 := pv3[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle3).NotTo(gomega.BeEmpty()) + + ginkgo.By("Creating VM") + 
vm2 := createVmServiceVmWithPvcs( + ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc2, pvc3}, vmi, storageClassName, secretName) + defer func() { + ginkgo.By("Deleting VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm2.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating loadbalancing service for ssh with the VM") + vmlbsvc2 := createService4Vm(ctx, vmopC, namespace, vm2.Name) + defer func() { + ginkgo.By("Deleting loadbalancing service for ssh with the VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachineService{ObjectMeta: metav1.ObjectMeta{ + Name: vmlbsvc2.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Wait for VM to come up and get an IP") + vmIp2, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm2.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait and verify PVCs are attached to the VM") + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm2, + []*v1.PersistentVolumeClaim{pvc2, pvc3})).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify PVCs are accessible to the VM") + ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity") + vm2, err = getVmsvcVM(ctx, vmopC, vm2.Namespace, vm2.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for i, vol := range vm2.Status.Volumes { + volFolder := formatNVerifyPvcIsAccessible(vol.DiskUuid, i+1, vmIp2) + verifyDataIntegrityOnVmDisk(vmIp2, volFolder) + } + + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated1, snapshotContentCreated1, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot1, pandoraSyncWaitTime, volHandle, snapshotId1, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated2, snapshotContentCreated2, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot2, pandoraSyncWaitTime, volHandle, snapshotId2, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* + Testcase-5 + Offline resize + PVC → VM → Snapshot → Delete VM → restore snapshot → Offline restored PVC resize → Create new VM → Snapshot + + Steps: + 1. Create a PVC using the storage class (storage policy) tagged to the supervisor namespace + 2. Wait for PVC to reach the Bound state. + 3. Create a VM service VM using the PVC created in step #1 + 4. Wait for the VM service to be up and in the powered-on state. + 5. Once the VM is up, verify that the volume is accessible inside the VM + 6. Write some data into the volume. + 7. Get VolumeSnapshotClass "volumesnapshotclass-delete" from supervisor cluster + 8. Take snapshots of the PVC created in step #1 + 9. Snapshot Verification: Execute and verify the steps mentioned in the Create snapshot mandatory checks + 10. Verify CNS metadata for a PVC created in step #1 + 11. Delete VM service VM created in step #3. + 12. Delete volume snapshot + 13. Perform offline volume resizing and verify that the operation went successfully. + 14. Create a new VM service VM with PVC created in step #1 + 15. Once the VM is up, verify that the volume is accessible and the filesystem on the volume has expanded. + 16. Perform online volume resizing and verify that the operation went successfully. + 17. Take a snapshot of the PVC created in step #1. + 18. 
Snapshot Verification: Execute and verify the steps mentioned in the Create snapshot mandatory checks + 19. Cleanup: Execute and verify the steps mentioned in the Delete snapshot mandatory checks + */ + + ginkgo.It("VMM5", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ginkgo.By("Create a storageclass") + storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create PVC") + pvc, pvs, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", + diskSize, storageclass, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volHandle := pvs[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + defer func() { + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating VM bootstrap data") + secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace) + defer func() { + ginkgo.By("Deleting VM bootstrap data") + err := client.CoreV1().Secrets(namespace).Delete(ctx, secretName, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating VM") + vm := createVmServiceVmWithPvcs( + ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc}, vmi, storageClassName, secretName) + defer func() { + ginkgo.By("Deleting VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating loadbalancing service for ssh with the VM") + vmlbsvc := createService4Vm(ctx, vmopC, namespace, vm.Name) + defer func() { + ginkgo.By("Deleting loadbalancing service for ssh with the VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachineService{ObjectMeta: metav1.ObjectMeta{ + Name: vmlbsvc.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Wait for VM to come up and get an IP") + vmIp, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait and verify PVCs are attached to the VM") + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm, + []*v1.PersistentVolumeClaim{pvc})).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify PVCs are accessible to the VM") + ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity") + vm, err = getVmsvcVM(ctx, vmopC, vm.Namespace, vm.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for i, vol := range vm.Status.Volumes { + volFolder := formatNVerifyPvcIsAccessible(vol.DiskUuid, i+1, vmIp) + verifyDataIntegrityOnVmDisk(vmIp, volFolder) + } + + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create volume snapshot-1 for the volume") + volumeSnapshot1, snapshotContent1, snapshotCreated1, + snapshotContentCreated1, snapshotId1, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, + volumeSnapshotClass, pvc, volHandle, diskSize, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated1 { + err 
= deleteVolumeSnapshotContent(ctx, snapshotContent1, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated1 { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot1.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot1.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Create a volume from a snapshot") + pvc2, pv2, _ := verifyVolumeRestoreOperation(ctx, client, namespace, storageclass, volumeSnapshot1, diskSize, false) + volHandle2 := pv2[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) + defer func() { + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvc2.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Expanding the current pvc") + currentPvcSize := pvc2.Spec.Resources.Requests[v1.ResourceStorage] + newSize := currentPvcSize.DeepCopy() + newSize.Add(resource.MustParse("4Gi")) + newDiskSize := "6Gi" + framework.Logf("currentPvcSize %v, newSize %v", currentPvcSize, newSize) + pvc2, err = expandPVCSize(pvc2, newSize, client) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(pvc2).NotTo(gomega.BeNil()) + pvcSize := pvc2.Spec.Resources.Requests[v1.ResourceStorage] + if pvcSize.Cmp(newSize) != 0 { + framework.Failf("error updating pvc size %q", pvc2.Name) + } + + ginkgo.By("Waiting for controller volume resize to finish") + err = waitForPvResizeForGivenPvc(pvc2, client, totalResizeWaitPeriod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volHandle2)) + queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volHandle2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if len(queryResult.Volumes) == 0 { + err = fmt.Errorf("queryCNSVolumeWithResult returned no volume") + } + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Verifying disk size requested in volume expansion is honored") + newSizeInMb := int64(6144) + if queryResult.Volumes[0].BackingObjectDetails.(*cnstypes.CnsBlockBackingDetails).CapacityInMb != + newSizeInMb { + err = fmt.Errorf("got wrong disk size after volume expansion +%v ", + queryResult.Volumes[0].BackingObjectDetails.(*cnstypes.CnsBlockBackingDetails).CapacityInMb) + } + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Creating VM") + vm2 := createVmServiceVmWithPvcs( + ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc2}, vmi, storageClassName, secretName) + defer func() { + ginkgo.By("Deleting VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm2.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating loadbalancing service for ssh with the VM") + vmlbsvc2 := createService4Vm(ctx, vmopC, namespace, vm2.Name) + defer func() { + ginkgo.By("Deleting loadbalancing service for ssh with the VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachineService{ObjectMeta: metav1.ObjectMeta{ + Name: vmlbsvc2.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Wait for VM to come up and get 
an IP") + vmIp2, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm2.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait and verify PVCs are attached to the VM") + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm2, + []*v1.PersistentVolumeClaim{pvc2})).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify PVCs are accessible to the VM") + ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity") + vm2, err = getVmsvcVM(ctx, vmopC, vm2.Namespace, vm2.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for i, vol := range vm2.Status.Volumes { + volFolder := formatNVerifyPvcIsAccessible(vol.DiskUuid, i+2, vmIp2) + verifyDataIntegrityOnVmDisk(vmIp2, volFolder) + } + + ginkgo.By("Create volume snapshot-2 for the volume") + volumeSnapshot2, snapshotContent2, snapshotCreated2, + snapshotContentCreated2, snapshotId2, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, + volumeSnapshotClass, pvc2, volHandle2, newDiskSize, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated2 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent2, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated2 { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot2.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot2.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Delete volume snapshot-1") + snapshotCreated1, snapshotContentCreated1, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot1, pandoraSyncWaitTime, volHandle, snapshotId1, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Delete volume snapshot-2") + snapshotCreated2, snapshotContentCreated2, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot2, pandoraSyncWaitTime, volHandle2, snapshotId2, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + }) + + /* + Testcase-6 + PVC-1, PVC-2 → VM-1(PVC-1), VM-2(PVC-2), VM-3(no vol attach) → VolS-1 (PVC-1), VolS-2(PVC-2) → + RestoreVol-1 (PVC-1), RestoreVol-2 (PVC-2)→ Attach VM-1 (PVC-1, RestoreVol-2) -> RestoreVol-1 Attach -> VM-3 + + Steps: + 1. Create 2 PVCs (PVC-1, PVC-2) using the storage class (storage policy) tagged to the supervisor namespace + 2. Wait for PVCs to reach the Bound state. + 3. Create two VM service VMs (VM-1, VM-2) such that VM-1 is attached to PVC-1, VM-2 is attached to PVC-2 + 4. Create VM-3 but not attched to any volume + 5. Wait for the VM service to be up and in the powered-on state. + 6. Once the VM is up, verify that the volume is accessible inside the VM + 7. Write some data to a file in PVC-1 from VM-1 and in PVC-2 from VM-2. + 8. Get VolumeSnapshotClass "volumesnapshotclass-delete" from supervisor cluster + 9. Take snapshots of the PVC-1 (vols-1) and PVC-2(vols-2) created in step #1 + 10. Snapshot Verification: Execute and verify the steps mentioned in the Create snapshot mandatory checks + 11. Create a new PVC using the snapshot created in step #8. (RestoreVol-2 from vols-2) + 12. Modify VM-1 spec to attach RestoreVol-2 to VM-1 + 13. 
Verify that RestoreVol-2 is accessible in VM-1 and can read and verify the + contents of the file written in step 6 from VM-1 + 14. Create a new PVC using the snapshot created in step #8. (RestoreVol-1 from vols-1) + 15. Attach RestoreVol-1 to VM-3 + 16. Verify reading writing data in the volume from VM3 + 17. Cleanup: Execute and verify the steps mentioned in the Delete snapshot mandatory checks + */ + + ginkgo.It("VMM6", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ginkgo.By("Create a storageclass") + storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create PVC1") + pvc1, pvs1, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", + diskSize, storageclass, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volHandle1 := pvs1[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle1).NotTo(gomega.BeEmpty()) + defer func() { + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvc1.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create PVC2") + pvc2, pvs2, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", + diskSize, storageclass, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volHandle2 := pvs2[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) + defer func() { + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvc2.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating VM bootstrap data") + secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace) + defer func() { + ginkgo.By("Deleting VM bootstrap data") + err := client.CoreV1().Secrets(namespace).Delete(ctx, secretName, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating VMs") + vm1 := createVmServiceVmWithPvcs( + ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc1}, vmi, storageClassName, secretName) + vm2 := createVmServiceVmWithPvcs( + ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc2}, vmi, storageClassName, secretName) + vm3 := createVmServiceVmWithPvcs( + ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{}, vmi, storageClassName, secretName) + defer func() { + ginkgo.By("Deleting VMs") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm1.Name, + Namespace: namespace, + }}) + if err != nil { + if strings.Contains(err.Error(), "not found") { + framework.Logf("virtualmachines.vmoperator.vmware.com not found") + } else { + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "VM deletion failed with error: %s", err) + } + } + + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm2.Name, + Namespace: namespace, + }}) + if err != nil { + if strings.Contains(err.Error(), "not found") { + framework.Logf("virtualmachines.vmoperator.vmware.com not found") + } else { + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "VM deletion failed with error: %s", err) + } + } + + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm3.Name, + Namespace: namespace, + }}) + if err != nil { + if 
strings.Contains(err.Error(), "not found") { + framework.Logf("virtualmachines.vmoperator.vmware.com not found") + } else { + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "VM deletion failed with error: %s", err) + } + } + }() + + ginkgo.By("Creating loadbalancing service for ssh with the VM") + vmlbsvc1 := createService4Vm(ctx, vmopC, namespace, vm1.Name) + vmlbsvc2 := createService4Vm(ctx, vmopC, namespace, vm2.Name) + vmlbsvc3 := createService4Vm(ctx, vmopC, namespace, vm3.Name) + defer func() { + ginkgo.By("Deleting loadbalancing services for ssh for the VMs") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachineService{ObjectMeta: metav1.ObjectMeta{ + Name: vmlbsvc1.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + err = vmopC.Delete(ctx, &vmopv1.VirtualMachineService{ObjectMeta: metav1.ObjectMeta{ + Name: vmlbsvc2.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + err = vmopC.Delete(ctx, &vmopv1.VirtualMachineService{ObjectMeta: metav1.ObjectMeta{ + Name: vmlbsvc3.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Wait for VMs to come up and get an IP") + vmIp1, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm1.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + vmIp2, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm2.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + vmIp3, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm3.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait and verify PVCs are attached to respective VMs") + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm1, + []*v1.PersistentVolumeClaim{pvc1})).To(gomega.Succeed()) + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm2, + []*v1.PersistentVolumeClaim{pvc2})).To(gomega.Succeed()) + + ginkgo.By("Verify PVCs are accessible to respective VMs") + vm1, err = getVmsvcVM(ctx, vmopC, vm1.Namespace, vm1.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + _ = formatNVerifyPvcIsAccessible(vm1.Status.Volumes[0].DiskUuid, 1, vmIp1) + vm2, err = getVmsvcVM(ctx, vmopC, vm2.Namespace, vm2.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + _ = formatNVerifyPvcIsAccessible(vm2.Status.Volumes[0].DiskUuid, 1, vmIp2) + + //TODO : Need to check writing textfile to local and later copying it to VM machine for both new VMs + + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create snapshot-1 for PVC1") + volumeSnapshot1, snapshotContent1, snapshotCreated1, + snapshotContentCreated1, snapshotId1, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, + volumeSnapshotClass, pvc1, volHandle1, diskSize, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated1 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent1, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated1 { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot1.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot1.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Create a dynamic volume snapshot-2 for the volume") + volumeSnapshot2, snapshotContent2, snapshotCreated2, + snapshotContentCreated2, snapshotId2, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, + volumeSnapshotClass, pvc2, volHandle2, diskSize, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated2 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent2, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated2 { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot2.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot2.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Create restorevol1 from snapshot1") + restorepvc1, restorepv1, _ := verifyVolumeRestoreOperation(ctx, client, namespace, + storageclass, volumeSnapshot1, diskSize, false) + restorevolHandle1 := restorepv1[0].Spec.CSI.VolumeHandle + gomega.Expect(restorevolHandle1).NotTo(gomega.BeEmpty()) + + defer func() { + err := fpv.DeletePersistentVolumeClaim(ctx, client, restorepvc1.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(restorevolHandle1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create restorevol2 from snapshot2") + restorepvc2, restorepv2, _ := verifyVolumeRestoreOperation(ctx, client, namespace, storageclass, + volumeSnapshot2, diskSize, false) + restorevolHandle2 := restorepv2[0].Spec.CSI.VolumeHandle + gomega.Expect(restorevolHandle2).NotTo(gomega.BeEmpty()) + defer func() { + err := fpv.DeletePersistentVolumeClaim(ctx, client, restorepvc2.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(restorevolHandle2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Modify VM1 spec to attach restore snapshot-2 to VM1") + vm1, err = getVmsvcVM(ctx, vmopC, vm1.Namespace, vm1.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = updateVmWithNewPvc(ctx, vmopC, vm1.Name, namespace, restorepvc2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait and verify restorevol2 is attached to the VM1") + vm1, err = getVmsvcVM(ctx, vmopC, vm1.Namespace, vm1.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm1, + []*v1.PersistentVolumeClaim{pvc1, restorepvc2})).To(gomega.Succeed()) + + ginkgo.By("Verify PVCs are accessible to the VM") + ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity") + // Refresh VM information + vm1, err = getVmsvcVM(ctx, vmopC, vm1.Namespace, vm1.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // Format the PVC and verify accessibility + volFolder := formatNVerifyPvcIsAccessible(vm1.Status.Volumes[0].DiskUuid, 2, vmIp1) + // Verify data integrity on the VM disk + verifyDataIntegrityOnVmDisk(vmIp1, volFolder) + + // TODO verifying restore volume data from VM1 + + ginkgo.By("Attach restore snapshot-1 to vm3") + vm3, err = getVmsvcVM(ctx, vmopC, vm3.Namespace, vm3.Name) // refresh vm info + 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = updateVmWithNewPvc(ctx, vmopC, vm3.Name, namespace, restorepvc1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait and verify restore snapshot-1 is attached to the VM3") + vm3, err = getVmsvcVM(ctx, vmopC, vm3.Namespace, vm3.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm3, + []*v1.PersistentVolumeClaim{restorepvc1})).To(gomega.Succeed()) + + ginkgo.By("Verify PVCs are accessible to the VM") + ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity") + vm3, err = getVmsvcVM(ctx, vmopC, vm3.Namespace, vm3.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for i, vol := range vm3.Status.Volumes { + volFolder := formatNVerifyPvcIsAccessible(vol.DiskUuid, i+1, vmIp3) + verifyDataIntegrityOnVmDisk(vmIp3, volFolder) + } + + ginkgo.By("Deleting VMs") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm1.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm2.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm3.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated1, snapshotContentCreated1, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot1, pandoraSyncWaitTime, volHandle1, snapshotId1, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated2, snapshotContentCreated2, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot2, pandoraSyncWaitTime, volHandle2, snapshotId2, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /*Testcase-7 + PVC → Pod → Snapshot → VM (PVC) → should fail → Delete Pod → VM (PVC) → should succeed → RestoreVol (PVC) → Pod + + Steps: + 1. Create a PVC using the storage class (storage policy) tagged to the supervisor namespace + 2. Wait for PVCs to reach the Bound state. + 3. Create a Pod using the PVC created in step #1. + 4. Wait for Pod to reach running state. + 5. Write some data in the volume from Pod. + 6. Get VolumeSnapshotClass "volumesnapshotclass-delete" from supervisor cluster + 7. Take a snapshot of the PVC created in step #1 + 8. Snapshot Verification: Execute and verify the steps mentioned in the Create snapshot mandatory checks + 9. Create a VM service VM using the above PVC. It should fail. + Verify that an appropriate error message should be displayed. + Delete the Pod created in step #3 to ensure the volume is detached from the Pod. + Try creating a VM service again using the PVC created above. + Wait for the VM service to be up and in the powered-on state. + Once the VM is up, verify that the volume is accessible inside the VM + Write some data to the volume from the VM. + Create a new PVC from the snapshot created in step #7. + Wait for PVC to reach Bound state. + Create a new Pod and attach it to the newly created volume. + Confirm that the Pod reaches the running state and that read and write operations can be performed on the volume. 
+ Cleanup: Execute and verify the steps mentioned in the Delete snapshot mandatory checks + */ + ginkgo.It("vmm7", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ginkgo.By("Create a storageclass") + storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create PVC") + pvclaim, pvs, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", + diskSize, storageclass, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volHandle := pvs[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + defer func() { + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create a Pod using the volume created above and write data into the volume") + pod, err := createPod(ctx, client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false, + execRWXCommandPod1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace)) + err = fpod.DeletePodWithWait(ctx, client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", + pvs[0].Spec.CSI.VolumeHandle, pod.Spec.NodeName)) + + var vmUUID string + var exists bool + + annotations := pod.Annotations + vmUUID, exists = annotations[vmUUIDLabel] + gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("Pod doesn't have %s annotation", vmUUIDLabel)) + _, err = e2eVSphere.getVMByUUID(ctx, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, pvs[0].Spec.CSI.VolumeHandle, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") + + ginkgo.By("Verify the volume is accessible and Read/write is possible") + output := readFileFromPod(namespace, pod.Name, filePathPod1) + gomega.Expect(strings.Contains(output, "Hello message from Pod1")).NotTo(gomega.BeFalse()) + + writeDataOnFileFromPod(namespace, pod.Name, filePathPod1, "Hello message from test into Pod1") + output = readFileFromPod(namespace, pod.Name, filePathPod1) + gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) + + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create a volume snapshot") + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvclaim, volHandle, diskSize, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = 
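+ // NOTE (assumption): snapshotCreated/snapshotContentCreated act as cleanup guards; the
+ // explicit deleteVolumeSnapshot call at the end of this test is expected to flip them to
+ // false, so this deferred block only kicks in if the test exits before reaching it.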
waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Verify volume metadata for deployment pod, pvc and pv") + err = waitAndVerifyCnsVolumeMetadata(ctx, volHandle, pvclaim, pvs[0], pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Creating VM bootstrap data") + secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace) + defer func() { + ginkgo.By("Deleting VM bootstrap data") + err := client.CoreV1().Secrets(namespace).Delete(ctx, secretName, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + ginkgo.By("Creating VM") + vm := createVmServiceVmWithPvcs( + ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{}, vmi, storageClassName, secretName) + defer func() { + ginkgo.By("Deleting VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating loadbalancing service for ssh with the VM") + vmlbsvc := createService4Vm(ctx, vmopC, namespace, vm.Name) + defer func() { + ginkgo.By("Deleting loadbalancing services for ssh for the VMs") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachineService{ObjectMeta: metav1.ObjectMeta{ + Name: vmlbsvc.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Wait for VMs to come up and get an IP") + vmIp, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("edit vm spec and try to attach pvclaim to vm1, which should fail") + vm, err = getVmsvcVM(ctx, vmopC, vm.Namespace, vm.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + vm.Spec.Volumes = []vmopv1.VirtualMachineVolume{{Name: pvclaim.Name, + PersistentVolumeClaim: &vmopv1.PersistentVolumeClaimVolumeSource{ + PersistentVolumeClaimVolumeSource: v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name}, + }}} + err = vmopC.Update(ctx, vm) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = wait4PvcAttachmentFailure(ctx, vmopC, vm, pvclaim) + gomega.Expect(err).To(gomega.HaveOccurred()) + + ginkgo.By("delete pod") + deletePodsAndWaitForVolsToDetach(ctx, client, []*v1.Pod{pod}, true) + + ginkgo.By("retry to attach pvc2 to vm1 and verify it is accessible from vm1") + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm, + []*v1.PersistentVolumeClaim{pvclaim})).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify PVCs are accessible to the VM") + ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity") + vm, err = getVmsvcVM(ctx, vmopC, vm.Namespace, vm.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for i, vol := range vm.Status.Volumes { + volFolder := formatNVerifyPvcIsAccessible(vol.DiskUuid, i+1, vmIp) + verifyDataIntegrityOnVmDisk(vmIp, volFolder) + } + + ginkgo.By("Create volume using the snapshot") + pvclaim2, pvs2, pod2 := verifyVolumeRestoreOperation(ctx, client, namespace, storageclass, + volumeSnapshot, diskSize, true) + volHandle2 := pvs2[0].Spec.CSI.VolumeHandle + defer func() { + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod2.Name, namespace)) + err = fpod.DeletePodWithWait(ctx, client, pod2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + err := 
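+ // NOTE (assumption): verifyVolumeRestoreOperation is assumed to create a PVC with the
+ // volume snapshot as its data source, wait for it to bind at the requested diskSize, and,
+ // when its last argument is true, also create a pod on the restored volume and return it
+ // (hence pod2 above), whereas the restore-only callers pass false and discard that value.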
fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim2.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Verify volume metadata for deployment pod, pvc and pv") + err = waitAndVerifyCnsVolumeMetadata(ctx, volHandle2, pvclaim2, pvs2[0], pod2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* + Testcase-9 + Power on-off VM + + Workflow Path: + + PVC → VM1 → Snapshot → Power-off VM1 → VM2 (PVC) → should fail → RestoreVol → VM2(RestoreVol) → should succeed + 1. Create a PVC using the storage class (storage policy) tagged to the supervisor namespace + 2. Wait for PVC to reach the Bound state. + 3. Create a VM service VM (VM-1) and attach it to the PVC created in step #1. + 4. Wait for the VM service to be up and in the powered-on state. + 5. Once the VM is up, verify that the volume is accessible inside the VM + 6. Write some data into the volume. + 7. Get VolumeSnapshotClass "volumesnapshotclass-delete" from supervisor cluster + 8. Take a snapshot of the PVC created in step #1. + 9. Snapshot Verification: Execute and verify the steps mentioned in the Create snapshot mandatory checks + 10. Now, Power off VM-1 + 11. Create a new VM service VM (VM-2) and attach it to the PVC created in step #1, it + should fail. Verify an appropriate error message should be displayed. + 12. Create a new PVC from the snapshot created in step #8 + 13. Edit the VM-2 spec and attach it to the volume created in step #12. + 14. Wait for the VM service to be up and in the powered-on state. + 15. Once the VM is up, verify that the volume is accessible inside the VM + 16. Write some data into the volume. + 17. Power on VM-1 and verify it comes up fine + 18. 
Cleanup: Execute and verify the steps mentioned in the Delete snapshot mandatory checks + */ + + ginkgo.It("VMM9", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ginkgo.By("Create a storageclass") + storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create PVC") + pvc, pvs, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", + diskSize, storageclass, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volHandle := pvs[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + defer func() { + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating VM bootstrap data") + secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace) + defer func() { + ginkgo.By("Deleting VM bootstrap data") + err := client.CoreV1().Secrets(namespace).Delete(ctx, secretName, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + ginkgo.By("Creating VMs") + vm1 := createVmServiceVmWithPvcs( + ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc}, vmi, storageClassName, secretName) + vm2 := createVmServiceVmWithPvcs( + ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{}, vmi, storageClassName, secretName) + defer func() { + ginkgo.By("Deleting VMs") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm1.Name, + Namespace: namespace, + }}) + }() + + ginkgo.By("Creating loadbalancing service for ssh with the VM") + vmlbsvc1 := createService4Vm(ctx, vmopC, namespace, vm1.Name) + vmlbsvc2 := createService4Vm(ctx, vmopC, namespace, vm2.Name) + defer func() { + ginkgo.By("Deleting loadbalancing services for ssh for the VMs") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachineService{ObjectMeta: metav1.ObjectMeta{ + Name: vmlbsvc1.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = vmopC.Delete(ctx, &vmopv1.VirtualMachineService{ObjectMeta: metav1.ObjectMeta{ + Name: vmlbsvc2.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Wait for VMs to come up and get an IP") + vmIp1, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm1.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + vmIp2, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm2.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait and verify PVC is attached to the VM1") + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm1, + []*v1.PersistentVolumeClaim{pvc})).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify PVCs are accessible to the VM") + ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity") + vm1, err = getVmsvcVM(ctx, vmopC, vm1.Namespace, vm1.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for i, vol := range vm1.Status.Volumes { + volFolder := formatNVerifyPvcIsAccessible(vol.DiskUuid, i+1, vmIp1) + verifyDataIntegrityOnVmDisk(vmIp1, volFolder) + } + + // ginkgo.By("write some data to a file in pvc from vm1") + // rand.New(rand.NewSource(time.Now().Unix())) + // testdataFile := fmt.Sprintf("/tmp/testdata_%v_%v", time.Now().Unix(), 
rand.Intn(1000)) + // framework.Logf("Creating a 100mb test data file %v", testdataFile) + // op, err := exec.Command( + // "bash", "-c", "dd if=/dev/urandom bs=1M count=1 | tr -dc 'a-zA-Z0-9' >"+testdataFile).Output() + // fmt.Println(op) + // gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // op, err = exec.Command("md5sum", testdataFile).Output() + // fmt.Println("md5sum", string(op[:])) + // gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // op, err = exec.Command("ls", "-l", testdataFile).Output() + // fmt.Println(string(op[:])) + // gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // defer func() { + // op, err = exec.Command("rm", "-f", testdataFile).Output() + // fmt.Println(op) + // gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // }() + // framework.Logf("Copying test data file to VM") + // copyFileToVm(vmIp1, testdataFile, volFolder+"/f1") + + // _ = execSshOnVmThroughGatewayVm(vmIp1, + // []string{"ls -l " + volFolder + "/f1", "md5sum " + volFolder + "/f1", "sync"}) + + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create a dynamic volume snapshot") + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvc, volHandle, diskSize, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Power off vm1") + vm1 = setVmPowerState(ctx, vmopC, vm1, vmopv1.VirtualMachinePoweredOff) + vm1, err = wait4Vm2ReachPowerStateInSpec(ctx, vmopC, vm1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + vm2, err = getVmsvcVM(ctx, vmopC, vm2.Namespace, vm2.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("edit vm2 spec and try to attach pvc to vm2, which should fail") + vm2.Spec.Volumes = append(vm2.Spec.Volumes, vmopv1.VirtualMachineVolume{Name: pvc.Name, + PersistentVolumeClaim: &vmopv1.PersistentVolumeClaimVolumeSource{ + PersistentVolumeClaimVolumeSource: v1.PersistentVolumeClaimVolumeSource{ClaimName: pvc.Name}, + }}) + err = vmopC.Update(ctx, vm2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = wait4PvcAttachmentFailure(ctx, vmopC, vm2, pvc) + gomega.Expect(err).To(gomega.HaveOccurred()) + + ginkgo.By("Removing the volume attached to Vm1 from vm2") + vm2, err = getVmsvcVM(ctx, vmopC, vm2.Namespace, vm2.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + vm2.Spec.Volumes = nil + err = vmopC.Update(ctx, vm2) + vm2, err = getVmsvcVM(ctx, vmopC, vm2.Namespace, vm2.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create a volume from a snapshot") + pvc2, pv2, _ := verifyVolumeRestoreOperation(ctx, client, namespace, storageclass, 
volumeSnapshot, diskSize, false) + volHandle2 := pv2[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) + defer func() { + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvc2.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("modify vm2 spec to attach restore volume to vm2") + vm2, err = getVmsvcVM(ctx, vmopC, vm2.Namespace, vm2.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + vm2.Spec.Volumes = append(vm2.Spec.Volumes, vmopv1.VirtualMachineVolume{Name: pvc2.Name, + PersistentVolumeClaim: &vmopv1.PersistentVolumeClaimVolumeSource{ + PersistentVolumeClaimVolumeSource: v1.PersistentVolumeClaimVolumeSource{ClaimName: pvc2.Name}, + }}) + err = vmopC.Update(ctx, vm2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + ginkgo.By("Deleting VMs") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm2.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Wait and verify PVC2 is attached to the VM2") + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm2, + []*v1.PersistentVolumeClaim{pvc2})).To(gomega.Succeed()) + vm2, err = getVmsvcVM(ctx, vmopC, vm2.Namespace, vm2.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify PVCs are accessible to the VM") + ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity") + vm2, err = getVmsvcVM(ctx, vmopC, vm2.Namespace, vm2.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for i, vol := range vm2.Status.Volumes { + volFolder := formatNVerifyPvcIsAccessible(vol.DiskUuid, i+1, vmIp2) + verifyDataIntegrityOnVmDisk(vmIp2, volFolder) + } + + // ginkgo.By("verify data in restore volume from vm2") + // framework.Logf("Mounting the volume") + // volFolder = mountFormattedVol2Vm(vm2.Status.Volumes[0].DiskUuid, 1, vmIp2) + // vmFileData := fmt.Sprintf("/tmp/vmdata_%v_%v", time.Now().Unix(), rand.Intn(1000)) + // _ = execSshOnVmThroughGatewayVm(vmIp2, []string{"md5sum " + volFolder + "/f1"}) + // framework.Logf("Fetching file from the VM") + // copyFileFromVm(vmIp2, volFolder+"/f1", vmFileData) + // defer func() { + // c := []string{"rm", "-f", vmFileData} + // op, err = exec.Command(c[0], c[1:]...).Output() + // framework.Logf("Command: %c, output: %v", c, op) + // gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // }() + // framework.Logf("Comparing file fetched from the VM with test data file") + // c := []string{"md5sum", testdataFile, vmFileData} + // op, err = exec.Command(c[0], c[1:]...).Output() + // framework.Logf("Command: %c, output: %v", c, op) + // lines := strings.Split(string(op[:]), "\n") + // gomega.Expect(strings.Fields(lines[0])[0]).To(gomega.Equal(strings.Fields(lines[1])[0])) + + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* + Testcase-8 + + vSAN-Health and SPS service down + + Workflow Path: + PVC → VM → Multiple Snapshots(vols-1, vols-2, vols-3) and in parallel vSAN Health service down → + snapshot should not pass → start vsan-health service → Snapshot Verification → + Restore 
Vols(ResVol-1, ResVol-2, ResVol-3) → New VM(ResVol-1, ResVol-2) → + sps service down → Attach RestoreVol (ResVol-3)) → + new VM → should not pass → bring up services → volume attachment should pass + + 1. Create a PVC using the storage class (storage policy) tagged to the supervisor namespace + 2. Wait for PVC to reach the Bound state. + 3. Create a VM service VM using the PVC created in step #1 + 4. Wait for the VM service to be up and in the powered-on state. + 5. Once the VM is up, verify that the volume is accessible inside the VM + 6. Write some data into the volume. + 7. Get VolumeSnapshotClass "volumesnapshotclass-delete" from supervisor cluster + 8. Create multiple volume snapshots(vols-1, vols-2, vols-3) for the PVC created in step #1. + 9. Stop the vSAN Health service while a snapshot creation operation is in progress. + 10. Snapshot creation readyToUse status should be 'false' and creation should be stuck. Verify the error message. + 11. Bring up the vsan-health service. + 12. Snapshot Verification: Execute and verify the steps mentioned in the Create snapshot mandatory checks + 13. Create PVCs (ResVol-1, ResVol-2, ResVol-3) from the snapshot created in step #8. + 14. Wait for PVCs to reach the Bound state. + 15. Create a VM service VM using the restored PVCs (ResVol-1, ResVol-2). + 16. Wait for the VM service to be up and in the powered-on state. + 17. Once the VM is up, verify that the volume is accessible inside the VM + 18. Write some data into the volume. + 19. Now bring down the SPS service + 20. Attach a restored volume (ResVol-3) to the VM service created in step #15. + 21. Verify that the attachment is stuck and does not go through + 22. Bring up the SPS service. + 23. Wait for ResVol-3 to get attached and verify volume is accessible to the VM created in step #15. + 24. Perform read/write operation on the volume from VM. + 25. 
Cleanup: Execute and verify the steps mentioned in the Delete snapshot mandatory checks + */ + + ginkgo.It("VMM8", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + volumeOpsScale := 3 + var snapshotIds []string + volumesnapshots := make([]*snapV1.VolumeSnapshot, volumeOpsScale) + snapshotContents := make([]*snapV1.VolumeSnapshotContent, volumeOpsScale) + + ginkgo.By("Create a storageclass") + storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create PVC") + pvc, pvs, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", + diskSize, storageclass, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volHandle := pvs[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + defer func() { + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By(fmt.Sprintln("Stopping vsan-health on the vCenter host")) + isVsanHealthServiceStopped = true + err = invokeVCenterServiceControl(ctx, stopOperation, vsanhealthServiceName, vcAddress) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow vsan-health to completely shutdown", + vsanHealthServiceWaitTime)) + time.Sleep(time.Duration(vsanHealthServiceWaitTime) * time.Second) + + ginkgo.By("Create a volume snapshot") + framework.Logf("VOLUME_OPS_SCALE is set to %v", volumeOpsScale) + for i := 0; i < volumeOpsScale; i++ { + framework.Logf("Creating snapshot %v", i) + volumesnapshots[i], _ = snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx, + getVolumeSnapshotSpec(namespace, volumeSnapshotClass.Name, pvc.Name), metav1.CreateOptions{}) + framework.Logf("Volume snapshot name is : %s", volumesnapshots[i].Name) + } + + ginkgo.By(fmt.Sprintln("Starting vsan-health on the vCenter host")) + startVCServiceWait4VPs(ctx, vcAddress, vsanhealthServiceName, &isVsanHealthServiceStopped) + + ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow full sync finish", pandoraSyncWaitTime)) + time.Sleep(time.Duration(pandoraSyncWaitTime) * time.Second) + + for i := 0; i < volumeOpsScale; i++ { + ginkgo.By("Verify volume snapshot is created") + volumesnapshots[i], err = waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, volumesnapshots[i].Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(volumesnapshots[i].Status.RestoreSize.Cmp(resource.MustParse(diskSize))).To(gomega.BeZero()) + + ginkgo.By("Verify volume snapshot content is created") + snapshotContents[i], err = snapc.SnapshotV1().VolumeSnapshotContents().Get(ctx, + *volumesnapshots[i].Status.BoundVolumeSnapshotContentName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(*snapshotContents[i].Status.ReadyToUse).To(gomega.BeTrue()) + + framework.Logf("Get volume snapshot ID from snapshot handle") + snapshotId, _, err := getVolumeSnapshotIdFromSnapshotHandle(ctx, snapshotContents[i]) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + snapshotIds = append(snapshotIds, snapshotId) + } + defer func() { + for i := 0; i < 
volumeOpsScale; i++ {
+ ginkgo.By("Delete dynamic volume snapshot")
+ _, _, err = deleteVolumeSnapshot(ctx, snapc, namespace,
+ volumesnapshots[i], pandoraSyncWaitTime, volHandle, snapshotIds[i], true)
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ }
+ }()
+
+ ginkgo.By("Create volume-1 from snapshot-1")
+ pvc1, pv1, _ := verifyVolumeRestoreOperation(ctx, client, namespace, storageclass, volumesnapshots[0],
+ diskSize, false)
+ volHandle1 := pv1[0].Spec.CSI.VolumeHandle
+ gomega.Expect(volHandle1).NotTo(gomega.BeEmpty())
+ defer func() {
+ err := fpv.DeletePersistentVolumeClaim(ctx, client, pvc1.Name, namespace)
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle1)
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ }()
+
+ ginkgo.By("Create volume-2 from snapshot-2")
+ pvc2, pv2, _ := verifyVolumeRestoreOperation(ctx, client, namespace, storageclass, volumesnapshots[1],
+ diskSize, false)
+ volHandle2 := pv2[0].Spec.CSI.VolumeHandle
+ gomega.Expect(volHandle2).NotTo(gomega.BeEmpty())
+ defer func() {
+ err := fpv.DeletePersistentVolumeClaim(ctx, client, pvc2.Name, namespace)
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle2)
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ }()
+
+ ginkgo.By("Create volume-3 from snapshot-3")
+ pvc3, pv3, _ := verifyVolumeRestoreOperation(ctx, client, namespace, storageclass, volumesnapshots[2],
+ diskSize, false)
+ volHandle3 := pv3[0].Spec.CSI.VolumeHandle
+ gomega.Expect(volHandle3).NotTo(gomega.BeEmpty())
+ defer func() {
+ err := fpv.DeletePersistentVolumeClaim(ctx, client, pvc3.Name, namespace)
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle3)
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ }()
+
+ ginkgo.By("Creating VM bootstrap data")
+ secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace)
+ defer func() {
+ ginkgo.By("Deleting VM bootstrap data")
+ err := client.CoreV1().Secrets(namespace).Delete(ctx, secretName, *metav1.NewDeleteOptions(0))
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ }()
+
+ ginkgo.By(fmt.Sprintf("Stopping %v on the vCenter host", spsServiceName))
+ isSPSserviceStopped = true
+ err = invokeVCenterServiceControl(ctx, stopOperation, spsServiceName, vcAddress)
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ err = waitVCenterServiceToBeInState(ctx, spsServiceName, vcAddress, svcStoppedMessage)
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ defer func() {
+ if isSPSserviceStopped {
+ ginkgo.By(fmt.Sprintf("Starting %v on the vCenter host", spsServiceName))
+ startVCServiceWait4VPs(ctx, vcAddress, spsServiceName, &isSPSserviceStopped)
+ }
+ }()
+
+ ginkgo.By("Creating VM-1 and VM-2")
+ vm1 := createVmServiceVmWithPvcs(
+ ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc1, pvc2}, vmi, storageClassName, secretName)
+ vm2 := createVmServiceVmWithPvcs(
+ ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc3}, vmi, storageClassName, secretName)
+ defer func() {
+ ginkgo.By("Deleting VM-1")
+ err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{
+ Name: vm1.Name,
+ Namespace: namespace,
+ }})
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+ ginkgo.By("Deleting VM-2")
+ err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{
+ Name: vm2.Name,
+ Namespace: namespace,
+ }})
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ }()
+
+ ginkgo.By("Verifying vm1 and vm2 are
stuck in creation") + framework.Logf("sleeping for a min...") + time.Sleep(time.Minute) + vm1, err = getVmsvcVM(ctx, vmopC, vm1.Namespace, vm1.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(vm1.Status.PowerState).NotTo(gomega.Equal(vmopv1.VirtualMachinePoweredOn)) + + vm2, err = getVmsvcVM(ctx, vmopC, vm2.Namespace, vm2.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(vm2.Status.PowerState).NotTo(gomega.Equal(vmopv1.VirtualMachinePoweredOn)) + + ginkgo.By(fmt.Sprintf("Starting %v on the vCenter host", spsServiceName)) + startVCServiceWait4VPs(ctx, vcAddress, spsServiceName, &isSPSserviceStopped) + + ginkgo.By("Creating loadbalancing service for ssh with the VM") + vmlbsvc1 := createService4Vm(ctx, vmopC, namespace, vm1.Name) + vmlbsvc2 := createService4Vm(ctx, vmopC, namespace, vm2.Name) + defer func() { + ginkgo.By("Deleting loadbalancing services for ssh for the VMs") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachineService{ObjectMeta: metav1.ObjectMeta{ + Name: vmlbsvc1.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Deleting loadbalancing services for ssh for the VMs") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachineService{ObjectMeta: metav1.ObjectMeta{ + Name: vmlbsvc2.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Wait for VMs to come up and get an IP") + vmIp1, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm1.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + vmIp2, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm2.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify pvc1 and pvc2 is attached to VM1") + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm1, + []*v1.PersistentVolumeClaim{pvc1, pvc2})).To(gomega.Succeed()) + + ginkgo.By("Verify pvc3 is attached to VM2") + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm2, + []*v1.PersistentVolumeClaim{pvc3})).To(gomega.Succeed()) + + ginkgo.By("Verify pvc1 and pvc2 is accessible to VM1") + vm1, err = getVmsvcVM(ctx, vmopC, vm1.Namespace, vm1.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + _ = formatNVerifyPvcIsAccessible(vm1.Status.Volumes[0].DiskUuid, 1, vmIp1) + _ = formatNVerifyPvcIsAccessible(vm1.Status.Volumes[1].DiskUuid, 1, vmIp1) + + ginkgo.By("Verify pvc3 is accessible to VM2") + vm2, err = getVmsvcVM(ctx, vmopC, vm2.Namespace, vm2.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + _ = formatNVerifyPvcIsAccessible(vm2.Status.Volumes[0].DiskUuid, 1, vmIp2) + }) + + /* + Testcase-10 + vMotion volume from one DS to another + + Workflow Path: + PVC-1, PVC-2 → VM-1(PVC-1), VM-2(PVC-2) → Snapshot (volS-1,volS-2) → Relocate PVC-1 → + Snapshot of relocated PVC-1 → RestoreVol (volS-1) → new VM + + 1. Create 2 PVCs (PVC-1, PVC-2) using the storage class (storage policy) tagged to the supervisor namespace + 2. Wait for PVCs to reach the Bound state. + 3. Create 2 VM service VMs. Attach VM-1 to PVC-1, VM-2 to PVC-2. + 4. Wait until the VM service is up and in a powered-on state + 5. Once the VM is up, verify that the volume is accessible inside the VM + 6. Write some data into the volume. + 7. Get VolumeSnapshotClass "volumesnapshotclass-delete" from supervisor cluster + 8. Take snapshots (vols-1, vols-2) of the PVCs created in step #1. + 9. 
Snapshot Verification: Execute and verify the steps mentioned in the Create snapshot mandatory checks + 10. Relocate PVC-1 to a different datastore. + 11. Verify that PVC-1 is still accessible. + 12. Take a snapshot of relocated PVC-1. + 13. Snapshot Verification: Execute and verify the steps mentioned in the Create snapshot mandatory checks + 14. Create a new PVC from the volume snapshot created in step #12 + 15. Wait for PVC to reach Bound state. + 16. Create a new VM and attach it to the restored volume. + 17. Wait until the VM service is up and in a powered-on state + 18. Once the VM is up, verify that the volume is accessible inside the VM + 19. Perform read write operation in the volume. + Cleanup: Execute and verify the steps mentioned in the Delete snapshot mandatory checks + */ + + ginkgo.It("VMM10", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ginkgo.By("Create a storageclass") + storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create PVC-1") + pvc1, pvs1, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", + diskSize, storageclass, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volHandle1 := pvs1[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle1).NotTo(gomega.BeEmpty()) + defer func() { + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvc1.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create PVC-2") + pvc2, pvs2, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", + diskSize, storageclass, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volHandle2 := pvs2[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) + defer func() { + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvc2.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating VM bootstrap data") + secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace) + defer func() { + ginkgo.By("Deleting VM bootstrap data") + err := client.CoreV1().Secrets(namespace).Delete(ctx, secretName, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + ginkgo.By("Creating VMs") + vm1 := createVmServiceVmWithPvcs( + ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc1}, vmi, storageClassName, secretName) + vm2 := createVmServiceVmWithPvcs( + ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc2}, vmi, storageClassName, secretName) + defer func() { + ginkgo.By("Deleting VMs") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm1.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm2.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating loadbalancing service for ssh with the VM") + vmlbsvc1 := createService4Vm(ctx, vmopC, namespace, vm1.Name) + vmlbsvc2 := createService4Vm(ctx, vmopC, namespace, vm2.Name) + defer func() { + ginkgo.By("Deleting loadbalancing services for ssh for the VMs") + err = 
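+ // NOTE (assumption): createService4Vm is assumed to create a LoadBalancer-type
+ // VirtualMachineService exposing ssh on the VM, which the later
+ // formatNVerifyPvcIsAccessible/verifyDataIntegrityOnVmDisk calls rely on to reach the
+ // guest via vmIp1/vmIp2; this deferred block removes those services.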
vmopC.Delete(ctx, &vmopv1.VirtualMachineService{ObjectMeta: metav1.ObjectMeta{ + Name: vmlbsvc1.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = vmopC.Delete(ctx, &vmopv1.VirtualMachineService{ObjectMeta: metav1.ObjectMeta{ + Name: vmlbsvc2.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Wait for VMs to come up and get an IP") + vmIp1, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm1.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + vmIp2, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm2.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait and verify PVCs are attached to respective VMs") + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm1, + []*v1.PersistentVolumeClaim{pvc1})).To(gomega.Succeed()) + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm2, + []*v1.PersistentVolumeClaim{pvc2})).To(gomega.Succeed()) + + ginkgo.By("Verify PVCs are accessible to respective VMs") + vm1, err = getVmsvcVM(ctx, vmopC, vm1.Namespace, vm1.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + _ = formatNVerifyPvcIsAccessible(vm1.Status.Volumes[0].DiskUuid, 1, vmIp1) + vm2, err = getVmsvcVM(ctx, vmopC, vm2.Namespace, vm2.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + _ = formatNVerifyPvcIsAccessible(vm2.Status.Volumes[0].DiskUuid, 1, vmIp2) + + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create snapshot-1 for pvc-1") + volumeSnapshot1, snapshotContent1, snapshotCreated1, + snapshotContentCreated1, snapshotId1, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, + volumeSnapshotClass, pvc1, volHandle1, diskSize, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated1 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent1, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated1 { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot1.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot1.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Create snapshot-2 for pvc-2") + volumeSnapshot2, snapshotContent2, snapshotCreated2, + snapshotContentCreated2, snapshotId2, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, + volumeSnapshotClass, pvc2, volHandle2, diskSize, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated2 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent2, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated2 { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot2.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot2.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + 
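+ // NOTE (assumption): the *WithPandoraWait helpers are assumed to wait an extra
+ // pandoraSyncWaitTime after issuing the delete so that CNS/FCD metadata sync completes
+ // before the VolumeSnapshotContent is polled for removal.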
} + }() + ginkgo.By("Delete snapshot-1") + snapshotCreated1, snapshotContentCreated1, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot1, pandoraSyncWaitTime, volHandle1, snapshotId1, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Delete snapshot-2") + snapshotCreated2, snapshotContentCreated2, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot2, pandoraSyncWaitTime, volHandle2, snapshotId2, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* + Workflow Path: + + PVC-1, PVC-2 → VM-1(PVC-1), VM-2(PVC-2) → Snapshot (volS-1,volS-2 of PVC-1, pvc-2) → + RestoreVol (PVC-3, PVC-4) → Attach RestoreVol1 (VM-1), RestoreVol2 (VM-2) → verify data + + 1. Create 2 PVCs (PVC-1, PVC-2) using the storage class (storage policy) tagged to the supervisor namespace + 2. Wait for PVCs to reach the Bound state. + 3. Create 2 VM service VMs. Attach VM-1 to PVC-1, VM-2 to PVC-2. + 4. Wait until the VM service is up and in a powered-on state + 5. Once the VM is up, verify that the volume is accessible inside the VM + 6. Write some data to PVC-1 from VM-1. + 7. Get VolumeSnapshotClass "volumesnapshotclass-delete" from supervisor cluster + 8. Take 2 snapshots (vols-1, vols-2) of PVC-1 + 9. Snapshot Verification: Execute and verify the steps mentioned in the Create snapshot mandatory checks + 10. Create a new PVC (PVC-3, PVC-4) from the snapshots created in step #8. + Attach PVC-3 to VM-1 and PVC-4 to VM-2 + Read and verify data in PVC-3 and PVC-4 + Write fresh data on PVC-3 and PVC-4 + Cleanup: Execute and verify the steps mentioned in the Delete snapshot mandatory checks + */ + ginkgo.It("VMM11", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ginkgo.By("Create a storageclass") + storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create PVC1") + pvc1, pvs1, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", + diskSize, storageclass, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volHandle1 := pvs1[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle1).NotTo(gomega.BeEmpty()) + defer func() { + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvc1.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create PVC2") + pvc2, pvs2, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", + diskSize, storageclass, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volHandle2 := pvs2[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) + defer func() { + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvc2.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating VM bootstrap data") + secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace) + defer func() { + ginkgo.By("Deleting VM bootstrap data") + err := client.CoreV1().Secrets(namespace).Delete(ctx, secretName, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating VMs") + vm1 := createVmServiceVmWithPvcs( + ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc1}, vmi, storageClassName, 
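+ // NOTE (assumption): createVmServiceVmWithPvcs is assumed to build a vmopv1.VirtualMachine
+ // referencing the given VM class, the vmi image, this storage class and the cloud-init
+ // bootstrap secret, attaching each listed PVC as a spec volume; an empty PVC slice (as used
+ // in some tests above) creates the VM with no data disks.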
secretName) + vm2 := createVmServiceVmWithPvcs( + ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc2}, vmi, storageClassName, secretName) + defer func() { + ginkgo.By("Deleting VMs") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm1.Name, + Namespace: namespace, + }}) + if err != nil { + if strings.Contains(err.Error(), "not found") { + framework.Logf("virtualmachines.vmoperator.vmware.com not found") + } else { + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "VM deletion failed with error: %s", err) + } + } + + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm2.Name, + Namespace: namespace, + }}) + if err != nil { + if strings.Contains(err.Error(), "not found") { + framework.Logf("virtualmachines.vmoperator.vmware.com not found") + } else { + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "VM deletion failed with error: %s", err) + } + } + }() + + ginkgo.By("Creating loadbalancing service for ssh with the VM") + vmlbsvc1 := createService4Vm(ctx, vmopC, namespace, vm1.Name) + vmlbsvc2 := createService4Vm(ctx, vmopC, namespace, vm2.Name) + defer func() { + ginkgo.By("Deleting loadbalancing services for ssh for the VMs") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachineService{ObjectMeta: metav1.ObjectMeta{ + Name: vmlbsvc1.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + err = vmopC.Delete(ctx, &vmopv1.VirtualMachineService{ObjectMeta: metav1.ObjectMeta{ + Name: vmlbsvc2.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Wait for VMs to come up and get an IP") + vmIp1, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm1.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + vmIp2, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm2.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait and verify PVCs are attached to respective VMs") + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm1, + []*v1.PersistentVolumeClaim{pvc1})).To(gomega.Succeed()) + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm2, + []*v1.PersistentVolumeClaim{pvc2})).To(gomega.Succeed()) + + ginkgo.By("Verify PVCs are accessible to respective VMs") + vm1, err = getVmsvcVM(ctx, vmopC, vm1.Namespace, vm1.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + _ = formatNVerifyPvcIsAccessible(vm1.Status.Volumes[0].DiskUuid, 1, vmIp1) + vm2, err = getVmsvcVM(ctx, vmopC, vm2.Namespace, vm2.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + _ = formatNVerifyPvcIsAccessible(vm2.Status.Volumes[0].DiskUuid, 1, vmIp2) + + //TODO : Need to check writing textfile to local and later copying it to VM machine for both new VMs + + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create snapshot-1 for PVC1") + volumeSnapshot1, snapshotContent1, snapshotCreated1, + snapshotContentCreated1, snapshotId1, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, + volumeSnapshotClass, pvc1, volHandle1, diskSize, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated1 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent1, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated1 { + 
framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot1.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot1.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Create a dynamic volume snapshot-2 for the volume") + volumeSnapshot2, snapshotContent2, snapshotCreated2, + snapshotContentCreated2, snapshotId2, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, + volumeSnapshotClass, pvc2, volHandle2, diskSize, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated2 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent2, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated2 { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot2.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot2.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Create restorevol1 from snapshot1") + restorepvc1, restorepv1, _ := verifyVolumeRestoreOperation(ctx, client, namespace, storageclass, + volumeSnapshot1, diskSize, false) + restorevolHandle1 := restorepv1[0].Spec.CSI.VolumeHandle + gomega.Expect(restorevolHandle1).NotTo(gomega.BeEmpty()) + defer func() { + err := fpv.DeletePersistentVolumeClaim(ctx, client, restorepvc1.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(restorevolHandle1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create restorevol2 from snapshot2") + restorepvc2, restorepv2, _ := verifyVolumeRestoreOperation(ctx, client, namespace, storageclass, + volumeSnapshot2, diskSize, false) + restorevolHandle2 := restorepv2[0].Spec.CSI.VolumeHandle + gomega.Expect(restorevolHandle2).NotTo(gomega.BeEmpty()) + defer func() { + err := fpv.DeletePersistentVolumeClaim(ctx, client, restorepvc2.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(restorevolHandle2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Modify VM1 spec to attach restore snapshot-2 to VM1") + vm1, err = getVmsvcVM(ctx, vmopC, vm1.Namespace, vm1.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = updateVmWithNewPvc(ctx, vmopC, vm1.Name, namespace, restorepvc2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait and verify restorevol2 is attached to the VM1") + vm1, err = getVmsvcVM(ctx, vmopC, vm1.Namespace, vm1.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm1, + []*v1.PersistentVolumeClaim{pvc1, restorepvc2})).To(gomega.Succeed()) + + ginkgo.By("Verify PVCs are accessible to the VM") + ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity") + // Refresh VM information + vm1, err = getVmsvcVM(ctx, vmopC, vm1.Namespace, vm1.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // Format the PVC and verify 
accessibility + volFolder := formatNVerifyPvcIsAccessible(vm1.Status.Volumes[0].DiskUuid, 2, vmIp1) + // Verify data integrity on the VM disk + verifyDataIntegrityOnVmDisk(vmIp1, volFolder) + + // TODO verifying restore volume data from VM1 + + ginkgo.By("Attach restore snapshot-1 to vm2") + vm2, err = getVmsvcVM(ctx, vmopC, vm2.Namespace, vm2.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = updateVmWithNewPvc(ctx, vmopC, vm2.Name, namespace, restorepvc1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait and verify restore snapshot-1 is attached to the VM2") + vm2, err = getVmsvcVM(ctx, vmopC, vm2.Namespace, vm2.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm2, + []*v1.PersistentVolumeClaim{pvc2, restorepvc1})).To(gomega.Succeed()) + + ginkgo.By("Verify PVCs are accessible to the VM") + ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity") + // Refresh VM information + vm2, err = getVmsvcVM(ctx, vmopC, vm2.Namespace, vm2.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // Format the PVC and verify accessibility + volFolder = formatNVerifyPvcIsAccessible(vm2.Status.Volumes[0].DiskUuid, 2, vmIp2) + // Verify data integrity on the VM disk + verifyDataIntegrityOnVmDisk(vmIp2, volFolder) + + ginkgo.By("Deleting VMs") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm1.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm2.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated1, snapshotContentCreated1, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot1, pandoraSyncWaitTime, volHandle1, snapshotId1, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated2, snapshotContentCreated2, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot2, pandoraSyncWaitTime, volHandle2, snapshotId2, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* + Testcase-12 + Workflow Path: + + PVC → VM → Write data → Snapshot1 → Write new data → Snapshot2 → Write new data → Snapshot3 → + RestoreVol from snapshot3 → VM → Read/Write and verify data + + Steps: + Create PVC using the storage class (storage policy) tagged to the supervisor namespace + Wait for PVC to reach the Bound state. + Create a VM service VM. Attach VM to PVC-1 + Wait until the VM service is up and in a powered-on state + Once the VM is up, verify that the volume is accessible inside the VM + Write some data to PVC-1 from VM-1. + Get VolumeSnapshotClass "volumesnapshotclass-delete" from supervisor cluster + Create a volume snapshot (snapshot1) using the above snapshot class (step #3) and PVC (step #1). + Snapshot Verification: Execute and verify the steps mentioned in the Create snapshot mandatory checks + Write new data into the volume + Take a new snapshot (snapshot2) of PVC-1. + Write new data into the volume. + Take a new snapshot (snapshot3) of PVC-1. + Create a new PVC using the snapshot3. + Wait for the new PVC (PVC-2) to reach the Bound state. 
+ Create a new VM service VM and attach it to volume PVC-2 + Wait until the VM service is up and in a powered-on state + Once the VM is up, verify that the volume is accessible inside the VM + Read, write and verify data in the volume. + Cleanup: Execute and verify the steps mentioned in the Delete snapshot mandatory checks + */ + + ginkgo.It("VMM12", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ginkgo.By("Create a storageclass") + storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create PVC") + pvc, pvs, err := createPVCAndQueryVolumeInCNS(ctx, client, namespace, labelsMap, "", + diskSize, storageclass, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volHandle := pvs[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + defer func() { + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating VM bootstrap data") + secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace) + defer func() { + ginkgo.By("Deleting VM bootstrap data") + err := client.CoreV1().Secrets(namespace).Delete(ctx, secretName, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating VM") + vm1 := createVmServiceVmWithPvcs( + ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc}, vmi, storageClassName, secretName) + defer func() { + ginkgo.By("Deleting VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm1.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating loadbalancing service for ssh with the VM") + vmlbsvc1 := createService4Vm(ctx, vmopC, namespace, vm1.Name) + defer func() { + ginkgo.By("Deleting loadbalancing service for ssh with the VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachineService{ObjectMeta: metav1.ObjectMeta{ + Name: vmlbsvc1.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Wait for VM to come up and get an IP") + vmIp1, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm1.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait and verify PVCs are attached to the VM") + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm1, + []*v1.PersistentVolumeClaim{pvc})).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify PVCs are accessible to the VM") + ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity") + vm1, err = getVmsvcVM(ctx, vmopC, vm1.Namespace, vm1.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for i, vol := range vm1.Status.Volumes { + volFolder := formatNVerifyPvcIsAccessible(vol.DiskUuid, i+1, vmIp1) + verifyDataIntegrityOnVmDisk(vmIp1, volFolder) + } + + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create snapshot-1") + volumeSnapshot1, snapshotContent1, snapshotCreated1, + snapshotContentCreated1, snapshotId1, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, + volumeSnapshotClass, pvc, 
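+ // NOTE (assumption): createDynamicVolumeSnapshot is assumed to create the VolumeSnapshot,
+ // wait for it to be readyToUse with a restore size equal to diskSize (last argument true),
+ // and return the snapshot, its content, the two "created" flags consumed by the deferred
+ // cleanup below, and the CNS snapshot id.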
volHandle, diskSize, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated1 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent1, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated1 { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot1.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot1.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity") + vm1, err = getVmsvcVM(ctx, vmopC, vm1.Namespace, vm1.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volFolder := formatNVerifyPvcIsAccessible(vm1.Status.Volumes[0].DiskUuid, 2, vmIp1) + verifyDataIntegrityOnVmDisk(vmIp1, volFolder) + + ginkgo.By("Create snapshot-2") + volumeSnapshot2, snapshotContent2, snapshotCreated2, + snapshotContentCreated2, snapshotId2, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, + volumeSnapshotClass, pvc, volHandle, diskSize, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated2 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent2, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated2 { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot2.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot2.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity") + vm1, err = getVmsvcVM(ctx, vmopC, vm1.Namespace, vm1.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volFolder = formatNVerifyPvcIsAccessible(vm1.Status.Volumes[0].DiskUuid, 3, vmIp1) + verifyDataIntegrityOnVmDisk(vmIp1, volFolder) + + ginkgo.By("Create snapshot-3") + volumeSnapshot3, snapshotContent3, snapshotCreated3, + snapshotContentCreated3, snapshotId3, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, + volumeSnapshotClass, pvc, volHandle, diskSize, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated3 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent3, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated3 { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot3.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot3.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Restore volume from latest snapshot") + restorepvc, restorepv, _ := verifyVolumeRestoreOperation(ctx, client, namespace, storageclass, + volumeSnapshot3, diskSize, false) + restorevolHandle := 
restorepv[0].Spec.CSI.VolumeHandle + gomega.Expect(restorevolHandle).NotTo(gomega.BeEmpty()) + defer func() { + err := fpv.DeletePersistentVolumeClaim(ctx, client, restorepvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(restorevolHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating VM") + vm2 := createVmServiceVmWithPvcs( + ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{restorepvc}, vmi, storageClassName, secretName) + defer func() { + ginkgo.By("Deleting VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm2.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating loadbalancing service for ssh with the VM") + vmlbsvc2 := createService4Vm(ctx, vmopC, namespace, vm2.Name) + defer func() { + ginkgo.By("Deleting loadbalancing service for ssh with the VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachineService{ObjectMeta: metav1.ObjectMeta{ + Name: vmlbsvc2.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Wait for VM to come up and get an IP") + vmIp2, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm2.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait and verify PVCs are attached to the VM") + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm2, + []*v1.PersistentVolumeClaim{restorepvc})).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify PVCs are accessible to the VM") + ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity") + vm2, err = getVmsvcVM(ctx, vmopC, vm2.Namespace, vm2.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for i, vol := range vm2.Status.Volumes { + volFolder := formatNVerifyPvcIsAccessible(vol.DiskUuid, i+1, vmIp2) + verifyDataIntegrityOnVmDisk(vmIp2, volFolder) + } + + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated1, snapshotContentCreated1, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot1, pandoraSyncWaitTime, volHandle, snapshotId1, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated2, snapshotContentCreated2, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot2, pandoraSyncWaitTime, volHandle, snapshotId2, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated3, snapshotContentCreated3, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot3, pandoraSyncWaitTime, volHandle, snapshotId3, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* + Stretched WCP: VM and PVC both belong to the same zone, Immediate Binding mode + Workflow Path: + PVC → VM-1 → Snapshot → RestoreVol → Attach to VM-1 + + Steps: + Assign a zonal SPBM policy to test the namespace with sufficient quota + Create a PVC (PVC-1) + Wait for PVC-1 to reach Bound state. + Verify PV node affinity + Create a VM service VM using PVC-1 created in step #2. + Wait until the VM service is up and in a powered-on state + Once the VM is up verify that the volume is accessible inside the VM + Verify PVC CNS metadata. 
+ Get VolumeSnapshotClass "volumesnapshotclass-delete" from supervisor cluster + Take a snapshot of PVC + Snapshot Verification: Execute and verify the steps mentioned in the Create snapshot mandatory checks + Restore Snapshot to create a new PVC (PVC-2) + Wait for PVC-2 to reach the Bound state. + Verify PV node affinity. + Attach this newly created PVC-2 to the VM created in step #5 + Verify that the volume is accessible inside the VM. + Read, write and verify data in the volume. + Cleanup: Execute and verify the steps mentioned in the Delete snapshot mandatory checks + + 1 Assign a zonal spbm policy to test namespace with sufficient quota + 2 Create two PVCs say pvc1, pvc2 under zone1 + 3 Create a VMservice VM say vm1 under zone1 with pvc1 + 4 Once the vm1 is up verify that the volume is accessible inside vm1 + 5 verify pvc1 CNS metadata. + 6 Attach pvc2 to vm1 and verify that the volume is accessible inside vm1 + 7 Delete vm1 + 8 delete pvc1, pvc2 + 9 Remove spbm policy attached to test namespace in step1 + */ + ginkgo.It("VMM13", ginkgo.Label(p0, wcp, core), func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + topologyHaMap := GetAndExpectStringEnvVar(topologyHaMap) + allowedTopos := createAllowedTopolgies(topologyHaMap) + allowedTopologyHAMap := createAllowedTopologiesMap(allowedTopos) + + ginkgo.By("Creating Pvc with Immediate topology storageclass") + storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pvcSpec := getPersistentVolumeClaimSpecWithStorageClass(namespace, "", storageclass, nil, "") + pvc, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(ctx, pvcSpec, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait for SV PVC to come to bound state") + pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, + framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volumeID := pvs[0].Spec.CSI.VolumeHandle + defer func() { + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeID) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), + fmt.Sprintf("Volume: %s should not be present in the CNS after it is deleted from "+ + "kubernetes", volumeID)) + }() + + ginkgo.By("Verify SV PV has has required PV node affinity details") + _, err = verifyVolumeTopologyForLevel5(pvs[0], allowedTopologyHAMap) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("SVC PV: %s has required PV node affinity details", pvs[0].Name) + + ginkgo.By("Creating VM bootstrap data") + secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace) + defer func() { + ginkgo.By("Deleting VM bootstrap data") + err := client.CoreV1().Secrets(namespace).Delete(ctx, secretName, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating VM") + vm := createVmServiceVmWithPvcs( + ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc}, vmi, storageClassName, secretName) + defer func() { + ginkgo.By("Deleting VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating loadbalancing service for ssh with the VM") + vmlbsvc := 
createService4Vm(ctx, vmopC, namespace, vm.Name) + defer func() { + ginkgo.By("Deleting loadbalancing service for ssh with the VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachineService{ObjectMeta: metav1.ObjectMeta{ + Name: vmlbsvc.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Wait for VM to come up and get an IP") + vmIp, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait and verify PVCs are attached to the VM") + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm, + []*v1.PersistentVolumeClaim{pvc})).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify PVCs are accessible to the VM") + vm, err = getVmsvcVM(ctx, vmopC, vm.Namespace, vm.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for i, vol := range vm.Status.Volumes { + _ = formatNVerifyPvcIsAccessible(vol.DiskUuid, i+1, vmIp) + } + + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create snapshot") + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvc, volumeID, diskSize, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Restore volume from snapshot") + restorepvc, restorepv, _ := verifyVolumeRestoreOperation(ctx, client, namespace, storageclass, + volumeSnapshot, diskSize, false) + restorevolHandle := restorepv[0].Spec.CSI.VolumeHandle + gomega.Expect(restorevolHandle).NotTo(gomega.BeEmpty()) + defer func() { + err := fpv.DeletePersistentVolumeClaim(ctx, client, restorepvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(restorevolHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Verify SV PV has has required PV node affinity details") + _, err = verifyVolumeTopologyForLevel5(restorepv[0], allowedTopologyHAMap) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("SVC PV: %s has required PV node affinity details", restorepv[0].Name) + + ginkgo.By("Creating VM") + vm2 := createVmServiceVmWithPvcs( + ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{restorepvc}, vmi, storageClassName, secretName) + defer func() { + ginkgo.By("Deleting VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm2.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating loadbalancing service for ssh with the VM") + vmlbsvc2 := createService4Vm(ctx, vmopC, namespace, vm2.Name) + defer func() { + ginkgo.By("Deleting loadbalancing service for ssh with the 
VM")
+ err = vmopC.Delete(ctx, &vmopv1.VirtualMachineService{ObjectMeta: metav1.ObjectMeta{
+ Name: vmlbsvc2.Name,
+ Namespace: namespace,
+ }})
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ }()
+
+ ginkgo.By("Wait for VM to come up and get an IP")
+ vmIp2, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm2.Name)
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+ ginkgo.By("Wait and verify PVCs are attached to the VM")
+ gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm2,
+ []*v1.PersistentVolumeClaim{restorepvc})).NotTo(gomega.HaveOccurred())
+
+ ginkgo.By("Verify PVCs are accessible to the VM")
+ ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity")
+ vm2, err = getVmsvcVM(ctx, vmopC, vm2.Namespace, vm2.Name) // refresh vm info
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ for i, vol := range vm2.Status.Volumes {
+ volFolder := formatNVerifyPvcIsAccessible(vol.DiskUuid, i+1, vmIp2)
+ verifyDataIntegrityOnVmDisk(vmIp2, volFolder)
+ }
+
+ ginkgo.By("Delete dynamic volume snapshot")
+ snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace,
+ volumeSnapshot, pandoraSyncWaitTime, volumeID, snapshotId, false)
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ })
+
+ /*
+ Testcase-14
+ vMotion and storage vMotion within the same zone
+ Immediate Binding mode
+ Workflow Path:
+ PVC-1 → VM → Write data → Snapshot1 → RestoreVol1 → new VM(RestoreVol1) → Write data →
+ Snapshot2 → RestoreVol2 → new VM(RestoreVol2) → Write data → Snapshot3
+ 1. Assign a zonal SPBM policy to test the namespace with a sufficient quota which
+ matches two shared datastores say ds1 and ds2 in the same zone
+ 2. Create two PVCs say pvc1, and pvc2 (assuming that both of them are provisioned on ds1)
+ 3. Create a VM service VM say vm1 under the same zone with pvc1, pvc2
+ 4. Wait until the VM service is up and in a powered-on state
+ 5. Once the vm1 is up write some data to pvc1 and pvc2
+ 6. Get VolumeSnapshotClass "volumesnapshotclass-delete" from supervisor cluster
+ 7. Take snapshots of both the PVCs.
+ 8. Snapshot Verification: Execute and verify the steps mentioned in the Create snapshot mandatory checks
+ 9. vMotion the VM to a different host in the same zone
+ 10. Verify data in pvc1, pvc2 written in step 5
+ 11. Write new data to pvc1, pvc2
+ 12. Storage vMotion pvc1 and pvc2 to a different datastore (ds2) in the same zone
+ 13. Verify reading and writing data on the volumes.
+ 14. Take volume snapshots of both migrated PVCs i.e. Snapshot3(PVC-1), Snapshot4(PVC-2)
+ 15. Snapshot Verification: Execute and verify the steps mentioned in the Create snapshot mandatory checks
+ 16. Create new PVCs i.e. RestoreVol1 from Snapshot1 → RestoreVol2 from Snapshot2
+ 17. Create new PVCs i.e. RestoreVol3 from Snapshot3 → RestoreVol4 from Snapshot4
+ 18. Create a VM service VM using volumes created in step #16.
+ 19. Wait until the VM service is up and in a powered-on state
+ 20. Once the VM is up write some data to the attached volumes.
+ 21. Create a VM service VM using volumes created in step #17
+ 22. Wait until the VM service is up and in a powered-on state
+ 23. Once the VM is up write some data to the attached volumes.
+ 24. 
Cleanup: Execute and verify the steps mentioned in the Delete snapshot mandatory checks + */ + + ginkgo.It("VMM14", ginkgo.Label(p0, wcp, core), func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + topologyHaMap := GetAndExpectStringEnvVar(topologyHaMap) + allowedTopos := createAllowedTopolgies(topologyHaMap) + allowedTopologyHAMap := createAllowedTopologiesMap(allowedTopos) + + ginkgo.By("Get Storage Class") + storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Creating pvc with Immediate topology storageclass") + pvcSpec := getPersistentVolumeClaimSpecWithStorageClass(namespace, "", storageclass, nil, "") + pvc, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(ctx, pvcSpec, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait for SV PVC to come to bound state") + pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, + framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volumeId := pvs[0].Spec.CSI.VolumeHandle + defer func() { + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeId) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), + fmt.Sprintf("Volume: %s should not be present in the CNS after it is deleted from "+ + "kubernetes", volumeId)) + }() + + ginkgo.By("Verify SV PV has has required PV node affinity details") + _, err = verifyVolumeTopologyForLevel5(pvs[0], allowedTopologyHAMap) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("SVC PV: %s has required PV node affinity details", pvs[0].Name) + + ginkgo.By("Creating VM bootstrap data") + secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace) + defer func() { + ginkgo.By("Deleting VM bootstrap data") + err := client.CoreV1().Secrets(namespace).Delete(ctx, secretName, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating VM") + vm1 := createVmServiceVmWithPvcs( + ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc}, vmi, storageClassName, secretName) + defer func() { + ginkgo.By("Deleting VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm1.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating loadbalancing service for ssh with the VM") + vmlbsvc1 := createService4Vm(ctx, vmopC, namespace, vm1.Name) + defer func() { + ginkgo.By("Deleting loadbalancing service for ssh with the VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachineService{ObjectMeta: metav1.ObjectMeta{ + Name: vmlbsvc1.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Wait for VM to come up and get an IP") + vmIp1, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm1.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait and verify PVCs are attached to the VM") + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm1, + []*v1.PersistentVolumeClaim{pvc})).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify PVCs are accessible to the VM") + vm1, err = getVmsvcVM(ctx, vmopC, vm1.Namespace, vm1.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for i, vol 
:= range vm1.Status.Volumes { + volFolder := formatNVerifyPvcIsAccessible(vol.DiskUuid, i+1, vmIp1) + verifyDataIntegrityOnVmDisk(vmIp1, volFolder) + } + + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create snapshot-1") + volumeSnapshot1, snapshotContent1, snapshotCreated1, + snapshotContentCreated1, snapshotId1, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, + volumeSnapshotClass, pvc, volumeId, diskSize, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated1 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent1, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated1 { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot1.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot1.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Restore volume from snapshot-1") + restorepvc1, restorepv1, _ := verifyVolumeRestoreOperation(ctx, client, namespace, storageclass, + volumeSnapshot1, diskSize, false) + restorevolHandle1 := restorepv1[0].Spec.CSI.VolumeHandle + gomega.Expect(restorevolHandle1).NotTo(gomega.BeEmpty()) + defer func() { + err := fpv.DeletePersistentVolumeClaim(ctx, client, restorepvc1.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(restorevolHandle1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Verify SV PV has has required PV node affinity details") + _, err = verifyVolumeTopologyForLevel5(restorepv1[0], allowedTopologyHAMap) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("SVC PV: %s has required PV node affinity details", restorepv1[0].Name) + + ginkgo.By("Creating VM") + vm2 := createVmServiceVmWithPvcs( + ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{restorepvc1}, vmi, storageClassName, secretName) + defer func() { + ginkgo.By("Deleting VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm2.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating loadbalancing service for ssh with the VM") + vmlbsvc2 := createService4Vm(ctx, vmopC, namespace, vm2.Name) + defer func() { + ginkgo.By("Deleting loadbalancing service for ssh with the VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachineService{ObjectMeta: metav1.ObjectMeta{ + Name: vmlbsvc2.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Wait for VM to come up and get an IP") + vmIp2, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm2.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait and verify PVCs are attached to the VM") + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm2, + []*v1.PersistentVolumeClaim{restorepvc1})).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify PVCs are accessible to the VM") + ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity") + vm2, err = getVmsvcVM(ctx, vmopC, vm2.Namespace, vm2.Name) // 
refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for i, vol := range vm2.Status.Volumes { + volFolder := formatNVerifyPvcIsAccessible(vol.DiskUuid, i+1, vmIp2) + verifyDataIntegrityOnVmDisk(vmIp2, volFolder) + } + + ginkgo.By("Create volume snapshot-2") + volumeSnapshot2, snapshotContent2, snapshotCreated2, + snapshotContentCreated2, snapshotId2, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, + volumeSnapshotClass, restorepvc1, restorevolHandle1, diskSize, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated2 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent2, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated2 { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot2.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot2.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Restore volume from snapshot-2") + restorepvc2, restorepv2, _ := verifyVolumeRestoreOperation(ctx, client, namespace, storageclass, + volumeSnapshot2, diskSize, false) + restorevolHandle2 := restorepv2[0].Spec.CSI.VolumeHandle + gomega.Expect(restorevolHandle2).NotTo(gomega.BeEmpty()) + defer func() { + err := fpv.DeletePersistentVolumeClaim(ctx, client, restorepvc2.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(restorevolHandle2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Verify SV PV has has required PV node affinity details") + _, err = verifyVolumeTopologyForLevel5(restorepv2[0], allowedTopologyHAMap) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("SVC PV: %s has required PV node affinity details", restorepv2[0].Name) + + ginkgo.By("Creating VM") + vm3 := createVmServiceVmWithPvcs( + ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{restorepvc2}, vmi, storageClassName, secretName) + defer func() { + ginkgo.By("Deleting VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm3.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating loadbalancing service for ssh with the VM") + vmlbsvc3 := createService4Vm(ctx, vmopC, namespace, vm3.Name) + defer func() { + ginkgo.By("Deleting loadbalancing service for ssh with the VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachineService{ObjectMeta: metav1.ObjectMeta{ + Name: vmlbsvc3.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Wait for VM to come up and get an IP") + vmIp3, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm3.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait and verify PVCs are attached to the VM") + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm3, + []*v1.PersistentVolumeClaim{restorepvc2})).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify PVCs are accessible to the VM") + ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity") + vm3, err = getVmsvcVM(ctx, vmopC, vm3.Namespace, vm3.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for i, vol 
:= range vm3.Status.Volumes { + volFolder := formatNVerifyPvcIsAccessible(vol.DiskUuid, i+1, vmIp3) + verifyDataIntegrityOnVmDisk(vmIp3, volFolder) + } + + ginkgo.By("Create volume snapshot-3") + volumeSnapshot3, snapshotContent3, snapshotCreated3, + snapshotContentCreated3, snapshotId3, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, + volumeSnapshotClass, restorepvc2, restorevolHandle2, diskSize, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated3 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent3, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated3 { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot3.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot3.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Delete volume snapshot-1") + snapshotCreated1, snapshotContentCreated1, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot1, pandoraSyncWaitTime, volumeId, snapshotId1, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Delete volume snapshot-2") + snapshotCreated2, snapshotContentCreated2, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot2, pandoraSyncWaitTime, restorevolHandle1, snapshotId2, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Delete volume snapshot-3") + snapshotCreated3, snapshotContentCreated3, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot3, pandoraSyncWaitTime, restorevolHandle2, snapshotId3, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + +}) diff --git a/tests/e2e/tkgs_ha.go b/tests/e2e/tkgs_ha.go index d9e4d6363d..7435fe82c6 100644 --- a/tests/e2e/tkgs_ha.go +++ b/tests/e2e/tkgs_ha.go @@ -266,7 +266,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { ginkgo.By("Create a dynamic volume snapshot") volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, dynamicSnapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, + snapshotContentCreated, dynamicSnapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -319,7 +319,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { ginkgo.By("Delete dynamic volume snapshot") snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, - volumeSnapshot, pandoraSyncWaitTime, volHandle, dynamicSnapshotId) + volumeSnapshot, pandoraSyncWaitTime, volHandle, dynamicSnapshotId, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -875,7 +875,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { ginkgo.By("Create a dynamic volume snapshot") volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, dynamicSnapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, + snapshotContentCreated, dynamicSnapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, staticPvc, volumeID, diskSize, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -928,7 +928,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", 
func() { ginkgo.By("Delete dynamic volume snapshot") snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, - volumeSnapshot, pandoraSyncWaitTime, volumeID, dynamicSnapshotId) + volumeSnapshot, pandoraSyncWaitTime, volumeID, dynamicSnapshotId, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -1044,7 +1044,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { ginkgo.By("Create a dynamic volume snapshot") volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + snapshotContentCreated, _, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, "1Gi", false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -1128,7 +1128,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { ginkgo.By("Delete pre-provisioned snapshot") staticSnapshotCreated, staticSnapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, - staticSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + staticSnapshot, pandoraSyncWaitTime, volHandle, snapshotId, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -1351,7 +1351,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { ginkgo.By("Create a dynamic volume snapshot") volumeSnapshot, snapshotContent, snapshotCreated, - snapshotContentCreated, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + snapshotContentCreated, _, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -1435,7 +1435,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { ginkgo.By("Delete pre-provisioned snapshot") staticSnapshotCreated, staticSnapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, - staticSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + staticSnapshot, pandoraSyncWaitTime, volHandle, snapshotId, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) diff --git a/tests/e2e/topology_multi_replica.go b/tests/e2e/topology_multi_replica.go index 7db53530fa..18b79a1356 100644 --- a/tests/e2e/topology_multi_replica.go +++ b/tests/e2e/topology_multi_replica.go @@ -956,7 +956,7 @@ var _ = ginkgo.Describe("[topology-multireplica] Topology-MultiReplica", ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") for i := 0; i < len(podList); i++ { err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, podList[i], - namespace, allowedTopologies) + allowedTopologies) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }) @@ -1164,7 +1164,7 @@ var _ = ginkgo.Describe("[topology-multireplica] Topology-MultiReplica", ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") for i := 0; i < len(podList); i++ { err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, podList[i], - namespace, allowedTopologies) + allowedTopologies) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }) @@ -1673,7 +1673,7 @@ var _ = ginkgo.Describe("[topology-multireplica] Topology-MultiReplica", ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") for i := 0; i < len(podList); i++ { err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, podList[i], - namespace, 
allowedTopologies) + allowedTopologies) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }) @@ -1914,8 +1914,7 @@ var _ = ginkgo.Describe("[topology-multireplica] Topology-MultiReplica", for each StatefulSet pod */ ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") for i := 0; i < len(podList); i++ { - err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, podList[i], - namespace, allowedTopologies) + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, podList[i], allowedTopologies) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }) @@ -2083,8 +2082,7 @@ var _ = ginkgo.Describe("[topology-multireplica] Topology-MultiReplica", ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") for i := 0; i < len(podList); i++ { - err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, podList[i], namespace, - allowedTopologies) + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, podList[i], allowedTopologies) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } diff --git a/tests/e2e/topology_snapshot.go b/tests/e2e/topology_snapshot.go index a9029255ea..17223c0526 100644 --- a/tests/e2e/topology_snapshot.go +++ b/tests/e2e/topology_snapshot.go @@ -272,7 +272,7 @@ var _ = ginkgo.Describe("[topology-snapshot] Topology-Snapshot", func() { specified in the allowed topologies of SC */ ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, namespace, + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, allowedTopologies) gomega.Expect(err).NotTo(gomega.HaveOccurred()) diff --git a/tests/e2e/util.go b/tests/e2e/util.go index 2456b8deb9..a18d69f918 100644 --- a/tests/e2e/util.go +++ b/tests/e2e/util.go @@ -3428,8 +3428,13 @@ func writeDataOnFileFromPod(namespace string, podName string, filePath string, d cmdArg = "-c" } wrtiecmd := []string{"exec", podName, "--namespace=" + namespace, "--", shellExec, cmdArg, - fmt.Sprintf(" echo '%s' > %s ", data, filePath)} + fmt.Sprintf(" echo '%s' >> %s ", data, filePath)} e2ekubectl.RunKubectlOrDie(namespace, wrtiecmd...) + + data2 := "fsync" + wrtiecmd2 := []string{"exec", podName, "--namespace=" + namespace, "--", shellExec, cmdArg, + fmt.Sprintf(" echo '%s' >> %s ", data2, filePath)} + e2ekubectl.RunKubectlOrDie(namespace, wrtiecmd2...) } // readFileFromPod read data from given Pod and the given file. @@ -5568,7 +5573,7 @@ Also it verifies that a pod is scheduled on a node that belongs to the topology is provisioned. 
*/ func verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx context.Context, - client clientset.Interface, pod *v1.Pod, namespace string, + client clientset.Interface, pod *v1.Pod, allowedTopologies []v1.TopologySelectorLabelRequirement) error { allowedTopologiesMap := createAllowedTopologiesMap(allowedTopologies) for _, volumespec := range pod.Spec.Volumes { diff --git a/tests/e2e/vmservice_utils.go b/tests/e2e/vmservice_utils.go index f198187c8e..6656689eb0 100644 --- a/tests/e2e/vmservice_utils.go +++ b/tests/e2e/vmservice_utils.go @@ -508,29 +508,91 @@ func waitNverifyPvcsAreAttachedToVmsvcVm(ctx context.Context, vmopC ctlrclient.C // formatNVerifyPvcIsAccessible format the pvc inside vm and create a file system on it and returns a folder with 777 // permissions under the mount point func formatNVerifyPvcIsAccessible(diskUuid string, mountIndex int, vmIp string) string { + // Construct the disk path from the UUID p := "/dev/disk/by-id/wwn-0x" + strings.ReplaceAll(strings.ToLower(diskUuid), "-", "") - results := execSshOnVmThroughGatewayVm(vmIp, []string{"ls -l /dev/disk/by-id/", "ls -l " + p}) - dev := "/dev/" + strings.TrimSpace(strings.Split(results[1].Stdout, "/")[6]) + fmt.Println("Checking disk path:", p) + + // List the available disks + results := execSshOnVmThroughGatewayVm(vmIp, []string{ + "ls -l /dev/disk/by-id/", + }) + fmt.Println("Disk list results:", results) + + // Check if the desired disk exists + diskCheckResults := execSshOnVmThroughGatewayVm(vmIp, []string{ + "ls -l " + p, + }) + + // If the disk is not found, try rescanning SCSI devices + if strings.Contains(diskCheckResults[0].Stderr, "No such file or directory") { + fmt.Printf("Disk %s not found. Rescanning SCSI devices.\n", p) + rescanResults := execSshOnVmThroughGatewayVm(vmIp, []string{ + "echo '- - -' | sudo tee /sys/class/scsi_host/host*/scan", + "ls -l /dev/disk/by-id/", + "ls -l " + p, + }) + fmt.Println("Rescan results:", rescanResults) + + // Check again if the disk is available after rescanning + diskCheckResults = execSshOnVmThroughGatewayVm(vmIp, []string{ + "ls -l " + p, + }) + } + + // If the disk is still not found, fail the test + if strings.Contains(diskCheckResults[0].Stderr, "No such file or directory") { + framework.Failf("Disk %s not found on VM %s after rescanning.", p, vmIp) + } + + // Extract the device name + parts := strings.Split(strings.TrimSpace(diskCheckResults[0].Stdout), "/") + if len(parts) < 7 { + framework.Failf("Unexpected ls output: %s", diskCheckResults[0].Stdout) + } + dev := "/dev/" + parts[6] + fmt.Println("Device:", dev) + gomega.Expect(dev).ShouldNot(gomega.Equal("/dev/")) - framework.Logf("Found %s dev for disk with uuid %s", dev, diskUuid) + framework.Logf("Found device %s for disk with UUID %s", dev, diskUuid) partitionDev := dev + "1" - _ = execSshOnVmThroughGatewayVm(vmIp, []string{"sudo parted --script " + dev + " mklabel gpt", - "sudo parted --script -a optimal " + dev + " mkpart primary 0% 100%", "lsblk -l", - "sudo mkfs.ext4 " + partitionDev}) + fmt.Println("Partition Device:", partitionDev) + + // Unmount any existing partitions on the device + unmountCommands := []string{ + fmt.Sprintf("sudo umount %s* || true", dev), + } + res := execSshOnVmThroughGatewayVm(vmIp, unmountCommands) + fmt.Println("Unmount Results:", res) + + // Partition and format the disk + partitionCommands := []string{ + fmt.Sprintf("sudo parted --script %s mklabel gpt", dev), + fmt.Sprintf("sudo parted --script -a optimal %s mkpart primary 0%% 100%%", dev), + "lsblk -l", + 
fmt.Sprintf("sudo mkfs.ext4 %s", partitionDev), + } + res = execSshOnVmThroughGatewayVm(vmIp, partitionCommands) + fmt.Println("Partitioning Results:", res) + // Mount the new partition volMountPath := "/mnt/volume" + strconv.Itoa(mountIndex) volFolder := volMountPath + "/data" - results = execSshOnVmThroughGatewayVm(vmIp, []string{ - "sudo mkdir -p " + volMountPath, - "sudo mount " + partitionDev + " " + volMountPath, - "sudo mkdir -p " + volFolder, - "sudo chmod -R 777 " + volFolder, + mountCommands := []string{ + fmt.Sprintf("sudo mkdir -p %s", volMountPath), + fmt.Sprintf("sudo mount %s %s", partitionDev, volMountPath), + fmt.Sprintf("sudo mkdir -p %s", volFolder), + fmt.Sprintf("sudo chmod -R 777 %s", volFolder), fmt.Sprintf("bash -c 'df -Th %s | tee %s/fstype'", partitionDev, volFolder), - "grep -c ext4 " + volFolder + "/fstype", + fmt.Sprintf("grep -c ext4 %s/fstype", volFolder), "sync", - }) - gomega.Expect(strings.TrimSpace(results[5].Stdout)).To(gomega.Equal("1")) + } + results = execSshOnVmThroughGatewayVm(vmIp, mountCommands) + fmt.Println("Mounting Results:", results) + + // Verify the filesystem type + gomega.Expect(strings.TrimSpace(results[5].Stdout)).To(gomega.Equal("1"), "Filesystem type is not ext4") + return volFolder } @@ -896,3 +958,32 @@ func performVolumeLifecycleActionForVmServiceVM(ctx context.Context, client clie gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm, []*v1.PersistentVolumeClaim{pvc})).NotTo(gomega.HaveOccurred()) } + +func updateVmWithNewPvc(ctx context.Context, vmopC ctlrclient.Client, vmName string, + namespace string, newPvc *v1.PersistentVolumeClaim) error { + // Fetch the existing VM + vm := &vmopv1.VirtualMachine{} + err := vmopC.Get(ctx, ctlrclient.ObjectKey{Name: vmName, Namespace: namespace}, vm) + if err != nil { + return fmt.Errorf("failed to get VM: %v", err) + } + + // Create a new volume using the new PVC + newVolume := vmopv1.VirtualMachineVolume{ + Name: newPvc.Name, + PersistentVolumeClaim: &vmopv1.PersistentVolumeClaimVolumeSource{ + PersistentVolumeClaimVolumeSource: v1.PersistentVolumeClaimVolumeSource{ + ClaimName: newPvc.Name, + }, + }, + } + + // Append the new volume to the existing VM's volumes + vm.Spec.Volumes = append(vm.Spec.Volumes, newVolume) + + // Update the VM spec in the Kubernetes cluster + if err = vmopC.Update(ctx, vm); err != nil { + return fmt.Errorf("failed to update VM: %v", err) + } + return nil +} diff --git a/tests/e2e/volume_provisioning_with_level5_topology.go b/tests/e2e/volume_provisioning_with_level5_topology.go index e95155b950..0ab5893105 100644 --- a/tests/e2e/volume_provisioning_with_level5_topology.go +++ b/tests/e2e/volume_provisioning_with_level5_topology.go @@ -906,8 +906,7 @@ var _ = ginkgo.Describe("[topology-positive] Topology-Positive", func() { specified in the allowed topologies of SC */ ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate " + "node as specified in the allowed topologies of SC") - err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, namespace, - allowedTopologies) + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, allowedTopologies) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -995,8 +994,7 @@ var _ = ginkgo.Describe("[topology-positive] Topology-Positive", func() { specified in the allowed topologies of SC */ ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - 
err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, namespace, - allowedTopologies) + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, allowedTopologies) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -1126,7 +1124,7 @@ var _ = ginkgo.Describe("[topology-positive] Topology-Positive", func() { as specified in the allowed topologies of SC */ ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, namespace, + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, allowedTopologies) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1272,7 +1270,7 @@ var _ = ginkgo.Describe("[topology-positive] Topology-Positive", func() { specified in the allowed topologies of SC */ ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, namespace, + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, allowedTopologies) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1339,7 +1337,7 @@ var _ = ginkgo.Describe("[topology-positive] Topology-Positive", func() { specified in the allowed topologies of SC */ ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, namespace, + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, allowedTopologies) gomega.Expect(err).NotTo(gomega.HaveOccurred())