From 36ef7e9583f1f43fa10b1edb63516a51ea492fd6 Mon Sep 17 00:00:00 2001 From: sipriyaa Date: Thu, 22 Aug 2024 11:46:57 +0530 Subject: [PATCH] new changes --- tests/e2e/csi_snapshot_basic.go | 11 +- tests/e2e/snapshot_vmservice_vm.go | 460 ++++++++++++++++++++++++++--- 2 files changed, 433 insertions(+), 38 deletions(-) diff --git a/tests/e2e/csi_snapshot_basic.go b/tests/e2e/csi_snapshot_basic.go index 98291fd86a..ed39805a75 100644 --- a/tests/e2e/csi_snapshot_basic.go +++ b/tests/e2e/csi_snapshot_basic.go @@ -1317,7 +1317,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { 17. Delete SC and VolumeSnapshotClass */ - ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot][supervisor-snapshot] Volume snapshot creation and restoration workflow "+ + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot][supervisor-snapshot] TC16Volume snapshot creation and restoration workflow "+ "with xfs filesystem", ginkgo.Label(p0, block, vanilla, tkg, snapshot, stable), func() { ctx, cancel := context.WithCancel(context.Background()) @@ -1476,6 +1476,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { xfsFSType, time.Minute) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + time.Sleep(2 * time.Minute) // Ensure that file1.txt is available as expected on the restored PVC ginkgo.By("Verify that file1.txt data is available as part of snapshot") output := readFileFromPod(namespace, pod2.Name, filePath1) @@ -3299,10 +3300,10 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { storageclass, err := createStorageClass(client, scParameters, nil, "", "", true, scName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - var allowExpansion = true - storageclass.AllowVolumeExpansion = &allowExpansion - storageclass, err = client.StorageV1().StorageClasses().Update(ctx, storageclass, metav1.UpdateOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // var allowExpansion = true + // storageclass.AllowVolumeExpansion = &allowExpansion + // storageclass, 
err = client.StorageV1().StorageClasses().Update(ctx, storageclass, metav1.UpdateOptions{}) + // gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { if vanillaCluster { diff --git a/tests/e2e/snapshot_vmservice_vm.go b/tests/e2e/snapshot_vmservice_vm.go index c68113732c..720c772ad1 100644 --- a/tests/e2e/snapshot_vmservice_vm.go +++ b/tests/e2e/snapshot_vmservice_vm.go @@ -20,11 +20,14 @@ import ( "context" "fmt" "os" + "strconv" "strings" + "time" snapclient "github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned" "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" + "github.com/vmware/govmomi/vim25/types" vmopv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha1" v1 "k8s.io/api/core/v1" @@ -65,6 +68,8 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { snapc *snapclient.Clientset pandoraSyncWaitTime int err error + dsRef types.ManagedObjectReference + labelsMap map[string]string ) ginkgo.BeforeEach(func() { @@ -148,6 +153,19 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { restConfig = getRestConfigClient() snapc, err = snapclient.NewForConfig(restConfig) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // reading full sync wait time + if os.Getenv(envPandoraSyncWaitTime) != "" { + pandoraSyncWaitTime, err = strconv.Atoi(os.Getenv(envPandoraSyncWaitTime)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + pandoraSyncWaitTime = defaultPandoraSyncWaitTime + } + + //setting map values + labelsMap = make(map[string]string) + labelsMap["app"] = "test" + }) ginkgo.AfterEach(func() { @@ -168,23 +186,23 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { }) /* - Dynamic PVC → VM → Snapshot - Steps: - 1. Create a PVC using the storage class (storage policy) tagged to the supervisor namespace - 2. Wait for PVC to reach the Bound state. - 3. Create a VM service VM using the PVC created in step #1 - 4. 
Wait for the VM service to be up and in the powered-on state. - 5. Once the VM is up, verify that the volume is accessible inside the VM - 6. Write some data into the volume. - 7. Get VolumeSnapshotClass "volumesnapshotclass-delete" from supervisor cluster - 8. Create a volume snapshot for the PVC created in step #1. - 9. Snapshot Verification: Execute and verify the steps mentioned in the Create snapshot mandatory checks - 10. Verify CNS metadata for a PVC - 11. Cleanup: Execute and verify the steps mentioned in the Delete snapshot mandatory checks + Testcase-1 + Dynamic PVC → VM → Snapshot + Steps: + 1. Create a PVC using the storage class (storage policy) tagged to the supervisor namespace + 2. Wait for PVC to reach the Bound state. + 3. Create a VM service VM using the PVC created in step #1 + 4. Wait for the VM service to be up and in the powered-on state. + 5. Once the VM is up, verify that the volume is accessible inside the VM + 6. Write some data into the volume. + 7. Get VolumeSnapshotClass "volumesnapshotclass-delete" from supervisor cluster + 8. Create a volume snapshot for the PVC created in step #1. + 9. Snapshot Verification: Execute and verify the steps mentioned in the Create snapshot mandatory checks + 10. Verify CNS metadata for a PVC + 11. 
Cleanup: Execute and verify the steps mentioned in the Delete snapshot mandatory checks */ - ginkgo.It("TC1VM", func() { - + ginkgo.It("VM1", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -193,7 +211,7 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create a PVC") - pvc, err := createPVC(ctx, client, namespace, nil, "", storageclass, "") + pvc, err := createPVC(ctx, client, namespace, labelsMap, diskSize, storageclass, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Waiting for all claims to be in bound state") pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, pollTimeout) @@ -297,27 +315,165 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }) /* - Dynamic PVC → VM → Snapshot - Steps: - 1. Create a PVC using the storage class (storage policy) tagged to the supervisor namespace - 2. Wait for PVC to reach the Bound state. - 3. Create a VM service VM using the PVC created in step #1 - 4. Wait for the VM service to be up and in the powered-on state. - 5. Once the VM is up, verify that the volume is accessible inside the VM - 6. Write some data into the volume. - 7. Get VolumeSnapshotClass "volumesnapshotclass-delete" from supervisor cluster - 8. Create a volume snapshot for the PVC created in step #1. - 9. Snapshot Verification: Execute and verify the steps mentioned in the Create snapshot mandatory checks - 10. Verify CNS metadata for a PVC - 11. Cleanup: Execute and verify the steps mentioned in the Delete snapshot mandatory checks + Testcase-2 + Static PVC → VM → Snapshot + Steps: + 1. Create FCD + 2. 
Create a static PV and PVC using cns register volume API + 3. Wait for PV and PVC to reach the Bound state. + 4. Create a VM service VM using the PVC created in step #2 + 5. Wait for the VM service to be up and in the powered-on state. + 6. Once the VM is up, verify that the volume is accessible inside the VM + 7. Write some data into the volume. + 8. Get VolumeSnapshotClass "volumesnapshotclass-delete" from supervisor cluster + 9. Create a volume snapshot for the PVC created in step #1. + 10. Snapshot Verification: Execute and verify the steps mentioned in the Create snapshot mandatory checks + 11. Verify CNS metadata for a PVC + 12. Cleanup: Execute and verify the steps mentioned in the Delete snapshot mandatory checks */ - ginkgo.It("TC2VM", func() { + ginkgo.It("VM2", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ginkgo.By("Creating FCD Disk") + fcdID, err := e2eVSphere.createFCD(ctx, fcdName, diskSizeInMb, dsRef.Reference()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow newly created FCD:%s to sync with pandora", + pandoraSyncWaitTime, fcdID)) + time.Sleep(time.Duration(pandoraSyncWaitTime) * time.Second) + + ginkgo.By(fmt.Sprintf("Creating the PV with the fcdID %s", fcdID)) + staticPVLabels := make(map[string]string) + staticPVLabels["fcd-id"] = fcdID + staticPv := getPersistentVolumeSpec(fcdID, v1.PersistentVolumeReclaimDelete, nil, ext4FSType) + staticPv, err = client.CoreV1().PersistentVolumes().Create(ctx, staticPv, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volHandle := staticPv.Spec.CSI.VolumeHandle + + err = e2eVSphere.waitForCNSVolumeToBeCreated(staticPv.Spec.CSI.VolumeHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Creating a static PVC") + staticPvc := getPersistentVolumeClaimSpec(namespace, staticPVLabels, staticPv.Name) + staticPvc, err = 
client.CoreV1().PersistentVolumeClaims(namespace).Create( + ctx, staticPvc, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Creating VM bootstrap data") + secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace) + defer func() { + ginkgo.By("Deleting VM bootstrap data") + err := client.CoreV1().Secrets(namespace).Delete(ctx, secretName, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + ginkgo.By("Creating VM") + vm := createVmServiceVmWithPvcs( + ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{staticPvc}, vmi, storageClassName, secretName) + defer func() { + ginkgo.By("Deleting VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating loadbalancing service for ssh with the VM") + vmlbsvc := createService4Vm(ctx, vmopC, namespace, vm.Name) + defer func() { + ginkgo.By("Deleting loadbalancing service for ssh with the VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachineService{ObjectMeta: metav1.ObjectMeta{ + Name: vmlbsvc.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Wait for VM to come up and get an IP") + vmIp, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait and verify PVCs are attached to the VM") + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm, + []*v1.PersistentVolumeClaim{staticPvc})).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify PVCs are accessible to the VM") + ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity") + vm, err = getVmsvcVM(ctx, vmopC, vm.Namespace, vm.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for i, vol := range vm.Status.Volumes { + 
volFolder := formatNVerifyPvcIsAccessible(vol.DiskUuid, i+1, vmIp) + verifyDataIntegrityOnVmDisk(vmIp, volFolder) + } + + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if vanillaCluster { + err = snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, + metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Create a dynamic volume snapshot") + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + staticPvc, volHandle, diskSize, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* + Testcase-3 + Dynamic PVC  → VM → Snapshot → RestoreVol → VM + Steps: + 1. Create a dynamic PVC using the storage class (storage policy) tagged to the supervisor namespace + 2. Wait for dynamic PVC to reach the Bound state. + 3. Create a VM service VM using dynamic PVC. 
+ 4. Wait for the VM service to be up and in the powered-on state. + 5. Once the VM is up, verify that the volume is accessible inside the VM + 6. Write some IO to the CSI volumes, read it back from them and verify the data integrity + 7. Get VolumeSnapshotClass "volumesnapshotclass-delete" from supervisor cluster + 8. Create a volume snapshot for the dynamic PVC created in step #1 + 9. Snapshot Verification: Execute and verify the steps mentioned in the Create snapshot mandatory checks + 10. Verify CNS metadata for a PVC + 11. Create a new PVC from the snapshot created in step #11. + 12. Wait for PVC to reach the Bound state. + 13. Create a VM service VM using the PVC created in step #14 + Wait for the VM service to be up and in the powered-on state. + Once the VM is up, verify that the volume is accessible inside the VM + Verify reading/writing data in the volume. + Cleanup: Execute and verify the steps mentioned in the Delete snapshot mandatory checks + */ + ginkgo.It("VM3", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -326,7 +482,7 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create a PVC") - pvc, err := createPVC(ctx, client, namespace, nil, "", storageclass, "") + pvc, err := createPVC(ctx, client, namespace, labelsMap, "", storageclass, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Waiting for all claims to be in bound state") pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, pollTimeout) @@ -407,7 +563,7 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { ginkgo.By("Create a dynamic volume snapshot") volumeSnapshot, snapshotContent, snapshotCreated, snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, - pvc, volHandle, diskSize, false) + pvc, volHandle, diskSize, true) 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { if snapshotContentCreated { @@ -426,10 +582,248 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() { } }() + ginkgo.By("Create a volume from a snapshot") + pvc2, pv2, _ := verifyVolumeRestoreOperation(ctx, client, namespace, storageclass, volumeSnapshot, diskSize, false) + volHandle2 := pv2[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) + + ginkgo.By("Creating VM") + vm2 := createVmServiceVmWithPvcs( + ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc2}, vmi, storageClassName, secretName) + defer func() { + ginkgo.By("Deleting VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm2.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Wait for VM to come up and get an IP") + vmIp2, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm2.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait and verify PVCs are attached to the VM") + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm2, + []*v1.PersistentVolumeClaim{pvc2})).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify PVCs are accessible to the VM") + ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity") + vm, err = getVmsvcVM(ctx, vmopC, vm2.Namespace, vm2.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for i, vol := range vm.Status.Volumes { + volFolder := formatNVerifyPvcIsAccessible(vol.DiskUuid, i+1, vmIp2) + verifyDataIntegrityOnVmDisk(vmIp2, volFolder) + } + ginkgo.By("Delete dynamic volume snapshot") snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* + Testcase-4 + Dynamic PVC → VM → Snapshot1 and 
Snapshot2 → PVC1 and PVC2 → VM + Steps: + 1. Create a PVC using the storage class (storage policy) tagged to the supervisor namespace + 2. Wait for PVC to reach the Bound state. + 3. Create a VM service VM using the PVC created in step #1 + 4. Wait for the VM service to be up and in the powered-on state. + 5. Once the VM is up, verify that the volume is accessible inside the VM + 6. Write some IO to the CSI volumes, read it back from them and verify the data integrity + 7. Get VolumeSnapshotClass "volumesnapshotclass-delete" from supervisor cluster + 8. Take 2 snapshots (snapshot-1, snapshot-2) for the PVC created in step #1. + 9. Snapshot Verification: Execute and verify the steps mentioned in the Create snapshot mandatory checks + + Verify CNS metadata for a PVC created in step #1 + Create PVC-1 from Snapshot-1, PVC-2 from Snapshot-2 + Wait for PVCs to reach the Bound state. + Create a VM service VM using pvc-1 and pvc-2 created in step #11 + Wait for the VM service to be up and in the powered-on state. 
+ Once the VM is up, verify that the volume is accessible inside the VM + Write some IO to the CSI volumes, read it back from them and verify the data integrity + Cleanup: Execute and verify the steps mentioned in the Delete snapshot mandatory checks + */ + + ginkgo.It("VM4", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ginkgo.By("Create a storageclass") + storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create a PVC") + pvc, err := createPVC(ctx, client, namespace, labelsMap, "", storageclass, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Waiting for all claims to be in bound state") + pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pv := pvs[0] + volHandle := pv.Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + defer func() { + ginkgo.By("Delete PVCs") + err = fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Waiting for CNS volumes to be deleted") + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating VM bootstrap data") + // creating a secret for vm credentials + secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace) + defer func() { + ginkgo.By("Deleting VM bootstrap data") + err := client.CoreV1().Secrets(namespace).Delete(ctx, secretName, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating VM") + vm := createVmServiceVmWithPvcs( + ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc}, vmi, storageClassName, secretName) + defer func() { + ginkgo.By("Deleting VM") + err = vmopC.Delete(ctx, 
&vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating loadbalancing service for ssh with the VM") + vmlbsvc := createService4Vm(ctx, vmopC, namespace, vm.Name) + defer func() { + ginkgo.By("Deleting loadbalancing service for ssh with the VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachineService{ObjectMeta: metav1.ObjectMeta{ + Name: vmlbsvc.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Wait for VM to come up and get an IP") + vmIp, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait and verify PVCs are attached to the VM") + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm, + []*v1.PersistentVolumeClaim{pvc})).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify PVCs are accessible to the VM") + ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity") + vm, err = getVmsvcVM(ctx, vmopC, vm.Namespace, vm.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for i, vol := range vm.Status.Volumes { + volFolder := formatNVerifyPvcIsAccessible(vol.DiskUuid, i+1, vmIp) + verifyDataIntegrityOnVmDisk(vmIp, volFolder) + } + + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if vanillaCluster { + err = snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, + metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Create a dynamic volume snapshot-1 for the volume") + volumeSnapshot1, snapshotContent1, snapshotCreated1, + snapshotContentCreated1, snapshotId1, _, err := createDynamicVolumeSnapshot(ctx, namespace, 
snapc, volumeSnapshotClass, + pvc, volHandle, diskSize, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated1 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent1, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated1 { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot1.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot1.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Create a dynamic volume snapshot-2 for the volume") + volumeSnapshot2, snapshotContent2, snapshotCreated2, + snapshotContentCreated2, snapshotId2, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvc, volHandle, diskSize, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated2 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent2, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated2 { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot2.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot2.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Create a volume from a snapshot") + pvc2, pv2, _ := verifyVolumeRestoreOperation(ctx, client, namespace, storageclass, volumeSnapshot1, diskSize, false) + volHandle2 := pv2[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) + + 
ginkgo.By("Create a volume from a snapshot") + pvc3, pv3, _ := verifyVolumeRestoreOperation(ctx, client, namespace, storageclass, volumeSnapshot2, diskSize, false) + volHandle3 := pv3[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle3).NotTo(gomega.BeEmpty()) + + ginkgo.By("Creating VM") + vm2 := createVmServiceVmWithPvcs( + ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc2, pvc3}, vmi, storageClassName, secretName) + defer func() { + ginkgo.By("Deleting VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm2.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Wait for VM to come up and get an IP") + vmIp2, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm2.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait and verify PVCs are attached to the VM") + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm2, + []*v1.PersistentVolumeClaim{pvc2})).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify PVCs are accessible to the VM") + ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity") + vm, err = getVmsvcVM(ctx, vmopC, vm2.Namespace, vm2.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for i, vol := range vm.Status.Volumes { + volFolder := formatNVerifyPvcIsAccessible(vol.DiskUuid, i+1, vmIp2) + verifyDataIntegrityOnVmDisk(vmIp2, volFolder) + } + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated1, snapshotContentCreated1, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot1, pandoraSyncWaitTime, volHandle, snapshotId1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated2, snapshotContentCreated2, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot2, pandoraSyncWaitTime, volHandle, snapshotId2) + 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) })