diff --git a/tests/e2e/csi_snapshot_basic.go b/tests/e2e/csi_snapshot_basic.go
index b17f93bf5c..17d2ad78ab 100644
--- a/tests/e2e/csi_snapshot_basic.go
+++ b/tests/e2e/csi_snapshot_basic.go
@@ -3112,6 +3112,12 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {
 		ginkgo.By("Create storage class")
 		storageclass, err := createStorageClass(client, scParameters, nil, "", "", true, scName)
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		var allowExpansion = true
+		storageclass.AllowVolumeExpansion = &allowExpansion
+		storageclass, err = client.StorageV1().StorageClasses().Update(ctx, storageclass, metav1.UpdateOptions{})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
 		defer func() {
 			if vanillaCluster {
 				err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
@@ -3273,7 +3279,7 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {
 	   6. Run resize and it should succeed
 	   7. Cleanup the pvc
 	*/
-	ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot][supervisor-snapshot] Volume online resize of a volume having "+
+	ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot][supervisor-snapshot] TC13Volume online resize of a volume having "+
 		"snapshots", ginkgo.Label(p0, block, vanilla, tkg, snapshot, stable, negative), func() {
 		ctx, cancel := context.WithCancel(context.Background())
 		defer cancel()
@@ -3282,6 +3288,12 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {
 		ginkgo.By("Create storage class")
 		storageclass, err := createStorageClass(client, scParameters, nil, "", "", true, scName)
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		var allowExpansion = true
+		storageclass.AllowVolumeExpansion = &allowExpansion
+		storageclass, err = client.StorageV1().StorageClasses().Update(ctx, storageclass, metav1.UpdateOptions{})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
 		defer func() {
 			if vanillaCluster {
 				err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
@@ -3335,6 +3347,12 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node")
 
+		if !guestCluster {
+			ginkgo.By("Verify volume metadata for deployment pod, pvc and pv")
+			err = waitAndVerifyCnsVolumeMetadata(ctx, volHandle, pvclaim, pvs[0], pod)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		}
+
 		ginkgo.By("Modify the PVC spec to enable online volume expansion when no snapshot exists for this PVC")
 		currentPvcSize := pvclaim.Spec.Resources.Requests[v1.ResourceStorage]
 		newSize := currentPvcSize.DeepCopy()
@@ -3498,8 +3516,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {
 		ctx, cancel := context.WithCancel(context.Background())
 		defer cancel()
 
-		var volHandle string
-
 		ginkgo.By("Create storage class")
 		storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, scName)
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
@@ -3629,7 +3645,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {
 		var volumesnapshots []*snapV1.VolumeSnapshot
 		var volumesnapshotsReadytoUse []*snapV1.VolumeSnapshot
 		var snapshotContents []*snapV1.VolumeSnapshotContent
-		var volHandle string
 
 		ginkgo.By("Create storage class")
 		storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, scName)
@@ -3871,8 +3886,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {
 		ctx, cancel := context.WithCancel(context.Background())
 		defer cancel()
 
-		var volHandle string
-
 		ginkgo.By("Create storage class")
 		storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, scName)
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
@@ -4571,13 +4584,11 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {
 	   snapshot creation should succeed after resize completes
 	*/
 
-	ginkgo.It("Volume snapshot creation when resize is in progress", func() {
+	ginkgo.It("TC14Volume snapshot creation when resize is in progress", func() {
 
 		ctx, cancel := context.WithCancel(context.Background())
 		defer cancel()
 
-		var volHandle string
-
 		ginkgo.By("Create storage class")
 		storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, scName)
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
@@ -4804,7 +4815,6 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() {
 		pvclaims := make([]*v1.PersistentVolumeClaim, volumeOpsScale)
 		pvclaims2 := make([]*v1.PersistentVolumeClaim, volumeOpsScale)
 		var persistentvolumes []*v1.PersistentVolume
-		var volHandle string
 		var volHandles []string
 
 		ginkgo.By("Create storage class")
diff --git a/tests/e2e/snapshot_vmservice_vm.go b/tests/e2e/snapshot_vmservice_vm.go
new file mode 100644
index 0000000000..23e30c7196
--- /dev/null
+++ b/tests/e2e/snapshot_vmservice_vm.go
@@ -0,0 +1,300 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"strings"
+
+	snapclient "github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned"
+	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
+
+	vmopv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha1"
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	clientset "k8s.io/client-go/kubernetes"
+	restclient "k8s.io/client-go/rest"
+	"k8s.io/kubernetes/test/e2e/framework"
+	fnodes "k8s.io/kubernetes/test/e2e/framework/node"
+	fpv "k8s.io/kubernetes/test/e2e/framework/pv"
+	admissionapi "k8s.io/pod-security-admission/api"
+	ctlrclient "sigs.k8s.io/controller-runtime/pkg/client"
+
+	cnsop "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator"
+)
+
+var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() {
+
+	f := framework.NewDefaultFramework("vmsvc")
+	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
+	f.SkipNamespaceCreation = true // tests will create their own namespaces
+	var (
+		client                     clientset.Interface
+		namespace                  string
+		datastoreURL               string
+		storagePolicyName          string
+		storageClassName           string
+		storageProfileId           string
+		vcRestSessionId            string
+		vmi                        string
+		vmClass                    string
+		vmopC                      ctlrclient.Client
+		cnsopC                     ctlrclient.Client
+		isVsanHealthServiceStopped bool
+		isSPSserviceStopped        bool
+		vcAddress                  string
+		restConfig                 *restclient.Config
+		snapc                      *snapclient.Clientset
+		pandoraSyncWaitTime        int
+		err                        error
+	)
+
+	ginkgo.BeforeEach(func() {
+		ctx, cancel := context.WithCancel(context.Background())
+		defer cancel()
+
+		// client connection
+		client = f.ClientSet
+		bootstrap()
+
+		// fetch the testbed type for executing testcases
+		topologyFeature := os.Getenv(topologyFeature)
+
+		// fetch node and read storage policy name
+		if topologyFeature != topologyTkgHaName {
+			nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet)
+			framework.ExpectNoError(err, "Unable to find ready and schedulable Node")
+			if !(len(nodeList.Items) > 0) {
+				framework.Failf("Unable to find ready and schedulable Node")
+			}
+			storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores)
+		} else {
+			storagePolicyName = GetAndExpectStringEnvVar(envZonalStoragePolicyName)
+		}
+
+		// fetching vc ip and creating vc session
+		vcAddress = e2eVSphere.Config.Global.VCenterHostname + ":" + sshdPort
+		vcRestSessionId = createVcSession4RestApis(ctx)
+
+		// reading storage class name for wcp setup "wcpglobal_storage_profile"
+		storageClassName = strings.ReplaceAll(storagePolicyName, "_", "-") // since this is a wcp setup
+
+		// fetching shared datastore url
+		datastoreURL = GetAndExpectStringEnvVar(envSharedDatastoreURL)
+
+		// reading datastore moref
+		dsRef := getDsMoRefFromURL(ctx, datastoreURL)
+		framework.Logf("dsmoId: %v", dsRef.Value)
+
+		// reading storage profile id of "wcpglobal_storage_profile"
+		storageProfileId = e2eVSphere.GetSpbmPolicyID(storagePolicyName)
+
+		// creating/reading content library "https://wp-content-pstg.broadcom.com/vmsvc/lib.json"
+		contentLibId := createAndOrGetContentlibId4Url(vcRestSessionId, GetAndExpectStringEnvVar(envContentLibraryUrl),
+			dsRef.Value, GetAndExpectStringEnvVar(envContentLibraryUrlSslThumbprint))
+
+		// creating test wcp namespace
+		framework.Logf("Create a WCP namespace for the test")
+
+		// reading vm class required for vm creation
+		vmClass = os.Getenv(envVMClass)
+		if vmClass == "" {
+			vmClass = vmClassBestEffortSmall
+		}
+
+		// creating wcp test namespace and setting vmclass, contlib, storage class fields in test ns
+		namespace = createTestWcpNs(
+			vcRestSessionId, storageProfileId, vmClass, contentLibId, getSvcId(vcRestSessionId))
+
+		// creating vm scheme
+		vmopScheme := runtime.NewScheme()
+		gomega.Expect(vmopv1.AddToScheme(vmopScheme)).Should(gomega.Succeed())
+		vmopC, err = ctlrclient.New(f.ClientConfig(), ctlrclient.Options{Scheme: vmopScheme})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		cnsOpScheme := runtime.NewScheme()
+		gomega.Expect(cnsop.AddToScheme(cnsOpScheme)).Should(gomega.Succeed())
+		cnsopC, err = ctlrclient.New(f.ClientConfig(), ctlrclient.Options{Scheme: cnsOpScheme})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		// reading vm image name "ubuntu-2004-cloud-init-21.4-kube-v1.20.10"
+		vmImageName := GetAndExpectStringEnvVar(envVmsvcVmImageName)
+
+		// listing the vm images available and added in test ns
+		framework.Logf("Waiting for virtual machine image list to be available in namespace '%s' for image '%s'",
+			namespace, vmImageName)
+		vmi = waitNGetVmiForImageName(ctx, vmopC, namespace, vmImageName)
+		gomega.Expect(vmi).NotTo(gomega.BeEmpty())
+
+		// Get snapshot client using the rest config
+		restConfig = getRestConfigClient()
+		snapc, err = snapclient.NewForConfig(restConfig)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	})
+
+	ginkgo.AfterEach(func() {
+		ctx, cancel := context.WithCancel(context.Background())
+		defer cancel()
+		if isVsanHealthServiceStopped {
+			ginkgo.By(fmt.Sprintf("Starting %v on the vCenter host", vsanhealthServiceName))
+			startVCServiceWait4VPs(ctx, vcAddress, vsanhealthServiceName, &isVsanHealthServiceStopped)
+		}
+
+		if isSPSserviceStopped {
+			ginkgo.By(fmt.Sprintf("Starting %v on the vCenter host", spsServiceName))
+			startVCServiceWait4VPs(ctx, vcAddress, spsServiceName, &isSPSserviceStopped)
+		}
+		dumpSvcNsEventsOnTestFailure(client, namespace)
+		delTestWcpNs(vcRestSessionId, namespace)
+		gomega.Expect(waitForNamespaceToGetDeleted(ctx, client, namespace, poll, pollTimeout)).To(gomega.Succeed())
+	})
+
+	/*
+	   Dynamic PVC → VM → Snapshot
+	   Steps:
+	   1. Create a PVC using the storage class (storage policy) tagged to the supervisor namespace
+	   2. Wait for PVC to reach the Bound state.
+	   3. Create a VM service VM using the PVC created in step #1
+	   4. Wait for the VM service to be up and in the powered-on state.
+	   5. Once the VM is up, verify that the volume is accessible inside the VM
+	   6. Write some data into the volume.
+	   7. Get VolumeSnapshotClass "volumesnapshotclass-delete" from supervisor cluster
+	   8. Create a volume snapshot for the PVC created in step #1.
+	   9. Snapshot Verification: Execute and verify the steps mentioned in the Create snapshot mandatory checks
+	   10. Verify CNS metadata for a PVC
+	   11. Cleanup: Execute and verify the steps mentioned in the Delete snapshot mandatory checks
+	*/
+
+	ginkgo.It("TC1VM", func() {
+
+		ctx, cancel := context.WithCancel(context.Background())
+		defer cancel()
+
+		ginkgo.By("Get the storageclass tagged to the namespace")
+		storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		ginkgo.By("Create a PVC")
+		pvc, err := createPVC(ctx, client, namespace, nil, "", storageclass, "")
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		ginkgo.By("Waiting for all claims to be in bound state")
+		pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc}, pollTimeout)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		pv := pvs[0]
+		volHandle := pv.Spec.CSI.VolumeHandle
+		gomega.Expect(volHandle).NotTo(gomega.BeEmpty())
+		defer func() {
+			ginkgo.By("Delete PVCs")
+			err = fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+			ginkgo.By("Waiting for CNS volumes to be deleted")
+			err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		}()
+		ginkgo.By("Creating VM bootstrap data")
+		secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace)
+		defer func() {
+			ginkgo.By("Deleting VM bootstrap data")
+			err := client.CoreV1().Secrets(namespace).Delete(ctx, secretName, *metav1.NewDeleteOptions(0))
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		}()
+
+		ginkgo.By("Creating VM")
+		vm := createVmServiceVmWithPvcs(
+			ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc}, vmi, storageClassName, secretName)
+		defer func() {
+			ginkgo.By("Deleting VM")
+			err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{
+				Name:      vm.Name,
+				Namespace: namespace,
+			}})
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		}()
+
+		ginkgo.By("Creating loadbalancing service for ssh with the VM")
+		vmlbsvc := createService4Vm(ctx, vmopC, namespace, vm.Name)
+		defer func() {
+			ginkgo.By("Deleting loadbalancing service for ssh with the VM")
+			err = vmopC.Delete(ctx, &vmopv1.VirtualMachineService{ObjectMeta: metav1.ObjectMeta{
+				Name:      vmlbsvc.Name,
+				Namespace: namespace,
+			}})
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		}()
+
+		ginkgo.By("Wait for VM to come up and get an IP")
+		vmIp, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm.Name)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		ginkgo.By("Wait and verify PVCs are attached to the VM")
+		gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm,
+			[]*v1.PersistentVolumeClaim{pvc})).NotTo(gomega.HaveOccurred())
+
+		ginkgo.By("Verify PVCs are accessible to the VM")
+		ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity")
+		vm, err = getVmsvcVM(ctx, vmopC, vm.Namespace, vm.Name) // refresh vm info
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		for i, vol := range vm.Status.Volumes {
+			volFolder := formatNVerifyPvcIsAccessible(vol.DiskUuid, i+1, vmIp)
+			verifyDataIntegrityOnVmDisk(vmIp, volFolder)
+		}
+
+		ginkgo.By("Create volume snapshot class")
+		volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		defer func() {
+			if vanillaCluster {
+				err = snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name,
+					metav1.DeleteOptions{})
+				gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			}
+		}()
+		ginkgo.By("Create a dynamic volume snapshot")
+		volumeSnapshot, snapshotContent, snapshotCreated,
+			snapshotContentCreated, snapshotId, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass,
+			pvc, volHandle, diskSize, true)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		defer func() {
+			if snapshotContentCreated {
+				err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime)
+				gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			}
+
+			if snapshotCreated {
+				framework.Logf("Deleting volume snapshot")
+				deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime)
+
+				framework.Logf("Wait till the volume snapshot is deleted")
+				err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc,
+					*volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime)
+				gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			}
+		}()
+
+		ginkgo.By("Delete dynamic volume snapshot")
+		snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace,
+			volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+	})
+})