From 5485d1d1aa37f2bfcf402e930b929fa549da12f3 Mon Sep 17 00:00:00 2001 From: rpanduranga Date: Thu, 5 Sep 2024 17:24:25 +0530 Subject: [PATCH] [CNS-UnRegisterVolume-API]: Automation tests for CNS-UnRegisterVolume API Feature --- tests/e2e/cns_unregister_volume_api.go | 750 +++++++++++++++++++++++++ tests/e2e/csi_snapshot_basic.go | 104 ++++ tests/e2e/gc_rwx_basic.go | 355 ++++++++++++ tests/e2e/util.go | 106 ++++ tests/e2e/vmservice_vm.go | 291 ++++++++++ 5 files changed, 1606 insertions(+) create mode 100644 tests/e2e/cns_unregister_volume_api.go diff --git a/tests/e2e/cns_unregister_volume_api.go b/tests/e2e/cns_unregister_volume_api.go new file mode 100644 index 0000000000..6f74a3c0e1 --- /dev/null +++ b/tests/e2e/cns_unregister_volume_api.go @@ -0,0 +1,750 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "context" + "fmt" + "math/rand" + "os" + "strconv" + "sync" + "time" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/framework" + fnodes "k8s.io/kubernetes/test/e2e/framework/node" + fpod "k8s.io/kubernetes/test/e2e/framework/pod" + fpv "k8s.io/kubernetes/test/e2e/framework/pv" + admissionapi "k8s.io/pod-security-admission/api" +) + +var _ = ginkgo.Describe("[csi-unregister-volume] CNS Unregister Volume", func() { + f := framework.NewDefaultFramework("cns-unregister-volume") + f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged + const defaultVolumeOpsScale = 30 + const defaultVolumeOpsScaleWCP = 29 + var ( + client clientset.Interface + c clientset.Interface + fullSyncWaitTime int + namespace string + scParameters map[string]string + storagePolicyName string + volumeOpsScale int + isServiceStopped bool + serviceName string + csiReplicaCount int32 + deployment *appsv1.Deployment + ) + + ginkgo.BeforeEach(func() { + bootstrap() + client = f.ClientSet + namespace = getNamespaceToRunTests(f) + scParameters = make(map[string]string) + isServiceStopped = false + storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) + framework.ExpectNoError(err, "Unable to find ready and schedulable Node") + + if !(len(nodeList.Items) > 0) { + framework.Failf("Unable to find ready and schedulable Node") + } + + if guestCluster { + svcClient, svNamespace := getSvcClientAndNamespace() + setResourceQuota(svcClient, svNamespace, rqLimit) + } + + if os.Getenv("VOLUME_OPS_SCALE") != "" { + volumeOpsScale, err = strconv.Atoi(os.Getenv(envVolumeOperationsScale)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + if vanillaCluster { + volumeOpsScale = defaultVolumeOpsScale + } else { + 
volumeOpsScale = defaultVolumeOpsScaleWCP + } + } + framework.Logf("VOLUME_OPS_SCALE is set to %v", volumeOpsScale) + + if os.Getenv(envFullSyncWaitTime) != "" { + fullSyncWaitTime, err = strconv.Atoi(os.Getenv(envFullSyncWaitTime)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // Full sync interval can be 1 min at minimum so full sync wait time has to be more than 120s + if fullSyncWaitTime < 120 || fullSyncWaitTime > defaultFullSyncWaitTime { + framework.Failf("The FullSync Wait time %v is not set correctly", fullSyncWaitTime) + } + } else { + fullSyncWaitTime = defaultFullSyncWaitTime + } + + // Get CSI Controller's replica count from the setup + controllerClusterConfig := os.Getenv(contollerClusterKubeConfig) + c = client + if controllerClusterConfig != "" { + framework.Logf("Creating client for remote kubeconfig") + remoteC, err := createKubernetesClientFromConfig(controllerClusterConfig) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + c = remoteC + } + deployment, err = c.AppsV1().Deployments(csiSystemNamespace).Get(ctx, + vSphereCSIControllerPodNamePrefix, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + csiReplicaCount = *deployment.Spec.Replicas + }) + + ginkgo.AfterEach(func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + if isServiceStopped { + if serviceName == "CSI" { + framework.Logf("Starting CSI driver") + ignoreLabels := make(map[string]string) + err := updateDeploymentReplicawithWait(c, csiReplicaCount, vSphereCSIControllerPodNamePrefix, + csiSystemNamespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Wait for the CSI Pods to be up and Running + list_of_pods, err := fpod.GetPodsInNamespace(ctx, client, csiSystemNamespace, ignoreLabels) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + num_csi_pods := len(list_of_pods) + err = fpod.WaitForPodsRunningReady(ctx, client, csiSystemNamespace, int32(num_csi_pods), 0, + pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else if serviceName == hostdServiceName { + framework.Logf("In afterEach function to start the hostd service on all hosts") + hostIPs := getAllHostsIP(ctx, true) + for _, hostIP := range hostIPs { + startHostDOnHost(ctx, hostIP) + } + } else { + vcAddress := e2eVSphere.Config.Global.VCenterHostname + ":" + sshdPort + ginkgo.By(fmt.Sprintf("Starting %v on the vCenter host", serviceName)) + err := invokeVCenterServiceControl(ctx, startOperation, serviceName, vcAddress) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = waitVCenterServiceToBeInState(ctx, serviceName, vcAddress, svcRunningMessage) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + } + + ginkgo.By(fmt.Sprintf("Resetting provisioner time interval to %s sec", defaultProvisionerTimeInSec)) + updateCSIDeploymentProvisionerTimeout(c, csiSystemNamespace, defaultProvisionerTimeInSec) + + if supervisorCluster { + deleteResourceQuota(client, namespace) + dumpSvcNsEventsOnTestFailure(client, namespace) + } + if guestCluster { + svcClient, svNamespace := getSvcClientAndNamespace() + setResourceQuota(svcClient, svNamespace, defaultrqLimit) + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) + } + }) + + /* + Create SPBM Policy: + Define and create a Storage Policy-Based Management (SPBM) policy. + Assign Policy to Namespace: + Apply the SPBM policy to the test namespace with sufficient storage quota. + + Create PVC: + + Create a PersistentVolumeClaim (PVC) named pvc1 in the test namespace. 
+ Verify CNS Metadata + + Verify Storage Quota: + Check that the storage quota in the test namespace reflects the space occupied by pvc1. + Create CnsUnregisterVolume CR: + + Create a CnsUnregisterVolume Custom Resource (CR) with the volumeID of pvc1 and apply it to the test namespace. + + Check CnsUnregisterVolume Status: + Verify that the status.Unregistered field in the CnsUnregisterVolume CR is set to true. + + Verify Deletion: + Ensure that the PersistentVolume (PV) and PVC are deleted from the Supervisor cluster. + Verify CNS Metadata + + Check Quota Release: + Confirm that the storage quota has been freed after the deletion of PV/PVC. + + Verify FCD: + Use the RetrieveVStorageObject API from vCenter MOB to confirm that the FCD associated with the PVC has not been deleted. + + Cleanup: + Delete the FCD, the test namespace, and the SPBM policy. + */ + ginkgo.It("[csi-unregister-volume] Export detached volume", func() { + exportDetachedVolume(namespace, client, storagePolicyName, scParameters, + volumeOpsScale, true) + }) + + ginkgo.It("export attached volume", func() { + serviceName = vsanhealthServiceName + exportAttacheddVolume(namespace, client, storagePolicyName, scParameters, + volumeOpsScale, true) + }) + + ginkgo.It("export volume when hostd service goes down", func() { + serviceName = hostdServiceName + exportVolumeWithServiceDown(serviceName, namespace, client, storagePolicyName, + scParameters, volumeOpsScale, isServiceStopped, c) + }) + + ginkgo.It("export volume when cns service goes down", func() { + serviceName = vsanhealthServiceName + exportVolumeWithServiceDown(serviceName, namespace, client, storagePolicyName, + scParameters, volumeOpsScale, isServiceStopped, c) + }) + + ginkgo.It("export volume when vpxd service goes down", func() { + serviceName = vpxdServiceName + exportVolumeWithServiceDown(serviceName, namespace, client, storagePolicyName, + scParameters, volumeOpsScale, isServiceStopped, c) + }) + + ginkgo.It("export volume when sps service goes down", func() { + serviceName = spsServiceName + exportVolumeWithServiceDown(serviceName, namespace, client, storagePolicyName, + scParameters, volumeOpsScale, isServiceStopped, c) + }) + + ginkgo.It("export volume when csi service goes down", func() { + serviceName = "CSI" + exportVolumeWithServiceDown(serviceName, namespace, client, storagePolicyName, + scParameters, volumeOpsScale, isServiceStopped, c) + }) + +}) + +func exportDetachedVolume(namespace string, client clientset.Interface, + storagePolicyName string, scParameters map[string]string, volumeOpsScale int, extendVolume bool) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var storageclass *storagev1.StorageClass + var persistentvolumes []*v1.PersistentVolume + var pvclaims []*v1.PersistentVolumeClaim + var err error + //var fullSyncWaitTime int + pvclaims = make([]*v1.PersistentVolumeClaim, volumeOpsScale) + + // Get a config to talk to the apiserver + restConfig := getRestConfigClient() + + framework.Logf("storagePolicyName %v", storagePolicyName) + framework.Logf("extendVolume %v", extendVolume) + + if supervisorCluster { + ginkgo.By("CNS_TEST: Running for WCP setup") + thickProvPolicy := os.Getenv(envStoragePolicyNameWithThickProvision) + if thickProvPolicy == "" { + ginkgo.Skip(envStoragePolicyNameWithThickProvision + " env variable not set") + } + profileID := e2eVSphere.GetSpbmPolicyID(thickProvPolicy) + scParameters[scParamStoragePolicyID] = profileID + // create resource quota + createResourceQuota(client, namespace, 
rqLimit, thickProvPolicy) + storageclass, err = createStorageClass(client, scParameters, nil, "", "", true, thickProvPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + ginkgo.By("CNS_TEST: Running for GC setup") + thickProvPolicy := os.Getenv(envStoragePolicyNameWithThickProvision) + if thickProvPolicy == "" { + ginkgo.Skip(envStoragePolicyNameWithThickProvision + " env variable not set") + } + createResourceQuota(client, namespace, rqLimit, thickProvPolicy) + scParameters[svStorageClassName] = thickProvPolicy + scParameters[scParamFsType] = ext4FSType + storageclass, err = client.StorageV1().StorageClasses().Get(ctx, thickProvPolicy, metav1.GetOptions{}) + if !apierrors.IsNotFound(err) { + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + var allowExpansion = true + storageclass.AllowVolumeExpansion = &allowExpansion + storageclass, err = client.StorageV1().StorageClasses().Update(ctx, storageclass, metav1.UpdateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Creating PVCs using the Storage Class") + framework.Logf("VOLUME_OPS_SCALE is set to %v", volumeOpsScale) + for i := 0; i < volumeOpsScale; i++ { + framework.Logf("Creating pvc-%v", i) + pvclaims[i], err = fpv.CreatePVC(ctx, client, namespace, + getPersistentVolumeClaimSpecWithStorageClass(namespace, diskSize, storageclass, nil, "")) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + ginkgo.By("Waiting for all claims to be in bound state") + persistentvolumes, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, + 2*framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // TODO: Add a logic to check for the no orphan volumes + defer func() { + for _, claim := range pvclaims { + err := fpv.DeletePersistentVolumeClaim(ctx, client, claim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + ginkgo.By("Verify PVs, volumes are deleted from CNS") + for _, pv := range persistentvolumes { + err := fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, framework.Poll, + framework.PodDeleteTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volumeID := pv.Spec.CSI.VolumeHandle + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeID) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), + fmt.Sprintf("Volume: %s should not be present in the "+ + "CNS after it is deleted from kubernetes", volumeID)) + } + }() + + ginkgo.By("Invoking CNS Unregister Volume API for all the FCD's created above") + for _, pv := range persistentvolumes { + volumeID := pv.Spec.CSI.VolumeHandle + time.Sleep(30 * time.Second) + + ginkgo.By("Create CNS unregister volume with above created FCD " + pv.Spec.CSI.VolumeHandle) + + cnsUnRegisterVolume := getCNSUnregisterVolumeSpec(namespace, volumeID) + + cnsRegisterVolumeName := cnsUnRegisterVolume.GetName() + framework.Logf("CNS register volume name : %s", cnsRegisterVolumeName) + + err = createCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + ginkgo.By("Delete CNS unregister volume CR by name " + cnsRegisterVolumeName) + err = deleteCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Waiting for CNS unregister volume to be unregistered") + framework.ExpectNoError(waitForCNSUnRegisterVolumeToGetUnregistered(ctx, + restConfig, namespace, cnsUnRegisterVolume, poll, supervisorClusterOperationsTimeout)) 
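+		// Optional, illustrative debug aid (not part of the original test flow):
+		// re-read the CR with the getCNSUnRegistervolume helper added in this patch
+		// and log its status; Unregistered is expected to be true once the wait
+		// above returns without error.
+		unregCR := getCNSUnRegistervolume(ctx, restConfig, cnsUnRegisterVolume)
+		framework.Logf("CnsUnregisterVolume %s Status.Unregistered: %v",
+			unregCR.Name, unregCR.Status.Unregistered)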
+ } + + ginkgo.By("Verify PVs, volumes are also deleted from CNS") + for _, pv := range persistentvolumes { + err := fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, framework.Poll, + framework.PodDeleteTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volumeID := pv.Spec.CSI.VolumeHandle + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeID) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), + fmt.Sprintf("Volume: %s should not be present in the "+ + "CNS after it is deleted from kubernetes", volumeID)) + } + + defaultDatastore = getDefaultDatastore(ctx) + ginkgo.By(fmt.Sprintf("defaultDatastore %v sec", defaultDatastore)) + + for _, pv1 := range persistentvolumes { + ginkgo.By(fmt.Sprintf("Deleting FCD: %s", pv1.Spec.CSI.VolumeHandle)) + err = deleteFcdWithRetriesForSpecificErr(ctx, pv1.Spec.CSI.VolumeHandle, defaultDatastore.Reference(), + []string{disklibUnlinkErr}, []string{objOrItemNotFoundErr}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } +} + +func exportAttacheddVolume(namespace string, client clientset.Interface, + storagePolicyName string, scParameters map[string]string, volumeOpsScale int, extendVolume bool) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var storageclass *storagev1.StorageClass + var persistentvolumes []*v1.PersistentVolume + var pvclaims []*v1.PersistentVolumeClaim + var err error + //var fullSyncWaitTime int + pvclaims = make([]*v1.PersistentVolumeClaim, volumeOpsScale) + + // Get a config to talk to the apiserver + restConfig := getRestConfigClient() + + framework.Logf("storagePolicyName %v", storagePolicyName) + framework.Logf("extendVolume %v", extendVolume) + + if supervisorCluster { + ginkgo.By("CNS_TEST: Running for WCP setup") + thickProvPolicy := os.Getenv(envStoragePolicyNameWithThickProvision) + if thickProvPolicy == "" { + ginkgo.Skip(envStoragePolicyNameWithThickProvision + " env variable not set") + } + profileID := e2eVSphere.GetSpbmPolicyID(thickProvPolicy) + scParameters[scParamStoragePolicyID] = profileID + // create resource quota + createResourceQuota(client, namespace, rqLimit, thickProvPolicy) + storageclass, err = createStorageClass(client, scParameters, nil, "", "", true, thickProvPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + ginkgo.By("CNS_TEST: Running for GC setup") + thickProvPolicy := os.Getenv(envStoragePolicyNameWithThickProvision) + if thickProvPolicy == "" { + ginkgo.Skip(envStoragePolicyNameWithThickProvision + " env variable not set") + } + createResourceQuota(client, namespace, rqLimit, thickProvPolicy) + scParameters[svStorageClassName] = thickProvPolicy + scParameters[scParamFsType] = ext4FSType + storageclass, err = client.StorageV1().StorageClasses().Get(ctx, thickProvPolicy, metav1.GetOptions{}) + if !apierrors.IsNotFound(err) { + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + var allowExpansion = true + storageclass.AllowVolumeExpansion = &allowExpansion + storageclass, err = client.StorageV1().StorageClasses().Update(ctx, storageclass, metav1.UpdateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Creating PVCs using the Storage Class") + framework.Logf("VOLUME_OPS_SCALE is set to %v", volumeOpsScale) + for i := 0; i < volumeOpsScale; i++ { + framework.Logf("Creating pvc%v", i) + pvclaims[i], err = fpv.CreatePVC(ctx, client, namespace, + getPersistentVolumeClaimSpecWithStorageClass(namespace, diskSize, storageclass, nil, "")) + 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + ginkgo.By("Waiting for all claims to be in bound state") + persistentvolumes, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, + 2*framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // TODO: Add a logic to check for the no orphan volumes + defer func() { + for _, claim := range pvclaims { + err := fpv.DeletePersistentVolumeClaim(ctx, client, claim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + ginkgo.By("Verify PVs, volumes are deleted from CNS") + for _, pv := range persistentvolumes { + err := fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, framework.Poll, + framework.PodDeleteTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volumeID := pv.Spec.CSI.VolumeHandle + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeID) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), + fmt.Sprintf("Volume: %s should not be present in the "+ + "CNS after it is deleted from kubernetes", volumeID)) + } + }() + + ginkgo.By("Create POD") + pod, err := createPod(ctx, client, namespace, nil, pvclaims, false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + ginkgo.By("Deleting the pod") + err = fpod.DeletePodWithWait(ctx, client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + err = fpod.WaitForPodNameRunningInNamespace(ctx, client, pod.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + for _, pv := range persistentvolumes { + volumeID := pv.Spec.CSI.VolumeHandle + time.Sleep(30 * time.Second) + + ginkgo.By("Create CNS unregister volume with above created FCD " + pv.Spec.CSI.VolumeHandle) + cnsUnRegisterVolume := getCNSUnregisterVolumeSpec(namespace, volumeID) + + err = createCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(waitForCNSUnRegisterVolumeToGetUnregistered(ctx, + restConfig, namespace, cnsUnRegisterVolume, poll, + supervisorClusterOperationsTimeout)).To(gomega.HaveOccurred()) + cnsRegisterVolumeName := cnsUnRegisterVolume.GetName() + framework.Logf("CNS register volume name : %s", cnsRegisterVolumeName) + } + + ginkgo.By("Deleting the pod") + err = fpod.DeletePodWithWait(ctx, client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + for _, pv := range persistentvolumes { + volumeID := pv.Spec.CSI.VolumeHandle + time.Sleep(30 * time.Second) + + ginkgo.By("Create CNS unregister volume with above created FCD " + pv.Spec.CSI.VolumeHandle) + cnsUnRegisterVolume := getCNSUnregisterVolumeSpec(namespace, volumeID) + + err = createCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(waitForCNSUnRegisterVolumeToGetUnregistered(ctx, + restConfig, namespace, cnsUnRegisterVolume, poll, + supervisorClusterOperationsTimeout)).NotTo(gomega.HaveOccurred()) + cnsRegisterVolumeName := cnsUnRegisterVolume.GetName() + framework.Logf("CNS register volume name : %s", cnsRegisterVolumeName) + } + + ginkgo.By("Verify PVs, volumes are deleted from CNS") + for _, pv := range persistentvolumes { + err := fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, framework.Poll, + framework.PodDeleteTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volumeID := pv.Spec.CSI.VolumeHandle + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeID) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), + fmt.Sprintf("Volume: %s should not be present in the "+ + "CNS after it 
is deleted from kubernetes", volumeID)) + } + + defaultDatastore = getDefaultDatastore(ctx) + ginkgo.By(fmt.Sprintf("defaultDatastore %v sec", defaultDatastore)) + + for _, pv1 := range persistentvolumes { + ginkgo.By(fmt.Sprintf("Deleting FCD: %s", pv1.Spec.CSI.VolumeHandle)) + err = deleteFcdWithRetriesForSpecificErr(ctx, pv1.Spec.CSI.VolumeHandle, defaultDatastore.Reference(), + []string{disklibUnlinkErr}, []string{objOrItemNotFoundErr}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } +} + +// exportVolumeWithServiceDown creates the volumes and immediately stops the services and wait for +// the service to be up again and validates the volumes are bound +func exportVolumeWithServiceDown(serviceName string, namespace string, client clientset.Interface, + storagePolicyName string, scParameters map[string]string, volumeOpsScale int, isServiceStopped bool, + c clientset.Interface) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ginkgo.By(fmt.Sprintf("Invoking Test for create volume when %v goes down", serviceName)) + var storageclass *storagev1.StorageClass + var persistentvolumes []*v1.PersistentVolume + var pvclaims []*v1.PersistentVolumeClaim + var err error + var fullSyncWaitTime int + pvclaims = make([]*v1.PersistentVolumeClaim, volumeOpsScale) + + // Decide which test setup is available to run + if vanillaCluster { + ginkgo.By("CNS_TEST: Running for vanilla k8s setup") + // TODO: Create Thick Storage Policy from Pre-setup to support 6.7 Setups + scParameters[scParamStoragePolicyName] = "Management Storage Policy - Regular" + // Check if it is file volumes setups + if rwxAccessMode { + scParameters[scParamFsType] = nfs4FSType + } + curtime := time.Now().Unix() + randomValue := rand.Int() + val := strconv.FormatInt(int64(randomValue), 10) + val = string(val[1:3]) + curtimestring := strconv.FormatInt(curtime, 10) + scName := "idempotency" + curtimestring + val + storageclass, err = createStorageClass(client, scParameters, nil, "", "", false, scName) + } else if supervisorCluster { + ginkgo.By("CNS_TEST: Running for WCP setup") + thickProvPolicy := os.Getenv(envStoragePolicyNameWithThickProvision) + if thickProvPolicy == "" { + ginkgo.Skip(envStoragePolicyNameWithThickProvision + " env variable not set") + } + profileID := e2eVSphere.GetSpbmPolicyID(thickProvPolicy) + scParameters[scParamStoragePolicyID] = profileID + // create resource quota + createResourceQuota(client, namespace, rqLimit, thickProvPolicy) + storageclass, err = createStorageClass(client, scParameters, nil, "", "", false, thickProvPolicy) + } else { + ginkgo.By("CNS_TEST: Running for GC setup") + thickProvPolicy := os.Getenv(envStoragePolicyNameWithThickProvision) + if thickProvPolicy == "" { + ginkgo.Skip(envStoragePolicyNameWithThickProvision + " env variable not set") + } + createResourceQuota(client, namespace, rqLimit, thickProvPolicy) + scParameters[svStorageClassName] = thickProvPolicy + storageclass, err = client.StorageV1().StorageClasses().Get(ctx, thickProvPolicy, metav1.GetOptions{}) + if !apierrors.IsNotFound(err) { + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + } + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + if vanillaCluster { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, + *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Creating PVCs using the Storage Class") + framework.Logf("VOLUME_OPS_SCALE is set to %v", volumeOpsScale) + for i := 0; i < 
volumeOpsScale; i++ { + framework.Logf("Creating pvc%v", i) + accessMode := v1.ReadWriteOnce + + // Check if it is file volumes setups + if rwxAccessMode { + accessMode = v1.ReadWriteMany + } + pvclaims[i], err = createPVC(ctx, client, namespace, nil, "", storageclass, accessMode) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Waiting for all claims to be in bound state") + persistentvolumes, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, + 2*framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if serviceName == "CSI" { + // Get CSI Controller's replica count from the setup + deployment, err := c.AppsV1().Deployments(csiSystemNamespace).Get(ctx, + vSphereCSIControllerPodNamePrefix, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + csiReplicaCount := *deployment.Spec.Replicas + + ginkgo.By("Stopping CSI driver") + isServiceStopped, err = stopCSIPods(ctx, c, csiSystemNamespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + if isServiceStopped { + framework.Logf("Starting CSI driver") + isServiceStopped, err = startCSIPods(ctx, c, csiReplicaCount, csiSystemNamespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + framework.Logf("Starting CSI driver") + isServiceStopped, err = startCSIPods(ctx, c, csiReplicaCount, csiSystemNamespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + if os.Getenv(envFullSyncWaitTime) != "" { + fullSyncWaitTime, err = strconv.Atoi(os.Getenv(envFullSyncWaitTime)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // Full sync interval can be 1 min at minimum so full sync wait time has to be more than 120s + if fullSyncWaitTime < 120 || fullSyncWaitTime > defaultFullSyncWaitTime { + framework.Failf("The FullSync Wait time %v is not set correctly", fullSyncWaitTime) + } + } else { + fullSyncWaitTime = defaultFullSyncWaitTime + } + + ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow full sync finish", fullSyncWaitTime)) + time.Sleep(time.Duration(fullSyncWaitTime) * time.Second) + } else if serviceName == hostdServiceName { + ginkgo.By("Fetch IPs for the all the hosts in the cluster") + hostIPs := getAllHostsIP(ctx, true) + isServiceStopped = true + + var wg sync.WaitGroup + wg.Add(len(hostIPs)) + + for _, hostIP := range hostIPs { + go stopHostD(ctx, hostIP, &wg) + } + wg.Wait() + + defer func() { + framework.Logf("In defer function to start the hostd service on all hosts") + if isServiceStopped { + for _, hostIP := range hostIPs { + startHostDOnHost(ctx, hostIP) + } + isServiceStopped = false + } + }() + + ginkgo.By("Sleeping for 5+1 min for default provisioner timeout") + time.Sleep(pollTimeoutSixMin) + + for _, hostIP := range hostIPs { + startHostDOnHost(ctx, hostIP) + } + isServiceStopped = false + } else { + ginkgo.By(fmt.Sprintf("Stopping %v on the vCenter host", serviceName)) + vcAddress := e2eVSphere.Config.Global.VCenterHostname + ":" + sshdPort + err = invokeVCenterServiceControl(ctx, stopOperation, serviceName, vcAddress) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + isServiceStopped = true + err = waitVCenterServiceToBeInState(ctx, serviceName, vcAddress, svcStoppedMessage) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + if isServiceStopped { + ginkgo.By(fmt.Sprintf("Starting %v on the vCenter host", serviceName)) + err = invokeVCenterServiceControl(ctx, startOperation, serviceName, vcAddress) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = waitVCenterServiceToBeInState(ctx, 
serviceName, vcAddress, svcRunningMessage) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + isServiceStopped = false + } + }() + + ginkgo.By("Sleeping for 5+1 min for default provisioner timeout") + time.Sleep(pollTimeoutSixMin) + + ginkgo.By(fmt.Sprintf("Starting %v on the vCenter host", serviceName)) + err = invokeVCenterServiceControl(ctx, startOperation, serviceName, vcAddress) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + isServiceStopped = false + err = waitVCenterServiceToBeInState(ctx, serviceName, vcAddress, svcRunningMessage) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Sleeping for full sync interval") + time.Sleep(time.Duration(fullSyncWaitTime) * time.Second) + } + + //After service restart + bootstrap() + + for _, pv := range persistentvolumes { + volumeID := pv.Spec.CSI.VolumeHandle + time.Sleep(30 * time.Second) + + ginkgo.By("Create CNS unregister volume with above created FCD " + pv.Spec.CSI.VolumeHandle) + cnsUnRegisterVolume := getCNSUnregisterVolumeSpec(namespace, volumeID) + + err = createCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(waitForCNSUnRegisterVolumeToGetUnregistered(ctx, + restConfig, namespace, cnsUnRegisterVolume, poll, supervisorClusterOperationsTimeout)) + cnsRegisterVolumeName := cnsUnRegisterVolume.GetName() + framework.Logf("CNS register volume name : %s", cnsRegisterVolumeName) + } + + // TODO: Add a logic to check for the no orphan volumes + defer func() { + for _, claim := range pvclaims { + err := fpv.DeletePersistentVolumeClaim(ctx, client, claim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + ginkgo.By("Verify PVs, volumes are deleted from CNS") + for _, pv := range persistentvolumes { + err := fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, framework.Poll, + framework.PodDeleteTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volumeID := pv.Spec.CSI.VolumeHandle + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeID) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), + fmt.Sprintf("Volume: %s should not be present in the CNS after it is deleted from "+ + "kubernetes", volumeID)) + } + }() +} diff --git a/tests/e2e/csi_snapshot_basic.go b/tests/e2e/csi_snapshot_basic.go index 8ac4849255..b6caaf74a9 100644 --- a/tests/e2e/csi_snapshot_basic.go +++ b/tests/e2e/csi_snapshot_basic.go @@ -189,6 +189,110 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { } }) + /* + Create/Delete snapshot via k8s API using PVC (Dynamic Provisioning) + + 1. Create a storage class (eg: vsan default) and create a pvc using this sc + 2. Create a VolumeSnapshot class with snapshotter as vsphere-csi-driver and set deletionPolicy to Delete + 3. Create a volume-snapshot with labels, using the above snapshot-class and pvc (from step-1) as source + 4. Ensure the snapshot is created, verify using get VolumeSnapshot + 5. Also verify that VolumeSnapshotContent is auto-created + 6. Verify the references to pvc and volume-snapshot on this object + 7. Verify that the VolumeSnapshot has ready-to-use set to True + 8. Verify that the Restore Size set on the snapshot is same as that of the source volume size + 9. Query the snapshot from CNS side using volume id - should pass and return the snapshot entry + 10. Delete the above snapshot from k8s side using kubectl delete, run a get and ensure it is removed + 11. 
Also ensure that the VolumeSnapshotContent is deleted along with the + volume snapshot as the policy is delete + 12. Query the snapshot from CNS side - should return 0 entries + 13. Cleanup: Delete PVC, SC (validate they are removed) + */ + ginkgo.It("[CNS-Unregister] Test 11", ginkgo.Label(p0, block, tkg, vanilla, snapshot, stable), func() { + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var volHandle string + if vanillaCluster { + scParameters[scParamDatastoreURL] = datastoreURL + } else if guestCluster { + scParameters[svStorageClassName] = storagePolicyName + } + + ginkgo.By("Create storage class and PVC") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(ctx, client, namespace, nil, "", + diskSize, storageclass, true) + volHandle = persistentVolumes[0].Spec.CSI.VolumeHandle + if guestCluster { + volHandle = getVolumeIDFromSupervisorCluster(volHandle) + } + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + defer func() { + err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if vanillaCluster { + err = snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, + metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Create a dynamic volume snapshot") + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvclaim, volHandle, diskSize, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + // Get a config to talk to the apiserver + restConfig := getRestConfigClient() + + ginkgo.By("Create CNS unregister volume with above created FCD " + volHandle) + cnsUnRegisterVolume := getCNSUnregisterVolumeSpec(namespace, volHandle) + + err = createCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(waitForCNSUnRegisterVolumeToGetUnregistered(ctx, + restConfig, namespace, cnsUnRegisterVolume, poll, supervisorClusterOperationsTimeout)) + cnsRegisterVolumeName := cnsUnRegisterVolume.GetName() + framework.Logf("CNS register volume name : %s", cnsRegisterVolumeName) + + 
ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + /* Create/Delete snapshot via k8s API using PVC (Dynamic Provisioning) diff --git a/tests/e2e/gc_rwx_basic.go b/tests/e2e/gc_rwx_basic.go index 3ac3643253..9e5cca2440 100644 --- a/tests/e2e/gc_rwx_basic.go +++ b/tests/e2e/gc_rwx_basic.go @@ -169,6 +169,361 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] Basic File Volume Provision Test", func() gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) }) + /* + CNS Unregister Volume API + Test to verify file volume provision - basic tests. + + Steps + 1. Create StorageClass + 2. Create PVC which uses the StorageClass created in step 1 + 3. Wait for PV to be provisioned + 4. Wait for PVC's status to become Bound + 5. Query CNS and check if the PVC entry is pushed into CNS or not + 6. Create pod using PVC + 7. Wait for Pod to be up and running and verify CnsFileAccessConfig CRD is created or not + 8. Verify Read/Write operation on File volume + 9. Delete pod and confirm the CnsFileAccessConfig is deleted + 10. Delete PVC, PV and Storage Class + */ + ginkgo.It("Verify Basic RWX volume provision", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var storageclasspvc *storagev1.StorageClass + var pvclaim *v1.PersistentVolumeClaim + var err error + + ginkgo.By("CNS_TEST: Running for GC setup") + scParameters[svStorageClassName] = storagePolicyName + ginkgo.By("Creating a PVC") + storageclasspvc, pvclaim, err = createPVCAndStorageClass(ctx, client, + namespace, nil, scParameters, diskSize, nil, "", false, v1.ReadWriteMany) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + err = client.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Expect claim to provision volume successfully") + persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") + + volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + volumeID := getVolumeIDFromSupervisorCluster(volHandle) + gomega.Expect(volumeID).NotTo(gomega.BeEmpty()) + + defer func() { + err = fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, pvclaim.Namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + // Verify using CNS Query API if VolumeID retrieved from PV is present. 
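+		// For file (RWX) volumes the query result carries CnsVsanFileShareBackingDetails,
+		// so the log below also surfaces the vSAN file share access points for the volume.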
+ ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volumeID)) + queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volumeID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(queryResult.Volumes).ShouldNot(gomega.BeEmpty()) + ginkgo.By(fmt.Sprintf("volume Name:%s, capacity:%d volumeType:%s health:%s accesspoint: %s", + queryResult.Volumes[0].Name, + queryResult.Volumes[0].BackingObjectDetails.(*cnstypes.CnsVsanFileShareBackingDetails).CapacityInMb, + queryResult.Volumes[0].VolumeType, queryResult.Volumes[0].HealthStatus, + queryResult.Volumes[0].BackingObjectDetails.(*cnstypes.CnsVsanFileShareBackingDetails).AccessPoints), + ) + + // Get a config to talk to the apiserver + restConfig := getRestConfigClient() + + ginkgo.By("Create CNS unregister volume with above created FCD " + volumeID) + cnsUnRegisterVolume := getCNSUnregisterVolumeSpec(namespace, volumeID) + + err = createCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(waitForCNSUnRegisterVolumeToGetUnregistered(ctx, + restConfig, namespace, cnsUnRegisterVolume, poll, supervisorClusterOperationsTimeout)) + cnsRegisterVolumeName := cnsUnRegisterVolume.GetName() + framework.Logf("CNS register volume name : %s", cnsRegisterVolumeName) + }) + + /* + CNS Unregister Volume API + Test to verify file volume provision - basic tests. + + Steps + 1. Create StorageClass + 2. Create PVC which uses the StorageClass created in step 1 + 3. Wait for PV to be provisioned + 4. Wait for PVC's status to become Bound + 5. Query CNS and check if the PVC entry is pushed into CNS or not + 6. Create pod using PVC + 7. Wait for Pod to be up and running and verify CnsFileAccessConfig CRD is created or not + 8. Verify Read/Write operation on File volume + 9. Delete pod and confirm the CnsFileAccessConfig is deleted + 10. Delete PVC, PV and Storage Class + */ + ginkgo.It("Verify Basic RWX volume provision", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var storageclasspvc *storagev1.StorageClass + var pvclaim *v1.PersistentVolumeClaim + var err error + + ginkgo.By("CNS_TEST: Running for GC setup") + scParameters[svStorageClassName] = storagePolicyName + ginkgo.By("Creating a PVC") + storageclasspvc, pvclaim, err = createPVCAndStorageClass(ctx, client, + namespace, nil, scParameters, diskSize, nil, "", false, v1.ReadWriteMany) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + err = client.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Expect claim to provision volume successfully") + persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") + + volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + volumeID := getVolumeIDFromSupervisorCluster(volHandle) + gomega.Expect(volumeID).NotTo(gomega.BeEmpty()) + + defer func() { + err = fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, pvclaim.Namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + // Verify using CNS Query API if VolumeID retrieved from PV is present. 
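+		// Note: volumeID is the supervisor cluster volume ID resolved from the guest
+		// cluster volume handle above; it is the ID that CNS is queried with and that
+		// the CnsUnregisterVolume CR created later in this test refers to.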
+ ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volumeID)) + queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volumeID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(queryResult.Volumes).ShouldNot(gomega.BeEmpty()) + ginkgo.By(fmt.Sprintf("volume Name:%s, capacity:%d volumeType:%s health:%s accesspoint: %s", + queryResult.Volumes[0].Name, + queryResult.Volumes[0].BackingObjectDetails.(*cnstypes.CnsVsanFileShareBackingDetails).CapacityInMb, + queryResult.Volumes[0].VolumeType, queryResult.Volumes[0].HealthStatus, + queryResult.Volumes[0].BackingObjectDetails.(*cnstypes.CnsVsanFileShareBackingDetails).AccessPoints), + ) + + // Create a Pod to use this PVC, and verify volume has been attached + ginkgo.By("Creating pod to attach PV to the node") + pod, err := createPod(ctx, client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false, execRWXCommandPod1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + // Delete POD + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace)) + err = fpod.DeletePodWithWait(ctx, client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By(fmt.Sprintf("Wait till the CnsFileAccessConfig CRD is deleted %s", + pod.Spec.NodeName+"-"+volHandle)) + err = waitTillCNSFileAccesscrdDeleted(ctx, f, pod.Spec.NodeName+"-"+volHandle, crdCNSFileAccessConfig, + crdVersion, crdGroup, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verifying whether the CnsFileAccessConfig CRD is Deleted or not for Pod1") + verifyCNSFileAccessConfigCRDInSupervisor(ctx, f, pod.Spec.NodeName+"-"+volHandle, + crdCNSFileAccessConfig, crdVersion, crdGroup, false) + }() + + ginkgo.By("Verifying whether the CnsFileAccessConfig CRD is created or not for Pod1") + verifyCNSFileAccessConfigCRDInSupervisor(ctx, f, pod.Spec.NodeName+"-"+volHandle, + crdCNSFileAccessConfig, crdVersion, crdGroup, true) + + ginkgo.By("Verify the volume is accessible and Read/write is possible") + cmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", + "cat /mnt/volume1/Pod1.html "} + output := e2ekubectl.RunKubectlOrDie(namespace, cmd...) + gomega.Expect(strings.Contains(output, "Hello message from Pod1")).NotTo(gomega.BeFalse()) + + wrtiecmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", + "echo 'Hello message from test into Pod1' > /mnt/volume1/Pod1.html"} + e2ekubectl.RunKubectlOrDie(namespace, wrtiecmd...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) + gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) + + // Get a config to talk to the apiserver + restConfig := getRestConfigClient() + + ginkgo.By("Create CNS unregister volume with above created FCD " + volHandle) + cnsUnRegisterVolume := getCNSUnregisterVolumeSpec(namespace, volumeID) + + err = createCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(waitForCNSUnRegisterVolumeToGetUnregistered(ctx, + restConfig, namespace, cnsUnRegisterVolume, poll, supervisorClusterOperationsTimeout)) + cnsRegisterVolumeName := cnsUnRegisterVolume.GetName() + framework.Logf("CNS register volume name : %s", cnsRegisterVolumeName) + }) + + /* + CNS Unregister Volume API tests + Test to verify file volume provision - two pods using the PVC at the same time. + + Steps + 1. Create StorageClass + 2. 
Create PVC which uses the StorageClass created in step 1 + 3. Wait for PV to be provisioned + 4. Wait for PVC's status to become Bound + 5. Query CNS and check if the PVC entry is pushed into CNS or not + 6. Create pod using PVC + 7. Wait for Pod to be up and running and verify CnsFileAccessConfig CRD is created or not + 8. Verify Read/Write operation on File volume + 9. Create a Pod to use the same PVC + 10. Wait for the Pod to be up and running and verify CnsFileAccessConfig CRD is created or not + 11. Verfiy Read/Write operation on the files created by Pod1 + 12. Delete pod and confirm the CnsFileAccessConfig is deleted + 13. Delete PVC, PV and Storage Class. + */ + + ginkgo.It("Verify RWX volume can be accessed by multiple pods", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var storageclasspvc *storagev1.StorageClass + var pvclaim *v1.PersistentVolumeClaim + var err error + + ginkgo.By("CNS_TEST: Running for GC setup") + scParameters[svStorageClassName] = storagePolicyName + ginkgo.By("Creating a PVC") + storageclasspvc, pvclaim, err = createPVCAndStorageClass(ctx, client, + namespace, nil, scParameters, diskSize, nil, "", false, v1.ReadWriteMany) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + err = client.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Expect claim to provision volume successfully") + persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") + + volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + volumeID := getVolumeIDFromSupervisorCluster(volHandle) + gomega.Expect(volumeID).NotTo(gomega.BeEmpty()) + + defer func() { + err = fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, pvclaim.Namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + // Verify using CNS Query API if VolumeID retrieved from PV is present. 
+ ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volumeID)) + queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volumeID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(queryResult.Volumes).ShouldNot(gomega.BeEmpty()) + ginkgo.By(fmt.Sprintf("volume Name:%s, capacity:%d volumeType:%s health:%s accesspoint: %s", + queryResult.Volumes[0].Name, + queryResult.Volumes[0].BackingObjectDetails.(*cnstypes.CnsVsanFileShareBackingDetails).CapacityInMb, + queryResult.Volumes[0].VolumeType, queryResult.Volumes[0].HealthStatus, + queryResult.Volumes[0].BackingObjectDetails.(*cnstypes.CnsVsanFileShareBackingDetails).AccessPoints), + ) + + // Create a Pod to use this PVC, and verify volume has been attached + ginkgo.By("Creating pod to attach PV to the node") + pod, err := createPod(ctx, client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false, execRWXCommandPod1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + // Delete POD + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace)) + err = fpod.DeletePodWithWait(ctx, client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By(fmt.Sprintf("Wait till the CnsFileAccessConfig CRD is deleted %s", + pod.Spec.NodeName+"-"+volHandle)) + err = waitTillCNSFileAccesscrdDeleted(ctx, f, pod.Spec.NodeName+"-"+volHandle, crdCNSFileAccessConfig, + crdVersion, crdGroup, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verifying whether the CnsFileAccessConfig CRD is Deleted or not for Pod1") + verifyCNSFileAccessConfigCRDInSupervisor(ctx, f, pod.Spec.NodeName+"-"+volHandle, + crdCNSFileAccessConfig, crdVersion, crdGroup, false) + }() + + ginkgo.By("Verifying whether the CnsFileAccessConfig CRD is created or not for Pod1") + verifyCNSFileAccessConfigCRDInSupervisor(ctx, f, pod.Spec.NodeName+"-"+volHandle, + crdCNSFileAccessConfig, crdVersion, crdGroup, true) + + // Create a Pod to use this PVC, and verify volume has been attached + ginkgo.By("Creating pod2 to attach PV to the node") + pod2, err := createPod(ctx, client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false, execRWXCommandPod2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + // Delete POD + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod2.Name, namespace)) + err = fpod.DeletePodWithWait(ctx, client, pod2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By(fmt.Sprintf("Wait till the CnsFileAccessConfig CRD is deleted %s", + pod.Spec.NodeName+"-"+volHandle)) + err = waitTillCNSFileAccesscrdDeleted(ctx, f, pod.Spec.NodeName+"-"+volHandle, crdCNSFileAccessConfig, + crdVersion, crdGroup, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verifying whether the CnsFileAccessConfig CRD is Deleted or not for Pod2") + verifyCNSFileAccessConfigCRDInSupervisor(ctx, f, pod2.Spec.NodeName+"-"+volHandle, + crdCNSFileAccessConfig, crdVersion, crdGroup, false) + }() + + ginkgo.By("Verifying whether the CnsFileAccessConfig CRD is created or not for Pod2") + verifyCNSFileAccessConfigCRDInSupervisor(ctx, f, pod2.Spec.NodeName+"-"+volHandle, + crdCNSFileAccessConfig, crdVersion, crdGroup, true) + + ginkgo.By("Verify the volume is accessible and Read/write is possible") + cmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", + "cat /mnt/volume1/Pod1.html "} + output := e2ekubectl.RunKubectlOrDie(namespace, cmd...) 
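+		// output should contain the marker written by execRWXCommandPod1 at pod1 startup;
+		// pod2 reads and rewrites the same Pod1.html below, demonstrating shared RWX access.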
+ gomega.Expect(strings.Contains(output, "Hello message from Pod1")).NotTo(gomega.BeFalse()) + + wrtiecmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", + "echo 'Hello message from test into Pod1' > /mnt/volume1/Pod1.html"} + e2ekubectl.RunKubectlOrDie(namespace, wrtiecmd...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) + gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) + + ginkgo.By("Verify the volume is accessible and Read/write is possible from pod2") + cmd2 := []string{"exec", pod2.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", + "cat /mnt/volume1/Pod1.html "} + output = e2ekubectl.RunKubectlOrDie(namespace, cmd2...) + gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) + + wrtiecmd2 := []string{"exec", pod2.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", + "echo 'Hello message from test into Pod2' > /mnt/volume1/Pod1.html"} + e2ekubectl.RunKubectlOrDie(namespace, wrtiecmd2...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd2...) + gomega.Expect(strings.Contains(output, "Hello message from test into Pod2")).NotTo(gomega.BeFalse()) + + // Get a config to talk to the apiserver + restConfig := getRestConfigClient() + + ginkgo.By("Create CNS unregister volume with above created FCD " + volHandle) + cnsUnRegisterVolume := getCNSUnregisterVolumeSpec(namespace, volHandle) + + err = createCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(waitForCNSUnRegisterVolumeToGetUnregistered(ctx, + restConfig, namespace, cnsUnRegisterVolume, poll, supervisorClusterOperationsTimeout)) + cnsRegisterVolumeName := cnsUnRegisterVolume.GetName() + framework.Logf("CNS register volume name : %s", cnsRegisterVolumeName) + }) + /* Test to verify file volume provision - two pods using the PVC at the same time. 
diff --git a/tests/e2e/util.go b/tests/e2e/util.go index 4a16536ca0..0c89a689d7 100644 --- a/tests/e2e/util.go +++ b/tests/e2e/util.go @@ -88,6 +88,7 @@ import ( cnsfileaccessconfigv1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/cnsfileaccessconfig/v1alpha1" cnsnodevmattachmentv1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/cnsnodevmattachment/v1alpha1" cnsregistervolumev1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/cnsregistervolume/v1alpha1" + cnsunregistervolumev1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/cnsunregistervolume/v1alpha1" cnsvolumemetadatav1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/cnsvolumemetadata/v1alpha1" storagepolicyv1alpha2 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/storagepolicy/v1alpha2" k8s "sigs.k8s.io/vsphere-csi-driver/v3/pkg/kubernetes" @@ -3243,6 +3244,27 @@ func getCNSRegisterVolumeSpec(ctx context.Context, namespace string, fcdID strin return cnsRegisterVolume } +// Function to create CNS UnregisterVolume spec, with given FCD ID +func getCNSUnregisterVolumeSpec(namespace string, + fcdID string) *cnsunregistervolumev1alpha1.CnsUnregisterVolume { + var ( + cnsUnRegisterVolume *cnsunregistervolumev1alpha1.CnsUnregisterVolume + ) + framework.Logf("get CNS UnregisterVolume API spec") + + cnsUnRegisterVolume = &cnsunregistervolumev1alpha1.CnsUnregisterVolume{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "cnsunregvol-", + Namespace: namespace, + }, + Spec: cnsunregistervolumev1alpha1.CnsUnregisterVolumeSpec{ + VolumeID: fcdID, + }, + } + return cnsUnRegisterVolume +} + // Create CNS register volume. func createCNSRegisterVolume(ctx context.Context, restConfig *rest.Config, cnsRegisterVolume *cnsregistervolumev1alpha1.CnsRegisterVolume) error { @@ -3255,6 +3277,30 @@ func createCNSRegisterVolume(ctx context.Context, restConfig *rest.Config, return err } +// Create CNS Unregister volume. +func createCNSUnRegisterVolume(ctx context.Context, restConfig *rest.Config, + cnsUnRegisterVolume *cnsunregistervolumev1alpha1.CnsUnregisterVolume) error { + + cnsOperatorClient, err := k8s.NewClientForGroup(ctx, restConfig, cnsoperatorv1alpha1.GroupName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("Create CNSUnRegisterVolume") + err = cnsOperatorClient.Create(ctx, cnsUnRegisterVolume) + + return err +} + +// Delete CNS Unregister volume. +func deleteCNSUnRegisterVolume(ctx context.Context, restConfig *rest.Config, + cnsUnRegisterVolume *cnsunregistervolumev1alpha1.CnsUnregisterVolume) error { + + cnsOperatorClient, err := k8s.NewClientForGroup(ctx, restConfig, cnsoperatorv1alpha1.GroupName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("Delete CNSUnRegisterVolume") + err = cnsOperatorClient.Delete(ctx, cnsUnRegisterVolume) + + return err +} + // Query CNS Register volume. Returns true if the CNSRegisterVolume is // available otherwise false. func queryCNSRegisterVolume(ctx context.Context, restClientConfig *rest.Config, @@ -3280,6 +3326,31 @@ func queryCNSRegisterVolume(ctx context.Context, restClientConfig *rest.Config, } +// Query CNS Unregister volume. Returns true if the CNSUnregisterVolume is +// available otherwise false. 
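+// Illustrative usage (an assumption; not exercised by the tests in this patch):
+// after deleting a CnsUnregisterVolume CR, a test can poll this helper until it
+// returns false to confirm the instance is gone from the namespace.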
+func queryCNSUnregisterVolume(ctx context.Context, restClientConfig *rest.Config, + cnsUnregistervolumeName string, namespace string) bool { + isPresent := false + framework.Logf("cleanUpCnsUnregisterVolumeInstances: start") + cnsOperatorClient, err := k8s.NewClientForGroup(ctx, restClientConfig, cnsoperatorv1alpha1.GroupName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Get list of CnsUnregisterVolume instances from all namespaces. + cnsUnregisterVolumesList := &cnsunregistervolumev1alpha1.CnsUnregisterVolumeList{} + err = cnsOperatorClient.List(ctx, cnsUnregisterVolumesList) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + cns := &cnsunregistervolumev1alpha1.CnsUnregisterVolume{} + err = cnsOperatorClient.Get(ctx, pkgtypes.NamespacedName{Name: cnsUnregistervolumeName, Namespace: namespace}, cns) + if err == nil { + framework.Logf("CNS UnregisterVolume %s Found in the namespace %s:", cnsUnregistervolumeName, namespace) + isPresent = true + } + + return isPresent + +} + // Verify Bi-directional referance of Pv and PVC in case of static volume // provisioning. func verifyBidirectionalReferenceOfPVandPVC(ctx context.Context, client clientset.Interface, @@ -3329,6 +3400,21 @@ func getCNSRegistervolume(ctx context.Context, restClientConfig *rest.Config, return cns } +// Get CNS Unregister volume. +func getCNSUnRegistervolume(ctx context.Context, + restClientConfig *rest.Config, cnsUnRegisterVolume *cnsunregistervolumev1alpha1. + CnsUnregisterVolume) *cnsunregistervolumev1alpha1.CnsUnregisterVolume { + cnsOperatorClient, err := k8s.NewClientForGroup(ctx, restClientConfig, cnsoperatorv1alpha1.GroupName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + cns := &cnsunregistervolumev1alpha1.CnsUnregisterVolume{} + err = cnsOperatorClient.Get(ctx, + pkgtypes.NamespacedName{Name: cnsUnRegisterVolume.Name, Namespace: cnsUnRegisterVolume.Namespace}, cns) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + return cns +} + // Update CNS register volume. func updateCNSRegistervolume(ctx context.Context, restClientConfig *rest.Config, cnsRegisterVolume *cnsregistervolumev1alpha1.CnsRegisterVolume) *cnsregistervolumev1alpha1.CnsRegisterVolume { @@ -4051,6 +4137,26 @@ func waitForCNSRegisterVolumeToGetCreated(ctx context.Context, restConfig *rest. return fmt.Errorf("cnsRegisterVolume %s creation is failed within %v", cnsRegisterVolumeName, timeout) } +// waitForCNSUnRegisterVolumeToGetUnregistered waits for a cnsUnRegisterVolume to get +// created or until timeout occurs, whichever comes first. +func waitForCNSUnRegisterVolumeToGetUnregistered(ctx context.Context, restConfig *rest.Config, namespace string, + cnsUnRegisterVolume *cnsunregistervolumev1alpha1.CnsUnregisterVolume, Poll, timeout time.Duration) error { + framework.Logf("Waiting up to %v for CnsUnRegisterVolume %s to get created", timeout, cnsUnRegisterVolume) + + cnsUnRegisterVolumeName := cnsUnRegisterVolume.GetName() + for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { + cnsUnRegisterVolume = getCNSUnRegistervolume(ctx, restConfig, cnsUnRegisterVolume) + flag := cnsUnRegisterVolume.Status.Unregistered + if !flag { + continue + } else { + return nil + } + } + + return fmt.Errorf("cnsRegisterVolume %s unregister is failed within %v", cnsUnRegisterVolumeName, timeout) +} + // waitForCNSRegisterVolumeToGetDeleted waits for a cnsRegisterVolume to get // deleted or until timeout occurs, whichever comes first. 
 // waitForCNSRegisterVolumeToGetDeleted waits for a cnsRegisterVolume to get
 // deleted or until timeout occurs, whichever comes first.
 func waitForCNSRegisterVolumeToGetDeleted(ctx context.Context, restConfig *rest.Config, namespace string,
diff --git a/tests/e2e/vmservice_vm.go b/tests/e2e/vmservice_vm.go
index 7673994740..bdd566fba9 100644
--- a/tests/e2e/vmservice_vm.go
+++ b/tests/e2e/vmservice_vm.go
@@ -273,6 +273,297 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() {
 		}
 	})
 
+	/*
+		CNS Unregister Volume API basic test
+		Steps:
+		1. Assign an SPBM policy to the test namespace with sufficient quota
+		2. Create a PVC, say pvc1
+		3. Create a VM service VM, say vm1, with pvc1 attached
+		4. Verify pvc1 CNS metadata
+		5. Once vm1 is up, verify that the volume is accessible inside vm1
+		6. Export the CNS volume and verify that the export fails while the volume is attached to vm1
+		7. Delete vm1
+		8. Delete pvc1
+		9. Remove the SPBM policy attached to the test namespace
+	*/
+	ginkgo.It("verify CNS unregister volume API for a volume attached to a vmservice vm", func() {
+		ctx, cancel := context.WithCancel(context.Background())
+		defer cancel()
+
+		var pandoraSyncWaitTime int
+		var err error
+		if os.Getenv(envPandoraSyncWaitTime) != "" {
+			pandoraSyncWaitTime, err = strconv.Atoi(os.Getenv(envPandoraSyncWaitTime))
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		} else {
+			pandoraSyncWaitTime = defaultPandoraSyncWaitTime
+		}
+		datastoreURL = GetAndExpectStringEnvVar(envSharedDatastoreURL)
+		datastore := getDsMoRefFromURL(ctx, datastoreURL)
+		ginkgo.By("Creating FCD Disk")
+		fcdID, err := e2eVSphere.createFCD(ctx, fcdName, diskSizeInMb, datastore.Reference())
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		defer func() {
+			ginkgo.By(fmt.Sprintf("Deleting FCD: %s", fcdID))
+			err = deleteFcdWithRetriesForSpecificErr(ctx, fcdID, defaultDatastore.Reference(),
+				[]string{disklibUnlinkErr}, []string{objOrItemNotFoundErr})
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		}()
+
+		ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow newly created FCD:%s to sync with pandora",
+			pandoraSyncWaitTime, fcdID))
+		time.Sleep(time.Duration(pandoraSyncWaitTime) * time.Second)
+
+		ginkgo.By(fmt.Sprintf("Creating the PV with the fcdID %s", fcdID))
+		staticPVLabels := make(map[string]string)
+		staticPVLabels["fcd-id"] = fcdID
+		staticPv := getPersistentVolumeSpec(fcdID, v1.PersistentVolumeReclaimDelete, nil, ext4FSType)
+		staticPv, err = client.CoreV1().PersistentVolumes().Create(ctx, staticPv, metav1.CreateOptions{})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		err = e2eVSphere.waitForCNSVolumeToBeCreated(staticPv.Spec.CSI.VolumeHandle)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		ginkgo.By("Creating a static PVC")
+		staticPvc := getPersistentVolumeClaimSpec(namespace, staticPVLabels, staticPv.Name)
+		staticPvc, err = client.CoreV1().PersistentVolumeClaims(namespace).Create(
+			ctx, staticPvc, metav1.CreateOptions{})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		ginkgo.By("Create a storageclass")
+		storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		ginkgo.By("Create a PVC")
+		pvc, err := createPVC(ctx, client, namespace, nil, "", storageclass, "")
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		ginkgo.By("Waiting for all claims to be in bound state")
+		pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc, staticPvc}, pollTimeout)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		pv := pvs[0]
+		volHandle := pv.Spec.CSI.VolumeHandle
+		gomega.Expect(volHandle).NotTo(gomega.BeEmpty())
+		defer func() {
+			ginkgo.By("Delete PVCs")
+			err = fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			err = fpv.DeletePersistentVolumeClaim(ctx, client, staticPvc.Name, namespace)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			ginkgo.By("Waiting for CNS volumes to be deleted")
+			err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			err = e2eVSphere.waitForCNSVolumeToBeDeleted(staticPv.Spec.CSI.VolumeHandle)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		}()
+		ginkgo.By("Creating VM bootstrap data")
+		secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace)
+		defer func() {
+			ginkgo.By("Deleting VM bootstrap data")
+			err := client.CoreV1().Secrets(namespace).Delete(ctx, secretName, *metav1.NewDeleteOptions(0))
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		}()
+		ginkgo.By("Creating VM")
+		vm := createVmServiceVmWithPvcs(
+			ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc, staticPvc}, vmi, storageClassName, secretName)
+		defer func() {
+			ginkgo.By("Deleting VM")
+			err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{
+				Name:      vm.Name,
+				Namespace: namespace,
+			}})
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		}()
+
+		ginkgo.By("Creating loadbalancing service for ssh with the VM")
+		vmlbsvc := createService4Vm(ctx, vmopC, namespace, vm.Name)
+		defer func() {
+			ginkgo.By("Deleting loadbalancing service for ssh with the VM")
+			err = vmopC.Delete(ctx, &vmopv1.VirtualMachineService{ObjectMeta: metav1.ObjectMeta{
+				Name:      vmlbsvc.Name,
+				Namespace: namespace,
+			}})
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		}()
+
+		ginkgo.By("Wait for VM to come up and get an IP")
+		vmIp, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm.Name)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		ginkgo.By("Wait and verify PVCs are attached to the VM")
+		gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm,
+			[]*v1.PersistentVolumeClaim{pvc, staticPvc})).NotTo(gomega.HaveOccurred())
+
+		ginkgo.By("Verify PVCs are accessible to the VM")
+		ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity")
+		vm, err = getVmsvcVM(ctx, vmopC, vm.Namespace, vm.Name) // refresh vm info
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		for i, vol := range vm.Status.Volumes {
+			volFolder := formatNVerifyPvcIsAccessible(vol.DiskUuid, i+1, vmIp)
+			verifyDataIntegrityOnVmDisk(vmIp, volFolder)
+		}
+
+		// Get a config to talk to the apiserver
+		restConfig := getRestConfigClient()
+
+		ginkgo.By("Create CNS unregister volume with above created FCD " + volHandle)
+		cnsUnRegisterVolume := getCNSUnregisterVolumeSpec(namespace, volHandle)
+
+		err = createCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(waitForCNSUnRegisterVolumeToGetUnregistered(ctx,
+			restConfig, namespace, cnsUnRegisterVolume, poll, supervisorClusterOperationsTimeout))
+		cnsUnRegisterVolumeName := cnsUnRegisterVolume.GetName()
+		framework.Logf("CNS unregister volume name: %s", cnsUnRegisterVolumeName)
+
+	})
+
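+	/*
+		The next test repeats the flow above; the only difference is that the
+		VM is powered off before the CNS UnregisterVolume API is invoked, so it
+		exercises unregistering a volume that is attached to a powered-off VM
+		rather than to a running one.
+	*/
+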
+	/*
+		CNS Unregister Volume API basic test with a powered-off VM
+		Steps:
+		1. Assign an SPBM policy to the test namespace with sufficient quota
+		2. Create a PVC, say pvc1
+		3. Create a VM service VM, say vm1, with pvc1 attached
+		4. Verify pvc1 CNS metadata
+		5. Once vm1 is up, verify that the volume is accessible inside vm1
+		6. Power off vm1
+		7. Export the CNS volume
+		8. Delete vm1
+		9. Delete pvc1
+		10. Remove the SPBM policy attached to the test namespace
+	*/
+	ginkgo.It("verify CNS unregister volume API for a volume attached to a powered off vmservice vm", func() {
+		ctx, cancel := context.WithCancel(context.Background())
+		defer cancel()
+
+		var pandoraSyncWaitTime int
+		var err error
+		if os.Getenv(envPandoraSyncWaitTime) != "" {
+			pandoraSyncWaitTime, err = strconv.Atoi(os.Getenv(envPandoraSyncWaitTime))
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		} else {
+			pandoraSyncWaitTime = defaultPandoraSyncWaitTime
+		}
+		datastoreURL = GetAndExpectStringEnvVar(envSharedDatastoreURL)
+		datastore := getDsMoRefFromURL(ctx, datastoreURL)
+		ginkgo.By("Creating FCD Disk")
+		fcdID, err := e2eVSphere.createFCD(ctx, fcdName, diskSizeInMb, datastore.Reference())
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		defer func() {
+			ginkgo.By(fmt.Sprintf("Deleting FCD: %s", fcdID))
+			err = deleteFcdWithRetriesForSpecificErr(ctx, fcdID, defaultDatastore.Reference(),
+				[]string{disklibUnlinkErr}, []string{objOrItemNotFoundErr})
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		}()
+
+		ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow newly created FCD:%s to sync with pandora",
+			pandoraSyncWaitTime, fcdID))
+		time.Sleep(time.Duration(pandoraSyncWaitTime) * time.Second)
+
+		ginkgo.By(fmt.Sprintf("Creating the PV with the fcdID %s", fcdID))
+		staticPVLabels := make(map[string]string)
+		staticPVLabels["fcd-id"] = fcdID
+		staticPv := getPersistentVolumeSpec(fcdID, v1.PersistentVolumeReclaimDelete, nil, ext4FSType)
+		staticPv, err = client.CoreV1().PersistentVolumes().Create(ctx, staticPv, metav1.CreateOptions{})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		err = e2eVSphere.waitForCNSVolumeToBeCreated(staticPv.Spec.CSI.VolumeHandle)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		ginkgo.By("Creating a static PVC")
+		staticPvc := getPersistentVolumeClaimSpec(namespace, staticPVLabels, staticPv.Name)
+		staticPvc, err = client.CoreV1().PersistentVolumeClaims(namespace).Create(
+			ctx, staticPvc, metav1.CreateOptions{})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		ginkgo.By("Create a storageclass")
+		storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		ginkgo.By("Create a PVC")
+		pvc, err := createPVC(ctx, client, namespace, nil, "", storageclass, "")
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		ginkgo.By("Waiting for all claims to be in bound state")
+		pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc, staticPvc}, pollTimeout)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		pv := pvs[0]
+		volHandle := pv.Spec.CSI.VolumeHandle
+		gomega.Expect(volHandle).NotTo(gomega.BeEmpty())
+		defer func() {
+			ginkgo.By("Delete PVCs")
+			err = fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			err = fpv.DeletePersistentVolumeClaim(ctx, client, staticPvc.Name, namespace)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			ginkgo.By("Waiting for CNS volumes to be deleted")
+			err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			err = e2eVSphere.waitForCNSVolumeToBeDeleted(staticPv.Spec.CSI.VolumeHandle)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		}()
+		ginkgo.By("Creating VM bootstrap data")
+		secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace)
+		defer func() {
+			ginkgo.By("Deleting VM bootstrap data")
+			err := client.CoreV1().Secrets(namespace).Delete(ctx, secretName, *metav1.NewDeleteOptions(0))
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		}()
+		ginkgo.By("Creating VM")
+		vm := createVmServiceVmWithPvcs(
+			ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc, staticPvc}, vmi, storageClassName, secretName)
+		defer func() {
+			ginkgo.By("Deleting VM")
+			err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{
+				Name:      vm.Name,
+				Namespace: namespace,
+			}})
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		}()
+
+		ginkgo.By("Creating loadbalancing service for ssh with the VM")
+		vmlbsvc := createService4Vm(ctx, vmopC, namespace, vm.Name)
+		defer func() {
+			ginkgo.By("Deleting loadbalancing service for ssh with the VM")
+			err = vmopC.Delete(ctx, &vmopv1.VirtualMachineService{ObjectMeta: metav1.ObjectMeta{
+				Name:      vmlbsvc.Name,
+				Namespace: namespace,
+			}})
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		}()
+
+		ginkgo.By("Wait for VM to come up and get an IP")
+		vmIp, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm.Name)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		ginkgo.By("Wait and verify PVCs are attached to the VM")
+		gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm,
+			[]*v1.PersistentVolumeClaim{pvc, staticPvc})).NotTo(gomega.HaveOccurred())
+
+		ginkgo.By("Verify PVCs are accessible to the VM")
+		ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity")
+		vm, err = getVmsvcVM(ctx, vmopC, vm.Namespace, vm.Name) // refresh vm info
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		for i, vol := range vm.Status.Volumes {
+			volFolder := formatNVerifyPvcIsAccessible(vol.DiskUuid, i+1, vmIp)
+			verifyDataIntegrityOnVmDisk(vmIp, volFolder)
+		}
+
+		ginkgo.By("Power off vm")
+		vm = setVmPowerState(ctx, vmopC, vm, vmopv1.VirtualMachinePoweredOff)
+		vm, err = wait4Vm2ReachPowerStateInSpec(ctx, vmopC, vm)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		// Get a config to talk to the apiserver
+		restConfig := getRestConfigClient()
+
+		ginkgo.By("Create CNS unregister volume with above created FCD " + volHandle)
+		cnsUnRegisterVolume := getCNSUnregisterVolumeSpec(namespace, volHandle)
+
+		err = createCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(waitForCNSUnRegisterVolumeToGetUnregistered(ctx,
+			restConfig, namespace, cnsUnRegisterVolume, poll, supervisorClusterOperationsTimeout))
+		cnsUnRegisterVolumeName := cnsUnRegisterVolume.GetName()
+		framework.Logf("CNS unregister volume name: %s", cnsUnRegisterVolumeName)
+
+	})
+
+	/*
+		hot detach and attach
+		Steps: