diff --git a/tests/e2e/cns_unregister_volume_api.go b/tests/e2e/cns_unregister_volume_api.go new file mode 100644 index 0000000000..880618eb52 --- /dev/null +++ b/tests/e2e/cns_unregister_volume_api.go @@ -0,0 +1,1176 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "context" + "fmt" + "math/rand" + "os" + "strconv" + "sync" + "time" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/framework" + fnodes "k8s.io/kubernetes/test/e2e/framework/node" + fpod "k8s.io/kubernetes/test/e2e/framework/pod" + fpv "k8s.io/kubernetes/test/e2e/framework/pv" + admissionapi "k8s.io/pod-security-admission/api" + cnsunregistervolumev1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/cnsunregistervolume/v1alpha1" +) + +var _ = ginkgo.Describe("[csi-unregister-volume] CNS Unregister Volume", func() { + f := framework.NewDefaultFramework("cns-unregister-volume") + f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged + const defaultVolumeOpsScale = 30 + const defaultVolumeOpsScaleWCP = 29 + var ( + client clientset.Interface + c clientset.Interface + fullSyncWaitTime int + namespace string + scParameters map[string]string + storagePolicyName string + volumeOpsScale int + isServiceStopped bool + serviceName string + csiReplicaCount int32 + deployment *appsv1.Deployment + zonalPolicy string + zonalWffcPolicy string + categories []string + labels_ns map[string]string + allowedTopologyHAMap map[string][]string + allowedTopologies []v1.TopologySelectorLabelRequirement + ) + + ginkgo.BeforeEach(func() { + bootstrap() + client = f.ClientSet + namespace = getNamespaceToRunTests(f) + scParameters = make(map[string]string) + isServiceStopped = false + storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) + + if stretchedSVC { + storagePolicyName = GetAndExpectStringEnvVar(envZonalStoragePolicyName) + labels_ns = map[string]string{} + labels_ns[admissionapi.EnforceLevelLabel] = string(admissionapi.LevelPrivileged) + labels_ns["e2e-framework"] = f.BaseName + + if storagePolicyName == "" { + ginkgo.Fail(envZonalStoragePolicyName + " env variable not set") + } + zonalWffcPolicy = GetAndExpectStringEnvVar(envZonalWffcStoragePolicyName) + if zonalWffcPolicy == "" { + ginkgo.Fail(envZonalWffcStoragePolicyName + " env variable not set") + } + framework.Logf("zonal policy: %s and zonal wffc policy: %s", zonalPolicy, zonalWffcPolicy) + + topologyHaMap := GetAndExpectStringEnvVar(topologyHaMap) + _, categories = createTopologyMapLevel5(topologyHaMap) + allowedTopologies = createAllowedTopolgies(topologyHaMap) + allowedTopologyHAMap = createAllowedTopologiesMap(allowedTopologies) + framework.Logf("Topology map: %v, categories: %v", allowedTopologyHAMap, 
categories) + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) + framework.ExpectNoError(err, "Unable to find ready and schedulable Node") + + if !(len(nodeList.Items) > 0) { + framework.Failf("Unable to find ready and schedulable Node") + } + + if guestCluster { + svcClient, svNamespace := getSvcClientAndNamespace() + setResourceQuota(svcClient, svNamespace, rqLimit) + } + + if os.Getenv("VOLUME_OPS_SCALE") != "" { + volumeOpsScale, err = strconv.Atoi(os.Getenv(envVolumeOperationsScale)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + if vanillaCluster { + volumeOpsScale = defaultVolumeOpsScale + } else { + volumeOpsScale = defaultVolumeOpsScaleWCP + } + } + framework.Logf("VOLUME_OPS_SCALE is set to %v", volumeOpsScale) + + if os.Getenv(envFullSyncWaitTime) != "" { + fullSyncWaitTime, err = strconv.Atoi(os.Getenv(envFullSyncWaitTime)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // Full sync interval can be 1 min at minimum so full sync wait time has to be more than 120s + if fullSyncWaitTime < 120 || fullSyncWaitTime > defaultFullSyncWaitTime { + framework.Failf("The FullSync Wait time %v is not set correctly", fullSyncWaitTime) + } + } else { + fullSyncWaitTime = defaultFullSyncWaitTime + } + + // Get CSI Controller's replica count from the setup + controllerClusterConfig := os.Getenv(contollerClusterKubeConfig) + c = client + if controllerClusterConfig != "" { + framework.Logf("Creating client for remote kubeconfig") + remoteC, err := createKubernetesClientFromConfig(controllerClusterConfig) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + c = remoteC + } + deployment, err = c.AppsV1().Deployments(csiSystemNamespace).Get(ctx, + vSphereCSIControllerPodNamePrefix, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + csiReplicaCount = *deployment.Spec.Replicas + }) + + ginkgo.AfterEach(func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + if isServiceStopped { + if serviceName == "CSI" { + framework.Logf("Starting CSI driver") + ignoreLabels := make(map[string]string) + err := updateDeploymentReplicawithWait(c, csiReplicaCount, vSphereCSIControllerPodNamePrefix, + csiSystemNamespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Wait for the CSI Pods to be up and Running + list_of_pods, err := fpod.GetPodsInNamespace(ctx, client, csiSystemNamespace, ignoreLabels) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + num_csi_pods := len(list_of_pods) + err = fpod.WaitForPodsRunningReady(ctx, client, csiSystemNamespace, int32(num_csi_pods), 0, + pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else if serviceName == hostdServiceName { + framework.Logf("In afterEach function to start the hostd service on all hosts") + hostIPs := getAllHostsIP(ctx, true) + for _, hostIP := range hostIPs { + startHostDOnHost(ctx, hostIP) + } + } else { + vcAddress := e2eVSphere.Config.Global.VCenterHostname + ":" + sshdPort + ginkgo.By(fmt.Sprintf("Starting %v on the vCenter host", serviceName)) + err := invokeVCenterServiceControl(ctx, startOperation, serviceName, vcAddress) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = waitVCenterServiceToBeInState(ctx, serviceName, vcAddress, svcRunningMessage) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + } + + ginkgo.By(fmt.Sprintf("Resetting provisioner time interval to %s sec", defaultProvisionerTimeInSec)) + 
updateCSIDeploymentProvisionerTimeout(c, csiSystemNamespace, defaultProvisionerTimeInSec) + + if supervisorCluster { + deleteResourceQuota(client, namespace) + dumpSvcNsEventsOnTestFailure(client, namespace) + } + if guestCluster { + svcClient, svNamespace := getSvcClientAndNamespace() + setResourceQuota(svcClient, svNamespace, defaultrqLimit) + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) + } + }) + + /* + Export detached PVC + + Create SPBM Policy: + Define and create a Storage Policy-Based Management (SPBM) policy. + + Assign Policy to Namespace: + Apply the SPBM policy to the test namespace with sufficient storage quota. + + Create PVC: + Create a PersistentVolumeClaim (PVC) named pvc1 in the test namespace. + Verify CNS Metadata + + Verify Storage Quota: + Check that the storage quota in the test namespace reflects the space occupied by pvc1. + + Create CnsUnregisterVolume CR: + Create a CnsUnregisterVolume Custom Resource (CR) with the volumeID of pvc1 and apply it to the test namespace. + + Check CnsUnregisterVolume Status: + Verify that the status.Unregistered field in the CnsUnregisterVolume CR is set to true. + + Verify Deletion: + Ensure that the PersistentVolume (PV) and PVC are deleted from the Supervisor cluster. + Verify CNS Metadata + + Check Quota Release: + Confirm that the storage quota has been freed after the deletion of PV/PVC. + + Verify FCD: + Use the RetrieveVStorageObject API from vCenter MOB to confirm that the FCD associated with the PVC has not been deleted. + + Cleanup: + Delete the FCD, the test namespace, and the SPBM policy. + */ + ginkgo.It("[csi-unregister-volume] Export detached PVC", func() { + exportDetachedVolume(namespace, client, storagePolicyName, scParameters, + volumeOpsScale, true) + }) + + /* + Export PVC attached with Pod + + Create SPBM Policy and Assign to Namespace: + Create a SPBM policy and assign it to the test namespace with sufficient quota. + + Create PVC: + Create a PersistentVolumeClaim (PVC) named pvc1 in the test namespace. + Verify CNS Metadata + Verify List Volume results lists volume ID + + Verify Storage Quota: + Verify that the storage quota in the test namespace appropriately shows the occupied quota from pvc1. + + Create Pod and Use PVC: + Create a Pod named pod1 in the test namespace using pvc1. + Wait for pod1 to be up and running, then perform I/O operations on pvc1 through pod1. + + Create and Apply CnsUnregisterVolume CR: + Create a CnsUnregisterVolume CR with the volumeID of pvc1 and apply it to the test namespace. + + Check CnsUnregisterVolume Status: + Check the CnsUnregisterVolume CR and verify that the status.Unregistered field is set to false. + + Verify PV/PVC Not Deleted: + Verify that the PersistentVolume (PV) and PVC are not deleted from the Supervisor cluster. + + Delete Pod and Wait: + Delete pod1 and wait until the pod is fully deleted. + + Reapply CnsUnregisterVolume CR: + Create and apply a new CnsUnregisterVolume CR with the volumeID of pvc1 to the test namespace. + + Check Updated CnsUnregisterVolume Status: + Verify that the status.Unregistered field in the CnsUnregisterVolume CR is now set to true. + + Check PV/PVC Deletion: + Confirm that the PV and PVC are deleted from the Supervisor cluster. + Verify CNS Metadata + Verify List Volume results does not lists volume ID + + Verify Storage Quota Freed: + Verify that the storage quota has been freed following the deletion of PV/PVC. 
+ + Verify FCD Status: + Use the RetrieveVStorageObject API from vCenter MOB to ensure that the FCD is not deleted. + + Cleanup: + Delete the FCD, the test namespace, and the SPBM policy. + */ + ginkgo.It("[csi-unregister-volume] Export PVC attached with Pod", func() { + exportAttachedVolume(namespace, client, storagePolicyName, scParameters, + volumeOpsScale, true) + }) + + /* + Export detached PVC and PVC attached in TKG + + Running test in TKG Context + + Create SPBM Policy and Assign to TKG Namespace: + Create a SPBM policy and assign it to the TKG namespace with sufficient quota. + + Create PVC in TKG test namespace: + Create a PersistentVolumeClaim (PVC) named pvc1 in the test namespace. + Verify CNS Metadata + + Verify Storage Quota: + Verify that the storage quota in the TKG namespace appropriately shows the space occupied by pvc1. + + Create and Use Pod: + Create a Pod named pod1 in the test namespace using pvc1. + Wait for pod1 to be up and running, then perform I/O operations on pvc1 through pod1. + + Create and Apply CnsUnregisterVolume CR (SVC Context): + Create a CnsUnregisterVolume Custom Resource (CR) with the volumeID of pvc1 and apply it to the TKG namespace in the Supervisor cluster (SVC context). + + Check CnsUnregisterVolume Status (SVC Context): + Verify that the status.Unregistered field in the CnsUnregisterVolume CR is set to false (SVC context). + + Check PV/PVC Status (SVC Context): + Confirm that the PersistentVolume (PV) and PVC are not deleted from the Supervisor cluster (SVC context). + + Delete Pod and Wait: + Delete pod1 and wait until the pod is fully deleted. + + Reapply CnsUnregisterVolume CR (SVC Context): + Create and apply a new CnsUnregisterVolume CR with the volumeID of pvc1 to the test namespace in the Supervisor cluster (SVC context). + + Check Updated CnsUnregisterVolume Status (SVC Context): + Verify that the status.Unregistered field in the CnsUnregisterVolume CR is set to false (SVC context). + + Check PV/PVC Deletion (SVC Context): + Verify that the PV and PVC are not deleted from the Supervisor cluster (SVC context). + Verify CNS Metadata + + Cleanup: + Delete the PVC, the test namespace, and the SPBM policy. + */ + ginkgo.It("[csi-unregister-volume] Export detached PVC and PVC attached in TKG", func() { + exportAttachedVolumeInTKG(namespace, client, storagePolicyName, scParameters, + volumeOpsScale, true) + }) + + /* + Export detached PVC while the services are down + + vSAN Health is down + HostD is down + VPXD is down + SpS is down + CSI Pods are down + + Create SPBM Policy and Assign to Namespace: + Create a SPBM policy and assign it to the test namespace with sufficient quota. + + Create PVC: + Create a PersistentVolumeClaim (PVC) named pvc1 in the test namespace. + Verify CNS Metadata + + Bring Down VC Services: + Bring down the VC services: sps, vpxd, vsan, and hostd. + Wait for Services to be Down: + Wait for the services to be fully down. + + Create and Apply CnsUnregisterVolume CR: + Create a CnsUnregisterVolume Custom Resource (CR) with the volumeID of pvc1 and apply it to the test namespace. + + Check CnsUnregisterVolume Status: + Verify that the status.Unregistered field in the CnsUnregisterVolume CR is set to false. + + Bring Up VC Services: + Bring up the VC services: sps, vpxd, vsan, and hostd. + + Wait for Services to be Up: + Wait for the services to be fully up and running. + + Wait for Full Sync Time: + Wait for the system to complete the full synchronization. 
+ + Check Updated CnsUnregisterVolume Status: + Verify that the status.Unregistered field in the CnsUnregisterVolume CR is now set to true. + + Check PV/PVC Deletion: + Confirm that the PersistentVolume (PV) and PVC are deleted from the Supervisor cluster. + Verify CNS Metadata + + Verify Storage Quota Freed: + Verify that the storage quota has been freed following the deletion of PV/PVC. + + Verify FCD Status: + Invoke the FCD API RetrieveVStorageObject from vCenter MOB and verify that the FCD is not deleted. + + Cleanup: + Delete the FCD, the test namespace, and the SPBM policy. + */ + ginkgo.It("[csi-unregister-volume] Export detached PVC while the services are down - HOSTD", func() { + serviceName = hostdServiceName + exportVolumeWithServiceDown(serviceName, namespace, client, + scParameters, volumeOpsScale, isServiceStopped, c) + }) + + ginkgo.It("[csi-unregister-volume] Export detached PVC while the services are down - VSAN", func() { + serviceName = vsanhealthServiceName + exportVolumeWithServiceDown(serviceName, namespace, client, + scParameters, volumeOpsScale, isServiceStopped, c) + }) + + ginkgo.It("[csi-unregister-volume] Export detached PVC while the services are down - VPXD", func() { + serviceName = vpxdServiceName + exportVolumeWithServiceDown(serviceName, namespace, client, + scParameters, volumeOpsScale, isServiceStopped, c) + }) + + ginkgo.It("[csi-unregister-volume] Export detached PVC while the services are down - SPS", func() { + serviceName = spsServiceName + exportVolumeWithServiceDown(serviceName, namespace, client, + scParameters, volumeOpsScale, isServiceStopped, c) + }) + + ginkgo.It("[csi-unregister-volume] Export detached PVC while the services are down - CSI", func() { + serviceName = "CSI" + exportVolumeWithServiceDown(serviceName, namespace, client, + scParameters, volumeOpsScale, isServiceStopped, c) + }) + +}) + +func exportDetachedVolume(namespace string, client clientset.Interface, + storagePolicyName string, scParameters map[string]string, volumeOpsScale int, extendVolume bool) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var storageclass *storagev1.StorageClass + var persistentvolumes []*v1.PersistentVolume + var pvclaims []*v1.PersistentVolumeClaim + var err error + //var fullSyncWaitTime int + pvclaims = make([]*v1.PersistentVolumeClaim, volumeOpsScale) + + // Get a config to talk to the apiserver + restConfig := getRestConfigClient() + + framework.Logf("storagePolicyName %v", storagePolicyName) + framework.Logf("extendVolume %v", extendVolume) + + if stretchedSVC { + ginkgo.By("CNS_TEST: Running for Stretch setup") + storageclass, err = client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) + if !apierrors.IsNotFound(err) { + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + } else if supervisorCluster { + ginkgo.By("CNS_TEST: Running for WCP setup") + thickProvPolicy := os.Getenv(envStoragePolicyNameWithThickProvision) + if thickProvPolicy == "" { + ginkgo.Skip(envStoragePolicyNameWithThickProvision + " env variable not set") + } + profileID := e2eVSphere.GetSpbmPolicyID(thickProvPolicy) + scParameters[scParamStoragePolicyID] = profileID + // create resource quota + createResourceQuota(client, namespace, rqLimit, thickProvPolicy) + storageclass, err = createStorageClass(client, scParameters, nil, "", "", true, thickProvPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + ginkgo.By("CNS_TEST: Running for GC setup") + thickProvPolicy := 
os.Getenv(envStoragePolicyNameWithThickProvision) + if thickProvPolicy == "" { + ginkgo.Skip(envStoragePolicyNameWithThickProvision + " env variable not set") + } + createResourceQuota(client, namespace, rqLimit, thickProvPolicy) + scParameters[svStorageClassName] = thickProvPolicy + scParameters[scParamFsType] = ext4FSType + storageclass, err = client.StorageV1().StorageClasses().Get(ctx, thickProvPolicy, metav1.GetOptions{}) + if !apierrors.IsNotFound(err) { + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + var allowExpansion = true + storageclass.AllowVolumeExpansion = &allowExpansion + storageclass, err = client.StorageV1().StorageClasses().Update(ctx, storageclass, metav1.UpdateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Creating PVCs using the Storage Class") + framework.Logf("VOLUME_OPS_SCALE is set to %v", volumeOpsScale) + for i := 0; i < volumeOpsScale; i++ { + framework.Logf("Creating pvc-%v", i) + pvclaims[i], err = fpv.CreatePVC(ctx, client, namespace, + getPersistentVolumeClaimSpecWithStorageClass(namespace, diskSize, storageclass, nil, "")) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + ginkgo.By("Waiting for all claims to be in bound state") + persistentvolumes, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, + 2*framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // TODO: Add a logic to check for the no orphan volumes + defer func() { + for _, claim := range pvclaims { + err := fpv.DeletePersistentVolumeClaim(ctx, client, claim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + ginkgo.By("Verify PVs, volumes are deleted from CNS") + for _, pv := range persistentvolumes { + err := fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, framework.Poll, + framework.PodDeleteTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volumeID := pv.Spec.CSI.VolumeHandle + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeID) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), + fmt.Sprintf("Volume: %s should not be present in the "+ + "CNS after it is deleted from kubernetes", volumeID)) + } + }() + + ginkgo.By("Invoking CNS Unregister Volume API for all the FCD's created above") + for _, pv := range persistentvolumes { + volumeID := pv.Spec.CSI.VolumeHandle + time.Sleep(30 * time.Second) + + ginkgo.By("Create CNS unregister volume with above created FCD " + pv.Spec.CSI.VolumeHandle) + + cnsUnRegisterVolume := getCNSUnregisterVolumeSpec(namespace, volumeID) + + cnsRegisterVolumeName := cnsUnRegisterVolume.GetName() + framework.Logf("CNS unregister volume name : %s", cnsRegisterVolumeName) + + err = createCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + ginkgo.By("Delete CNS unregister volume CR by name " + cnsRegisterVolumeName) + err = deleteCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Waiting for CNS unregister volume to be unregistered") + framework.ExpectNoError(waitForCNSUnRegisterVolumeToGetUnregistered(ctx, + restConfig, cnsUnRegisterVolume, poll, supervisorClusterOperationsTimeout)) + } + + ginkgo.By("Verify PVs, volumes are also deleted from CNS") + for _, pv := range persistentvolumes { + err := fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, framework.Poll, + framework.PodDeleteTimeout) + 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volumeID := pv.Spec.CSI.VolumeHandle + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeID) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), + fmt.Sprintf("Volume: %s should not be present in the "+ + "CNS after it is deleted from kubernetes", volumeID)) + } + + defaultDatastore = getDefaultDatastore(ctx) + ginkgo.By(fmt.Sprintf("defaultDatastore %v sec", defaultDatastore)) + + for _, pv1 := range persistentvolumes { + ginkgo.By(fmt.Sprintf("Deleting FCD: %s", pv1.Spec.CSI.VolumeHandle)) + err = deleteFcdWithRetriesForSpecificErr(ctx, pv1.Spec.CSI.VolumeHandle, defaultDatastore.Reference(), + []string{disklibUnlinkErr}, []string{objOrItemNotFoundErr}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } +} + +func exportAttachedVolume(namespace string, client clientset.Interface, + storagePolicyName string, scParameters map[string]string, volumeOpsScale int, extendVolume bool) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var storageclass *storagev1.StorageClass + var persistentvolumes []*v1.PersistentVolume + var pvclaims []*v1.PersistentVolumeClaim + var err error + //var fullSyncWaitTime int + pvclaims = make([]*v1.PersistentVolumeClaim, volumeOpsScale) + + // Get a config to talk to the apiserver + restConfig := getRestConfigClient() + + framework.Logf("storagePolicyName %v", storagePolicyName) + framework.Logf("extendVolume %v", extendVolume) + + if stretchedSVC { + ginkgo.By("CNS_TEST: Running for Stretch setup") + storageclass, err = client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) + if !apierrors.IsNotFound(err) { + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + } else if supervisorCluster { + ginkgo.By("CNS_TEST: Running for WCP setup") + thickProvPolicy := os.Getenv(envStoragePolicyNameWithThickProvision) + if thickProvPolicy == "" { + ginkgo.Skip(envStoragePolicyNameWithThickProvision + " env variable not set") + } + profileID := e2eVSphere.GetSpbmPolicyID(thickProvPolicy) + scParameters[scParamStoragePolicyID] = profileID + // create resource quota + createResourceQuota(client, namespace, rqLimit, thickProvPolicy) + storageclass, err = createStorageClass(client, scParameters, nil, "", "", true, thickProvPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + ginkgo.By("CNS_TEST: Running for GC setup") + thickProvPolicy := os.Getenv(envStoragePolicyNameWithThickProvision) + if thickProvPolicy == "" { + ginkgo.Skip(envStoragePolicyNameWithThickProvision + " env variable not set") + } + createResourceQuota(client, namespace, rqLimit, thickProvPolicy) + scParameters[svStorageClassName] = thickProvPolicy + scParameters[scParamFsType] = ext4FSType + storageclass, err = client.StorageV1().StorageClasses().Get(ctx, thickProvPolicy, metav1.GetOptions{}) + if !apierrors.IsNotFound(err) { + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + var allowExpansion = true + storageclass.AllowVolumeExpansion = &allowExpansion + + //Priya to fix + //storageclass, err = client.StorageV1().StorageClasses().Update(ctx, storageclass, metav1.UpdateOptions{}) + //gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Creating PVCs using the Storage Class") + framework.Logf("VOLUME_OPS_SCALE is set to %v", volumeOpsScale) + for i := 0; i < volumeOpsScale; i++ { + framework.Logf("Creating pvc%v", i) + pvclaims[i], err = fpv.CreatePVC(ctx, client, namespace, + 
getPersistentVolumeClaimSpecWithStorageClass(namespace, diskSize, storageclass, nil, "")) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + ginkgo.By("Waiting for all claims to be in bound state") + persistentvolumes, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, + 2*framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // TODO: Add a logic to check for the no orphan volumes + defer func() { + for _, claim := range pvclaims { + err := fpv.DeletePersistentVolumeClaim(ctx, client, claim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + ginkgo.By("Verify PVs, volumes are deleted from CNS") + for _, pv := range persistentvolumes { + err := fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, framework.Poll, + framework.PodDeleteTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volumeID := pv.Spec.CSI.VolumeHandle + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeID) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), + fmt.Sprintf("Volume: %s should not be present in the "+ + "CNS after it is deleted from kubernetes", volumeID)) + } + }() + + ginkgo.By("Create POD") + pod, err := createPod(ctx, client, namespace, nil, pvclaims, false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + ginkgo.By("Deleting the pod") + err = fpod.DeletePodWithWait(ctx, client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + err = fpod.WaitForPodNameRunningInNamespace(ctx, client, pod.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Invoking CNS Unregister Volume API for all the FCD's created above") + for _, pv := range persistentvolumes { + volumeID := pv.Spec.CSI.VolumeHandle + time.Sleep(30 * time.Second) + + ginkgo.By("Create CNS unregister volume with above created FCD " + pv.Spec.CSI.VolumeHandle) + + cnsUnRegisterVolume := getCNSUnregisterVolumeSpec(namespace, volumeID) + + cnsRegisterVolumeName := cnsUnRegisterVolume.GetName() + framework.Logf("CNS unregister volume name : %s", cnsRegisterVolumeName) + + err = createCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + ginkgo.By("Delete CNS unregister volume CR by name " + cnsRegisterVolumeName) + err = deleteCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Waiting for CNS unregister volume to be unregistered") + gomega.Expect(waitForCNSUnRegisterVolumeToGetUnregistered(ctx, + restConfig, cnsUnRegisterVolume, poll, + supervisorClusterOperationsTimeout)).To(gomega.HaveOccurred()) + } + + ginkgo.By("Deleting the pod") + err = fpod.DeletePodWithWait(ctx, client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Retry: Invoking CNS Unregister Volume API for all the FCD's created above") + for _, pv := range persistentvolumes { + volumeID := pv.Spec.CSI.VolumeHandle + time.Sleep(30 * time.Second) + + ginkgo.By("Create CNS unregister volume with above created FCD " + pv.Spec.CSI.VolumeHandle) + + cnsUnRegisterVolume := getCNSUnregisterVolumeSpec(namespace, volumeID) + + cnsRegisterVolumeName := cnsUnRegisterVolume.GetName() + framework.Logf("CNS unregister volume name : %s", cnsRegisterVolumeName) + + err = createCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + ginkgo.By("Delete CNS unregister volume CR by name " + cnsRegisterVolumeName) + err = 
deleteCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Waiting for CNS unregister volume to be unregistered") + framework.ExpectNoError(waitForCNSUnRegisterVolumeToGetUnregistered(ctx, + restConfig, cnsUnRegisterVolume, poll, supervisorClusterOperationsTimeout)) + } + + ginkgo.By("Verify PVs, volumes are deleted from CNS") + for _, pv := range persistentvolumes { + err := fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, framework.Poll, + framework.PodDeleteTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volumeID := pv.Spec.CSI.VolumeHandle + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeID) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), + fmt.Sprintf("Volume: %s should not be present in the "+ + "CNS after it is deleted from kubernetes", volumeID)) + } + + defaultDatastore = getDefaultDatastore(ctx) + ginkgo.By(fmt.Sprintf("defaultDatastore %v sec", defaultDatastore)) + + for _, pv1 := range persistentvolumes { + ginkgo.By(fmt.Sprintf("Deleting FCD: %s", pv1.Spec.CSI.VolumeHandle)) + err = deleteFcdWithRetriesForSpecificErr(ctx, pv1.Spec.CSI.VolumeHandle, defaultDatastore.Reference(), + []string{disklibUnlinkErr}, []string{objOrItemNotFoundErr}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } +} + +func exportAttachedVolumeInTKG(namespace string, client clientset.Interface, + storagePolicyName string, scParameters map[string]string, volumeOpsScale int, extendVolume bool) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var storageclass *storagev1.StorageClass + var persistentvolumes []*v1.PersistentVolume + var pvclaims []*v1.PersistentVolumeClaim + var err error + //var fullSyncWaitTime int + pvclaims = make([]*v1.PersistentVolumeClaim, volumeOpsScale) + + svNamespace := GetAndExpectStringEnvVar(envSupervisorClusterNamespace) + + // Get a config to talk to the apiserver + restConfig := getRestConfigClient() + + framework.Logf("storagePolicyName %v", storagePolicyName) + framework.Logf("extendVolume %v", extendVolume) + + if stretchedSVC { + ginkgo.By("CNS_TEST: Running for Stretch setup") + storageclass, err = client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) + if !apierrors.IsNotFound(err) { + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + } else if supervisorCluster { + ginkgo.By("CNS_TEST: Running for WCP setup") + thickProvPolicy := os.Getenv(envStoragePolicyNameWithThickProvision) + if thickProvPolicy == "" { + ginkgo.Skip(envStoragePolicyNameWithThickProvision + " env variable not set") + } + profileID := e2eVSphere.GetSpbmPolicyID(thickProvPolicy) + scParameters[scParamStoragePolicyID] = profileID + // create resource quota + createResourceQuota(client, namespace, rqLimit, thickProvPolicy) + storageclass, err = createStorageClass(client, scParameters, nil, "", "", true, thickProvPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + ginkgo.By("CNS_TEST: Running for GC setup") + thickProvPolicy := os.Getenv(envStoragePolicyNameWithThickProvision) + if thickProvPolicy == "" { + ginkgo.Skip(envStoragePolicyNameWithThickProvision + " env variable not set") + } + createResourceQuota(client, namespace, rqLimit, thickProvPolicy) + scParameters[svStorageClassName] = thickProvPolicy + scParameters[scParamFsType] = ext4FSType + storageclass, err = client.StorageV1().StorageClasses().Get(ctx, thickProvPolicy, metav1.GetOptions{}) + if !apierrors.IsNotFound(err) { + 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + var allowExpansion = true + storageclass.AllowVolumeExpansion = &allowExpansion + + //Priya to fix + //storageclass, err = client.StorageV1().StorageClasses().Update(ctx, storageclass, metav1.UpdateOptions{}) + //gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Creating PVCs using the Storage Class") + framework.Logf("VOLUME_OPS_SCALE is set to %v", volumeOpsScale) + for i := 0; i < volumeOpsScale; i++ { + framework.Logf("Creating pvc%v", i) + pvclaims[i], err = fpv.CreatePVC(ctx, client, namespace, + getPersistentVolumeClaimSpecWithStorageClass(namespace, diskSize, storageclass, nil, "")) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + ginkgo.By("Waiting for all claims to be in bound state") + persistentvolumes, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, + 2*framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // TODO: Add a logic to check for the no orphan volumes + defer func() { + for _, claim := range pvclaims { + err := fpv.DeletePersistentVolumeClaim(ctx, client, claim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + ginkgo.By("Verify PVs, volumes are deleted from CNS") + for _, pv := range persistentvolumes { + err := fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, framework.Poll, + framework.PodDeleteTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volumeID := pv.Spec.CSI.VolumeHandle + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeID) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), + fmt.Sprintf("Volume: %s should not be present in the "+ + "CNS after it is deleted from kubernetes", volumeID)) + } + }() + + ginkgo.By("Create POD") + pod, err := createPod(ctx, client, namespace, nil, pvclaims, false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + ginkgo.By("Deleting the pod") + err = fpod.DeletePodWithWait(ctx, client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + err = fpod.WaitForPodNameRunningInNamespace(ctx, client, pod.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Invoking CNS Unregister Volume API for all the FCD's created above") + for _, pv := range persistentvolumes { + volumeID := pv.Spec.CSI.VolumeHandle + time.Sleep(30 * time.Second) + + if guestCluster { + volumeID = getVolumeIDFromSupervisorCluster(pv.Spec.CSI.VolumeHandle) + } + + ginkgo.By("Create CNS unregister volume with above created FCD " + volumeID) + + cnsUnRegisterVolume := getCNSUnregisterVolumeSpec(svNamespace, volumeID) + + cnsRegisterVolumeName := cnsUnRegisterVolume.GetName() + framework.Logf("CNS unregister volume name : %s", cnsRegisterVolumeName) + + err = createCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + ginkgo.By("Delete CNS unregister volume CR by name " + cnsRegisterVolumeName) + err = deleteCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Waiting for CNS unregister volume to be unregistered") + gomega.Expect(waitForCNSUnRegisterVolumeToGetUnregistered(ctx, + restConfig, cnsUnRegisterVolume, poll, + supervisorClusterOperationsTimeout)).To(gomega.HaveOccurred()) + } + + ginkgo.By("Deleting the pod") + err = fpod.DeletePodWithWait(ctx, client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Retry: 
Invoking CNS Unregister Volume API for all the FCD's created above") + for _, pv := range persistentvolumes { + volumeID := pv.Spec.CSI.VolumeHandle + time.Sleep(30 * time.Second) + + if guestCluster { + volumeID = getVolumeIDFromSupervisorCluster(pv.Spec.CSI.VolumeHandle) + } + + ginkgo.By("Create CNS unregister volume with above created FCD " + pv.Spec.CSI.VolumeHandle) + + cnsUnRegisterVolume := getCNSUnregisterVolumeSpec(svNamespace, volumeID) + + cnsRegisterVolumeName := cnsUnRegisterVolume.GetName() + framework.Logf("CNS unregister volume name : %s", cnsRegisterVolumeName) + + err = createCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + ginkgo.By("Delete CNS unregister volume CR by name " + cnsRegisterVolumeName) + err = deleteCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Waiting for CNS unregister volume to be unregistered") + gomega.Expect(waitForCNSUnRegisterVolumeToGetUnregistered(ctx, + restConfig, cnsUnRegisterVolume, poll, + supervisorClusterOperationsTimeout)).To(gomega.HaveOccurred()) + } +} + +// exportVolumeWithServiceDown creates the volumes and immediately stops the services and wait for +// the service to be up again and validates the volumes are bound +func exportVolumeWithServiceDown(serviceName string, namespace string, client clientset.Interface, + scParameters map[string]string, volumeOpsScale int, isServiceStopped bool, + c clientset.Interface) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ginkgo.By(fmt.Sprintf("Invoking Test for create volume when %v goes down", serviceName)) + var storageclass *storagev1.StorageClass + var persistentvolumes []*v1.PersistentVolume + var pvclaims []*v1.PersistentVolumeClaim + var cnscrds []*cnsunregistervolumev1alpha1.CnsUnregisterVolume + var err error + var fullSyncWaitTime int + pvclaims = make([]*v1.PersistentVolumeClaim, volumeOpsScale) + cnscrds = make([]*cnsunregistervolumev1alpha1.CnsUnregisterVolume, volumeOpsScale) + + // Decide which test setup is available to run + if vanillaCluster { + ginkgo.By("CNS_TEST: Running for vanilla k8s setup") + // TODO: Create Thick Storage Policy from Pre-setup to support 6.7 Setups + scParameters[scParamStoragePolicyName] = "Management Storage Policy - Regular" + // Check if it is file volumes setups + if rwxAccessMode { + scParameters[scParamFsType] = nfs4FSType + } + curtime := time.Now().Unix() + randomValue := rand.Int() + val := strconv.FormatInt(int64(randomValue), 10) + val = string(val[1:3]) + curtimestring := strconv.FormatInt(curtime, 10) + scName := "idempotency" + curtimestring + val + storageclass, err = createStorageClass(client, scParameters, nil, "", "", false, scName) + } else if stretchedSVC { + ginkgo.By("CNS_TEST: Running for Stretch setup") + storagePolicyName := GetAndExpectStringEnvVar(envZonalStoragePolicyName) + storageclass, err = client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) + if !apierrors.IsNotFound(err) { + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + } else if supervisorCluster { + ginkgo.By("CNS_TEST: Running for WCP setup") + thickProvPolicy := os.Getenv(envStoragePolicyNameWithThickProvision) + if thickProvPolicy == "" { + ginkgo.Skip(envStoragePolicyNameWithThickProvision + " env variable not set") + } + profileID := e2eVSphere.GetSpbmPolicyID(thickProvPolicy) + scParameters[scParamStoragePolicyID] = profileID + 
// create resource quota + createResourceQuota(client, namespace, rqLimit, thickProvPolicy) + storageclass, err = createStorageClass(client, scParameters, nil, "", "", false, thickProvPolicy) + } else { + ginkgo.By("CNS_TEST: Running for GC setup") + thickProvPolicy := os.Getenv(envStoragePolicyNameWithThickProvision) + if thickProvPolicy == "" { + ginkgo.Skip(envStoragePolicyNameWithThickProvision + " env variable not set") + } + createResourceQuota(client, namespace, rqLimit, thickProvPolicy) + scParameters[svStorageClassName] = thickProvPolicy + storageclass, err = client.StorageV1().StorageClasses().Get(ctx, thickProvPolicy, metav1.GetOptions{}) + if !apierrors.IsNotFound(err) { + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + } + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + if vanillaCluster { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, + *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Creating PVCs using the Storage Class") + framework.Logf("VOLUME_OPS_SCALE is set to %v", volumeOpsScale) + for i := 0; i < volumeOpsScale; i++ { + framework.Logf("Creating pvc%v", i) + accessMode := v1.ReadWriteOnce + + // Check if it is file volumes setups + if rwxAccessMode { + accessMode = v1.ReadWriteMany + } + pvclaims[i], err = createPVC(ctx, client, namespace, nil, "", storageclass, accessMode) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + ginkgo.By("Waiting for all claims to be in bound state") + persistentvolumes, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, + 2*framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + if serviceName == "CSI" { + // Get CSI Controller's replica count from the setup + deployment, err := c.AppsV1().Deployments(csiSystemNamespace).Get(ctx, + vSphereCSIControllerPodNamePrefix, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + csiReplicaCount := *deployment.Spec.Replicas + + ginkgo.By("Stopping CSI driver") + isServiceStopped, err = stopCSIPods(ctx, c, csiSystemNamespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + if isServiceStopped { + framework.Logf("Starting CSI driver") + isServiceStopped, err = startCSIPods(ctx, c, csiReplicaCount, csiSystemNamespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + // Get a config to talk to the apiserver + restConfig := getRestConfigClient() + time.Sleep(30 * time.Second) + + for i := 0; i < volumeOpsScale; i++ { + + volumeID := persistentvolumes[i].Spec.CSI.VolumeHandle + + if guestCluster { + volumeID = getVolumeIDFromSupervisorCluster(persistentvolumes[i].Spec.CSI.VolumeHandle) + } + + ginkgo.By("Create CNS unregister volume with above created FCD " + persistentvolumes[i].Spec.CSI.VolumeHandle) + + cnsUnRegisterVolume := getCNSUnregisterVolumeSpec(namespace, volumeID) + cnscrds[i] = cnsUnRegisterVolume + + cnsRegisterVolumeName := cnsUnRegisterVolume.GetName() + framework.Logf("CNS unregister volume name : %s", cnsRegisterVolumeName) + + err = createCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + ginkgo.By("Delete CNS unregister volume CR by name " + cnsRegisterVolumeName) + err = deleteCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("CNS unregister volume should fail") + 
gomega.Expect(waitForCNSUnRegisterVolumeToGetUnregistered(ctx, + restConfig, cnsUnRegisterVolume, poll, + supervisorClusterOperationsTimeout)).To(gomega.HaveOccurred()) + } + + framework.Logf("Starting CSI driver") + isServiceStopped, err = startCSIPods(ctx, c, csiReplicaCount, csiSystemNamespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + if os.Getenv(envFullSyncWaitTime) != "" { + fullSyncWaitTime, err = strconv.Atoi(os.Getenv(envFullSyncWaitTime)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // Full sync interval can be 1 min at minimum so full sync wait time has to be more than 120s + if fullSyncWaitTime < 120 || fullSyncWaitTime > defaultFullSyncWaitTime { + framework.Failf("The FullSync Wait time %v is not set correctly", fullSyncWaitTime) + } + } else { + fullSyncWaitTime = defaultFullSyncWaitTime + } + + ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow full sync finish", fullSyncWaitTime)) + time.Sleep(time.Duration(fullSyncWaitTime) * time.Second) + + for i := 0; i < volumeOpsScale; i++ { + ginkgo.By("Waiting for CNS unregister volume to be unregistered") + framework.ExpectNoError(waitForCNSUnRegisterVolumeToGetUnregistered(ctx, + restConfig, cnscrds[i], poll, supervisorClusterOperationsTimeout)) + } + + } else if serviceName == hostdServiceName { + ginkgo.By("Fetch IPs for the all the hosts in the cluster") + hostIPs := getAllHostsIP(ctx, true) + isServiceStopped = true + + var wg sync.WaitGroup + wg.Add(len(hostIPs)) + + for _, hostIP := range hostIPs { + go stopHostD(ctx, hostIP, &wg) + } + wg.Wait() + + defer func() { + framework.Logf("In defer function to start the hostd service on all hosts") + if isServiceStopped { + for _, hostIP := range hostIPs { + startHostDOnHost(ctx, hostIP) + } + isServiceStopped = false + } + }() + + ginkgo.By("Sleeping for 5+1 min for default provisioner timeout") + time.Sleep(pollTimeoutSixMin) + + for _, hostIP := range hostIPs { + startHostDOnHost(ctx, hostIP) + } + isServiceStopped = false + } else { + ginkgo.By(fmt.Sprintf("Stopping %v on the vCenter host", serviceName)) + vcAddress := e2eVSphere.Config.Global.VCenterHostname + ":" + sshdPort + err = invokeVCenterServiceControl(ctx, stopOperation, serviceName, vcAddress) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + isServiceStopped = true + err = waitVCenterServiceToBeInState(ctx, serviceName, vcAddress, svcStoppedMessage) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + if isServiceStopped { + ginkgo.By(fmt.Sprintf("Starting %v on the vCenter host", serviceName)) + err = invokeVCenterServiceControl(ctx, startOperation, serviceName, vcAddress) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = waitVCenterServiceToBeInState(ctx, serviceName, vcAddress, svcRunningMessage) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + isServiceStopped = false + } + }() + + ginkgo.By("Sleeping for 5+1 min for default provisioner timeout") + time.Sleep(pollTimeoutSixMin) + + ginkgo.By(fmt.Sprintf("Starting %v on the vCenter host", serviceName)) + err = invokeVCenterServiceControl(ctx, startOperation, serviceName, vcAddress) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + isServiceStopped = false + err = waitVCenterServiceToBeInState(ctx, serviceName, vcAddress, svcRunningMessage) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Sleeping for full sync interval") + time.Sleep(time.Duration(fullSyncWaitTime) * time.Second) + } + + //After service restart + bootstrap() + + // TODO: Add a logic to check for the no orphan volumes + 
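+	// Deferred cleanup for this test: delete every PVC created above, then wait for
+	// the corresponding PVs to be removed and confirm the volumes are no longer
+	// present in CNS, so a failed unregister run does not leave volumes behind.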
defer func() { + for _, claim := range pvclaims { + err := fpv.DeletePersistentVolumeClaim(ctx, client, claim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + ginkgo.By("Verify PVs, volumes are deleted from CNS") + for _, pv := range persistentvolumes { + err := fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, framework.Poll, + framework.PodDeleteTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volumeID := pv.Spec.CSI.VolumeHandle + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeID) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), + fmt.Sprintf("Volume: %s should not be present in the CNS after it is deleted from "+ + "kubernetes", volumeID)) + } + }() +} diff --git a/tests/e2e/csi_snapshot_basic.go b/tests/e2e/csi_snapshot_basic.go index 8ac4849255..a9faf0d19e 100644 --- a/tests/e2e/csi_snapshot_basic.go +++ b/tests/e2e/csi_snapshot_basic.go @@ -5724,6 +5724,8 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { scParameters[svStorageClassName] = zonalPolicy storagePolicyName = zonalPolicy } + // Get supvervisor cluster client. + svcClient, _ := getSvcClientAndNamespace() storageclass, err := svcClient.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -7484,6 +7486,157 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { volumeSnapshot, pandoraSyncWaitTime, volHandle, dynamicSnapshotId) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) + + /* + Export PVC having snapshots in TKG/WCP if supported and qualified + + Running test in TKG Context + Create SPBM Policy and Assign to Namespace: + Create a SPBM policy and assign it to the test namespace with sufficient quota. + + Create PVC: + Create a PersistentVolumeClaim (PVC) named pvc1 in the test namespace. + Verify CNS Metadata + + Create and Verify Snapshots: + Create a couple of snapshots for pvc1. + Verify that the snapshots are in the "ready" state. + + Verify Storage Quota: + Confirm that the storage quota in the test namespace accurately reflects the space occupied by pvc1 and its snapshots. + + Create and Apply CnsUnregisterVolume CR: + Create a CnsUnregisterVolume Custom Resource (CR) with the volumeID of pvc1 and apply it to the test namespace. + + Check CnsUnregisterVolume Status: + Verify that the status.Unregistered field in the CnsUnregisterVolume CR is set to false. + + Check Webhook Behavior: + Since the PVC has live CNS snapshots, confirm that webhooks deny the deletion of PVC/PV. + + Verify PV/PVC Not Deleted: + Ensure that the PersistentVolume (PV) and PVC are not deleted from the Supervisor cluster. + + Delete Volume Snapshots: + Delete the volume snapshots for pvc1 and wait for the snapshots to be cleaned up. + + Reapply CnsUnregisterVolume CR: + Create and apply a new CnsUnregisterVolume CR with the volumeID of pvc1 to the test namespace. + + Check Updated CnsUnregisterVolume Status: + Verify that the status.Unregistered field in the CnsUnregisterVolume CR is set to true. + + Check PV/PVC Deletion: + Confirm that the PV and PVC are deleted from the Supervisor cluster. + Verify CNS Metadata + + Verify Storage Quota Freed: + Verify that the storage quota has been freed following the deletion of PV/PVC. + + Verify FCD Status: + Invoke the FCD API RetrieveVStorageObject from vCenter MOB and verify that the FCD is not deleted. + + Cleanup: + Delete the FCD, the test namespace, and the SPBM policy. 
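+	   For the "Create and Apply CnsUnregisterVolume CR" steps above, the CR built by
+	   getCNSUnregisterVolumeSpec is expected to look roughly like the sketch below.
+	   This is illustrative only: the Spec.VolumeID and Status.Unregistered field names
+	   are assumptions based on the cnsunregistervolume v1alpha1 API used by these
+	   tests, and the name/namespace are placeholders.
+
+	       cr := &cnsunregistervolumev1alpha1.CnsUnregisterVolume{
+	           ObjectMeta: metav1.ObjectMeta{GenerateName: "cns-unregister-", Namespace: namespace},
+	           Spec:       cnsunregistervolumev1alpha1.CnsUnregisterVolumeSpec{VolumeID: volHandle},
+	       }
+	       // Once the operator reconciles the CR, status.Unregistered reports the outcome
+	       // the steps above assert on (false while snapshots exist, true afterwards).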
+ */ + ginkgo.It("[csi-unregister-volume] Export PVC having snapshots in TKG", + ginkgo.Label(p0, block, tkg, vanilla, snapshot, stable), func() { + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var volHandle string + if vanillaCluster { + scParameters[scParamDatastoreURL] = datastoreURL + } else if guestCluster { + scParameters[svStorageClassName] = storagePolicyName + } + + ginkgo.By("Creating a PVC") + storageclasspvc, pvclaim, err := createPVCAndStorageClass(ctx, client, + namespace, nil, scParameters, diskSize, nil, "", false, v1.ReadWriteOnce) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + err = client.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Expect claim to provision volume successfully") + persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") + + volHandle = persistentvolumes[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + volumeID := getVolumeIDFromSupervisorCluster(volHandle) + gomega.Expect(volumeID).NotTo(gomega.BeEmpty()) + + defer func() { + err = fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, pvclaim.Namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if vanillaCluster { + err = snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, + metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Create a dynamic volume snapshot") + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvclaim, volHandle, diskSize, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + // Get a config to talk to the apiserver + restConfig := getRestConfigClient() + + ginkgo.By("Create CNS unregister volume with above created FCD " + volHandle) + + cnsUnRegisterVolume := getCNSUnregisterVolumeSpec(svcNamespace, volHandle) + + cnsRegisterVolumeName := cnsUnRegisterVolume.GetName() + framework.Logf("CNS unregister volume name : %s", cnsRegisterVolumeName) + + err = createCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + ginkgo.By("Delete CNS unregister volume CR by name " + cnsRegisterVolumeName) + err = 
deleteCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Waiting for CNS unregister volume to be unregistered") + gomega.Expect(waitForCNSUnRegisterVolumeToGetUnregistered(ctx, + restConfig, cnsUnRegisterVolume, poll, + supervisorClusterOperationsTimeout)).To(gomega.HaveOccurred()) + }) + }) // invokeSnapshotOperationsOnSharedDatastore is a wrapper method which invokes creation of volume snapshot diff --git a/tests/e2e/gc_rwx_basic.go b/tests/e2e/gc_rwx_basic.go index ad507564e3..efe4013313 100644 --- a/tests/e2e/gc_rwx_basic.go +++ b/tests/e2e/gc_rwx_basic.go @@ -465,4 +465,465 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] Basic File Volume Provision Test", func() output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, "Hello message from test into Pod2 file")).NotTo(gomega.BeFalse()) }) + + /* + Test 1: + Export detached RWX PVC in TKG + + Running test in TKG Context + + Create SPBM Policy and Assign to TKG Namespace: + Create a SPBM policy and assign it to the TKG namespace with sufficient quota. + + Create RWX PVC: + Create a ReadWriteMany (RWX) PersistentVolumeClaim (PVC) named pvc1 in the test namespace in TKG. + Verify CNS Metadata + + Create and Apply CnsUnregisterVolume CR (SVC Context): + Create a CnsUnregisterVolume Custom Resource (CR) with the volumeID of pvc1 and + apply it to the test namespace in the Supervisor cluster (SVC context). + + Check CnsUnregisterVolume Status (SVC Context): + Check the CnsUnregisterVolume CR and verify that the status.Unregistered field is set to false (SVC context). + + Verify PV/PVC Deletion (SVC Context): + Confirm that the PersistentVolume (PV) and PVC are not deleted from the Supervisor cluster (SVC context). + Verify CNS Metadata + + Cleanup: + Delete the File Share, the test namespace, and the SPBM policy. + */ + ginkgo.It("[csi-unregister-volume] Export detached RWX PVC in TKG", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var storageclasspvc *storagev1.StorageClass + var pvclaim *v1.PersistentVolumeClaim + var err error + + ginkgo.By("CNS_TEST: Running for GC setup") + scParameters[svStorageClassName] = storagePolicyName + ginkgo.By("Creating a PVC") + storageclasspvc, pvclaim, err = createPVCAndStorageClass(ctx, client, + namespace, nil, scParameters, diskSize, nil, "", false, v1.ReadWriteMany) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + err = client.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Expect claim to provision volume successfully") + persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") + + volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + volumeID := getVolumeIDFromSupervisorCluster(volHandle) + gomega.Expect(volumeID).NotTo(gomega.BeEmpty()) + + defer func() { + err = fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, pvclaim.Namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + // Verify using CNS Query API if VolumeID retrieved from PV is present. 
+ ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volumeID)) + queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volumeID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(queryResult.Volumes).ShouldNot(gomega.BeEmpty()) + ginkgo.By(fmt.Sprintf("volume Name:%s, capacity:%d volumeType:%s health:%s accesspoint: %s", + queryResult.Volumes[0].Name, + queryResult.Volumes[0].BackingObjectDetails.(*cnstypes.CnsVsanFileShareBackingDetails).CapacityInMb, + queryResult.Volumes[0].VolumeType, queryResult.Volumes[0].HealthStatus, + queryResult.Volumes[0].BackingObjectDetails.(*cnstypes.CnsVsanFileShareBackingDetails).AccessPoints), + ) + + // Get a config to talk to the apiserver + restConfig := getRestConfigClient() + + svNamespace := GetAndExpectStringEnvVar(envSupervisorClusterNamespace) + + ginkgo.By("Create CNS unregister volume with above created FCD " + volumeID) + + cnsUnRegisterVolume := getCNSUnregisterVolumeSpec(svNamespace, volumeID) + + cnsRegisterVolumeName := cnsUnRegisterVolume.GetName() + framework.Logf("CNS unregister volume name : %s", cnsRegisterVolumeName) + + err = createCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + ginkgo.By("Delete CNS unregister volume CR by name " + cnsRegisterVolumeName) + err = deleteCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Waiting for CNS unregister volume to be unregistered") + gomega.Expect(waitForCNSUnRegisterVolumeToGetUnregistered(ctx, + restConfig, cnsUnRegisterVolume, poll, + supervisorClusterOperationsTimeout)).To(gomega.HaveOccurred()) + }) + + /* + Test 2: + Export RWX PVC attached in TKG + + Running test in TKG Context + + Create SPBM Policy and Assign to TKG Namespace: + Create a SPBM policy and assign it to the TKG namespace with sufficient quota in the TKG context. + + Create PVC: + Create a PersistentVolumeClaim (PVC) named pvc1 in the test namespace in the TKG context. + Verify CNS Metadata + + Create and Use Pod: + Create a Pod named pod1 in the test namespace using pvc1. + Wait for pod1 to be up and running, then perform I/O operations on pvc1 through pod1. + + Create and Apply CnsUnregisterVolume CR (SVC Context): + Create a CnsUnregisterVolume Custom Resource (CR) with the volumeID of pvc1 and + apply it to the test namespace in the Supervisor cluster (SVC context). + + Check CnsUnregisterVolume Status (SVC Context): + Check the CnsUnregisterVolume CR and verify that the status.Unregistered field is set to false (SVC context). + + Check PV/PVC Status (SVC Context): + Ensure that the PersistentVolume (PV) and PVC are not deleted from the Supervisor cluster (SVC context). + + Delete Pod and Wait: + Delete pod1 and wait until the pod is fully deleted. + Reapply CnsUnregisterVolume CR (SVC Context): + Create and apply a new CnsUnregisterVolume CR with the volumeID of pvc1 to + the test namespace in the Supervisor cluster (SVC context). + + Check Updated CnsUnregisterVolume Status (SVC Context): + Verify that the status.Unregistered field in the CnsUnregisterVolume CR is set to false (SVC context). + + Check PV/PVC Deletion (SVC Context): + Confirm that the PV and PVC are not deleted from the Supervisor cluster (SVC context). + Verify CNS Metadata + + Cleanup: + Delete the File Share, the test namespace, and the SPBM policy. 
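+	   Note on test mechanics: because the RWX volume was provisioned through TKG, the
+	   expectation above is that unregistration is denied and status.Unregistered stays
+	   false. The test body below therefore asserts that
+	   waitForCNSUnRegisterVolumeToGetUnregistered returns an error rather than succeeding.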
+ */ + ginkgo.It("[csi-unregister-volume] Export RWX PVC attached in TKG", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var storageclasspvc *storagev1.StorageClass + var pvclaim *v1.PersistentVolumeClaim + var err error + + ginkgo.By("CNS_TEST: Running for GC setup") + scParameters[svStorageClassName] = storagePolicyName + ginkgo.By("Creating a PVC") + storageclasspvc, pvclaim, err = createPVCAndStorageClass(ctx, client, + namespace, nil, scParameters, diskSize, nil, "", false, v1.ReadWriteMany) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + err = client.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Expect claim to provision volume successfully") + persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") + + volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + volumeID := getVolumeIDFromSupervisorCluster(volHandle) + gomega.Expect(volumeID).NotTo(gomega.BeEmpty()) + + defer func() { + err = fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, pvclaim.Namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + // Verify using CNS Query API if VolumeID retrieved from PV is present. + ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volumeID)) + queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volumeID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(queryResult.Volumes).ShouldNot(gomega.BeEmpty()) + ginkgo.By(fmt.Sprintf("volume Name:%s, capacity:%d volumeType:%s health:%s accesspoint: %s", + queryResult.Volumes[0].Name, + queryResult.Volumes[0].BackingObjectDetails.(*cnstypes.CnsVsanFileShareBackingDetails).CapacityInMb, + queryResult.Volumes[0].VolumeType, queryResult.Volumes[0].HealthStatus, + queryResult.Volumes[0].BackingObjectDetails.(*cnstypes.CnsVsanFileShareBackingDetails).AccessPoints), + ) + + // Create a Pod to use this PVC, and verify volume has been attached + ginkgo.By("Creating pod to attach PV to the node") + pod, err := createPod(ctx, client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false, execRWXCommandPod1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + // Delete POD + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace)) + err = fpod.DeletePodWithWait(ctx, client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By(fmt.Sprintf("Wait till the CnsFileAccessConfig CRD is deleted %s", + pod.Spec.NodeName+"-"+volHandle)) + err = waitTillCNSFileAccesscrdDeleted(ctx, f, pod.Spec.NodeName+"-"+volHandle, crdCNSFileAccessConfig, + crdVersion, crdGroup, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verifying whether the CnsFileAccessConfig CRD is Deleted or not for Pod1") + verifyCNSFileAccessConfigCRDInSupervisor(ctx, pod.Spec.NodeName+"-"+volHandle, + crdCNSFileAccessConfig, crdVersion, crdGroup, false) + }() + + ginkgo.By("Verifying whether the CnsFileAccessConfig CRD is created or not for Pod1") + verifyCNSFileAccessConfigCRDInSupervisor(ctx, pod.Spec.NodeName+"-"+volHandle, + crdCNSFileAccessConfig, 
crdVersion, crdGroup, true) + + // Get a config to talk to the apiserver + restConfig := getRestConfigClient() + + svNamespace := GetAndExpectStringEnvVar(envSupervisorClusterNamespace) + + ginkgo.By("Create CNS unregister volume with above created FCD " + volumeID) + + cnsUnRegisterVolume := getCNSUnregisterVolumeSpec(svNamespace, volumeID) + + cnsRegisterVolumeName := cnsUnRegisterVolume.GetName() + framework.Logf("CNS unregister volume name : %s", cnsRegisterVolumeName) + + err = createCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + ginkgo.By("Delete CNS unregister volume CR by name " + cnsRegisterVolumeName) + err = deleteCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Waiting for CNS unregister volume to be unregistered") + gomega.Expect(waitForCNSUnRegisterVolumeToGetUnregistered(ctx, + restConfig, cnsUnRegisterVolume, poll, + supervisorClusterOperationsTimeout)).To(gomega.HaveOccurred()) + }) + + /* + Test 3: + Export RWX PVC attached to multiple Pods in TKG + + Running test in TKG Context + + Create SPBM Policy and Assign to TKG Namespace: + Create a SPBM policy and assign it to the TKG namespace with sufficient quota in the TKG context. + + Create PVC: + + Create a PersistentVolumeClaim (PVC) named pvc1 in the test namespace in the TKG context. + Verify CNS Metadata + + Create multiple (two) pods attaching it to same PVC: + Create a multiple Pods in the test namespace using pvc1. + Wait for pods are up and running. + + Create and Apply CnsUnregisterVolume CR (SVC Context): + Create a CnsUnregisterVolume Custom Resource (CR) with the volumeID of pvc1 and + apply it to the test namespace in the Supervisor cluster (SVC context). + + Check CnsUnregisterVolume Status (SVC Context): + Check the CnsUnregisterVolume CR and verify that the status.Unregistered field is set to false (SVC context). + + Check PV/PVC Status (SVC Context): + Ensure that the PersistentVolume (PV) and PVC are not deleted from the Supervisor cluster (SVC context). + + Delete Pod1 and Wait: + Delete pod1 and wait until the pod is fully deleted. + + Reapply CnsUnregisterVolume CR (SVC Context): + Create and apply a new CnsUnregisterVolume CR with the volumeID of pvc1 to the + test namespace in the Supervisor cluster (SVC context). + + Check CnsUnregisterVolume Status (SVC Context): + Check the CnsUnregisterVolume CR and verify that the status.Unregistered field is set to false (SVC context). + + Check PV/PVC Status (SVC Context): + Ensure that the PersistentVolume (PV) and PVC are not deleted from the Supervisor cluster (SVC context). + + Delete pods and Wait: + Delete pods and wait until the pod is fully deleted. + + Reapply CnsUnregisterVolume CR (SVC Context): + Create and apply a new CnsUnregisterVolume CR with the volumeID of pvc1 to the + test namespace in the Supervisor cluster (SVC context). + + Check Updated CnsUnregisterVolume Status (SVC Context): + + Verify that the status.Unregistered field in the CnsUnregisterVolume CR is set to false (SVC context). + + Check PV/PVC Deletion (SVC Context): + Confirm that the PV and PVC are deleted from the Supervisor cluster (SVC context). + Verify CNS Metadata + + Verify that the File Share associated with pvc1 is still available. + Cleanup: + + Delete the File Share, the test namespace, and the SPBM policy. 
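+
+		Note: the PVC and Pods in these steps live in the TKG test namespace, while the
+		CnsUnregisterVolume CR is created through the Supervisor cluster's API server in the
+		Supervisor namespace (SVC context), as in the tests above.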
+ */ + ginkgo.It("[csi-unregister-volume] Export RWX PVC attached to multiple Pods in TKG", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var storageclasspvc *storagev1.StorageClass + var pvclaim *v1.PersistentVolumeClaim + var err error + + ginkgo.By("CNS_TEST: Running for GC setup") + scParameters[svStorageClassName] = storagePolicyName + ginkgo.By("Creating a PVC") + storageclasspvc, pvclaim, err = createPVCAndStorageClass(ctx, client, + namespace, nil, scParameters, diskSize, nil, "", false, v1.ReadWriteMany) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + err = client.StorageV1().StorageClasses().Delete(ctx, storageclasspvc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Expect claim to provision volume successfully") + persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") + + volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + volumeID := getVolumeIDFromSupervisorCluster(volHandle) + gomega.Expect(volumeID).NotTo(gomega.BeEmpty()) + + defer func() { + err = fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, pvclaim.Namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + // Verify using CNS Query API if VolumeID retrieved from PV is present. + ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volumeID)) + queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volumeID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(queryResult.Volumes).ShouldNot(gomega.BeEmpty()) + ginkgo.By(fmt.Sprintf("volume Name:%s, capacity:%d volumeType:%s health:%s accesspoint: %s", + queryResult.Volumes[0].Name, + queryResult.Volumes[0].BackingObjectDetails.(*cnstypes.CnsVsanFileShareBackingDetails).CapacityInMb, + queryResult.Volumes[0].VolumeType, queryResult.Volumes[0].HealthStatus, + queryResult.Volumes[0].BackingObjectDetails.(*cnstypes.CnsVsanFileShareBackingDetails).AccessPoints), + ) + + // Create a Pod to use this PVC, and verify volume has been attached + ginkgo.By("Creating pod to attach PV to the node") + pod, err := createPod(ctx, client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false, execRWXCommandPod1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + // Delete POD + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace)) + err = fpod.DeletePodWithWait(ctx, client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By(fmt.Sprintf("Wait till the CnsFileAccessConfig CRD is deleted %s", + pod.Spec.NodeName+"-"+volHandle)) + err = waitTillCNSFileAccesscrdDeleted(ctx, f, pod.Spec.NodeName+"-"+volHandle, crdCNSFileAccessConfig, + crdVersion, crdGroup, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verifying whether the CnsFileAccessConfig CRD is Deleted or not for Pod1") + verifyCNSFileAccessConfigCRDInSupervisor(ctx, pod.Spec.NodeName+"-"+volHandle, + crdCNSFileAccessConfig, crdVersion, crdGroup, false) + }() + + ginkgo.By("Verifying whether the CnsFileAccessConfig CRD is created or not for Pod1") + verifyCNSFileAccessConfigCRDInSupervisor(ctx, pod.Spec.NodeName+"-"+volHandle, + 
crdCNSFileAccessConfig, crdVersion, crdGroup, true) + + // Create a Pod to use this PVC, and verify volume has been attached + ginkgo.By("Creating pod2 to attach PV to the node") + pod2, err := createPod(ctx, client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false, execRWXCommandPod2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + // Delete POD + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod2.Name, namespace)) + err = fpod.DeletePodWithWait(ctx, client, pod2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By(fmt.Sprintf("Wait till the CnsFileAccessConfig CRD is deleted %s", + pod.Spec.NodeName+"-"+volHandle)) + err = waitTillCNSFileAccesscrdDeleted(ctx, f, pod.Spec.NodeName+"-"+volHandle, crdCNSFileAccessConfig, + crdVersion, crdGroup, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verifying whether the CnsFileAccessConfig CRD is Deleted or not for Pod2") + verifyCNSFileAccessConfigCRDInSupervisor(ctx, pod2.Spec.NodeName+"-"+volHandle, + crdCNSFileAccessConfig, crdVersion, crdGroup, false) + }() + + ginkgo.By("Verifying whether the CnsFileAccessConfig CRD is created or not for Pod2") + verifyCNSFileAccessConfigCRDInSupervisor(ctx, pod2.Spec.NodeName+"-"+volHandle, + crdCNSFileAccessConfig, crdVersion, crdGroup, true) + + ginkgo.By("Verify the volume is accessible and Read/write is possible") + cmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", + "cat /mnt/volume1/Pod1.html "} + output := e2ekubectl.RunKubectlOrDie(namespace, cmd...) + gomega.Expect(strings.Contains(output, "Hello message from Pod1")).NotTo(gomega.BeFalse()) + + wrtiecmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", + "echo 'Hello message from test into Pod1' > /mnt/volume1/Pod1.html"} + e2ekubectl.RunKubectlOrDie(namespace, wrtiecmd...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) + gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) + + ginkgo.By("Verify the volume is accessible and Read/write is possible from pod2") + cmd2 := []string{"exec", pod2.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", + "cat /mnt/volume1/Pod1.html "} + output = e2ekubectl.RunKubectlOrDie(namespace, cmd2...) + gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) + + wrtiecmd2 := []string{"exec", pod2.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", + "echo 'Hello message from test into Pod2' > /mnt/volume1/Pod1.html"} + e2ekubectl.RunKubectlOrDie(namespace, wrtiecmd2...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd2...) 
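+		// Pod2's write should now be visible in the shared Pod1.html, completing the cross-pod read/write check.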
+ gomega.Expect(strings.Contains(output, "Hello message from test into Pod2")).NotTo(gomega.BeFalse()) + + // Get a config to talk to the apiserver + restConfig := getRestConfigClient() + + svNamespace := GetAndExpectStringEnvVar(envSupervisorClusterNamespace) + + ginkgo.By("Create CNS unregister volume with above created FCD " + volumeID) + + cnsUnRegisterVolume := getCNSUnregisterVolumeSpec(svNamespace, volumeID) + + cnsRegisterVolumeName := cnsUnRegisterVolume.GetName() + framework.Logf("CNS unregister volume name : %s", cnsRegisterVolumeName) + + err = createCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + ginkgo.By("Delete CNS unregister volume CR by name " + cnsRegisterVolumeName) + err = deleteCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Waiting for CNS unregister volume to be unregistered") + gomega.Expect(waitForCNSUnRegisterVolumeToGetUnregistered(ctx, + restConfig, cnsUnRegisterVolume, poll, + supervisorClusterOperationsTimeout)).To(gomega.HaveOccurred()) + }) }) diff --git a/tests/e2e/staging_env_basic.go b/tests/e2e/staging_env_basic.go index c2b88e0d34..79bd4f8177 100644 --- a/tests/e2e/staging_env_basic.go +++ b/tests/e2e/staging_env_basic.go @@ -1200,6 +1200,98 @@ var _ = ginkgo.Describe("[csi-supervisor-staging] Tests for WCP env with minimal ginkgo.By("File system resize finished successfully") }) + /* + CNS Unregister volume API + + */ + ginkgo.It("Staging verify cns unregister volume API", func() { + ginkgo.By("Staging verify cns unregister volume API") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var pvclaim *v1.PersistentVolumeClaim + var pv *v1.PersistentVolume + var err error + + ginkgo.By("Creating a PVC") + storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + pvclaim, err = fpv.CreatePVC(ctx, client, namespace, + getPersistentVolumeClaimSpecWithStorageClass(namespace, diskSize, storageclass, nil, v1.ReadWriteOnce)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Expect claim to provision volume successfully") + persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(ctx, client, + []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") + volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + pv = persistentvolumes[0] + + defer func() { + err = fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, pvclaim.Namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create Pod using the above PVC") + pod, vmUUID := createPODandVerifyVolumeMountWithoutF(ctx, client, namespace, pvclaim, volHandle) + + defer func() { + // Delete POD + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace)) + err := fpod.DeletePodWithWait(ctx, client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By(fmt.Sprintf("Verify volume: %s is detached from PodVM with vmUUID: %s", + pv.Spec.CSI.VolumeHandle, vmUUID)) + _, err = e2eVSphere.getVMByUUIDWithWait(ctx, vmUUID, supervisorClusterOperationsTimeout) + gomega.Expect(err).To(gomega.HaveOccurred(), + 
fmt.Sprintf("PodVM with vmUUID: %s still exists. So volume: %s is not detached from the PodVM", + vmUUID, pv.Spec.CSI.VolumeHandle)) + + }() + + // Get a config to talk to the apiserver + restConfig := getRestConfigClient() + + // Get supvervisor cluster client. + _, svNamespace := getSvcClientAndNamespace() + + ginkgo.By("Create CNS register volume with above created FCD") + cnsRegisterVolume := getCNSRegisterVolumeSpec(ctx, svNamespace, volHandle, "", "hello", v1.ReadWriteOnce) + err = createCNSRegisterVolume(ctx, restConfig, cnsRegisterVolume) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(waitForCNSRegisterVolumeToGetCreated(ctx, + restConfig, namespace, cnsRegisterVolume, poll, supervisorClusterOperationsTimeout)) + cnsRegisterVolumeName := cnsRegisterVolume.GetName() + framework.Logf("CNS register volume name : %s", cnsRegisterVolumeName) + + ginkgo.By("Create CNS unregister volume with above created FCD " + volHandle) + + cnsUnRegisterVolume := getCNSUnregisterVolumeSpec(namespace, volHandle) + + cnsUnRegisterVolumeName := cnsUnRegisterVolume.GetName() + + framework.Logf("CNS unregister volume name : %s", cnsUnRegisterVolumeName) + + err = createCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + ginkgo.By("Delete CNS unregister volume CR by name " + cnsUnRegisterVolumeName) + err = deleteCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Waiting for CNS unregister volume to be unregistered") + gomega.Expect(waitForCNSUnRegisterVolumeToGetUnregistered(ctx, + restConfig, cnsUnRegisterVolume, poll, + supervisorClusterOperationsTimeout)).To(gomega.HaveOccurred()) + + }) + // Test for valid disk size of 2Gi ginkgo.It("Verify dynamic provisioning of pv using storageclass with a valid disk size passes", func() { ctx, cancel := context.WithCancel(context.Background()) diff --git a/tests/e2e/tkgs_ha_site_down.go b/tests/e2e/tkgs_ha_site_down.go index dda5b0c6bc..1a5f1b0a9f 100644 --- a/tests/e2e/tkgs_ha_site_down.go +++ b/tests/e2e/tkgs_ha_site_down.go @@ -92,7 +92,8 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SiteDownTests", func() { svcClient, svNamespace := getSvcClientAndNamespace() setResourceQuota(svcClient, svNamespace, rqLimit) } - readVcEsxIpsViaTestbedInfoJson(GetAndExpectStringEnvVar(envTestbedInfoJsonPath)) + + //readVcEsxIpsViaTestbedInfoJson(GetAndExpectStringEnvVar(envTestbedInfoJsonPath)) }) @@ -157,7 +158,8 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SiteDownTests", func() { // Get Cluster details clusterComputeResource, _, err := getClusterName(ctx, &e2eVSphere) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - svcMasterIp := getApiServerIpOfZone(ctx, "zone-2") + //svcMasterIp := getApiServerIpOfZone(ctx, "zone-2") + svcMasterIp := "10.43.214.217" clusterName := getClusterNameFromZone(ctx, "zone-1") for i := 0; i < stsCount; i++ { @@ -773,15 +775,15 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SiteDownTests", func() { ginkgo.By("Put 2 ESX hosts of AZ3 into MM - ensureObjectAccessibilty") hostsInCluster := getHostsByClusterName(ctx, clusterComputeResource, clusterName) - for i := 0; i < len(hostsInCluster)-1; i++ { + for i := 1; i < len(hostsInCluster)-1; i++ { enterHostIntoMM(ctx, hostsInCluster[i], ensureAccessibilityMModeType, timeout, false) } - defer func() { - framework.Logf("Exit the hosts from MM before terminating the test") - for i := 0; i < len(hostsInCluster)-1; 
i++ { - exitHostMM(ctx, hostsInCluster[i], timeout) - } - }() + // defer func() { + // framework.Logf("Exit the hosts from MM before terminating the test") + // for i := 1; i < len(hostsInCluster)-1; i++ { + // exitHostMM(ctx, hostsInCluster[i], timeout) + // } + // }() ginkgo.By("Verify SVC PVC annotations and node affinities on GC and SVC PVs") for _, statefulset := range stsList { @@ -790,7 +792,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SiteDownTests", func() { } ginkgo.By("Exit the hosts from MM") - for i := 0; i < len(hostsInCluster)-1; i++ { + for i := 1; i < len(hostsInCluster)-1; i++ { exitHostMM(ctx, hostsInCluster[i], timeout) } diff --git a/tests/e2e/util.go b/tests/e2e/util.go index 2456b8deb9..07a14b7676 100644 --- a/tests/e2e/util.go +++ b/tests/e2e/util.go @@ -88,6 +88,7 @@ import ( cnsfileaccessconfigv1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/cnsfileaccessconfig/v1alpha1" cnsnodevmattachmentv1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/cnsnodevmattachment/v1alpha1" cnsregistervolumev1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/cnsregistervolume/v1alpha1" + cnsunregistervolumev1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/cnsunregistervolume/v1alpha1" cnsvolumemetadatav1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/cnsvolumemetadata/v1alpha1" storagepolicyv1alpha2 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/storagepolicy/v1alpha2" k8s "sigs.k8s.io/vsphere-csi-driver/v3/pkg/kubernetes" @@ -3243,6 +3244,27 @@ func getCNSRegisterVolumeSpec(ctx context.Context, namespace string, fcdID strin return cnsRegisterVolume } +// Function to create CNS UnregisterVolume spec, with given FCD ID +func getCNSUnregisterVolumeSpec(namespace string, + fcdID string) *cnsunregistervolumev1alpha1.CnsUnregisterVolume { + var ( + cnsUnRegisterVolume *cnsunregistervolumev1alpha1.CnsUnregisterVolume + ) + framework.Logf("get CNS UnregisterVolume API spec") + + cnsUnRegisterVolume = &cnsunregistervolumev1alpha1.CnsUnregisterVolume{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "cnsunregvol-", + Namespace: namespace, + }, + Spec: cnsunregistervolumev1alpha1.CnsUnregisterVolumeSpec{ + VolumeID: fcdID, + }, + } + return cnsUnRegisterVolume +} + // Create CNS register volume. func createCNSRegisterVolume(ctx context.Context, restConfig *rest.Config, cnsRegisterVolume *cnsregistervolumev1alpha1.CnsRegisterVolume) error { @@ -3255,6 +3277,30 @@ func createCNSRegisterVolume(ctx context.Context, restConfig *rest.Config, return err } +// Create CNS Unregister volume. +func createCNSUnRegisterVolume(ctx context.Context, restConfig *rest.Config, + cnsUnRegisterVolume *cnsunregistervolumev1alpha1.CnsUnregisterVolume) error { + + cnsOperatorClient, err := k8s.NewClientForGroup(ctx, restConfig, cnsoperatorv1alpha1.GroupName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("Create CNSUnRegisterVolume") + err = cnsOperatorClient.Create(ctx, cnsUnRegisterVolume) + + return err +} + +// Delete CNS Unregister volume. 
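+// It is used by the tests' deferred cleanup to remove the CnsUnregisterVolume CR via the cnsoperator client.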
+func deleteCNSUnRegisterVolume(ctx context.Context, restConfig *rest.Config, + cnsUnRegisterVolume *cnsunregistervolumev1alpha1.CnsUnregisterVolume) error { + + cnsOperatorClient, err := k8s.NewClientForGroup(ctx, restConfig, cnsoperatorv1alpha1.GroupName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("Delete CNSUnRegisterVolume") + err = cnsOperatorClient.Delete(ctx, cnsUnRegisterVolume) + + return err +} + // Query CNS Register volume. Returns true if the CNSRegisterVolume is // available otherwise false. func queryCNSRegisterVolume(ctx context.Context, restClientConfig *rest.Config, @@ -3280,6 +3326,31 @@ func queryCNSRegisterVolume(ctx context.Context, restClientConfig *rest.Config, } +// Query CNS Unregister volume. Returns true if the CNSUnregisterVolume is +// available otherwise false. +func queryCNSUnregisterVolume(ctx context.Context, restClientConfig *rest.Config, + cnsUnregistervolumeName string, namespace string) bool { + isPresent := false + framework.Logf("cleanUpCnsUnregisterVolumeInstances: start") + cnsOperatorClient, err := k8s.NewClientForGroup(ctx, restClientConfig, cnsoperatorv1alpha1.GroupName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Get list of CnsUnregisterVolume instances from all namespaces. + cnsUnregisterVolumesList := &cnsunregistervolumev1alpha1.CnsUnregisterVolumeList{} + err = cnsOperatorClient.List(ctx, cnsUnregisterVolumesList) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + cns := &cnsunregistervolumev1alpha1.CnsUnregisterVolume{} + err = cnsOperatorClient.Get(ctx, pkgtypes.NamespacedName{Name: cnsUnregistervolumeName, Namespace: namespace}, cns) + if err == nil { + framework.Logf("CNS UnregisterVolume %s Found in the namespace %s:", cnsUnregistervolumeName, namespace) + isPresent = true + } + + return isPresent + +} + // Verify Bi-directional referance of Pv and PVC in case of static volume // provisioning. func verifyBidirectionalReferenceOfPVandPVC(ctx context.Context, client clientset.Interface, @@ -3329,6 +3400,20 @@ func getCNSRegistervolume(ctx context.Context, restClientConfig *rest.Config, return cns } +// Get CNS Unregister volume. +func getCNSUnRegistervolume(ctx context.Context, + restClientConfig *rest.Config, cnsUnRegisterVolume *cnsunregistervolumev1alpha1. + CnsUnregisterVolume) *cnsunregistervolumev1alpha1.CnsUnregisterVolume { + cnsOperatorClient, err := k8s.NewClientForGroup(ctx, restClientConfig, cnsoperatorv1alpha1.GroupName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + cns := &cnsunregistervolumev1alpha1.CnsUnregisterVolume{} + err = cnsOperatorClient.Get(ctx, + pkgtypes.NamespacedName{Name: cnsUnRegisterVolume.Name, Namespace: cnsUnRegisterVolume.Namespace}, cns) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + return cns +} + // Update CNS register volume. func updateCNSRegistervolume(ctx context.Context, restClientConfig *rest.Config, cnsRegisterVolume *cnsregistervolumev1alpha1.CnsRegisterVolume) *cnsregistervolumev1alpha1.CnsRegisterVolume { @@ -4051,6 +4136,46 @@ func waitForCNSRegisterVolumeToGetCreated(ctx context.Context, restConfig *rest. return fmt.Errorf("cnsRegisterVolume %s creation is failed within %v", cnsRegisterVolumeName, timeout) } +// waitForCNSUnRegisterVolumeToGetUnregistered waits for a cnsUnRegisterVolume to get +// created or until timeout occurs, whichever comes first. 
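+// It polls the CR at the given interval and returns nil once status.Unregistered turns true,
+// or an error if that does not happen before the timeout.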
+func waitForCNSUnRegisterVolumeToGetUnregistered(ctx context.Context, restConfig *rest.Config,
+	cnsUnRegisterVolume *cnsunregistervolumev1alpha1.CnsUnregisterVolume, Poll, timeout time.Duration) error {
+	cnsUnRegisterVolumeName := cnsUnRegisterVolume.GetName()
+	framework.Logf("Waiting up to %v for CnsUnRegisterVolume %s to be unregistered", timeout, cnsUnRegisterVolumeName)
+
+	for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
+		cnsUnRegisterVolume = getCNSUnRegistervolume(ctx, restConfig, cnsUnRegisterVolume)
+		if cnsUnRegisterVolume.Status.Unregistered {
+			return nil
+		}
+	}
+
+	return fmt.Errorf("cnsUnRegisterVolume %s was not unregistered within %v", cnsUnRegisterVolumeName, timeout)
+}
+
+// waitForCNSUnRegisterVolumeFailToUnregistered verifies that the given cnsUnRegisterVolume does not
+// get unregistered: it returns nil once status.Unregistered is observed as false, or an error if the
+// volume is unregistered before the timeout.
+func waitForCNSUnRegisterVolumeFailToUnregistered(ctx context.Context, restConfig *rest.Config,
+	cnsUnRegisterVolume *cnsunregistervolumev1alpha1.CnsUnregisterVolume, Poll, timeout time.Duration) error {
+	cnsUnRegisterVolumeName := cnsUnRegisterVolume.GetName()
+	framework.Logf("Waiting up to %v to confirm CnsUnRegisterVolume %s does not get unregistered",
+		timeout, cnsUnRegisterVolumeName)
+
+	for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
+		cnsUnRegisterVolume = getCNSUnRegistervolume(ctx, restConfig, cnsUnRegisterVolume)
+		if !cnsUnRegisterVolume.Status.Unregistered {
+			return nil
+		}
+	}
+
+	return fmt.Errorf("cnsUnRegisterVolume %s was unexpectedly unregistered within %v", cnsUnRegisterVolumeName, timeout)
+}
+
 // waitForCNSRegisterVolumeToGetDeleted waits for a cnsRegisterVolume to get
 // deleted or until timeout occurs, whichever comes first.
 func waitForCNSRegisterVolumeToGetDeleted(ctx context.Context, restConfig *rest.Config, namespace string,
diff --git a/tests/e2e/vmservice_vm.go b/tests/e2e/vmservice_vm.go
index 7673994740..d28297156f 100644
--- a/tests/e2e/vmservice_vm.go
+++ b/tests/e2e/vmservice_vm.go
@@ -1335,4 +1335,372 @@ var _ bool = ginkgo.Describe("[vmsvc] vm service with csi vol tests", func() {
 	_ = formatNVerifyPvcIsAccessible(vm.Status.Volumes[0].DiskUuid, 1, vmIp)
 })
+	/*
+		Test1: Export PVC attached with VM Service
+		Test2: Export statically created PVC attached with VM Service
+
+		Create SPBM Policy and Assign to Namespace:
+		Create a SPBM policy and assign it to the test namespace with sufficient quota.
+
+		Create PVC:
+		Create a PersistentVolumeClaim (PVC) named pvc1 in the test namespace.
+		Verify CNS Metadata
+
+		Create VM Service VM:
+		Create a VM Service VM named vm1 and attach pvc1 to it.
+
+		Verify Volume Access:
+		Once vm1 is up, verify that the volume is accessible inside vm1 and perform I/O operations on pvc1.
+
+		Verify Storage Quota:
+		Confirm that the storage quota in the test namespace appropriately shows the occupied quota from pvc1 and the VM.
+
+		Create and Apply CnsUnregisterVolume CR:
+		Create a CnsUnregisterVolume CR with the volumeID of pvc1 and apply it to the test namespace.
+
+		Verify Unregister Failure:
+		Ensure that the unregister operation fails because the volume is attached to vm1.
+
+		Delete VM Service VM:
+		Delete vm1 and confirm that the deletion is successful.
+
+		Reapply CnsUnregisterVolume CR:
+		Create and apply a new CnsUnregisterVolume CR with the volumeID of pvc1 to the test namespace.
+
+		Check PV/PVC Deletion:
+		Verify that the PersistentVolume (PV) and PVC are deleted from the Supervisor cluster.
+ Verify CNS Metadata + + Verify Storage Quota Freed: + Confirm that the storage quota has been freed following the deletion of PV/PVC. + + Cleanup: + Delete the FCD, the test namespace, and the SPBM policy. + */ + ginkgo.It("[csi-unregister-volume] Export PVC attached with VM Service", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var pandoraSyncWaitTime int + var err error + if os.Getenv(envPandoraSyncWaitTime) != "" { + pandoraSyncWaitTime, err = strconv.Atoi(os.Getenv(envPandoraSyncWaitTime)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + pandoraSyncWaitTime = defaultPandoraSyncWaitTime + } + datastoreURL = GetAndExpectStringEnvVar(envSharedDatastoreURL) + datastore := getDsMoRefFromURL(ctx, datastoreURL) + + ginkgo.By("Creating FCD Disk") + fcdID, err := e2eVSphere.createFCD(ctx, fcdName, diskSizeInMb, datastore.Reference()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow newly created FCD:%s to sync with pandora", + pandoraSyncWaitTime, fcdID)) + time.Sleep(time.Duration(pandoraSyncWaitTime) * time.Second) + + ginkgo.By(fmt.Sprintf("Creating the PV with the fcdID %s", fcdID)) + staticPVLabels := make(map[string]string) + staticPVLabels["fcd-id"] = fcdID + staticPv := getPersistentVolumeSpec(fcdID, v1.PersistentVolumeReclaimDelete, nil, ext4FSType) + staticPv, err = client.CoreV1().PersistentVolumes().Create(ctx, staticPv, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + err = e2eVSphere.waitForCNSVolumeToBeCreated(staticPv.Spec.CSI.VolumeHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Creating a static PVC") + staticPvc := getPersistentVolumeClaimSpec(namespace, staticPVLabels, staticPv.Name) + staticPvc, err = client.CoreV1().PersistentVolumeClaims(namespace).Create( + ctx, staticPvc, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create a storageclass") + storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create a PVC") + pvc, err := createPVC(ctx, client, namespace, nil, "", storageclass, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Waiting for all claims to be in bound state") + pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc, staticPvc}, pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pv := pvs[0] + volHandle := pv.Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + + defer func() { + ginkgo.By("Delete PVCs") + err = fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fpv.DeletePersistentVolumeClaim(ctx, client, staticPvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Waiting for CNS volumes to be deleted") + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(staticPv.Spec.CSI.VolumeHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating VM bootstrap data") + secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace) + defer func() { + ginkgo.By("Deleting VM bootstrap data") + err := client.CoreV1().Secrets(namespace).Delete(ctx, secretName, *metav1.NewDeleteOptions(0)) + 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating VM") + vm := createVmServiceVmWithPvcs( + ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc, staticPvc}, vmi, storageClassName, secretName) + defer func() { + ginkgo.By("Deleting VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating loadbalancing service for ssh with the VM") + vmlbsvc := createService4Vm(ctx, vmopC, namespace, vm.Name) + defer func() { + ginkgo.By("Deleting loadbalancing service for ssh with the VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachineService{ObjectMeta: metav1.ObjectMeta{ + Name: vmlbsvc.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Wait for VM to come up and get an IP") + vmIp, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait and verify PVCs are attached to the VM") + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm, + []*v1.PersistentVolumeClaim{pvc, staticPvc})).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify PVCs are accessible to the VM") + ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity") + vm, err = getVmsvcVM(ctx, vmopC, vm.Namespace, vm.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for i, vol := range vm.Status.Volumes { + volFolder := formatNVerifyPvcIsAccessible(vol.DiskUuid, i+1, vmIp) + verifyDataIntegrityOnVmDisk(vmIp, volFolder) + } + + // Get a config to talk to the apiserver + restConfig := getRestConfigClient() + + cnsUnRegisterVolume := getCNSUnregisterVolumeSpec(namespace, volHandle) + + cnsRegisterVolumeName := cnsUnRegisterVolume.GetName() + framework.Logf("CNS unregister volume name : %s", cnsRegisterVolumeName) + + err = createCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + ginkgo.By("Delete CNS unregister volume CR by name " + cnsRegisterVolumeName) + err = deleteCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Waiting for CNS unregister volume to be unregistered") + framework.ExpectNoError(waitForCNSUnRegisterVolumeFailToUnregistered(ctx, + restConfig, cnsUnRegisterVolume, poll, supervisorClusterOperationsTimeout)) + }) + + /* + Export PVC attached with VM Service while the VM Service VM is in power-off state + + Create SPBM Policy and Assign to Namespace: + Create a SPBM policy and assign it to the test namespace with sufficient quota. + + Create PVC: + Create a PersistentVolumeClaim (PVC) named pvc1 in the test namespace. + Verify CNS Metadata + + Create VM Service VM: + Create a VM Service VM named vm1 and attach pvc1 to it. + + Verify Volume Access: + Once vm1 is up, verify that the volume is accessible inside vm1 and perform I/O operations on pvc1. + + Verify Storage Quota: + Confirm that the storage quota in the test namespace appropriately shows the occupied quota from pvc1 and the VM. + Power-off VM Service VM vm1 + + Create and Apply CnsUnregisterVolume CR: + Create a CnsUnregisterVolume CR with the volumeID of pvc1 and apply it to the test namespace. + + Verify Unregister Failure: + Ensure that the unregister operation fails because the volume is attached to vm1. 
+ + Delete VM Service VM: + Delete vm1 and confirm that the deletion is successful. + + Reapply CnsUnregisterVolume CR: + Create and apply a new CnsUnregisterVolume CR with the volumeID of pvc1 to the test namespace. + + Check PV/PVC Deletion: + Verify that the PersistentVolume (PV) and PVC are deleted from the Supervisor cluster. + Verify CNS Metadata + + Verify Storage Quota Freed: + Confirm that the storage quota has been freed following the deletion of PV/PVC. + + Cleanup: + Delete the FCD, the test namespace, and the SPBM policy. + */ + ginkgo.It("[csi-unregister-volume] Export PVC attached with VM Service while the "+ + "VM Service VM is in power-off state", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var pandoraSyncWaitTime int + var err error + if os.Getenv(envPandoraSyncWaitTime) != "" { + pandoraSyncWaitTime, err = strconv.Atoi(os.Getenv(envPandoraSyncWaitTime)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + pandoraSyncWaitTime = defaultPandoraSyncWaitTime + } + datastoreURL = GetAndExpectStringEnvVar(envSharedDatastoreURL) + datastore := getDsMoRefFromURL(ctx, datastoreURL) + + ginkgo.By("Creating FCD Disk") + fcdID, err := e2eVSphere.createFCD(ctx, fcdName, diskSizeInMb, datastore.Reference()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow newly created FCD:%s to sync with pandora", + pandoraSyncWaitTime, fcdID)) + time.Sleep(time.Duration(pandoraSyncWaitTime) * time.Second) + + ginkgo.By(fmt.Sprintf("Creating the PV with the fcdID %s", fcdID)) + staticPVLabels := make(map[string]string) + staticPVLabels["fcd-id"] = fcdID + staticPv := getPersistentVolumeSpec(fcdID, v1.PersistentVolumeReclaimDelete, nil, ext4FSType) + staticPv, err = client.CoreV1().PersistentVolumes().Create(ctx, staticPv, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + err = e2eVSphere.waitForCNSVolumeToBeCreated(staticPv.Spec.CSI.VolumeHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Creating a static PVC") + staticPvc := getPersistentVolumeClaimSpec(namespace, staticPVLabels, staticPv.Name) + staticPvc, err = client.CoreV1().PersistentVolumeClaims(namespace).Create( + ctx, staticPvc, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create a storageclass") + storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create a PVC") + pvc, err := createPVC(ctx, client, namespace, nil, "", storageclass, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Waiting for all claims to be in bound state") + pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc, staticPvc}, pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pv := pvs[0] + volHandle := pv.Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + + defer func() { + ginkgo.By("Delete PVCs") + err = fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fpv.DeletePersistentVolumeClaim(ctx, client, staticPvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Waiting for CNS volumes to be deleted") + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = 
e2eVSphere.waitForCNSVolumeToBeDeleted(staticPv.Spec.CSI.VolumeHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating VM bootstrap data") + secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace) + + defer func() { + ginkgo.By("Deleting VM bootstrap data") + err := client.CoreV1().Secrets(namespace).Delete(ctx, secretName, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating VM") + vm := createVmServiceVmWithPvcs( + ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc, staticPvc}, vmi, storageClassName, secretName) + + defer func() { + ginkgo.By("Deleting VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating loadbalancing service for ssh with the VM") + vmlbsvc := createService4Vm(ctx, vmopC, namespace, vm.Name) + defer func() { + ginkgo.By("Deleting loadbalancing service for ssh with the VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachineService{ObjectMeta: metav1.ObjectMeta{ + Name: vmlbsvc.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Wait for VM to come up and get an IP") + vmIp, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait and verify PVCs are attached to the VM") + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm, + []*v1.PersistentVolumeClaim{pvc, staticPvc})).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify PVCs are accessible to the VM") + ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity") + vm, err = getVmsvcVM(ctx, vmopC, vm.Namespace, vm.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for i, vol := range vm.Status.Volumes { + volFolder := formatNVerifyPvcIsAccessible(vol.DiskUuid, i+1, vmIp) + verifyDataIntegrityOnVmDisk(vmIp, volFolder) + } + + ginkgo.By("Power off vm") + vm = setVmPowerState(ctx, vmopC, vm, vmopv1.VirtualMachinePoweredOff) + vm, err = wait4Vm2ReachPowerStateInSpec(ctx, vmopC, vm) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Get a config to talk to the apiserver + restConfig := getRestConfigClient() + + ginkgo.By("Create CNS unregister volume with above created FCD " + volHandle) + + cnsUnRegisterVolume := getCNSUnregisterVolumeSpec(namespace, volHandle) + + cnsRegisterVolumeName := cnsUnRegisterVolume.GetName() + framework.Logf("CNS unregister volume name : %s", cnsRegisterVolumeName) + + err = createCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + ginkgo.By("Delete CNS unregister volume CR by name " + cnsRegisterVolumeName) + err = deleteCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Waiting for CNS unregister volume to be unregistered") + framework.ExpectNoError(waitForCNSUnRegisterVolumeFailToUnregistered(ctx, + restConfig, cnsUnRegisterVolume, poll, supervisorClusterOperationsTimeout)) + }) })
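
For reference, below is a minimal sketch (not part of the patch) of how the helpers added to tests/e2e/util.go compose for the positive unregister path. It assumes it lives in the same e2e package; svNamespace and volumeID are placeholders supplied by the caller, and poll / supervisorClusterOperationsTimeout are the existing e2e constants the tests pass in.

// unregisterAndWait files a CnsUnregisterVolume CR for the given CNS volume and waits for the
// controller to mark it unregistered, cleaning up the CR afterwards.
func unregisterAndWait(ctx context.Context, restConfig *rest.Config, svNamespace, volumeID string) error {
	// Build the CR with generateName "cnsunregvol-" and spec.volumeID set to the CNS volume ID.
	cnsUnRegisterVolume := getCNSUnregisterVolumeSpec(svNamespace, volumeID)
	if err := createCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume); err != nil {
		return err
	}
	// Best-effort cleanup of the CR itself once the outcome is known.
	defer func() {
		if err := deleteCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume); err != nil {
			framework.Logf("failed to delete CnsUnregisterVolume %s: %v", cnsUnRegisterVolume.GetName(), err)
		}
	}()
	// Poll status.Unregistered until it turns true or the supervisor operation timeout elapses.
	return waitForCNSUnRegisterVolumeToGetUnregistered(ctx, restConfig, cnsUnRegisterVolume,
		poll, supervisorClusterOperationsTimeout)
}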