Commit 1c13c2b (1 parent: e03934b)

[CNS-UnRegisterVolume-API]: Automation tests for CNS-UnRegisterVolume API Feature

Showing 2 changed files with 363 additions and 0 deletions.
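For context, here is a minimal sketch of the kind of CnsUnregisterVolume custom resource the test helpers below presumably build. The import path, kind, and spec field names are assumptions modeled on the CNS operator's existing CnsRegisterVolume API and are not confirmed by this commit:

	// Hypothetical sketch only: import path and field names are assumptions.
	cnsUnRegisterVolume := &cnsunregistervolumev1alpha1.CnsUnregisterVolume{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "unregister-", // generated name read back later via GetName()
			Namespace:    namespace,
		},
		Spec: cnsunregistervolumev1alpha1.CnsUnregisterVolumeSpec{
			VolumeID: volumeID, // CNS volume handle from pv.Spec.CSI.VolumeHandle
		},
	}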
/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
	"context"
	"fmt"
	"os"
	"strconv"
	"time"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	fnodes "k8s.io/kubernetes/test/e2e/framework/node"
	fpod "k8s.io/kubernetes/test/e2e/framework/pod"
	fpv "k8s.io/kubernetes/test/e2e/framework/pv"
	admissionapi "k8s.io/pod-security-admission/api"
)

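// This suite exercises the CnsUnregisterVolume API, which removes a CNS
// volume from Kubernetes and CNS management while leaving the backing FCD
// on the datastore.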
var _ = ginkgo.Describe("[csi-guest] [csi-supervisor] CNS Unregister Volume", func() {
	f := framework.NewDefaultFramework("cns-unregister-volume")
	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
	const defaultVolumeOpsScale = 30
	const defaultVolumeOpsScaleWCP = 29
	var (
		client            clientset.Interface
		c                 clientset.Interface
		fullSyncWaitTime  int
		namespace         string
		scParameters      map[string]string
		storagePolicyName string
		volumeOpsScale    int
		isServiceStopped  bool
		serviceName       string
		csiReplicaCount   int32
		deployment        *appsv1.Deployment
	)

	ginkgo.BeforeEach(func() {
		bootstrap()
		client = f.ClientSet
		namespace = getNamespaceToRunTests(f)
		scParameters = make(map[string]string)
		isServiceStopped = false
		storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores)
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet)
		framework.ExpectNoError(err, "Unable to find ready and schedulable Node")

		if len(nodeList.Items) == 0 {
			framework.Failf("Unable to find ready and schedulable Node")
		}

		if guestCluster {
			svcClient, svNamespace := getSvcClientAndNamespace()
			setResourceQuota(svcClient, svNamespace, rqLimit)
		}

		if os.Getenv(envVolumeOperationsScale) != "" {
			volumeOpsScale, err = strconv.Atoi(os.Getenv(envVolumeOperationsScale))
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
		} else {
			if vanillaCluster {
				volumeOpsScale = defaultVolumeOpsScale
			} else {
				volumeOpsScale = defaultVolumeOpsScaleWCP
			}
		}
		framework.Logf("VOLUME_OPS_SCALE is set to %v", volumeOpsScale)

		if os.Getenv(envFullSyncWaitTime) != "" {
			fullSyncWaitTime, err = strconv.Atoi(os.Getenv(envFullSyncWaitTime))
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
			// The full sync interval is at least 1 min, so the full sync wait
			// time must be at least 120s.
			if fullSyncWaitTime < 120 || fullSyncWaitTime > defaultFullSyncWaitTime {
				framework.Failf("The FullSync Wait time %v is not set correctly", fullSyncWaitTime)
			}
		} else {
			fullSyncWaitTime = defaultFullSyncWaitTime
		}

		// Get the CSI controller's replica count from the setup so it can be
		// restored in AfterEach if a test scales the driver down.
		controllerClusterConfig := os.Getenv(contollerClusterKubeConfig)
		c = client
		if controllerClusterConfig != "" {
			framework.Logf("Creating client for remote kubeconfig")
			remoteC, err := createKubernetesClientFromConfig(controllerClusterConfig)
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
			c = remoteC
		}
		deployment, err = c.AppsV1().Deployments(csiSystemNamespace).Get(ctx,
			vSphereCSIControllerPodNamePrefix, metav1.GetOptions{})
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		csiReplicaCount = *deployment.Spec.Replicas
	})

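	// AfterEach restarts whichever service the test stopped (the CSI driver,
	// hostd on the ESXi hosts, or a vCenter service), resets the provisioner
	// timeout, and restores resource quotas.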
	ginkgo.AfterEach(func() {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		if isServiceStopped {
			if serviceName == "CSI" {
				framework.Logf("Starting CSI driver")
				ignoreLabels := make(map[string]string)
				err := updateDeploymentReplicawithWait(c, csiReplicaCount, vSphereCSIControllerPodNamePrefix,
					csiSystemNamespace)
				gomega.Expect(err).NotTo(gomega.HaveOccurred())

				// Wait for the CSI pods to be up and running.
				csiPods, err := fpod.GetPodsInNamespace(ctx, client, csiSystemNamespace, ignoreLabels)
				gomega.Expect(err).NotTo(gomega.HaveOccurred())
				err = fpod.WaitForPodsRunningReady(ctx, client, csiSystemNamespace, int32(len(csiPods)), 0,
					pollTimeout)
				gomega.Expect(err).NotTo(gomega.HaveOccurred())
			} else if serviceName == hostdServiceName {
				framework.Logf("Starting the hostd service on all hosts")
				hostIPs := getAllHostsIP(ctx, true)
				for _, hostIP := range hostIPs {
					startHostDOnHost(ctx, hostIP)
				}
			} else {
				vcAddress := e2eVSphere.Config.Global.VCenterHostname + ":" + sshdPort
				ginkgo.By(fmt.Sprintf("Starting %v on the vCenter host", serviceName))
				err := invokeVCenterServiceControl(ctx, startOperation, serviceName, vcAddress)
				gomega.Expect(err).NotTo(gomega.HaveOccurred())
				err = waitVCenterServiceToBeInState(ctx, serviceName, vcAddress, svcRunningMessage)
				gomega.Expect(err).NotTo(gomega.HaveOccurred())
			}
		}

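		// Restore the provisioner timeout in case the test shortened it.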
ginkgo.By(fmt.Sprintf("Resetting provisioner time interval to %s sec", defaultProvisionerTimeInSec)) | ||
updateCSIDeploymentProvisionerTimeout(c, csiSystemNamespace, defaultProvisionerTimeInSec) | ||
|
||
if supervisorCluster { | ||
deleteResourceQuota(client, namespace) | ||
dumpSvcNsEventsOnTestFailure(client, namespace) | ||
} | ||
if guestCluster { | ||
svcClient, svNamespace := getSvcClientAndNamespace() | ||
setResourceQuota(svcClient, svNamespace, defaultrqLimit) | ||
dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) | ||
} | ||
}) | ||
|
||
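	// Provisions PVCs at VOLUME_OPS_SCALE, unregisters each bound volume via
	// the CnsUnregisterVolume API, and verifies the volumes are removed from
	// Kubernetes and CNS while the backing FCDs remain on the datastore.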
ginkgo.It("export detached volume", func() { | ||
serviceName = vsanhealthServiceName | ||
exportDetachedVolume(namespace, client, storagePolicyName, scParameters, | ||
volumeOpsScale, true) | ||
}) | ||
}) | ||
|
||
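// exportDetachedVolume provisions volumeOpsScale PVCs against a
// thick-provision storage policy, waits for them to reach Bound, creates a
// CnsUnregisterVolume CR for each backing volume, verifies the volumes are
// removed from Kubernetes and CNS, and finally deletes the leftover FCDs
// from the datastore.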
func exportDetachedVolume(namespace string, client clientset.Interface,
	storagePolicyName string, scParameters map[string]string, volumeOpsScale int, extendVolume bool) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	var storageclass *storagev1.StorageClass
	var persistentvolumes []*v1.PersistentVolume
	var pvclaims []*v1.PersistentVolumeClaim
	var err error
	pvclaims = make([]*v1.PersistentVolumeClaim, volumeOpsScale)

	// Get a config to talk to the apiserver.
	restConfig := getRestConfigClient()

	framework.Logf("storagePolicyName %v", storagePolicyName)
	framework.Logf("extendVolume %v", extendVolume)

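	// On WCP (supervisor) setups, build a storage class from the
	// thick-provision SPBM policy ID; on guest clusters, reuse the
	// pre-created supervisor storage class and enable volume expansion on it.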
	if supervisorCluster {
		ginkgo.By("CNS_TEST: Running for WCP setup")
		thickProvPolicy := os.Getenv(envStoragePolicyNameWithThickProvision)
		if thickProvPolicy == "" {
			ginkgo.Skip(envStoragePolicyNameWithThickProvision + " env variable not set")
		}
		profileID := e2eVSphere.GetSpbmPolicyID(thickProvPolicy)
		scParameters[scParamStoragePolicyID] = profileID
		// Create resource quota.
		createResourceQuota(client, namespace, rqLimit, thickProvPolicy)
		storageclass, err = createStorageClass(client, scParameters, nil, "", "", true, thickProvPolicy)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
	} else {
		ginkgo.By("CNS_TEST: Running for GC setup")
		thickProvPolicy := os.Getenv(envStoragePolicyNameWithThickProvision)
		if thickProvPolicy == "" {
			ginkgo.Skip(envStoragePolicyNameWithThickProvision + " env variable not set")
		}
		createResourceQuota(client, namespace, rqLimit, thickProvPolicy)
		scParameters[svStorageClassName] = thickProvPolicy
		scParameters[scParamFsType] = ext4FSType
		storageclass, err = client.StorageV1().StorageClasses().Get(ctx, thickProvPolicy, metav1.GetOptions{})
		if !apierrors.IsNotFound(err) {
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
		}
		var allowExpansion = true
		storageclass.AllowVolumeExpansion = &allowExpansion
		storageclass, err = client.StorageV1().StorageClasses().Update(ctx, storageclass, metav1.UpdateOptions{})
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
	}
	gomega.Expect(err).NotTo(gomega.HaveOccurred())

ginkgo.By("Creating PVCs using the Storage Class") | ||
framework.Logf("VOLUME_OPS_SCALE is set to %v", volumeOpsScale) | ||
for i := 0; i < volumeOpsScale; i++ { | ||
framework.Logf("Creating pvc%v", i) | ||
pvclaims[i], err = fpv.CreatePVC(ctx, client, namespace, | ||
getPersistentVolumeClaimSpecWithStorageClass(namespace, diskSize, storageclass, nil, "")) | ||
gomega.Expect(err).NotTo(gomega.HaveOccurred()) | ||
} | ||
|
||
ginkgo.By("Waiting for all claims to be in bound state") | ||
persistentvolumes, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, | ||
2*framework.ClaimProvisionTimeout) | ||
gomega.Expect(err).NotTo(gomega.HaveOccurred()) | ||
|
||
	// TODO: Add logic to check that no orphan volumes are left behind.
	defer func() {
		for _, claim := range pvclaims {
			err := fpv.DeletePersistentVolumeClaim(ctx, client, claim.Name, namespace)
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
		}
		ginkgo.By("Verify PVs, volumes are deleted from CNS")
		for _, pv := range persistentvolumes {
			err := fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, framework.Poll,
				framework.PodDeleteTimeout)
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
			volumeID := pv.Spec.CSI.VolumeHandle
			err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeID)
			gomega.Expect(err).NotTo(gomega.HaveOccurred(),
				fmt.Sprintf("Volume: %s should not be present in the "+
					"CNS after it is deleted from kubernetes", volumeID))
		}
	}()
	for _, pv := range persistentvolumes {
		volumeID := pv.Spec.CSI.VolumeHandle
		// Give the volume time to settle before unregistering it.
		time.Sleep(30 * time.Second)

		ginkgo.By("Create CNS unregister volume for the FCD created above: " + pv.Spec.CSI.VolumeHandle)
		cnsUnRegisterVolume := getCNSUnRegisterVolumeSpec(ctx, namespace, volumeID)
		err = createCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		framework.ExpectNoError(waitForCNSUnRegisterVolumeToGetCreated(ctx,
			restConfig, namespace, cnsUnRegisterVolume, poll, supervisorClusterOperationsTimeout))
		cnsUnRegisterVolumeName := cnsUnRegisterVolume.GetName()
		framework.Logf("CNS unregister volume name : %s", cnsUnRegisterVolumeName)
	}

ginkgo.By("Verify PVs, volumes are deleted from CNS") | ||
for _, pv := range persistentvolumes { | ||
err := fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, framework.Poll, | ||
framework.PodDeleteTimeout) | ||
gomega.Expect(err).NotTo(gomega.HaveOccurred()) | ||
volumeID := pv.Spec.CSI.VolumeHandle | ||
err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeID) | ||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), | ||
fmt.Sprintf("Volume: %s should not be present in the "+ | ||
"CNS after it is deleted from kubernetes", volumeID)) | ||
} | ||
|
||
	defaultDatastore = getDefaultDatastore(ctx)
	ginkgo.By(fmt.Sprintf("defaultDatastore %v", defaultDatastore))

	for _, pv1 := range persistentvolumes {
		ginkgo.By(fmt.Sprintf("Deleting FCD: %s", pv1.Spec.CSI.VolumeHandle))
		err = deleteFcdWithRetriesForSpecificErr(ctx, pv1.Spec.CSI.VolumeHandle, defaultDatastore.Reference(),
			[]string{disklibUnlinkErr}, []string{objOrItemNotFoundErr})
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
	}
}