supervisor snapshot code changes
sipriyaa committed Aug 1, 2024
1 parent 2bb14e7 commit c2f9bc1
Showing 8 changed files with 1,526 additions and 1,525 deletions.
2,871 changes: 1,421 additions & 1,450 deletions tests/e2e/csi_snapshot_basic.go

Large diffs are not rendered by default.

43 changes: 21 additions & 22 deletions tests/e2e/csi_snapshot_file_volume.go
@@ -31,7 +31,6 @@ import (
fnodes "k8s.io/kubernetes/test/e2e/framework/node"
fpv "k8s.io/kubernetes/test/e2e/framework/pv"

snapV1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1"
snapclient "github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned"
)

@@ -101,7 +100,6 @@ var _ = ginkgo.Describe("[file-vanilla-snapshot] Volume Snapshot file volume Tes
storageclass, pvclaim, err = createPVCAndStorageClass(ctx, client,
namespace, nil, scParameters, diskSize, nil, "", false, v1.ReadWriteMany)
gomega.Expect(err).NotTo(gomega.HaveOccurred())

defer func() {
err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
@@ -113,7 +111,6 @@ var _ = ginkgo.Describe("[file-vanilla-snapshot] Volume Snapshot file volume Tes
gomega.Expect(err).NotTo(gomega.HaveOccurred())
volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle
gomega.Expect(volHandle).NotTo(gomega.BeEmpty())

defer func() {
err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
@@ -122,32 +119,34 @@ var _ = ginkgo.Describe("[file-vanilla-snapshot] Volume Snapshot file volume Tes
}()

ginkgo.By("Create volume snapshot class")
volumeSnapshotClass, err := snapc.SnapshotV1().VolumeSnapshotClasses().Create(ctx,
getVolumeSnapshotClassSpec(snapV1.DeletionPolicy("Delete"), nil), metav1.CreateOptions{})
volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
defer func() {
if vanillaCluster {
err = snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name,
metav1.DeleteOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
}()

ginkgo.By("Create a dynamic volume snapshot")
volumeSnapshot, snapshotContent, _,
_, _, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass,
pvclaim, volHandle, diskSize, true)
// snapshot is not supported on file volume, expecting an error to occur
gomega.Expect(err).To(gomega.HaveOccurred())
defer func() {
err := snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, metav1.DeleteOptions{})
err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}()

ginkgo.By("Create a volume snapshot")
volumeSnapshot, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx,
getVolumeSnapshotSpec(namespace, volumeSnapshotClass.Name, pvclaim.Name), metav1.CreateOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.Logf("Volume snapshot name is : %s", volumeSnapshot.Name)
snapshotCreated := true
framework.Logf("Deleting volume snapshot")
deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime)

defer func() {
if snapshotCreated {
framework.Logf("Deleting volume snapshot")
deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime)
}
framework.Logf("Wait till the volume snapshot is deleted")
err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc,
*volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}()

ginkgo.By("Verify volume snapshot is created")
volumeSnapshot, err = waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, volumeSnapshot.Name)
gomega.Expect(err).To(gomega.HaveOccurred())
} else {
ginkgo.Skip("Block volume setup, so skip file volume provision")
}
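
Because the unified view above interleaves removed and added lines, the net effect of the rewritten hunk is easier to read in one place. A minimal sketch of the new flow, using only helpers that appear in this diff (createDynamicVolumeSnapshot, deleteVolumeSnapshotWithPandoraWait, deleteVolumeSnapshotContent); the exact ordering of the cleanup steps is approximate:

	ginkgo.By("Create a dynamic volume snapshot")
	volumeSnapshot, snapshotContent, _,
		_, _, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass,
		pvclaim, volHandle, diskSize, true)
	// Snapshots are not supported on file volumes, so creation is expected to fail.
	gomega.Expect(err).To(gomega.HaveOccurred())

	// Clean up whatever snapshot and content objects the failed attempt left behind.
	framework.Logf("Deleting volume snapshot")
	deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime)
	err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, pandoraSyncWaitTime)
	gomega.Expect(err).NotTo(gomega.HaveOccurred())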
31 changes: 18 additions & 13 deletions tests/e2e/csi_snapshot_negative.go
@@ -237,58 +237,59 @@ var _ = ginkgo.Describe("[block-snapshot-negative] Volume Snapshot Fault-Injecti
volumeSnapshot, err = waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, volumeSnapshot.Name)
gomega.Expect(err).To(gomega.HaveOccurred())
})
/*

/* Testcase-20
Snapshot lifecycle ops with fault-injection
1. Create Snapshot (Pre-provisioned and dynamic)
2. Delete Snapshot
3. Create Volume from Snapshot
4. During 1a, 1b and 1c run the following fault events and ensure the operator
eventually succeeds and there is no functional impact
5. vSphere side service restarts: vpxd, sps, vsan-health, host-restart
6. k8s side: csi pod restarts with improved_idempotency enabled as well
as run a scenario with improved_idempotency disabled
*/
ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] create volume snapshot when "+
ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot][supervisor-snapshot] create volume snapshot when "+
"hostd goes down", ginkgo.Label(p0, block, vanilla, tkg, snapshot, disruptive), func() {

serviceName = hostdServiceName
snapshotOperationWhileServiceDown(serviceName, namespace, client, snapc, datastoreURL,
csiNamespace, fullSyncWaitTime, isServiceStopped, true, csiReplicas, pandoraSyncWaitTime)
})
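
The It blocks in this file only select the service name and delegate to snapshotOperationWhileServiceDown; the part of that helper that actually injects the fault falls outside the rendered hunks. A rough sketch of that step is shown below; stopVcService and startVcService are illustrative stand-ins rather than helpers from this repository, while the snapshot calls mirror ones visible later in this diff:

	ginkgo.By(fmt.Sprintf("Stopping %v on the vCenter host", serviceName))
	stopVcService(ctx, serviceName) // hypothetical helper, stands in for the real service-control call
	isServiceStopped = true
	defer func() {
		if isServiceStopped {
			ginkgo.By(fmt.Sprintf("Starting %v on the vCenter host", serviceName))
			startVcService(ctx, serviceName) // hypothetical helper
			isServiceStopped = false
		}
	}()

	ginkgo.By("Create a volume snapshot while the service is down")
	snapshot, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx,
		getVolumeSnapshotSpec(namespace, volumeSnapshotClass.Name, pvclaim.Name), metav1.CreateOptions{})
	gomega.Expect(err).NotTo(gomega.HaveOccurred())
	framework.Logf("Volume snapshot name is : %s", snapshot.Name)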

ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] create volume snapshot when CSI "+
ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot]create volume snapshot when CSI "+
"restarts", ginkgo.Label(p0, block, vanilla, tkg, snapshot, disruptive), func() {

serviceName = "CSI"
snapshotOperationWhileServiceDown(serviceName, namespace, client, snapc, datastoreURL,
csiNamespace, fullSyncWaitTime, isServiceStopped, true, csiReplicas, pandoraSyncWaitTime)
})

ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] create volume snapshot when VPXD "+
ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot][supervisor-snapshot] create volume snapshot when VPXD "+
"goes down", ginkgo.Label(p0, block, vanilla, tkg, snapshot, disruptive), func() {

serviceName = vpxdServiceName
snapshotOperationWhileServiceDown(serviceName, namespace, client, snapc, datastoreURL,
csiNamespace, fullSyncWaitTime, isServiceStopped, false, csiReplicas, pandoraSyncWaitTime)
})

ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] create volume snapshot when CNS goes "+
ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot][supervisor-snapshot] create volume snapshot when CNS goes "+
"down", ginkgo.Label(p0, block, vanilla, tkg, snapshot, disruptive), func() {

serviceName = vsanhealthServiceName
snapshotOperationWhileServiceDown(serviceName, namespace, client, snapc, datastoreURL,
csiNamespace, fullSyncWaitTime, isServiceStopped, false, csiReplicas, pandoraSyncWaitTime)
})

ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] create volume snapshot when SPS "+
ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot][supervisor-snapshot] create volume snapshot when SPS "+
"goes down", ginkgo.Label(p0, block, vanilla, tkg, snapshot, disruptive), func() {

serviceName = spsServiceName
snapshotOperationWhileServiceDown(serviceName, namespace, client, snapc, datastoreURL,
csiNamespace, fullSyncWaitTime, isServiceStopped, true, csiReplicas, pandoraSyncWaitTime)
})

ginkgo.It("[tkg-snapshot] create volume snapshot when SVC CSI restarts", ginkgo.Label(p0,
ginkgo.It("[tkg-snapshot] [supervisor-snapshot] create volume snapshot when SVC CSI restarts", ginkgo.Label(p0,
tkg, snapshot, disruptive, newTest), func() {

serviceName = "WCP CSI"
@@ -318,7 +319,6 @@ func snapshotOperationWhileServiceDown(serviceName string, namespace string,
scParameters[scParamDatastoreURL] = datastoreURL
storageclass, err = createStorageClass(client, scParameters, nil, "", "", false, "")
gomega.Expect(err).NotTo(gomega.HaveOccurred())

defer func() {
err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
@@ -339,25 +339,30 @@
volHandle = getVolumeIDFromSupervisorCluster(volHandle)
}
gomega.Expect(volHandle).NotTo(gomega.BeEmpty())

defer func() {
err := fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}()

ginkgo.By("Create/Get volume snapshot class")
ginkgo.By("Create volume snapshot class")
volumeSnapshotClass, err = createVolumeSnapshotClass(ctx, snapc, deletionPolicy)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
defer func() {
if vanillaCluster {
err = snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name,
metav1.DeleteOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
}()

ginkgo.By("Create a volume snapshot")
snapshot, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx,
getVolumeSnapshotSpec(namespace, volumeSnapshotClass.Name, pvclaim.Name), metav1.CreateOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.Logf("Volume snapshot name is : %s", snapshot.Name)
snapshotCreated := true

defer func() {
if snapshotCreated {
framework.Logf("Deleting volume snapshot")
38 changes: 23 additions & 15 deletions tests/e2e/csi_snapshot_utils.go
@@ -335,20 +335,21 @@ func deleteVolumeSnapshot(ctx context.Context, snapc *snapclient.Clientset, name

// getVolumeSnapshotIdFromSnapshotHandle fetches VolumeSnapshotId From SnapshotHandle
func getVolumeSnapshotIdFromSnapshotHandle(ctx context.Context,
snapshotContent *snapV1.VolumeSnapshotContent) (string, error) {
snapshotContent *snapV1.VolumeSnapshotContent) (string, string, error) {
var snapshotID string
var snapshotHandle string
var err error
if vanillaCluster {
snapshotHandle := *snapshotContent.Status.SnapshotHandle
snapshotHandle = *snapshotContent.Status.SnapshotHandle
snapshotID = strings.Split(snapshotHandle, "+")[1]
} else if guestCluster {
snapshotHandle := *snapshotContent.Status.SnapshotHandle
snapshotHandle = *snapshotContent.Status.SnapshotHandle
snapshotID, _, _, err = getSnapshotHandleFromSupervisorCluster(ctx, snapshotHandle)
if err != nil {
return "", err
return snapshotID, snapshotHandle, err
}
}
return snapshotID, nil
return snapshotID, snapshotHandle, nil
}
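
Since the function now also returns the snapshot handle, every call site needs a second receiver. A minimal sketch of an updated caller (snapshotContent and the Gomega/framework imports are assumed to be in scope, as in the tests above):

	framework.Logf("Get volume snapshot ID from snapshot handle")
	snapshotId, snapshotHandle, err := getVolumeSnapshotIdFromSnapshotHandle(ctx, snapshotContent)
	gomega.Expect(err).NotTo(gomega.HaveOccurred())
	framework.Logf("Snapshot ID: %s, snapshot handle: %s", snapshotId, snapshotHandle)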

// createVolumeSnapshotClass creates VSC for a Vanilla cluster and
@@ -399,53 +400,53 @@ func createDynamicVolumeSnapshot(ctx context.Context, namespace string,
snapc *snapclient.Clientset, volumeSnapshotClass *snapV1.VolumeSnapshotClass,
pvclaim *v1.PersistentVolumeClaim, volHandle string, diskSize string,
performCnsQueryVolumeSnapshot bool) (*snapV1.VolumeSnapshot,
*snapV1.VolumeSnapshotContent, bool, bool, string, error) {
*snapV1.VolumeSnapshotContent, bool, bool, string, string, error) {

volumeSnapshot, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx,
getVolumeSnapshotSpec(namespace, volumeSnapshotClass.Name, pvclaim.Name), metav1.CreateOptions{})
if err != nil {
return nil, nil, false, false, "", err
return volumeSnapshot, nil, false, false, "", "", err
}
framework.Logf("Volume snapshot name is : %s", volumeSnapshot.Name)

ginkgo.By("Verify volume snapshot is created")
volumeSnapshot, err = waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, volumeSnapshot.Name)
if err != nil {
return nil, nil, false, false, "", err
return volumeSnapshot, nil, false, false, "", "", err
}

snapshotCreated := true
if volumeSnapshot.Status.RestoreSize.Cmp(resource.MustParse(diskSize)) != 0 {
return nil, nil, false, false, "", fmt.Errorf("unexpected restore size")
return volumeSnapshot, nil, false, false, "", "", fmt.Errorf("unexpected restore size")
}

ginkgo.By("Verify volume snapshot content is created")
snapshotContent, err := snapc.SnapshotV1().VolumeSnapshotContents().Get(ctx,
*volumeSnapshot.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{})
if err != nil {
return nil, nil, false, false, "", err
return volumeSnapshot, snapshotContent, false, false, "", "", err
}
snapshotContentCreated := true
snapshotContent, err = waitForVolumeSnapshotContentReadyToUse(*snapc, ctx, snapshotContent.Name)
if err != nil {
return nil, nil, false, false, "", fmt.Errorf("volume snapshot content is not ready to use")
return volumeSnapshot, snapshotContent, false, false, "", "", fmt.Errorf("volume snapshot content is not ready to use")
}

framework.Logf("Get volume snapshot ID from snapshot handle")
snapshotId, err := getVolumeSnapshotIdFromSnapshotHandle(ctx, snapshotContent)
snapshotId, snapshotHandle, err := getVolumeSnapshotIdFromSnapshotHandle(ctx, snapshotContent)
if err != nil {
return nil, nil, false, false, "", err
return volumeSnapshot, snapshotContent, false, false, snapshotId, "", err
}

if performCnsQueryVolumeSnapshot {
ginkgo.By("Query CNS and check the volume snapshot entry")
err = waitForCNSSnapshotToBeCreated(volHandle, snapshotId)
if err != nil {
return nil, nil, false, false, snapshotId, err
return volumeSnapshot, snapshotContent, false, false, snapshotId, "", err
}
}

return volumeSnapshot, snapshotContent, snapshotCreated, snapshotContentCreated, snapshotId, nil
return volumeSnapshot, snapshotContent, snapshotCreated, snapshotContentCreated, snapshotId, snapshotHandle, nil
}
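
Callers of createDynamicVolumeSnapshot now receive seven values instead of six. A sketch of an updated call site, assuming the usual test-scope variables (namespace, snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize) are already defined:

	volumeSnapshot, snapshotContent, snapshotCreated, snapshotContentCreated,
		snapshotId, snapshotHandle, err := createDynamicVolumeSnapshot(ctx, namespace, snapc,
		volumeSnapshotClass, pvclaim, volHandle, diskSize, true)
	gomega.Expect(err).NotTo(gomega.HaveOccurred())
	framework.Logf("Snapshot %s (id %s, handle %s): created=%v, contentCreated=%v, content=%s",
		volumeSnapshot.Name, snapshotId, snapshotHandle, snapshotCreated,
		snapshotContentCreated, snapshotContent.Name)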

// getPersistentVolumeClaimSpecWithDatasource return the PersistentVolumeClaim
@@ -636,13 +637,20 @@ func verifyVolumeRestoreOperation(ctx context.Context, client clientset.Interfac
gomega.Expect(err).NotTo(gomega.HaveOccurred())

var vmUUID string
var exists bool
nodeName := pod.Spec.NodeName

if vanillaCluster {
vmUUID = getNodeUUID(ctx, client, pod.Spec.NodeName)
} else if guestCluster {
vmUUID, err = getVMUUIDFromNodeName(pod.Spec.NodeName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
} else if supervisorCluster {
annotations := pod.Annotations
vmUUID, exists = annotations[vmUUIDLabel]
gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("Pod doesn't have %s annotation", vmUUIDLabel))
_, err := e2eVSphere.getVMByUUID(ctx, vmUUID)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}

ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volHandle2, nodeName))
6 changes: 5 additions & 1 deletion tests/e2e/e2e_common.go
@@ -89,18 +89,22 @@ const (
envSharedNFSDatastoreURL = "SHARED_NFS_DATASTORE_URL"
envSharedVMFSDatastoreURL = "SHARED_VMFS_DATASTORE_URL"
envSharedVMFSDatastore2URL = "SHARED_VMFS_DATASTORE2_URL"
envSharedVsan2DatastoreURL = "SHARED_VSAN2_DATASTORE_URL"
envSharedVsanDirectDatastoreURL = "SHARED_VSANDIRECT_DATASTORE_URL"
envVMClass = "VM_CLASS"
envVsanDirectSetup = "USE_VSAN_DIRECT_DATASTORE_IN_WCP"
envVsanDDatastoreURL = "SHARED_VSAND_DATASTORE_URL"
envVsanDDatastore2URL = "SHARED_VSAND_DATASTORE2_URL"
envStoragePolicyNameForNonSharedDatastores = "STORAGE_POLICY_FOR_NONSHARED_DATASTORES"
envStoragePolicyNameForSharedDatastores = "STORAGE_POLICY_FOR_SHARED_DATASTORES"
envStoragePolicyNameForHCIRemoteDatastores = "STORAGE_POLICY_FOR_HCI_REMOTE_DS"
envStoragePolicyNameForVsanVmfsDatastores = "STORAGE_POLICY_FOR_VSAN_VMFS_DATASTORES"
envStoragePolicyNameForVsanNfsDatastores = "STORAGE_POLICY_FOR_VSAN_NFS_DATASTORES"
envStoragePolicyNameForSharedDatastores2 = "STORAGE_POLICY_FOR_SHARED_DATASTORES_2"
envStoragePolicyNameForVmfsDatastores = "STORAGE_POLICY_FOR_VMFS_DATASTORES"
envStoragePolicyNameForNfsDatastores = "STORAGE_POLICY_FOR_NFS_DATASTORES"
envStoragePolicyNameForVvolDatastores = "STORAGE_POLICY_FOR_VVOL_DATASTORES"
envStoragePolicyNameForVsan2Datastore = "STORAGE_POLICY_FOR_VSAN2_DATASTORE"
envStoragePolicyNameForVsanDirectDatastore = "STORAGE_POLICY_FOR_VSANDIRECT_DATASTORE"
envStoragePolicyNameFromInaccessibleZone = "STORAGE_POLICY_FROM_INACCESSIBLE_ZONE"
envStoragePolicyNameWithThickProvision = "STORAGE_POLICY_WITH_THICK_PROVISIONING"
envSupervisorClusterNamespace = "SVC_NAMESPACE"
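
The added constants only name environment variables; their values come from the test-bed configuration. A minimal sketch of how one might be consumed in a test, using os.Getenv directly as a stand-in for whatever lookup helper the suite actually uses:

	// Read the shared vSAN2 datastore URL configured for this test bed; skip if unset.
	sharedVsan2DatastoreURL := os.Getenv(envSharedVsan2DatastoreURL)
	if sharedVsan2DatastoreURL == "" {
		ginkgo.Skip("SHARED_VSAN2_DATASTORE_URL is not set")
	}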
