From 4c9bb703410cf28aeb8d288769f504c0c527a186 Mon Sep 17 00:00:00 2001
From: Benamar Mekhissi
Date: Tue, 17 Dec 2024 03:54:07 -0500
Subject: [PATCH 1/5] Enable VolSync protection for any PVC type via DRPC
 annotation

VolSync protection can now be enabled for any PVC type by adding a
specific annotation to the DRPC.

Signed-off-by: Benamar Mekhissi
(cherry picked from commit 7b0ad5195eb0e048699d1d5d41832923628e4010)
---
 internal/controller/drplacementcontrol.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/internal/controller/drplacementcontrol.go b/internal/controller/drplacementcontrol.go
index df8486ec9..a25df76e6 100644
--- a/internal/controller/drplacementcontrol.go
+++ b/internal/controller/drplacementcontrol.go
@@ -1707,6 +1707,7 @@ func (d *DRPCInstance) updateVRGOptionalFields(vrg, vrgFromView *rmn.VolumeRepli
 		DoNotDeletePVCAnnotation:      d.instance.GetAnnotations()[DoNotDeletePVCAnnotation],
 		DRPCUIDAnnotation:             string(d.instance.UID),
 		rmnutil.IsCGEnabledAnnotation: d.instance.GetAnnotations()[rmnutil.IsCGEnabledAnnotation],
+		rmnutil.UseVolSyncAnnotation: d.instance.GetAnnotations()[rmnutil.UseVolSyncAnnotation],
 	}
 
 	vrg.Spec.ProtectedNamespaces = d.instance.Spec.ProtectedNamespaces
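The one-line change above is easiest to see end to end with a toy sketch. This is illustrative only: the literal annotation key below is an assumption modeled on the neighboring rmnutil.IsCGEnabledAnnotation (the authoritative value is the rmnutil.UseVolSyncAnnotation constant in Ramen), and propagateUseVolSync is a hypothetical stand-in for the copy performed inside updateVRGOptionalFields.

```go
package main

import "fmt"

// Assumed key, modeled on rmnutil.IsCGEnabledAnnotation; the authoritative
// value is the rmnutil.UseVolSyncAnnotation constant in the Ramen codebase.
const useVolSyncAnnotation = "drplacementcontrol.ramendr.openshift.io/use-volsync"

// propagateUseVolSync mirrors the one-line change above: whatever value the
// user set on the DRPC is copied into the VRG's annotations, so the
// per-cluster reconciler can apply VolSync protection to any PVC type.
func propagateUseVolSync(drpcAnnotations, vrgAnnotations map[string]string) {
	if val, ok := drpcAnnotations[useVolSyncAnnotation]; ok {
		vrgAnnotations[useVolSyncAnnotation] = val
	}
}

func main() {
	drpc := map[string]string{useVolSyncAnnotation: "true"} // user opts in on the DRPC
	vrg := map[string]string{}

	propagateUseVolSync(drpc, vrg)
	fmt.Println(vrg)
}
```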
From 42aeee75f9dedb59128bc1609d0bf50e79d5b468 Mon Sep 17 00:00:00 2001
From: Benamar Mekhissi
Date: Tue, 17 Dec 2024 03:55:43 -0500
Subject: [PATCH 2/5] Restrict ReadOnlyMany accessMode to CephFS PVCs only

Previously, temporary PVCs for all storage types were created with the
ReadOnlyMany access mode. This commit ensures that only CephFS temporary
PVCs use the ReadOnlyMany access mode.

Additionally, the need for a special storage class tagged for
backingSnapshot has been removed. Instead, the same storage class used
for the ReadWriteMany PVCs is now used to create the temporary
ReadOnlyMany PVCs, simplifying the storage configuration.

Signed-off-by: Benamar Mekhissi
(cherry picked from commit e6dece758c925a943ee480cd818dcd128adfff89)
---
 internal/controller/volsync/vshandler.go      | 88 ++++---------------
 internal/controller/volsync/vshandler_test.go | 10 ++-
 2 files changed, 24 insertions(+), 74 deletions(-)

diff --git a/internal/controller/volsync/vshandler.go b/internal/controller/volsync/vshandler.go
index 03b1f9b1e..e67e94912 100644
--- a/internal/controller/volsync/vshandler.go
+++ b/internal/controller/volsync/vshandler.go
@@ -408,13 +408,9 @@ func (v *VSHandler) createOrUpdateRS(rsSpec ramendrv1alpha1.VolSyncReplicationSo
 		return nil, err
 	}
 
-	volumeSnapshotClassName, err := v.getVolumeSnapshotClassFromPVCStorageClass(storageClass)
-	if err != nil {
-		return nil, err
-	}
+	v.ModifyRSSpecForCephFS(&rsSpec, storageClass)
 
-	// Fix for CephFS (replication source only) - may need different storageclass and access modes
-	err = v.ModifyRSSpecForCephFS(&rsSpec, storageClass)
+	volumeSnapshotClassName, err := v.getVolumeSnapshotClassFromPVCStorageClass(storageClass)
 	if err != nil {
 		return nil, err
 	}
@@ -1338,60 +1334,13 @@ func (v *VSHandler) getRsyncServiceType() *corev1.ServiceType {
 // For CephFS only, there is a problem where restoring a PVC from snapshot can be very slow when there are a lot of
 // files - on every replication cycle we need to create a PVC from snapshot in order to get a point-in-time copy of
 // the source PVC to sync with the replicationdestination.
-// This workaround follows the instructions here:
-// https://github.com/ceph/ceph-csi/blob/devel/docs/cephfs-snapshot-backed-volumes.md
-//
-// Steps:
-// 1. If the storageclass detected is cephfs, create a new storageclass with backingSnapshot: "true" parameter
-// (or reuse if it already exists). If not cephfs, return and do not modify rsSpec.
-// 2. Modify rsSpec to use the new storageclass and also update AccessModes to 'ReadOnlyMany' as per the instructions
-// above.
+// If CephFS PVC, modify rsSpec AccessModes to use 'ReadOnlyMany'.
 func (v *VSHandler) ModifyRSSpecForCephFS(rsSpec *ramendrv1alpha1.VolSyncReplicationSourceSpec,
 	storageClass *storagev1.StorageClass,
-) error {
-	if storageClass.Provisioner != v.defaultCephFSCSIDriverName {
-		return nil // No workaround required
-	}
-
-	v.log.Info("CephFS storageclass detected on source PVC, creating replicationsource with read-only "+
-		" PVC from snapshot", "storageClassName", storageClass.GetName())
-
-	// Create/update readOnlyPVCStorageClass
-	readOnlyPVCStorageClass := &storagev1.StorageClass{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: storageClass.GetName() + "-vrg",
-		},
-	}
-
-	op, err := ctrlutil.CreateOrUpdate(v.ctx, v.client, readOnlyPVCStorageClass, func() error {
-		// Do not update the storageclass if it already exists - Provisioner and Parameters are immutable anyway
-		if readOnlyPVCStorageClass.CreationTimestamp.IsZero() {
-			readOnlyPVCStorageClass.Provisioner = storageClass.Provisioner
-
-			// Copy other parameters from the original storage class
-			// Note - not copying volumebindingmode or reclaim policy from the source storageclass will leave defaults
-			readOnlyPVCStorageClass.Parameters = map[string]string{}
-			for k, v := range storageClass.Parameters {
-				readOnlyPVCStorageClass.Parameters[k] = v
-			}
-
-			// Set backingSnapshot parameter to true
-			readOnlyPVCStorageClass.Parameters["backingSnapshot"] = "true"
-		}
-
-		return nil
-	})
-	if err != nil {
-		return fmt.Errorf("%w", err)
+) {
+	if storageClass.Provisioner == v.defaultCephFSCSIDriverName {
+		rsSpec.ProtectedPVC.AccessModes = []corev1.PersistentVolumeAccessMode{corev1.ReadOnlyMany}
 	}
-
-	v.log.Info("StorageClass for readonly cephfs PVC createOrUpdate Complete", "op", op)
-
-	// Update the rsSpec with access modes and the special storageclass
-	rsSpec.ProtectedPVC.AccessModes = []corev1.PersistentVolumeAccessMode{corev1.ReadOnlyMany}
-	rsSpec.ProtectedPVC.StorageClassName = &readOnlyPVCStorageClass.Name
-
-	return nil
 }
 
 func (v *VSHandler) GetVolumeSnapshotClassFromPVCStorageClass(storageClassName *string) (string, error) {
@@ -1821,21 +1770,10 @@ func (v *VSHandler) reconcileLocalRS(rd *volsyncv1alpha1.ReplicationDestination,
 ) {
 	v.log.Info("Reconciling localRS", "RD", rd.GetName())
 
-	storageClass, err := v.getStorageClass(rdSpec.ProtectedPVC.StorageClassName)
-	if err != nil {
-		return nil, err
-	}
-
 	rsSpec := &ramendrv1alpha1.VolSyncReplicationSourceSpec{
 		ProtectedPVC: rdSpec.ProtectedPVC,
 	}
 
-	// Fix for CephFS (replication source only) - may need different storageclass and access modes
-	err = v.ModifyRSSpecForCephFS(rsSpec, storageClass)
-	if err != nil {
-		return nil, err
-	}
-
 	pvc, err := v.setupLocalRS(rd, rdSpec, snapshotRef)
 	if err != nil {
 		return nil, err
@@ -1983,10 +1921,10 @@ func (v *VSHandler) setupLocalRS(rd *volsyncv1alpha1.ReplicationDestination,
 	}
 
 	// In all other cases, we have to create a RO PVC.
-	return v.createReadOnlyPVCFromSnapshot(rd, rdSpec, snapshotRef, restoreSize)
+	return v.createPVCFromSnapshot(rd, rdSpec, snapshotRef, restoreSize)
 }
 
-func (v *VSHandler) createReadOnlyPVCFromSnapshot(rd *volsyncv1alpha1.ReplicationDestination,
+func (v *VSHandler) createPVCFromSnapshot(rd *volsyncv1alpha1.ReplicationDestination,
 	rdSpec *ramendrv1alpha1.VolSyncReplicationDestinationSpec,
 	snapshotRef *corev1.TypedLocalObjectReference,
 	snapRestoreSize *resource.Quantity,
@@ -1994,6 +1932,11 @@ func (v *VSHandler) createReadOnlyPVCFromSnapshot(rd *volsyncv1alpha1.Replicatio
 	l := v.log.WithValues("pvcName", rd.GetName(), "snapshotRef", snapshotRef,
 		"snapRestoreSize", snapRestoreSize)
 
+	storageClass, err := v.getStorageClass(rdSpec.ProtectedPVC.StorageClassName)
+	if err != nil {
+		return nil, err
+	}
+
 	pvc := &corev1.PersistentVolumeClaim{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      snapshotRef.Name,
@@ -2017,6 +1960,11 @@ func (v *VSHandler) createReadOnlyPVCFromSnapshot(rd *volsyncv1alpha1.Replicatio
 
 	accessModes := []corev1.PersistentVolumeAccessMode{corev1.ReadOnlyMany}
 
+	// Use the protectedPVC accessModes when csi driver is not the default (openshift-storage.cephfs.csi.ceph.com)
+	if storageClass.Provisioner != v.defaultCephFSCSIDriverName {
+		accessModes = rdSpec.ProtectedPVC.AccessModes
+	}
+
 	if pvc.CreationTimestamp.IsZero() { // set immutable fields
 		pvc.Spec.AccessModes = accessModes
 		pvc.Spec.StorageClassName = rd.Spec.RsyncTLS.StorageClassName
diff --git a/internal/controller/volsync/vshandler_test.go b/internal/controller/volsync/vshandler_test.go
index 67f0dc8be..92b6087af 100644
--- a/internal/controller/volsync/vshandler_test.go
+++ b/internal/controller/volsync/vshandler_test.go
@@ -252,10 +252,12 @@ var _ = Describe("VolSync Handler - Volume Replication Class tests", func() {
 			},
 				storageClassForTest)).To(Succeed())
 
-			//
-			// Call ModifyRSSpecForCephFS
-			//
-			Expect(vsHandler.ModifyRSSpecForCephFS(&testRsSpec, storageClassForTest)).To(Succeed())
+			vsHandler.ModifyRSSpecForCephFS(&testRsSpec, storageClassForTest)
+
+			Expect(testRsSpec.ProtectedPVC.AccessModes).To(Equal(
+				[]corev1.PersistentVolumeAccessMode{
+					corev1.ReadOnlyMany,
+				}))
 		})
 
 		Context("When the source PVC is not using a cephfs storageclass", func() {
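Taken together, the vshandler.go hunks reduce to a single rule for temporary PVCs. The sketch below restates that rule outside the VSHandler; it assumes the default CephFS driver name mentioned in the code comment above (openshift-storage.cephfs.csi.ceph.com), and tempPVCAccessModes is a hypothetical helper, not Ramen API.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
)

// Assumed default driver name; Ramen wires the real value through
// VSHandler.defaultCephFSCSIDriverName.
const cephFSCSIDriverName = "openshift-storage.cephfs.csi.ceph.com"

// tempPVCAccessModes condenses the post-patch rule: only CephFS-backed
// temporary PVCs are forced to ReadOnlyMany (for cheap snapshot-backed
// mounts); any other provisioner keeps the source PVC's access modes.
func tempPVCAccessModes(sc *storagev1.StorageClass,
	sourceModes []corev1.PersistentVolumeAccessMode,
) []corev1.PersistentVolumeAccessMode {
	if sc.Provisioner == cephFSCSIDriverName {
		return []corev1.PersistentVolumeAccessMode{corev1.ReadOnlyMany}
	}

	return sourceModes
}

func main() {
	cephFS := &storagev1.StorageClass{Provisioner: cephFSCSIDriverName}
	rbd := &storagev1.StorageClass{Provisioner: "openshift-storage.rbd.csi.ceph.com"}
	src := []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}

	fmt.Println(tempPVCAccessModes(cephFS, src)) // [ReadOnlyMany]
	fmt.Println(tempPVCAccessModes(rbd, src))    // [ReadWriteOnce]
}
```

Note that the special "-vrg" storage class is gone entirely: the temporary PVC is created from the same storage class as the source, and only the access mode differs.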
From 842d781cf760a4d732b141255b4e296709128669 Mon Sep 17 00:00:00 2001
From: Benamar Mekhissi
Date: Wed, 18 Dec 2024 02:24:56 -0500
Subject: [PATCH 3/5] Support original accessModes for non-CephFS source PVCs

For non-CephFS PVCs, temporary PVCs now inherit the accessModes of the
source PVC rather than using ReadOnlyMany.

Signed-off-by: Benamar Mekhissi
(cherry picked from commit ca28628251be92a1437eca7542249cb5bbdc2728)
---
 internal/controller/cephfscg/utils.go         | 51 -------------------
 .../cephfscg/volumegroupsourcehandler.go      | 19 ++++---
 2 files changed, 11 insertions(+), 59 deletions(-)

diff --git a/internal/controller/cephfscg/utils.go b/internal/controller/cephfscg/utils.go
index 9790ab32f..070232df9 100644
--- a/internal/controller/cephfscg/utils.go
+++ b/internal/controller/cephfscg/utils.go
@@ -11,10 +11,8 @@ import (
 	"github.com/ramendr/ramen/internal/controller/volsync"
 	corev1 "k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"sigs.k8s.io/controller-runtime/pkg/client"
-	ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 )
 
 // ------------- [Begin] Copied from existing code in Ramen ----
@@ -54,55 +52,6 @@ func getLocalServiceNameForRD(rdName string) string {
 	return fmt.Sprintf("volsync-rsync-tls-dst-%s", rdName)
 }
 
-// ------------- [End] Copied from existing code in Ramen ----
-
-// ------------- [Begin] Edited from existing code in Ramen ----
-
-// Copied from func (v *VSHandler) ModifyRSSpecForCephFS
-func GetRestoreStorageClass(
-	ctx context.Context, k8sClient client.Client, storageClassName string,
-	defaultCephFSCSIDriverName string,
-) (*storagev1.StorageClass, error) {
-	storageClass, err := GetStorageClass(ctx, k8sClient, &storageClassName)
-	if err != nil {
-		return nil, err
-	}
-
-	if storageClass.Provisioner != defaultCephFSCSIDriverName {
-		return storageClass, nil // No workaround required
-	}
-
-	// Create/update readOnlyPVCStorageClass
-	readOnlyPVCStorageClass := &storagev1.StorageClass{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: storageClass.GetName() + "-vrg",
-		},
-	}
-
-	_, err = ctrlutil.CreateOrUpdate(ctx, k8sClient, readOnlyPVCStorageClass, func() error {
-		// Do not update the storageclass if it already exists - Provisioner and Parameters are immutable anyway
-		if readOnlyPVCStorageClass.CreationTimestamp.IsZero() {
-			readOnlyPVCStorageClass.Provisioner = storageClass.Provisioner
-
-			// Copy other parameters from the original storage class
-			readOnlyPVCStorageClass.Parameters = map[string]string{}
-			for k, v := range storageClass.Parameters {
-				readOnlyPVCStorageClass.Parameters[k] = v
-			}
-
-			// Set backingSnapshot parameter to true
-			readOnlyPVCStorageClass.Parameters["backingSnapshot"] = "true"
-		}
-
-		return nil
-	})
-	if err != nil {
-		return nil, fmt.Errorf("%w", err)
-	}
-
-	return readOnlyPVCStorageClass, nil
-}
-
 // Copied from func (v *VSHandler) getStorageClass(
 func GetStorageClass(
 	ctx context.Context, k8sClient client.Client, storageClassName *string,
diff --git a/internal/controller/cephfscg/volumegroupsourcehandler.go b/internal/controller/cephfscg/volumegroupsourcehandler.go
index 07ea318d8..77b594e10 100644
--- a/internal/controller/cephfscg/volumegroupsourcehandler.go
+++ b/internal/controller/cephfscg/volumegroupsourcehandler.go
@@ -246,10 +246,14 @@ func (h *volumeGroupSourceHandler) RestoreVolumesFromVolumeGroupSnapshot(
 				volumeGroupSnapshot.Namespace+"/"+pvcVSRef.PersistentVolumeClaimRef.Name, err)
 		}
 
-		restoreStorageClass, err := GetRestoreStorageClass(ctx, h.Client,
-			*pvc.Spec.StorageClassName, h.DefaultCephFSCSIDriverName)
+		storageClass, err := GetStorageClass(ctx, h.Client, pvc.Spec.StorageClassName)
 		if err != nil {
-			return nil, fmt.Errorf("failed to get Restore Storage Class from PVC %s: %w", pvc.Name+"/"+pvc.Namespace, err)
+			return nil, err
+		}
+
+		restoreAccessModes := []corev1.PersistentVolumeAccessMode{corev1.ReadOnlyMany}
+		if storageClass.Provisioner != h.DefaultCephFSCSIDriverName {
+			restoreAccessModes = pvc.Spec.AccessModes
 		}
 
 		RestoredPVCNamespacedName := types.NamespacedName{
@@ -258,7 +262,7 @@ func (h *volumeGroupSourceHandler) RestoreVolumesFromVolumeGroupSnapshot(
 		}
 		if err := h.RestoreVolumesFromSnapshot(
 			ctx, pvcVSRef.VolumeSnapshotRef.Name, pvc, RestoredPVCNamespacedName,
-			restoreStorageClass.GetName(), owner); err != nil {
+			restoreAccessModes, owner); err != nil {
 			return nil, fmt.Errorf("failed to restore volumes from snapshot %s: %w",
 				pvcVSRef.VolumeSnapshotRef.Name+"/"+pvc.Namespace, err)
 		}
@@ -286,7 +290,7 @@ func (h *volumeGroupSourceHandler) RestoreVolumesFromSnapshot(
 	vsName string, pvc *corev1.PersistentVolumeClaim,
 	restoredPVCNamespacedname types.NamespacedName,
-	restoreStorageClassName string,
+	restoreAccessModes []corev1.PersistentVolumeAccessMode,
 	owner metav1.Object,
 ) error {
 	logger := h.Logger.WithName("RestoreVolumesFromSnapshot").
@@ -351,8 +355,8 @@ func (h *volumeGroupSourceHandler) RestoreVolumesFromSnapshot(
 	}
 
 	if restoredPVC.CreationTimestamp.IsZero() { // set immutable fields
-		restoredPVC.Spec.AccessModes = []corev1.PersistentVolumeAccessMode{corev1.ReadOnlyMany}
-		restoredPVC.Spec.StorageClassName = &restoreStorageClassName
+		restoredPVC.Spec.AccessModes = restoreAccessModes
+		restoredPVC.Spec.StorageClassName = pvc.Spec.StorageClassName
 		restoredPVC.Spec.DataSource = &snapshotRef
 	}
 
@@ -424,7 +428,7 @@ func (h *volumeGroupSourceHandler) CreateOrUpdateReplicationSourceForRestoredPVC
 	}
 	replicationSource.Spec.RsyncTLS = &volsyncv1alpha1.ReplicationSourceRsyncTLSSpec{
 		ReplicationSourceVolumeOptions: volsyncv1alpha1.ReplicationSourceVolumeOptions{
-			AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadOnlyMany},
 			CopyMethod:  volsyncv1alpha1.CopyMethodDirect,
 		},
 
 		KeySecret: &h.VolsyncKeySecretName,
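The consistency-group restore path now applies the same rule when it builds the temporary PVC from a VolumeSnapshot. Below is a minimal sketch of the immutable fields set on first creation; restoredPVCSpec, the driver constant, and the storage class name are illustrative stand-ins (the real logic lives in RestoreVolumesFromSnapshot above).

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Assumed driver name; the real value is injected via
// volumeGroupSourceHandler.DefaultCephFSCSIDriverName.
const cephFSCSIDriverName = "openshift-storage.cephfs.csi.ceph.com"

// restoredPVCSpec sketches the immutable fields set when the temporary PVC is
// first created: the VolumeSnapshot as data source, the source PVC's own
// storage class (no more "-vrg" clone), and access modes chosen by provisioner.
func restoredPVCSpec(src *corev1.PersistentVolumeClaim, provisioner, snapshotName string,
) corev1.PersistentVolumeClaimSpec {
	accessModes := []corev1.PersistentVolumeAccessMode{corev1.ReadOnlyMany}
	if provisioner != cephFSCSIDriverName {
		accessModes = src.Spec.AccessModes // non-CephFS: inherit from the source PVC
	}

	snapshotGroup := "snapshot.storage.k8s.io"

	return corev1.PersistentVolumeClaimSpec{
		AccessModes:      accessModes,
		StorageClassName: src.Spec.StorageClassName,
		DataSource: &corev1.TypedLocalObjectReference{
			APIGroup: &snapshotGroup,
			Kind:     "VolumeSnapshot",
			Name:     snapshotName,
		},
	}
}

func main() {
	scName := "example-cephfs-storageclass" // illustrative name
	src := &corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "app-data"},
		Spec: corev1.PersistentVolumeClaimSpec{
			AccessModes:      []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			StorageClassName: &scName,
		},
	}

	fmt.Println(restoredPVCSpec(src, cephFSCSIDriverName, "cephfscg-app-data").AccessModes)
	fmt.Println(restoredPVCSpec(src, "openshift-storage.rbd.csi.ceph.com", "cephfscg-app-data").AccessModes)
}
```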
From a256e657d833f1eae570e6126dc7c24f217040df Mon Sep 17 00:00:00 2001
From: Benamar Mekhissi
Date: Wed, 18 Dec 2024 02:42:36 -0500
Subject: [PATCH 4/5] Delay temporary PVC creation until scheduled time on
 fresh deploy

For consistency groups, temporary PVCs will now be created at the
scheduled time rather than immediately upon fresh deployment.

Signed-off-by: Benamar Mekhissi
(cherry picked from commit 021c6d76ab083e9642b20762bc8900cbe366e9b6)
---
 .../cephfscg/replicationgroupsource.go        | 18 +++++++++---------
 .../cephfscg/volumegroupsourcehandler.go      |  4 ++--
 internal/controller/vrg_volrep.go             |  4 ++--
 internal/controller/vrg_volsync.go            |  6 ++++++
 4 files changed, 19 insertions(+), 13 deletions(-)

diff --git a/internal/controller/cephfscg/replicationgroupsource.go b/internal/controller/cephfscg/replicationgroupsource.go
index 477d3fbc3..d217d0e1d 100644
--- a/internal/controller/cephfscg/replicationgroupsource.go
+++ b/internal/controller/cephfscg/replicationgroupsource.go
@@ -113,15 +113,6 @@ func (m *replicationGroupSourceMachine) Synchronize(ctx context.Context) (mover.
 		return mover.InProgress(), err
 	}
 
-	m.Logger.Info("Restore PVCs from volume group snapshot")
-
-	restoredPVCs, err := m.VolumeGroupHandler.RestoreVolumesFromVolumeGroupSnapshot(ctx, m.ReplicationGroupSource)
-	if err != nil {
-		m.Logger.Error(err, "Failed to restore volume group snapshot")
-
-		return mover.InProgress(), err
-	}
-
 	m.Logger.Info("Create ReplicationSource for each Restored PVC")
 	vrgName := m.ReplicationGroupSource.GetLabels()[volsync.VRGOwnerNameLabel]
 	// Pre-allocated shared secret - DRPC will generate and propagate this secret from hub to clusters
@@ -141,6 +132,15 @@
 		return mover.InProgress(), nil
 	}
 
+	m.Logger.Info("Restore PVCs from volume group snapshot")
+
+	restoredPVCs, err := m.VolumeGroupHandler.RestoreVolumesFromVolumeGroupSnapshot(ctx, m.ReplicationGroupSource)
+	if err != nil {
+		m.Logger.Error(err, "Failed to restore volume group snapshot")
+
+		return mover.InProgress(), err
+	}
+
 	replicationSources, err := m.VolumeGroupHandler.CreateOrUpdateReplicationSourceForRestoredPVCs(
 		ctx, m.ReplicationGroupSource.Status.LastSyncStartTime.String(), restoredPVCs, m.ReplicationGroupSource)
 	if err != nil {
diff --git a/internal/controller/cephfscg/volumegroupsourcehandler.go b/internal/controller/cephfscg/volumegroupsourcehandler.go
index 77b594e10..8a11d2841 100644
--- a/internal/controller/cephfscg/volumegroupsourcehandler.go
+++ b/internal/controller/cephfscg/volumegroupsourcehandler.go
@@ -26,8 +26,8 @@ import (
 )
 
 var (
-	VolumeGroupSnapshotNameFormat = "cephfscg-%s"
-	RestorePVCinCGNameFormat      = "cephfscg-%s"
+	VolumeGroupSnapshotNameFormat = "vs-cg-%s"
+	RestorePVCinCGNameFormat      = "vs-cg-%s"
 	SnapshotGroup                 = "snapshot.storage.k8s.io"
 	SnapshotGroupKind             = "VolumeSnapshot"
 )
diff --git a/internal/controller/vrg_volrep.go b/internal/controller/vrg_volrep.go
index 701c5c212..75af66dc0 100644
--- a/internal/controller/vrg_volrep.go
+++ b/internal/controller/vrg_volrep.go
@@ -437,7 +437,7 @@ func (v *VRGInstance) protectPVC(pvc *corev1.PersistentVolumeClaim, log logr.Log
 // any further and it can be skipped. The pvc will go away eventually.
 func skipPVC(pvc *corev1.PersistentVolumeClaim, log logr.Logger) (bool, string) {
 	if pvc.Status.Phase != corev1.ClaimBound {
-		log.Info("Skipping handling of VR as PersistentVolumeClaim is not bound", "pvcPhase", pvc.Status.Phase)
+		log.Info("Skipping handling of VR as PVC is not bound", "pvcPhase", pvc.Status.Phase)
 
 		msg := "PVC not bound yet"
 		// v.updateProtectedPVCCondition(pvc.Name, VRGConditionReasonProgressing, msg)
@@ -451,7 +451,7 @@ func skipPVC(pvc *corev1.PersistentVolumeClaim, log logr.Logger) (bool, string)
 func isPVCDeletedAndNotProtected(pvc *corev1.PersistentVolumeClaim, log logr.Logger) (bool, string) {
 	// If PVC deleted but not yet protected with a finalizer, skip it!
 	if !containsString(pvc.Finalizers, PvcVRFinalizerProtected) && rmnutil.ResourceIsDeleted(pvc) {
-		log.Info("Skipping PersistentVolumeClaim, as it is marked for deletion and not yet protected")
+		log.Info("Skipping PVC, as it is marked for deletion and not yet protected")
 
 		msg := "Skipping pvc marked for deletion"
 		// v.updateProtectedPVCCondition(pvc.Name, VRGConditionReasonProgressing, msg)
diff --git a/internal/controller/vrg_volsync.go b/internal/controller/vrg_volsync.go
index 2f7b107f7..500abee86 100644
--- a/internal/controller/vrg_volsync.go
+++ b/internal/controller/vrg_volsync.go
@@ -113,6 +113,12 @@ func (v *VRGInstance) reconcileVolSyncAsPrimary(finalSyncPrepared *bool) (requeu
 	}
 
 	for _, pvc := range v.volSyncPVCs {
+		if pvc.Status.Phase != corev1.ClaimBound {
+			v.log.Info("Skipping PVC - PVC is not Bound.", "name", pvc.GetName())
+
+			continue
+		}
+
 		requeuePVC := v.reconcilePVCAsVolSyncPrimary(pvc)
 		if requeuePVC {
 			requeue = true
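A condensed sketch of the reordered Synchronize flow follows. The exact gate is not visible in the hunk context, so the lastSyncStartTime check below is an assumption based on the commit message; all types and helpers are toy stand-ins for replicationGroupSourceMachine and its VolumeGroupHandler.

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// groupSource is a toy stand-in for replicationGroupSourceMachine.
type groupSource struct {
	lastSyncStartTime *time.Time // mirrors Status.LastSyncStartTime
}

// ensureSharedSecret stands in for the pre-restore bookkeeping (shared-secret
// checks and ReplicationSource preparation) that still runs on every reconcile.
func (s *groupSource) ensureSharedSecret() error { return nil }

// restoreFromGroupSnapshot stands in for RestoreVolumesFromVolumeGroupSnapshot.
func (s *groupSource) restoreFromGroupSnapshot() ([]string, error) {
	return []string{"vs-cg-pvc-1"}, nil
}

// synchronize mirrors the post-patch ordering: the cheap bookkeeping runs
// first, and the expensive restore of temporary PVCs from the group snapshot
// is deferred until a sync has actually been scheduled.
func (s *groupSource) synchronize() ([]string, error) {
	if err := s.ensureSharedSecret(); err != nil {
		return nil, err
	}

	if s.lastSyncStartTime == nil {
		// Fresh deploy: nothing scheduled yet, so no temporary PVCs either.
		return nil, errors.New("in progress: waiting for the scheduled sync time")
	}

	return s.restoreFromGroupSnapshot()
}

func main() {
	s := &groupSource{}

	if _, err := s.synchronize(); err != nil {
		fmt.Println(err) // in progress: waiting for the scheduled sync time
	}

	now := time.Now()
	s.lastSyncStartTime = &now

	pvcs, _ := s.synchronize()
	fmt.Println(pvcs) // [vs-cg-pvc-1]
}
```

The unbound-PVC skip in vrg_volsync.go follows the same idea on the VRG side: a PVC that is not yet Bound is left alone until a later reconcile, rather than being processed immediately.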
From d3b4c330f65800ffe80d97f7790125ced55707b1 Mon Sep 17 00:00:00 2001
From: Benamar Mekhissi
Date: Thu, 19 Dec 2024 09:56:36 -0500
Subject: [PATCH 5/5] Clean up unit test

Signed-off-by: Benamar Mekhissi
(cherry picked from commit aae07f624b1df024a0dc5133cd81ad2338ba4cc1)
---
 .../cephfscg/replicationgroupsource.go        |   2 +-
 .../cephfscg/volumegroupsourcehandler.go      |  22 +--
 internal/controller/drplacementcontrol.go     |   2 +-
 internal/controller/volsync/vshandler.go      |   5 +-
 internal/controller/volsync/vshandler_test.go | 135 ++----------------
 .../volumereplicationgroup_controller.go      |   8 ++
 6 files changed, 36 insertions(+), 138 deletions(-)

diff --git a/internal/controller/cephfscg/replicationgroupsource.go b/internal/controller/cephfscg/replicationgroupsource.go
index d217d0e1d..71a1258e9 100644
--- a/internal/controller/cephfscg/replicationgroupsource.go
+++ b/internal/controller/cephfscg/replicationgroupsource.go
@@ -140,7 +140,7 @@ func (m *replicationGroupSourceMachine) Synchronize(ctx context.Context) (mover.
 		return mover.InProgress(), err
 	}
- 
+
 	replicationSources, err := m.VolumeGroupHandler.CreateOrUpdateReplicationSourceForRestoredPVCs(
 		ctx, m.ReplicationGroupSource.Status.LastSyncStartTime.String(), restoredPVCs, m.ReplicationGroupSource)
 	if err != nil {
diff --git a/internal/controller/cephfscg/volumegroupsourcehandler.go b/internal/controller/cephfscg/volumegroupsourcehandler.go
index 8a11d2841..e9c197736 100644
--- a/internal/controller/cephfscg/volumegroupsourcehandler.go
+++ b/internal/controller/cephfscg/volumegroupsourcehandler.go
@@ -146,7 +146,7 @@ func (h *volumeGroupSourceHandler) CreateOrUpdateVolumeGroupSnapshot(
 	return nil
 }
 
-// CleanVolumeGroupSnapshot delete restored pvc, replicationsource and VolumeGroupSnapshot
+// CleanVolumeGroupSnapshot deletes restored pvc and VolumeGroupSnapshot
 //
 //nolint:funlen
 func (h *volumeGroupSourceHandler) CleanVolumeGroupSnapshot(
@@ -214,36 +214,38 @@ func (h *volumeGroupSourceHandler) CleanVolumeGroupSnapshot(
 	return nil
 }
 
-// RestoreVolumesFromVolumeGroupSnapshot restore VolumeGroupSnapshot to PVCs
+// RestoreVolumesFromVolumeGroupSnapshot restores VolumeGroupSnapshot to PVCs
+//
+//nolint:funlen,cyclop
 func (h *volumeGroupSourceHandler) RestoreVolumesFromVolumeGroupSnapshot(
 	ctx context.Context, owner metav1.Object,
 ) ([]RestoredPVC, error) {
 	logger := h.Logger.WithName("RestoreVolumesFromVolumeGroupSnapshot")
 
 	logger.Info("Get volume group snapshot")
 
-	volumeGroupSnapshot := &vgsv1alphfa1.VolumeGroupSnapshot{}
+	vgs := &vgsv1alphfa1.VolumeGroupSnapshot{}
 	if err := h.Client.Get(ctx,
 		types.NamespacedName{Name: h.VolumeGroupSnapshotName, Namespace: h.VolumeGroupSnapshotNamespace},
-		volumeGroupSnapshot); err != nil {
+		vgs); err != nil {
 		return nil, fmt.Errorf("failed to get volume group snapshot: %w", err)
 	}
 
-	if volumeGroupSnapshot.Status == nil || volumeGroupSnapshot.Status.ReadyToUse == nil ||
-		(volumeGroupSnapshot.Status.ReadyToUse != nil && !*volumeGroupSnapshot.Status.ReadyToUse) {
+	if vgs.Status == nil || vgs.Status.ReadyToUse == nil ||
+		(vgs.Status.ReadyToUse != nil && !*vgs.Status.ReadyToUse) {
 		return nil, fmt.Errorf("can't restore volume group snapshot: volume group snapshot is not ready to be used")
 	}
 
 	restoredPVCs := []RestoredPVC{}
 
-	for _, pvcVSRef := range volumeGroupSnapshot.Status.PVCVolumeSnapshotRefList {
+	for _, pvcVSRef := range vgs.Status.PVCVolumeSnapshotRefList {
 		logger.Info("Get PVCName from volume snapshot", "PVCName", pvcVSRef.PersistentVolumeClaimRef.Name,
 			"VolumeSnapshotName", pvcVSRef.VolumeSnapshotRef.Name)
 
 		pvc, err := util.GetPVC(ctx, h.Client,
-			types.NamespacedName{Name: pvcVSRef.PersistentVolumeClaimRef.Name, Namespace: volumeGroupSnapshot.Namespace})
+			types.NamespacedName{Name: pvcVSRef.PersistentVolumeClaimRef.Name, Namespace: vgs.Namespace})
 		if err != nil {
 			return nil, fmt.Errorf("failed to get PVC from VGS %s: %w",
-				volumeGroupSnapshot.Namespace+"/"+pvcVSRef.PersistentVolumeClaimRef.Name, err)
+				vgs.Namespace+"/"+pvcVSRef.PersistentVolumeClaimRef.Name, err)
 		}
 
 		storageClass, err := GetStorageClass(ctx, h.Client, pvc.Spec.StorageClassName)
@@ -428,7 +430,7 @@ func (h *volumeGroupSourceHandler) CreateOrUpdateReplicationSourceForRestoredPVC
 	}
 	replicationSource.Spec.RsyncTLS = &volsyncv1alpha1.ReplicationSourceRsyncTLSSpec{
 		ReplicationSourceVolumeOptions: volsyncv1alpha1.ReplicationSourceVolumeOptions{
-			CopyMethod:  volsyncv1alpha1.CopyMethodDirect,
+			CopyMethod: volsyncv1alpha1.CopyMethodDirect,
 		},
 
 		KeySecret: &h.VolsyncKeySecretName,
diff --git a/internal/controller/drplacementcontrol.go b/internal/controller/drplacementcontrol.go
index a25df76e6..785720c66 100644
--- a/internal/controller/drplacementcontrol.go
+++ b/internal/controller/drplacementcontrol.go
@@ -1707,7 +1707,7 @@ func (d *DRPCInstance) updateVRGOptionalFields(vrg, vrgFromView *rmn.VolumeRepli
 		DoNotDeletePVCAnnotation:      d.instance.GetAnnotations()[DoNotDeletePVCAnnotation],
 		DRPCUIDAnnotation:             string(d.instance.UID),
 		rmnutil.IsCGEnabledAnnotation: d.instance.GetAnnotations()[rmnutil.IsCGEnabledAnnotation],
-		rmnutil.UseVolSyncAnnotation: d.instance.GetAnnotations()[rmnutil.UseVolSyncAnnotation],
+		rmnutil.UseVolSyncAnnotation:  d.instance.GetAnnotations()[rmnutil.UseVolSyncAnnotation],
 	}
 
 	vrg.Spec.ProtectedNamespaces = d.instance.Spec.ProtectedNamespaces
diff --git a/internal/controller/volsync/vshandler.go b/internal/controller/volsync/vshandler.go
index e67e94912..7f0c9981e 100644
--- a/internal/controller/volsync/vshandler.go
+++ b/internal/controller/volsync/vshandler.go
@@ -1929,8 +1929,7 @@ func (v *VSHandler) createPVCFromSnapshot(rd *volsyncv1alpha1.ReplicationDestina
 	snapshotRef *corev1.TypedLocalObjectReference,
 	snapRestoreSize *resource.Quantity,
 ) (*corev1.PersistentVolumeClaim, error) {
-	l := v.log.WithValues("pvcName", rd.GetName(), "snapshotRef", snapshotRef,
-		"snapRestoreSize", snapRestoreSize)
+	l := v.log.WithValues("pvcName", rd.GetName(), "snapshotRef", snapshotRef, "snapRestoreSize", snapRestoreSize)
 
 	storageClass, err := v.getStorageClass(rdSpec.ProtectedPVC.StorageClassName)
 	if err != nil {
@@ -1980,8 +1979,6 @@ func (v *VSHandler) createPVCFromSnapshot(rd *volsyncv1alpha1.ReplicationDestina
 		return nil
 	})
 	if err != nil {
-		l.Error(err, "Unable to createOrUpdate PVC from snapshot for localRS")
-
 		return nil, fmt.Errorf("error creating or updating PVC from snapshot for localRS (%w)", err)
 	}
diff --git a/internal/controller/volsync/vshandler_test.go b/internal/controller/volsync/vshandler_test.go
index 92b6087af..a3ebfb264 100644
--- a/internal/controller/volsync/vshandler_test.go
+++ b/internal/controller/volsync/vshandler_test.go
@@ -253,11 +253,17 @@ var _ = Describe("VolSync Handler - Volume Replication Class tests", func() {
 				storageClassForTest)).To(Succeed())
 
 			vsHandler.ModifyRSSpecForCephFS(&testRsSpec, storageClassForTest)
-
-			Expect(testRsSpec.ProtectedPVC.AccessModes).To(Equal(
-				[]corev1.PersistentVolumeAccessMode{
-					corev1.ReadOnlyMany,
-				}))
+			if storageClassForTest.Provisioner == testCephFSStorageDriverName {
+				Expect(testRsSpec.ProtectedPVC.AccessModes).To(Equal(
+					[]corev1.PersistentVolumeAccessMode{
+						corev1.ReadOnlyMany,
+					}))
+			} else {
+				Expect(testRsSpec.ProtectedPVC.AccessModes).To(Equal(
+					[]corev1.PersistentVolumeAccessMode{
+						corev1.ReadWriteMany,
+					}))
+			}
 		})
 
 		Context("When the source PVC is not using a cephfs storageclass", func() {
@@ -272,128 +278,13 @@
 		})
 
 		Context("When the sourcePVC is using a cephfs storageclass", func() {
-			customBackingSnapshotStorageClassName := testCephFSStorageClassName + "-vrg"
-
 			BeforeEach(func() {
 				// Make sure the source PVC uses the cephfs storageclass
 				testSourcePVC.Spec.StorageClassName = &testCephFSStorageClassName
 			})
 
-			JustBeforeEach(func() {
-				// Common tests - rsSpec should be modified with settings to allow pvc from snapshot
-				// to use our custom cephfs storageclass and ReadOnlyMany accessModes
-				Expect(testRsSpecOrig).NotTo(Equal(testRsSpec))
-
-				// Should use the custom storageclass with backingsnapshot: true parameter
-				Expect(*testRsSpec.ProtectedPVC.StorageClassName).To(Equal(customBackingSnapshotStorageClassName))
-
-				// AccessModes should be updated to ReadOnlyMany
-				Expect(testRsSpec.ProtectedPVC.AccessModes).To(Equal(
-					[]corev1.PersistentVolumeAccessMode{
-						corev1.ReadOnlyMany,
-					}))
-			})
-
-			AfterEach(func() {
-				// Delete the custom storage class that may have been created by test
-				custStorageClass := &storagev1.StorageClass{
-					ObjectMeta: metav1.ObjectMeta{
-						Name: customBackingSnapshotStorageClassName,
-					},
-				}
-				err := k8sClient.Delete(ctx, custStorageClass)
-				if err != nil {
-					Expect(kerrors.IsNotFound(err)).To(BeTrue())
-				}
-
-				Eventually(func() bool {
-					err := k8sClient.Get(ctx, client.ObjectKeyFromObject(custStorageClass), custStorageClass)
-
-					return kerrors.IsNotFound(err)
-				}, maxWait, interval).Should(BeTrue())
-			})
-
-			Context("When the custom cephfs backing storage class for readonly pvc from snap does not exist", func() {
-				// Delete the custom vrg storageclass if it exists
-				BeforeEach(func() {
-					custStorageClass := &storagev1.StorageClass{
-						ObjectMeta: metav1.ObjectMeta{
-							Name: customBackingSnapshotStorageClassName,
-						},
-					}
-					err := k8sClient.Delete(ctx, custStorageClass)
-					if err != nil {
-						Expect(kerrors.IsNotFound(err)).To(BeTrue())
-					}
-
-					Eventually(func() bool {
-						err := k8sClient.Get(ctx, client.ObjectKeyFromObject(custStorageClass), custStorageClass)
-
-						return kerrors.IsNotFound(err)
-					}, maxWait, interval).Should(BeTrue())
-				})
-
-				It("ModifyRSSpecForCephFS should modify the rsSpec and create the new storageclass", func() {
-					// RSspec modification checks in the outer context JustBeforeEach()
-
-					newStorageClass := &storagev1.StorageClass{
-						ObjectMeta: metav1.ObjectMeta{
-							Name: customBackingSnapshotStorageClassName,
-						},
-					}
-
-					Eventually(func() error {
-						return k8sClient.Get(ctx, client.ObjectKeyFromObject(newStorageClass), newStorageClass)
-					}, maxWait, interval).Should(Succeed())
-
-					Expect(newStorageClass.Parameters["backingSnapshot"]).To(Equal("true"))
-
-					// Other parameters from the test cephfs storageclass should be copied over
-					for k, v := range testCephFSStorageClass.Parameters {
-						Expect(newStorageClass.Parameters[k]).To(Equal(v))
-					}
-				})
-			})
-
-			Context("When the custom cephfs backing storage class for readonly pvc from snap exists", func() {
-				var preExistingCustStorageClass *storagev1.StorageClass
-
-				BeforeEach(func() {
-					preExistingCustStorageClass = &storagev1.StorageClass{
-						ObjectMeta: metav1.ObjectMeta{
-							Name: customBackingSnapshotStorageClassName,
-						},
-						Provisioner: testCephFSStorageDriverName,
-						Parameters: map[string]string{ // Not the same params as our CephFS storageclass for test
-							"different-param-1": "abc",
-							"different-param-2": "def",
-							"backingSnapshot":   "true",
-						},
-					}
-					Expect(k8sClient.Create(ctx, preExistingCustStorageClass)).To(Succeed())
-
-					// Confirm it's created
-					Eventually(func() error {
-						return k8sClient.Get(ctx,
-							client.ObjectKeyFromObject(preExistingCustStorageClass), preExistingCustStorageClass)
-					}, maxWait, interval).Should(Succeed())
-				})
-
-				It("ModifyRSSpecForCephFS should modify the rsSpec but not modify the new custom storageclass", func() {
-					// Load the custom storageclass
-					newStorageClass := &storagev1.StorageClass{
-						ObjectMeta: metav1.ObjectMeta{
-							Name: customBackingSnapshotStorageClassName,
-						},
-					}
-
-					Eventually(func() error {
-						return k8sClient.Get(ctx, client.ObjectKeyFromObject(newStorageClass), newStorageClass)
-					}, maxWait, interval).Should(Succeed())
-
-					// Parameters should match the original, unmodified
-					Expect(newStorageClass.Parameters).To(Equal(preExistingCustStorageClass.Parameters))
-				})
+			It("ModifyRSSpecForCephFS should modify the rsSpec protectedPVC accessModes", func() {
+				Expect(testRsSpecOrig).ToNot(Equal(testRsSpec))
 			})
 		})
 	})
diff --git a/internal/controller/volumereplicationgroup_controller.go b/internal/controller/volumereplicationgroup_controller.go
index f02fd6d25..226523428 100644
--- a/internal/controller/volumereplicationgroup_controller.go
+++ b/internal/controller/volumereplicationgroup_controller.go
@@ -757,6 +757,14 @@ func (v *VRGInstance) addConsistencyGroupLabel(pvc *corev1.PersistentVolumeClaim
 		return fmt.Errorf("missing storageID for PVC %s/%s", pvc.GetNamespace(), pvc.GetName())
 	}
 
+	// FIXME: a temporary workaround for issue DFBUGS-1209
+	// Remove this block once DFBUGS-1209 is fixed
+	if storageClass.Provisioner == DefaultCephFSCSIDriverName {
+		storageID = "cephfs-" + storageID
+	} else {
+		storageID = "rbd-" + storageID
+	}
+
 	// Add label for PVC, showing that this PVC is part of consistency group
 	return util.NewResourceUpdater(pvc).
 		AddLabel(ConsistencyGroupLabel, storageID).
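The DFBUGS-1209 workaround in the final hunk is self-contained enough to restate directly. A minimal sketch, assuming DefaultCephFSCSIDriverName resolves to the provisioner string used earlier in this series; consistencyGroupLabelValue is a hypothetical helper, not the actual Ramen function.

```go
package main

import "fmt"

// Assumed to match Ramen's DefaultCephFSCSIDriverName.
const defaultCephFSCSIDriverName = "openshift-storage.cephfs.csi.ceph.com"

// consistencyGroupLabelValue restates the temporary workaround: the storageID
// is prefixed with the backend type before being used as the value of the
// consistency-group label, keeping CephFS- and RBD-backed PVCs in separate
// groups until DFBUGS-1209 is fixed.
func consistencyGroupLabelValue(provisioner, storageID string) string {
	if provisioner == defaultCephFSCSIDriverName {
		return "cephfs-" + storageID
	}

	return "rbd-" + storageID
}

func main() {
	fmt.Println(consistencyGroupLabelValue(defaultCephFSCSIDriverName, "8d4a2c"))           // cephfs-8d4a2c
	fmt.Println(consistencyGroupLabelValue("openshift-storage.rbd.csi.ceph.com", "8d4a2c")) // rbd-8d4a2c
}
```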