diff --git a/internal/controller/cephfscg/replicationgroupsource.go b/internal/controller/cephfscg/replicationgroupsource.go
index 477d3fbc3..71a1258e9 100644
--- a/internal/controller/cephfscg/replicationgroupsource.go
+++ b/internal/controller/cephfscg/replicationgroupsource.go
@@ -113,15 +113,6 @@ func (m *replicationGroupSourceMachine) Synchronize(ctx context.Context) (mover.
 		return mover.InProgress(), err
 	}
 
-	m.Logger.Info("Restore PVCs from volume group snapshot")
-
-	restoredPVCs, err := m.VolumeGroupHandler.RestoreVolumesFromVolumeGroupSnapshot(ctx, m.ReplicationGroupSource)
-	if err != nil {
-		m.Logger.Error(err, "Failed to restore volume group snapshot")
-
-		return mover.InProgress(), err
-	}
-
 	m.Logger.Info("Create ReplicationSource for each Restored PVC")
 	vrgName := m.ReplicationGroupSource.GetLabels()[volsync.VRGOwnerNameLabel]
 	// Pre-allocated shared secret - DRPC will generate and propagate this secret from hub to clusters
@@ -141,6 +132,15 @@ func (m *replicationGroupSourceMachine) Synchronize(ctx context.Context) (mover.
 		return mover.InProgress(), nil
 	}
 
+	m.Logger.Info("Restore PVCs from volume group snapshot")
+
+	restoredPVCs, err := m.VolumeGroupHandler.RestoreVolumesFromVolumeGroupSnapshot(ctx, m.ReplicationGroupSource)
+	if err != nil {
+		m.Logger.Error(err, "Failed to restore volume group snapshot")
+
+		return mover.InProgress(), err
+	}
+
 	replicationSources, err := m.VolumeGroupHandler.CreateOrUpdateReplicationSourceForRestoredPVCs(
 		ctx, m.ReplicationGroupSource.Status.LastSyncStartTime.String(), restoredPVCs, m.ReplicationGroupSource)
 	if err != nil {
diff --git a/internal/controller/cephfscg/utils.go b/internal/controller/cephfscg/utils.go
index 9790ab32f..070232df9 100644
--- a/internal/controller/cephfscg/utils.go
+++ b/internal/controller/cephfscg/utils.go
@@ -11,10 +11,8 @@ import (
 	"github.com/ramendr/ramen/internal/controller/volsync"
 	corev1 "k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"sigs.k8s.io/controller-runtime/pkg/client"
-	ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 )
 
 // ------------- [Begin] Copied from existing code in Ramen ----
@@ -54,55 +52,6 @@ func getLocalServiceNameForRD(rdName string) string {
 	return fmt.Sprintf("volsync-rsync-tls-dst-%s", rdName)
 }
 
-// ------------- [End] Copied from existing code in Ramen ----
-
-// ------------- [Begin] Edited from existing code in Ramen ----
-
-// Copied from func (v *VSHandler) ModifyRSSpecForCephFS
-func GetRestoreStorageClass(
-	ctx context.Context, k8sClient client.Client, storageClassName string,
-	defaultCephFSCSIDriverName string,
-) (*storagev1.StorageClass, error) {
-	storageClass, err := GetStorageClass(ctx, k8sClient, &storageClassName)
-	if err != nil {
-		return nil, err
-	}
-
-	if storageClass.Provisioner != defaultCephFSCSIDriverName {
-		return storageClass, nil // No workaround required
-	}
-
-	// Create/update readOnlyPVCStorageClass
-	readOnlyPVCStorageClass := &storagev1.StorageClass{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: storageClass.GetName() + "-vrg",
-		},
-	}
-
-	_, err = ctrlutil.CreateOrUpdate(ctx, k8sClient, readOnlyPVCStorageClass, func() error {
-		// Do not update the storageclass if it already exists - Provisioner and Parameters are immutable anyway
-		if readOnlyPVCStorageClass.CreationTimestamp.IsZero() {
-			readOnlyPVCStorageClass.Provisioner = storageClass.Provisioner
-
-			// Copy other parameters from the original storage class
-			readOnlyPVCStorageClass.Parameters = map[string]string{}
-			for k, v := range storageClass.Parameters {
-				readOnlyPVCStorageClass.Parameters[k] = v
-			}
-
-			// Set backingSnapshot parameter to true
-			readOnlyPVCStorageClass.Parameters["backingSnapshot"] = "true"
-		}
-
-		return nil
-	})
-	if err != nil {
-		return nil, fmt.Errorf("%w", err)
-	}
-
-	return readOnlyPVCStorageClass, nil
-}
-
 // Copied from func (v *VSHandler) getStorageClass(
 func GetStorageClass(
 	ctx context.Context, k8sClient client.Client, storageClassName *string,
diff --git a/internal/controller/cephfscg/volumegroupsourcehandler.go b/internal/controller/cephfscg/volumegroupsourcehandler.go
index 07ea318d8..e9c197736 100644
--- a/internal/controller/cephfscg/volumegroupsourcehandler.go
+++ b/internal/controller/cephfscg/volumegroupsourcehandler.go
@@ -26,8 +26,8 @@ import (
 )
 
 var (
-	VolumeGroupSnapshotNameFormat = "cephfscg-%s"
-	RestorePVCinCGNameFormat      = "cephfscg-%s"
+	VolumeGroupSnapshotNameFormat = "vs-cg-%s"
+	RestorePVCinCGNameFormat      = "vs-cg-%s"
 	SnapshotGroup                 = "snapshot.storage.k8s.io"
 	SnapshotGroupKind             = "VolumeSnapshot"
 )
@@ -146,7 +146,7 @@ func (h *volumeGroupSourceHandler) CreateOrUpdateVolumeGroupSnapshot(
 	return nil
 }
 
-// CleanVolumeGroupSnapshot delete restored pvc, replicationsource and VolumeGroupSnapshot
+// CleanVolumeGroupSnapshot deletes the restored PVCs and the VolumeGroupSnapshot
 //
 //nolint:funlen
 func (h *volumeGroupSourceHandler) CleanVolumeGroupSnapshot(
@@ -214,42 +214,48 @@ func (h *volumeGroupSourceHandler) CleanVolumeGroupSnapshot(
 	return nil
 }
 
-// RestoreVolumesFromVolumeGroupSnapshot restore VolumeGroupSnapshot to PVCs
+// RestoreVolumesFromVolumeGroupSnapshot restores PVCs from the VolumeGroupSnapshot
+//
+//nolint:funlen,cyclop
 func (h *volumeGroupSourceHandler) RestoreVolumesFromVolumeGroupSnapshot(
 	ctx context.Context, owner metav1.Object,
 ) ([]RestoredPVC, error) {
 	logger := h.Logger.WithName("RestoreVolumesFromVolumeGroupSnapshot")
 	logger.Info("Get volume group snapshot")
 
-	volumeGroupSnapshot := &vgsv1alphfa1.VolumeGroupSnapshot{}
+	vgs := &vgsv1alphfa1.VolumeGroupSnapshot{}
 	if err := h.Client.Get(ctx,
 		types.NamespacedName{Name: h.VolumeGroupSnapshotName, Namespace: h.VolumeGroupSnapshotNamespace},
-		volumeGroupSnapshot); err != nil {
+		vgs); err != nil {
 		return nil, fmt.Errorf("failed to get volume group snapshot: %w", err)
 	}
 
-	if volumeGroupSnapshot.Status == nil || volumeGroupSnapshot.Status.ReadyToUse == nil ||
-		(volumeGroupSnapshot.Status.ReadyToUse != nil && !*volumeGroupSnapshot.Status.ReadyToUse) {
+	if vgs.Status == nil || vgs.Status.ReadyToUse == nil ||
+		!*vgs.Status.ReadyToUse {
 		return nil, fmt.Errorf("can't restore volume group snapshot: volume group snapshot is not ready to be used")
 	}
 
 	restoredPVCs := []RestoredPVC{}
 
-	for _, pvcVSRef := range volumeGroupSnapshot.Status.PVCVolumeSnapshotRefList {
+	for _, pvcVSRef := range vgs.Status.PVCVolumeSnapshotRefList {
 		logger.Info("Get PVCName from volume snapshot", "PVCName", pvcVSRef.PersistentVolumeClaimRef.Name,
 			"VolumeSnapshotName", pvcVSRef.VolumeSnapshotRef.Name)
 
 		pvc, err := util.GetPVC(ctx, h.Client,
-			types.NamespacedName{Name: pvcVSRef.PersistentVolumeClaimRef.Name, Namespace: volumeGroupSnapshot.Namespace})
+			types.NamespacedName{Name: pvcVSRef.PersistentVolumeClaimRef.Name, Namespace: vgs.Namespace})
 		if err != nil {
 			return nil, fmt.Errorf("failed to get PVC from VGS %s: %w",
-				volumeGroupSnapshot.Namespace+"/"+pvcVSRef.PersistentVolumeClaimRef.Name, err)
+				vgs.Namespace+"/"+pvcVSRef.PersistentVolumeClaimRef.Name, err)
 		}
 
-		restoreStorageClass, err := GetRestoreStorageClass(ctx, h.Client,
-			*pvc.Spec.StorageClassName, h.DefaultCephFSCSIDriverName)
+		storageClass, err := GetStorageClass(ctx, h.Client, pvc.Spec.StorageClassName)
 		if err != nil {
-			return nil, fmt.Errorf("failed to get Restore Storage Class from PVC %s: %w", pvc.Name+"/"+pvc.Namespace, err)
+			return nil, err
+		}
+
+		restoreAccessModes := []corev1.PersistentVolumeAccessMode{corev1.ReadOnlyMany}
+		if storageClass.Provisioner != h.DefaultCephFSCSIDriverName {
+			restoreAccessModes = pvc.Spec.AccessModes
 		}
 
 		RestoredPVCNamespacedName := types.NamespacedName{
@@ -258,7 +264,7 @@ func (h *volumeGroupSourceHandler) RestoreVolumesFromVolumeGroupSnapshot(
 		}
 		if err := h.RestoreVolumesFromSnapshot(
 			ctx, pvcVSRef.VolumeSnapshotRef.Name, pvc, RestoredPVCNamespacedName,
-			restoreStorageClass.GetName(), owner); err != nil {
+			restoreAccessModes, owner); err != nil {
 			return nil, fmt.Errorf("failed to restore volumes from snapshot %s: %w",
 				pvcVSRef.VolumeSnapshotRef.Name+"/"+pvc.Namespace, err)
 		}
@@ -286,7 +292,7 @@ func (h *volumeGroupSourceHandler) RestoreVolumesFromSnapshot(
 	vsName string, pvc *corev1.PersistentVolumeClaim,
 	restoredPVCNamespacedname types.NamespacedName,
-	restoreStorageClassName string,
+	restoreAccessModes []corev1.PersistentVolumeAccessMode,
 	owner metav1.Object,
 ) error {
 	logger := h.Logger.WithName("RestoreVolumesFromSnapshot").
@@ -351,8 +357,8 @@ func (h *volumeGroupSourceHandler) RestoreVolumesFromSnapshot(
 	}
 	if restoredPVC.CreationTimestamp.IsZero() { // set immutable fields
-		restoredPVC.Spec.AccessModes = []corev1.PersistentVolumeAccessMode{corev1.ReadOnlyMany}
-		restoredPVC.Spec.StorageClassName = &restoreStorageClassName
+		restoredPVC.Spec.AccessModes = restoreAccessModes
+		restoredPVC.Spec.StorageClassName = pvc.Spec.StorageClassName
 		restoredPVC.Spec.DataSource = &snapshotRef
 	}
 
@@ -424,8 +430,7 @@ func (h *volumeGroupSourceHandler) CreateOrUpdateReplicationSourceForRestoredPVC
 	}
 	replicationSource.Spec.RsyncTLS = &volsyncv1alpha1.ReplicationSourceRsyncTLSSpec{
 		ReplicationSourceVolumeOptions: volsyncv1alpha1.ReplicationSourceVolumeOptions{
-			AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadOnlyMany},
-			CopyMethod:  volsyncv1alpha1.CopyMethodDirect,
+			CopyMethod: volsyncv1alpha1.CopyMethodDirect,
 		},
 
 		KeySecret: &h.VolsyncKeySecretName,
diff --git a/internal/controller/drplacementcontrol.go b/internal/controller/drplacementcontrol.go
index df8486ec9..785720c66 100644
--- a/internal/controller/drplacementcontrol.go
+++ b/internal/controller/drplacementcontrol.go
@@ -1707,6 +1707,7 @@ func (d *DRPCInstance) updateVRGOptionalFields(vrg, vrgFromView *rmn.VolumeRepli
 		DoNotDeletePVCAnnotation:      d.instance.GetAnnotations()[DoNotDeletePVCAnnotation],
 		DRPCUIDAnnotation:             string(d.instance.UID),
 		rmnutil.IsCGEnabledAnnotation: d.instance.GetAnnotations()[rmnutil.IsCGEnabledAnnotation],
+		rmnutil.UseVolSyncAnnotation:  d.instance.GetAnnotations()[rmnutil.UseVolSyncAnnotation],
 	}
 
 	vrg.Spec.ProtectedNamespaces = d.instance.Spec.ProtectedNamespaces
diff --git a/internal/controller/volsync/vshandler.go b/internal/controller/volsync/vshandler.go
index 03b1f9b1e..7f0c9981e 100644
--- a/internal/controller/volsync/vshandler.go
+++ b/internal/controller/volsync/vshandler.go
@@ -408,13 +408,9 @@ func (v *VSHandler) createOrUpdateRS(rsSpec ramendrv1alpha1.VolSyncReplicationSo
 		return nil, err
 	}
 
-	volumeSnapshotClassName, err := v.getVolumeSnapshotClassFromPVCStorageClass(storageClass)
-	if err != nil {
-		return nil, err
-	}
+	v.ModifyRSSpecForCephFS(&rsSpec, storageClass)
 
-	// Fix for CephFS (replication source only) - may need different storageclass and access modes
-	err = v.ModifyRSSpecForCephFS(&rsSpec, storageClass)
+	volumeSnapshotClassName, err := v.getVolumeSnapshotClassFromPVCStorageClass(storageClass)
 	if err != nil {
 		return nil, err
 	}
@@ -1338,60 +1334,13 @@ func (v *VSHandler) getRsyncServiceType() *corev1.ServiceType {
 // For CephFS only, there is a problem where restoring a PVC from snapshot can be very slow when there are a lot of
 // files - on every replication cycle we need to create a PVC from snapshot in order to get a point-in-time copy of
 // the source PVC to sync with the replicationdestination.
-// This workaround follows the instructions here:
-// https://github.com/ceph/ceph-csi/blob/devel/docs/cephfs-snapshot-backed-volumes.md
-//
-// Steps:
-// 1. If the storageclass detected is cephfs, create a new storageclass with backingSnapshot: "true" parameter
-// (or reuse if it already exists). If not cephfs, return and do not modify rsSpec.
-// 2. Modify rsSpec to use the new storageclass and also update AccessModes to 'ReadOnlyMany' as per the instructions
-// above.
+// For CephFS PVCs, modify the rsSpec AccessModes to 'ReadOnlyMany'.
 func (v *VSHandler) ModifyRSSpecForCephFS(rsSpec *ramendrv1alpha1.VolSyncReplicationSourceSpec,
 	storageClass *storagev1.StorageClass,
-) error {
-	if storageClass.Provisioner != v.defaultCephFSCSIDriverName {
-		return nil // No workaround required
-	}
-
-	v.log.Info("CephFS storageclass detected on source PVC, creating replicationsource with read-only "+
-		" PVC from snapshot", "storageClassName", storageClass.GetName())
-
-	// Create/update readOnlyPVCStorageClass
-	readOnlyPVCStorageClass := &storagev1.StorageClass{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: storageClass.GetName() + "-vrg",
-		},
-	}
-
-	op, err := ctrlutil.CreateOrUpdate(v.ctx, v.client, readOnlyPVCStorageClass, func() error {
-		// Do not update the storageclass if it already exists - Provisioner and Parameters are immutable anyway
-		if readOnlyPVCStorageClass.CreationTimestamp.IsZero() {
-			readOnlyPVCStorageClass.Provisioner = storageClass.Provisioner
-
-			// Copy other parameters from the original storage class
-			// Note - not copying volumebindingmode or reclaim policy from the source storageclass will leave defaults
-			readOnlyPVCStorageClass.Parameters = map[string]string{}
-			for k, v := range storageClass.Parameters {
-				readOnlyPVCStorageClass.Parameters[k] = v
-			}
-
-			// Set backingSnapshot parameter to true
-			readOnlyPVCStorageClass.Parameters["backingSnapshot"] = "true"
-		}
-
-		return nil
-	})
-	if err != nil {
-		return fmt.Errorf("%w", err)
+) {
+	if storageClass.Provisioner == v.defaultCephFSCSIDriverName {
+		rsSpec.ProtectedPVC.AccessModes = []corev1.PersistentVolumeAccessMode{corev1.ReadOnlyMany}
 	}
-
-	v.log.Info("StorageClass for readonly cephfs PVC createOrUpdate Complete", "op", op)
-
-	// Update the rsSpec with access modes and the special storageclass
-	rsSpec.ProtectedPVC.AccessModes = []corev1.PersistentVolumeAccessMode{corev1.ReadOnlyMany}
-	rsSpec.ProtectedPVC.StorageClassName = &readOnlyPVCStorageClass.Name
-
-	return nil
 }
 
 func (v *VSHandler) GetVolumeSnapshotClassFromPVCStorageClass(storageClassName *string) (string, error) {
@@ -1821,21 +1770,10 @@ func (v *VSHandler) reconcileLocalRS(rd *volsyncv1alpha1.ReplicationDestination,
 ) {
 	v.log.Info("Reconciling localRS", "RD", rd.GetName())
 
-	storageClass, err := v.getStorageClass(rdSpec.ProtectedPVC.StorageClassName)
-	if err != nil {
-		return nil, err
-	}
-
 	rsSpec := &ramendrv1alpha1.VolSyncReplicationSourceSpec{
 		ProtectedPVC: rdSpec.ProtectedPVC,
 	}
 
-	// Fix for CephFS (replication source only) - may need different storageclass and access modes
-	err = v.ModifyRSSpecForCephFS(rsSpec, storageClass)
-	if err != nil {
-		return nil, err
-	}
-
 	pvc, err := v.setupLocalRS(rd, rdSpec, snapshotRef)
 	if err != nil {
 		return nil, err
@@ -1983,16 +1921,20 @@ func (v *VSHandler) setupLocalRS(rd *volsyncv1alpha1.ReplicationDestination,
 	}
 
-	// In all other cases, we have to create a RO PVC.
-	return v.createReadOnlyPVCFromSnapshot(rd, rdSpec, snapshotRef, restoreSize)
+	// In all other cases, we have to create a PVC from the snapshot.
+	return v.createPVCFromSnapshot(rd, rdSpec, snapshotRef, restoreSize)
 }
 
-func (v *VSHandler) createReadOnlyPVCFromSnapshot(rd *volsyncv1alpha1.ReplicationDestination,
+func (v *VSHandler) createPVCFromSnapshot(rd *volsyncv1alpha1.ReplicationDestination,
 	rdSpec *ramendrv1alpha1.VolSyncReplicationDestinationSpec,
 	snapshotRef *corev1.TypedLocalObjectReference,
 	snapRestoreSize *resource.Quantity,
 ) (*corev1.PersistentVolumeClaim, error) {
-	l := v.log.WithValues("pvcName", rd.GetName(), "snapshotRef", snapshotRef,
-		"snapRestoreSize", snapRestoreSize)
+	l := v.log.WithValues("pvcName", rd.GetName(), "snapshotRef", snapshotRef, "snapRestoreSize", snapRestoreSize)
+
+	storageClass, err := v.getStorageClass(rdSpec.ProtectedPVC.StorageClassName)
+	if err != nil {
+		return nil, err
+	}
 
 	pvc := &corev1.PersistentVolumeClaim{
 		ObjectMeta: metav1.ObjectMeta{
@@ -2017,6 +1959,11 @@ func (v *VSHandler) createReadOnlyPVCFromSnapshot(rd *volsyncv1alpha1.Replicatio
 
 	accessModes := []corev1.PersistentVolumeAccessMode{corev1.ReadOnlyMany}
 
+	// Use the protectedPVC accessModes when the CSI driver is not the default (openshift-storage.cephfs.csi.ceph.com)
+	if storageClass.Provisioner != v.defaultCephFSCSIDriverName {
+		accessModes = rdSpec.ProtectedPVC.AccessModes
+	}
+
 	if pvc.CreationTimestamp.IsZero() { // set immutable fields
 		pvc.Spec.AccessModes = accessModes
 		pvc.Spec.StorageClassName = rd.Spec.RsyncTLS.StorageClassName
@@ -2032,8 +1979,6 @@ func (v *VSHandler) createReadOnlyPVCFromSnapshot(rd *volsyncv1alpha1.Replicatio
 		return nil
 	})
 	if err != nil {
-		l.Error(err, "Unable to createOrUpdate PVC from snapshot for localRS")
-
 		return nil, fmt.Errorf("error creating or updating PVC from snapshot for localRS (%w)", err)
 	}
 
diff --git a/internal/controller/volsync/vshandler_test.go b/internal/controller/volsync/vshandler_test.go
index 67f0dc8be..a3ebfb264 100644
--- a/internal/controller/volsync/vshandler_test.go
+++ b/internal/controller/volsync/vshandler_test.go
@@ -252,10 +252,18 @@ var _ = Describe("VolSync Handler - Volume Replication Class tests", func() {
 				},
 			}, storageClassForTest)).To(Succeed())
 
-			//
-			// Call ModifyRSSpecForCephFS
-			//
-			Expect(vsHandler.ModifyRSSpecForCephFS(&testRsSpec, storageClassForTest)).To(Succeed())
+			vsHandler.ModifyRSSpecForCephFS(&testRsSpec, storageClassForTest)
+			if storageClassForTest.Provisioner == testCephFSStorageDriverName {
+				Expect(testRsSpec.ProtectedPVC.AccessModes).To(Equal(
+					[]corev1.PersistentVolumeAccessMode{
+						corev1.ReadOnlyMany,
+					}))
+			} else {
+				Expect(testRsSpec.ProtectedPVC.AccessModes).To(Equal(
+					[]corev1.PersistentVolumeAccessMode{
+						corev1.ReadWriteMany,
+					}))
+			}
 		})
 
 		Context("When the source PVC is not using a cephfs storageclass", func() {
@@ -270,128 +278,13 @@
 		})
 
 		Context("When the sourcePVC is using a cephfs storageclass", func() {
-			customBackingSnapshotStorageClassName := testCephFSStorageClassName + "-vrg"
-
 			BeforeEach(func() {
 				// Make sure the source PVC uses the cephfs storageclass
 				testSourcePVC.Spec.StorageClassName = &testCephFSStorageClassName
 			})
 
-			JustBeforeEach(func() {
-				// Common tests - rsSpec should be modified with settings to allow pvc from snapshot
-				// to use our custom cephfs storageclass and ReadOnlyMany accessModes
-				Expect(testRsSpecOrig).NotTo(Equal(testRsSpec))
-
-				// Should use the custom storageclass with backingsnapshot: true parameter
-				Expect(*testRsSpec.ProtectedPVC.StorageClassName).To(Equal(customBackingSnapshotStorageClassName))
-
-				// AccessModes should be updated to ReadOnlyMany
-				Expect(testRsSpec.ProtectedPVC.AccessModes).To(Equal(
-					[]corev1.PersistentVolumeAccessMode{
-						corev1.ReadOnlyMany,
-					}))
-			})
-
-			AfterEach(func() {
-				// Delete the custom storage class that may have been created by test
-				custStorageClass := &storagev1.StorageClass{
-					ObjectMeta: metav1.ObjectMeta{
-						Name: customBackingSnapshotStorageClassName,
-					},
-				}
-				err := k8sClient.Delete(ctx, custStorageClass)
-				if err != nil {
-					Expect(kerrors.IsNotFound(err)).To(BeTrue())
-				}
-
-				Eventually(func() bool {
-					err := k8sClient.Get(ctx, client.ObjectKeyFromObject(custStorageClass), custStorageClass)
-
-					return kerrors.IsNotFound(err)
-				}, maxWait, interval).Should(BeTrue())
-			})
-
-			Context("When the custom cephfs backing storage class for readonly pvc from snap does not exist", func() {
-				// Delete the custom vrg storageclass if it exists
-				BeforeEach(func() {
-					custStorageClass := &storagev1.StorageClass{
-						ObjectMeta: metav1.ObjectMeta{
-							Name: customBackingSnapshotStorageClassName,
-						},
-					}
-					err := k8sClient.Delete(ctx, custStorageClass)
-					if err != nil {
-						Expect(kerrors.IsNotFound(err)).To(BeTrue())
-					}
-
-					Eventually(func() bool {
-						err := k8sClient.Get(ctx, client.ObjectKeyFromObject(custStorageClass), custStorageClass)
-
-						return kerrors.IsNotFound(err)
-					}, maxWait, interval).Should(BeTrue())
-				})
-
-				It("ModifyRSSpecForCephFS should modify the rsSpec and create the new storageclass", func() {
-					// RSspec modification checks in the outer context JustBeforeEach()
-
-					newStorageClass := &storagev1.StorageClass{
-						ObjectMeta: metav1.ObjectMeta{
-							Name: customBackingSnapshotStorageClassName,
-						},
-					}
-
-					Eventually(func() error {
-						return k8sClient.Get(ctx, client.ObjectKeyFromObject(newStorageClass), newStorageClass)
-					}, maxWait, interval).Should(Succeed())
-
-					Expect(newStorageClass.Parameters["backingSnapshot"]).To(Equal("true"))
-
-					// Other parameters from the test cephfs storageclass should be copied over
-					for k, v := range testCephFSStorageClass.Parameters {
-						Expect(newStorageClass.Parameters[k]).To(Equal(v))
-					}
-				})
-			})
-
-			Context("When the custom cephfs backing storage class for readonly pvc from snap exists", func() {
-				var preExistingCustStorageClass *storagev1.StorageClass
-
-				BeforeEach(func() {
-					preExistingCustStorageClass = &storagev1.StorageClass{
-						ObjectMeta: metav1.ObjectMeta{
-							Name: customBackingSnapshotStorageClassName,
-						},
-						Provisioner: testCephFSStorageDriverName,
-						Parameters: map[string]string{ // Not the same params as our CephFS storageclass for test
-							"different-param-1": "abc",
-							"different-param-2": "def",
-							"backingSnapshot":   "true",
-						},
-					}
-					Expect(k8sClient.Create(ctx, preExistingCustStorageClass)).To(Succeed())
-
-					// Confirm it's created
-					Eventually(func() error {
-						return k8sClient.Get(ctx,
-							client.ObjectKeyFromObject(preExistingCustStorageClass), preExistingCustStorageClass)
-					}, maxWait, interval).Should(Succeed())
-				})
-
-				It("ModifyRSSpecForCephFS should modify the rsSpec but not modify the new custom storageclass", func() {
-					// Load the custom storageclass
-					newStorageClass := &storagev1.StorageClass{
-						ObjectMeta: metav1.ObjectMeta{
-							Name: customBackingSnapshotStorageClassName,
-						},
-					}
-
-					Eventually(func() error {
-						return k8sClient.Get(ctx, client.ObjectKeyFromObject(newStorageClass), newStorageClass)
-					}, maxWait, interval).Should(Succeed())
-
-					// Parameters should match the original, unmodified
-					Expect(newStorageClass.Parameters).To(Equal(preExistingCustStorageClass.Parameters))
-				})
+			It("ModifyRSSpecForCephFS should modify the rsSpec protectedPVC accessModes", func() {
+				Expect(testRsSpecOrig).ToNot(Equal(testRsSpec))
 			})
 		})
 	})
diff --git a/internal/controller/volumereplicationgroup_controller.go b/internal/controller/volumereplicationgroup_controller.go
index f02fd6d25..226523428 100644
--- a/internal/controller/volumereplicationgroup_controller.go
+++ b/internal/controller/volumereplicationgroup_controller.go
@@ -757,6 +757,14 @@ func (v *VRGInstance) addConsistencyGroupLabel(pvc *corev1.PersistentVolumeClaim
 		return fmt.Errorf("missing storageID for PVC %s/%s", pvc.GetNamespace(), pvc.GetName())
 	}
 
+	// FIXME: a temporary workaround for issue DFBUGS-1209
+	// Remove this block once DFBUGS-1209 is fixed
+	if storageClass.Provisioner == DefaultCephFSCSIDriverName {
+		storageID = "cephfs-" + storageID
+	} else {
+		storageID = "rbd-" + storageID
+	}
+
 	// Add label for PVC, showing that this PVC is part of consistency group
 	return util.NewResourceUpdater(pvc).
 		AddLabel(ConsistencyGroupLabel, storageID).
diff --git a/internal/controller/vrg_volrep.go b/internal/controller/vrg_volrep.go
index 701c5c212..75af66dc0 100644
--- a/internal/controller/vrg_volrep.go
+++ b/internal/controller/vrg_volrep.go
@@ -437,7 +437,7 @@ func (v *VRGInstance) protectPVC(pvc *corev1.PersistentVolumeClaim, log logr.Log
 // any further and it can be skipped. The pvc will go away eventually.
 func skipPVC(pvc *corev1.PersistentVolumeClaim, log logr.Logger) (bool, string) {
 	if pvc.Status.Phase != corev1.ClaimBound {
-		log.Info("Skipping handling of VR as PersistentVolumeClaim is not bound", "pvcPhase", pvc.Status.Phase)
+		log.Info("Skipping handling of VR as PVC is not bound", "pvcPhase", pvc.Status.Phase)
 
 		msg := "PVC not bound yet"
 		// v.updateProtectedPVCCondition(pvc.Name, VRGConditionReasonProgressing, msg)
@@ -451,7 +451,7 @@ func skipPVC(pvc *corev1.PersistentVolumeClaim, log logr.Logger) (bool, string)
 func isPVCDeletedAndNotProtected(pvc *corev1.PersistentVolumeClaim, log logr.Logger) (bool, string) {
 	// If PVC deleted but not yet protected with a finalizer, skip it!
 	if !containsString(pvc.Finalizers, PvcVRFinalizerProtected) && rmnutil.ResourceIsDeleted(pvc) {
-		log.Info("Skipping PersistentVolumeClaim, as it is marked for deletion and not yet protected")
+		log.Info("Skipping PVC, as it is marked for deletion and not yet protected")
 
 		msg := "Skipping pvc marked for deletion"
 		// v.updateProtectedPVCCondition(pvc.Name, VRGConditionReasonProgressing, msg)
diff --git a/internal/controller/vrg_volsync.go b/internal/controller/vrg_volsync.go
index 2f7b107f7..500abee86 100644
--- a/internal/controller/vrg_volsync.go
+++ b/internal/controller/vrg_volsync.go
@@ -113,6 +113,12 @@ func (v *VRGInstance) reconcileVolSyncAsPrimary(finalSyncPrepared *bool) (requeu
 	}
 
 	for _, pvc := range v.volSyncPVCs {
+		if pvc.Status.Phase != corev1.ClaimBound {
+			v.log.Info("Skipping PVC as it is not bound", "name", pvc.GetName())
+
+			continue
+		}
+
 		requeuePVC := v.reconcilePVCAsVolSyncPrimary(pvc)
 		if requeuePVC {
 			requeue = true
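
Reviewer note (not part of the patch): the core behavioral change above is that the old "-vrg" backingSnapshot StorageClass workaround is dropped in favor of a direct access-mode decision, made the same way in RestoreVolumesFromVolumeGroupSnapshot and createPVCFromSnapshot. Below is a minimal, self-contained Go sketch of that decision in isolation. The helper name restoreAccessModesFor and the standalone packaging are illustrative and do not exist in the Ramen codebase; the provisioner string in main mirrors the default named in the diff's comments.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
)

// restoreAccessModesFor sketches the decision the patch adds: a CephFS
// snapshot-backed restore is mounted read-only, so it gets ReadOnlyMany;
// any other provisioner keeps the source PVC's own access modes (and, per
// the patch, the restored PVC also reuses the source PVC's StorageClass).
func restoreAccessModesFor(
	storageClass *storagev1.StorageClass,
	sourcePVC *corev1.PersistentVolumeClaim,
	defaultCephFSCSIDriverName string,
) []corev1.PersistentVolumeAccessMode {
	if storageClass.Provisioner == defaultCephFSCSIDriverName {
		// CephFS: restore the snapshot as a read-only, snapshot-backed volume.
		return []corev1.PersistentVolumeAccessMode{corev1.ReadOnlyMany}
	}

	// Non-CephFS: keep whatever the source PVC requested (e.g. ReadWriteMany).
	return sourcePVC.Spec.AccessModes
}

func main() {
	sc := &storagev1.StorageClass{Provisioner: "openshift-storage.cephfs.csi.ceph.com"}
	pvc := &corev1.PersistentVolumeClaim{
		Spec: corev1.PersistentVolumeClaimSpec{
			AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany},
		},
	}

	// Prints [ReadOnlyMany]: the CephFS provisioner matches the default.
	fmt.Println(restoreAccessModesFor(sc, pvc, "openshift-storage.cephfs.csi.ceph.com"))
}

Pairing this decision with pvc.Spec.StorageClassName, rather than a generated "-vrg" class, is what lets the patch delete GetRestoreStorageClass and the ctrlutil.CreateOrUpdate of the backingSnapshot StorageClass entirely.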