Skip to content

Commit

Permalink
Merge pull request #432 from red-hat-storage/sync_us--main
Browse files Browse the repository at this point in the history
Syncing latest changes from upstream main for ramen
  • Loading branch information
ShyamsundarR authored Jan 8, 2025
2 parents cb66838 + dae64cd commit d0032a7
Show file tree
Hide file tree
Showing 9 changed files with 83 additions and 277 deletions.
18 changes: 9 additions & 9 deletions internal/controller/cephfscg/replicationgroupsource.go
Original file line number Diff line number Diff line change
Expand Up @@ -113,15 +113,6 @@ func (m *replicationGroupSourceMachine) Synchronize(ctx context.Context) (mover.
return mover.InProgress(), err
}

m.Logger.Info("Restore PVCs from volume group snapshot")

restoredPVCs, err := m.VolumeGroupHandler.RestoreVolumesFromVolumeGroupSnapshot(ctx, m.ReplicationGroupSource)
if err != nil {
m.Logger.Error(err, "Failed to restore volume group snapshot")

return mover.InProgress(), err
}

m.Logger.Info("Create ReplicationSource for each Restored PVC")
vrgName := m.ReplicationGroupSource.GetLabels()[volsync.VRGOwnerNameLabel]
// Pre-allocated shared secret - DRPC will generate and propagate this secret from hub to clusters
Expand All @@ -141,6 +132,15 @@ func (m *replicationGroupSourceMachine) Synchronize(ctx context.Context) (mover.
return mover.InProgress(), nil
}

m.Logger.Info("Restore PVCs from volume group snapshot")

restoredPVCs, err := m.VolumeGroupHandler.RestoreVolumesFromVolumeGroupSnapshot(ctx, m.ReplicationGroupSource)
if err != nil {
m.Logger.Error(err, "Failed to restore volume group snapshot")

return mover.InProgress(), err
}

replicationSources, err := m.VolumeGroupHandler.CreateOrUpdateReplicationSourceForRestoredPVCs(
ctx, m.ReplicationGroupSource.Status.LastSyncStartTime.String(), restoredPVCs, m.ReplicationGroupSource)
if err != nil {
Expand Down
51 changes: 0 additions & 51 deletions internal/controller/cephfscg/utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,10 +11,8 @@ import (
"github.com/ramendr/ramen/internal/controller/volsync"
corev1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// ------------- [Begin] Copied from existing code in Ramen ----
Expand Down Expand Up @@ -54,55 +52,6 @@ func getLocalServiceNameForRD(rdName string) string {
return fmt.Sprintf("volsync-rsync-tls-dst-%s", rdName)
}

// ------------- [End] Copied from existing code in Ramen ----

// ------------- [Begin] Edited from existing code in Ramen ----

// Copied from func (v *VSHandler) ModifyRSSpecForCephFS
func GetRestoreStorageClass(
ctx context.Context, k8sClient client.Client, storageClassName string,
defaultCephFSCSIDriverName string,
) (*storagev1.StorageClass, error) {
storageClass, err := GetStorageClass(ctx, k8sClient, &storageClassName)
if err != nil {
return nil, err
}

if storageClass.Provisioner != defaultCephFSCSIDriverName {
return storageClass, nil // No workaround required
}

// Create/update readOnlyPVCStorageClass
readOnlyPVCStorageClass := &storagev1.StorageClass{
ObjectMeta: metav1.ObjectMeta{
Name: storageClass.GetName() + "-vrg",
},
}

_, err = ctrlutil.CreateOrUpdate(ctx, k8sClient, readOnlyPVCStorageClass, func() error {
// Do not update the storageclass if it already exists - Provisioner and Parameters are immutable anyway
if readOnlyPVCStorageClass.CreationTimestamp.IsZero() {
readOnlyPVCStorageClass.Provisioner = storageClass.Provisioner

// Copy other parameters from the original storage class
readOnlyPVCStorageClass.Parameters = map[string]string{}
for k, v := range storageClass.Parameters {
readOnlyPVCStorageClass.Parameters[k] = v
}

// Set backingSnapshot parameter to true
readOnlyPVCStorageClass.Parameters["backingSnapshot"] = "true"
}

return nil
})
if err != nil {
return nil, fmt.Errorf("%w", err)
}

return readOnlyPVCStorageClass, nil
}

// Copied from func (v *VSHandler) getStorageClass(
func GetStorageClass(
ctx context.Context, k8sClient client.Client, storageClassName *string,
Expand Down
45 changes: 25 additions & 20 deletions internal/controller/cephfscg/volumegroupsourcehandler.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,8 +26,8 @@ import (
)

var (
VolumeGroupSnapshotNameFormat = "cephfscg-%s"
RestorePVCinCGNameFormat = "cephfscg-%s"
VolumeGroupSnapshotNameFormat = "vs-cg-%s"
RestorePVCinCGNameFormat = "vs-cg-%s"
SnapshotGroup = "snapshot.storage.k8s.io"
SnapshotGroupKind = "VolumeSnapshot"
)
Expand Down Expand Up @@ -146,7 +146,7 @@ func (h *volumeGroupSourceHandler) CreateOrUpdateVolumeGroupSnapshot(
return nil
}

// CleanVolumeGroupSnapshot delete restored pvc, replicationsource and VolumeGroupSnapshot
// CleanVolumeGroupSnapshot deletes the restored PVCs and the VolumeGroupSnapshot
//
//nolint:funlen
func (h *volumeGroupSourceHandler) CleanVolumeGroupSnapshot(
Expand Down Expand Up @@ -214,42 +214,48 @@ func (h *volumeGroupSourceHandler) CleanVolumeGroupSnapshot(
return nil
}

// RestoreVolumesFromVolumeGroupSnapshot restore VolumeGroupSnapshot to PVCs
// RestoreVolumesFromVolumeGroupSnapshot restores VolumeGroupSnapshot to PVCs
//
//nolint:funlen,cyclop
func (h *volumeGroupSourceHandler) RestoreVolumesFromVolumeGroupSnapshot(
ctx context.Context, owner metav1.Object,
) ([]RestoredPVC, error) {
logger := h.Logger.WithName("RestoreVolumesFromVolumeGroupSnapshot")
logger.Info("Get volume group snapshot")

volumeGroupSnapshot := &vgsv1alphfa1.VolumeGroupSnapshot{}
vgs := &vgsv1alphfa1.VolumeGroupSnapshot{}
if err := h.Client.Get(ctx,
types.NamespacedName{Name: h.VolumeGroupSnapshotName, Namespace: h.VolumeGroupSnapshotNamespace},
volumeGroupSnapshot); err != nil {
vgs); err != nil {
return nil, fmt.Errorf("failed to get volume group snapshot: %w", err)
}

if volumeGroupSnapshot.Status == nil || volumeGroupSnapshot.Status.ReadyToUse == nil ||
(volumeGroupSnapshot.Status.ReadyToUse != nil && !*volumeGroupSnapshot.Status.ReadyToUse) {
if vgs.Status == nil || vgs.Status.ReadyToUse == nil ||
(vgs.Status.ReadyToUse != nil && !*vgs.Status.ReadyToUse) {
return nil, fmt.Errorf("can't restore volume group snapshot: volume group snapshot is not ready to be used")
}

restoredPVCs := []RestoredPVC{}

for _, pvcVSRef := range volumeGroupSnapshot.Status.PVCVolumeSnapshotRefList {
for _, pvcVSRef := range vgs.Status.PVCVolumeSnapshotRefList {
logger.Info("Get PVCName from volume snapshot",
"PVCName", pvcVSRef.PersistentVolumeClaimRef.Name, "VolumeSnapshotName", pvcVSRef.VolumeSnapshotRef.Name)

pvc, err := util.GetPVC(ctx, h.Client,
types.NamespacedName{Name: pvcVSRef.PersistentVolumeClaimRef.Name, Namespace: volumeGroupSnapshot.Namespace})
types.NamespacedName{Name: pvcVSRef.PersistentVolumeClaimRef.Name, Namespace: vgs.Namespace})
if err != nil {
return nil, fmt.Errorf("failed to get PVC from VGS %s: %w",
volumeGroupSnapshot.Namespace+"/"+pvcVSRef.PersistentVolumeClaimRef.Name, err)
vgs.Namespace+"/"+pvcVSRef.PersistentVolumeClaimRef.Name, err)
}

restoreStorageClass, err := GetRestoreStorageClass(ctx, h.Client,
*pvc.Spec.StorageClassName, h.DefaultCephFSCSIDriverName)
storageClass, err := GetStorageClass(ctx, h.Client, pvc.Spec.StorageClassName)
if err != nil {
return nil, fmt.Errorf("failed to get Restore Storage Class from PVC %s: %w", pvc.Name+"/"+pvc.Namespace, err)
return nil, err
}

restoreAccessModes := []corev1.PersistentVolumeAccessMode{corev1.ReadOnlyMany}
if storageClass.Provisioner != h.DefaultCephFSCSIDriverName {
restoreAccessModes = pvc.Spec.AccessModes
}

RestoredPVCNamespacedName := types.NamespacedName{
Expand All @@ -258,7 +264,7 @@ func (h *volumeGroupSourceHandler) RestoreVolumesFromVolumeGroupSnapshot(
}
if err := h.RestoreVolumesFromSnapshot(
ctx, pvcVSRef.VolumeSnapshotRef.Name, pvc, RestoredPVCNamespacedName,
restoreStorageClass.GetName(), owner); err != nil {
restoreAccessModes, owner); err != nil {
return nil, fmt.Errorf("failed to restore volumes from snapshot %s: %w",
pvcVSRef.VolumeSnapshotRef.Name+"/"+pvc.Namespace, err)
}
Expand Down Expand Up @@ -286,7 +292,7 @@ func (h *volumeGroupSourceHandler) RestoreVolumesFromSnapshot(
vsName string,
pvc *corev1.PersistentVolumeClaim,
restoredPVCNamespacedname types.NamespacedName,
restoreStorageClassName string,
restoreAccessModes []corev1.PersistentVolumeAccessMode,
owner metav1.Object,
) error {
logger := h.Logger.WithName("RestoreVolumesFromSnapshot").
Expand Down Expand Up @@ -351,8 +357,8 @@ func (h *volumeGroupSourceHandler) RestoreVolumesFromSnapshot(
}

if restoredPVC.CreationTimestamp.IsZero() { // set immutable fields
restoredPVC.Spec.AccessModes = []corev1.PersistentVolumeAccessMode{corev1.ReadOnlyMany}
restoredPVC.Spec.StorageClassName = &restoreStorageClassName
restoredPVC.Spec.AccessModes = restoreAccessModes
restoredPVC.Spec.StorageClassName = pvc.Spec.StorageClassName
restoredPVC.Spec.DataSource = &snapshotRef
}

Expand Down Expand Up @@ -424,8 +430,7 @@ func (h *volumeGroupSourceHandler) CreateOrUpdateReplicationSourceForRestoredPVC
}
replicationSource.Spec.RsyncTLS = &volsyncv1alpha1.ReplicationSourceRsyncTLSSpec{
ReplicationSourceVolumeOptions: volsyncv1alpha1.ReplicationSourceVolumeOptions{
AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadOnlyMany},
CopyMethod: volsyncv1alpha1.CopyMethodDirect,
CopyMethod: volsyncv1alpha1.CopyMethodDirect,
},

KeySecret: &h.VolsyncKeySecretName,
Expand Down
1 change: 1 addition & 0 deletions internal/controller/drplacementcontrol.go
Original file line number Diff line number Diff line change
Expand Up @@ -1711,6 +1711,7 @@ func (d *DRPCInstance) updateVRGOptionalFields(vrg, vrgFromView *rmn.VolumeRepli
DoNotDeletePVCAnnotation: d.instance.GetAnnotations()[DoNotDeletePVCAnnotation],
DRPCUIDAnnotation: string(d.instance.UID),
rmnutil.IsCGEnabledAnnotation: d.instance.GetAnnotations()[rmnutil.IsCGEnabledAnnotation],
rmnutil.UseVolSyncAnnotation: d.instance.GetAnnotations()[rmnutil.UseVolSyncAnnotation],
}

vrg.Spec.ProtectedNamespaces = d.instance.Spec.ProtectedNamespaces
Expand Down
93 changes: 19 additions & 74 deletions internal/controller/volsync/vshandler.go
Original file line number Diff line number Diff line change
Expand Up @@ -408,13 +408,9 @@ func (v *VSHandler) createOrUpdateRS(rsSpec ramendrv1alpha1.VolSyncReplicationSo
return nil, err
}

volumeSnapshotClassName, err := v.getVolumeSnapshotClassFromPVCStorageClass(storageClass)
if err != nil {
return nil, err
}
v.ModifyRSSpecForCephFS(&rsSpec, storageClass)

// Fix for CephFS (replication source only) - may need different storageclass and access modes
err = v.ModifyRSSpecForCephFS(&rsSpec, storageClass)
volumeSnapshotClassName, err := v.getVolumeSnapshotClassFromPVCStorageClass(storageClass)
if err != nil {
return nil, err
}
Expand Down Expand Up @@ -1338,60 +1334,13 @@ func (v *VSHandler) getRsyncServiceType() *corev1.ServiceType {
// For CephFS only, there is a problem where restoring a PVC from snapshot can be very slow when there are a lot of
// files - on every replication cycle we need to create a PVC from snapshot in order to get a point-in-time copy of
// the source PVC to sync with the replicationdestination.
// This workaround follows the instructions here:
// https://github.com/ceph/ceph-csi/blob/devel/docs/cephfs-snapshot-backed-volumes.md
//
// Steps:
// 1. If the storageclass detected is cephfs, create a new storageclass with backingSnapshot: "true" parameter
// (or reuse if it already exists). If not cephfs, return and do not modify rsSpec.
// 2. Modify rsSpec to use the new storageclass and also update AccessModes to 'ReadOnlyMany' as per the instructions
// above.
// If CephFS PVC, modify rsSpec AccessModes to use 'ReadOnlyMany'.
func (v *VSHandler) ModifyRSSpecForCephFS(rsSpec *ramendrv1alpha1.VolSyncReplicationSourceSpec,
storageClass *storagev1.StorageClass,
) error {
if storageClass.Provisioner != v.defaultCephFSCSIDriverName {
return nil // No workaround required
}

v.log.Info("CephFS storageclass detected on source PVC, creating replicationsource with read-only "+
" PVC from snapshot", "storageClassName", storageClass.GetName())

// Create/update readOnlyPVCStorageClass
readOnlyPVCStorageClass := &storagev1.StorageClass{
ObjectMeta: metav1.ObjectMeta{
Name: storageClass.GetName() + "-vrg",
},
}

op, err := ctrlutil.CreateOrUpdate(v.ctx, v.client, readOnlyPVCStorageClass, func() error {
// Do not update the storageclass if it already exists - Provisioner and Parameters are immutable anyway
if readOnlyPVCStorageClass.CreationTimestamp.IsZero() {
readOnlyPVCStorageClass.Provisioner = storageClass.Provisioner

// Copy other parameters from the original storage class
// Note - not copying volumebindingmode or reclaim policy from the source storageclass will leave defaults
readOnlyPVCStorageClass.Parameters = map[string]string{}
for k, v := range storageClass.Parameters {
readOnlyPVCStorageClass.Parameters[k] = v
}

// Set backingSnapshot parameter to true
readOnlyPVCStorageClass.Parameters["backingSnapshot"] = "true"
}

return nil
})
if err != nil {
return fmt.Errorf("%w", err)
) {
if storageClass.Provisioner == v.defaultCephFSCSIDriverName {
rsSpec.ProtectedPVC.AccessModes = []corev1.PersistentVolumeAccessMode{corev1.ReadOnlyMany}
}

v.log.Info("StorageClass for readonly cephfs PVC createOrUpdate Complete", "op", op)

// Update the rsSpec with access modes and the special storageclass
rsSpec.ProtectedPVC.AccessModes = []corev1.PersistentVolumeAccessMode{corev1.ReadOnlyMany}
rsSpec.ProtectedPVC.StorageClassName = &readOnlyPVCStorageClass.Name

return nil
}

func (v *VSHandler) GetVolumeSnapshotClassFromPVCStorageClass(storageClassName *string) (string, error) {
Expand Down Expand Up @@ -1821,21 +1770,10 @@ func (v *VSHandler) reconcileLocalRS(rd *volsyncv1alpha1.ReplicationDestination,
) {
v.log.Info("Reconciling localRS", "RD", rd.GetName())

storageClass, err := v.getStorageClass(rdSpec.ProtectedPVC.StorageClassName)
if err != nil {
return nil, err
}

rsSpec := &ramendrv1alpha1.VolSyncReplicationSourceSpec{
ProtectedPVC: rdSpec.ProtectedPVC,
}

// Fix for CephFS (replication source only) - may need different storageclass and access modes
err = v.ModifyRSSpecForCephFS(rsSpec, storageClass)
if err != nil {
return nil, err
}

pvc, err := v.setupLocalRS(rd, rdSpec, snapshotRef)
if err != nil {
return nil, err
Expand Down Expand Up @@ -1983,16 +1921,20 @@ func (v *VSHandler) setupLocalRS(rd *volsyncv1alpha1.ReplicationDestination,
}

// In all other cases, we have to create a RO PVC.
return v.createReadOnlyPVCFromSnapshot(rd, rdSpec, snapshotRef, restoreSize)
return v.createPVCFromSnapshot(rd, rdSpec, snapshotRef, restoreSize)
}

func (v *VSHandler) createReadOnlyPVCFromSnapshot(rd *volsyncv1alpha1.ReplicationDestination,
func (v *VSHandler) createPVCFromSnapshot(rd *volsyncv1alpha1.ReplicationDestination,
rdSpec *ramendrv1alpha1.VolSyncReplicationDestinationSpec,
snapshotRef *corev1.TypedLocalObjectReference,
snapRestoreSize *resource.Quantity,
) (*corev1.PersistentVolumeClaim, error) {
l := v.log.WithValues("pvcName", rd.GetName(), "snapshotRef", snapshotRef,
"snapRestoreSize", snapRestoreSize)
l := v.log.WithValues("pvcName", rd.GetName(), "snapshotRef", snapshotRef, "snapRestoreSize", snapRestoreSize)

storageClass, err := v.getStorageClass(rdSpec.ProtectedPVC.StorageClassName)
if err != nil {
return nil, err
}

pvc := &corev1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Expand All @@ -2017,6 +1959,11 @@ func (v *VSHandler) createReadOnlyPVCFromSnapshot(rd *volsyncv1alpha1.Replicatio

accessModes := []corev1.PersistentVolumeAccessMode{corev1.ReadOnlyMany}

// Use the protectedPVC accessModes when csi driver is not the default (openshift-storage.cephfs.csi.ceph.com)
if storageClass.Provisioner != v.defaultCephFSCSIDriverName {
accessModes = rdSpec.ProtectedPVC.AccessModes
}

if pvc.CreationTimestamp.IsZero() { // set immutable fields
pvc.Spec.AccessModes = accessModes
pvc.Spec.StorageClassName = rd.Spec.RsyncTLS.StorageClassName
Expand All @@ -2032,8 +1979,6 @@ func (v *VSHandler) createReadOnlyPVCFromSnapshot(rd *volsyncv1alpha1.Replicatio
return nil
})
if err != nil {
l.Error(err, "Unable to createOrUpdate PVC from snapshot for localRS")

return nil, fmt.Errorf("error creating or updating PVC from snapshot for localRS (%w)", err)
}

Expand Down
Loading

0 comments on commit d0032a7

Please sign in to comment.