From 78ea5e7ab9adb03b55ae737710182363a5d17d63 Mon Sep 17 00:00:00 2001
From: Armando Ruocco
Date: Tue, 17 Dec 2024 16:33:48 +0100
Subject: [PATCH] chore: review

Signed-off-by: Armando Ruocco
---
 internal/controller/backup_controller.go           |  8 ++++----
 pkg/conditions/conditions.go                       | 14 +++++++-------
 pkg/management/postgres/backup.go                  |  4 ++--
 pkg/management/postgres/webserver/local.go         |  2 +-
 pkg/management/postgres/webserver/plugin_backup.go |  4 ++--
 5 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/internal/controller/backup_controller.go b/internal/controller/backup_controller.go
index 60d6f6e494..d95d839fa5 100644
--- a/internal/controller/backup_controller.go
+++ b/internal/controller/backup_controller.go
@@ -417,7 +417,7 @@ func (r *BackupReconciler) reconcileSnapshotBackup(
 		}
 	}
 
-	if errCond := conditions.Update(ctx, r.Client, cluster, apiv1.BackupStartingCondition); errCond != nil {
+	if errCond := conditions.OptimisticLockPatch(ctx, r.Client, cluster, apiv1.BackupStartingCondition); errCond != nil {
 		contextLogger.Error(errCond, "Error while updating backup condition (backup starting)")
 	}
 
@@ -440,7 +440,7 @@ func (r *BackupReconciler) reconcileSnapshotBackup(
 		// and un-fence the Pod
 		contextLogger.Error(err, "while executing snapshot backup")
 		// Update backup status in cluster conditions
-		if errCond := conditions.Update(
+		if errCond := conditions.OptimisticLockPatch(
 			ctx,
 			r.Client,
 			cluster,
@@ -458,7 +458,7 @@ func (r *BackupReconciler) reconcileSnapshotBackup(
 		return res, nil
 	}
 
-	if err := conditions.Update(ctx, r.Client, cluster, apiv1.BackupSucceededCondition); err != nil {
+	if err := conditions.OptimisticLockPatch(ctx, r.Client, cluster, apiv1.BackupSucceededCondition); err != nil {
 		contextLogger.Error(err, "Can't update the cluster with the completed snapshot backup data")
 	}
 
@@ -638,7 +638,7 @@ func startInstanceManagerBackup(
 		status.CommandError = stdout
 
 		// Update backup status in cluster conditions
-		if errCond := conditions.Update(ctx, client, cluster, apiv1.BuildClusterBackupFailedCondition(err)); errCond != nil {
+		if errCond := conditions.OptimisticLockPatch(ctx, client, cluster, apiv1.BuildClusterBackupFailedCondition(err)); errCond != nil {
 			log.FromContext(ctx).Error(errCond, "Error while updating backup condition (backup failed)")
 		}
 		return postgres.PatchBackupStatusAndRetry(ctx, client, backup)
diff --git a/pkg/conditions/conditions.go b/pkg/conditions/conditions.go
index 5136629a27..4ba6a8d92f 100644
--- a/pkg/conditions/conditions.go
+++ b/pkg/conditions/conditions.go
@@ -28,22 +28,22 @@ import (
 	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
 )
 
-// Update will update a particular condition in cluster status.
+// OptimisticLockPatch will update a particular condition in cluster status.
 // This function may update the conditions in the passed cluster
 // with the latest ones that were found from the API server.
-func Update(
+func OptimisticLockPatch(
 	ctx context.Context,
 	c client.Client,
 	cluster *apiv1.Cluster,
-	condition ...metav1.Condition,
+	conditions ...metav1.Condition,
 ) error {
-	if cluster == nil || len(condition) == 0 {
+	if cluster == nil || len(conditions) == 0 {
 		return nil
 	}
 
-	tx := func(cluster *apiv1.Cluster) bool {
+	applyConditions := func(cluster *apiv1.Cluster) bool {
 		changed := false
-		for _, c := range condition {
+		for _, c := range conditions {
 			changed = changed || meta.SetStatusCondition(&cluster.Status.Conditions, c)
 		}
 		return changed
@@ -56,7 +56,7 @@ func Update(
 	}
 
 	updatedCluster := currentCluster.DeepCopy()
-	if changed := tx(updatedCluster); !changed {
+	if changed := applyConditions(updatedCluster); !changed {
 		return nil
 	}
 
diff --git a/pkg/management/postgres/backup.go b/pkg/management/postgres/backup.go
index e0ffbeb80f..77e130eb61 100644
--- a/pkg/management/postgres/backup.go
+++ b/pkg/management/postgres/backup.go
@@ -215,7 +215,7 @@ func (b *BackupCommand) takeBackup(ctx context.Context) error {
 
 	// Update backup status in cluster conditions on startup
 	if err := b.retryWithRefreshedCluster(ctx, func() error {
-		return conditions.Update(ctx, b.Client, b.Cluster, apiv1.BackupStartingCondition)
+		return conditions.OptimisticLockPatch(ctx, b.Client, b.Cluster, apiv1.BackupStartingCondition)
 	}); err != nil {
 		b.Log.Error(err, "Error changing backup condition (backup started)")
 		// We do not terminate here because we could still have a good backup
@@ -261,7 +261,7 @@ func (b *BackupCommand) takeBackup(ctx context.Context) error {
 
 	// Update backup status in cluster conditions on backup completion
 	if err := b.retryWithRefreshedCluster(ctx, func() error {
-		return conditions.Update(ctx, b.Client, b.Cluster, apiv1.BackupSucceededCondition)
+		return conditions.OptimisticLockPatch(ctx, b.Client, b.Cluster, apiv1.BackupSucceededCondition)
 	}); err != nil {
 		b.Log.Error(err, "Can't update the cluster with the completed backup data")
 	}
diff --git a/pkg/management/postgres/webserver/local.go b/pkg/management/postgres/webserver/local.go
index abc7e6fe77..0b0bcd0c6d 100644
--- a/pkg/management/postgres/webserver/local.go
+++ b/pkg/management/postgres/webserver/local.go
@@ -283,7 +283,7 @@ func (ws *localWebserverEndpoints) setWALArchiveStatusCondition(w http.ResponseW
 		return
 	}
 
-	if errCond := conditions.Update(ctx, ws.typedClient, cluster, asr.getContinuousArchivingCondition()); errCond != nil {
+	if errCond := conditions.OptimisticLockPatch(ctx, ws.typedClient, cluster, asr.getContinuousArchivingCondition()); errCond != nil {
 		contextLogger.Error(errCond, "Error changing wal archiving condition",
 			"condition", asr.getContinuousArchivingCondition())
 		http.Error(
diff --git a/pkg/management/postgres/webserver/plugin_backup.go b/pkg/management/postgres/webserver/plugin_backup.go
index 94d81a34f5..fea072902c 100644
--- a/pkg/management/postgres/webserver/plugin_backup.go
+++ b/pkg/management/postgres/webserver/plugin_backup.go
@@ -103,7 +103,7 @@ func (b *PluginBackupCommand) invokeStart(ctx context.Context) {
 
 	// Update backup status in cluster conditions on startup
 	if err := b.retryWithRefreshedCluster(ctx, func() error {
-		return conditions.Update(ctx, b.Client, b.Cluster, apiv1.BackupStartingCondition)
+		return conditions.OptimisticLockPatch(ctx, b.Client, b.Cluster, apiv1.BackupStartingCondition)
 	}); err != nil {
 		contextLogger.Error(err, "Error changing backup condition (backup started)")
 		// We do not terminate here because we could still have a good backup
@@ -153,7 +153,7 @@ func (b *PluginBackupCommand) invokeStart(ctx context.Context) {
 
 	// Update backup status in cluster conditions on backup completion
 	if err := b.retryWithRefreshedCluster(ctx, func() error {
-		return conditions.Update(ctx, b.Client, b.Cluster, apiv1.BackupSucceededCondition)
+		return conditions.OptimisticLockPatch(ctx, b.Client, b.Cluster, apiv1.BackupSucceededCondition)
 	}); err != nil {
 		contextLogger.Error(err, "Can't update the cluster with the completed backup data")
 	}
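
The patch only shows fragments of the renamed helper, so below is a minimal sketch of the optimistic-lock patch flow the new name suggests, assuming a controller-runtime client and the condition helpers visible in the diff. The function name optimisticLockPatchSketch and the use of Status().Patch with MergeFromWithOptimisticLock are assumptions for illustration, not code taken from this commit.

// Minimal sketch, assuming a controller-runtime client; the patch call and
// option below are assumed implementation details, not part of this commit.
package conditions

import (
	"context"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
)

// optimisticLockPatchSketch (hypothetical name) applies the given conditions
// to the latest version of the cluster and patches the status under an
// optimistic lock, so a concurrent writer makes the patch fail instead of
// being silently overwritten.
func optimisticLockPatchSketch(
	ctx context.Context,
	c client.Client,
	cluster *apiv1.Cluster,
	conditions ...metav1.Condition,
) error {
	if cluster == nil || len(conditions) == 0 {
		return nil
	}

	// Re-read the cluster so the patch is computed against the server's
	// current state and resourceVersion.
	var currentCluster apiv1.Cluster
	if err := c.Get(ctx, client.ObjectKeyFromObject(cluster), &currentCluster); err != nil {
		return err
	}

	updatedCluster := currentCluster.DeepCopy()
	changed := false
	for _, condition := range conditions {
		// SetStatusCondition returns true when the condition list changed.
		changed = meta.SetStatusCondition(&updatedCluster.Status.Conditions, condition) || changed
	}
	if !changed {
		return nil
	}

	// MergeFromWithOptimisticLock embeds the resourceVersion in the patch,
	// so the API server rejects it if the cluster changed in the meantime.
	return c.Status().Patch(
		ctx,
		updatedCluster,
		client.MergeFromWithOptions(&currentCluster, client.MergeFromWithOptimisticLock{}),
	)
}

With this shape, a concurrent status update surfaces as a conflict error from the patch, which fits why the callers changed above wrap the call in retryWithRefreshedCluster rather than ignoring the failure.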