Skip to content

Commit

Permalink
Make sure the backup cleanup is always triggered
Browse files Browse the repository at this point in the history
This commit makes sure that backups go into a completed state, even if
there was nothing to back up at all.

Signed-off-by: Simon Beck <[email protected]>
  • Loading branch information
Kidswiss committed Dec 11, 2024
1 parent 7532231 commit be419e1
Show file tree
Hide file tree
Showing 6 changed files with 59 additions and 8 deletions.
26 changes: 20 additions & 6 deletions e2e/lib/k8up.bash
Original file line number Diff line number Diff line change
Expand Up @@ -149,6 +149,9 @@ given_a_subject() {
kubectl apply -f definitions/pv/pvc.yaml
yq e '.spec.template.spec.containers[0].securityContext.runAsUser='$(id -u)' | .spec.template.spec.containers[0].env[0].value=strenv(BACKUP_FILE_CONTENT) | .spec.template.spec.containers[0].env[1].value=strenv(BACKUP_FILE_NAME)' definitions/subject/deployment.yaml | kubectl apply -f -

# Let's wait for the deployment to actually be ready
kubectl -n k8up-e2e-subject wait --timeout 1m --for=condition=available deployment subject-deployment

echo "✅ The subject is ready"
}

Expand All @@ -167,6 +170,9 @@ given_an_annotated_subject() {
kubectl apply -f definitions/pv/pvc.yaml
yq e '.spec.template.spec.containers[1].securityContext.runAsUser='$(id -u)' | .spec.template.spec.containers[1].env[0].value=strenv(BACKUP_FILE_CONTENT) | .spec.template.spec.containers[1].env[1].value=strenv(BACKUP_FILE_NAME)' definitions/annotated-subject/deployment.yaml | kubectl apply -f -

# Let's wait for the deployment to actually be ready
kubectl -n k8up-e2e-subject wait --timeout 1m --for=condition=available deployment annotated-subject-deployment

echo "✅ The annotated subject is ready"
}

Expand All @@ -176,6 +182,9 @@ given_a_broken_annotated_subject() {
kubectl apply -f definitions/pv/pvc.yaml
yq e '.spec.template.spec.containers[1].securityContext.runAsUser='$(id -u)' ' definitions/annotated-subject/deployment-error.yaml | kubectl apply -f -

# Let's wait for the deployment to actually be ready
kubectl -n k8up-e2e-subject wait --timeout 1m --for=condition=available deployment annotated-subject-deployment

echo "✅ The annotated subject is ready"
}

Expand All @@ -187,6 +196,9 @@ given_an_annotated_subject_pod() {

yq e '.spec.containers[1].securityContext.runAsUser='$(id -u)' | .spec.containers[1].env[0].value=strenv(BACKUP_FILE_CONTENT) | .spec.containers[1].env[1].value=strenv(BACKUP_FILE_NAME)' definitions/annotated-subject/pod.yaml | kubectl apply -f -

# Let's wait for the pod to actually be ready
kubectl -n k8up-e2e-subject wait --timeout 1m --for=condition=ready pod subject-pod

echo "✅ The annotated subject pod is ready"
}

Expand All @@ -198,6 +210,9 @@ given_a_rwo_pvc_subject_in_worker_node() {

yq e 'with(select(document_index == 1) .spec.template.spec; .containers[0].securityContext.runAsUser='$(id -u)' | .containers[0].env[0].value=strenv(BACKUP_FILE_CONTENT) | .containers[0].env[1].value=strenv(BACKUP_FILE_NAME))' definitions/pvc-rwo-subject/worker.yaml | kubectl apply -f -

# Let's wait for the deployment to actually be ready
kubectl -n k8up-e2e-subject wait --timeout 1m --for=condition=available deployment pvc-rwo-subject-worker

echo "✅ The pvc rwo worker subject is ready"
}

Expand All @@ -209,6 +224,9 @@ given_a_rwo_pvc_subject_in_controlplane_node() {

yq e 'with(select(document_index == 1) .spec.template.spec; .containers[0].securityContext.runAsUser='$(id -u)' | .containers[0].env[0].value=strenv(BACKUP_FILE_CONTENT) | .containers[0].env[1].value=strenv(BACKUP_FILE_NAME))' definitions/pvc-rwo-subject/controlplane.yaml | kubectl apply -f -

# Let's wait for the deployment to actually be ready
kubectl -n k8up-e2e-subject wait --timeout 1m --for=condition=available deployment pvc-rwo-subject-controlplane

echo "✅ The pvc rwo controlplane subject is ready"
}

Expand Down Expand Up @@ -324,14 +342,10 @@ given_an_existing_mtls_backup() {
wait_until backup/k8up-backup-mtls completed
verify "'.status.conditions[?(@.type==\"Completed\")].reason' is 'Succeeded' for Backup named 'k8up-backup-mtls'"

for i in {1..3}; do
run restic dump latest "/data/subject-pvc/${backup_file_name}"
if [ ! -z "${output}" ]; then
break
fi
done
run restic dump latest "/data/subject-pvc/${backup_file_name}"

# shellcheck disable=SC2154
echo "${backup_file_content} = ${output}"
[ "${backup_file_content}" = "${output}" ]

echo "✅ An existing backup is ready"
Expand Down
2 changes: 1 addition & 1 deletion e2e/test-12-annotated-failure.bats
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ DETIK_CLIENT_NAMESPACE="k8up-e2e-subject"
# shellcheck disable=SC2034
DEBUG_DETIK="true"

@test "Annotated app, When creating a backup, Then expect Error" {
@test "Given annotated app, When creating a backup, Then expect Error" {
expected_content="expected content: $(timestamp)"
expected_filename="expected_filename.txt"

Expand Down
28 changes: 28 additions & 0 deletions e2e/test-13-cleanup-empty-jobs.bats
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
#!/usr/bin/env bats
# E2E test: verifies that Backup resources reach a completed state and get
# cleaned up ("Scrubbed" condition) even when there is nothing to back up.

load "lib/utils"
load "lib/detik"
load "lib/k8up"

# Variables consumed by the detik library; SC2034 is a false positive here.
# shellcheck disable=SC2034
DETIK_CLIENT_NAME="kubectl"
# shellcheck disable=SC2034
DETIK_CLIENT_NAMESPACE="k8up-e2e-subject"
# shellcheck disable=SC2034
DEBUG_DETIK="true"

@test "Given empty namespace, When creating multiple backups, Then expect cleanup" {
given_a_running_operator
given_a_clean_ns
given_s3_storage

kubectl apply -f definitions/secrets
# First backup in the (empty) subject namespace; runAsUser is set to the
# current UID — presumably to match host file permissions in CI; verify
# against the other e2e tests if this changes.
yq e '.spec.podSecurityContext.runAsUser='$(id -u)' | .metadata.name="first-backup"' definitions/backup/backup.yaml | kubectl apply -f -

# Second backup from the same template under a different name; its
# completion should trigger cleanup of the finished first-backup.
yq e '.spec.podSecurityContext.runAsUser='$(id -u)' | .metadata.name="second-backup"' definitions/backup/backup.yaml | kubectl apply -f -

# Nudge the operator to reconcile second-backup immediately instead of
# waiting for the regular requeue interval.
kubectl -n "$DETIK_CLIENT_NAMESPACE" annotate backup/second-backup reconcile=now

# The Scrubbed condition message "Deleted 1 resources" proves the cleanup
# ran and removed exactly one finished backup, even though the namespace
# contained nothing to back up.
wait_for_until_jsonpath backup/second-backup 5m 'jsonpath={.status.conditions[?(@.type=="Scrubbed")].message}="Deleted 1 resources"'

}
2 changes: 2 additions & 0 deletions operator/backupcontroller/controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,7 @@ func (r *BackupReconciler) Provision(ctx context.Context, obj *k8upv1.Backup) (r
log.V(1).Info("backup just started, waiting")
return controllerruntime.Result{RequeueAfter: 5 * time.Second}, nil
}

if obj.Status.HasFinished() || isPrebackupFailed(obj) {
cleanupCond := meta.FindStatusCondition(obj.Status.Conditions, k8upv1.ConditionScrubbed.String())
if cleanupCond == nil || cleanupCond.Reason != k8upv1.ReasonSucceeded.String() {
Expand Down Expand Up @@ -110,6 +111,7 @@ func (r *BackupReconciler) ReconcileJobStatus(ctx context.Context, obj *k8upv1.B
} else if numStarted > 0 {
objStatus.SetStarted(message)
}

obj.SetStatus(objStatus)

log.V(1).Info("updating status")
Expand Down
7 changes: 7 additions & 0 deletions operator/backupcontroller/executor.go
Original file line number Diff line number Diff line change
Expand Up @@ -289,6 +289,13 @@ func (b *BackupExecutor) startBackup(ctx context.Context) error {
}
}

if len(backupJobs) == 0 {
status := b.Obj.GetStatus()
status.SetSucceeded("nothing to backup")
b.Obj.SetStatus(status)
return b.Client.Status().Update(ctx, b.Obj)
}

return nil
}

Expand Down
2 changes: 1 addition & 1 deletion operator/reconciler/reconciler.go
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ func (ctrl *controller[T, L]) Reconcile(ctx context.Context, request controllerr
} else {
res, provisionErr = ctrl.reconciler.Provision(ctx, obj)
}
if apierrors.IsConflict(err) { // ignore "the object has been modified; please apply your changes to the latest version and try again" error, but requeue
if apierrors.IsConflict(provisionErr) { // ignore "the object has been modified; please apply your changes to the latest version and try again" error, but requeue
log := controllerruntime.LoggerFrom(ctx)
log.Info("Object has been modified, retrying...", "error", provisionErr.Error())
res.Requeue = true
Expand Down

0 comments on commit be419e1

Please sign in to comment.