diff --git a/Dockerfile b/Dockerfile
index 94e3141c..8bbeb105 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -12,12 +12,12 @@ RUN go mod download
 # Copy the go source
 COPY cmd/ cmd/
 COPY api/ api/
-COPY internal/controllers internal/controllers
+COPY internal/metrics internal/metrics
+COPY internal/utilities internal/utilities
 COPY internal/harbor internal/harbor
 COPY internal/helpers internal/helpers
 COPY internal/messenger internal/messenger
-COPY internal/metrics internal/metrics
-COPY internal/utilities internal/utilities
+COPY internal/controllers internal/controllers
 
 # Build
 RUN CGO_ENABLED=0 GOOS=linux GOARCH=${ARCH} GO111MODULE=on go build -a -o manager cmd/main.go
diff --git a/internal/controllers/v1beta2/build_controller.go b/internal/controllers/v1beta2/build_controller.go
index 837802a0..566f763c 100644
--- a/internal/controllers/v1beta2/build_controller.go
+++ b/internal/controllers/v1beta2/build_controller.go
@@ -19,10 +19,12 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"time"
 
 	"github.com/go-logr/logr"
 	corev1 "k8s.io/api/core/v1"
 	rbacv1 "k8s.io/api/rbac/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
 	ctrl "sigs.k8s.io/controller-runtime"
@@ -124,15 +126,14 @@ func (r *LagoonBuildReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 	// if the build isn't being deleted, but the status is cancelled
 	// then clean up the undeployable build
 	if value, ok := lagoonBuild.ObjectMeta.Labels["lagoon.sh/buildStatus"]; ok {
+		// if cancelled, handle the cancellation process
 		if value == lagooncrd.BuildStatusCancelled.String() {
-			if value, ok := lagoonBuild.ObjectMeta.Labels["lagoon.sh/cancelledByNewBuild"]; ok {
-				if value == "true" {
-					opLog.Info(fmt.Sprintf("Cleaning up build %s as cancelled by new build", lagoonBuild.ObjectMeta.Name))
-					r.cleanUpUndeployableBuild(ctx, lagoonBuild, "This build was cancelled as a newer build was triggered.", opLog, true)
-				} else {
-					opLog.Info(fmt.Sprintf("Cleaning up build %s as cancelled", lagoonBuild.ObjectMeta.Name))
-					r.cleanUpUndeployableBuild(ctx, lagoonBuild, "", opLog, false)
-				}
+			if value, ok := lagoonBuild.ObjectMeta.Labels["lagoon.sh/cancelledByNewBuild"]; ok && value == "true" {
+				opLog.Info(fmt.Sprintf("Cleaning up build %s as cancelled by new build", lagoonBuild.ObjectMeta.Name))
+				r.cleanUpUndeployableBuild(ctx, lagoonBuild, "This build was cancelled as a newer build was triggered.", opLog, true)
+			} else {
+				opLog.Info(fmt.Sprintf("Cleaning up build %s as cancelled", lagoonBuild.ObjectMeta.Name))
+				r.cleanUpUndeployableBuild(ctx, lagoonBuild, "", opLog, false)
 			}
 		}
 	}
@@ -330,6 +331,17 @@ func (r *LagoonBuildReconciler) getOrCreateBuildResource(ctx context.Context, la
 	// all new builds start as "queued" but will transition to pending or running fairly quickly
 	// unless they are actually queued :D
 	newBuild.Status.Phase = "Queued"
+	// also create the build with a queued buildstep
+	newBuild.Status.Conditions = []metav1.Condition{
+		{
+			Type: "BuildStep",
+			// Reason needs to be CamelCase not camelCase. Would need to update the `build-deploy-tool` to use CamelCase
+			// to eventually remove the need for `cases`
+			Reason:             "Queued",
+			Status:             metav1.ConditionTrue,
+			LastTransitionTime: metav1.NewTime(time.Now().UTC()),
+		},
+	}
 	err := r.Get(ctx, types.NamespacedName{
 		Namespace: ns,
 		Name:      newBuild.ObjectMeta.Name,
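For context on the condition seeded above: the later hunks in this PR update it through `meta.SetStatusCondition` from `k8s.io/apimachinery/pkg/api/meta`, which keys conditions on `Type`, so the initial `BuildStep`/`Queued` condition is replaced in place as the build progresses rather than accumulating duplicates. A minimal sketch of that behaviour, runnable without a cluster; the `RunningBuildDeploy` reason is an illustrative value, not one taken from this PR:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// the initial condition, as seeded by getOrCreateBuildResource above
	conditions := []metav1.Condition{{
		Type:               "BuildStep",
		Reason:             "Queued",
		Status:             metav1.ConditionTrue,
		LastTransitionTime: metav1.NewTime(time.Now().UTC()),
	}}
	// SetStatusCondition matches on Type, so this overwrites the existing
	// BuildStep condition instead of appending a second one
	// ("RunningBuildDeploy" is a hypothetical reason for illustration)
	meta.SetStatusCondition(&conditions, metav1.Condition{
		Type:   "BuildStep",
		Reason: "RunningBuildDeploy",
		Status: metav1.ConditionTrue,
	})
	fmt.Println(len(conditions), conditions[0].Reason) // 1 RunningBuildDeploy
}
```

Because the `Status` value is unchanged between the two calls, `SetStatusCondition` also preserves the original `LastTransitionTime`, which keeps the transition timestamps meaningful across repeated reconciles.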
diff --git a/internal/controllers/v1beta2/build_helpers.go b/internal/controllers/v1beta2/build_helpers.go
index f760a056..a72d03e7 100644
--- a/internal/controllers/v1beta2/build_helpers.go
+++ b/internal/controllers/v1beta2/build_helpers.go
@@ -9,9 +9,11 @@ import (
 	"sort"
 	"strconv"
 	"strings"
+	"time"
 
 	corev1 "k8s.io/api/core/v1"
 	rbacv1 "k8s.io/api/rbac/v1"
+	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -928,7 +930,6 @@ func (r *LagoonBuildReconciler) updateQueuedBuild(
 		r.buildStatusLogsToLagoonLogs(opLog, &lagoonBuild, &lagoonEnv, lagooncrd.BuildStatusPending, fmt.Sprintf("queued %v/%v", queuePosition, queueLength))
 		r.updateDeploymentAndEnvironmentTask(opLog, &lagoonBuild, &lagoonEnv, lagooncrd.BuildStatusPending, fmt.Sprintf("queued %v/%v", queuePosition, queueLength))
 		r.buildLogsToLagoonLogs(opLog, &lagoonBuild, allContainerLogs, lagooncrd.BuildStatusPending)
-
 	}
 	return nil
 }
@@ -985,10 +986,31 @@ Build cancelled
 	if cancelled {
 		r.buildLogsToLagoonLogs(opLog, &lagoonBuild, allContainerLogs, lagooncrd.BuildStatusCancelled)
 	}
-	// delete the build from the lagoon namespace in kubernetes entirely
-	err = r.Delete(ctx, &lagoonBuild)
-	if err != nil {
-		return fmt.Errorf("there was an error deleting the lagoon build. Error was: %v", err)
+	// check if the build has a `BuildStep` type condition
+	buildStep := meta.FindStatusCondition(lagoonBuild.Status.Conditions, "BuildStep")
+	if buildStep != nil && buildStep.Reason == "Queued" {
+		// if the build was cancelled while still queued, it was likely cancelled by a new build
+		// update the buildstep to be cancelled by new build for clearer visibility in the resource status
+		if value, ok := lagoonBuild.ObjectMeta.Labels["lagoon.sh/cancelledByNewBuild"]; ok && value == "true" {
+			condition := metav1.Condition{
+				Type:               "BuildStep",
+				Reason:             "CancelledByNewBuild",
+				Status:             metav1.ConditionTrue,
+				LastTransitionTime: metav1.NewTime(time.Now().UTC()),
+			}
+			_ = meta.SetStatusCondition(&lagoonBuild.Status.Conditions, condition)
+		}
+		// finally patch the build with the cancelled status phase
+		mergeMap := map[string]interface{}{
+			"status": map[string]interface{}{
+				"conditions": lagoonBuild.Status.Conditions,
+				"phase":      lagooncrd.BuildStatusCancelled.String(),
+			},
+		}
+		mergePatch, _ := json.Marshal(mergeMap)
+		if err := r.Patch(ctx, &lagoonBuild, client.RawPatch(types.MergePatchType, mergePatch)); err != nil {
+			opLog.Error(err, "unable to update resource")
+		}
 	}
 	return nil
 }
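The swap from a hard delete to a merge patch is the core of this hunk: `client.RawPatch(types.MergePatchType, mergePatch)` sends exactly the JSON document marshalled from `mergeMap`, and the API server merges it into the build resource. A minimal sketch of the payload shape, with illustrative values, runnable without a cluster:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// shape of the merge-patch body the controller sends via
	// client.RawPatch(types.MergePatchType, mergePatch); values are illustrative
	mergeMap := map[string]interface{}{
		"status": map[string]interface{}{
			"phase": "Cancelled",
			"conditions": []map[string]interface{}{{
				"type":   "BuildStep",
				"reason": "CancelledByNewBuild",
				"status": "True",
			}},
		},
	}
	mergePatch, err := json.Marshal(mergeMap)
	if err != nil {
		panic(err)
	}
	// only the fields present in the patch are changed on the server;
	// everything else on the LagoonBuild resource is left untouched
	fmt.Println(string(mergePatch))
}
```

One property of JSON merge patches worth noting: list fields such as `conditions` are replaced wholesale rather than merged element by element, which is why the controller marshals the full, already-updated `lagoonBuild.Status.Conditions` slice into the patch instead of just the changed condition.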
diff --git a/internal/controllers/v1beta2/build_qoshandler.go b/internal/controllers/v1beta2/build_qoshandler.go
index 7466dc93..e5c6afe7 100644
--- a/internal/controllers/v1beta2/build_qoshandler.go
+++ b/internal/controllers/v1beta2/build_qoshandler.go
@@ -37,6 +37,14 @@ func (r *LagoonBuildReconciler) qosBuildProcessor(ctx context.Context,
 		opLog.Info("Checking which build next")
 	}
 	// handle the QoS build process here
+	// if the build is already running, then there is no need to check which build can be started next
+	if lagoonBuild.ObjectMeta.Labels["lagoon.sh/buildStatus"] == lagooncrd.BuildStatusRunning.String() {
+		// this is done so that running state updates don't force the queue processor to run unnecessarily
+		// the downside is that queue/state changes can be less frequent for queued builds in the API
+		// new builds, or completed/failed/cancelled builds, will still force the whichbuildnext processor to run
+		return ctrl.Result{}, nil
+	}
+	// we only care whether a new build can start when one is created, cancelled, failed, or completed
 	return ctrl.Result{}, r.whichBuildNext(ctx, opLog)
 }
diff --git a/internal/controllers/v1beta2/podmonitor_buildhandlers.go b/internal/controllers/v1beta2/podmonitor_buildhandlers.go
index 0d2d33ea..5ac5f67c 100644
--- a/internal/controllers/v1beta2/podmonitor_buildhandlers.go
+++ b/internal/controllers/v1beta2/podmonitor_buildhandlers.go
@@ -564,11 +564,18 @@ Build %s
 		LastTransitionTime: metav1.NewTime(time.Now().UTC()),
 	}
 	_ = meta.SetStatusCondition(&lagoonBuild.Status.Conditions, condition)
+	// add every build step as its own status condition too
+	condition = metav1.Condition{
+		Type:               cases.Title(language.English, cases.NoLower).String(buildStep),
+		Reason:             buildCondition.String(),
+		Status:             metav1.ConditionTrue,
+		LastTransitionTime: metav1.NewTime(time.Now().UTC()),
+	}
+	_ = meta.SetStatusCondition(&lagoonBuild.Status.Conditions, condition)
 	mergeMap["status"] = map[string]interface{}{
 		"conditions": lagoonBuild.Status.Conditions,
 		"phase":      buildCondition.String(),
 	}
-	// get the configmap for lagoon-env so we can use it for updating the deployment in lagoon
 	var lagoonEnv corev1.ConfigMap
 	if err := r.Get(ctx, types.NamespacedName{
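The `cases.Title(language.English, cases.NoLower)` caser used above is what turns a camelCase build step name into the CamelCase form the comment in `build_controller.go` calls for, without flattening the interior capitals. A minimal sketch, assuming `golang.org/x/text`; the step name is an illustrative example, not one taken from this PR:

```go
package main

import (
	"fmt"

	"golang.org/x/text/cases"
	"golang.org/x/text/language"
)

func main() {
	caser := cases.Title(language.English, cases.NoLower)
	// cases.NoLower keeps the interior capitals, so only the first
	// letter changes: runningBuildDeploy -> RunningBuildDeploy
	fmt.Println(caser.String("runningBuildDeploy"))
	// without NoLower, Title would flatten the word to "Runningbuilddeploy",
	// losing the word boundaries encoded in the original step name
	fmt.Println(cases.Title(language.English).String("runningBuildDeploy"))
}
```

That loss of interior capitals is exactly why the PR passes `cases.NoLower` here, and why the earlier comment suggests eventually emitting CamelCase from the `build-deploy-tool` itself so the `cases` dependency can be dropped.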