diff --git a/api/v1alpha1/k6conditions.go b/api/v1alpha1/k6conditions.go index 3b9dfb2f..9fe7f024 100644 --- a/api/v1alpha1/k6conditions.go +++ b/api/v1alpha1/k6conditions.go @@ -88,7 +88,6 @@ func Initialize(k6 TestRunI) { // PLZ test run case if len(k6.GetSpec().TestRunID) > 0 { - UpdateCondition(k6, CloudTestRun, metav1.ConditionTrue) UpdateCondition(k6, CloudPLZTestRun, metav1.ConditionTrue) UpdateCondition(k6, CloudTestRunCreated, metav1.ConditionTrue) UpdateCondition(k6, CloudTestRunFinalized, metav1.ConditionFalse) diff --git a/controllers/k6_create.go b/controllers/k6_create.go index 87aa64de..1b1e06ab 100644 --- a/controllers/k6_create.go +++ b/controllers/k6_create.go @@ -88,9 +88,11 @@ func createJobSpecs(ctx context.Context, log logr.Logger, k6 v1alpha1.TestRunI, log.Info(err.Error()) // is it possible to implement this delay with resourceVersion of the job? + t, condUpdated := v1alpha1.LastUpdate(k6, v1alpha1.CloudTestRun) - // if condition has not been updated yet or has been updated very recently - if !condUpdated || time.Since(t).Seconds() <= 30 { + // If condition is unknown then resource hasn't been updated with `k6 inspect` results. + // If it has been updated but very recently, wait a bit before throwing an error.
+ if v1alpha1.IsUnknown(k6, v1alpha1.CloudTestRun) || !condUpdated || time.Since(t).Seconds() <= 30 { // try again before returning an error return ctrl.Result{RequeueAfter: time.Second * 10}, true, nil } diff --git a/controllers/testrun_controller.go b/controllers/testrun_controller.go index 92f1d353..665978d3 100644 --- a/controllers/testrun_controller.go +++ b/controllers/testrun_controller.go @@ -80,9 +80,13 @@ func (r *TestRunReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct return r.reconcile(ctx, req, log, k6) } +func isCloudTestRun(k6 v1alpha1.TestRunI) bool { + return v1alpha1.IsTrue(k6, v1alpha1.CloudTestRun) || v1alpha1.IsTrue(k6, v1alpha1.CloudPLZTestRun) +} + func (r *TestRunReconciler) reconcile(ctx context.Context, req ctrl.Request, log logr.Logger, k6 v1alpha1.TestRunI) (ctrl.Result, error) { var err error - if v1alpha1.IsTrue(k6, v1alpha1.CloudTestRun) { + if isCloudTestRun(k6) { // bootstrap the client found, err := r.createClient(ctx, k6, log) if err != nil { @@ -100,7 +104,7 @@ func (r *TestRunReconciler) reconcile(ctx context.Context, req ctrl.Request, log // Decision making here is now a mix between stages and conditions. // TODO: refactor further. - if v1alpha1.IsTrue(k6, v1alpha1.CloudTestRun) && v1alpha1.IsFalse(k6, v1alpha1.CloudTestRunAborted) { + if isCloudTestRun(k6) && v1alpha1.IsFalse(k6, v1alpha1.CloudTestRunAborted) { // check in with the BE for status if r.ShouldAbort(ctx, k6, log) { log.Info("Received an abort signal from the k6 Cloud: stopping the test.") @@ -141,7 +145,7 @@ func (r *TestRunReconciler) reconcile(ctx context.Context, req ctrl.Request, log msg := fmt.Sprintf(errMessageTooLong, "initializer pod", "initializer job and pod") log.Info(msg) - if v1alpha1.IsTrue(k6, v1alpha1.CloudTestRun) { + if isCloudTestRun(k6) { events := cloud.ErrorEvent(cloud.K6OperatorStartError). WithDetail(msg). WithAbort()