From 149976e8aabe69adc136cf34e6dc6fbe2e763fb6 Mon Sep 17 00:00:00 2001
From: chanwook-lee <108053113+chanwook-lee@users.noreply.github.com>
Date: Wed, 15 Mar 2023 17:14:40 +0900
Subject: [PATCH] [Fix] Map ownerReferences of Pipeline to IntegrationJob
 (#403)

* [fix] Pipeline ownerReferences

* [fix] IntegrationJob not completed

* [fix] lint: reduce cyclomatic complexity

* [chore] bump version to v0.6.2
---
 Makefile                                 |  2 +-
 config/release.yaml                      |  8 ++--
 controllers/integrationjob_controller.go |  1 +
 pkg/scheduler/scheduler.go               | 49 ++++++++++++++++--------
 4 files changed, 39 insertions(+), 21 deletions(-)

diff --git a/Makefile b/Makefile
index 916bdca..c279e61 100644
--- a/Makefile
+++ b/Makefile
@@ -1,5 +1,5 @@
 # Current Operator version
-VERSION ?= v0.6.1
+VERSION ?= v0.6.2
 REGISTRY ?= tmaxcloudck
 
 # Image URL to use all building/pushing image targets
diff --git a/config/release.yaml b/config/release.yaml
index a2c800c..ba4b5cb 100644
--- a/config/release.yaml
+++ b/config/release.yaml
@@ -84,7 +84,7 @@ spec:
       containers:
       - command:
         - /controller
-        image: docker.io/tmaxcloudck/cicd-operator:v0.6.1
+        image: docker.io/tmaxcloudck/cicd-operator:v0.6.2
         imagePullPolicy: Always
         name: manager
         env:
@@ -171,7 +171,7 @@ spec:
       containers:
      - command:
         - /blocker
-        image: docker.io/tmaxcloudck/cicd-blocker:v0.6.1
+        image: docker.io/tmaxcloudck/cicd-blocker:v0.6.2
         imagePullPolicy: Always
         name: manager
         resources:
@@ -231,7 +231,7 @@ spec:
       containers:
      - command:
         - /webhook
-        image: docker.io/tmaxcloudck/cicd-webhook:v0.6.1
+        image: docker.io/tmaxcloudck/cicd-webhook:v0.6.2
         imagePullPolicy: Always
         name: manager
         resources:
@@ -291,7 +291,7 @@ spec:
       containers:
      - command:
         - /apiserver
-        image: docker.io/tmaxcloudck/cicd-api-server:v0.6.1
+        image: docker.io/tmaxcloudck/cicd-api-server:v0.6.2
         imagePullPolicy: Always
         name: manager
         resources:
diff --git a/controllers/integrationjob_controller.go b/controllers/integrationjob_controller.go
index 497b942..f3f57d5 100644
--- a/controllers/integrationjob_controller.go
+++ b/controllers/integrationjob_controller.go
@@ -199,5 +199,6 @@ func (r *integrationJobReconciler) SetupWithManager(mgr ctrl.Manager) error {
 	return ctrl.NewControllerManagedBy(mgr).
 		For(&cicdv1.IntegrationJob{}).
 		Owns(&tektonv1beta1.PipelineRun{}).
+		Owns(&tektonv1beta1.Pipeline{}).
 		Complete(r)
 }
diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go
index 5d1ea1c..c552621 100644
--- a/pkg/scheduler/scheduler.go
+++ b/pkg/scheduler/scheduler.go
@@ -169,11 +169,24 @@ func (s *scheduler) schedulePending(availableCnt *int) func(structs.Item) {
 
 		// Generate PipeLine and PipeLineRun
 		pl, pr, err := s.pm.Generate(jobNode.IntegrationJob)
+		if err != nil {
+			if err := s.patchJobScheduleFailed(jobNode.IntegrationJob, err.Error()); err != nil {
+				log.Error(err, "")
+			}
+			log.Error(err, "")
+			return
+		}
+
+		if err := s.SetControllerReferences(jobNode.IntegrationJob, pr, pl, s.scheme); err != nil {
+			return
+		}
+
+		log.Info(fmt.Sprintf("Scheduled %s / %s / %s", jobNode.Name, jobNode.Namespace, jobNode.CreationTimestamp))
 
 		// Check whether PipeLine exists
 		testPl := &tektonv1beta1.Pipeline{}
 		if err := s.k8sClient.Get(context.Background(), types.NamespacedName{Name: pipelinemanager.Name(jobNode.IntegrationJob), Namespace: jobNode.Namespace}, testPl); err != nil {
-			//
+			// If not, create PipeLine
 			if err := s.k8sClient.Create(context.Background(), pl); err != nil {
 				if err := s.patchJobScheduleFailed(jobNode.IntegrationJob, err.Error()); err != nil {
 					log.Error(err, "")
@@ -183,33 +196,37 @@ func (s *scheduler) schedulePending(availableCnt *int) func(structs.Item) {
 			}
 		}
 
-		if err != nil {
+		// Create PipelineRun only when there is no Pipeline exists
+		if err := s.k8sClient.Create(context.Background(), pr); err != nil {
 			if err := s.patchJobScheduleFailed(jobNode.IntegrationJob, err.Error()); err != nil {
 				log.Error(err, "")
 			}
 			log.Error(err, "")
 			return
 		}
-		if err := controllerutil.SetControllerReference(jobNode.IntegrationJob, pr, s.scheme); err != nil {
-			if err := s.patchJobScheduleFailed(jobNode.IntegrationJob, err.Error()); err != nil {
-				log.Error(err, "")
-			}
+
+		*availableCnt = *availableCnt - 1
+	}
+}
+
+func (s *scheduler) SetControllerReferences(job *cicdv1.IntegrationJob, pr metav1.Object, pl metav1.Object, scheme *runtime.Scheme) error {
+	if err := controllerutil.SetControllerReference(job, pr, s.scheme); err != nil {
+		if err := s.patchJobScheduleFailed(job, err.Error()); err != nil {
 			log.Error(err, "")
-			return
 		}
+		log.Error(err, "")
+		return err
+	}
 
-		log.Info(fmt.Sprintf("Scheduled %s / %s / %s", jobNode.Name, jobNode.Namespace, jobNode.CreationTimestamp))
-		// Create PipelineRun only when there is no Pipeline exists
-		if err := s.k8sClient.Create(context.Background(), pr); err != nil {
-			if err := s.patchJobScheduleFailed(jobNode.IntegrationJob, err.Error()); err != nil {
-				log.Error(err, "")
-			}
+	if err := controllerutil.SetControllerReference(job, pl, s.scheme); err != nil {
+		if err := s.patchJobScheduleFailed(job, err.Error()); err != nil {
 			log.Error(err, "")
-			return
 		}
-
-		*availableCnt = *availableCnt - 1
+		log.Error(err, "")
+		return err
 	}
+
+	return nil
 }
 
 func (s *scheduler) patchJobScheduleFailed(job *cicdv1.IntegrationJob, msg string) error {
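
Note on the mechanism this patch relies on: the scheduler now stamps a controller owner reference on both the generated Pipeline and PipelineRun, and the controller additionally registers Owns(&tektonv1beta1.Pipeline{}), so Pipeline events are mapped back to the owning IntegrationJob and garbage collection removes the Pipeline when the IntegrationJob is deleted. The standalone sketch below is not part of the patch; it only illustrates what controllerutil.SetControllerReference does to the Pipeline object. The cicdv1 import path and its AddToScheme helper are assumptions based on the usual kubebuilder layout of this repository; adjust them to the actual module if they differ.

// ownerref_sketch.go - illustrative only, not part of this patch.
package main

import (
	"fmt"

	tektonv1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

	cicdv1 "github.com/tmax-cloud/cicd-operator/api/v1" // assumed import path
)

func main() {
	// Both the owner and the owned kinds must be registered in the scheme so
	// SetControllerReference can resolve the owner's GroupVersionKind.
	scheme := runtime.NewScheme()
	if err := cicdv1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	if err := tektonv1beta1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	ij := &cicdv1.IntegrationJob{ObjectMeta: metav1.ObjectMeta{Name: "sample-ij", Namespace: "default"}}
	pl := &tektonv1beta1.Pipeline{ObjectMeta: metav1.ObjectMeta{Name: "sample-ij", Namespace: "default"}}

	// After this call the Pipeline carries an OwnerReference with Controller=true
	// pointing at the IntegrationJob; that is what lets Owns(&tektonv1beta1.Pipeline{})
	// enqueue the owning IntegrationJob on Pipeline events and what lets the
	// garbage collector clean up the Pipeline with its owner.
	if err := controllerutil.SetControllerReference(ij, pl, scheme); err != nil {
		panic(err)
	}

	for _, ref := range pl.GetOwnerReferences() {
		fmt.Printf("owner kind=%s name=%s controller=%v\n",
			ref.Kind, ref.Name, ref.Controller != nil && *ref.Controller)
	}
}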