diff --git a/go.mod b/go.mod index d366077521..b29838858a 100644 --- a/go.mod +++ b/go.mod @@ -70,6 +70,7 @@ require ( k8s.io/client-go v12.0.0+incompatible k8s.io/klog/v2 v2.110.1 k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 + k8s.io/kubectl v0.27.3 k8s.io/kubernetes v1.28.4 sigs.k8s.io/cli-utils v0.34.0 sigs.k8s.io/controller-runtime v0.16.3 @@ -243,7 +244,6 @@ require ( k8s.io/code-generator v0.28.3 // indirect k8s.io/component-base v0.28.3 // indirect k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 // indirect - k8s.io/kubectl v0.27.3 // indirect k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect oras.land/oras-go v1.2.4 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect diff --git a/integrationtests/agent/bundle_deployment_status_test.go b/integrationtests/agent/bundle_deployment_status_test.go index ee32bd25f3..f7551099f9 100644 --- a/integrationtests/agent/bundle_deployment_status_test.go +++ b/integrationtests/agent/bundle_deployment_status_test.go @@ -1,8 +1,10 @@ package agent import ( + "context" "os" + "github.com/rancher/fleet/integrationtests/utils" "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" . "github.com/onsi/ginkgo/v2" @@ -10,29 +12,27 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/controller-runtime/pkg/client" ) -func orphanBundeResources() map[string][]v1alpha1.BundleResource { - v1, err := os.ReadFile(assetsPath + "/deployment-v1.yaml") - Expect(err).NotTo(HaveOccurred()) - v2, err := os.ReadFile(assetsPath + "/deployment-v2.yaml") - Expect(err).NotTo(HaveOccurred()) - - return map[string][]v1alpha1.BundleResource{ - "v1": { - { - Name: "deployment-v1.yaml", - Content: string(v1), - Encoding: "", - }, - }, "v2": { - { - Name: "deployment-v2.yaml", - Content: string(v2), - Encoding: "", - }, +func init() { + v1, _ := os.ReadFile(assetsPath + "/deployment-v1.yaml") + v2, _ := os.ReadFile(assetsPath + "/deployment-v2.yaml") + + resources["v1"] = []v1alpha1.BundleResource{ + { + Name: "deployment-v1.yaml", + Content: string(v1), + Encoding: "", + }, + } + resources["v2"] = []v1alpha1.BundleResource{ + { + Name: "deployment-v2.yaml", + Content: string(v2), + Encoding: "", }, } } @@ -45,48 +45,216 @@ var _ = Describe("BundleDeployment status", Ordered, func() { ) var ( - env *specEnv namespace string name string + env *specEnv ) createBundleDeploymentV1 := func(name string) { bundled := v1alpha1.BundleDeployment{ ObjectMeta: metav1.ObjectMeta{ Name: name, - Namespace: namespace, + Namespace: clusterNS, }, Spec: v1alpha1.BundleDeploymentSpec{ DeploymentID: "v1", + Options: v1alpha1.BundleDeploymentOptions{ + DefaultNamespace: namespace, + }, }, } - b, err := env.controller.Create(&bundled) + err := k8sClient.Create(context.TODO(), &bundled) Expect(err).To(BeNil()) - Expect(b).To(Not(BeNil())) + Expect(bundled).To(Not(BeNil())) } - BeforeAll(func() { - env = specEnvs["orphanbundle"] - name = "orphanbundle" - namespace = env.namespace - DeferCleanup(func() { - Expect(k8sClient.Delete(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}})).ToNot(HaveOccurred()) - }) - }) + createNamespace := func() string { + namespace, err := utils.NewNamespaceName() + Expect(err).ToNot(HaveOccurred()) + + ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} + Expect(k8sClient.Create(context.Background(), ns)).ToNot(HaveOccurred()) + + return namespace + } When("New bundle 
deployment is created", func() { BeforeAll(func() { + name = "orphanbundletest1" + namespace = createNamespace() + DeferCleanup(func() { + Expect(k8sClient.Delete(ctx, &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: namespace}})).ToNot(HaveOccurred()) + }) + env = &specEnv{namespace: namespace} + // this BundleDeployment will create a deployment with the resources from assets/deployment-v1.yaml createBundleDeploymentV1(name) + DeferCleanup(func() { + Expect(k8sClient.Delete(context.TODO(), &v1alpha1.BundleDeployment{ + ObjectMeta: metav1.ObjectMeta{Namespace: clusterNS, Name: name}, + })).ToNot(HaveOccurred()) + }) + }) + + It("Detects the BundleDeployment as not ready", func() { + bd := &v1alpha1.BundleDeployment{} + err := k8sClient.Get(ctx, types.NamespacedName{Namespace: clusterNS, Name: name}, bd) + Expect(err).NotTo(HaveOccurred()) + Expect(bd.Status.Ready).To(BeFalse()) + }) + + It("Eventually updates the BundleDeployment to make it ready and non modified", func() { + Eventually(env.isBundleDeploymentReadyAndNotModified).WithArguments(name).Should(BeTrue()) + }) + + It("Deploys resources from BundleDeployment to the cluster", func() { + svc, err := env.getService(svcName) + Expect(err).NotTo(HaveOccurred()) + Expect(svc.Name).NotTo(BeEmpty()) + }) + + It("Lists deployed resources in the status", func() { + bd := &v1alpha1.BundleDeployment{} + err := k8sClient.Get(ctx, types.NamespacedName{Namespace: clusterNS, Name: name}, bd) + Expect(err).NotTo(HaveOccurred()) + Expect(bd.Status.Resources).To(HaveLen(3)) + ts := bd.Status.Resources[0].CreatedAt + Expect(ts.Time).ToNot(BeZero()) + Expect(bd.Status.Resources).To(ContainElement(v1alpha1.BundleDeploymentResource{ + Kind: "Service", + APIVersion: "v1", + Namespace: namespace, + Name: "svc-test", + CreatedAt: ts, + })) + }) + + Context("A release resource is modified", func() { + It("Modify service", func() { + svc, err := env.getService(svcName) + Expect(err).NotTo(HaveOccurred()) + patch := svc.DeepCopy() + patch.Spec.Selector = map[string]string{"foo": "bar"} + Expect(k8sClient.Patch(ctx, patch, client.MergeFrom(&svc))).NotTo(HaveOccurred()) + }) + + It("BundleDeployment status will not be Ready, and will contain the error message", func() { + Eventually(func() bool { + modifiedStatus := v1alpha1.ModifiedStatus{ + Kind: "Service", + APIVersion: "v1", + Namespace: namespace, + Name: "svc-test", + Create: false, + Delete: false, + Patch: "{\"spec\":{\"selector\":{\"app.kubernetes.io/name\":\"MyApp\"}}}", + } + return env.isNotReadyAndModified(name, modifiedStatus, "service.v1 "+namespace+"/svc-test modified {\"spec\":{\"selector\":{\"app.kubernetes.io/name\":\"MyApp\"}}}") + }).Should(BeTrue(), "BundleDeployment status does not contain modified message") + }) + + It("Modify service to have its original value", func() { + svc, err := env.getService(svcName) + Expect(err).NotTo(HaveOccurred()) + patch := svc.DeepCopy() + patch.Spec.Selector = map[string]string{"app.kubernetes.io/name": "MyApp"} + Expect(k8sClient.Patch(ctx, patch, client.MergeFrom(&svc))).To(BeNil()) + }) + + It("BundleDeployment will eventually be ready and non modified", func() { + Eventually(env.isBundleDeploymentReadyAndNotModified).WithArguments(name).Should(BeTrue()) + }) }) - AfterAll(func() { - Expect(env.controller.Delete(namespace, name, nil)).NotTo(HaveOccurred()) + Context("Upgrading to a release that will leave an orphan resource", func() { + It("Upgrade BundleDeployment to a release that deletes the svc with a finalizer", func() { + 
Eventually(func() bool { + bd := &v1alpha1.BundleDeployment{} + err := k8sClient.Get(ctx, types.NamespacedName{Namespace: clusterNS, Name: name}, bd) + Expect(err).To(BeNil()) + bd.Spec.DeploymentID = "v2" + err = k8sClient.Update(ctx, bd) + return err == nil && bd != nil + }).Should(BeTrue()) + + }) + + It("BundleDeployment status will eventually be extra", func() { + Eventually(func() bool { + modifiedStatus := v1alpha1.ModifiedStatus{ + Kind: "Service", + APIVersion: "v1", + Namespace: namespace, + Name: "svc-finalizer", + Create: false, + Delete: true, + Patch: "", + } + return env.isNotReadyAndModified(name, modifiedStatus, "service.v1 "+namespace+"/svc-finalizer extra") + }).Should(BeTrue()) + }) + + It("Remove finalizer", func() { + svc, err := env.getService(svcFinalizerName) + Expect(err).NotTo(HaveOccurred()) + patch := svc.DeepCopy() + patch.Finalizers = nil + Expect(k8sClient.Patch(ctx, patch, client.MergeFrom(&svc))).To(BeNil()) + }) + + It("BundleDeployment will eventually be ready and non modified", func() { + Eventually(env.isBundleDeploymentReadyAndNotModified).WithArguments(name).Should(BeTrue()) + }) + }) + + Context("Delete a resource in the release", func() { + It("Delete service", func() { + svc, err := env.getService(svcName) + Expect(err).NotTo(HaveOccurred()) + err = k8sClient.Delete(ctx, &svc) + Expect(err).NotTo(HaveOccurred()) + }) + + It("BundleDeployment status will eventually be missing", func() { + Eventually(func() bool { + modifiedStatus := v1alpha1.ModifiedStatus{ + Kind: "Service", + APIVersion: "v1", + Namespace: namespace, + Name: "svc-test", + Create: true, + Delete: false, + Patch: "", + } + return env.isNotReadyAndModified(name, modifiedStatus, "service.v1 "+namespace+"/svc-test missing") + }).Should(BeTrue()) + }) + }) + }) + + When("Simulating how another operator modifies a dynamic resource", func() { + BeforeAll(func() { + namespace = createNamespace() + DeferCleanup(func() { + Expect(k8sClient.Delete(ctx, &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: namespace}})).ToNot(HaveOccurred()) + }) + env = &specEnv{namespace: namespace} + + name = "orphanbundletest2" + createBundleDeploymentV1(name) + DeferCleanup(func() { + Expect(k8sClient.Delete(context.TODO(), &v1alpha1.BundleDeployment{ + ObjectMeta: metav1.ObjectMeta{Namespace: clusterNS, Name: name}, + })).ToNot(HaveOccurred()) + }) }) It("BundleDeployment is not ready", func() { - bd, err := env.controller.Get(namespace, name, metav1.GetOptions{}) + bd := &v1alpha1.BundleDeployment{} + err := k8sClient.Get(ctx, types.NamespacedName{Namespace: clusterNS, Name: name}, bd) Expect(err).NotTo(HaveOccurred()) Expect(bd.Status.Ready).To(BeFalse()) }) @@ -98,11 +266,12 @@ var _ = Describe("BundleDeployment status", Ordered, func() { It("Resources from BundleDeployment are present in the cluster", func() { svc, err := env.getService(svcName) Expect(err).NotTo(HaveOccurred()) - Expect(svc).To(Not(BeNil())) + Expect(svc.Name).NotTo(BeEmpty()) }) It("Lists deployed resources in the status", func() { - bd, err := env.controller.Get(namespace, name, metav1.GetOptions{}) + bd := &v1alpha1.BundleDeployment{} + err := k8sClient.Get(ctx, types.NamespacedName{Namespace: clusterNS, Name: name}, bd) Expect(err).NotTo(HaveOccurred()) Expect(bd.Status.Resources).To(HaveLen(3)) ts := bd.Status.Resources[0].CreatedAt @@ -137,7 +306,7 @@ var _ = Describe("BundleDeployment status", Ordered, func() { Patch: "{\"spec\":{\"selector\":{\"app.kubernetes.io/name\":\"MyApp\"}}}", } return 
env.isNotReadyAndModified(name, modifiedStatus, "service.v1 "+namespace+"/svc-test modified {\"spec\":{\"selector\":{\"app.kubernetes.io/name\":\"MyApp\"}}}") - }).Should(BeTrue()) + }).Should(BeTrue(), "BundleDeployment status does not contain modified message") }) It("Modify service to have its original value", func() { @@ -156,11 +325,12 @@ var _ = Describe("BundleDeployment status", Ordered, func() { Context("Upgrading to a release that will leave an orphan resource", func() { It("Upgrade BundleDeployment to a release that deletes the svc with a finalizer", func() { Eventually(func() bool { - bd, err := env.controller.Get(namespace, name, metav1.GetOptions{}) + bd := &v1alpha1.BundleDeployment{} + err := k8sClient.Get(ctx, types.NamespacedName{Namespace: clusterNS, Name: name}, bd) Expect(err).To(BeNil()) bd.Spec.DeploymentID = "v2" - b, err := env.controller.Update(bd) - return err == nil && b != nil + err = k8sClient.Update(ctx, bd) + return err == nil && bd != nil }).Should(BeTrue()) }) @@ -220,7 +390,21 @@ var _ = Describe("BundleDeployment status", Ordered, func() { When("Simulating how another operator creates a dynamic resource", func() { BeforeAll(func() { + namespace = createNamespace() + DeferCleanup(func() { + Expect(k8sClient.Delete(ctx, &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: namespace}})).ToNot(HaveOccurred()) + }) + env = &specEnv{namespace: namespace} + + name = "orphanbundletest2" createBundleDeploymentV1(name) + DeferCleanup(func() { + Expect(k8sClient.Delete(context.TODO(), &v1alpha1.BundleDeployment{ + ObjectMeta: metav1.ObjectMeta{Namespace: clusterNS, Name: name}, + })).ToNot(HaveOccurred()) + }) + // It is possible that some operators copy the objectset.rio.cattle.io/hash label into a dynamically created objects. // https://github.com/rancher/fleet/issues/1141 By("Simulating orphan resource creation", func() { @@ -242,10 +426,6 @@ var _ = Describe("BundleDeployment status", Ordered, func() { }) }) - AfterAll(func() { - Expect(env.controller.Delete(namespace, name, nil)).NotTo(HaveOccurred()) - }) - It("BundleDeployment will eventually be ready and non modified", func() { Eventually(env.isBundleDeploymentReadyAndNotModified).WithArguments(name).Should(BeTrue()) }) @@ -253,7 +433,7 @@ var _ = Describe("BundleDeployment status", Ordered, func() { It("Resources from BundleDeployment are present in the cluster", func() { svc, err := env.getService(svcName) Expect(err).NotTo(HaveOccurred()) - Expect(svc).To(Not(BeNil())) + Expect(svc.Name).NotTo(BeEmpty()) }) }) }) diff --git a/integrationtests/agent/helm_capabilities_test.go b/integrationtests/agent/helm_capabilities_test.go index 1161bc790e..fb874259b4 100644 --- a/integrationtests/agent/helm_capabilities_test.go +++ b/integrationtests/agent/helm_capabilities_test.go @@ -1,9 +1,12 @@ package agent import ( + "context" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + "github.com/rancher/fleet/integrationtests/utils" "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" corev1 "k8s.io/api/core/v1" @@ -11,35 +14,34 @@ import ( "k8s.io/apimachinery/pkg/types" ) -func capabilityBundleResources() map[string][]v1alpha1.BundleResource { - return map[string][]v1alpha1.BundleResource{ - "capabilitiesv1": { - { - Content: "apiVersion: v2\nname: config-chart\ndescription: A test chart that verifies its config\ntype: application\nversion: 0.1.0\nappVersion: \"1.16.0\"\nkubeVersion: '>= 1.20.0-0'\n", - Name: "config-chart/Chart.yaml", - }, - { - Content: "apiVersion: v1\nkind: ConfigMap\nmetadata:\n name: test-simple-chart-config\ndata:\n test: \"value123\"\n name: {{ .Values.name }}\n kubeVersion: {{ .Capabilities.KubeVersion.Version }}\n apiVersions: {{ join \", \" .Capabilities.APIVersions | }}\n helmVersion: {{ .Capabilities.HelmVersion.Version }}\n", - Name: "config-chart/templates/configmap.yaml", - }, - { - Content: "helm:\n chart: config-chart\n values:\n name: example-value\n", - Name: "fleet.yaml", - }, +func init() { + resources["capabilitiesv1"] = []v1alpha1.BundleResource{ + { + Content: "apiVersion: v2\nname: config-chart\ndescription: A test chart that verifies its config\ntype: application\nversion: 0.1.0\nappVersion: \"1.16.0\"\nkubeVersion: '>= 1.20.0-0'\n", + Name: "config-chart/Chart.yaml", }, - "capabilitiesv2": { - { - Content: "apiVersion: v2\nname: config-chart\ndescription: A test chart that verifies its config\ntype: application\nversion: 0.1.0\nappVersion: \"1.16.0\"\nkubeVersion: '>= 920.920.0-0'\n", - Name: "config-chart/Chart.yaml", - }, - { - Content: "apiVersion: v1\nkind: ConfigMap\nmetadata:\n name: test-simple-chart-config\ndata:\n test: \"value123\"\n name: {{ .Values.name }}\n", - Name: "config-chart/templates/configmap.yaml", - }, - { - Content: "helm:\n chart: config-chart\n values:\n name: example-value\n", - Name: "fleet.yaml", - }, + { + Content: "apiVersion: v1\nkind: ConfigMap\nmetadata:\n name: test-simple-chart-config\ndata:\n test: \"value123\"\n name: {{ .Values.name }}\n kubeVersion: {{ .Capabilities.KubeVersion.Version }}\n apiVersions: {{ join \", \" .Capabilities.APIVersions | }}\n helmVersion: {{ .Capabilities.HelmVersion.Version }}\n", + Name: "config-chart/templates/configmap.yaml", + }, + { + Content: "helm:\n chart: config-chart\n values:\n name: example-value\n", + Name: "fleet.yaml", + }, + } + + resources["capabilitiesv2"] = []v1alpha1.BundleResource{ + { + Content: "apiVersion: v2\nname: config-chart\ndescription: A test chart that verifies its config\ntype: application\nversion: 0.1.0\nappVersion: \"1.16.0\"\nkubeVersion: '>= 920.920.0-0'\n", + Name: "config-chart/Chart.yaml", + }, + { + Content: "apiVersion: v1\nkind: ConfigMap\nmetadata:\n name: test-simple-chart-config\ndata:\n test: \"value123\"\n name: {{ .Values.name }}\n", + Name: "config-chart/templates/configmap.yaml", + }, + { + Content: "helm:\n chart: config-chart\n values:\n name: example-value\n", + Name: "fleet.yaml", }, } } @@ -47,11 +49,19 @@ func capabilityBundleResources() map[string][]v1alpha1.BundleResource { var _ = Describe("Helm Chart uses Capabilities", Ordered, func() { var ( - env *specEnv + env *specEnv + name string ) BeforeAll(func() { - env = specEnvs["capabilitybundle"] + var err error + namespace, err := utils.NewNamespaceName() + Expect(err).ToNot(HaveOccurred()) + + env = &specEnv{namespace: namespace} + + Expect(k8sClient.Create(context.Background(), + &corev1.Namespace{ObjectMeta: 
metav1.ObjectMeta{Name: namespace}})).ToNot(HaveOccurred()) DeferCleanup(func() { Expect(k8sClient.Delete(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: env.namespace}})).ToNot(HaveOccurred()) }) @@ -61,11 +71,12 @@ var _ = Describe("Helm Chart uses Capabilities", Ordered, func() { bundled := v1alpha1.BundleDeployment{ ObjectMeta: metav1.ObjectMeta{ Name: name, - Namespace: env.namespace, + Namespace: clusterNS, }, Spec: v1alpha1.BundleDeploymentSpec{ DeploymentID: id, Options: v1alpha1.BundleDeploymentOptions{ + DefaultNamespace: env.namespace, Helm: &v1alpha1.HelmOptions{ Chart: "config-chart", Values: &v1alpha1.GenericMap{ @@ -76,16 +87,19 @@ var _ = Describe("Helm Chart uses Capabilities", Ordered, func() { }, } - b, err := env.controller.Create(&bundled) + err := k8sClient.Create(ctx, &bundled) Expect(err).ToNot(HaveOccurred()) - Expect(b).To(Not(BeNil())) + Expect(bundled).To(Not(BeNil())) } When("chart kubeversion matches cluster", func() { BeforeAll(func() { - createBundle(env, "capabilitiesv1", "capav1") + name = "capav1" + createBundle(env, "capabilitiesv1", name) DeferCleanup(func() { - Expect(env.controller.Delete(env.namespace, "capav1", &metav1.DeleteOptions{})).ToNot(HaveOccurred()) + Expect(k8sClient.Delete(context.TODO(), &v1alpha1.BundleDeployment{ + ObjectMeta: metav1.ObjectMeta{Namespace: clusterNS, Name: name}, + })).ToNot(HaveOccurred()) }) }) @@ -94,7 +108,7 @@ var _ = Describe("Helm Chart uses Capabilities", Ordered, func() { Eventually(func() bool { err := k8sClient.Get(ctx, types.NamespacedName{Namespace: env.namespace, Name: "test-simple-chart-config"}, &cm) return err == nil - }).Should(BeTrue()) + }).Should(BeTrue(), "config map was not created") Expect(cm.Data["name"]).To(Equal("example-value")) Expect(cm.Data["kubeVersion"]).ToNot(BeEmpty()) @@ -106,15 +120,19 @@ var _ = Describe("Helm Chart uses Capabilities", Ordered, func() { When("chart kubeversion does not match cluster", func() { BeforeAll(func() { - createBundle(env, "capabilitiesv2", "capav2") + name = "capav2" + createBundle(env, "capabilitiesv2", name) DeferCleanup(func() { - Expect(env.controller.Delete(env.namespace, "capav2", &metav1.DeleteOptions{})).ToNot(HaveOccurred()) + Expect(k8sClient.Delete(context.TODO(), &v1alpha1.BundleDeployment{ + ObjectMeta: metav1.ObjectMeta{Namespace: clusterNS, Name: name}, + })).ToNot(HaveOccurred()) }) }) It("error message is added to status", func() { Eventually(func() bool { - bd, err := env.controller.Get(env.namespace, "capav2", metav1.GetOptions{}) + bd := &v1alpha1.BundleDeployment{} + err := k8sClient.Get(context.TODO(), types.NamespacedName{Namespace: clusterNS, Name: name}, bd) Expect(err).ToNot(HaveOccurred()) return checkCondition(bd.Status.Conditions, "Ready", "False", "chart requires kubeVersion") }).Should(BeTrue()) diff --git a/integrationtests/agent/suite_test.go b/integrationtests/agent/suite_test.go index 46c93c3234..59ab29a7d6 100644 --- a/integrationtests/agent/suite_test.go +++ b/integrationtests/agent/suite_test.go @@ -2,15 +2,14 @@ package agent import ( "context" - "fmt" "path/filepath" "strings" "testing" "time" "github.com/google/go-cmp/cmp" - "github.com/rancher/fleet/integrationtests/utils" - "github.com/rancher/fleet/internal/cmd/agent/controllers/bundledeployment" + "github.com/rancher/fleet/internal/cmd/agent" + "github.com/rancher/fleet/internal/cmd/agent/controller" "github.com/rancher/fleet/internal/cmd/agent/deployer" "github.com/rancher/fleet/internal/cmd/agent/deployer/cleanup" 
"github.com/rancher/fleet/internal/cmd/agent/deployer/driftdetect" @@ -19,14 +18,7 @@ import ( "github.com/rancher/fleet/internal/helmdeployer" "github.com/rancher/fleet/internal/manifest" "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" - "github.com/rancher/fleet/pkg/durations" - "github.com/rancher/fleet/pkg/generated/controllers/fleet.cattle.io" - fleetgen "github.com/rancher/fleet/pkg/generated/controllers/fleet.cattle.io/v1alpha1" - "github.com/rancher/lasso/pkg/cache" - lassoclient "github.com/rancher/lasso/pkg/client" - "github.com/rancher/lasso/pkg/controller" - "github.com/rancher/wrangler/v2/pkg/apply" - "github.com/rancher/wrangler/v2/pkg/generated/controllers/core" + "github.com/rancher/wrangler/v2/pkg/genericcondition" . "github.com/onsi/ginkgo/v2" @@ -36,15 +28,20 @@ import ( "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/discovery" "k8s.io/client-go/discovery/cached/memory" - "k8s.io/client-go/dynamic" "k8s.io/client-go/rest" "k8s.io/client-go/restmapper" "k8s.io/client-go/tools/clientcmd" + "k8s.io/kubectl/pkg/scheme" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/manager" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" ) var ( @@ -57,11 +54,12 @@ var ( ) const ( + clusterNS = "cluster-test-id" assetsPath = "assets" timeout = 30 * time.Second ) -type specResources func() map[string][]v1alpha1.BundleResource +var resources = map[string][]v1alpha1.BundleResource{} func TestFleet(t *testing.T) { RegisterFailHandler(Fail) @@ -70,6 +68,7 @@ func TestFleet(t *testing.T) { var _ = BeforeSuite(func() { SetDefaultEventuallyTimeout(timeout) + ctx, cancel = context.WithCancel(context.TODO()) testEnv = &envtest.Environment{ CRDDirectoryPaths: []string{filepath.Join("..", "..", "charts", "fleet-crd", "templates", "crds.yaml")}, @@ -81,26 +80,36 @@ var _ = BeforeSuite(func() { Expect(err).NotTo(HaveOccurred()) Expect(cfg).NotTo(BeNil()) - k8sClient, err = client.New(cfg, client.Options{}) + utilruntime.Must(v1alpha1.AddToScheme(scheme.Scheme)) + //+kubebuilder:scaffold:scheme + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) Expect(err).NotTo(HaveOccurred()) Expect(k8sClient).NotTo(BeNil()) - specEnvs = make(map[string]*specEnv, 2) - for id, f := range map[string]specResources{"capabilitybundle": capabilityBundleResources, "orphanbundle": orphanBundeResources} { - namespace, err := utils.NewNamespaceName() - Expect(err).ToNot(HaveOccurred()) - fmt.Printf("Creating namespace %s\n", namespace) - Expect(k8sClient.Create(ctx, &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: namespace, - }, - })).ToNot(HaveOccurred()) - - res := f() - controller := registerBundleDeploymentController(cfg, namespace, newLookup(res)) - - specEnvs[id] = &specEnv{controller: controller, k8sClient: k8sClient, namespace: namespace} + zopts := zap.Options{ + Development: true, } + ctrl.SetLogger(zap.New(zap.UseFlagOptions(&zopts))) + + k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{ + Scheme: scheme.Scheme, + LeaderElection: false, + Metrics: metricsserver.Options{BindAddress: "0"}, + }) + Expect(err).ToNot(HaveOccurred()) + + // Set up the bundledeployment reconciler + Expect(k8sClient.Create(context.Background(), &corev1.Namespace{ObjectMeta: 
metav1.ObjectMeta{Name: clusterNS}})).ToNot(HaveOccurred()) + reconciler := newReconciler(ctx, k8sManager, newLookup(resources)) + err = reconciler.SetupWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred(), "failed to set up manager") + + go func() { + defer GinkgoRecover() + err = k8sManager.Start(ctx) + Expect(err).ToNot(HaveOccurred(), "failed to run manager") + }() }) var _ = AfterSuite(func() { @@ -108,78 +117,88 @@ var _ = AfterSuite(func() { Expect(testEnv.Stop()).ToNot(HaveOccurred()) }) -// registerBundleDeploymentController registers a BundleDeploymentController that will watch for changes -// just in the namespace provided. Resources are provided by the lookup parameter. -func registerBundleDeploymentController(cfg *rest.Config, namespace string, lookup *lookup) fleetgen.BundleDeploymentController { - d, err := discovery.NewDiscoveryClientForConfig(cfg) - Expect(err).ToNot(HaveOccurred()) +// newReconciler creates a new BundleDeploymentReconciler that will watch for changes +// in the test Fleet namespace, using configuration from the provided manager. +// Resources are provided by the lookup parameter. +func newReconciler(ctx context.Context, mgr manager.Manager, lookup *lookup) *controller.BundleDeploymentReconciler { + upstreamClient := mgr.GetClient() + // re-use client, since this is a single cluster test + localClient := upstreamClient + + systemNamespace := "cattle-fleet-system" + fleetNamespace := clusterNS + agentScope := "" + defaultNamespace := "default" + + // Build the helm deployer, which uses a getter for local cluster's client-go client for helm SDK + d := discovery.NewDiscoveryClientForConfigOrDie(cfg) disc := memory.NewMemCacheClient(d) mapper := restmapper.NewDeferredDiscoveryRESTMapper(disc) - dyn, err := dynamic.NewForConfig(cfg) - Expect(err).ToNot(HaveOccurred()) - trig := trigger.New(ctx, mapper, dyn) - cf, err := lassoclient.NewSharedClientFactory(cfg, &lassoclient.SharedClientFactoryOptions{ - Mapper: mapper, - }) - Expect(err).ToNot(HaveOccurred()) - - sharedFactory := controller.NewSharedControllerFactory(cache.NewSharedCachedFactory(cf, &cache.SharedCacheFactoryOptions{ - DefaultNamespace: namespace, - DefaultResync: durations.DefaultResyncAgent, - }), nil) - - // this factory will watch BundleDeployments just for the namespace provided - factory, err := fleet.NewFactoryFromConfigWithOptions(cfg, &fleet.FactoryOptions{ - SharedControllerFactory: sharedFactory, - }) - Expect(err).ToNot(HaveOccurred()) - - restClientGetter := restClientGetter{ - cfg: cfg, - discovery: disc, - restMapper: mapper, - } - coreFactory, err := core.NewFactoryFromConfig(cfg) - Expect(err).ToNot(HaveOccurred()) - - helmDeployer, err := helmdeployer.NewHelm(namespace, namespace, namespace, namespace, &restClientGetter, - coreFactory.Core().V1().ServiceAccount().Cache(), coreFactory.Core().V1().ConfigMap().Cache(), coreFactory.Core().V1().Secret().Cache()) - Expect(err).ToNot(HaveOccurred()) + getter := &restClientGetter{cfg: cfg, discovery: disc, restMapper: mapper} + helmDeployer, _ := helmdeployer.NewHelm( + ctx, + localClient, + systemNamespace, + defaultNamespace, + defaultNamespace, + agentScope, + getter, + ) + + // Build the deployer that the bundledeployment reconciler will use + deployer := deployer.New( + localClient, + mgr.GetAPIReader(), + lookup, + helmDeployer, + ) - wranglerApply, err := apply.NewForConfig(cfg) + // Build the monitor to detect changes + apply, _, localDynamic, err := agent.LocalClients(ctx, mgr.GetConfig()) Expect(err).ToNot(HaveOccurred()) 
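+	// These local-cluster clients are shared by the monitor, drift detector and cleanup built below.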
- deployManager := deployer.New( - lookup, - helmDeployer) - cleanup := cleanup.New( - namespace, - namespace, - factory.Fleet().V1alpha1().BundleDeployment().Cache(), - factory.Fleet().V1alpha1().BundleDeployment(), - helmDeployer) monitor := monitor.New( - namespace, - namespace, - namespace, + apply, + mapper, helmDeployer, - wranglerApply) + defaultNamespace, + agentScope, + ) + + // Build the drift detector + trigger := trigger.New(ctx, localDynamic, mgr.GetRESTMapper()) driftdetect := driftdetect.New( - namespace, - namespace, - namespace, + trigger, + upstreamClient, + mgr.GetAPIReader(), + apply, + defaultNamespace, + defaultNamespace, + agentScope, + ) + + // Build the clean up + cleanup := cleanup.New( + upstreamClient, + mapper, + localDynamic, helmDeployer, - wranglerApply) + fleetNamespace, + defaultNamespace) - bundledeployment.Register(ctx, trig, mapper, dyn, deployManager, cleanup, monitor, driftdetect, factory.Fleet().V1alpha1().BundleDeployment()) + return &controller.BundleDeploymentReconciler{ + Client: upstreamClient, - err = factory.Start(ctx, 50) - Expect(err).ToNot(HaveOccurred()) + Scheme: mgr.GetScheme(), + LocalClient: localClient, - err = coreFactory.Start(ctx, 50) - Expect(err).ToNot(HaveOccurred()) + Deployer: deployer, + Monitor: monitor, + DriftDetect: driftdetect, + Cleanup: cleanup, - return factory.Fleet().V1alpha1().BundleDeployment() + AgentScope: agentScope, + } } // restClientGetter is needed to create the helm deployer. We just need to return the rest.Config for this test. @@ -213,18 +232,17 @@ type lookup struct { resources map[string][]v1alpha1.BundleResource } -func (l *lookup) Get(id string) (*manifest.Manifest, error) { +func (l *lookup) Get(_ context.Context, _ client.Reader, id string) (*manifest.Manifest, error) { return manifest.New(l.resources[id]), nil } type specEnv struct { - controller fleetgen.BundleDeploymentController - namespace string - k8sClient client.Client + namespace string } func (se specEnv) isNotReadyAndModified(name string, modifiedStatus v1alpha1.ModifiedStatus, message string) bool { - bd, err := se.controller.Get(se.namespace, name, metav1.GetOptions{}) + bd := &v1alpha1.BundleDeployment{} + err := k8sClient.Get(context.TODO(), types.NamespacedName{Namespace: clusterNS, Name: name}, bd, &client.GetOptions{}) Expect(err).NotTo(HaveOccurred()) isReadyCondition := checkCondition(bd.Status.Conditions, "Ready", "False", message) @@ -234,7 +252,8 @@ func (se specEnv) isNotReadyAndModified(name string, modifiedStatus v1alpha1.Mod } func (se specEnv) isBundleDeploymentReadyAndNotModified(name string) bool { - bd, err := se.controller.Get(se.namespace, name, metav1.GetOptions{}) + bd := &v1alpha1.BundleDeployment{} + err := k8sClient.Get(context.TODO(), types.NamespacedName{Namespace: clusterNS, Name: name}, bd, &client.GetOptions{}) Expect(err).NotTo(HaveOccurred()) return bd.Status.Ready && bd.Status.NonModified } @@ -245,7 +264,7 @@ func (se specEnv) getService(name string) (corev1.Service, error) { Name: name, } svc := corev1.Service{} - err := se.k8sClient.Get(ctx, nsn, &svc) + err := k8sClient.Get(ctx, nsn, &svc) if err != nil { return corev1.Service{}, err } diff --git a/internal/cmd/agent/apply.go b/internal/cmd/agent/apply.go new file mode 100644 index 0000000000..b5abce4702 --- /dev/null +++ b/internal/cmd/agent/apply.go @@ -0,0 +1,47 @@ +package agent + +import ( + "context" + "time" + + "github.com/rancher/wrangler/v2/pkg/apply" + "github.com/rancher/wrangler/v2/pkg/ticker" + + "k8s.io/apimachinery/pkg/api/meta" + 
"k8s.io/client-go/discovery" + "k8s.io/client-go/discovery/cached/memory" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" +) + +// LocalClients returns an apply.Apply. It is public so that tests don't have to replicate the setup. +func LocalClients(ctx context.Context, localconfig *rest.Config) (apply.Apply, meta.RESTMapper, *dynamic.DynamicClient, error) { + d, err := discovery.NewDiscoveryClientForConfig(localconfig) + if err != nil { + return nil, nil, nil, err + } + + disc := memory.NewMemCacheClient(d) + mapper := restmapper.NewDeferredDiscoveryRESTMapper(disc) + + go func() { + for range ticker.Context(ctx, 30*time.Second) { + disc.Invalidate() + mapper.Reset() + } + }() + + dyn, err := dynamic.NewForConfig(localconfig) + if err != nil { + return nil, nil, nil, err + } + + apply, err := apply.NewForConfig(localconfig) + if err != nil { + return nil, nil, nil, err + } + apply = apply.WithDynamicLookup() + + return apply, mapper, dyn, err +} diff --git a/internal/cmd/agent/clusterstatus.go b/internal/cmd/agent/clusterstatus.go index 4b6fe9ca9b..b4c8a3add9 100644 --- a/internal/cmd/agent/clusterstatus.go +++ b/internal/cmd/agent/clusterstatus.go @@ -1,12 +1,17 @@ package agent import ( + "context" "time" "github.com/spf13/cobra" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + memory "k8s.io/client-go/discovery/cached" "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" + "k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/util/workqueue" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/log" @@ -22,9 +27,11 @@ import ( cache2 "github.com/rancher/lasso/pkg/cache" "github.com/rancher/lasso/pkg/client" "github.com/rancher/lasso/pkg/controller" + "github.com/rancher/lasso/pkg/mapper" "github.com/rancher/wrangler/v2/pkg/generated/controllers/core" "github.com/rancher/wrangler/v2/pkg/kubeconfig" "github.com/rancher/wrangler/v2/pkg/ratelimit" + "github.com/rancher/wrangler/v2/pkg/ticker" ) func NewClusterStatus() *cobra.Command { @@ -161,3 +168,32 @@ func newSharedControllerFactory(config *rest.Config, mapper meta.RESTMapper, nam }, }), nil } + +func newMappers(ctx context.Context, fleetRESTConfig *rest.Config, clientconfig clientcmd.ClientConfig) (meta.RESTMapper, meta.RESTMapper, discovery.CachedDiscoveryInterface, error) { + fleetMapper, err := mapper.New(fleetRESTConfig) + if err != nil { + return nil, nil, nil, err + } + + client, err := clientconfig.ClientConfig() + if err != nil { + return nil, nil, nil, err + } + client.RateLimiter = ratelimit.None + + d, err := discovery.NewDiscoveryClientForConfig(client) + if err != nil { + return nil, nil, nil, err + } + discovery := memory.NewMemCacheClient(d) + mapper := restmapper.NewDeferredDiscoveryRESTMapper(discovery) + + go func() { + for range ticker.Context(ctx, 30*time.Second) { + discovery.Invalidate() + mapper.Reset() + } + }() + + return fleetMapper, mapper, discovery, nil +} diff --git a/internal/cmd/agent/controller.go b/internal/cmd/agent/controller.go index 2aad1acc4b..e5e033a5d6 100644 --- a/internal/cmd/agent/controller.go +++ b/internal/cmd/agent/controller.go @@ -2,109 +2,245 @@ package agent import ( "context" - "time" + "os" - "github.com/rancher/fleet/internal/cmd/agent/controllers" + "github.com/rancher/fleet/internal/cmd/agent/controller" + "github.com/rancher/fleet/internal/cmd/agent/deployer" + "github.com/rancher/fleet/internal/cmd/agent/deployer/cleanup" + 
"github.com/rancher/fleet/internal/cmd/agent/deployer/driftdetect" + "github.com/rancher/fleet/internal/cmd/agent/deployer/monitor" "github.com/rancher/fleet/internal/cmd/agent/register" + "github.com/rancher/fleet/internal/cmd/agent/trigger" + "github.com/rancher/fleet/internal/helmdeployer" + "github.com/rancher/fleet/internal/manifest" + "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" - "github.com/rancher/lasso/pkg/mapper" - "github.com/rancher/wrangler/v2/pkg/kubeconfig" - "github.com/rancher/wrangler/v2/pkg/ratelimit" - "github.com/rancher/wrangler/v2/pkg/ticker" + "helm.sh/helm/v3/pkg/cli" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/client-go/discovery" - "k8s.io/client-go/discovery/cached/memory" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" - "k8s.io/client-go/restmapper" - "k8s.io/client-go/tools/clientcmd" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/cluster" + "sigs.k8s.io/controller-runtime/pkg/healthz" + "sigs.k8s.io/controller-runtime/pkg/manager" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" +) + +var ( + scheme = runtime.NewScheme() + localScheme = runtime.NewScheme() ) // defaultNamespace is the namespace to use for resources that don't specify a namespace, e.g. "default" const defaultNamespace = "default" +func init() { + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + utilruntime.Must(v1alpha1.AddToScheme(scheme)) + //+kubebuilder:scaffold:scheme + + utilruntime.Must(clientgoscheme.AddToScheme(localScheme)) +} + // start the fleet agent -func start(ctx context.Context, kubeConfig, namespace, agentScope string) error { - clientConfig := kubeconfig.GetNonInteractiveClientConfig(kubeConfig) - kc, err := clientConfig.ClientConfig() +// systemNamespace is the namespace the agent is running in, e.g. cattle-fleet-system +func start(ctx context.Context, localConfig *rest.Config, systemNamespace, agentScope string) error { + // Registration is done in an init container. If we are here, we are already registered. + // Retrieve the existing config from the registration. 
+ // Cannot start without kubeconfig for upstream cluster: + upstreamConfig, fleetNamespace, clusterName, err := loadRegistration(ctx, systemNamespace, localConfig) if err != nil { + setupLog.Error(err, "unable to load registration and start manager") return err } - agentInfo, err := register.Get(ctx, namespace, kc) + // Start manager for upstream cluster, we do not use leader election + setupLog.Info("listening for changes on upstream cluster", "cluster", clusterName, "namespace", fleetNamespace) + + metricsAddr := ":8080" + probeAddr := ":8081" + + mgr, err := ctrl.NewManager(upstreamConfig, ctrl.Options{ + Scheme: scheme, + Metrics: metricsserver.Options{BindAddress: metricsAddr}, + HealthProbeBindAddress: probeAddr, + LeaderElection: false, + // only watch resources in the fleet namespace + Cache: cache.Options{ + DefaultNamespaces: map[string]cache.Config{fleetNamespace: {}}, + }, + }) if err != nil { - setupLog.Error(err, "failed to get registration for upstream cluster") + setupLog.Error(err, "unable to start manager") return err } - fleetNamespace, _, err := agentInfo.ClientConfig.Namespace() - if err != nil { - setupLog.Error(err, "failed to get namespace from upstream cluster") - return err - } + localCtx, cancel := context.WithCancel(ctx) + defer cancel() - fleetRESTConfig, err := agentInfo.ClientConfig.ClientConfig() + reconciler, err := newReconciler(ctx, localCtx, mgr, localConfig, systemNamespace, fleetNamespace, agentScope) if err != nil { - setupLog.Error(err, "failed to get kubeconfig for upstream cluster") + setupLog.Error(err, "unable to set up bundledeployment reconciler") return err } - fleetMapper, mapper, discovery, err := newMappers(ctx, fleetRESTConfig, clientConfig) - if err != nil { - setupLog.Error(err, "failed to get mappers") + // Set up the bundledeployment reconciler + if err = (reconciler).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "BundleDeployment") return err } + //+kubebuilder:scaffold:builder - appCtx, err := controllers.NewAppContext( - fleetNamespace, namespace, agentInfo.ClusterNamespace, agentInfo.ClusterName, - fleetRESTConfig, clientConfig, fleetMapper, mapper, discovery) - if err != nil { - setupLog.Error(err, "failed to create app context") + if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { + setupLog.Error(err, "unable to set up health check") return err } - - err = controllers.Register(ctx, - appCtx, - fleetNamespace, defaultNamespace, - agentScope) - if err != nil { - setupLog.Error(err, "failed to register controllers") + if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { + setupLog.Error(err, "unable to set up ready check") return err } - if err := appCtx.Start(ctx); err != nil { - setupLog.Error(err, "failed to start app context") + setupLog.Info("starting manager") + if err := mgr.Start(ctx); err != nil { + setupLog.Error(err, "problem running manager") return err + } return nil } -func newMappers(ctx context.Context, fleetRESTConfig *rest.Config, clientconfig clientcmd.ClientConfig) (meta.RESTMapper, meta.RESTMapper, discovery.CachedDiscoveryInterface, error) { - fleetMapper, err := mapper.New(fleetRESTConfig) +func loadRegistration(ctx context.Context, systemNamespace string, localConfig *rest.Config) (*rest.Config, string, string, error) { + agentInfo, err := register.Get(ctx, systemNamespace, localConfig) if err != nil { - return nil, nil, nil, err + setupLog.Error(err, "cannot get registration info for upstream cluster") + return nil, 
"", "", err } - client, err := clientconfig.ClientConfig() + // fleetNamespace is the upstream cluster namespace from AgentInfo, e.g. cluster-fleet-ID + fleetNamespace, _, err := agentInfo.ClientConfig.Namespace() if err != nil { - return nil, nil, nil, err + setupLog.Error(err, "cannot get upstream namespace from registration") + return nil, "", "", err } - client.RateLimiter = ratelimit.None - d, err := discovery.NewDiscoveryClientForConfig(client) + upstreamConfig, err := agentInfo.ClientConfig.ClientConfig() + if err != nil { + setupLog.Error(err, "cannot get upstream kubeconfig from registration") + return nil, "", "", err + } + return upstreamConfig, fleetNamespace, agentInfo.ClusterName, nil +} + +func newReconciler(ctx, localCtx context.Context, mgr manager.Manager, localConfig *rest.Config, systemNamespace, fleetNamespace, agentScope string) (*controller.BundleDeploymentReconciler, error) { + upstreamClient := mgr.GetClient() + + // Build client for local cluster + localCluster, localClient, err := newCluster(localCtx, localConfig, ctrl.Options{ + Scheme: localScheme, + Logger: mgr.GetLogger().WithName("local-cluster"), + }) if err != nil { - return nil, nil, nil, err + setupLog.Error(err, "unable to build local cluster client") + return nil, err } - discovery := memory.NewMemCacheClient(d) - mapper := restmapper.NewDeferredDiscoveryRESTMapper(discovery) + // Build the helm deployer, which uses a getter for local cluster's client-go client for helm SDK + getter := cli.New().RESTClientGetter() // uses local kubeconfig + helmDeployer, _ := helmdeployer.NewHelm( + ctx, + localClient, + systemNamespace, + defaultNamespace, + defaultNamespace, + agentScope, + getter, + ) + + // Build the deployer that the bundledeployment reconciler will use + deployer := deployer.New( + localClient, + mgr.GetAPIReader(), + manifest.NewLookup(), + helmDeployer, + ) + + // Build the monitor to update the bundle deployment's status, calculates modified/non-modified + apply, mapper, localDynamic, err := LocalClients(ctx, localConfig) + if err != nil { + return nil, err + } + monitor := monitor.New( + apply, + mapper, + helmDeployer, + defaultNamespace, + agentScope, + ) + + // Build the drift detector for deployed resources + trigger := trigger.New(ctx, localDynamic, localCluster.GetRESTMapper()) + driftdetect := driftdetect.New( + trigger, + upstreamClient, + mgr.GetAPIReader(), + apply, + defaultNamespace, + defaultNamespace, + agentScope, + ) + + // Build the clean up, which deletes helm releases + cleanup := cleanup.New( + upstreamClient, + mapper, + localDynamic, + helmDeployer, + fleetNamespace, + defaultNamespace, + ) + + return &controller.BundleDeploymentReconciler{ + Client: upstreamClient, + + Scheme: mgr.GetScheme(), + LocalClient: localClient, + + Deployer: deployer, + Monitor: monitor, + DriftDetect: driftdetect, + Cleanup: cleanup, + + DefaultNamespace: defaultNamespace, + + AgentScope: agentScope, + }, nil +} + +// newCluster returns a new cluster client, see controller-runtime/pkg/manager/manager.go +// This client is for the local cluster, not the upstream cluster. The upstream +// cluster client is used by the manager to watch for changes to the +// bundledeployments. 
+func newCluster(ctx context.Context, config *rest.Config, options manager.Options) (cluster.Cluster, client.Client, error) { + cluster, err := cluster.New(config, func(clusterOptions *cluster.Options) { + clusterOptions.Scheme = options.Scheme + clusterOptions.Logger = options.Logger + }) + if err != nil { + return nil, nil, err + } go func() { - for range ticker.Context(ctx, 30*time.Second) { - discovery.Invalidate() - mapper.Reset() + err := cluster.GetCache().Start(ctx) + if err != nil { + setupLog.Error(err, "unable to start the cache") + os.Exit(1) } }() + cluster.GetCache().WaitForCacheSync(ctx) - return fleetMapper, mapper, discovery, nil + return cluster, cluster.GetClient(), nil } diff --git a/internal/cmd/agent/controller/bundledeployment_controller.go b/internal/cmd/agent/controller/bundledeployment_controller.go new file mode 100644 index 0000000000..35988de156 --- /dev/null +++ b/internal/cmd/agent/controller/bundledeployment_controller.go @@ -0,0 +1,240 @@ +package controller + +import ( + "context" + "fmt" + "time" + + "github.com/go-logr/logr" + "github.com/rancher/fleet/internal/cmd/agent/deployer" + "github.com/rancher/fleet/internal/cmd/agent/deployer/cleanup" + "github.com/rancher/fleet/internal/cmd/agent/deployer/driftdetect" + "github.com/rancher/fleet/internal/cmd/agent/deployer/monitor" + fleetv1 "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" + "github.com/rancher/fleet/pkg/durations" + + "github.com/rancher/wrangler/v2/pkg/condition" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + errutil "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/retry" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +// BundleDeploymentReconciler reconciles a BundleDeployment object, by +// deploying the bundle as a helm release. +type BundleDeploymentReconciler struct { + client.Client + Scheme *runtime.Scheme + + // LocalClient is the client for the cluster the agent is running on. + LocalClient client.Client + + Deployer *deployer.Deployer + Monitor *monitor.Monitor + DriftDetect *driftdetect.DriftDetect + Cleanup *cleanup.Cleanup + + DefaultNamespace string + + // AgentInfo is the labelSuffix used by the helm deployer + AgentScope string +} + +var DefaultRetry = wait.Backoff{ + Steps: 5, + Duration: 5 * time.Second, + Factor: 1.0, + Jitter: 0.1, +} + +//+kubebuilder:rbac:groups=fleet.cattle.io,resources=bundledeployments,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=fleet.cattle.io,resources=bundledeployments/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=fleet.cattle.io,resources=bundledeployments/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// The Reconcile function compares the state specified by +// the BundleDeployment object against the actual cluster state, and then +// performs operations to make the cluster state reflect the state specified by +// the user. 
+// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.15.0/pkg/reconcile +func (r *BundleDeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx).WithName("bundledeployment") + ctx = log.IntoContext(ctx, logger) + key := req.String() + + // get latest BundleDeployment from cluster + bd := &fleetv1.BundleDeployment{} + err := r.Get(ctx, req.NamespacedName, bd) + if apierrors.IsNotFound(err) { + // This actually deletes the helm releases if a bundledeployment is deleted or orphaned + logger.V(1).Info("BundleDeployment deleted, cleaning up helm releases") + err := r.Cleanup.CleanupReleases(ctx, key, nil) + if err != nil { + logger.Error(err, "Failed to clean up missing bundledeployment", "key", key) + } + return ctrl.Result{}, nil + } else if err != nil { + return ctrl.Result{}, err + } + + if bd.Spec.Paused { + logger.V(1).Info("Bundle paused, clearing drift detection") + err := r.DriftDetect.Clear(req.String()) + + return ctrl.Result{}, err + } + + merr := []error{} + + // helm deploy the bundledeployment + status, err := r.Deployer.DeployBundle(ctx, bd) + if err != nil { + logger.V(1).Error(err, "Failed to deploy bundle", "status", status) + + // do not use the returned status, instead set the condition and possibly a timestamp + bd.Status = setCondition(bd.Status, err, condition.Cond(fleetv1.BundleDeploymentConditionDeployed)) + + merr = append(merr, fmt.Errorf("failed deploying bundle: %w", err)) + } else { + bd.Status = setCondition(status, nil, condition.Cond(fleetv1.BundleDeploymentConditionDeployed)) + } + + // if we can't retrieve the resources, we don't need to try any of the other operations and requeue now + resources, err := r.Deployer.Resources(bd.Name, bd.Status.Release) + if err != nil { + logger.V(1).Info("Failed to retrieve bundledeployment's resources") + if err := updateStatus(ctx, logger, r.Client, req.NamespacedName, bd.Status); err != nil { + merr = append(merr, fmt.Errorf("failed to update the status: %w", err)) + } + return ctrl.Result{}, errutil.NewAggregate(merr) + } + + var result ctrl.Result + if monitor.ShouldUpdateStatus(bd) { + // update the bundledeployment status and check if we deploy an agent, or if we need to trigger drift correction + status, err = r.Monitor.UpdateStatus(ctx, bd, resources) + if err != nil { + logger.Error(err, "Cannot monitor deployed bundle") + + // if there is an error, do not use the returned status from monitor + bd.Status = setCondition(bd.Status, err, condition.Cond(fleetv1.BundleDeploymentConditionMonitored)) + merr = append(merr, fmt.Errorf("failed updating status: %w", err)) + } else { + // we add to the status from deployer.DeployBundle + bd.Status = setCondition(status, nil, condition.Cond(fleetv1.BundleDeploymentConditionMonitored)) + } + + // Run drift correction + if len(status.ModifiedStatus) > 0 && bd.Spec.CorrectDrift.Enabled { + err = r.Deployer.RemoveExternalChanges(ctx, bd) + if err != nil { + merr = append(merr, fmt.Errorf("failed reconciling drift: %w", err)) + } + } + + // TODO is this needed with drift correction? 
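+		// Resetting AppliedDeploymentID causes the deployer to redeploy the bundle on the next reconcile.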
+ if len(bd.Status.ModifiedStatus) > 0 && monitor.ShouldRedeploy(bd) { + result.RequeueAfter = durations.MonitorBundleDelay + logger.Info("Redeploying, by resetting the appliedDeploymentID") + bd.Status.AppliedDeploymentID = "" + + if monitor.IsAgent(bd) { + if err := r.Cleanup.OldAgent(ctx, status.ModifiedStatus); err != nil { + merr = append(merr, fmt.Errorf("failed cleaning old agent: %w", err)) + } + } + } + } + + // update our mini controller, which watches deployed resources for drift + err = r.DriftDetect.Refresh(logger, req.String(), bd, resources) + if err != nil { + logger.V(1).Error(err, "Failed to refresh drift detection", "step", "drift") + merr = append(merr, fmt.Errorf("failed refreshing drift detection: %w", err)) + } + + err = r.Cleanup.CleanupReleases(ctx, key, bd) + if err != nil { + logger.V(1).Error(err, "Failed to clean up bundledeployment releases") + } + + // final status update + logger.V(1).Info("Reconcile finished, updating the bundledeployment status") + err = updateStatus(ctx, logger, r.Client, req.NamespacedName, bd.Status) + if apierrors.IsNotFound(err) { + merr = append(merr, fmt.Errorf("bundledeployment has been deleted: %w", err)) + } else if err != nil { + merr = append(merr, fmt.Errorf("failed final update to bundledeployment status: %w", err)) + } + + return result, errutil.NewAggregate(merr) +} + +func ignoreConflict(err error) error { + if apierrors.IsConflict(err) { + return nil + } + return err +} + +// setCondition sets the condition and updates the timestamp, if the condition changed +func setCondition(newStatus fleetv1.BundleDeploymentStatus, err error, cond condition.Cond) fleetv1.BundleDeploymentStatus { + cond.SetError(&newStatus, "", ignoreConflict(err)) + return newStatus +} + +func updateStatus(ctx context.Context, logger logr.Logger, client client.Client, nsn types.NamespacedName, status fleetv1.BundleDeploymentStatus) error { + err := retry.RetryOnConflict(DefaultRetry, func() error { + newBD := &fleetv1.BundleDeployment{} + err := client.Get(ctx, nsn, newBD) + if err != nil { + return err + } + newBD.Status = status + err = client.Status().Update(ctx, newBD) + if err != nil { + logger.V(1).Error(err, "Reconcile failed final update to bundledeployment status", "status", status) + } + return err + }) + return err +} + +// SetupWithManager sets up the controller with the Manager. +func (r *BundleDeploymentReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&fleetv1.BundleDeployment{}). + WithEventFilter( + // we do not trigger for status changes + predicate.Or( + predicate.GenerationChangedPredicate{}, + predicate.AnnotationChangedPredicate{}, + predicate.LabelChangedPredicate{}, + predicate.Funcs{ + // except for changes to status.Refresh + UpdateFunc: func(e event.UpdateEvent) bool { + n := e.ObjectNew.(*fleetv1.BundleDeployment) + o := e.ObjectOld.(*fleetv1.BundleDeployment) + if n == nil || o == nil { + return false + } + return n.Status.SyncGeneration != o.Status.SyncGeneration + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return true + }, + }, + )). + Complete(r) +} diff --git a/internal/cmd/agent/controllers/bundledeployment/controller.go b/internal/cmd/agent/controllers/bundledeployment/controller.go deleted file mode 100644 index 9ad64ddc0b..0000000000 --- a/internal/cmd/agent/controllers/bundledeployment/controller.go +++ /dev/null @@ -1,511 +0,0 @@ -// Package bundledeployment deploys bundles, monitors them and cleans up. 
-package bundledeployment - -import ( - "context" - "errors" - "fmt" - "reflect" - "regexp" - "strings" - "sync" - "time" - - "github.com/sirupsen/logrus" - - "github.com/rancher/fleet/internal/cmd/agent/deployer" - "github.com/rancher/fleet/internal/cmd/agent/deployer/cleanup" - "github.com/rancher/fleet/internal/cmd/agent/deployer/driftdetect" - "github.com/rancher/fleet/internal/cmd/agent/deployer/monitor" - "github.com/rancher/fleet/internal/cmd/agent/trigger" - "github.com/rancher/fleet/internal/helmdeployer" - fleet "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" - "github.com/rancher/fleet/pkg/durations" - fleetcontrollers "github.com/rancher/fleet/pkg/generated/controllers/fleet.cattle.io/v1alpha1" - - "github.com/rancher/wrangler/v2/pkg/condition" - "github.com/rancher/wrangler/v2/pkg/merr" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/dynamic" -) - -var nsResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"} - -type handler struct { - cleanupOnce sync.Once - - ctx context.Context - trigger *trigger.Trigger - - Deployer *deployer.Deployer - Monitor *monitor.Monitor - DriftDetect *driftdetect.DriftDetect - Cleanup *cleanup.Cleanup - - bdController fleetcontrollers.BundleDeploymentController - restMapper meta.RESTMapper - dynamic dynamic.Interface -} - -func Register(ctx context.Context, - trigger *trigger.Trigger, - restMapper meta.RESTMapper, - dynamic dynamic.Interface, - deployer *deployer.Deployer, - cleanup *cleanup.Cleanup, - monitor *monitor.Monitor, - driftDetect *driftdetect.DriftDetect, - bdController fleetcontrollers.BundleDeploymentController) { - - h := &handler{ - ctx: ctx, - trigger: trigger, - Deployer: deployer, - Cleanup: cleanup, - Monitor: monitor, - DriftDetect: driftDetect, - bdController: bdController, - restMapper: restMapper, - dynamic: dynamic, - } - - fleetcontrollers.RegisterBundleDeploymentStatusHandler(ctx, - bdController, - condition.Cond(fleet.BundleDeploymentConditionDeployed), - "bundle-deploy", - h.DeployBundle) - - fleetcontrollers.RegisterBundleDeploymentStatusHandler(ctx, - bdController, - "Monitored", - "bundle-monitor", - h.MonitorBundle) - - bdController.OnChange(ctx, "bundle-trigger", h.Trigger) - bdController.OnChange(ctx, "bundle-cleanup", h.CleanupReleases) -} - -func (h *handler) garbageCollect() { - for { - if err := h.Cleanup.Cleanup(); err != nil { - logrus.Errorf("failed to cleanup orphaned releases: %v", err) - } - select { - case <-h.ctx.Done(): - return - case <-time.After(wait.Jitter(durations.GarbageCollect, 1.0)): - } - } -} - -func (h *handler) CleanupReleases(key string, bd *fleet.BundleDeployment) (*fleet.BundleDeployment, error) { - h.cleanupOnce.Do(func() { - go h.garbageCollect() - }) - - if bd != nil { - return bd, nil - } - return nil, h.Deployer.Delete(key) -} - -func (h *handler) DeployBundle(bd *fleet.BundleDeployment, status fleet.BundleDeploymentStatus) (fleet.BundleDeploymentStatus, error) { - if bd.Spec.Paused { - // nothing to do - logrus.Debugf("Bundle %s/%s is paused", bd.Namespace, bd.Name) - return status, nil - } - - if err := h.checkDependency(bd); err != nil { - logrus.Debugf("Bundle %s/%s has a dependency that is not ready: %v", bd.Namespace, bd.Name, err) - return status, err - } - - 
logrus.Infof("Deploying bundle %s/%s", bd.Namespace, bd.Name) - release, err := h.Deployer.Deploy(bd) - if err != nil { - // When an error from DeployBundle is returned it causes DeployBundle - // to requeue and keep trying to deploy on a loop. If there is something - // wrong with the deployed manifests this will be a loop that re-deploying - // cannot fix. Here we catch those errors and update the status to note - // the problem while skipping the constant requeuing. - if do, newStatus := deployErrToStatus(err, status); do { - // Setting the release to an empty string remove the previous - // release name. When a deployment fails the release name is not - // returned. Keeping the old release name can lead to other functions - // looking up old data in the history and presenting the wrong status. - // For example, the h.deployManager.Deploy function will find the old - // release and not return an error. It will set everything as if the - // current one is running properly. - newStatus.Release = "" - newStatus.AppliedDeploymentID = bd.Spec.DeploymentID - return newStatus, nil - } - return status, err - } - status.Release = release - status.AppliedDeploymentID = bd.Spec.DeploymentID - - if err := h.setNamespaceLabelsAndAnnotations(bd, release); err != nil { - return fleet.BundleDeploymentStatus{}, err - } - - // Setting the error to nil clears any existing error - condition.Cond(fleet.BundleDeploymentConditionInstalled).SetError(&status, "", nil) - return status, nil -} - -// setNamespaceLabelsAndAnnotations updates the namespace for the release, applying all labels and annotations to that namespace as configured in the bundle spec. -func (h *handler) setNamespaceLabelsAndAnnotations(bd *fleet.BundleDeployment, releaseID string) error { - if bd.Spec.Options.NamespaceLabels == nil && bd.Spec.Options.NamespaceAnnotations == nil { - return nil - } - - ns, err := h.fetchNamespace(releaseID) - if err != nil { - return err - } - - if reflect.DeepEqual(bd.Spec.Options.NamespaceLabels, ns.Labels) && reflect.DeepEqual(bd.Spec.Options.NamespaceAnnotations, ns.Annotations) { - return nil - } - - if bd.Spec.Options.NamespaceLabels != nil { - addLabelsFromOptions(ns.Labels, *bd.Spec.Options.NamespaceLabels) - } - if bd.Spec.Options.NamespaceAnnotations != nil { - if ns.Annotations == nil { - ns.Annotations = map[string]string{} - } - addAnnotationsFromOptions(ns.Annotations, *bd.Spec.Options.NamespaceAnnotations) - } - err = h.updateNamespace(ns) - if err != nil { - return err - } - - return nil -} - -// updateNamespace updates a namespace resource in the cluster. -func (h *handler) updateNamespace(ns *corev1.Namespace) error { - u, err := runtime.DefaultUnstructuredConverter.ToUnstructured(ns) - if err != nil { - return err - } - _, err = h.dynamic.Resource(nsResource).Update(h.ctx, &unstructured.Unstructured{Object: u}, metav1.UpdateOptions{}) - if err != nil { - return err - } - - return nil -} - -// fetchNamespace gets the namespace matching the release ID. Returns an error if none is found. 
-func (h *handler) fetchNamespace(releaseID string) (*corev1.Namespace, error) { - // releaseID is composed of release.Namespace/release.Name/release.Version - namespace := strings.Split(releaseID, "/")[0] - list, err := h.dynamic.Resource(nsResource).List(h.ctx, metav1.ListOptions{ - LabelSelector: "name=" + namespace, - }) - if err != nil { - return nil, err - } - if len(list.Items) == 0 { - return nil, fmt.Errorf("namespace %s not found", namespace) - } - var ns corev1.Namespace - err = runtime.DefaultUnstructuredConverter. - FromUnstructured(list.Items[0].Object, &ns) - if err != nil { - return nil, err - } - - return &ns, nil -} - -// addLabelsFromOptions updates nsLabels so that it only contains all labels specified in optLabels, plus the `name` labels added by Helm when creating the namespace. -func addLabelsFromOptions(nsLabels map[string]string, optLabels map[string]string) { - for k, v := range optLabels { - nsLabels[k] = v - } - - // Delete labels not defined in the options. - // Keep the name label as it is added by helm when creating the namespace. - for k := range nsLabels { - if _, ok := optLabels[k]; k != "name" && !ok { - delete(nsLabels, k) - } - } -} - -// addAnnotationsFromOptions updates nsAnnotations so that it only contains all annotations specified in optAnnotations. -func addAnnotationsFromOptions(nsAnnotations map[string]string, optAnnotations map[string]string) { - for k, v := range optAnnotations { - nsAnnotations[k] = v - } - - // Delete Annotations not defined in the options. - for k := range nsAnnotations { - if _, ok := optAnnotations[k]; !ok { - delete(nsAnnotations, k) - } - } -} - -// deployErrToStatus converts an error into a status update -func deployErrToStatus(err error, status fleet.BundleDeploymentStatus) (bool, fleet.BundleDeploymentStatus) { - if err == nil { - return false, status - } - - msg := err.Error() - - // The following error conditions are turned into a status - // Note: these error strings are returned by the Helm SDK and its dependencies - re := regexp.MustCompile( - "(timed out waiting for the condition)|" + // a Helm wait occurs and it times out - "(error validating data)|" + // manifests fail to pass validation - "(chart requires kubeVersion)|" + // kubeVersion mismatch - "(annotation validation error)|" + // annotations fail to pass validation - "(failed, and has been rolled back due to atomic being set)|" + // atomic is set and a rollback occurs - "(YAML parse error)|" + // YAML is broken in source files - "(Forbidden: updates to [0-9A-Za-z]+ spec for fields other than [0-9A-Za-z ']+ are forbidden)|" + // trying to update fields that cannot be updated - "(Forbidden: spec is immutable after creation)|" + // trying to modify immutable spec - "(chart requires kubeVersion: [0-9A-Za-z\\.\\-<>=]+ which is incompatible with Kubernetes)", // trying to deploy to incompatible Kubernetes - ) - if re.MatchString(msg) { - status.Ready = false - status.NonModified = true - - // The ready status is displayed throughout the UI. Setting this as well - // as installed enables the status to be displayed when looking at the - // CRD or a UI build on that. - readyError := fmt.Errorf("not ready: %s", msg) - condition.Cond(fleet.BundleDeploymentConditionReady).SetError(&status, "", readyError) - - // Deployed and Monitored conditions are automated. They are true if their - // handlers return no error and false if an error is returned. When an - // error is returned they are requeued. 
To capture the state of an error - // that is not returned a new condition is being captured. Ready is the - // condition that displays for status in general and it is used for - // the readiness of resources. Only when we cannot capture the status of - // resources, like here, can use use it for a message like the above. - // The Installed condition lets us have a condition to capture the error - // that can be bubbled up for Bundles and Gitrepos to consume. - installError := fmt.Errorf("not installed: %s", msg) - condition.Cond(fleet.BundleDeploymentConditionInstalled).SetError(&status, "", installError) - - return true, status - } - - // The case that the bundle is already in an error state. A previous - // condition with the error should already be applied. - if err == helmdeployer.ErrNoResourceID { - return true, status - } - - return false, status -} - -func (h *handler) checkDependency(bd *fleet.BundleDeployment) error { - var depBundleList []string - bundleNamespace := bd.Labels[fleet.BundleNamespaceLabel] - for _, depend := range bd.Spec.DependsOn { - // skip empty BundleRef definitions. Possible if there is a typo in the yaml - if depend.Name != "" || depend.Selector != nil { - ls := &metav1.LabelSelector{} - if depend.Selector != nil { - ls = depend.Selector - } - - // depend.Name is just a shortcut for matchLabels: {bundle-name: name} - if depend.Name != "" { - ls = metav1.AddLabelToSelector(ls, fleet.BundleLabel, depend.Name) - ls = metav1.AddLabelToSelector(ls, fleet.BundleNamespaceLabel, bundleNamespace) - } - - selector, err := metav1.LabelSelectorAsSelector(ls) - if err != nil { - return err - } - bds, err := h.bdController.Cache().List(bd.Namespace, selector) - if err != nil { - return err - } - - if len(bds) == 0 { - return fmt.Errorf("list bundledeployments: no bundles matching labels %s in namespace %s", selector.String(), bundleNamespace) - } - - for _, depBundle := range bds { - c := condition.Cond("Ready") - if c.IsTrue(depBundle) { - continue - } else { - depBundleList = append(depBundleList, depBundle.Name) - } - } - } - } - - if len(depBundleList) != 0 { - return fmt.Errorf("dependent bundle(s) are not ready: %v", depBundleList) - } - - return nil -} - -func (h *handler) Trigger(key string, bd *fleet.BundleDeployment) (*fleet.BundleDeployment, error) { - if bd == nil || bd.Spec.Paused { - return bd, h.trigger.Clear(key) - } - - logrus.Debugf("Triggering for bundledeployment '%s'", key) - - resources, err := h.DriftDetect.AllResources(bd) - if err != nil { - return bd, err - } - - if resources == nil { - return bd, nil - } - - logrus.Debugf("Adding OnChange for bundledeployment's '%s' resource list", key) - return bd, h.trigger.OnChange(key, resources.DefaultNamespace, func() { - // enqueue bundledeployment if any resource changes - h.bdController.EnqueueAfter(bd.Namespace, bd.Name, 0) - }, resources.Objects...) 
-} - -func isAgent(bd *fleet.BundleDeployment) bool { - return strings.HasPrefix(bd.Name, "fleet-agent") -} - -func shouldRedeploy(bd *fleet.BundleDeployment) bool { - if isAgent(bd) { - return true - } - if bd.Spec.Options.ForceSyncGeneration <= 0 { - return false - } - if bd.Status.SyncGeneration == nil { - return true - } - return *bd.Status.SyncGeneration != bd.Spec.Options.ForceSyncGeneration -} - -func (h *handler) cleanupOldAgent(modifiedStatuses []fleet.ModifiedStatus) error { - var errs []error - for _, modified := range modifiedStatuses { - if modified.Delete { - gvk := schema.FromAPIVersionAndKind(modified.APIVersion, modified.Kind) - mapping, err := h.restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) - if err != nil { - errs = append(errs, fmt.Errorf("mapping resource for %s for agent cleanup: %w", gvk, err)) - continue - } - - logrus.Infof("Removing old agent resource %s/%s, %s", modified.Namespace, modified.Name, gvk) - err = h.dynamic.Resource(mapping.Resource).Namespace(modified.Namespace).Delete(h.ctx, modified.Name, metav1.DeleteOptions{}) - if err != nil { - errs = append(errs, fmt.Errorf("deleting %s/%s for %s for agent cleanup: %w", modified.Namespace, modified.Name, gvk, err)) - continue - } - } - } - return merr.NewErrors(errs...) -} - -// removePrivateFields removes fields from the status, which won't be marshalled to JSON. -// They would however trigger a status update in apply -func removePrivateFields(s1 *fleet.BundleDeploymentStatus) { - for id := range s1.NonReadyStatus { - s1.NonReadyStatus[id].Summary.Relationships = nil - s1.NonReadyStatus[id].Summary.Attributes = nil - } -} - -func (h *handler) MonitorBundle(bd *fleet.BundleDeployment, status fleet.BundleDeploymentStatus) (fleet.BundleDeploymentStatus, error) { - if bd.Spec.DeploymentID != status.AppliedDeploymentID { - return status, nil - } - - // If the bundle failed to install the status should not be updated. Updating - // here would remove the condition message that was previously set on it. - if condition.Cond(fleet.BundleDeploymentConditionInstalled).IsFalse(bd) { - return status, nil - } - - // Same considerations in case the bundle is paused - if bd.Spec.Paused { - return status, nil - } - - err := h.Monitor.UpdateBundleDeploymentStatus(h.restMapper, bd) - if err != nil { - - // Returning an error will cause MonitorBundle to requeue in a loop. - // When there is no resourceID the error should be on the status. Without - // the ID we do not have the information to lookup the resources to - // compute the plan and discover the state of resources. 
- if err == helmdeployer.ErrNoResourceID { - return status, nil - } - - return status, err - } - status = bd.Status - - readyError := readyError(status) - condition.Cond(fleet.BundleDeploymentConditionReady).SetError(&status, "", readyError) - if len(status.ModifiedStatus) > 0 { - h.bdController.EnqueueAfter(bd.Namespace, bd.Name, durations.MonitorBundleDelay) - if shouldRedeploy(bd) { - logrus.Infof("Redeploying %s", bd.Name) - status.AppliedDeploymentID = "" - if isAgent(bd) { - if err := h.cleanupOldAgent(status.ModifiedStatus); err != nil { - return status, fmt.Errorf("failed to clean up agent: %w", err) - } - } - } - } - - status.SyncGeneration = &bd.Spec.Options.ForceSyncGeneration - if readyError != nil { - logrus.Errorf("bundle %s: %v", bd.Name, readyError) - } - - removePrivateFields(&status) - return status, nil -} - -func readyError(status fleet.BundleDeploymentStatus) error { - if status.Ready && status.NonModified { - return nil - } - - var msg string - if !status.Ready { - msg = "not ready" - if len(status.NonReadyStatus) > 0 { - msg = status.NonReadyStatus[0].String() - } - } else if !status.NonModified { - msg = "out of sync" - if len(status.ModifiedStatus) > 0 { - msg = status.ModifiedStatus[0].String() - } - } - - return errors.New(msg) -} diff --git a/internal/cmd/agent/controllers/controllers.go b/internal/cmd/agent/controllers/controllers.go deleted file mode 100644 index 7d2d5eb3dd..0000000000 --- a/internal/cmd/agent/controllers/controllers.go +++ /dev/null @@ -1,204 +0,0 @@ -// Package controllers wires and starts the controllers for the agent. -package controllers - -import ( - "context" - - "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/util/workqueue" - - "github.com/rancher/fleet/internal/cmd/agent/controllers/bundledeployment" - "github.com/rancher/fleet/internal/cmd/agent/deployer" - "github.com/rancher/fleet/internal/cmd/agent/deployer/cleanup" - "github.com/rancher/fleet/internal/cmd/agent/deployer/driftdetect" - "github.com/rancher/fleet/internal/cmd/agent/deployer/monitor" - "github.com/rancher/fleet/internal/cmd/agent/trigger" - "github.com/rancher/fleet/internal/helmdeployer" - "github.com/rancher/fleet/internal/manifest" - "github.com/rancher/fleet/pkg/durations" - "github.com/rancher/fleet/pkg/generated/controllers/fleet.cattle.io" - fleetcontrollers "github.com/rancher/fleet/pkg/generated/controllers/fleet.cattle.io/v1alpha1" - - cache2 "github.com/rancher/lasso/pkg/cache" - "github.com/rancher/lasso/pkg/client" - "github.com/rancher/lasso/pkg/controller" - "github.com/rancher/wrangler/v2/pkg/apply" - batchcontrollers "github.com/rancher/wrangler/v2/pkg/generated/controllers/batch/v1" - "github.com/rancher/wrangler/v2/pkg/generated/controllers/core" - corecontrollers "github.com/rancher/wrangler/v2/pkg/generated/controllers/core/v1" - "github.com/rancher/wrangler/v2/pkg/start" - - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/client-go/discovery" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" -) - -type AppContext struct { - Fleet fleetcontrollers.Interface - Core corecontrollers.Interface - Batch batchcontrollers.Interface - Dynamic dynamic.Interface - Apply apply.Apply - starters []start.Starter - - ClusterNamespace string - ClusterName string - AgentNamespace string - - clientConfig clientcmd.ClientConfig - restConfig *rest.Config - cachedDiscoveryInterface discovery.CachedDiscoveryInterface - restMapper meta.RESTMapper -} - 
-func (a *AppContext) ToRawKubeConfigLoader() clientcmd.ClientConfig { - return a.clientConfig -} - -func (a *AppContext) ToRESTConfig() (*rest.Config, error) { - return a.restConfig, nil -} - -func (a *AppContext) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) { - return a.cachedDiscoveryInterface, nil -} - -func (a *AppContext) ToRESTMapper() (meta.RESTMapper, error) { - return a.restMapper, nil -} - -func (a *AppContext) Start(ctx context.Context) error { - return start.All(ctx, 5, a.starters...) -} - -func Register(ctx context.Context, - appCtx *AppContext, - fleetNamespace, defaultNamespace, agentScope string) error { - - labelPrefix := "fleet" - if defaultNamespace != "" { - labelPrefix = defaultNamespace - } - - helmDeployer, err := helmdeployer.NewHelm(appCtx.AgentNamespace, defaultNamespace, labelPrefix, agentScope, appCtx, - appCtx.Core.ServiceAccount().Cache(), appCtx.Core.ConfigMap().Cache(), appCtx.Core.Secret().Cache()) - if err != nil { - return err - } - - bundledeployment.Register(ctx, - trigger.New(ctx, appCtx.restMapper, appCtx.Dynamic), - appCtx.restMapper, - appCtx.Dynamic, - deployer.New( - manifest.NewLookup(appCtx.Fleet.Content()), - helmDeployer), - cleanup.New( - fleetNamespace, - defaultNamespace, - appCtx.Fleet.BundleDeployment().Cache(), - appCtx.Fleet.BundleDeployment(), - helmDeployer), - monitor.New( - defaultNamespace, - labelPrefix, - agentScope, - helmDeployer, - appCtx.Apply), - driftdetect.New( - defaultNamespace, - labelPrefix, - agentScope, - helmDeployer, - appCtx.Apply), - appCtx.Fleet.BundleDeployment()) - - return nil -} - -func newSharedControllerFactory(config *rest.Config, mapper meta.RESTMapper, namespace string) (controller.SharedControllerFactory, error) { - cf, err := client.NewSharedClientFactory(config, &client.SharedClientFactoryOptions{ - Mapper: mapper, - }) - if err != nil { - return nil, err - } - - cacheFactory := cache2.NewSharedCachedFactory(cf, &cache2.SharedCacheFactoryOptions{ - DefaultNamespace: namespace, - DefaultResync: durations.DefaultResyncAgent, - }) - slowRateLimiter := workqueue.NewItemExponentialFailureRateLimiter(durations.SlowFailureRateLimiterBase, durations.SlowFailureRateLimiterMax) - return controller.NewSharedControllerFactory(cacheFactory, &controller.SharedControllerFactoryOptions{ - KindRateLimiter: map[schema.GroupVersionKind]workqueue.RateLimiter{ - v1alpha1.SchemeGroupVersion.WithKind("BundleDeployment"): slowRateLimiter, - }, - }), nil -} - -func NewAppContext(fleetNamespace, agentNamespace, clusterNamespace, clusterName string, - fleetRESTConfig *rest.Config, clientConfig clientcmd.ClientConfig, - fleetMapper, mapper meta.RESTMapper, discovery discovery.CachedDiscoveryInterface) (*AppContext, error) { - - // set up factory for upstream cluster - fleetFactory, err := newSharedControllerFactory(fleetRESTConfig, fleetMapper, fleetNamespace) - if err != nil { - return nil, err - } - - fleet, err := fleet.NewFactoryFromConfigWithOptions(fleetRESTConfig, &fleet.FactoryOptions{ - SharedControllerFactory: fleetFactory, - }) - if err != nil { - return nil, err - } - - localConfig, err := clientConfig.ClientConfig() - if err != nil { - return nil, err - } - - // set up factory for local cluster - localFactory, err := newSharedControllerFactory(localConfig, mapper, "") - if err != nil { - return nil, err - } - - core, err := core.NewFactoryFromConfigWithOptions(localConfig, &core.FactoryOptions{ - SharedControllerFactory: localFactory, - }) - if err != nil { - return nil, err - } - - apply, err 
:= apply.NewForConfig(localConfig) - if err != nil { - return nil, err - } - - dynamic, err := dynamic.NewForConfig(localConfig) - if err != nil { - return nil, err - } - - return &AppContext{ - Dynamic: dynamic, - Apply: apply, - Fleet: fleet.Fleet().V1alpha1(), - Core: core.Core().V1(), - ClusterNamespace: clusterNamespace, - ClusterName: clusterName, - AgentNamespace: agentNamespace, - clientConfig: clientConfig, - restConfig: localConfig, - cachedDiscoveryInterface: discovery, - restMapper: mapper, - starters: []start.Starter{ - core, - fleet, - }, - }, nil -} diff --git a/internal/cmd/agent/deployer/cleanup/cleanup.go b/internal/cmd/agent/deployer/cleanup/cleanup.go index c9944ffa75..43a3e92e0e 100644 --- a/internal/cmd/agent/deployer/cleanup/cleanup.go +++ b/internal/cmd/agent/deployer/cleanup/cleanup.go @@ -1,49 +1,115 @@ package cleanup import ( + "context" + "fmt" + "sync" + "time" + + "github.com/go-logr/logr" + "github.com/rancher/fleet/internal/helmdeployer" fleet "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" - fleetcontrollers "github.com/rancher/fleet/pkg/generated/controllers/fleet.cattle.io/v1alpha1" + "github.com/rancher/fleet/pkg/durations" - "github.com/sirupsen/logrus" + "github.com/rancher/wrangler/v2/pkg/kv" + "github.com/rancher/wrangler/v2/pkg/merr" apierror "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/dynamic" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" ) type Cleanup struct { - fleetNamespace string - defaultNamespace string - bundleDeploymentCache fleetcontrollers.BundleDeploymentCache - deployer *helmdeployer.Helm - bundleDeploymentController fleetcontrollers.BundleDeploymentController + client client.Client + fleetNamespace string + defaultNamespace string + helmDeployer *helmdeployer.Helm + cleanupOnce sync.Once + + mapper meta.RESTMapper + // localDynamicClient is a dynamic client for the cluster the agent is running on (local cluster). 
+ localDynamicClient *dynamic.DynamicClient } -func New(fleetNamespace string, - defaultNamespace string, - bundleDeploymentCache fleetcontrollers.BundleDeploymentCache, - bundleDeploymentController fleetcontrollers.BundleDeploymentController, - deployer *helmdeployer.Helm) *Cleanup { +func New(upstream client.Client, mapper meta.RESTMapper, localDynamicClient *dynamic.DynamicClient, deployer *helmdeployer.Helm, fleetNamespace string, defaultNamespace string) *Cleanup { return &Cleanup{ - fleetNamespace: fleetNamespace, - defaultNamespace: defaultNamespace, - bundleDeploymentCache: bundleDeploymentCache, - deployer: deployer, - bundleDeploymentController: bundleDeploymentController, + client: upstream, + mapper: mapper, + localDynamicClient: localDynamicClient, + helmDeployer: deployer, + fleetNamespace: fleetNamespace, + defaultNamespace: defaultNamespace, } } -func (m *Cleanup) Cleanup() error { - deployed, err := m.deployer.ListDeployments() +func (c *Cleanup) OldAgent(ctx context.Context, modifiedStatuses []fleet.ModifiedStatus) error { + logger := log.FromContext(ctx).WithName("cleanupOldAgent") + var errs []error + for _, modified := range modifiedStatuses { + if modified.Delete { + gvk := schema.FromAPIVersionAndKind(modified.APIVersion, modified.Kind) + mapping, err := c.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + errs = append(errs, fmt.Errorf("mapping resource for %s for agent cleanup: %w", gvk, err)) + continue + } + + logger.Info("Removing old agent resource", "namespace", modified.Namespace, "name", modified.Name, "gvk", gvk) + err = c.localDynamicClient.Resource(mapping.Resource).Namespace(modified.Namespace).Delete(ctx, modified.Name, metav1.DeleteOptions{}) + if err != nil { + errs = append(errs, fmt.Errorf("deleting %s/%s for %s for agent cleanup: %w", modified.Namespace, modified.Name, gvk, err)) + continue + } + } + } + return merr.NewErrors(errs...) 
+} + +func (c *Cleanup) CleanupReleases(ctx context.Context, key string, bd *fleet.BundleDeployment) error { + c.cleanupOnce.Do(func() { + go c.garbageCollect(ctx) + }) + + if bd != nil { + return nil + } + return c.delete(ctx, key) +} + +func (c *Cleanup) garbageCollect(ctx context.Context) { + logger := log.FromContext(ctx).WithName("garbageCollect") + for { + if err := c.cleanup(ctx, logger); err != nil { + logger.Error(err, "failed to cleanup orphaned releases") + } + select { + case <-ctx.Done(): + return + case <-time.After(wait.Jitter(durations.GarbageCollect, 1.0)): + } + } +} + +func (c *Cleanup) cleanup(ctx context.Context, logger logr.Logger) error { + deployed, err := c.helmDeployer.ListDeployments() if err != nil { return err } for _, deployed := range deployed { - bundleDeployment, err := m.bundleDeploymentCache.Get(m.fleetNamespace, deployed.BundleID) + bundleDeployment := &fleet.BundleDeployment{} + err := c.client.Get(ctx, types.NamespacedName{Namespace: c.fleetNamespace, Name: deployed.BundleID}, bundleDeployment) if apierror.IsNotFound(err) { // found a helm secret, but no bundle deployment, so uninstall the release - logrus.Infof("Deleting orphan bundle ID %s, release %s", deployed.BundleID, deployed.ReleaseName) - if err := m.deployer.Delete(deployed.BundleID, deployed.ReleaseName); err != nil { + logger.Info("Deleting orphan bundle ID, helm uninstall", "bundleID", deployed.BundleID, "release", deployed.ReleaseName) + if err := c.helmDeployer.Delete(ctx, deployed.BundleID, deployed.ReleaseName); err != nil { return err } @@ -52,11 +118,11 @@ func (m *Cleanup) Cleanup() error { return err } - key := m.releaseKey(bundleDeployment) + key := releaseKey(c.defaultNamespace, bundleDeployment) if key != deployed.ReleaseName { // found helm secret and bundle deployment for BundleID, but release name doesn't match, so delete the release - logrus.Infof("Deleting unknown bundle ID %s, release %s, expecting release %s", deployed.BundleID, deployed.ReleaseName, key) - if err := m.deployer.Delete(deployed.BundleID, deployed.ReleaseName); err != nil { + logger.Info("Deleting unknown bundle ID, helm uninstall", "bundleID", deployed.BundleID, "release", deployed.ReleaseName, "expectedRelease", key) + if err := c.helmDeployer.Delete(ctx, deployed.BundleID, deployed.ReleaseName); err != nil { return err } } @@ -65,9 +131,13 @@ func (m *Cleanup) Cleanup() error { return nil } +func (c *Cleanup) delete(ctx context.Context, bundleDeploymentKey string) error { + _, name := kv.RSplit(bundleDeploymentKey, "/") + return c.helmDeployer.Delete(ctx, name, "") +} + // releaseKey returns a deploymentKey from namespace+releaseName -func (m *Cleanup) releaseKey(bd *fleet.BundleDeployment) string { - ns := m.defaultNamespace +func releaseKey(ns string, bd *fleet.BundleDeployment) string { if bd.Spec.Options.TargetNamespace != "" { ns = bd.Spec.Options.TargetNamespace } else if bd.Spec.Options.DefaultNamespace != "" { diff --git a/internal/cmd/agent/deployer/deployer.go b/internal/cmd/agent/deployer/deployer.go index 14af216647..5dab555a6a 100644 --- a/internal/cmd/agent/deployer/deployer.go +++ b/internal/cmd/agent/deployer/deployer.go @@ -1,51 +1,314 @@ package deployer import ( + "context" + "fmt" + "reflect" + "regexp" + "strings" + "github.com/rancher/fleet/internal/helmdeployer" "github.com/rancher/fleet/internal/manifest" fleet "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" + "github.com/rancher/wrangler/v2/pkg/condition" "github.com/rancher/wrangler/v2/pkg/kv" + + corev1 
"k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" ) type Deployer struct { - lookup manifest.Lookup - deployer *helmdeployer.Helm + client client.Client + upstreamClient client.Reader + lookup Lookup + helm *helmdeployer.Helm } -func New(lookup manifest.Lookup, deployer *helmdeployer.Helm) *Deployer { +type Lookup interface { + Get(ctx context.Context, client client.Reader, id string) (*manifest.Manifest, error) +} + +func New(localClient client.Client, upstreamClient client.Reader, lookup Lookup, deployer *helmdeployer.Helm) *Deployer { return &Deployer{ - lookup: lookup, - deployer: deployer, + client: localClient, + upstreamClient: upstreamClient, + lookup: lookup, + helm: deployer, } } -func (m *Deployer) Delete(bundleDeploymentKey string) error { - _, name := kv.RSplit(bundleDeploymentKey, "/") - return m.deployer.Delete(name, "") +func (d *Deployer) Resources(name string, release string) (*helmdeployer.Resources, error) { + return d.helm.Resources(name, release) +} + +func (d *Deployer) RemoveExternalChanges(ctx context.Context, bd *fleet.BundleDeployment) error { + return d.helm.RemoveExternalChanges(ctx, bd) +} + +// DeployBundle deploys the bundle deployment with the helm SDK. It does not +// mutate bd, instead it returns the modified status +func (d *Deployer) DeployBundle(ctx context.Context, bd *fleet.BundleDeployment) (fleet.BundleDeploymentStatus, error) { + status := bd.Status + logger := log.FromContext(ctx).WithName("DeployBundle").WithValues("deploymentID", bd.Spec.DeploymentID, "appliedDeploymentID", status.AppliedDeploymentID) + + if err := d.checkDependency(ctx, bd); err != nil { + logger.V(1).Info("Bundle has a dependency that is not ready", "error", err) + return status, err + } + + logger.Info("Checking if bundle needs to be deployed") + release, err := d.helmdeploy(ctx, bd) + if err != nil { + // When an error from DeployBundle is returned it causes DeployBundle + // to requeue and keep trying to deploy on a loop. If there is something + // wrong with the deployed manifests this will be a loop that re-deploying + // cannot fix. Here we catch those errors and update the status to note + // the problem while skipping the constant requeuing. + if do, newStatus := deployErrToStatus(err, status); do { + // Setting the release to an empty string removes the previous + // release name. When a deployment fails the release name is not + // returned. Keeping the old release name can lead to other functions + // looking up old data in the history and presenting the wrong status. + // For example, the deployManager.Deploy function will find the old + // release and not return an error. It will set everything as if the + // current one is running properly. + newStatus.Release = "" + newStatus.AppliedDeploymentID = bd.Spec.DeploymentID + return newStatus, nil + } + return status, err + } + status.Release = release + status.AppliedDeploymentID = bd.Spec.DeploymentID + + if err := d.setNamespaceLabelsAndAnnotations(ctx, bd, release); err != nil { + return fleet.BundleDeploymentStatus{}, err + } + + // Setting the error to nil clears any existing error + condition.Cond(fleet.BundleDeploymentConditionInstalled).SetError(&status, "", nil) + return status, nil } // Deploy the bundle deployment, i.e. with helmdeployer. // This loads the manifest and the contents from the upstream cluster. 
-func (m *Deployer) Deploy(bd *fleet.BundleDeployment) (string, error) { +func (d *Deployer) helmdeploy(ctx context.Context, bd *fleet.BundleDeployment) (string, error) { if bd.Spec.DeploymentID == bd.Status.AppliedDeploymentID { - if ok, err := m.deployer.EnsureInstalled(bd.Name, bd.Status.Release); err != nil { + if ok, err := d.helm.EnsureInstalled(bd.Name, bd.Status.Release); err != nil { return "", err } else if ok { return bd.Status.Release, nil } } manifestID, _ := kv.Split(bd.Spec.DeploymentID, ":") - manifest, err := m.lookup.Get(manifestID) + manifest, err := d.lookup.Get(ctx, d.upstreamClient, manifestID) if err != nil { return "", err } manifest.Commit = bd.Labels["fleet.cattle.io/commit"] - resource, err := m.deployer.Deploy(bd.Name, manifest, bd.Spec.Options) + resource, err := d.helm.Deploy(ctx, bd.Name, manifest, bd.Spec.Options) if err != nil { return "", err } return resource.ID, nil } + +// setNamespaceLabelsAndAnnotations updates the namespace for the release, applying all labels and annotations to that namespace as configured in the bundle spec. +func (d *Deployer) setNamespaceLabelsAndAnnotations(ctx context.Context, bd *fleet.BundleDeployment, releaseID string) error { + if bd.Spec.Options.NamespaceLabels == nil && bd.Spec.Options.NamespaceAnnotations == nil { + return nil + } + + ns, err := d.fetchNamespace(ctx, releaseID) + if err != nil { + return err + } + + if reflect.DeepEqual(bd.Spec.Options.NamespaceLabels, ns.Labels) && reflect.DeepEqual(bd.Spec.Options.NamespaceAnnotations, ns.Annotations) { + return nil + } + + if bd.Spec.Options.NamespaceLabels != nil { + addLabelsFromOptions(ns.Labels, *bd.Spec.Options.NamespaceLabels) + } + if bd.Spec.Options.NamespaceAnnotations != nil { + if ns.Annotations == nil { + ns.Annotations = map[string]string{} + } + addAnnotationsFromOptions(ns.Annotations, *bd.Spec.Options.NamespaceAnnotations) + } + err = d.updateNamespace(ctx, ns) + if err != nil { + return err + } + + return nil +} + +// updateNamespace updates a namespace resource in the cluster. +func (d *Deployer) updateNamespace(ctx context.Context, ns *corev1.Namespace) error { + err := d.client.Update(ctx, ns) + if err != nil { + return err + } + + return nil +} + +// fetchNamespace gets the namespace matching the release ID. Returns an error if none is found. +func (d *Deployer) fetchNamespace(ctx context.Context, releaseID string) (*corev1.Namespace, error) { + // releaseID is composed of release.Namespace/release.Name/release.Version + namespace := strings.Split(releaseID, "/")[0] + list := &corev1.NamespaceList{} + err := d.client.List(ctx, list, client.MatchingLabels{ + "name": namespace, + }) + if err != nil { + return nil, err + } + if len(list.Items) == 0 { + return nil, fmt.Errorf("namespace %s not found", namespace) + } + return &list.Items[0], nil +} + +// addLabelsFromOptions updates nsLabels so that it only contains all labels specified in optLabels, plus the `name` labels added by Helm when creating the namespace. +func addLabelsFromOptions(nsLabels map[string]string, optLabels map[string]string) { + for k, v := range optLabels { + nsLabels[k] = v + } + + // Delete labels not defined in the options. + // Keep the name label as it is added by helm when creating the namespace. + for k := range nsLabels { + if _, ok := optLabels[k]; k != "name" && !ok { + delete(nsLabels, k) + } + } +} + +// addAnnotationsFromOptions updates nsAnnotations so that it only contains all annotations specified in optAnnotations. 
+func addAnnotationsFromOptions(nsAnnotations map[string]string, optAnnotations map[string]string) {
+	for k, v := range optAnnotations {
+		nsAnnotations[k] = v
+	}
+
+	// Delete Annotations not defined in the options.
+	for k := range nsAnnotations {
+		if _, ok := optAnnotations[k]; !ok {
+			delete(nsAnnotations, k)
+		}
+	}
+}
+
+// deployErrToStatus converts an error into a status update
+func deployErrToStatus(err error, status fleet.BundleDeploymentStatus) (bool, fleet.BundleDeploymentStatus) {
+	if err == nil {
+		return false, status
+	}
+
+	msg := err.Error()
+
+	// The following error conditions are turned into a status
+	// Note: these error strings are returned by the Helm SDK and its dependencies
+	re := regexp.MustCompile(
+		"(timed out waiting for the condition)|" + // a Helm wait occurs and it times out
+			"(error validating data)|" + // manifests fail to pass validation
+			"(chart requires kubeVersion)|" + // kubeVersion mismatch
+			"(annotation validation error)|" + // annotations fail to pass validation
+			"(failed, and has been rolled back due to atomic being set)|" + // atomic is set and a rollback occurs
+			"(YAML parse error)|" + // YAML is broken in source files
+			"(Forbidden: updates to [0-9A-Za-z]+ spec for fields other than [0-9A-Za-z ']+ are forbidden)|" + // trying to update fields that cannot be updated
+			"(Forbidden: spec is immutable after creation)|" + // trying to modify immutable spec
+			"(chart requires kubeVersion: [0-9A-Za-z\\.\\-<>=]+ which is incompatible with Kubernetes)", // trying to deploy to incompatible Kubernetes
+	)
+	if re.MatchString(msg) {
+		status.Ready = false
+		status.NonModified = true
+
+		// The ready status is displayed throughout the UI. Setting this as well
+		// as installed enables the status to be displayed when looking at the
+		// CRD or a UI built on that.
+		readyError := fmt.Errorf("not ready: %s", msg)
+		condition.Cond(fleet.BundleDeploymentConditionReady).SetError(&status, "", readyError)
+
+		// Deployed and Monitored conditions are handled in the reconciler.
+		// They are true if the deployer returns no error and false if
+		// an error is returned. When an error is returned they are
+		// requeued. To capture the state of an error that is not
+		// returned a new condition is being captured. Ready is the
+		// condition that displays for status in general and it is used
+		// for the readiness of resources. Only when we cannot capture
+		// the status of resources, like here, can we use it for a
+		// message like the above. The Installed condition lets us have
+		// a condition to capture the error that can be bubbled up for
+		// Bundles and Gitrepos to consume.
+		installError := fmt.Errorf("not installed: %s", msg)
+		condition.Cond(fleet.BundleDeploymentConditionInstalled).SetError(&status, "", installError)
+
+		return true, status
+	}
+
+	// The case that the bundle is already in an error state. A previous
+	// condition with the error should already be applied.
+	if err == helmdeployer.ErrNoResourceID {
+		return true, status
+	}
+
+	return false, status
+}
+
+func (d *Deployer) checkDependency(ctx context.Context, bd *fleet.BundleDeployment) error {
+	var depBundleList []string
+	bundleNamespace := bd.Labels[fleet.BundleNamespaceLabel]
+	for _, depend := range bd.Spec.DependsOn {
+		// skip empty BundleRef definitions. 
Possible if there is a typo in the yaml + if depend.Name != "" || depend.Selector != nil { + ls := &metav1.LabelSelector{} + if depend.Selector != nil { + ls = depend.Selector + } + + // depend.Name is just a shortcut for matchLabels: {bundle-name: name} + if depend.Name != "" { + ls = metav1.AddLabelToSelector(ls, fleet.BundleLabel, depend.Name) + ls = metav1.AddLabelToSelector(ls, fleet.BundleNamespaceLabel, bundleNamespace) + } + + selector, err := metav1.LabelSelectorAsSelector(ls) + if err != nil { + return err + } + + bds := fleet.BundleDeploymentList{} + err = d.upstreamClient.List(ctx, &bds, client.MatchingLabelsSelector{Selector: selector}, client.InNamespace(bd.Namespace)) + if err != nil { + return err + } + + if len(bds.Items) == 0 { + return fmt.Errorf("list bundledeployments: no bundles matching labels %s in namespace %s", selector.String(), bundleNamespace) + } + + for _, depBundle := range bds.Items { + c := condition.Cond("Ready") + if c.IsTrue(depBundle) { + continue + } else { + depBundleList = append(depBundleList, depBundle.Name) + } + } + } + } + + if len(depBundleList) != 0 { + return fmt.Errorf("dependent bundle(s) are not ready: %v", depBundleList) + } + + return nil +} diff --git a/internal/cmd/agent/controllers/bundledeployment/controller_test.go b/internal/cmd/agent/deployer/deployer_test.go similarity index 61% rename from internal/cmd/agent/controllers/bundledeployment/controller_test.go rename to internal/cmd/agent/deployer/deployer_test.go index fee311ab94..ca64338f31 100644 --- a/internal/cmd/agent/controllers/bundledeployment/controller_test.go +++ b/internal/cmd/agent/deployer/deployer_test.go @@ -1,20 +1,21 @@ -package bundledeployment +package deployer //go:generate mockgen --build_flags=--mod=mod -destination=../../../controller/mocks/dynamic_mock.go -package mocks k8s.io/client-go/dynamic Interface,NamespaceableResourceInterface import ( + "context" "errors" "testing" - "github.com/golang/mock/gomock" - - "github.com/rancher/fleet/internal/cmd/controller/mocks" fleet "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client/fake" ) func TestSetNamespaceLabelsAndAnnotations(t *testing.T) { @@ -33,13 +34,14 @@ func TestSetNamespaceLabelsAndAnnotations(t *testing.T) { }}, ns: corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"name": "test"}, + Name: "namespace1234", + Labels: map[string]string{"name": "namespace"}, }, }, release: "namespace/foo/bar", expectedNs: corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"name": "test", "optLabel1": "optValue1", "optLabel2": "optValue2"}, + Labels: map[string]string{"name": "namespace", "optLabel1": "optValue1", "optLabel2": "optValue2"}, Annotations: map[string]string{"optAnn1": "optValue1"}, }, }, @@ -54,14 +56,15 @@ func TestSetNamespaceLabelsAndAnnotations(t *testing.T) { }}, ns: corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"nsLabel": "nsValue", "name": "test"}, + Name: "namespace1234", + Labels: map[string]string{"nsLabel": "nsValue", "name": "namespace"}, Annotations: map[string]string{"nsAnn": "nsValue"}, }, }, release: "namespace/foo/bar", expectedNs: corev1.Namespace{ 
ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"optLabel": "optValue", "name": "test"}, + Labels: map[string]string{"optLabel": "optValue", "name": "namespace"}, Annotations: map[string]string{}, }, }, @@ -76,14 +79,15 @@ func TestSetNamespaceLabelsAndAnnotations(t *testing.T) { }}, ns: corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"bdLabel": "nsValue"}, + Name: "namespace1234", + Labels: map[string]string{"bdLabel": "nsValue", "name": "namespace"}, Annotations: map[string]string{"bdAnn": "nsValue"}, }, }, release: "namespace/foo/bar", expectedNs: corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"bdLabel": "labelUpdated"}, + Labels: map[string]string{"bdLabel": "labelUpdated", "name": "namespace"}, Annotations: map[string]string{"bdAnn": "annUpdated"}, }, }, @@ -92,30 +96,32 @@ func TestSetNamespaceLabelsAndAnnotations(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockDynamic := mocks.NewMockInterface(ctrl) - mockNamespaceableResourceInterface := mocks.NewMockNamespaceableResourceInterface(ctrl) - u, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(&test.ns) - // Resource will be called twice, one time for UPDATE and another time for LIST - mockDynamic.EXPECT().Resource(gomock.Any()).Return(mockNamespaceableResourceInterface).Times(2) - mockNamespaceableResourceInterface.EXPECT().List(gomock.Any(), metav1.ListOptions{ - LabelSelector: "name=namespace", - }).Return(&unstructured.UnstructuredList{ - Items: []unstructured.Unstructured{{Object: u}}, - }, nil).Times(1) - uns, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(&test.expectedNs) - mockNamespaceableResourceInterface.EXPECT().Update(gomock.Any(), &unstructured.Unstructured{Object: uns}, gomock.Any()).Times(1) - - h := handler{ - dynamic: mockDynamic, + scheme := runtime.NewScheme() + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&test.ns).Build() + h := Deployer{ + client: client, + } + err := h.setNamespaceLabelsAndAnnotations(context.Background(), test.bd, test.release) + if err != nil { + t.Errorf("expected nil error: got %v", err) } - err := h.setNamespaceLabelsAndAnnotations(test.bd, test.release) + ns := &corev1.Namespace{} + err = client.Get(context.Background(), types.NamespacedName{Name: test.ns.Name}, ns) if err != nil { t.Errorf("expected nil error: got %v", err) } + for k, v := range test.expectedNs.Labels { + if ns.Labels[k] != v { + t.Errorf("expected label %s: %s, got %s", k, v, ns.Labels[k]) + } + } + for k, v := range test.expectedNs.Annotations { + if ns.Annotations[k] != v { + t.Errorf("expected annotation %s: %s, got %s", k, v, ns.Annotations[k]) + } + } }) } } @@ -130,20 +136,14 @@ func TestSetNamespaceLabelsAndAnnotationsError(t *testing.T) { release := "test/foo/bar" expectedErr := errors.New("namespace test not found") - ctrl := gomock.NewController(t) - defer ctrl.Finish() - mockDynamic := mocks.NewMockInterface(ctrl) - mockNamespaceableResourceInterface := mocks.NewMockNamespaceableResourceInterface(ctrl) - mockDynamic.EXPECT().Resource(gomock.Any()).Return(mockNamespaceableResourceInterface).Times(1) - mockNamespaceableResourceInterface.EXPECT().List(gomock.Any(), metav1.ListOptions{ - LabelSelector: "name=test", - }).Return(&unstructured.UnstructuredList{ - Items: []unstructured.Unstructured{}, - }, nil).Times(1) - h := handler{ - dynamic: mockDynamic, + 
scheme := runtime.NewScheme() + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + client := fake.NewClientBuilder().WithScheme(scheme).Build() + h := Deployer{ + client: client, } - err := h.setNamespaceLabelsAndAnnotations(bd, release) + + err := h.setNamespaceLabelsAndAnnotations(context.Background(), bd, release) if err.Error() != expectedErr.Error() { t.Errorf("expected error %v: got %v", expectedErr, err) diff --git a/internal/cmd/agent/deployer/driftdetect/driftdetect.go b/internal/cmd/agent/deployer/driftdetect/driftdetect.go index 1e19c24248..f52b7c2d52 100644 --- a/internal/cmd/agent/deployer/driftdetect/driftdetect.go +++ b/internal/cmd/agent/deployer/driftdetect/driftdetect.go @@ -1,57 +1,143 @@ package driftdetect import ( + "context" + + "github.com/go-logr/logr" + "github.com/rancher/fleet/internal/cmd/agent/deployer/plan" + "github.com/rancher/fleet/internal/cmd/agent/trigger" "github.com/rancher/fleet/internal/helmdeployer" fleet "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" "github.com/rancher/wrangler/v2/pkg/apply" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/client-go/util/retry" + "sigs.k8s.io/controller-runtime/pkg/client" ) type DriftDetect struct { - defaultNamespace string - deployer *helmdeployer.Helm + // Trigger watches deployed resources on the local cluster. + trigger *trigger.Trigger + + upstreamClient client.Client + upstreamReader client.Reader + apply apply.Apply + defaultNamespace string labelPrefix string labelSuffix string } -func New(defaultNamespace string, - labelPrefix, labelSuffix string, - deployer *helmdeployer.Helm, - apply apply.Apply) *DriftDetect { +func New( + trigger *trigger.Trigger, + upstreamClient client.Client, + upstreamReader client.Reader, + apply apply.Apply, + defaultNamespace string, + labelPrefix string, + labelSuffix string, +) *DriftDetect { return &DriftDetect{ + trigger: trigger, + upstreamClient: upstreamClient, + upstreamReader: upstreamReader, + apply: apply.WithDynamicLookup(), defaultNamespace: defaultNamespace, labelPrefix: labelPrefix, labelSuffix: labelSuffix, - deployer: deployer, - apply: apply.WithDynamicLookup(), } } -// AllResources returns the resources that are deployed by the bundle deployment, -// according to the helm release history. It adds to be deleted resources to -// the list, by comparing the desired state to the actual state with apply. -func (m *DriftDetect) AllResources(bd *fleet.BundleDeployment) (*helmdeployer.Resources, error) { - resources, err := m.deployer.Resources(bd.Name, bd.Status.Release) +func (d *DriftDetect) Clear(bdKey string) error { + return d.trigger.Clear(bdKey) +} + +// Refresh triggers a sync of all resources of the provided bd which may have drifted from their desired state. 
+func (d *DriftDetect) Refresh(logger logr.Logger, bdKey string, bd *fleet.BundleDeployment, resources *helmdeployer.Resources) error { + logger = logger.WithName("DriftDetect") + logger.V(1).Info("Refreshing drift detection") + + resources, err := d.allResources(bd, resources) + if err != nil { + return err + } + + if resources == nil { + return nil + } + + logger.V(1).Info("Adding OnChange for bundledeployment's resource list") + logger = logger.WithValues("key", bdKey, "initialResourceVersion", bd.ResourceVersion) + + handleID := int(bd.Generation) + handler := func(key string) { + logger := logger.WithValues("handleID", handleID, "triggered by", key) + err := retry.RetryOnConflict(retry.DefaultRetry, func() error { + // Can't enqueue directly, update bundledeployment instead + return d.requeueBD(logger, handleID, bd.Namespace, bd.Name) + }) + if err != nil { + logger.Error(err, "Failed to trigger bundledeployment", "error", err) + return + } + + } + return d.trigger.OnChange(bdKey, resources.DefaultNamespace, handler, resources.Objects...) +} + +func (d *DriftDetect) requeueBD(logger logr.Logger, handleID int, namespace string, name string) error { + bd := &fleet.BundleDeployment{} + + err := d.upstreamReader.Get(context.Background(), client.ObjectKey{Name: name, Namespace: namespace}, bd) + if apierrors.IsNotFound(err) { + logger.Info("Bundledeployment is not found, can't trigger refresh") + return nil + } if err != nil { - return nil, nil + logger.Error(err, "Failed to get bundledeployment, can't trigger refresh") + return nil } + logger = logger.WithValues("resourceVersion", bd.ResourceVersion) + logger.V(1).Info("Going to update bundledeployment to trigger re-sync") + + // This mechanism of triggering requeues for changes is not ideal. + // It's a workaround since we can't enqueue directly from the trigger + // mini controller. Triggering via a status update is expensive. + // It's hard to compute a stable hash to make this idempotent, because + // the hash would need to be computed over the whole change. We can't + // just use the resource version of the bundle deployment. We would + // need to look at the deployed resources and compute a hash over them. + // However this status update happens for every changed resource, maybe + // multiple times per resource. It will also trigger on a resync. + bd.Status.SyncGeneration = &[]int64{int64(handleID)}[0] + + err = d.upstreamClient.Status().Update(context.Background(), bd) + if err != nil { + logger.V(1).Info("Retry to update bundledeployment, couldn't update status to trigger re-sync", "conflict", apierrors.IsConflict(err), "error", err) + } + return err +} + +// allResources returns the resources that are deployed by the bundle deployment, +// according to the helm release history. It adds to be deleted resources to +// the list, by comparing the desired state to the actual state with apply. +func (d *DriftDetect) allResources(bd *fleet.BundleDeployment, resources *helmdeployer.Resources) (*helmdeployer.Resources, error) { ns := resources.DefaultNamespace if ns == "" { - ns = m.defaultNamespace + ns = d.defaultNamespace } - apply := plan.GetApply(m.apply, plan.Options{ - LabelPrefix: m.labelPrefix, - LabelSuffix: m.labelSuffix, + apply := plan.GetApply(d.apply, plan.Options{ + LabelPrefix: d.labelPrefix, + LabelSuffix: d.labelSuffix, DefaultNamespace: ns, Name: bd.Name, }) - plan, err := plan.Plan(apply, bd, resources.DefaultNamespace, resources.Objects...) + plan, err := apply.DryRun(resources.Objects...) 
if err != nil { return nil, err } diff --git a/internal/cmd/agent/deployer/internal/diff/diff_options.go b/internal/cmd/agent/deployer/internal/diff/diff_options.go index 6f0c7f9d1d..30399d0bde 100644 --- a/internal/cmd/agent/deployer/internal/diff/diff_options.go +++ b/internal/cmd/agent/deployer/internal/diff/diff_options.go @@ -3,8 +3,7 @@ package diff import ( "github.com/go-logr/logr" - - "k8s.io/klog/v2/klogr" + "k8s.io/klog/v2/textlogger" ) type Option func(*options) @@ -21,7 +20,7 @@ func applyOptions(opts []Option) options { o := options{ ignoreAggregatedRoles: false, normalizer: GetNoopNormalizer(), - log: klogr.New(), + log: textlogger.NewLogger(textlogger.NewConfig()), } for _, opt := range opts { opt(&o) diff --git a/internal/cmd/agent/deployer/monitor/monitor_test.go b/internal/cmd/agent/deployer/monitor/conditions_test.go similarity index 100% rename from internal/cmd/agent/deployer/monitor/monitor_test.go rename to internal/cmd/agent/deployer/monitor/conditions_test.go diff --git a/internal/cmd/agent/deployer/monitor/monitor.go b/internal/cmd/agent/deployer/monitor/updatestatus.go similarity index 60% rename from internal/cmd/agent/deployer/monitor/monitor.go rename to internal/cmd/agent/deployer/monitor/updatestatus.go index 97213e54d0..b7dcf810e3 100644 --- a/internal/cmd/agent/deployer/monitor/monitor.go +++ b/internal/cmd/agent/deployer/monitor/updatestatus.go @@ -1,18 +1,19 @@ package monitor import ( + "context" + "errors" "fmt" "sort" + "strings" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - - "github.com/rancher/fleet/internal/cmd/agent/deployer/plan" + "github.com/go-logr/logr" + aplan "github.com/rancher/fleet/internal/cmd/agent/deployer/plan" "github.com/rancher/fleet/internal/helmdeployer" fleet "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" - fleetcontrollers "github.com/rancher/fleet/pkg/generated/controllers/fleet.cattle.io/v1alpha1" "github.com/rancher/wrangler/v2/pkg/apply" + "github.com/rancher/wrangler/v2/pkg/condition" "github.com/rancher/wrangler/v2/pkg/objectset" "github.com/rancher/wrangler/v2/pkg/summary" @@ -20,37 +21,130 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/log" ) type Monitor struct { - defaultNamespace string - deployer *helmdeployer.Helm - apply apply.Apply - labelPrefix string - labelSuffix string - bundleDeploymentController fleetcontrollers.BundleDeploymentController + apply apply.Apply + mapper meta.RESTMapper + + deployer *helmdeployer.Helm + + defaultNamespace string + labelPrefix string + labelSuffix string } -func New(defaultNamespace string, - labelPrefix, labelSuffix string, - deployer *helmdeployer.Helm, - apply apply.Apply) *Monitor { +func New(apply apply.Apply, mapper meta.RESTMapper, deployer *helmdeployer.Helm, defaultNamespace string, labelSuffix string) *Monitor { return &Monitor{ + apply: apply, + mapper: mapper, + deployer: deployer, defaultNamespace: defaultNamespace, - labelPrefix: labelPrefix, + labelPrefix: defaultNamespace, labelSuffix: labelSuffix, - deployer: deployer, - apply: apply.WithDynamicLookup(), } } -// UpdateBundleDeploymentStatus updates the status with information from the -// helm release history and an apply dry run. 
-func (m *Monitor) UpdateBundleDeploymentStatus(mapper meta.RESTMapper, bd *fleet.BundleDeployment) error { - resources, err := m.deployer.Resources(bd.Name, bd.Status.Release) +func ShouldRedeploy(bd *fleet.BundleDeployment) bool { + if IsAgent(bd) { + return true + } + if bd.Spec.Options.ForceSyncGeneration <= 0 { + return false + } + if bd.Status.SyncGeneration == nil { + return true + } + return *bd.Status.SyncGeneration != bd.Spec.Options.ForceSyncGeneration +} + +func IsAgent(bd *fleet.BundleDeployment) bool { + return strings.HasPrefix(bd.Name, "fleet-agent") +} + +func ShouldUpdateStatus(bd *fleet.BundleDeployment) bool { + if bd.Spec.DeploymentID != bd.Status.AppliedDeploymentID { + return false + } + + // If the bundle failed to install the status should not be updated. Updating + // here would remove the condition message that was previously set on it. + if condition.Cond(fleet.BundleDeploymentConditionInstalled).IsFalse(bd) { + return false + } + + return true +} + +func (m *Monitor) UpdateStatus(ctx context.Context, bd *fleet.BundleDeployment, resources *helmdeployer.Resources) (fleet.BundleDeploymentStatus, error) { + logger := log.FromContext(ctx).WithName("UpdateStatus") + + // updateFromResources mutates bd.Status, so copy it first + origStatus := *bd.Status.DeepCopy() + bd = bd.DeepCopy() + err := m.updateFromResources(logger, bd, resources) if err != nil { - return err + + // Returning an error will cause UpdateStatus to requeue in a loop. + // When there is no resourceID the error should be on the status. Without + // the ID we do not have the information to lookup the resources to + // compute the plan and discover the state of resources. + if err == helmdeployer.ErrNoResourceID { + return origStatus, nil + } + + return origStatus, err } + status := bd.Status + + readyError := readyError(status) + condition.Cond(fleet.BundleDeploymentConditionReady).SetError(&status, "", readyError) + + status.SyncGeneration = &bd.Spec.Options.ForceSyncGeneration + if readyError != nil { + logger.Info("Status not ready", "error", readyError) + } + + removePrivateFields(&status) + return status, nil +} + +// removePrivateFields removes fields from the status, which won't be marshalled to JSON. +// They would however trigger a status update in apply +func removePrivateFields(s1 *fleet.BundleDeploymentStatus) { + for id := range s1.NonReadyStatus { + s1.NonReadyStatus[id].Summary.Relationships = nil + s1.NonReadyStatus[id].Summary.Attributes = nil + } +} + +// readyError returns an error based on the provided status. +// That error is non-nil if the status corresponds to a non-ready or modified state of the bundle deployment. +func readyError(status fleet.BundleDeploymentStatus) error { + if status.Ready && status.NonModified { + return nil + } + + var msg string + if !status.Ready { + msg = "not ready" + if len(status.NonReadyStatus) > 0 { + msg = status.NonReadyStatus[0].String() + } + } else if !status.NonModified { + msg = "out of sync" + if len(status.ModifiedStatus) > 0 { + msg = status.ModifiedStatus[0].String() + } + } + + return errors.New(msg) +} + +// updateFromResources updates the status with information from the +// helm release history and an apply dry run. 
+func (m *Monitor) updateFromResources(logger logr.Logger, bd *fleet.BundleDeployment, resources *helmdeployer.Resources) error { resourcesPreviousRelease, err := m.deployer.ResourcesFromPreviousReleaseVersion(bd.Name, bd.Status.Release) if err != nil { return err @@ -60,19 +154,23 @@ func (m *Monitor) UpdateBundleDeploymentStatus(mapper meta.RESTMapper, bd *fleet if ns == "" { ns = m.defaultNamespace } - apply := plan.GetApply(m.apply, plan.Options{ + apply := aplan.GetApply(m.apply, aplan.Options{ LabelPrefix: m.labelPrefix, LabelSuffix: m.labelSuffix, DefaultNamespace: ns, Name: bd.Name, }) - plan, err := plan.Plan(apply, bd, resources.DefaultNamespace, resources.Objects...) + plan, err := apply.DryRun(resources.Objects...) + if err != nil { + return err + } + plan, err = aplan.Diff(plan, bd, resources.DefaultNamespace, resources.Objects...) if err != nil { return err } - bd.Status.NonReadyStatus = nonReady(plan, bd.Spec.Options.IgnoreOptions) + bd.Status.NonReadyStatus = nonReady(logger, plan, bd.Spec.Options.IgnoreOptions) bd.Status.ModifiedStatus = modified(plan, resourcesPreviousRelease) bd.Status.Ready = false bd.Status.NonModified = false @@ -82,28 +180,18 @@ func (m *Monitor) UpdateBundleDeploymentStatus(mapper meta.RESTMapper, bd *fleet } if len(bd.Status.ModifiedStatus) == 0 { bd.Status.NonModified = true - } else if bd.Spec.CorrectDrift.Enabled { - err = m.deployer.RemoveExternalChanges(bd) - if err != nil { - // Update BundleDeployment status as wrangler doesn't update the status if error is not nil. - _, errStatus := m.bundleDeploymentController.UpdateStatus(bd) - if errStatus != nil { - return errors.Wrap(err, "error updating status when reconciling drift: "+errStatus.Error()) - } - return errors.Wrapf(err, "error reconciling drift") - } } bd.Status.Resources = []fleet.BundleDeploymentResource{} for _, obj := range plan.Objects { - m, err := meta.Accessor(obj) + ma, err := meta.Accessor(obj) if err != nil { return err } - ns := m.GetNamespace() + ns := ma.GetNamespace() gvk := obj.GetObjectKind().GroupVersionKind() - if ns == "" && isNamespaced(mapper, gvk) { + if ns == "" && isNamespaced(m.mapper, gvk) { ns = resources.DefaultNamespace } @@ -112,14 +200,49 @@ func (m *Monitor) UpdateBundleDeploymentStatus(mapper meta.RESTMapper, bd *fleet Kind: kind, APIVersion: version, Namespace: ns, - Name: m.GetName(), - CreatedAt: m.GetCreationTimestamp(), + Name: ma.GetName(), + CreatedAt: ma.GetCreationTimestamp(), }) } return nil } +func nonReady(logger logr.Logger, plan apply.Plan, ignoreOptions fleet.IgnoreOptions) (result []fleet.NonReadyStatus) { + defer func() { + sort.Slice(result, func(i, j int) bool { + return result[i].UID < result[j].UID + }) + }() + + for _, obj := range plan.Objects { + if len(result) >= 10 { + return result + } + if u, ok := obj.(*unstructured.Unstructured); ok { + if ignoreOptions.Conditions != nil { + if err := excludeIgnoredConditions(u, ignoreOptions); err != nil { + logger.Error(err, "failed to ignore conditions") + } + } + + summary := summary.Summarize(u) + if !summary.IsReady() { + result = append(result, fleet.NonReadyStatus{ + UID: u.GetUID(), + Kind: u.GetKind(), + APIVersion: u.GetAPIVersion(), + Namespace: u.GetNamespace(), + Name: u.GetName(), + Summary: summary, + }) + } + } + } + + return result +} + func modified(plan apply.Plan, resourcesPreviousRelease *helmdeployer.Resources) (result []fleet.ModifiedStatus) { defer func() { sort.Slice(result, func(i, j int) bool { @@ -197,41 +320,6 @@ func isResourceInPreviousRelease(key 
objectset.ObjectKey, kind string, objsPrevi return false } -func nonReady(plan apply.Plan, ignoreOptions fleet.IgnoreOptions) (result []fleet.NonReadyStatus) { - defer func() { - sort.Slice(result, func(i, j int) bool { - return result[i].UID < result[j].UID - }) - }() - - for _, obj := range plan.Objects { - if len(result) >= 10 { - return result - } - if u, ok := obj.(*unstructured.Unstructured); ok { - if ignoreOptions.Conditions != nil { - if err := excludeIgnoredConditions(u, ignoreOptions); err != nil { - logrus.Errorf("failed to ignore conditions: %v", err) - } - } - - summary := summary.Summarize(u) - if !summary.IsReady() { - result = append(result, fleet.NonReadyStatus{ - UID: u.GetUID(), - Kind: u.GetKind(), - APIVersion: u.GetAPIVersion(), - Namespace: u.GetNamespace(), - Name: u.GetName(), - Summary: summary, - }) - } - } - } - - return result -} - // excludeIgnoredConditions removes the conditions that are included in ignoreOptions from the object passed as a parameter func excludeIgnoredConditions(obj *unstructured.Unstructured, ignoreOptions fleet.IgnoreOptions) error { conditions, _, err := unstructured.NestedSlice(obj.Object, "status", "conditions") diff --git a/internal/cmd/agent/deployer/plan/plan.go b/internal/cmd/agent/deployer/plan/plan.go index 336d728c7e..69e1ebadc1 100644 --- a/internal/cmd/agent/deployer/plan/plan.go +++ b/internal/cmd/agent/deployer/plan/plan.go @@ -35,15 +35,17 @@ func GetApply(apply apply.Apply, opts Options) apply.Apply { WithDefaultNamespace(opts.DefaultNamespace) } -// Plan first does a dry run of the apply to get the difference between the -// desired and live state. It relies on the bundledeployment's bundle diff -// patches to ignore changes. -func Plan(a apply.Apply, bd *fleet.BundleDeployment, ns string, objs ...runtime.Object) (apply.Plan, error) { +// DryRun does a dry run of the apply to get the difference between the +// desired and live state. +func DryRun(a apply.Apply, objs ...runtime.Object) (*apply.Plan, error) { plan, err := a.DryRun(objs...) - if err != nil { - return plan, err - } + return &plan, err +} +// Diff factors the bundledeployment's bundle diff patches into the plan from +// DryRun. This way, the status of the bundledeployment can be updated +// accurately. 
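Roughly, the two halves are meant to compose like this (sketch only; a, objs, bd and defaultNamespace are stand-ins, and the monitor imports this package as aplan):

    // dry-run against the live cluster first, then fold the bundledeployment's
    // diff patches into the resulting plan before deriving status from it
    p, err := aplan.DryRun(a, objs...) // a is an apply.Apply built via GetApply
    if err != nil {
        return err
    }
    plan, err := aplan.Diff(*p, bd, defaultNamespace, objs...)
    if err != nil {
        return err
    }
    // bd.Status.NonReadyStatus and ModifiedStatus are then computed from plan
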
+func Diff(plan apply.Plan, bd *fleet.BundleDeployment, ns string, objs ...runtime.Object) (apply.Plan, error) { desired := objectset.NewObjectSet(objs...).ObjectsByGVK() live := objectset.NewObjectSet(plan.Objects...).ObjectsByGVK() diff --git a/internal/cmd/agent/register.go b/internal/cmd/agent/register.go index ff01071d94..ba8866d5fe 100644 --- a/internal/cmd/agent/register.go +++ b/internal/cmd/agent/register.go @@ -4,13 +4,15 @@ import ( "context" "github.com/spf13/cobra" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" command "github.com/rancher/fleet/internal/cmd" "github.com/rancher/fleet/internal/cmd/agent/register" + "github.com/rancher/wrangler/v2/pkg/kubeconfig" + + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" ) func NewRegister() *cobra.Command { diff --git a/internal/cmd/agent/register/register.go b/internal/cmd/agent/register/register.go index 8412e88de5..95da54617f 100644 --- a/internal/cmd/agent/register/register.go +++ b/internal/cmd/agent/register/register.go @@ -82,7 +82,7 @@ func Get(ctx context.Context, namespace string, cfg *rest.Config) (*AgentInfo, e // Register creates a fleet-agent secret with the upstream kubeconfig, by // running the registration process with the upstream cluster. -// To run the initial registration, the fleet-agent-bootstrap secret must exist +// For the initial registration, the fleet-agent-bootstrap secret must exist // on the local cluster. func Register(ctx context.Context, namespace string, config *rest.Config) (*AgentInfo, error) { for { diff --git a/internal/cmd/agent/root.go b/internal/cmd/agent/root.go index cda5146fbc..cfd71c28c0 100644 --- a/internal/cmd/agent/root.go +++ b/internal/cmd/agent/root.go @@ -1,6 +1,7 @@ package agent import ( + "flag" "fmt" "log" "net/http" @@ -22,13 +23,18 @@ type UpstreamOptions struct { type FleetAgent struct { command.DebugConfig - UpstreamOptions + Namespace string `usage:"system namespace is the namespace, the agent runs in, e.g. cattle-fleet-system" env:"NAMESPACE"` AgentScope string `usage:"An identifier used to scope the agent bundleID names, typically the same as namespace" env:"AGENT_SCOPE"` } -var setupLog = ctrl.Log.WithName("setup") +var ( + setupLog = ctrl.Log.WithName("setup") + zopts = zap.Options{ + Development: true, + } +) -func (a *FleetAgent) PersistentPre(_ *cobra.Command, _ []string) error { +func (a *FleetAgent) PersistentPre(cmd *cobra.Command, _ []string) error { if err := a.SetupDebug(); err != nil { return fmt.Errorf("failed to setup debug logging: %w", err) } @@ -36,12 +42,13 @@ func (a *FleetAgent) PersistentPre(_ *cobra.Command, _ []string) error { } func (a *FleetAgent) Run(cmd *cobra.Command, args []string) error { - zopts := zap.Options{ - Development: true, - } + ctx := clog.IntoContext(cmd.Context(), ctrl.Log) + + // for compatibility, override zap opts with legacy debug opts. remove once manifests are updated. 
+ zopts.Development = a.Debug ctrl.SetLogger(zap.New(zap.UseFlagOptions(&zopts))) - ctx := clog.IntoContext(cmd.Context(), ctrl.Log) + localConfig := ctrl.GetConfigOrDie() go func() { log.Println(http.ListenAndServe("localhost:6060", nil)) // nolint:gosec // Debugging only @@ -50,7 +57,7 @@ func (a *FleetAgent) Run(cmd *cobra.Command, args []string) error { if a.Namespace == "" { return fmt.Errorf("--namespace or env NAMESPACE is required to be set") } - if err := start(ctx, a.Kubeconfig, a.Namespace, a.AgentScope); err != nil { + if err := start(ctx, localConfig, a.Namespace, a.AgentScope); err != nil { return err } @@ -62,6 +69,13 @@ func App() *cobra.Command { root := command.Command(&FleetAgent{}, cobra.Command{ Version: version.FriendlyVersion(), }) + // add command line flags from zap and controller-runtime, which use + // goflags and convert them to pflags + fs := flag.NewFlagSet("", flag.ExitOnError) + zopts.BindFlags(fs) + ctrl.RegisterFlags(fs) + root.Flags().AddGoFlagSet(fs) + root.AddCommand(NewClusterStatus()) root.AddCommand(NewRegister()) return root diff --git a/internal/cmd/agent/trigger/watcher.go b/internal/cmd/agent/trigger/watcher.go index 6951d6e315..0246d20f15 100644 --- a/internal/cmd/agent/trigger/watcher.go +++ b/internal/cmd/agent/trigger/watcher.go @@ -17,23 +17,25 @@ import ( "k8s.io/client-go/dynamic" ) +type triggerFunc func(key string) + type Trigger struct { sync.RWMutex ctx context.Context objectSets map[string]*objectset.ObjectSet watches map[schema.GroupVersionKind]*watcher - triggers map[schema.GroupVersionKind]map[objectset.ObjectKey]map[string]func() + triggers map[schema.GroupVersionKind]map[objectset.ObjectKey]map[string]triggerFunc restMapper meta.RESTMapper client dynamic.Interface } -func New(ctx context.Context, restMapper meta.RESTMapper, client dynamic.Interface) *Trigger { +func New(ctx context.Context, client dynamic.Interface, restMapper meta.RESTMapper) *Trigger { return &Trigger{ ctx: ctx, objectSets: map[string]*objectset.ObjectSet{}, watches: map[schema.GroupVersionKind]*watcher{}, - triggers: map[schema.GroupVersionKind]map[objectset.ObjectKey]map[string]func(){}, + triggers: map[schema.GroupVersionKind]map[objectset.ObjectKey]map[string]triggerFunc{}, restMapper: restMapper, client: client, } @@ -62,7 +64,7 @@ func setNamespace(nsed bool, key objectset.ObjectKey, defaultNamespace string) o return key } -func (t *Trigger) OnChange(key string, defaultNamespace string, trigger func(), objs ...runtime.Object) error { +func (t *Trigger) OnChange(key string, defaultNamespace string, trigger triggerFunc, objs ...runtime.Object) error { t.Lock() defer t.Unlock() @@ -92,12 +94,12 @@ func (t *Trigger) OnChange(key string, defaultNamespace string, trigger func(), objectKey = setNamespace(gvkNSed[gvk], objectKey, defaultNamespace) objectKeys, ok := t.triggers[gvk] if !ok { - objectKeys = map[objectset.ObjectKey]map[string]func(){} + objectKeys = map[objectset.ObjectKey]map[string]triggerFunc{} t.triggers[gvk] = objectKeys } funcs, ok := objectKeys[objectKey] if !ok { - funcs = map[string]func(){} + funcs = map[string]triggerFunc{} objectKeys[objectKey] = funcs } funcs[key] = trigger @@ -130,7 +132,7 @@ func (t *Trigger) call(gvk schema.GroupVersionKind, key objectset.ObjectKey) { defer t.RUnlock() for _, f := range t.triggers[gvk][key] { - f() + f(key.String()) } } diff --git a/internal/cmd/cli/match/match.go b/internal/cmd/cli/match/match.go index bcefd2d8df..72d61334f5 100644 --- a/internal/cmd/cli/match/match.go +++ 
b/internal/cmd/cli/match/match.go @@ -70,13 +70,13 @@ func Match(ctx context.Context, opts *Options) error { m := bm.Match(opts.ClusterName, map[string]map[string]string{ opts.ClusterGroup: opts.ClusterGroupLabels, }, opts.ClusterLabels) - return printMatch(bundle, m, opts.Output) + return printMatch(ctx, bundle, m, opts.Output) } - return printMatch(bundle, bm.MatchForTarget(opts.Target), opts.Output) + return printMatch(ctx, bundle, bm.MatchForTarget(opts.Target), opts.Output) } -func printMatch(bundle *fleet.Bundle, target *fleet.BundleTarget, output io.Writer) error { +func printMatch(ctx context.Context, bundle *fleet.Bundle, target *fleet.BundleTarget, output io.Writer) error { if target == nil { return errors.New("no match found") } @@ -89,7 +89,7 @@ func printMatch(bundle *fleet.Bundle, target *fleet.BundleTarget, output io.Writ manifest := manifest.New(bundle.Spec.Resources) - objs, err := helmdeployer.Template(bundle.Name, manifest, opts) + objs, err := helmdeployer.Template(ctx, bundle.Name, manifest, opts) if err != nil { return err } diff --git a/internal/cmd/controller/controllers/bundle/controller.go b/internal/cmd/controller/controllers/bundle/controller.go index a8331ee6d4..dbc8af67e3 100644 --- a/internal/cmd/controller/controllers/bundle/controller.go +++ b/internal/cmd/controller/controllers/bundle/controller.go @@ -161,7 +161,7 @@ func (h *handler) OnBundleChange(bundle *fleet.Bundle, status fleet.BundleStatus } if status.ObservedGeneration != bundle.Generation { - if err := setResourceKey(&status, bundle, manifest, h.isNamespaced); err != nil { + if err := setResourceKey(context.Background(), &status, bundle, manifest, h.isNamespaced); err != nil { updateDisplay(&status) return nil, status, err } @@ -190,14 +190,14 @@ func (h *handler) isNamespaced(gvk schema.GroupVersionKind) bool { } // setResourceKey updates status.ResourceKey from the bundle, by running helm template (does not mutate bundle) -func setResourceKey(status *fleet.BundleStatus, bundle *fleet.Bundle, manifest *manifest.Manifest, isNSed func(schema.GroupVersionKind) bool) error { +func setResourceKey(ctx context.Context, status *fleet.BundleStatus, bundle *fleet.Bundle, manifest *manifest.Manifest, isNSed func(schema.GroupVersionKind) bool) error { seen := map[fleet.ResourceKey]struct{}{} // iterate over the defined targets, from "targets.yaml", not the // actually matched targets to avoid duplicates for i := range bundle.Spec.Targets { opts := options.Merge(bundle.Spec.BundleDeploymentOptions, bundle.Spec.Targets[i].BundleDeploymentOptions) - objs, err := helmdeployer.Template(bundle.Name, manifest, opts) + objs, err := helmdeployer.Template(ctx, bundle.Name, manifest, opts) if err != nil { logrus.Infof("While calculating status.ResourceKey, error running helm template for bundle %s with target options from %s: %v", bundle.Name, bundle.Spec.Targets[i].Name, err) continue diff --git a/internal/cmd/controller/start.go b/internal/cmd/controller/start.go index 21225803ee..8c0963fbf5 100644 --- a/internal/cmd/controller/start.go +++ b/internal/cmd/controller/start.go @@ -6,9 +6,20 @@ import ( "github.com/rancher/fleet/internal/cmd/controller/controllers" "github.com/rancher/wrangler/v2/pkg/kubeconfig" "github.com/rancher/wrangler/v2/pkg/ratelimit" + + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" ) func start(ctx context.Context, systemNamespace string, kubeconfigFile string, disableGitops bool) error { + // provide a logger in 
the context to be compatible with controller-runtime + zopts := zap.Options{ + Development: true, + } + ctrl.SetLogger(zap.New(zap.UseFlagOptions(&zopts))) + ctx = log.IntoContext(ctx, ctrl.Log) + cfg := kubeconfig.GetNonInteractiveClientConfig(kubeconfigFile) clientConfig, err := cfg.ClientConfig() if err != nil { diff --git a/internal/helmdeployer/deployer.go b/internal/helmdeployer/deployer.go index 22da8ee860..6ca9adfde8 100644 --- a/internal/helmdeployer/deployer.go +++ b/internal/helmdeployer/deployer.go @@ -2,15 +2,16 @@ package helmdeployer import ( "bytes" + "context" "fmt" "strconv" "strings" "time" + "github.com/go-logr/logr" "github.com/pkg/errors" "github.com/rancher/fleet/internal/config" "github.com/rancher/fleet/internal/helmdeployer/helmcache" - "github.com/sirupsen/logrus" "helm.sh/helm/v3/pkg/action" "helm.sh/helm/v3/pkg/chart" "helm.sh/helm/v3/pkg/chart/loader" @@ -27,14 +28,17 @@ import ( fleet "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" "github.com/rancher/wrangler/v2/pkg/apply" - corecontrollers "github.com/rancher/wrangler/v2/pkg/generated/controllers/core/v1" "github.com/rancher/wrangler/v2/pkg/kv" "github.com/rancher/wrangler/v2/pkg/name" "github.com/rancher/wrangler/v2/pkg/yaml" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" "k8s.io/cli-runtime/pkg/genericclioptions" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" ) const ( @@ -65,12 +69,10 @@ type postRender struct { } type Helm struct { - agentNamespace string - serviceAccountCache corecontrollers.ServiceAccountCache - configmapCache corecontrollers.ConfigMapCache - secretCache corecontrollers.SecretCache - getter genericclioptions.RESTClientGetter - globalCfg action.Configuration + client client.Client + agentNamespace string + getter genericclioptions.RESTClientGetter + globalCfg action.Configuration // useGlobalCfg is only used by Template useGlobalCfg bool template bool @@ -86,7 +88,7 @@ type Resources struct { } type DeployedBundle struct { - // BundleID is the bundle.Name + // BundleID is the bundledeployment.Name BundleID string // ReleaseName is actually in the form "namespace/release name" ReleaseName string @@ -94,19 +96,18 @@ type DeployedBundle struct { KeepResources bool } -func NewHelm(namespace, defaultNamespace, labelPrefix, labelSuffix string, getter genericclioptions.RESTClientGetter, - serviceAccountCache corecontrollers.ServiceAccountCache, configmapCache corecontrollers.ConfigMapCache, secretCache corecontrollers.SecretCache) (*Helm, error) { +// NewHelm returns a new helm deployer +// * namespace is the system namespace, which is the namespace the agent is running in, e.g. 
cattle-fleet-system +func NewHelm(ctx context.Context, client client.Client, namespace, defaultNamespace, labelPrefix, labelSuffix string, getter genericclioptions.RESTClientGetter) (*Helm, error) { h := &Helm{ - getter: getter, - defaultNamespace: defaultNamespace, - agentNamespace: namespace, - serviceAccountCache: serviceAccountCache, - configmapCache: configmapCache, - secretCache: secretCache, - labelPrefix: labelPrefix, - labelSuffix: labelSuffix, - } - cfg, err := h.createCfg("") + client: client, + getter: getter, + defaultNamespace: defaultNamespace, + agentNamespace: namespace, + labelPrefix: labelPrefix, + labelSuffix: labelSuffix, + } + cfg, err := h.createCfg(ctx, "") if err != nil { return nil, err } @@ -190,7 +191,8 @@ func (p *postRender) Run(renderedManifests *bytes.Buffer) (modifiedManifests *by return bytes.NewBuffer(data), err } -func (h *Helm) Deploy(bundleID string, manifest *manifest.Manifest, options fleet.BundleDeploymentOptions) (*Resources, error) { +// Deploy deploys an unpacked content resource with helm. bundleID is the name of the bundledeployment. +func (h *Helm) Deploy(ctx context.Context, bundleID string, manifest *manifest.Manifest, options fleet.BundleDeploymentOptions) (*Resources, error) { if options.Helm == nil { options.Helm = &fleet.HelmOptions{} } @@ -226,13 +228,13 @@ func (h *Helm) Deploy(bundleID string, manifest *manifest.Manifest, options flee chart.Schema = nil } - if resources, err := h.install(bundleID, manifest, chart, options, true); err != nil { + if resources, err := h.install(ctx, bundleID, manifest, chart, options, true); err != nil { return nil, err } else if h.template { return releaseToResources(resources) } - release, err := h.install(bundleID, manifest, chart, options, false) + release, err := h.install(ctx, bundleID, manifest, chart, options, false) if err != nil { return nil, err } @@ -242,11 +244,11 @@ func (h *Helm) Deploy(bundleID string, manifest *manifest.Manifest, options flee // RemoveExternalChanges does a helm rollback to remove changes made outside of fleet. // It removes the helm history entry if the rollback fails. 
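In essence, drift correction reuses the Helm SDK's rollback action with a context-scoped configuration; a trimmed sketch of what the comment above describes (logging and the history cleanup on failure are left out):

    _, defaultNamespace, releaseName := h.getOpts(bd.Name, bd.Spec.Options)
    cfg, err := h.getCfg(ctx, defaultNamespace, bd.Spec.Options.ServiceAccount)
    if err != nil {
        return err
    }
    r := action.NewRollback(&cfg)
    return r.Run(releaseName) // on failure, the real code prunes the helm history entry
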
-func (h *Helm) RemoveExternalChanges(bd *fleet.BundleDeployment) error { - logrus.Infof("Drift correction: rollback BundleDeployment %s", bd.Name) +func (h *Helm) RemoveExternalChanges(ctx context.Context, bd *fleet.BundleDeployment) error { + log.FromContext(ctx).WithName("RemoveExternalChanges").Info("Drift correction: rollback") _, defaultNamespace, releaseName := h.getOpts(bd.Name, bd.Spec.Options) - cfg, err := h.getCfg(defaultNamespace, bd.Spec.Options.ServiceAccount) + cfg, err := h.getCfg(ctx, defaultNamespace, bd.Spec.Options.ServiceAccount) if err != nil { return err } @@ -314,7 +316,7 @@ func (h *Helm) getOpts(bundleID string, options fleet.BundleDeploymentOptions) ( return timeout, ns, name2.HelmReleaseName(bundleID) } -func (h *Helm) getCfg(namespace, serviceAccountName string) (action.Configuration, error) { +func (h *Helm) getCfg(ctx context.Context, namespace, serviceAccountName string) (action.Configuration, error) { var ( cfg action.Configuration getter = h.getter @@ -324,7 +326,7 @@ func (h *Helm) getCfg(namespace, serviceAccountName string) (action.Configuratio return h.globalCfg, nil } - serviceAccountNamespace, serviceAccountName, err := h.getServiceAccount(serviceAccountName) + serviceAccountNamespace, serviceAccountName, err := h.getServiceAccount(ctx, serviceAccountName) if err != nil { return cfg, err } @@ -339,7 +341,7 @@ func (h *Helm) getCfg(namespace, serviceAccountName string) (action.Configuratio kClient := kube.New(getter) kClient.Namespace = namespace - cfg, err = h.createCfg(namespace) + cfg, err = h.createCfg(ctx, namespace) cfg.Releases.MaxHistory = MaxHelmHistory cfg.KubeClient = kClient @@ -348,15 +350,17 @@ func (h *Helm) getCfg(namespace, serviceAccountName string) (action.Configuratio return cfg, err } -func (h *Helm) install(bundleID string, manifest *manifest.Manifest, chart *chart.Chart, options fleet.BundleDeploymentOptions, dryRun bool) (*release.Release, error) { +// install runs helm install or upgrade and supports dry running the action. Will run helm rollback in case of a failed upgrade. 
+func (h *Helm) install(ctx context.Context, bundleID string, manifest *manifest.Manifest, chart *chart.Chart, options fleet.BundleDeploymentOptions, dryRun bool) (*release.Release, error) { + logger := log.FromContext(ctx).WithName("HelmDeployer").WithName("install").WithValues("commit", manifest.Commit, "dryRun", dryRun) timeout, defaultNamespace, releaseName := h.getOpts(bundleID, options) - values, err := h.getValues(options, defaultNamespace) + values, err := h.getValues(ctx, options, defaultNamespace) if err != nil { return nil, err } - cfg, err := h.getCfg(defaultNamespace, options.ServiceAccount) + cfg, err := h.getCfg(ctx, defaultNamespace, options.ServiceAccount) if err != nil { return nil, err } @@ -367,7 +371,8 @@ func (h *Helm) install(bundleID string, manifest *manifest.Manifest, chart *char } if uninstall { - if err := h.delete(bundleID, options, dryRun); err != nil { + logger.Info("Uninstalling helm release first") + if err := h.delete(ctx, bundleID, options, dryRun); err != nil { return nil, err } if dryRun { @@ -422,7 +427,7 @@ func (h *Helm) install(bundleID string, manifest *manifest.Manifest, chart *char u.Wait = true } if !dryRun { - logrus.Infof("Helm: Installing %s", bundleID) + logger.Info("Installing helm release") } return u.Run(chart, values) } @@ -446,17 +451,17 @@ func (h *Helm) install(bundleID string, manifest *manifest.Manifest, chart *char u.Wait = true } if !dryRun { - logrus.Infof("Helm: Upgrading %s", bundleID) + logger.Info("Upgrading helm release") } rel, err := u.Run(releaseName, chart, values) if err != nil && err.Error() == HelmUpgradeInterruptedError { - logrus.Infof("Helm error: %s for %s. Doing a rollback", HelmUpgradeInterruptedError, bundleID) + logger.Info("Helm doing a rollback", "error", HelmUpgradeInterruptedError) r := action.NewRollback(&cfg) err = r.Run(releaseName) if err != nil { return nil, err } - logrus.Debugf("Helm: retrying upgrade for %s after rollback", bundleID) + logger.V(1).Info("Retrying upgrade after rollback") return u.Run(releaseName, chart, values) } @@ -464,7 +469,7 @@ func (h *Helm) install(bundleID string, manifest *manifest.Manifest, chart *char return rel, err } -func (h *Helm) getValues(options fleet.BundleDeploymentOptions, defaultNamespace string) (map[string]interface{}, error) { +func (h *Helm) getValues(ctx context.Context, options fleet.BundleDeploymentOptions, defaultNamespace string) (map[string]interface{}, error) { if options.Helm == nil { return nil, nil } @@ -488,7 +493,8 @@ func (h *Helm) getValues(options fleet.BundleDeploymentOptions, defaultNamespace if key == "" { key = DefaultKey } - configMap, err := h.configmapCache.Get(namespace, name) + configMap := &corev1.ConfigMap{} + err := h.client.Get(ctx, types.NamespacedName{Name: name, Namespace: namespace}, configMap) if err != nil { return nil, err } @@ -513,7 +519,8 @@ func (h *Helm) getValues(options fleet.BundleDeploymentOptions, defaultNamespace if key == "" { key = DefaultKey } - secret, err := h.secretCache.Get(namespace, name) + secret := &corev1.Secret{} + err := h.client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: name}, secret) if err != nil { return nil, err } @@ -650,7 +657,7 @@ func (h *Helm) ResourcesFromPreviousReleaseVersion(bundleID, resourcesID string) // Delete the release for the given bundleID. releaseName is a key in the // format "namespace/name". If releaseName is empty, search for a matching // release. 
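On the caller side, Delete is now context-aware as well; a hypothetical cleanup call (the helm variable and relying on the empty-name lookup are assumptions):

    // an empty releaseName makes Delete search its own bookkeeping for the
    // release that belongs to this bundledeployment
    if err := helm.Delete(ctx, bd.Name, ""); err != nil {
        return err
    }
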
-func (h *Helm) Delete(bundleID, releaseName string) error { +func (h *Helm) Delete(ctx context.Context, bundleID, releaseName string) error { keepResources := false deployments, err := h.ListDeployments() if err != nil { @@ -669,10 +676,11 @@ func (h *Helm) Delete(bundleID, releaseName string) error { // Never found anything to delete return nil } - return h.deleteByRelease(bundleID, releaseName, keepResources) + return h.deleteByRelease(ctx, bundleID, releaseName, keepResources) } -func (h *Helm) deleteByRelease(bundleID, releaseName string, keepResources bool) error { +func (h *Helm) deleteByRelease(ctx context.Context, bundleID, releaseName string, keepResources bool) error { + logger := log.FromContext(ctx).WithName("deleteByRelease").WithValues("releaseName", releaseName, "keepResources", keepResources) releaseNamespace, releaseName := kv.Split(releaseName, "/") rels, err := h.globalCfg.Releases.List(func(r *release.Release) bool { return r.Namespace == releaseNamespace && @@ -697,19 +705,19 @@ func (h *Helm) deleteByRelease(bundleID, releaseName string, keepResources bool) } } - cfg, err := h.getCfg(releaseNamespace, serviceAccountName) + cfg, err := h.getCfg(ctx, releaseNamespace, serviceAccountName) if err != nil { return err } if strings.HasPrefix(bundleID, "fleet-agent") { // Never uninstall the fleet-agent, just "forget" it - return deleteHistory(cfg, bundleID) + return deleteHistory(cfg, logger, bundleID) } if keepResources { // don't delete resources, just delete the helm release secrets - return deleteHistory(cfg, bundleID) + return deleteHistory(cfg, logger, bundleID) } u := action.NewUninstall(&cfg) @@ -717,7 +725,8 @@ func (h *Helm) deleteByRelease(bundleID, releaseName string, keepResources bool) return err } -func (h *Helm) delete(bundleID string, options fleet.BundleDeploymentOptions, dryRun bool) error { +func (h *Helm) delete(ctx context.Context, bundleID string, options fleet.BundleDeploymentOptions, dryRun bool) error { + logger := log.FromContext(ctx).WithName("HelmDeployer").WithName("delete").WithValues("dryRun", dryRun) timeout, _, releaseName := h.getOpts(bundleID, options) r, err := h.globalCfg.Releases.Last(releaseName) @@ -743,14 +752,14 @@ func (h *Helm) delete(bundleID string, options fleet.BundleDeploymentOptions, dr } serviceAccountName := r.Chart.Metadata.Annotations[ServiceAccountNameAnnotation] - cfg, err := h.getCfg(r.Namespace, serviceAccountName) + cfg, err := h.getCfg(ctx, r.Namespace, serviceAccountName) if err != nil { return err } if strings.HasPrefix(bundleID, "fleet-agent") { // Never uninstall the fleet-agent, just "forget" it - return deleteHistory(cfg, bundleID) + return deleteHistory(cfg, logger, bundleID) } u := action.NewUninstall(&cfg) @@ -758,21 +767,25 @@ func (h *Helm) delete(bundleID string, options fleet.BundleDeploymentOptions, dr u.Timeout = timeout if !dryRun { - logrus.Infof("Helm: Uninstalling %s", bundleID) + logger.Info("Helm: Uninstalling") } _, err = u.Run(releaseName) return err } -func (h *Helm) createCfg(namespace string) (action.Configuration, error) { +func (h *Helm) createCfg(ctx context.Context, namespace string) (action.Configuration, error) { + logger := log.FromContext(ctx).WithName("helmSDK") + info := func(format string, v ...interface{}) { + logger.V(1).Info(fmt.Sprintf(format, v...)) + } kc := kube.New(h.getter) - kc.Log = logrus.Infof + kc.Log = info clientSet, err := kc.Factory.KubernetesClientSet() if err != nil { return action.Configuration{}, err } - driver := 
driver.NewSecrets(helmcache.NewSecretClient(h.secretCache, clientSet, namespace)) - driver.Log = logrus.Infof + driver := driver.NewSecrets(helmcache.NewSecretClient(h.client, clientSet, namespace)) + driver.Log = info store := storage.Init(driver) store.MaxHistory = MaxHelmHistory @@ -780,11 +793,11 @@ func (h *Helm) createCfg(namespace string) (action.Configuration, error) { RESTClientGetter: h.getter, Releases: store, KubeClient: kc, - Log: logrus.Infof, + Log: info, }, nil } -func deleteHistory(cfg action.Configuration, bundleID string) error { +func deleteHistory(cfg action.Configuration, logger logr.Logger, bundleID string) error { releases, err := cfg.Releases.List(func(r *release.Release) bool { return r.Name == bundleID && r.Chart.Metadata.Annotations[BundleIDAnnotation] == bundleID }) @@ -792,7 +805,7 @@ func deleteHistory(cfg action.Configuration, bundleID string) error { return err } for _, release := range releases { - logrus.Infof("Helm: Deleting release %s %d", release.Name, release.Version) + logger.Info("Helm: Deleting release", "releaseVersion", release.Version) if _, err := cfg.Releases.Delete(release.Name, release.Version); err != nil { return err } diff --git a/internal/helmdeployer/helmcache/secret.go b/internal/helmdeployer/helmcache/secret.go index 87cd95a827..d463e058ab 100644 --- a/internal/helmdeployer/helmcache/secret.go +++ b/internal/helmdeployer/helmcache/secret.go @@ -3,7 +3,6 @@ package helmcache import ( "context" - corecontrollers "github.com/rancher/wrangler/v2/pkg/generated/controllers/core/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -11,17 +10,21 @@ import ( "k8s.io/apimachinery/pkg/watch" applycorev1 "k8s.io/client-go/applyconfigurations/core/v1" "k8s.io/client-go/kubernetes" + corev1 "k8s.io/client-go/kubernetes/typed/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" ) // SecretClient implements methods to handle secrets. Get and list will be retrieved from the wrangler cache, the other calls // will be make to the Kubernetes API server. type SecretClient struct { - cache corecontrollers.SecretCache + cache client.Client client kubernetes.Interface namespace string } -func NewSecretClient(cache corecontrollers.SecretCache, client kubernetes.Interface, namespace string) *SecretClient { +var _ corev1.SecretInterface = &SecretClient{} + +func NewSecretClient(cache client.Client, client kubernetes.Interface, namespace string) *SecretClient { return &SecretClient{cache, client, namespace} } @@ -45,30 +48,29 @@ func (s *SecretClient) DeleteCollection(ctx context.Context, opts metav1.DeleteO return s.client.CoreV1().Secrets(s.namespace).DeleteCollection(ctx, opts, listOpts) } -// Get gets a secret from the wrangler cache. -func (s *SecretClient) Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Secret, error) { - return s.cache.Get(s.namespace, name) +// Get gets a secret from the cache. +func (s *SecretClient) Get(ctx context.Context, name string, _ metav1.GetOptions) (*v1.Secret, error) { + secret := &v1.Secret{} + err := s.cache.Get(ctx, types.NamespacedName{Namespace: s.namespace, Name: name}, secret) + return secret, err } -// List lists secrets from the wrangler cache. +// List lists secrets from the cache. 
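The added var _ corev1.SecretInterface = &SecretClient{} line is the usual compile-time guard that the adapter still satisfies client-go's secret interface after the cache swap; the idiom generalizes to any interface, for example (Getter and fileStore are made-up names):

    // fails to compile as soon as *fileStore stops implementing Getter
    var _ Getter = (*fileStore)(nil)
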
func (s *SecretClient) List(ctx context.Context, opts metav1.ListOptions) (*v1.SecretList, error) { labels, err := labels.Parse(opts.LabelSelector) if err != nil { return nil, err } - secrets, err := s.cache.List(s.namespace, labels) + secrets := v1.SecretList{} + err = s.cache.List(ctx, &secrets, &client.ListOptions{ + Namespace: s.namespace, + LabelSelector: labels, + }) if err != nil { return nil, err } - var items []v1.Secret - for _, secret := range secrets { - items = append(items, *secret) - } - - return &v1.SecretList{ - Items: items, - }, nil + return &secrets, nil } // Watch watches a secret using a k8s client that calls the Kubernetes API server diff --git a/internal/helmdeployer/helmcache/secret_test.go b/internal/helmdeployer/helmcache/secret_test.go index e72020b1a9..0d3635b3c9 100644 --- a/internal/helmdeployer/helmcache/secret_test.go +++ b/internal/helmdeployer/helmcache/secret_test.go @@ -4,16 +4,18 @@ import ( "context" "testing" - "github.com/golang/mock/gomock" "github.com/google/go-cmp/cmp" - "github.com/rancher/wrangler/v2/pkg/generic/fake" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" applycorev1 "k8s.io/client-go/applyconfigurations/core/v1" v1 "k8s.io/client-go/applyconfigurations/meta/v1" k8sfake "k8s.io/client-go/kubernetes/fake" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client/fake" ) const ( @@ -21,7 +23,7 @@ const ( secretNamespace = "test-ns" ) -var secret = corev1.Secret{ +var defaultSecret = corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: secretName, Namespace: secretNamespace, @@ -29,44 +31,53 @@ var secret = corev1.Secret{ } func TestGet(t *testing.T) { - ctrl := gomock.NewController(t) - mockCache := fake.NewMockCacheInterface[*corev1.Secret](ctrl) - secretClient := NewSecretClient(mockCache, nil, secretNamespace) - mockCache.EXPECT().Get(secretNamespace, secretName).Return(&secret, nil) + scheme := runtime.NewScheme() + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) - secretGot, err := secretClient.Get(context.TODO(), secretName, metav1.GetOptions{}) + secret := defaultSecret + cache := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&secret).Build() + secretClient := NewSecretClient(cache, nil, secretNamespace) + secretGot, err := secretClient.Get(context.TODO(), secretName, metav1.GetOptions{}) if err != nil { t.Errorf("unexpected error %v", err) } - if !cmp.Equal(&secret, secretGot) { - t.Errorf("expected secret %v, got %v", secret, secretGot) + if !cmp.Equal(secret.ObjectMeta, secretGot.ObjectMeta) { + t.Errorf("expected secret meta %#v, got %#v", secret.ObjectMeta, secretGot.ObjectMeta) + } + if !cmp.Equal(secret.Data, secretGot.Data) { + t.Errorf("expected secret data %#v, got %#v", secret.Data, secretGot.Data) } } func TestList(t *testing.T) { - ctrl := gomock.NewController(t) - mockCache := fake.NewMockCacheInterface[*corev1.Secret](ctrl) - secretClient := NewSecretClient(mockCache, nil, secretNamespace) - labelSelector, err := labels.Parse("foo=bar") - if err != nil { - t.Errorf("unexpected error %v", err) - } - mockCache.EXPECT().List(secretNamespace, labelSelector).Return([]*corev1.Secret{&secret}, nil) - secretList, err := secretClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "foo=bar"}) + scheme := runtime.NewScheme() + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + + 
secret := defaultSecret + secret.Labels = map[string]string{"foo": "bar"} + cache := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&secret).Build() + secretClient := NewSecretClient(cache, nil, secretNamespace) + secretList, err := secretClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "foo=bar"}) if err != nil { t.Errorf("unexpected error %v", err) } - secretListExpected := &corev1.SecretList{Items: []corev1.Secret{secret}} - if !cmp.Equal(secretListExpected, secretList) { - t.Errorf("expected secret %v, got %v", secretListExpected, secretList) + if len(secretList.Items) != 1 { + t.Errorf("expected secret list to have 1 element, got %v", len(secretList.Items)) + } + if !cmp.Equal(secret.ObjectMeta, secretList.Items[0].ObjectMeta) { + t.Errorf("expected secret meta %#v, got %#v", secret, secretList.Items[0]) + } + if !cmp.Equal(secret.Data, secretList.Items[0].Data) { + t.Errorf("expected secret data %#v, got %#v", secret, secretList.Items[0]) } } func TestCreate(t *testing.T) { client := k8sfake.NewSimpleClientset() secretClient := NewSecretClient(nil, client, secretNamespace) + secret := defaultSecret secretCreated, err := secretClient.Create(context.TODO(), &secret, metav1.CreateOptions{}) if err != nil { @@ -85,6 +96,7 @@ func TestUpdate(t *testing.T) { }, Data: map[string][]byte{"test": []byte("data")}, } + secret := defaultSecret client := k8sfake.NewSimpleClientset(&secret) secretClient := NewSecretClient(nil, client, secretNamespace) secretUpdated, err := secretClient.Update(context.TODO(), &secretUpdate, metav1.UpdateOptions{}) @@ -98,6 +110,7 @@ func TestUpdate(t *testing.T) { } func TestDelete(t *testing.T) { + secret := defaultSecret client := k8sfake.NewSimpleClientset(&secret) secretClient := NewSecretClient(nil, client, secretNamespace) err := secretClient.Delete(context.TODO(), secretName, metav1.DeleteOptions{}) @@ -108,6 +121,7 @@ func TestDelete(t *testing.T) { } func TestDeleteCollection(t *testing.T) { + secret := defaultSecret client := k8sfake.NewSimpleClientset(&secret) secretClient := NewSecretClient(nil, client, secretNamespace) err := secretClient.DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{FieldSelector: "name=" + secretName}) @@ -118,6 +132,7 @@ func TestDeleteCollection(t *testing.T) { } func TestWatch(t *testing.T) { + secret := defaultSecret client := k8sfake.NewSimpleClientset(&secret) secretClient := NewSecretClient(nil, client, secretNamespace) watch, err := secretClient.Watch(context.TODO(), metav1.ListOptions{FieldSelector: "name=" + secretName}) @@ -138,6 +153,7 @@ func TestPatch(t *testing.T) { }, Data: map[string][]byte{"test": []byte("content")}, } + secret := defaultSecret client := k8sfake.NewSimpleClientset(&secret) secretClient := NewSecretClient(nil, client, secretNamespace) patch := []byte(`{"data":{"test":"Y29udGVudA=="}}`) // "content", base64-encoded @@ -159,6 +175,7 @@ func TestApply(t *testing.T) { }, Data: map[string][]byte{"test": []byte("content")}, } + secret := defaultSecret client := k8sfake.NewSimpleClientset(&secret) secretClient := NewSecretClient(nil, client, secretNamespace) secretName := "test" diff --git a/internal/helmdeployer/impersonate.go b/internal/helmdeployer/impersonate.go index cc0b83d887..5a1e9eefa3 100644 --- a/internal/helmdeployer/impersonate.go +++ b/internal/helmdeployer/impersonate.go @@ -1,22 +1,27 @@ package helmdeployer import ( + "context" "fmt" "github.com/rancher/wrangler/v2/pkg/ratelimit" + + corev1 "k8s.io/api/core/v1" apierror 
"k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" ) -func (h *Helm) getServiceAccount(name string) (string, string, error) { +func (h *Helm) getServiceAccount(ctx context.Context, name string) (string, string, error) { currentName := name if currentName == "" { currentName = DefaultServiceAccount } - _, err := h.serviceAccountCache.Get(h.agentNamespace, currentName) + sa := &corev1.ServiceAccount{} + err := h.client.Get(ctx, types.NamespacedName{Namespace: h.agentNamespace, Name: currentName}, sa) if apierror.IsNotFound(err) && name == "" { // if we can't find the service account, but none was asked for, don't use any return "", "", nil diff --git a/internal/helmdeployer/template.go b/internal/helmdeployer/template.go index 82cc2b91b8..64fab649e0 100644 --- a/internal/helmdeployer/template.go +++ b/internal/helmdeployer/template.go @@ -1,6 +1,7 @@ package helmdeployer import ( + "context" "io" "github.com/rancher/fleet/internal/manifest" @@ -18,7 +19,7 @@ import ( ) // Template runs helm template and returns the resources as a list of objects, without applying them. -func Template(bundleID string, manifest *manifest.Manifest, options fleet.BundleDeploymentOptions) ([]runtime.Object, error) { +func Template(ctx context.Context, bundleID string, manifest *manifest.Manifest, options fleet.BundleDeploymentOptions) ([]runtime.Object, error) { h := &Helm{ globalCfg: action.Configuration{}, useGlobalCfg: true, @@ -33,7 +34,7 @@ func Template(bundleID string, manifest *manifest.Manifest, options fleet.Bundle h.globalCfg.Log = logrus.Infof h.globalCfg.Releases = storage.Init(mem) - resources, err := h.Deploy(bundleID, manifest, options) + resources, err := h.Deploy(ctx, bundleID, manifest, options) if err != nil { return nil, err } diff --git a/internal/manifest/lookup.go b/internal/manifest/lookup.go index e0e62b58d0..2f02c7de3e 100644 --- a/internal/manifest/lookup.go +++ b/internal/manifest/lookup.go @@ -1,27 +1,25 @@ package manifest import ( + "context" + "github.com/rancher/fleet/internal/content" - fleetcontrollers "github.com/rancher/fleet/pkg/generated/controllers/fleet.cattle.io/v1alpha1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) + fleet "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" -type Lookup interface { - Get(id string) (*Manifest, error) -} + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) -func NewLookup(content fleetcontrollers.ContentClient) Lookup { - return &lookup{ - content: content, - } +func NewLookup() *Lookup { + return &Lookup{} } -type lookup struct { - content fleetcontrollers.ContentClient +type Lookup struct { } -func (l *lookup) Get(id string) (*Manifest, error) { - c, err := l.content.Get(id, metav1.GetOptions{}) +func (l *Lookup) Get(ctx context.Context, client client.Reader, id string) (*Manifest, error) { + c := &fleet.Content{} + err := client.Get(ctx, types.NamespacedName{Name: id}, c) if err != nil { return nil, err } diff --git a/pkg/apis/fleet.cattle.io/v1alpha1/bundle_types.go b/pkg/apis/fleet.cattle.io/v1alpha1/bundle_types.go index 59f719bba8..01613fa5de 100644 --- a/pkg/apis/fleet.cattle.io/v1alpha1/bundle_types.go +++ b/pkg/apis/fleet.cattle.io/v1alpha1/bundle_types.go @@ -288,7 +288,8 @@ var ( // BundleDeploymentConditionDeployed is used by the bundledeployment // controller. 
It is true if the handler returns no error and false if // an error is returned. - BundleDeploymentConditionDeployed = "Deployed" + BundleDeploymentConditionDeployed = "Deployed" + BundleDeploymentConditionMonitored = "Monitored" ) type BundleStatus struct { diff --git a/pkg/durations/durations.go b/pkg/durations/durations.go index 9eccf74760..2542a70686 100644 --- a/pkg/durations/durations.go +++ b/pkg/durations/durations.go @@ -24,7 +24,9 @@ const ( RestConfigTimeout = time.Second * 15 ServiceTokenSleep = time.Second * 2 TokenClusterEnqueueDelay = time.Second * 2 - TriggerSleep = time.Second * 2 - DefaultCpuPprofPeriod = time.Minute - ReleaseCacheTTL = time.Minute * 5 + // TriggerSleep is the delay before the mini controller starts watching + // deployed resources for changes + TriggerSleep = time.Second * 5 + DefaultCpuPprofPeriod = time.Minute + ReleaseCacheTTL = time.Minute * 5 )
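
One practical consequence of the trigger changes earlier in this diff: handlers registered through OnChange now receive the key of the object that fired them, so a watcher can log or requeue per object. A small sketch, where bdKey, logger and requeue are placeholders:

    // register a trigger for the deployed objects of one bundledeployment
    err := t.OnChange(bdKey, defaultNamespace, func(key string) {
        logger.V(1).Info("deployed resource changed", "object", key)
        requeue(bdKey) // hypothetical: enqueue the owning bundledeployment again
    }, objs...)
    if err != nil {
        return err
    }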