From 543d87073153c482be258b7adebfa9e45060819e Mon Sep 17 00:00:00 2001
From: Skye Gill
Date: Fri, 17 Mar 2023 10:28:01 +0000
Subject: [PATCH] refactor to share funcs

Signed-off-by: Skye Gill
---
 .../v1alpha1/flagsourceconfiguration_types.go |   4 +-
 .../clientsideconfiguration_controller.go     | 162 ++++--------
 controllers/configmap.go                      |  30 +++
 controllers/featureflag.go                    |  16 ++
 controllers/owner.go                          |  17 ++
 controllers/parse.go                          |  11 +
 controllers/provider.go                       | 144 +++++++++++
 controllers/role.go                           |  60 +++++
 webhooks/pod_webhook.go                       | 233 ++----------------
 9 files changed, 343 insertions(+), 334 deletions(-)
 create mode 100644 controllers/configmap.go
 create mode 100644 controllers/featureflag.go
 create mode 100644 controllers/owner.go
 create mode 100644 controllers/parse.go
 create mode 100644 controllers/provider.go
 create mode 100644 controllers/role.go

diff --git a/apis/core/v1alpha1/flagsourceconfiguration_types.go b/apis/core/v1alpha1/flagsourceconfiguration_types.go
index 6e1b43ddd..3aad44752 100644
--- a/apis/core/v1alpha1/flagsourceconfiguration_types.go
+++ b/apis/core/v1alpha1/flagsourceconfiguration_types.go
@@ -46,8 +46,8 @@ const (
 	defaultSocketPath string = ""
 	defaultEvaluator  string = "json"
 	defaultImage      string = "ghcr.io/open-feature/flagd"
-	// v0.4.1` is replaced in the `update-flagd` Makefile target
-	defaultTag string = "v0.4.1"
+	// `INPUT_FLAGD_VERSION` is replaced in the `update-flagd` Makefile target
+	defaultTag string = "INPUT_FLAGD_VERSION"
 	defaultLogFormat string = "json"
 	SyncProviderKubernetes SyncProviderType = "kubernetes"
 	SyncProviderFilepath   SyncProviderType = "filepath"
diff --git a/controllers/clientsideconfiguration_controller.go b/controllers/clientsideconfiguration_controller.go
index e7044bf24..718512b84 100644
--- a/controllers/clientsideconfiguration_controller.go
+++ b/controllers/clientsideconfiguration_controller.go
@@ -21,9 +21,9 @@ import (
 	"fmt"
 	"github.com/go-logr/logr"
 	corev1alpha1 "github.com/open-feature/open-feature-operator/apis/core/v1alpha1"
+	"github.com/open-feature/open-feature-operator/pkg/utils"
 	appsV1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
-	rbacv1 "k8s.io/api/rbac/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -84,6 +84,14 @@ func (r *ClientSideConfigurationReconciler) Reconcile(ctx context.Context, req c
 		return r.finishReconcile(err, false)
 	}
 	ns := csconf.Namespace
+	csconfOwnerReferences := []metav1.OwnerReference{
+		{
+			Kind:       csconf.Kind,
+			Name:       csconf.Name,
+			UID:        csconf.UID,
+			Controller: utils.FalseVal(),
+		},
+	}
 
 	// check for existing client side deployment
 	deployment := &appsV1.Deployment{}
@@ -138,6 +146,7 @@ func (r *ClientSideConfigurationReconciler) Reconcile(ctx context.Context, req c
 	} else {
 		svc.Name = clientSideServiceName
 		svc.Namespace = ns
+		svc.OwnerReferences = csconfOwnerReferences
 		svc.Spec.Selector = map[string]string{
 			"app": clientSideAppName,
 		}
@@ -170,70 +179,47 @@ func (r *ClientSideConfigurationReconciler) Reconcile(ctx context.Context, req c
 		}
 	}
 
-	// create gateway if it doesn't exist, update if it does
+	// update existing gateway
 	namespacesFromSame := gatewayv1beta1.NamespacesFromSame
 	hostname := gatewayv1beta1.Hostname(csconf.Spec.HTTPRouteHostname)
 	gateway := &gatewayv1beta1.Gateway{}
 	if err := r.Client.Get(
 		ctx, client.ObjectKey{Namespace: ns, Name: csconf.Spec.GatewayName}, gateway,
 	); err != nil {
-		if !errors.IsNotFound(err) {
-			r.Log.Error(err,
-				fmt.Sprintf("Failed to get the gateway %s/%s", ns, 
csconf.Spec.GatewayName)) - return r.finishReconcile(err, false) - } - gateway.Name = csconf.Spec.GatewayName - gateway.Namespace = ns - gateway.Spec.GatewayClassName = gatewayv1beta1.ObjectName(csconf.Spec.GatewayClassName) - gateway.Spec.Listeners = []gatewayv1beta1.Listener{ - { - Name: clientSideGatewayListenerName, - Hostname: &hostname, - Protocol: gatewayv1beta1.HTTPProtocolType, - Port: gatewayv1beta1.PortNumber(csconf.Spec.GatewayListenerPort), - AllowedRoutes: &gatewayv1beta1.AllowedRoutes{ - Namespaces: &gatewayv1beta1.RouteNamespaces{ - From: &namespacesFromSame, - }, - }, - }, - } + r.Log.Error(err, + fmt.Sprintf("Failed to get the gateway %s/%s", ns, csconf.Spec.GatewayName)) + return r.finishReconcile(err, false) + } - if err := r.Client.Create(ctx, gateway); err != nil { - r.Log.Error(err, "Failed to create gateway") - return r.finishReconcile(nil, false) - } - } else { - gateway.Spec.GatewayClassName = gatewayv1beta1.ObjectName(csconf.Spec.GatewayClassName) - listener := gatewayv1beta1.Listener{ - Name: clientSideGatewayListenerName, - Hostname: &hostname, - Protocol: gatewayv1beta1.HTTPProtocolType, - Port: gatewayv1beta1.PortNumber(csconf.Spec.GatewayListenerPort), - AllowedRoutes: &gatewayv1beta1.AllowedRoutes{ - Namespaces: &gatewayv1beta1.RouteNamespaces{ - From: &namespacesFromSame, - }, + gateway.Spec.GatewayClassName = gatewayv1beta1.ObjectName(csconf.Spec.GatewayClassName) + listener := gatewayv1beta1.Listener{ + Name: clientSideGatewayListenerName, + Hostname: &hostname, + Protocol: gatewayv1beta1.HTTPProtocolType, + Port: gatewayv1beta1.PortNumber(csconf.Spec.GatewayListenerPort), + AllowedRoutes: &gatewayv1beta1.AllowedRoutes{ + Namespaces: &gatewayv1beta1.RouteNamespaces{ + From: &namespacesFromSame, }, - } + }, + } - listenerExists := false - for i := 0; i < len(gateway.Spec.Listeners); i++ { - if gateway.Spec.Listeners[i].Name == clientSideGatewayListenerName { - gateway.Spec.Listeners[i] = listener - listenerExists = true - break - } + listenerExists := false + for i := 0; i < len(gateway.Spec.Listeners); i++ { + if gateway.Spec.Listeners[i].Name == clientSideGatewayListenerName { + gateway.Spec.Listeners[i] = listener + listenerExists = true + break } + } - if !listenerExists { - gateway.Spec.Listeners = append(gateway.Spec.Listeners, listener) - } + if !listenerExists { + gateway.Spec.Listeners = append(gateway.Spec.Listeners, listener) + } - if err := r.Client.Update(ctx, gateway); err != nil { - r.Log.Error(err, "Failed to update gateway") - return r.finishReconcile(nil, false) - } + if err := r.Client.Update(ctx, gateway); err != nil { + r.Log.Error(err, "Failed to update gateway") + return r.finishReconcile(nil, false) } // create gateway http route if it doesn't exist @@ -251,6 +237,7 @@ func (r *ClientSideConfigurationReconciler) Reconcile(ctx context.Context, req c } else { httpRoute.Name = csconf.Spec.HTTPRouteName httpRoute.Namespace = ns + httpRoute.OwnerReferences = csconfOwnerReferences httpRoute.Spec.ParentRefs = []gatewayv1beta1.ParentReference{ { Name: gatewayv1beta1.ObjectName(csconf.Spec.GatewayName), @@ -300,22 +287,14 @@ func (r *ClientSideConfigurationReconciler) Reconcile(ctx context.Context, req c // TODO resource limits } - for _, source := range fsConfigSpec.Sources { - if source.Provider == "" { - source.Provider = fsConfigSpec.DefaultSyncProvider - } - switch { - case source.Provider.IsKubernetes(): - if err := r.handleKubernetesProvider(ctx, ns, csconf.Spec.ServiceAccountName, &flagdContainer, source); err != nil { - 
r.Log.Error(err, "Failed to handle kubernetes provider") - return r.finishReconcile(nil, false) - } - default: - r.Log.Error(fmt.Errorf("%s", source.Provider), "Unsupported source") - return r.finishReconcile(nil, false) - } + if err := HandleSourcesProviders(ctx, r.Log, r.Client, fsConfigSpec, ns, csconf.Spec.ServiceAccountName, + csconfOwnerReferences, &deployment.Spec.Template.Spec, deployment.Spec.Template.ObjectMeta, &flagdContainer, + ); err != nil { + r.Log.Error(err, "handle source providers") + return r.finishReconcile(nil, false) } + deployment.OwnerReferences = csconfOwnerReferences deployment.Spec.Template.Spec.ServiceAccountName = csconf.Spec.ServiceAccountName labels := map[string]string{ "app": clientSideAppName, @@ -333,53 +312,6 @@ func (r *ClientSideConfigurationReconciler) Reconcile(ctx context.Context, req c return r.finishReconcile(nil, false) } -func (r *ClientSideConfigurationReconciler) enableClusterRoleBinding(ctx context.Context, namespace, serviceAccountName string) error { - serviceAccount := client.ObjectKey{ - Name: serviceAccountName, - Namespace: namespace, - } - if serviceAccountName == "" { - serviceAccount.Name = "default" - } - // Check if the service account exists - r.Log.V(1).Info(fmt.Sprintf("Fetching serviceAccount: %s/%s", serviceAccount.Namespace, serviceAccount.Name)) - sa := corev1.ServiceAccount{} - if err := r.Client.Get(ctx, serviceAccount, &sa); err != nil { - r.Log.V(1).Info(fmt.Sprintf("ServiceAccount not found: %s/%s", serviceAccount.Namespace, serviceAccount.Name)) - return err - } - r.Log.V(1).Info(fmt.Sprintf("Fetching clusterrolebinding: %s", clusterRoleBindingName)) - // Fetch service account if it exists - crb := rbacv1.ClusterRoleBinding{} - if err := r.Client.Get(ctx, client.ObjectKey{Name: clusterRoleBindingName}, &crb); errors.IsNotFound(err) { - r.Log.V(1).Info(fmt.Sprintf("ClusterRoleBinding not found: %s", clusterRoleBindingName)) - return err - } - found := false - for _, subject := range crb.Subjects { - if subject.Kind == "ServiceAccount" && subject.Name == serviceAccount.Name && subject.Namespace == serviceAccount.Namespace { - r.Log.V(1).Info(fmt.Sprintf("ClusterRoleBinding already exists for service account: %s/%s", serviceAccount.Namespace, serviceAccount.Name)) - found = true - } - } - if !found { - r.Log.V(1).Info(fmt.Sprintf("Updating ClusterRoleBinding %s for service account: %s/%s", crb.Name, - serviceAccount.Namespace, serviceAccount.Name)) - crb.Subjects = append(crb.Subjects, rbacv1.Subject{ - Kind: "ServiceAccount", - Name: serviceAccount.Name, - Namespace: serviceAccount.Namespace, - }) - if err := r.Client.Update(ctx, &crb); err != nil { - r.Log.V(1).Info(fmt.Sprintf("Failed to update ClusterRoleBinding: %s", err.Error())) - return err - } - } - r.Log.V(1).Info(fmt.Sprintf("Updated ClusterRoleBinding: %s", crb.Name)) - - return nil -} - func (r *ClientSideConfigurationReconciler) handleKubernetesProvider(ctx context.Context, namespace, serviceAccountName string, container *corev1.Container, source corev1alpha1.Source) error { ns, n := parseAnnotation(source.Source, namespace) // ensure that the FeatureFlagConfiguration exists @@ -387,7 +319,7 @@ func (r *ClientSideConfigurationReconciler) handleKubernetesProvider(ctx context if ff.Name == "" { return fmt.Errorf("feature flag configuration %s/%s not found", ns, n) } - if err := r.enableClusterRoleBinding(ctx, namespace, serviceAccountName); err != nil { + if err := EnableClusterRoleBinding(ctx, r.Log, r.Client, namespace, serviceAccountName); err != nil { 
return fmt.Errorf("enableClusterRoleBinding: %w", err) } // append args diff --git a/controllers/configmap.go b/controllers/configmap.go new file mode 100644 index 000000000..7cfcdf1c3 --- /dev/null +++ b/controllers/configmap.go @@ -0,0 +1,30 @@ +package controllers + +import ( + "context" + "fmt" + "github.com/go-logr/logr" + "github.com/open-feature/open-feature-operator/apis/core/v1alpha1" + "github.com/open-feature/open-feature-operator/pkg/utils" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func CreateConfigMap( + ctx context.Context, log logr.Logger, c client.Client, namespace string, name string, ownerReferences []metav1.OwnerReference, +) error { + log.V(1).Info(fmt.Sprintf("Creating configmap %s", name)) + references := []metav1.OwnerReference{ + ownerReferences[0], + } + references[0].Controller = utils.FalseVal() + ff := FeatureFlag(ctx, c, namespace, name) + if ff.Name == "" { + return fmt.Errorf("feature flag configuration %s/%s not found", namespace, name) + } + references = append(references, v1alpha1.GetFfReference(&ff)) + + cm := v1alpha1.GenerateFfConfigMap(name, namespace, references, ff.Spec) + + return c.Create(ctx, &cm) +} diff --git a/controllers/featureflag.go b/controllers/featureflag.go new file mode 100644 index 000000000..5bbb68cec --- /dev/null +++ b/controllers/featureflag.go @@ -0,0 +1,16 @@ +package controllers + +import ( + "context" + "github.com/open-feature/open-feature-operator/apis/core/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func FeatureFlag(ctx context.Context, c client.Client, namespace string, name string) v1alpha1.FeatureFlagConfiguration { + ffConfig := v1alpha1.FeatureFlagConfiguration{} + if err := c.Get(ctx, client.ObjectKey{Name: name, Namespace: namespace}, &ffConfig); errors.IsNotFound(err) { + return v1alpha1.FeatureFlagConfiguration{} + } + return ffConfig +} diff --git a/controllers/owner.go b/controllers/owner.go new file mode 100644 index 000000000..15c5e1943 --- /dev/null +++ b/controllers/owner.go @@ -0,0 +1,17 @@ +package controllers + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// SharedOwnership returns true if any of the owner references match in the given slices +func SharedOwnership(ownerReferences1, ownerReferences2 []metav1.OwnerReference) bool { + for _, owner1 := range ownerReferences1 { + for _, owner2 := range ownerReferences2 { + if owner1.UID == owner2.UID { + return true + } + } + } + return false +} diff --git a/controllers/parse.go b/controllers/parse.go new file mode 100644 index 000000000..c9d141725 --- /dev/null +++ b/controllers/parse.go @@ -0,0 +1,11 @@ +package controllers + +import "strings" + +func ParseAnnotation(s string, defaultNs string) (string, string) { + ss := strings.Split(s, "/") + if len(ss) == 2 { + return ss[0], ss[1] + } + return defaultNs, s +} diff --git a/controllers/provider.go b/controllers/provider.go new file mode 100644 index 000000000..5fd6df09e --- /dev/null +++ b/controllers/provider.go @@ -0,0 +1,144 @@ +package controllers + +import ( + "context" + "fmt" + "github.com/go-logr/logr" + "github.com/open-feature/open-feature-operator/apis/core/v1alpha1" + "github.com/open-feature/open-feature-operator/pkg/utils" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + rootFileSyncMountPath string = "/etc/flagd" + 
AllowKubernetesSyncAnnotation = "allowkubernetessync"
+	OpenFeatureAnnotationPrefix   = "openfeature.dev"
+)
+
+func HandleSourcesProviders(
+	ctx context.Context, log logr.Logger, c client.Client, flagSourceConfig *v1alpha1.FlagSourceConfigurationSpec, ns, serviceAccountName string,
+	ownerReferences []metav1.OwnerReference, podSpec *corev1.PodSpec, meta metav1.ObjectMeta, sidecar *corev1.Container,
+) error {
+	for _, source := range flagSourceConfig.Sources {
+		if source.Provider == "" {
+			source.Provider = flagSourceConfig.DefaultSyncProvider
+		}
+		switch {
+		case source.Provider.IsFilepath():
+			if err := handleFilepathProvider(ctx, log, c, ns, ownerReferences, podSpec, sidecar, source); err != nil {
+				return fmt.Errorf("handleFilepathProvider: %w", err)
+			}
+		case source.Provider.IsKubernetes():
+			if err := handleKubernetesProvider(ctx, log, c, ns, serviceAccountName, meta, sidecar, source); err != nil {
+				return fmt.Errorf("handleKubernetesProvider: %w", err)
+			}
+		case source.Provider.IsHttp():
+			handleHttpProvider(sidecar, source)
+		default:
+			return fmt.Errorf("unrecognized sync provider in config: %s", source.Provider)
+		}
+	}
+
+	return nil
+}
+
+func handleFilepathProvider(
+	ctx context.Context, log logr.Logger, c client.Client, ns string, ownerReferences []metav1.OwnerReference,
+	podSpec *corev1.PodSpec, sidecar *corev1.Container, source v1alpha1.Source,
+) error {
+	// create config map
+	ns, n := ParseAnnotation(source.Source, ns)
+	cm := corev1.ConfigMap{}
+	if err := c.Get(ctx, client.ObjectKey{Name: n, Namespace: ns}, &cm); errors.IsNotFound(err) {
+		err := CreateConfigMap(ctx, log, c, ns, n, ownerReferences)
+		if err != nil {
+			log.Error(err, fmt.Sprintf("failed to create config map %s/%s", ns, n))
+			return err
+		}
+	}
+
+	// Add reference of the owner
+	if !SharedOwnership(ownerReferences, cm.OwnerReferences) {
+		reference := ownerReferences[0]
+		reference.Controller = utils.FalseVal()
+		cm.OwnerReferences = append(cm.OwnerReferences, reference)
+		err := c.Update(ctx, &cm)
+		if err != nil {
+			log.Error(err, fmt.Sprintf("failed to update owner reference for config map %s/%s", ns, n))
+		}
+	}
+	// mount configmap
+	podSpec.Volumes = append(podSpec.Volumes, corev1.Volume{
+		Name: n,
+		VolumeSource: corev1.VolumeSource{
+			ConfigMap: &corev1.ConfigMapVolumeSource{
+				LocalObjectReference: corev1.LocalObjectReference{
+					Name: n,
+				},
+			},
+		},
+	})
+	mountPath := fmt.Sprintf("%s/%s", rootFileSyncMountPath, v1alpha1.FeatureFlagConfigurationId(ns, n))
+	sidecar.VolumeMounts = append(sidecar.VolumeMounts, corev1.VolumeMount{
+		Name: n,
+		// create a directory mount per featureFlag spec
+		// file mounts will not work
+		MountPath: mountPath,
+	})
+	sidecar.Args = append(
+		sidecar.Args,
+		"--uri",
+		fmt.Sprintf("file:%s/%s",
+			mountPath,
+			v1alpha1.FeatureFlagConfigurationConfigMapKey(ns, n),
+		),
+	)
+	return nil
+}
+
+func handleKubernetesProvider(
+	ctx context.Context, log logr.Logger, c client.Client, ns, serviceAccountName string, meta metav1.ObjectMeta, sidecar *corev1.Container, source v1alpha1.Source,
+) error {
+	ns, n := ParseAnnotation(source.Source, ns)
+	// ensure that the FeatureFlagConfiguration exists
+	ff := FeatureFlag(ctx, c, ns, n)
+	if ff.Name == "" {
+		return fmt.Errorf("feature flag configuration %s/%s not found", ns, n)
+	}
+	// add permissions to pod
+	if err := EnableClusterRoleBinding(ctx, log, c, ns, serviceAccountName); err != nil {
+		return err
+	}
+	// mark with annotation (required to backfill permissions if they are dropped)
+	meta.Annotations[fmt.Sprintf("%s/%s", OpenFeatureAnnotationPrefix, AllowKubernetesSyncAnnotation)] 
= "true" + // append args + sidecar.Args = append( + sidecar.Args, + "--uri", + fmt.Sprintf( + "core.openfeature.dev/%s/%s", + ns, + n, + ), + ) + return nil +} + +func handleHttpProvider(sidecar *corev1.Container, source v1alpha1.Source) { + // append args + sidecar.Args = append( + sidecar.Args, + "--uri", + source.Source, + ) + if source.HttpSyncBearerToken != "" { + sidecar.Args = append( + sidecar.Args, + "--bearer-token", + source.HttpSyncBearerToken, + ) + } +} diff --git a/controllers/role.go b/controllers/role.go new file mode 100644 index 000000000..4f5e0d6d2 --- /dev/null +++ b/controllers/role.go @@ -0,0 +1,60 @@ +package controllers + +import ( + "context" + "fmt" + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/errors" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// EnableClusterRoleBinding enables the open-feature-operator-flagd-kubernetes-sync cluster role binding for the given +// service account under the given namespace (required for kubernetes sync provider) +func EnableClusterRoleBinding(ctx context.Context, log logr.Logger, c client.Client, namespace, serviceAccountName string) error { + serviceAccount := client.ObjectKey{ + Name: serviceAccountName, + Namespace: namespace, + } + if serviceAccountName == "" { + serviceAccount.Name = "default" + } + // Check if the service account exists + log.V(1).Info(fmt.Sprintf("Fetching serviceAccount: %s/%s", serviceAccount.Namespace, serviceAccount.Name)) + sa := corev1.ServiceAccount{} + if err := c.Get(ctx, serviceAccount, &sa); err != nil { + log.V(1).Info(fmt.Sprintf("ServiceAccount not found: %s/%s", serviceAccount.Namespace, serviceAccount.Name)) + return err + } + log.V(1).Info(fmt.Sprintf("Fetching clusterrolebinding: %s", clusterRoleBindingName)) + // Fetch service account if it exists + crb := rbacv1.ClusterRoleBinding{} + if err := c.Get(ctx, client.ObjectKey{Name: clusterRoleBindingName}, &crb); errors.IsNotFound(err) { + log.V(1).Info(fmt.Sprintf("ClusterRoleBinding not found: %s", clusterRoleBindingName)) + return err + } + found := false + for _, subject := range crb.Subjects { + if subject.Kind == "ServiceAccount" && subject.Name == serviceAccount.Name && subject.Namespace == serviceAccount.Namespace { + log.V(1).Info(fmt.Sprintf("ClusterRoleBinding already exists for service account: %s/%s", serviceAccount.Namespace, serviceAccount.Name)) + found = true + } + } + if !found { + log.V(1).Info(fmt.Sprintf("Updating ClusterRoleBinding %s for service account: %s/%s", crb.Name, + serviceAccount.Namespace, serviceAccount.Name)) + crb.Subjects = append(crb.Subjects, rbacv1.Subject{ + Kind: "ServiceAccount", + Name: serviceAccount.Name, + Namespace: serviceAccount.Namespace, + }) + if err := c.Update(ctx, &crb); err != nil { + log.V(1).Info(fmt.Sprintf("Failed to update ClusterRoleBinding: %s", err.Error())) + return err + } + } + log.V(1).Info(fmt.Sprintf("Updated ClusterRoleBinding: %s", crb.Name)) + + return nil +} diff --git a/webhooks/pod_webhook.go b/webhooks/pod_webhook.go index 46310cbbe..6bb49a6df 100644 --- a/webhooks/pod_webhook.go +++ b/webhooks/pod_webhook.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "github.com/open-feature/open-feature-operator/controllers" "net/http" "reflect" "strings" @@ -15,9 +16,7 @@ import ( v1alpha1 "github.com/open-feature/open-feature-operator/apis/core/v1alpha1" "github.com/open-feature/open-feature-operator/pkg/utils" corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/rbac/v1" 
"k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -30,8 +29,6 @@ const ( flagdMetricPortEnvVar string = "FLAGD_METRICS_PORT" rootFileSyncMountPath string = "/etc/flagd" OpenFeatureAnnotationPath = "metadata.annotations.openfeature.dev/openfeature.dev" - OpenFeatureAnnotationPrefix = "openfeature.dev" - AllowKubernetesSyncAnnotation = "allowkubernetessync" FlagSourceConfigurationAnnotation = "flagsourceconfiguration" FeatureFlagConfigurationAnnotation = "featureflagconfiguration" EnabledAnnotation = "enabled" @@ -77,7 +74,7 @@ func (m *PodMutator) Handle(ctx context.Context, req admission.Request) admissio // Check enablement enabled := false - val, ok := pod.GetAnnotations()[OpenFeatureAnnotationPrefix] + val, ok := pod.GetAnnotations()[controllers.OpenFeatureAnnotationPrefix] if ok { m.Log.V(1).Info("DEPRECATED: The openfeature.dev annotation has been superseded by the openfeature.dev/enabled annotation. " + "Docs: https://github.com/open-feature/open-feature-operator/blob/main/docs/annotations.md") @@ -85,7 +82,7 @@ func (m *PodMutator) Handle(ctx context.Context, req admission.Request) admissio enabled = true } } - val, ok = pod.GetAnnotations()[fmt.Sprintf("%s/%s", OpenFeatureAnnotationPrefix, EnabledAnnotation)] + val, ok = pod.GetAnnotations()[fmt.Sprintf("%s/%s", controllers.OpenFeatureAnnotationPrefix, EnabledAnnotation)] if ok { if val == "true" { enabled = true @@ -99,7 +96,7 @@ func (m *PodMutator) Handle(ctx context.Context, req admission.Request) admissio // Check configuration fscNames := []string{} - val, ok = pod.GetAnnotations()[fmt.Sprintf("%s/%s", OpenFeatureAnnotationPrefix, FlagSourceConfigurationAnnotation)] + val, ok = pod.GetAnnotations()[fmt.Sprintf("%s/%s", controllers.OpenFeatureAnnotationPrefix, FlagSourceConfigurationAnnotation)] if ok { fscNames = parseList(val) } @@ -109,7 +106,7 @@ func (m *PodMutator) Handle(ctx context.Context, req admission.Request) admissio } // Check for the correct clusterrolebinding for the pod - if err := m.enableClusterRoleBinding(ctx, pod); err != nil { + if err := controllers.EnableClusterRoleBinding(ctx, m.Log, m.Client, pod.Namespace, pod.Spec.ServiceAccountName); err != nil { return admission.Denied(err.Error()) } @@ -121,7 +118,7 @@ func (m *PodMutator) Handle(ctx context.Context, req admission.Request) admissio } for _, fscName := range fscNames { - ns, name := parseAnnotation(fscName, req.Namespace) + ns, name := controllers.ParseAnnotation(fscName, req.Namespace) if err != nil { m.Log.V(1).Info(fmt.Sprintf("failed to parse annotation %s error: %s", fscName, err.Error())) return admission.Errored(http.StatusBadRequest, err) @@ -135,7 +132,7 @@ func (m *PodMutator) Handle(ctx context.Context, req admission.Request) admissio } // maintain backwards compatibility of the openfeature.dev/featureflagconfiguration annotation - ffConfigAnnotation, ffConfigAnnotationOk := pod.GetAnnotations()[fmt.Sprintf("%s/%s", OpenFeatureAnnotationPrefix, FeatureFlagConfigurationAnnotation)] + ffConfigAnnotation, ffConfigAnnotationOk := pod.GetAnnotations()[fmt.Sprintf("%s/%s", controllers.OpenFeatureAnnotationPrefix, FeatureFlagConfigurationAnnotation)] if ffConfigAnnotationOk { m.Log.V(1).Info("DEPRECATED: The openfeature.dev/featureflagconfiguration annotation has been superseded by the openfeature.dev/flagsourceconfiguration annotation. 
" + "Docs: https://github.com/open-feature/open-feature-operator/blob/main/docs/annotations.md") @@ -178,24 +175,10 @@ func (m *PodMutator) injectSidecar( Resources: m.FlagDResourceRequirements, } - for _, source := range flagSourceConfig.Sources { - if source.Provider == "" { - source.Provider = flagSourceConfig.DefaultSyncProvider - } - switch { - case source.Provider.IsFilepath(): - if err := m.handleFilepathProvider(ctx, pod, &sidecar, source); err != nil { - return nil, err - } - case source.Provider.IsKubernetes(): - if err := m.handleKubernetesProvider(ctx, pod, &sidecar, source); err != nil { - return nil, err - } - case source.Provider.IsHttp(): - m.handleHttpProvider(&sidecar, source) - default: - return nil, fmt.Errorf("unrecognized sync provider in config: %s", source.Provider) - } + if err := controllers.HandleSourcesProviders(ctx, m.Log, m.Client, flagSourceConfig, pod.Namespace, + pod.Spec.ServiceAccountName, pod.OwnerReferences, &pod.Spec, pod.ObjectMeta, &sidecar, + ); err != nil { + return nil, err } sidecar.Env = append(sidecar.Env, flagSourceConfig.ToEnvVars()...) @@ -220,109 +203,16 @@ func (m *PodMutator) injectSidecar( return json.Marshal(pod) } -func (m *PodMutator) handleHttpProvider(sidecar *corev1.Container, source v1alpha1.Source) { - // append args - sidecar.Args = append( - sidecar.Args, - "--uri", - source.Source, - ) - if source.HttpSyncBearerToken != "" { - sidecar.Args = append( - sidecar.Args, - "--bearer-token", - source.HttpSyncBearerToken, - ) - } -} - -func (m *PodMutator) handleKubernetesProvider(ctx context.Context, pod *corev1.Pod, sidecar *corev1.Container, source v1alpha1.Source) error { - ns, n := parseAnnotation(source.Source, pod.Namespace) - // ensure that the FeatureFlagConfiguration exists - ff := m.getFeatureFlag(ctx, ns, n) - if ff.Name == "" { - return fmt.Errorf("feature flag configuration %s/%s not found", ns, n) - } - // add permissions to pod - if err := m.enableClusterRoleBinding(ctx, pod); err != nil { - return err - } - // mark pod with annotation (required to backfill permissions if they are dropped) - pod.Annotations[fmt.Sprintf("%s/%s", OpenFeatureAnnotationPrefix, AllowKubernetesSyncAnnotation)] = "true" - // append args - sidecar.Args = append( - sidecar.Args, - "--uri", - fmt.Sprintf( - "core.openfeature.dev/%s/%s", - ns, - n, - ), - ) - return nil -} - -func (m *PodMutator) handleFilepathProvider(ctx context.Context, pod *corev1.Pod, sidecar *corev1.Container, source v1alpha1.Source) error { - // create config map - ns, n := parseAnnotation(source.Source, pod.Namespace) - cm := corev1.ConfigMap{} - if err := m.Client.Get(ctx, client.ObjectKey{Name: n, Namespace: ns}, &cm); errors.IsNotFound(err) { - err := m.createConfigMap(ctx, ns, n, pod) - if err != nil { - m.Log.V(1).Info(fmt.Sprintf("failed to create config map %s error: %s", n, err.Error())) - return err - } - } - - // Add owner reference of the pod's owner - if !podOwnerIsOwner(pod, cm) { - reference := pod.OwnerReferences[0] - reference.Controller = utils.FalseVal() - cm.OwnerReferences = append(cm.OwnerReferences, reference) - err := m.Client.Update(ctx, &cm) - if err != nil { - m.Log.V(1).Info(fmt.Sprintf("failed to update owner reference for %s error: %s", n, err.Error())) - } - } - // mount configmap - pod.Spec.Volumes = append(pod.Spec.Volumes, corev1.Volume{ - Name: n, - VolumeSource: corev1.VolumeSource{ - ConfigMap: &corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: n, - }, - }, - }, - }) - mountPath := 
fmt.Sprintf("%s/%s", rootFileSyncMountPath, v1alpha1.FeatureFlagConfigurationId(ns, n)) - sidecar.VolumeMounts = append(sidecar.VolumeMounts, corev1.VolumeMount{ - Name: n, - // create a directory mount per featureFlag spec - // file mounts will not work - MountPath: mountPath, - }) - sidecar.Args = append( - sidecar.Args, - "--uri", - fmt.Sprintf("file:%s/%s", - mountPath, - v1alpha1.FeatureFlagConfigurationConfigMapKey(ns, n), - ), - ) - return nil -} - // BackfillPermissions recovers the state of the flagd-kubernetes-sync role binding in the event of upgrade func (m *PodMutator) BackfillPermissions(ctx context.Context) error { defer func() { m.ready = true }() for i := 0; i < 5; i++ { - // fetch all pods with the fmt.Sprintf("%s/%s", OpenFeatureAnnotationPrefix, EnabledAnnotation) annotation set to "true" + // fetch all pods with the fmt.Sprintf("%s/%s", controllers.OpenFeatureAnnotationPrefix, EnabledAnnotation) annotation set to "true" podList := &corev1.PodList{} err := m.Client.List(ctx, podList, client.MatchingFields{ - fmt.Sprintf("%s/%s", OpenFeatureAnnotationPath, AllowKubernetesSyncAnnotation): "true", + fmt.Sprintf("%s/%s", OpenFeatureAnnotationPath, controllers.AllowKubernetesSyncAnnotation): "true", }) if err != nil { if !goErr.Is(err, &cache.ErrCacheNotStarted{}) { @@ -335,12 +225,12 @@ func (m *PodMutator) BackfillPermissions(ctx context.Context) error { // add each new service account to the flagd-kubernetes-sync role binding for _, pod := range podList.Items { m.Log.V(1).Info(fmt.Sprintf("backfilling permissions for pod %s/%s", pod.Namespace, pod.Name)) - if err := m.enableClusterRoleBinding(ctx, &pod); err != nil { + if err := controllers.EnableClusterRoleBinding(ctx, m.Log, m.Client, pod.Namespace, pod.Spec.ServiceAccountName); err != nil { m.Log.Error( err, fmt.Sprintf("unable backfill permissions for pod %s/%s", pod.Namespace, pod.Name), "webhook", - fmt.Sprintf("%s/%s", OpenFeatureAnnotationPath, AllowKubernetesSyncAnnotation), + fmt.Sprintf("%s/%s", OpenFeatureAnnotationPath, controllers.AllowKubernetesSyncAnnotation), ) } } @@ -358,14 +248,6 @@ func parseList(s string) []string { return out } -func parseAnnotation(s string, defaultNs string) (string, string) { - ss := strings.Split(s, "/") - if len(ss) == 2 { - return ss[0], ss[1] - } - return defaultNs, s -} - // PodMutator implements admission.DecoderInjector. // A decoder will be automatically injected. 
@@ -375,89 +257,6 @@ func (m *PodMutator) InjectDecoder(d *admission.Decoder) error { return nil } -func podOwnerIsOwner(pod *corev1.Pod, cm corev1.ConfigMap) bool { - for _, cmOwner := range cm.OwnerReferences { - for _, podOwner := range pod.OwnerReferences { - if cmOwner.UID == podOwner.UID { - return true - } - } - } - return false -} - -func (m *PodMutator) enableClusterRoleBinding(ctx context.Context, pod *corev1.Pod) error { - serviceAccount := client.ObjectKey{ - Name: pod.Spec.ServiceAccountName, - Namespace: pod.Namespace, - } - if pod.Spec.ServiceAccountName == "" { - serviceAccount.Name = "default" - } - // Check if the service account exists - m.Log.V(1).Info(fmt.Sprintf("Fetching serviceAccount: %s/%s", pod.Namespace, pod.Spec.ServiceAccountName)) - sa := corev1.ServiceAccount{} - if err := m.Client.Get(ctx, serviceAccount, &sa); err != nil { - m.Log.V(1).Info(fmt.Sprintf("ServiceAccount not found: %s/%s", serviceAccount.Namespace, serviceAccount.Name)) - return err - } - m.Log.V(1).Info(fmt.Sprintf("Fetching clusterrolebinding: %s", clusterRoleBindingName)) - // Fetch service account if it exists - crb := v1.ClusterRoleBinding{} - if err := m.Client.Get(ctx, client.ObjectKey{Name: clusterRoleBindingName}, &crb); errors.IsNotFound(err) { - m.Log.V(1).Info(fmt.Sprintf("ClusterRoleBinding not found: %s", clusterRoleBindingName)) - return err - } - found := false - for _, subject := range crb.Subjects { - if subject.Kind == "ServiceAccount" && subject.Name == serviceAccount.Name && subject.Namespace == serviceAccount.Namespace { - m.Log.V(1).Info(fmt.Sprintf("ClusterRoleBinding already exists for service account: %s/%s", serviceAccount.Namespace, serviceAccount.Name)) - found = true - } - } - if !found { - m.Log.V(1).Info(fmt.Sprintf("Updating ClusterRoleBinding %s for service account: %s/%s", crb.Name, - serviceAccount.Namespace, serviceAccount.Name)) - crb.Subjects = append(crb.Subjects, v1.Subject{ - Kind: "ServiceAccount", - Name: serviceAccount.Name, - Namespace: serviceAccount.Namespace, - }) - if err := m.Client.Update(ctx, &crb); err != nil { - m.Log.V(1).Info(fmt.Sprintf("Failed to update ClusterRoleBinding: %s", err.Error())) - return err - } - } - m.Log.V(1).Info(fmt.Sprintf("Updated ClusterRoleBinding: %s", crb.Name)) - - return nil -} - -func (m *PodMutator) createConfigMap(ctx context.Context, namespace string, name string, pod *corev1.Pod) error { - m.Log.V(1).Info(fmt.Sprintf("Creating configmap %s", name)) - references := []metav1.OwnerReference{ - pod.OwnerReferences[0], - } - references[0].Controller = utils.FalseVal() - ff := m.getFeatureFlag(ctx, namespace, name) - if ff.Name == "" { - return fmt.Errorf("feature flag configuration %s/%s not found", namespace, name) - } - references = append(references, v1alpha1.GetFfReference(&ff)) - - cm := v1alpha1.GenerateFfConfigMap(name, namespace, references, ff.Spec) - - return m.Client.Create(ctx, &cm) -} - -func (m *PodMutator) getFeatureFlag(ctx context.Context, namespace string, name string) v1alpha1.FeatureFlagConfiguration { - ffConfig := v1alpha1.FeatureFlagConfiguration{} - if err := m.Client.Get(ctx, client.ObjectKey{Name: name, Namespace: namespace}, &ffConfig); errors.IsNotFound(err) { - return v1alpha1.FeatureFlagConfiguration{} - } - return ffConfig -} - func (m *PodMutator) getFlagSourceConfiguration(ctx context.Context, namespace string, name string) v1alpha1.FlagSourceConfiguration { fcConfig := v1alpha1.FlagSourceConfiguration{} if err := m.Client.Get(ctx, client.ObjectKey{Name: name, Namespace: 
namespace}, &fcConfig); errors.IsNotFound(err) { @@ -499,7 +298,7 @@ func OpenFeatureEnabledAnnotationIndex(o client.Object) []string { "false", } } - val, ok := pod.ObjectMeta.Annotations[fmt.Sprintf("openfeature.dev/%s", AllowKubernetesSyncAnnotation)] + val, ok := pod.ObjectMeta.Annotations[fmt.Sprintf("openfeature.dev/%s", controllers.AllowKubernetesSyncAnnotation)] if ok && val == "true" { return []string{ "true",
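
Usage sketch (illustrative only, not part of the patch): both call sites above now
follow the same shape. Assuming the signatures introduced in controllers/provider.go,
a hypothetical further caller would drive the shared entry point as below; injectFlagd
and the bare sidecar are invented names for the example, everything else is taken from
the diff.

	package example

	import (
		"context"

		"github.com/go-logr/logr"
		"github.com/open-feature/open-feature-operator/apis/core/v1alpha1"
		"github.com/open-feature/open-feature-operator/controllers"
		corev1 "k8s.io/api/core/v1"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"sigs.k8s.io/controller-runtime/pkg/client"
	)

	func injectFlagd(
		ctx context.Context, log logr.Logger, c client.Client,
		fsConfig *v1alpha1.FlagSourceConfigurationSpec,
		pod *corev1.Pod, owners []metav1.OwnerReference,
	) error {
		// minimal flagd sidecar; real callers populate image, ports and env from the spec
		sidecar := corev1.Container{Name: "flagd", Args: []string{"start"}}
		// one call covers the kubernetes, filepath and http providers, mutating the pod
		// spec (volumes), metadata (annotations) and sidecar (args) as required; note
		// that pod annotations must already be non-nil when a kubernetes source is used
		if err := controllers.HandleSourcesProviders(
			ctx, log, c, fsConfig, pod.Namespace, pod.Spec.ServiceAccountName,
			owners, &pod.Spec, pod.ObjectMeta, &sidecar,
		); err != nil {
			return err
		}
		pod.Spec.Containers = append(pod.Spec.Containers, sidecar)
		return nil
	}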