From 5886703d0102d4f098ebbb4252d90781668ffe27 Mon Sep 17 00:00:00 2001 From: Mmadu Manasseh Date: Mon, 13 Jan 2025 16:30:12 +0100 Subject: [PATCH] use in-repo gitops-engine and kubectl modules Signed-off-by: Mmadu Manasseh --- pkg/checks/diff/diff.go | 9 +- pkg/gitops-engine/pkg/utils/kube/convert.go | 26 + pkg/gitops-engine/pkg/utils/kube/ctl.go | 350 +++++ pkg/gitops-engine/pkg/utils/kube/kube.go | 423 ++++++ .../pkg/utils/kube/resource_filter.go | 5 + .../pkg/utils/kube/resource_ops.go | 513 ++++++++ .../pkg/utils/kube/uniqueprotomodels.go | 191 +++ pkg/kubectl/apply/apply.go | 1131 +++++++++++++++++ pkg/kubectl/apply/apply_edit_last_applied.go | 89 ++ pkg/kubectl/apply/apply_set_last_applied.go | 219 ++++ pkg/kubectl/apply/apply_view_last_applied.go | 174 +++ pkg/kubectl/apply/applyset.go | 607 +++++++++ pkg/kubectl/apply/applyset_pruner.go | 195 +++ pkg/kubectl/apply/patcher.go | 431 +++++++ pkg/kubectl/apply/prune.go | 162 +++ pkg/kubectl/cli-runtime/resource/helper.go | 321 +++++ .../cli-runtime/resource/interfaces.go | 91 ++ 17 files changed, 4933 insertions(+), 4 deletions(-) create mode 100644 pkg/gitops-engine/pkg/utils/kube/convert.go create mode 100644 pkg/gitops-engine/pkg/utils/kube/ctl.go create mode 100644 pkg/gitops-engine/pkg/utils/kube/kube.go create mode 100644 pkg/gitops-engine/pkg/utils/kube/resource_filter.go create mode 100644 pkg/gitops-engine/pkg/utils/kube/resource_ops.go create mode 100644 pkg/gitops-engine/pkg/utils/kube/uniqueprotomodels.go create mode 100644 pkg/kubectl/apply/apply.go create mode 100644 pkg/kubectl/apply/apply_edit_last_applied.go create mode 100644 pkg/kubectl/apply/apply_set_last_applied.go create mode 100644 pkg/kubectl/apply/apply_view_last_applied.go create mode 100644 pkg/kubectl/apply/applyset.go create mode 100644 pkg/kubectl/apply/applyset_pruner.go create mode 100644 pkg/kubectl/apply/patcher.go create mode 100644 pkg/kubectl/apply/prune.go create mode 100644 pkg/kubectl/cli-runtime/resource/helper.go 
create mode 100644 pkg/kubectl/cli-runtime/resource/interfaces.go diff --git a/pkg/checks/diff/diff.go b/pkg/checks/diff/diff.go index 0738cba8..88facb4a 100644 --- a/pkg/checks/diff/diff.go +++ b/pkg/checks/diff/diff.go @@ -4,6 +4,10 @@ import ( "context" "encoding/json" "fmt" + "io" + "strings" + "time" + cmdutil "github.com/argoproj/argo-cd/v2/cmd/util" "github.com/argoproj/argo-cd/v2/controller" "github.com/argoproj/argo-cd/v2/pkg/apiclient/application" @@ -15,21 +19,18 @@ import ( "github.com/argoproj/gitops-engine/pkg/diff" "github.com/argoproj/gitops-engine/pkg/sync/hook" "github.com/argoproj/gitops-engine/pkg/sync/ignore" - "github.com/argoproj/gitops-engine/pkg/utils/kube" "github.com/argoproj/gitops-engine/pkg/utils/tracing" "github.com/ghodss/yaml" "github.com/go-logr/zerologr" "github.com/pmezard/go-difflib/difflib" "github.com/rs/zerolog/log" - "io" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/rest" "k8s.io/klog/v2/textlogger" - "strings" - "time" "github.com/zapier/kubechecks/pkg/checks" + "github.com/zapier/kubechecks/pkg/gitops-engine/pkg/utils/kube" "github.com/zapier/kubechecks/pkg/msg" "github.com/zapier/kubechecks/telemetry" ) diff --git a/pkg/gitops-engine/pkg/utils/kube/convert.go b/pkg/gitops-engine/pkg/utils/kube/convert.go new file mode 100644 index 00000000..826c384d --- /dev/null +++ b/pkg/gitops-engine/pkg/utils/kube/convert.go @@ -0,0 +1,26 @@ +package kube + +import ( + "github.com/argoproj/gitops-engine/pkg/utils/kube/scheme" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func convertToVersionWithScheme(obj *unstructured.Unstructured, group string, version string) (*unstructured.Unstructured, error) { + s := scheme.Scheme + object, err := s.ConvertToVersion(obj, runtime.InternalGroupVersioner) + if err != nil { + return nil, err + } + unmarshalledObj, err := 
s.ConvertToVersion(object, schema.GroupVersion{Group: group, Version: version}) + if err != nil { + return nil, err + } + unstrBody, err := runtime.DefaultUnstructuredConverter.ToUnstructured(unmarshalledObj) + if err != nil { + return nil, err + } + return &unstructured.Unstructured{Object: unstrBody}, nil +} diff --git a/pkg/gitops-engine/pkg/utils/kube/ctl.go b/pkg/gitops-engine/pkg/utils/kube/ctl.go new file mode 100644 index 00000000..3f494b38 --- /dev/null +++ b/pkg/gitops-engine/pkg/utils/kube/ctl.go @@ -0,0 +1,350 @@ +package kube + +import ( + "context" + "fmt" + "os" + "strings" + + "github.com/go-logr/logr" + "golang.org/x/sync/errgroup" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/managedfields" + "k8s.io/client-go/discovery" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/rest" + "k8s.io/kube-openapi/pkg/util/proto" + "k8s.io/kubectl/pkg/util/openapi" + + utils "github.com/argoproj/gitops-engine/pkg/utils/io" + "github.com/argoproj/gitops-engine/pkg/utils/tracing" +) + +type CleanupFunc func() + +type OnKubectlRunFunc func(command string) (CleanupFunc, error) + +type Kubectl interface { + ManageResources(config *rest.Config, openAPISchema openapi.Resources) (ResourceOperations, func(), error) + LoadOpenAPISchema(config *rest.Config) (openapi.Resources, *managedfields.GvkParser, error) + ConvertToVersion(obj *unstructured.Unstructured, group, version string) (*unstructured.Unstructured, error) + DeleteResource(ctx context.Context, config *rest.Config, gvk schema.GroupVersionKind, name string, namespace string, deleteOptions metav1.DeleteOptions) error + GetResource(ctx context.Context, config *rest.Config, gvk schema.GroupVersionKind, name string, namespace string) (*unstructured.Unstructured, error) + CreateResource(ctx context.Context, config *rest.Config, gvk schema.GroupVersionKind, 
name string, namespace string, obj *unstructured.Unstructured, createOptions metav1.CreateOptions, subresources ...string) (*unstructured.Unstructured, error) + PatchResource(ctx context.Context, config *rest.Config, gvk schema.GroupVersionKind, name string, namespace string, patchType types.PatchType, patchBytes []byte, subresources ...string) (*unstructured.Unstructured, error) + GetAPIResources(config *rest.Config, preferred bool, resourceFilter ResourceFilter) ([]APIResourceInfo, error) + GetServerVersion(config *rest.Config) (string, error) + NewDynamicClient(config *rest.Config) (dynamic.Interface, error) + SetOnKubectlRun(onKubectlRun OnKubectlRunFunc) +} + +type KubectlCmd struct { + Log logr.Logger + Tracer tracing.Tracer + OnKubectlRun OnKubectlRunFunc +} + +type APIResourceInfo struct { + GroupKind schema.GroupKind + Meta metav1.APIResource + GroupVersionResource schema.GroupVersionResource +} + +type filterFunc func(apiResource *metav1.APIResource) bool + +func (k *KubectlCmd) filterAPIResources(config *rest.Config, preferred bool, resourceFilter ResourceFilter, filter filterFunc) ([]APIResourceInfo, error) { + disco, err := discovery.NewDiscoveryClientForConfig(config) + if err != nil { + return nil, err + } + + var serverResources []*metav1.APIResourceList + if preferred { + serverResources, err = disco.ServerPreferredResources() + } else { + _, serverResources, err = disco.ServerGroupsAndResources() + } + + if err != nil { + if len(serverResources) == 0 { + return nil, err + } + k.Log.Error(err, "Partial success when performing preferred resource discovery") + } + apiResIfs := make([]APIResourceInfo, 0) + for _, apiResourcesList := range serverResources { + gv, err := schema.ParseGroupVersion(apiResourcesList.GroupVersion) + if err != nil { + gv = schema.GroupVersion{} + } + for _, apiResource := range apiResourcesList.APIResources { + + if resourceFilter.IsExcludedResource(gv.Group, apiResource.Kind, config.Host) { + continue + } + + if 
filter(&apiResource) { + resource := ToGroupVersionResource(apiResourcesList.GroupVersion, &apiResource) + gv, err := schema.ParseGroupVersion(apiResourcesList.GroupVersion) + if err != nil { + return nil, err + } + apiResIf := APIResourceInfo{ + GroupKind: schema.GroupKind{Group: gv.Group, Kind: apiResource.Kind}, + Meta: apiResource, + GroupVersionResource: resource, + } + apiResIfs = append(apiResIfs, apiResIf) + } + } + } + return apiResIfs, nil +} + +// isSupportedVerb returns whether or not a APIResource supports a specific verb. +// The verb will be matched case-insensitive. +func isSupportedVerb(apiResource *metav1.APIResource, verb string) bool { + if verb == "" || verb == "*" { + return true + } + for _, v := range apiResource.Verbs { + if strings.EqualFold(v, verb) { + return true + } + } + return false +} + +// LoadOpenAPISchema will load all existing resource schemas from the cluster +// and return: +// - openapi.Resources: used for getting the proto.Schema from a GVK +// - managedfields.GvkParser: used for building a ParseableType to be used in +// structured-merge-diffs +func (k *KubectlCmd) LoadOpenAPISchema(config *rest.Config) (openapi.Resources, *managedfields.GvkParser, error) { + disco, err := discovery.NewDiscoveryClientForConfig(config) + if err != nil { + return nil, nil, err + } + + oapiGetter := openapi.NewOpenAPIGetter(disco) + oapiResources, err := openapi.NewOpenAPIParser(oapiGetter).Parse() + if err != nil { + return nil, nil, fmt.Errorf("error getting openapi resources: %s", err) + } + gvkParser, err := k.newGVKParser(oapiGetter) + if err != nil { + return oapiResources, nil, fmt.Errorf("error getting gvk parser: %s", err) + } + return oapiResources, gvkParser, nil +} + +func (k *KubectlCmd) newGVKParser(oapiGetter discovery.OpenAPISchemaInterface) (*managedfields.GvkParser, error) { + doc, err := oapiGetter.OpenAPISchema() + if err != nil { + return nil, fmt.Errorf("error getting openapi schema: %s", err) + } + models, err := 
proto.NewOpenAPIData(doc) + if err != nil { + return nil, fmt.Errorf("error getting openapi data: %s", err) + } + var taintedGVKs []schema.GroupVersionKind + models, taintedGVKs = newUniqueModels(models) + if len(taintedGVKs) > 0 { + k.Log.Info("Duplicate GVKs detected in OpenAPI schema. This could cause inaccurate diffs.", "gvks", taintedGVKs) + } + gvkParser, err := managedfields.NewGVKParser(models, false) + if err != nil { + return nil, err + } + return gvkParser, nil +} + +func (k *KubectlCmd) GetAPIResources(config *rest.Config, preferred bool, resourceFilter ResourceFilter) ([]APIResourceInfo, error) { + span := k.Tracer.StartSpan("GetAPIResources") + defer span.Finish() + apiResIfs, err := k.filterAPIResources(config, preferred, resourceFilter, func(apiResource *metav1.APIResource) bool { + return isSupportedVerb(apiResource, listVerb) && isSupportedVerb(apiResource, watchVerb) + }) + if err != nil { + return nil, err + } + return apiResIfs, err +} + +// GetResource returns resource +func (k *KubectlCmd) GetResource(ctx context.Context, config *rest.Config, gvk schema.GroupVersionKind, name string, namespace string) (*unstructured.Unstructured, error) { + span := k.Tracer.StartSpan("GetResource") + span.SetBaggageItem("kind", gvk.Kind) + span.SetBaggageItem("name", name) + defer span.Finish() + dynamicIf, err := dynamic.NewForConfig(config) + if err != nil { + return nil, err + } + disco, err := discovery.NewDiscoveryClientForConfig(config) + if err != nil { + return nil, err + } + apiResource, err := ServerResourceForGroupVersionKind(disco, gvk, "get") + if err != nil { + return nil, err + } + resource := gvk.GroupVersion().WithResource(apiResource.Name) + resourceIf := ToResourceInterface(dynamicIf, apiResource, resource, namespace) + return resourceIf.Get(ctx, name, metav1.GetOptions{}) +} + +// CreateResource creates resource +func (k *KubectlCmd) CreateResource(ctx context.Context, config *rest.Config, gvk schema.GroupVersionKind, name string, 
namespace string, obj *unstructured.Unstructured, createOptions metav1.CreateOptions, subresources ...string) (*unstructured.Unstructured, error) { + span := k.Tracer.StartSpan("CreateResource") + span.SetBaggageItem("kind", gvk.Kind) + span.SetBaggageItem("name", name) + defer span.Finish() + dynamicIf, err := dynamic.NewForConfig(config) + if err != nil { + return nil, err + } + disco, err := discovery.NewDiscoveryClientForConfig(config) + if err != nil { + return nil, err + } + apiResource, err := ServerResourceForGroupVersionKind(disco, gvk, "create") + if err != nil { + return nil, err + } + resource := gvk.GroupVersion().WithResource(apiResource.Name) + resourceIf := ToResourceInterface(dynamicIf, apiResource, resource, namespace) + return resourceIf.Create(ctx, obj, createOptions, subresources...) +} + +// PatchResource patches resource +func (k *KubectlCmd) PatchResource(ctx context.Context, config *rest.Config, gvk schema.GroupVersionKind, name string, namespace string, patchType types.PatchType, patchBytes []byte, subresources ...string) (*unstructured.Unstructured, error) { + span := k.Tracer.StartSpan("PatchResource") + span.SetBaggageItem("kind", gvk.Kind) + span.SetBaggageItem("name", name) + defer span.Finish() + dynamicIf, err := dynamic.NewForConfig(config) + if err != nil { + return nil, err + } + disco, err := discovery.NewDiscoveryClientForConfig(config) + if err != nil { + return nil, err + } + apiResource, err := ServerResourceForGroupVersionKind(disco, gvk, "patch") + if err != nil { + return nil, err + } + resource := gvk.GroupVersion().WithResource(apiResource.Name) + resourceIf := ToResourceInterface(dynamicIf, apiResource, resource, namespace) + return resourceIf.Patch(ctx, name, patchType, patchBytes, metav1.PatchOptions{}, subresources...) 
+} + +// DeleteResource deletes resource +func (k *KubectlCmd) DeleteResource(ctx context.Context, config *rest.Config, gvk schema.GroupVersionKind, name string, namespace string, deleteOptions metav1.DeleteOptions) error { + span := k.Tracer.StartSpan("DeleteResource") + span.SetBaggageItem("kind", gvk.Kind) + span.SetBaggageItem("name", name) + defer span.Finish() + dynamicIf, err := dynamic.NewForConfig(config) + if err != nil { + return err + } + disco, err := discovery.NewDiscoveryClientForConfig(config) + if err != nil { + return err + } + apiResource, err := ServerResourceForGroupVersionKind(disco, gvk, "delete") + if err != nil { + return err + } + resource := gvk.GroupVersion().WithResource(apiResource.Name) + resourceIf := ToResourceInterface(dynamicIf, apiResource, resource, namespace) + + if deleteOptions.PropagationPolicy == nil { + propagationPolicy := metav1.DeletePropagationForeground + deleteOptions = metav1.DeleteOptions{PropagationPolicy: &propagationPolicy} + } + return resourceIf.Delete(ctx, name, deleteOptions) +} + +func (k *KubectlCmd) ManageResources(config *rest.Config, openAPISchema openapi.Resources) (ResourceOperations, func(), error) { + f, err := os.CreateTemp(utils.TempDir, "") + if err != nil { + return nil, nil, fmt.Errorf("failed to generate temp file for kubeconfig: %v", err) + } + _ = f.Close() + err = WriteKubeConfig(config, "", f.Name()) + if err != nil { + utils.DeleteFile(f.Name()) + return nil, nil, fmt.Errorf("failed to write kubeconfig: %v", err) + } + fact := kubeCmdFactory(f.Name(), "", config) + cleanup := func() { + utils.DeleteFile(f.Name()) + } + return &kubectlResourceOperations{ + config: config, + fact: fact, + openAPISchema: openAPISchema, + tracer: k.Tracer, + log: k.Log, + onKubectlRun: k.OnKubectlRun, + }, cleanup, nil +} + +// ConvertToVersion converts an unstructured object into the specified group/version +func (k *KubectlCmd) ConvertToVersion(obj *unstructured.Unstructured, group string, version string) 
(*unstructured.Unstructured, error) { + span := k.Tracer.StartSpan("ConvertToVersion") + from := obj.GroupVersionKind().GroupVersion() + span.SetBaggageItem("from", from.String()) + span.SetBaggageItem("to", schema.GroupVersion{Group: group, Version: version}.String()) + defer span.Finish() + if from.Group == group && from.Version == version { + return obj.DeepCopy(), nil + } + return convertToVersionWithScheme(obj, group, version) +} + +func (k *KubectlCmd) GetServerVersion(config *rest.Config) (string, error) { + span := k.Tracer.StartSpan("GetServerVersion") + defer span.Finish() + client, err := discovery.NewDiscoveryClientForConfig(config) + if err != nil { + return "", err + } + v, err := client.ServerVersion() + if err != nil { + return "", err + } + return fmt.Sprintf("%s.%s", v.Major, v.Minor), nil +} + +func (k *KubectlCmd) NewDynamicClient(config *rest.Config) (dynamic.Interface, error) { + return dynamic.NewForConfig(config) +} + +func (k *KubectlCmd) SetOnKubectlRun(onKubectlRun OnKubectlRunFunc) { + k.OnKubectlRun = onKubectlRun +} + +func RunAllAsync(count int, action func(i int) error) error { + g, ctx := errgroup.WithContext(context.Background()) +loop: + for i := 0; i < count; i++ { + index := i + g.Go(func() error { + return action(index) + }) + select { + case <-ctx.Done(): + // Something went wrong already, stop spawning tasks. 
+ break loop + default: + } + } + return g.Wait() +} diff --git a/pkg/gitops-engine/pkg/utils/kube/kube.go b/pkg/gitops-engine/pkg/utils/kube/kube.go new file mode 100644 index 00000000..f88ed172 --- /dev/null +++ b/pkg/gitops-engine/pkg/utils/kube/kube.go @@ -0,0 +1,423 @@ +// Package kube provides helper utilities common for kubernetes +package kube + +import ( + "bytes" + "context" + "fmt" + "io" + "regexp" + "strings" + "time" + + "github.com/go-logr/logr" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierr "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/wait" + kubeyaml "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/discovery" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + "sigs.k8s.io/yaml" +) + +const ( + listVerb = "list" + watchVerb = "watch" +) + +const ( + SecretKind = "Secret" + ServiceKind = "Service" + ServiceAccountKind = "ServiceAccount" + EndpointsKind = "Endpoints" + DeploymentKind = "Deployment" + ReplicaSetKind = "ReplicaSet" + StatefulSetKind = "StatefulSet" + DaemonSetKind = "DaemonSet" + IngressKind = "Ingress" + JobKind = "Job" + PersistentVolumeClaimKind = "PersistentVolumeClaim" + CustomResourceDefinitionKind = "CustomResourceDefinition" + PodKind = "Pod" + APIServiceKind = "APIService" + NamespaceKind = "Namespace" + HorizontalPodAutoscalerKind = "HorizontalPodAutoscaler" +) + +type ResourceInfoProvider interface { + IsNamespaced(gk schema.GroupKind) (bool, error) +} + +func IsNamespacedOrUnknown(provider ResourceInfoProvider, gk schema.GroupKind) bool { + namespaced, err := provider.IsNamespaced(gk) + return namespaced || err != nil +} + 
+type ResourceKey struct { + Group string + Kind string + Namespace string + Name string +} + +func (k *ResourceKey) String() string { + return fmt.Sprintf("%s/%s/%s/%s", k.Group, k.Kind, k.Namespace, k.Name) +} + +func (k ResourceKey) GroupKind() schema.GroupKind { + return schema.GroupKind{Group: k.Group, Kind: k.Kind} +} + +func NewResourceKey(group string, kind string, namespace string, name string) ResourceKey { + return ResourceKey{Group: group, Kind: kind, Namespace: namespace, Name: name} +} + +func GetResourceKey(obj *unstructured.Unstructured) ResourceKey { + gvk := obj.GroupVersionKind() + return NewResourceKey(gvk.Group, gvk.Kind, obj.GetNamespace(), obj.GetName()) +} + +func GetObjectRef(obj *unstructured.Unstructured) v1.ObjectReference { + return v1.ObjectReference{ + UID: obj.GetUID(), + APIVersion: obj.GetAPIVersion(), + Kind: obj.GetKind(), + Name: obj.GetName(), + Namespace: obj.GetNamespace(), + } +} + +// TestConfig tests to make sure the REST config is usable +func TestConfig(config *rest.Config) error { + kubeclientset, err := kubernetes.NewForConfig(config) + if err != nil { + return fmt.Errorf("REST config invalid: %s", err) + } + _, err = kubeclientset.ServerVersion() + if err != nil { + return fmt.Errorf("REST config invalid: %s", err) + } + return nil +} + +// ToUnstructured converts a concrete K8s API type to a un unstructured object +func ToUnstructured(obj interface{}) (*unstructured.Unstructured, error) { + uObj, err := runtime.NewTestUnstructuredConverter(equality.Semantic).ToUnstructured(obj) + if err != nil { + return nil, err + } + return &unstructured.Unstructured{Object: uObj}, nil +} + +// MustToUnstructured converts a concrete K8s API type to a un unstructured object and panics if not successful +func MustToUnstructured(obj interface{}) *unstructured.Unstructured { + uObj, err := ToUnstructured(obj) + if err != nil { + panic(err) + } + return uObj +} + +// GetAppInstanceLabel returns the application instance name from labels 
+func GetAppInstanceLabel(un *unstructured.Unstructured, key string) string { + if labels := un.GetLabels(); labels != nil { + return labels[key] + } + return "" +} + +// UnsetLabel removes our app labels from an unstructured object +func UnsetLabel(target *unstructured.Unstructured, key string) { + if labels := target.GetLabels(); labels != nil { + if _, ok := labels[key]; ok { + delete(labels, key) + if len(labels) == 0 { + unstructured.RemoveNestedField(target.Object, "metadata", "labels") + } else { + target.SetLabels(labels) + } + } + } +} + +func ToGroupVersionResource(groupVersion string, apiResource *metav1.APIResource) schema.GroupVersionResource { + gvk := schema.FromAPIVersionAndKind(groupVersion, apiResource.Kind) + gv := gvk.GroupVersion() + return gv.WithResource(apiResource.Name) +} + +func ToResourceInterface(dynamicIf dynamic.Interface, apiResource *metav1.APIResource, resource schema.GroupVersionResource, namespace string) dynamic.ResourceInterface { + if apiResource.Namespaced { + return dynamicIf.Resource(resource).Namespace(namespace) + } + return dynamicIf.Resource(resource) +} + +func IsCRDGroupVersionKind(gvk schema.GroupVersionKind) bool { + return gvk.Kind == CustomResourceDefinitionKind && gvk.Group == "apiextensions.k8s.io" +} + +func IsCRD(obj *unstructured.Unstructured) bool { + return IsCRDGroupVersionKind(obj.GroupVersionKind()) +} + +// ServerResourceForGroupVersionKind looks up and returns the API resource from +// the server for a given GVK scheme. If verb is set to the non-empty string, +// it will return the API resource which supports the verb. There are some edge +// cases, where the same GVK is represented by more than one API. 
+// +// See: https://github.com/ksonnet/ksonnet/blob/master/utils/client.go +func ServerResourceForGroupVersionKind(disco discovery.DiscoveryInterface, gvk schema.GroupVersionKind, verb string) (*metav1.APIResource, error) { + // default is to return a not found for the requested resource + retErr := apierr.NewNotFound(schema.GroupResource{Group: gvk.Group, Resource: gvk.Kind}, "") + resources, err := disco.ServerResourcesForGroupVersion(gvk.GroupVersion().String()) + if err != nil { + return nil, err + } + for _, r := range resources.APIResources { + if r.Kind == gvk.Kind { + if isSupportedVerb(&r, verb) { + return &r, nil + } else { + // We have a match, but the API does not support the action + // that was requested. Memorize this. + retErr = apierr.NewMethodNotSupported(schema.GroupResource{Group: gvk.Group, Resource: gvk.Kind}, verb) + } + } + } + return nil, retErr +} + +var ( + kubectlErrOutRegexp = regexp.MustCompile(`^(error: )?(error validating|error when creating|error when creating) "\S+": `) + + // See ApplyOpts::Run() + // cmdutil.AddSourceToErr(fmt.Sprintf("applying patch:\n%s\nto:\n%v\nfor:", patchBytes, info), info.Source, err) + kubectlApplyPatchErrOutRegexp = regexp.MustCompile(`(?s)^error when applying patch:.*\nfor: "\S+": `) +) + +// cleanKubectlOutput makes the error output of kubectl a little better to read +func cleanKubectlOutput(s string) string { + s = strings.TrimSpace(s) + s = kubectlErrOutRegexp.ReplaceAllString(s, "") + s = kubectlApplyPatchErrOutRegexp.ReplaceAllString(s, "") + s = strings.Replace(s, "; if you choose to ignore these errors, turn validation off with --validate=false", "", -1) + return s +} + +// WriteKubeConfig takes a rest.Config and writes it as a kubeconfig at the specified path +func WriteKubeConfig(restConfig *rest.Config, namespace, filename string) error { + kubeConfig := NewKubeConfig(restConfig, namespace) + return clientcmd.WriteToFile(*kubeConfig, filename) +} + +// NewKubeConfig converts a 
clientcmdapi.Config (kubeconfig) from a rest.Config +func NewKubeConfig(restConfig *rest.Config, namespace string) *clientcmdapi.Config { + var proxyUrl string + if restConfig.Proxy != nil { + if u, err := restConfig.Proxy(nil); err == nil { + proxyUrl = u.String() + } + } + return &clientcmdapi.Config{ + CurrentContext: restConfig.Host, + Contexts: map[string]*clientcmdapi.Context{ + restConfig.Host: { + Cluster: restConfig.Host, + AuthInfo: restConfig.Host, + Namespace: namespace, + }, + }, + Clusters: map[string]*clientcmdapi.Cluster{ + restConfig.Host: { + Server: restConfig.Host, + TLSServerName: restConfig.TLSClientConfig.ServerName, + InsecureSkipTLSVerify: restConfig.TLSClientConfig.Insecure, + CertificateAuthority: restConfig.TLSClientConfig.CAFile, + CertificateAuthorityData: restConfig.TLSClientConfig.CAData, + ProxyURL: proxyUrl, + }, + }, + AuthInfos: map[string]*clientcmdapi.AuthInfo{ + restConfig.Host: newAuthInfo(restConfig), + }, + } +} + +// newAuthInfo returns an AuthInfo from a rest config, detecting if the rest.Config is an +// in-cluster config and automatically setting the token path appropriately. 
+func newAuthInfo(restConfig *rest.Config) *clientcmdapi.AuthInfo { + authInfo := clientcmdapi.AuthInfo{} + haveCredentials := false + if restConfig.TLSClientConfig.CertFile != "" { + authInfo.ClientCertificate = restConfig.TLSClientConfig.CertFile + haveCredentials = true + } + if len(restConfig.TLSClientConfig.CertData) > 0 { + authInfo.ClientCertificateData = restConfig.TLSClientConfig.CertData + haveCredentials = true + } + if restConfig.TLSClientConfig.KeyFile != "" { + authInfo.ClientKey = restConfig.TLSClientConfig.KeyFile + haveCredentials = true + } + if len(restConfig.TLSClientConfig.KeyData) > 0 { + authInfo.ClientKeyData = restConfig.TLSClientConfig.KeyData + haveCredentials = true + } + if restConfig.Username != "" { + authInfo.Username = restConfig.Username + haveCredentials = true + } + if restConfig.Password != "" { + authInfo.Password = restConfig.Password + haveCredentials = true + } + if restConfig.BearerToken != "" { + authInfo.Token = restConfig.BearerToken + haveCredentials = true + } + if restConfig.ExecProvider != nil { + authInfo.Exec = restConfig.ExecProvider + haveCredentials = true + } + if restConfig.ExecProvider == nil && !haveCredentials { + // If no credentials were set (or there was no exec provider), we assume in-cluster config. + // In-cluster configs from the go-client will no longer set bearer tokens, so we set the + // well known token path. See issue #774 + authInfo.TokenFile = "/var/run/secrets/kubernetes.io/serviceaccount/token" + } + return &authInfo +} + +// SplitYAML splits a YAML file into unstructured objects. Returns list of all unstructured objects +// found in the yaml. If an error occurs, returns objects that have been parsed so far too. 
+func SplitYAML(yamlData []byte) ([]*unstructured.Unstructured, error) { + var objs []*unstructured.Unstructured + ymls, err := SplitYAMLToString(yamlData) + if err != nil { + return nil, err + } + for _, yml := range ymls { + u := &unstructured.Unstructured{} + if err := yaml.Unmarshal([]byte(yml), u); err != nil { + return objs, fmt.Errorf("failed to unmarshal manifest: %v", err) + } + objs = append(objs, u) + } + return objs, nil +} + +// SplitYAMLToString splits a YAML file into strings. Returns list of yamls +// found in the yaml. If an error occurs, returns objects that have been parsed so far too. +func SplitYAMLToString(yamlData []byte) ([]string, error) { + // Similar way to what kubectl does + // https://github.com/kubernetes/cli-runtime/blob/master/pkg/resource/visitor.go#L573-L600 + // Ideally k8s.io/cli-runtime/pkg/resource.Builder should be used instead of this method. + // E.g. Builder does list unpacking and flattening and this code does not. + d := kubeyaml.NewYAMLOrJSONDecoder(bytes.NewReader(yamlData), 4096) + var objs []string + for { + ext := runtime.RawExtension{} + if err := d.Decode(&ext); err != nil { + if err == io.EOF { + break + } + return objs, fmt.Errorf("failed to unmarshal manifest: %v", err) + } + ext.Raw = bytes.TrimSpace(ext.Raw) + if len(ext.Raw) == 0 || bytes.Equal(ext.Raw, []byte("null")) { + continue + } + objs = append(objs, string(ext.Raw)) + } + return objs, nil +} + +// WatchWithRetry returns channel of watch events or errors of failed to call watch API. 
+func WatchWithRetry(ctx context.Context, getWatch func() (watch.Interface, error)) chan struct { + *watch.Event + Error error +} { + ch := make(chan struct { + *watch.Event + Error error + }) + execute := func() (bool, error) { + w, err := getWatch() + if err != nil { + return false, err + } + defer w.Stop() + + for { + select { + case event, ok := <-w.ResultChan(): + if ok { + ch <- struct { + *watch.Event + Error error + }{Event: &event, Error: nil} + } else { + return true, nil + } + case <-ctx.Done(): + return false, nil + } + } + } + go func() { + defer close(ch) + for { + retry, err := execute() + if err != nil { + ch <- struct { + *watch.Event + Error error + }{Error: err} + } + if !retry { + return + } + time.Sleep(time.Second) + } + }() + return ch +} + +func GetDeploymentReplicas(u *unstructured.Unstructured) *int64 { + val, found, err := unstructured.NestedInt64(u.Object, "spec", "replicas") + if !found || err != nil { + return nil + } + return &val +} + +// RetryUntilSucceed keep retrying given action with specified interval until action succeed or specified context is done. +func RetryUntilSucceed(ctx context.Context, interval time.Duration, desc string, log logr.Logger, action func() error) { + pollErr := wait.PollUntilContextCancel(ctx, interval, true, func(ctx context.Context) (bool /*done*/, error) { + log.V(1).Info(fmt.Sprintf("Start %s", desc)) + err := action() + if err == nil { + log.V(1).Info(fmt.Sprintf("Completed %s", desc)) + return true, nil + } + log.V(1).Info(fmt.Sprintf("Failed to %s: %+v, retrying in %v", desc, err, interval)) + return false, nil + }) + if pollErr != nil { + // The only error that can happen here is wait.ErrWaitTimeout if ctx is done. 
+ log.V(1).Info(fmt.Sprintf("Stop retrying %s", desc)) + } +} diff --git a/pkg/gitops-engine/pkg/utils/kube/resource_filter.go b/pkg/gitops-engine/pkg/utils/kube/resource_filter.go new file mode 100644 index 00000000..2a337317 --- /dev/null +++ b/pkg/gitops-engine/pkg/utils/kube/resource_filter.go @@ -0,0 +1,5 @@ +package kube + +type ResourceFilter interface { + IsExcludedResource(group, kind, cluster string) bool +} diff --git a/pkg/gitops-engine/pkg/utils/kube/resource_ops.go b/pkg/gitops-engine/pkg/utils/kube/resource_ops.go new file mode 100644 index 00000000..40e63ace --- /dev/null +++ b/pkg/gitops-engine/pkg/utils/kube/resource_ops.go @@ -0,0 +1,513 @@ +package kube + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "os" + "strings" + + "github.com/go-logr/logr" + "github.com/spf13/cobra" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/printers" + "k8s.io/cli-runtime/pkg/resource" + "k8s.io/client-go/discovery" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/kubectl/pkg/cmd/auth" + "k8s.io/kubectl/pkg/cmd/create" + "k8s.io/kubectl/pkg/cmd/delete" + "k8s.io/kubectl/pkg/cmd/replace" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/scheme" + "k8s.io/kubectl/pkg/util/openapi" + + "github.com/argoproj/gitops-engine/pkg/diff" + "github.com/argoproj/gitops-engine/pkg/utils/io" + "github.com/argoproj/gitops-engine/pkg/utils/tracing" + + "github.com/zapier/kubechecks/pkg/kubectl/apply" +) + +// ResourceOperations provides methods to manage k8s resources +type ResourceOperations interface { + ApplyResource(ctx context.Context, obj *unstructured.Unstructured, dryRunStrategy cmdutil.DryRunStrategy, force, validate, serverSideApply bool, manager string, 
serverSideDiff bool) (string, error)
	ReplaceResource(ctx context.Context, obj *unstructured.Unstructured, dryRunStrategy cmdutil.DryRunStrategy, force bool) (string, error)
	CreateResource(ctx context.Context, obj *unstructured.Unstructured, dryRunStrategy cmdutil.DryRunStrategy, validate bool) (string, error)
	UpdateResource(ctx context.Context, obj *unstructured.Unstructured, dryRunStrategy cmdutil.DryRunStrategy) (*unstructured.Unstructured, error)
}

// kubectlResourceOperations implements ResourceOperations by driving the
// kubectl command implementations (apply/create/replace/auth reconcile)
// rather than issuing raw REST calls directly.
type kubectlResourceOperations struct {
	config        *rest.Config
	log           logr.Logger
	tracer        tracing.Tracer
	onKubectlRun  OnKubectlRunFunc // optional hook invoked before each kubectl-style command; returns a cleanup func
	fact          cmdutil.Factory
	openAPISchema openapi.Resources
}

// commandExecutor runs a single kubectl-style command against the manifest
// stored at fileName, writing command output to ioStreams.
type commandExecutor func(f cmdutil.Factory, ioStreams genericclioptions.IOStreams, fileName string) error

// runResourceCommand marshals obj into a temporary manifest file and runs
// executor against it, returning the command's combined stdout/stderr
// (". "-joined). RBAC resources are additionally reconciled first — see the
// comment below.
func (k *kubectlResourceOperations) runResourceCommand(ctx context.Context, obj *unstructured.Unstructured, dryRunStrategy cmdutil.DryRunStrategy, serverSideDiff bool, executor commandExecutor) (string, error) {
	manifestBytes, err := json.Marshal(obj)
	if err != nil {
		return "", err
	}
	manifestFile, err := os.CreateTemp(io.TempDir, "")
	if err != nil {
		return "", fmt.Errorf("Failed to generate temp file for manifest: %v", err)
	}
	defer io.DeleteFile(manifestFile.Name())
	if _, err = manifestFile.Write(manifestBytes); err != nil {
		return "", fmt.Errorf("Failed to write manifest: %v", err)
	}
	if err = manifestFile.Close(); err != nil {
		return "", fmt.Errorf("Failed to close manifest: %v", err)
	}

	// At verbose level, log the manifest with secret values redacted.
	if k.log.V(1).Enabled() {
		var obj unstructured.Unstructured
		err := json.Unmarshal(manifestBytes, &obj)
		if err != nil {
			return "", err
		}
		redacted, _, err := diff.HideSecretData(&obj, nil)
		if err != nil {
			return "", err
		}
		redactedBytes, err := json.Marshal(redacted)
		if err != nil {
			return "", err
		}
		k.log.V(1).Info(string(redactedBytes))
	}

	var out []string
	// rbac resources are first applied with auth reconcile kubectl feature.
+ // serverSideDiff should avoid this step as the resources are not being actually + // applied but just running in dryrun mode. Also, kubectl auth reconcile doesn't + // currently support running dryrun in server mode. + if obj.GetAPIVersion() == "rbac.authorization.k8s.io/v1" && !serverSideDiff { + outReconcile, err := k.rbacReconcile(ctx, obj, manifestFile.Name(), dryRunStrategy) + if err != nil { + return "", fmt.Errorf("error running rbacReconcile: %s", err) + } + out = append(out, outReconcile) + // We still want to fallthrough and run `kubectl apply` in order set the + // last-applied-configuration annotation in the object. + } + + // Run kubectl apply + ioStreams := genericclioptions.IOStreams{ + In: &bytes.Buffer{}, + Out: &bytes.Buffer{}, + ErrOut: &bytes.Buffer{}, + } + err = executor(k.fact, ioStreams, manifestFile.Name()) + if err != nil { + return "", errors.New(cleanKubectlOutput(err.Error())) + } + if buf := strings.TrimSpace(ioStreams.Out.(*bytes.Buffer).String()); len(buf) > 0 { + out = append(out, buf) + } + if buf := strings.TrimSpace(ioStreams.ErrOut.(*bytes.Buffer).String()); len(buf) > 0 { + out = append(out, buf) + } + return strings.Join(out, ". "), nil +} + +// rbacReconcile will perform reconciliation for RBAC resources. It will run +// the following command: +// +// kubectl auth reconcile +// +// This is preferred over `kubectl apply`, which cannot tolerate changes in +// roleRef, which is an immutable field. 
+// See: https://github.com/kubernetes/kubernetes/issues/66353 +// `auth reconcile` will delete and recreate the resource if necessary +func (k *kubectlResourceOperations) rbacReconcile(ctx context.Context, obj *unstructured.Unstructured, fileName string, dryRunStrategy cmdutil.DryRunStrategy) (string, error) { + cleanup, err := k.processKubectlRun("auth") + if err != nil { + return "", fmt.Errorf("error processing kubectl run auth: %w", err) + } + defer cleanup() + outReconcile, err := k.authReconcile(ctx, obj, fileName, dryRunStrategy) + if err != nil { + return "", fmt.Errorf("error running kubectl auth reconcile: %w", err) + } + return outReconcile, nil +} + +func kubeCmdFactory(kubeconfig, ns string, config *rest.Config) cmdutil.Factory { + kubeConfigFlags := genericclioptions.NewConfigFlags(true) + if ns != "" { + kubeConfigFlags.Namespace = &ns + } + kubeConfigFlags.KubeConfig = &kubeconfig + kubeConfigFlags.WithDiscoveryBurst(config.Burst) + kubeConfigFlags.WithDiscoveryQPS(config.QPS) + kubeConfigFlags.Impersonate = &config.Impersonate.UserName + kubeConfigFlags.ImpersonateUID = &config.Impersonate.UID + kubeConfigFlags.ImpersonateGroup = &config.Impersonate.Groups + matchVersionKubeConfigFlags := cmdutil.NewMatchVersionFlags(kubeConfigFlags) + return cmdutil.NewFactory(matchVersionKubeConfigFlags) +} + +func (k *kubectlResourceOperations) ReplaceResource(ctx context.Context, obj *unstructured.Unstructured, dryRunStrategy cmdutil.DryRunStrategy, force bool) (string, error) { + span := k.tracer.StartSpan("ReplaceResource") + span.SetBaggageItem("kind", obj.GetKind()) + span.SetBaggageItem("name", obj.GetName()) + defer span.Finish() + k.log.Info(fmt.Sprintf("Replacing resource %s/%s in cluster: %s, namespace: %s", obj.GetKind(), obj.GetName(), k.config.Host, obj.GetNamespace())) + return k.runResourceCommand(ctx, obj, dryRunStrategy, false, func(f cmdutil.Factory, ioStreams genericclioptions.IOStreams, fileName string) error { + cleanup, err := 
k.processKubectlRun("replace") + if err != nil { + return err + } + defer cleanup() + + replaceOptions, err := k.newReplaceOptions(k.config, f, ioStreams, fileName, obj.GetNamespace(), force, dryRunStrategy) + if err != nil { + return err + } + return replaceOptions.Run(f) + }) +} + +func (k *kubectlResourceOperations) CreateResource(ctx context.Context, obj *unstructured.Unstructured, dryRunStrategy cmdutil.DryRunStrategy, validate bool) (string, error) { + gvk := obj.GroupVersionKind() + span := k.tracer.StartSpan("CreateResource") + span.SetBaggageItem("kind", gvk.Kind) + span.SetBaggageItem("name", obj.GetName()) + defer span.Finish() + return k.runResourceCommand(ctx, obj, dryRunStrategy, false, func(f cmdutil.Factory, ioStreams genericclioptions.IOStreams, fileName string) error { + cleanup, err := k.processKubectlRun("create") + if err != nil { + return err + } + defer cleanup() + + createOptions, err := k.newCreateOptions(ioStreams, fileName, dryRunStrategy) + if err != nil { + return err + } + command := &cobra.Command{} + saveConfig := false + command.Flags().BoolVar(&saveConfig, "save-config", false, "") + val := false + command.Flags().BoolVar(&val, "validate", false, "") + if validate { + _ = command.Flags().Set("validate", "true") + } + + return createOptions.RunCreate(f, command) + }) +} + +func (k *kubectlResourceOperations) UpdateResource(ctx context.Context, obj *unstructured.Unstructured, dryRunStrategy cmdutil.DryRunStrategy) (*unstructured.Unstructured, error) { + gvk := obj.GroupVersionKind() + span := k.tracer.StartSpan("UpdateResource") + span.SetBaggageItem("kind", gvk.Kind) + span.SetBaggageItem("name", obj.GetName()) + defer span.Finish() + dynamicIf, err := dynamic.NewForConfig(k.config) + if err != nil { + return nil, err + } + disco, err := discovery.NewDiscoveryClientForConfig(k.config) + if err != nil { + return nil, err + } + apiResource, err := ServerResourceForGroupVersionKind(disco, gvk, "update") + if err != nil { + return nil, 
err + } + resource := gvk.GroupVersion().WithResource(apiResource.Name) + resourceIf := ToResourceInterface(dynamicIf, apiResource, resource, obj.GetNamespace()) + + updateOptions := metav1.UpdateOptions{} + switch dryRunStrategy { + case cmdutil.DryRunClient, cmdutil.DryRunServer: + updateOptions.DryRun = []string{metav1.DryRunAll} + } + return resourceIf.Update(ctx, obj, updateOptions) +} + +// ApplyResource performs an apply of a unstructured resource +func (k *kubectlResourceOperations) ApplyResource(ctx context.Context, obj *unstructured.Unstructured, dryRunStrategy cmdutil.DryRunStrategy, force, validate, serverSideApply bool, manager string, serverSideDiff bool) (string, error) { + span := k.tracer.StartSpan("ApplyResource") + span.SetBaggageItem("kind", obj.GetKind()) + span.SetBaggageItem("name", obj.GetName()) + defer span.Finish() + k.log.WithValues( + "dry-run", [...]string{"none", "client", "server"}[dryRunStrategy], + "manager", manager, + "serverSideApply", serverSideApply, + "serverSideDiff", serverSideDiff).Info(fmt.Sprintf("Applying resource %s/%s in cluster: %s, namespace: %s", obj.GetKind(), obj.GetName(), k.config.Host, obj.GetNamespace())) + + return k.runResourceCommand(ctx, obj, dryRunStrategy, serverSideDiff, func(f cmdutil.Factory, ioStreams genericclioptions.IOStreams, fileName string) error { + cleanup, err := k.processKubectlRun("apply") + if err != nil { + return err + } + defer cleanup() + + applyOpts, err := k.newApplyOptions(ioStreams, obj, fileName, validate, force, serverSideApply, dryRunStrategy, manager, serverSideDiff) + if err != nil { + return err + } + return applyOpts.Run() + }) +} + +func (k *kubectlResourceOperations) newApplyOptions(ioStreams genericclioptions.IOStreams, obj *unstructured.Unstructured, fileName string, validate bool, force, serverSideApply bool, dryRunStrategy cmdutil.DryRunStrategy, manager string, serverSideDiff bool) (*apply.ApplyOptions, error) { + flags := apply.NewApplyFlags(ioStreams) + o := 
&apply.ApplyOptions{ + IOStreams: ioStreams, + VisitedUids: sets.Set[types.UID]{}, + VisitedNamespaces: sets.Set[string]{}, + Recorder: genericclioptions.NoopRecorder{}, + PrintFlags: flags.PrintFlags, + Overwrite: true, + OpenAPIPatch: true, + ServerSideApply: serverSideApply, + } + dynamicClient, err := dynamic.NewForConfig(k.config) + if err != nil { + return nil, err + } + o.DynamicClient = dynamicClient + o.DeleteOptions, err = delete.NewDeleteFlags("").ToOptions(dynamicClient, ioStreams) + if err != nil { + return nil, err + } + o.OpenAPIGetter = k.fact + o.DryRunStrategy = dryRunStrategy + o.FieldManager = manager + validateDirective := metav1.FieldValidationIgnore + if validate { + validateDirective = metav1.FieldValidationStrict + } + o.Validator, err = k.fact.Validator(validateDirective) + if err != nil { + return nil, err + } + o.Builder = k.fact.NewBuilder() + o.Mapper, err = k.fact.ToRESTMapper() + if err != nil { + return nil, err + } + + o.ToPrinter = func(operation string) (printers.ResourcePrinter, error) { + o.PrintFlags.NamePrintFlags.Operation = operation + switch o.DryRunStrategy { + case cmdutil.DryRunClient: + err = o.PrintFlags.Complete("%s (dry run)") + if err != nil { + return nil, err + } + case cmdutil.DryRunServer: + if serverSideDiff { + // managedFields are required by server-side diff to identify + // changes made by mutation webhooks. 
+ o.PrintFlags.JSONYamlPrintFlags.ShowManagedFields = true + p, err := o.PrintFlags.JSONYamlPrintFlags.ToPrinter("json") + if err != nil { + return nil, fmt.Errorf("error configuring server-side diff printer: %w", err) + } + return p, nil + } else { + err = o.PrintFlags.Complete("%s (server dry run)") + if err != nil { + return nil, fmt.Errorf("error configuring server dryrun printer: %w", err) + } + } + } + return o.PrintFlags.ToPrinter() + } + o.DeleteOptions.FilenameOptions.Filenames = []string{fileName} + o.Namespace = obj.GetNamespace() + o.DeleteOptions.ForceDeletion = force + o.DryRunStrategy = dryRunStrategy + if manager != "" { + o.FieldManager = manager + } + if serverSideApply || serverSideDiff { + o.ForceConflicts = true + } + return o, nil +} + +func (k *kubectlResourceOperations) newCreateOptions(ioStreams genericclioptions.IOStreams, fileName string, dryRunStrategy cmdutil.DryRunStrategy) (*create.CreateOptions, error) { + o := create.NewCreateOptions(ioStreams) + + recorder, err := o.RecordFlags.ToRecorder() + if err != nil { + return nil, err + } + o.Recorder = recorder + + switch dryRunStrategy { + case cmdutil.DryRunClient: + err = o.PrintFlags.Complete("%s (dry run)") + if err != nil { + return nil, err + } + case cmdutil.DryRunServer: + err = o.PrintFlags.Complete("%s (server dry run)") + if err != nil { + return nil, err + } + } + o.DryRunStrategy = dryRunStrategy + + printer, err := o.PrintFlags.ToPrinter() + if err != nil { + return nil, err + } + o.PrintObj = func(obj runtime.Object) error { + return printer.PrintObj(obj, o.Out) + } + o.FilenameOptions.Filenames = []string{fileName} + return o, nil +} + +func (k *kubectlResourceOperations) newReplaceOptions(config *rest.Config, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, fileName string, namespace string, force bool, dryRunStrategy cmdutil.DryRunStrategy) (*replace.ReplaceOptions, error) { + o := replace.NewReplaceOptions(ioStreams) + + recorder, err := 
o.RecordFlags.ToRecorder() + if err != nil { + return nil, err + } + o.Recorder = recorder + + dynamicClient, err := dynamic.NewForConfig(config) + if err != nil { + return nil, err + } + + o.DeleteOptions, err = o.DeleteFlags.ToOptions(dynamicClient, o.IOStreams) + if err != nil { + return nil, err + } + + o.Builder = func() *resource.Builder { + return f.NewBuilder() + } + + switch dryRunStrategy { + case cmdutil.DryRunClient: + err = o.PrintFlags.Complete("%s (dry run)") + if err != nil { + return nil, err + } + case cmdutil.DryRunServer: + err = o.PrintFlags.Complete("%s (server dry run)") + if err != nil { + return nil, err + } + } + o.DryRunStrategy = dryRunStrategy + + printer, err := o.PrintFlags.ToPrinter() + if err != nil { + return nil, err + } + o.PrintObj = func(obj runtime.Object) error { + return printer.PrintObj(obj, o.Out) + } + + o.DeleteOptions.FilenameOptions.Filenames = []string{fileName} + o.Namespace = namespace + o.DeleteOptions.ForceDeletion = force + return o, nil +} + +func newReconcileOptions(f cmdutil.Factory, kubeClient *kubernetes.Clientset, fileName string, ioStreams genericclioptions.IOStreams, namespace string, dryRunStrategy cmdutil.DryRunStrategy) (*auth.ReconcileOptions, error) { + o := auth.NewReconcileOptions(ioStreams) + o.RBACClient = kubeClient.RbacV1() + o.NamespaceClient = kubeClient.CoreV1() + o.FilenameOptions.Filenames = []string{fileName} + o.DryRun = dryRunStrategy != cmdutil.DryRunNone + + r := f.NewBuilder(). + WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...). + NamespaceParam(namespace).DefaultNamespace(). + FilenameParam(false, o.FilenameOptions). + Flatten(). 
+ Do() + o.Visitor = r + + if o.DryRun { + err := o.PrintFlags.Complete("%s (dry run)") + if err != nil { + return nil, err + } + } + printer, err := o.PrintFlags.ToPrinter() + if err != nil { + return nil, err + } + o.PrintObject = printer.PrintObj + return o, nil +} + +func (k *kubectlResourceOperations) authReconcile(ctx context.Context, obj *unstructured.Unstructured, manifestFile string, dryRunStrategy cmdutil.DryRunStrategy) (string, error) { + kubeClient, err := kubernetes.NewForConfig(k.config) + if err != nil { + return "", err + } + // `kubectl auth reconcile` has a side effect of auto-creating namespaces if it doesn't exist. + // See: https://github.com/kubernetes/kubernetes/issues/71185. This is behavior which we do + // not want. We need to check if the namespace exists, before know if it is safe to run this + // command. Skip this for dryRuns. + if dryRunStrategy == cmdutil.DryRunNone && obj.GetNamespace() != "" { + _, err = kubeClient.CoreV1().Namespaces().Get(ctx, obj.GetNamespace(), metav1.GetOptions{}) + if err != nil { + return "", err + } + } + ioStreams := genericclioptions.IOStreams{ + In: &bytes.Buffer{}, + Out: &bytes.Buffer{}, + ErrOut: &bytes.Buffer{}, + } + reconcileOpts, err := newReconcileOptions(k.fact, kubeClient, manifestFile, ioStreams, obj.GetNamespace(), dryRunStrategy) + if err != nil { + return "", fmt.Errorf("error calling newReconcileOptions: %w", err) + } + err = reconcileOpts.Validate() + if err != nil { + return "", errors.New(cleanKubectlOutput(err.Error())) + } + err = reconcileOpts.RunReconcile() + if err != nil { + return "", errors.New(cleanKubectlOutput(err.Error())) + } + + var out []string + if buf := strings.TrimSpace(ioStreams.Out.(*bytes.Buffer).String()); len(buf) > 0 { + out = append(out, buf) + } + if buf := strings.TrimSpace(ioStreams.ErrOut.(*bytes.Buffer).String()); len(buf) > 0 { + out = append(out, buf) + } + return strings.Join(out, ". 
"), nil +} + +func (k *kubectlResourceOperations) processKubectlRun(cmd string) (CleanupFunc, error) { + if k.onKubectlRun != nil { + return k.onKubectlRun(cmd) + } + return func() {}, nil +} diff --git a/pkg/gitops-engine/pkg/utils/kube/uniqueprotomodels.go b/pkg/gitops-engine/pkg/utils/kube/uniqueprotomodels.go new file mode 100644 index 00000000..93013d49 --- /dev/null +++ b/pkg/gitops-engine/pkg/utils/kube/uniqueprotomodels.go @@ -0,0 +1,191 @@ +package kube + +import ( + "fmt" + "sort" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kube-openapi/pkg/util/proto" +) + +/** +The upstream Kubernetes NewGVKParser method causes problems for Argo CD. +https://github.com/kubernetes/apimachinery/blob/eb26334eeb0f769be8f0c5665ff34713cfdec83e/pkg/util/managedfields/gvkparser.go#L73 + +The function fails in instances where it is probably more desirable for Argo CD to simply ignore the error and move on. +But since the upstream implementation doesn't offer the option to ignore the error, we have to mutate the input to the +function to completely avoid the case that can produce the error. + +When encountering the error from NewGVKParser, we used to just set the internal GVKParser instance to nil, log the +error as info, and move on. + +But Argo CD increasingly relies on the GVKParser to produce reliable diffs, especially with server-side diffing. And +we're better off with an incorrectly-initialized GVKParser than no GVKParser at all. + +To understand why NewGVKParser fails, we need to understand how Kubernetes constructs its OpenAPI models. + +Kubernetes contains a built-in OpenAPI document containing the `definitions` for every built-in Kubernetes API. This +document includes shared structs like APIResourceList. Some of these definitions include an +x-kubernetes-group-version-kind extension. + +Aggregated APIs produce their own OpenAPI documents, which are merged with the built-in OpenAPI document. 
The aggregated +API documents generally include all the definitions of all the structs which are used anywhere by the API. This often +includes some of the same structs as the built-in OpenAPI document. + +So when Kubernetes constructs the complete OpenAPI document (the one served at /openapi/v2), it merges the built-in +OpenAPI document with the aggregated API OpenAPI documents. + +When the aggregator encounters two different definitions for the same struct (as determined by a deep compare) with the +same GVK (as determined by the value in the x-kubernetes-group-version-kind extension), it appends a `_vX` suffix to the +definition name in the OpenAPI document (where X is the count of the number of times the aggregator has seen the same +definition). Basically, it's communicating "different APIs have different opinions about the structure of structs with +this GVK, so I'm going to give them different names and let you sort it out." +https://github.com/kubernetes/kube-openapi/blob/b456828f718bab62dc3013d192665eb3d17f8fe9/pkg/aggregator/aggregator.go#L238-L279 + +This behavior is fine from the perspective of a typical Kubernetes API user. They download the OpenAPI document, they +see that there are two different "opinions" about the structure of a struct, and they can choose which one they want to +rely on. + +But Argo CD has to be generic. We need to take the provided OpenAPI document and use it to construct a GVKParser. And +the GVKParser (reasonably) rejects the OpenAPI document if it contains two definitions for the same struct. + +So we have to do some work to make the OpenAPI document palatable to the GVKParser. We have to remove the duplicate +definitions. Specifically, we take the first one and log a warning for each subsequent definition with the same GVK. + +In practice, this probably generally appears when a common aggregated API was built at a time significantly before the +current Kubernetes version. 
The most common case is that the metrics server is built against an older version of the +Kubernetes libraries, using old versions of the structs. When the metrics server is updated to use the latest version of +the Kubernetes libraries, the problems go away, because the aggregated API and Kubernetes agree about the shape of the +struct. + +Using the first encountered definition is imperfect and could result in unreliable diffs. But it's better than +constructing completely-wrong diffs due to the lack of a GVKParser. +*/ + +// uniqueModels is a model provider that ensures that no two models share the same gvk. Use newUniqueModels to +// initialize it and enforce uniqueness. +type uniqueModels struct { + models map[string]proto.Schema +} + +// LookupModel is public through the interface of Models. It +// returns a visitable schema from the given model name. +// Copied verbatim from here: https://github.com/kubernetes/kube-openapi/blob/b456828f718bab62dc3013d192665eb3d17f8fe9/pkg/util/proto/document.go#L322-L326 +func (d *uniqueModels) LookupModel(model string) proto.Schema { + return d.models[model] +} + +// Copied verbatim from here: https://github.com/kubernetes/kube-openapi/blob/b456828f718bab62dc3013d192665eb3d17f8fe9/pkg/util/proto/document.go#L328-L337 +func (d *uniqueModels) ListModels() []string { + models := []string{} + + for model := range d.models { + models = append(models, model) + } + + sort.Strings(models) + return models +} + +// newUniqueModels returns a new uniqueModels instance and a list of warnings for models that share the same gvk. 
func newUniqueModels(models proto.Models) (proto.Models, []schema.GroupVersionKind) {
	var taintedGVKs []schema.GroupVersionKind
	// gvks maps each GVK already accepted to the model name that claimed it.
	gvks := map[schema.GroupVersionKind]string{}
	um := &uniqueModels{models: map[string]proto.Schema{}}
	for _, modelName := range models.ListModels() {
		model := models.LookupModel(modelName)
		if model == nil {
			panic(fmt.Sprintf("ListModels returns a model that can't be looked-up for: %v", modelName))
		}
		// NOTE(review): gvkList is recomputed inside modelGvkWasAlreadyProcessed;
		// kept as-is to match the upstream behavior this code mirrors.
		gvkList := parseGroupVersionKind(model)
		gvk, wasProcessed := modelGvkWasAlreadyProcessed(model, gvks)
		if !wasProcessed {
			// First model claiming these GVKs wins; keep it.
			um.models[modelName] = model

			// Add GVKs to the map, so we can detect a duplicate GVK later.
			for _, gvk := range gvkList {
				if len(gvk.Kind) > 0 {
					gvks[gvk] = modelName
				}
			}
		} else {
			// A prior model already claimed this GVK; drop this model and
			// report the GVK so the caller can warn about it.
			taintedGVKs = append(taintedGVKs, gvk)
		}
	}
	return um, taintedGVKs
}

// modelGvkWasAlreadyProcessed inspects a model to determine if it would trigger a duplicate GVK error. The gvks map
// holds the GVKs of all the models that have already been processed. If the model would trigger a duplicate GVK error,
// the function returns the GVK that would trigger the error and true. Otherwise, it returns an empty GVK and false.
func modelGvkWasAlreadyProcessed(model proto.Schema, gvks map[schema.GroupVersionKind]string) (schema.GroupVersionKind, bool) {
	gvkList := parseGroupVersionKind(model)
	// Not every model has a GVK extension specified. For those models, this loop will be skipped.
	for _, gvk := range gvkList {
		// The kind length check is copied from managedfields.NewGVKParser. It's unclear what edge case it's handling,
		// but the behavior of this function should match NewGVKParser.
		if len(gvk.Kind) > 0 {
			_, ok := gvks[gvk]
			if ok {
				// This is the only condition under which NewGVKParser would return a duplicate GVK error.
+ return gvk, true + } + } + } + return schema.GroupVersionKind{}, false +} + +// groupVersionKindExtensionKey is the key used to lookup the +// GroupVersionKind value for an object definition from the +// definition's "extensions" map. +// Copied verbatim from: https://github.com/kubernetes/apimachinery/blob/eb26334eeb0f769be8f0c5665ff34713cfdec83e/pkg/util/managedfields/gvkparser.go#L29-L32 +const groupVersionKindExtensionKey = "x-kubernetes-group-version-kind" + +// parseGroupVersionKind gets and parses GroupVersionKind from the extension. Returns empty if it doesn't have one. +// Copied verbatim from: https://github.com/kubernetes/apimachinery/blob/eb26334eeb0f769be8f0c5665ff34713cfdec83e/pkg/util/managedfields/gvkparser.go#L82-L128 +func parseGroupVersionKind(s proto.Schema) []schema.GroupVersionKind { + extensions := s.GetExtensions() + + gvkListResult := []schema.GroupVersionKind{} + + // Get the extensions + gvkExtension, ok := extensions[groupVersionKindExtensionKey] + if !ok { + return []schema.GroupVersionKind{} + } + + // gvk extension must be a list of at least 1 element. + gvkList, ok := gvkExtension.([]interface{}) + if !ok { + return []schema.GroupVersionKind{} + } + + for _, gvk := range gvkList { + // gvk extension list must be a map with group, version, and + // kind fields + gvkMap, ok := gvk.(map[interface{}]interface{}) + if !ok { + continue + } + group, ok := gvkMap["group"].(string) + if !ok { + continue + } + version, ok := gvkMap["version"].(string) + if !ok { + continue + } + kind, ok := gvkMap["kind"].(string) + if !ok { + continue + } + + gvkListResult = append(gvkListResult, schema.GroupVersionKind{ + Group: group, + Version: version, + Kind: kind, + }) + } + + return gvkListResult +} diff --git a/pkg/kubectl/apply/apply.go b/pkg/kubectl/apply/apply.go new file mode 100644 index 00000000..6ea921eb --- /dev/null +++ b/pkg/kubectl/apply/apply.go @@ -0,0 +1,1131 @@ +/* +Copyright 2014 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apply + +import ( + "context" + "fmt" + "io" + "net/http" + + "github.com/spf13/cobra" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/genericiooptions" + "k8s.io/cli-runtime/pkg/printers" + "k8s.io/cli-runtime/pkg/resource" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/openapi3" + "k8s.io/client-go/util/csaupgrade" + "k8s.io/component-base/version" + "k8s.io/klog/v2" + cmddelete "k8s.io/kubectl/pkg/cmd/delete" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/scheme" + "k8s.io/kubectl/pkg/util" + "k8s.io/kubectl/pkg/util/i18n" + "k8s.io/kubectl/pkg/util/openapi" + "k8s.io/kubectl/pkg/util/prune" + "k8s.io/kubectl/pkg/util/templates" + "k8s.io/kubectl/pkg/validation" +) + +// ApplyFlags directly reflect the information that CLI is gathering via flags. They will be converted to Options, which +// reflect the runtime requirements for the command. 
This structure reduces the transformation to wiring and makes +// the logic itself easy to unit test +type ApplyFlags struct { + RecordFlags *genericclioptions.RecordFlags + PrintFlags *genericclioptions.PrintFlags + + DeleteFlags *cmddelete.DeleteFlags + + FieldManager string + Selector string + Prune bool + PruneResources []prune.Resource + ApplySetRef string + All bool + Overwrite bool + OpenAPIPatch bool + Subresource string + + PruneAllowlist []string + + genericiooptions.IOStreams +} + +// ApplyOptions defines flags and other configuration parameters for the `apply` command +type ApplyOptions struct { + Recorder genericclioptions.Recorder + + PrintFlags *genericclioptions.PrintFlags + ToPrinter func(string) (printers.ResourcePrinter, error) + + DeleteOptions *cmddelete.DeleteOptions + + ServerSideApply bool + ForceConflicts bool + FieldManager string + Selector string + DryRunStrategy cmdutil.DryRunStrategy + Prune bool + PruneResources []prune.Resource + cmdBaseName string + All bool + Overwrite bool + OpenAPIPatch bool + Subresource string + + ValidationDirective string + Validator validation.Schema + Builder *resource.Builder + Mapper meta.RESTMapper + DynamicClient dynamic.Interface + OpenAPIGetter openapi.OpenAPIResourcesGetter + OpenAPIV3Root openapi3.Root + + Namespace string + EnforceNamespace bool + + genericiooptions.IOStreams + + // Objects (and some denormalized data) which are to be + // applied. The standard way to fill in this structure + // is by calling "GetObjects()", which will use the + // resource builder if "objectsCached" is false. The other + // way to set this field is to use "SetObjects()". + // Subsequent calls to "GetObjects()" after setting would + // not call the resource builder; only return the set objects. + objects []*resource.Info + objectsCached bool + + // Stores visited objects/namespaces for later use + // calculating the set of objects to prune. 
+ VisitedUids sets.Set[types.UID] + VisitedNamespaces sets.Set[string] + + // Function run after the objects are generated and + // stored in the "objects" field, but before the + // apply is run on these objects. + PreProcessorFn func() error + // Function run after all objects have been applied. + // The standard PostProcessorFn is "PrintAndPrunePostProcessor()". + PostProcessorFn func() error + + // ApplySet tracks the set of objects that have been applied, for the purposes of pruning. + // See git.k8s.io/enhancements/keps/sig-cli/3659-kubectl-apply-prune + ApplySet *ApplySet +} + +var ( + applyLong = templates.LongDesc(i18n.T(` + Apply a configuration to a resource by file name or stdin. + The resource name must be specified. This resource will be created if it doesn't exist yet. + To use 'apply', always create the resource initially with either 'apply' or 'create --save-config'. + + JSON and YAML formats are accepted. + + Alpha Disclaimer: the --prune functionality is not yet complete. Do not use unless you are aware of what the current state is. See https://issues.k8s.io/34274.`)) + + applyExample = templates.Examples(i18n.T(` + # Apply the configuration in pod.json to a pod + kubectl apply -f ./pod.json + + # Apply resources from a directory containing kustomization.yaml - e.g. 
dir/kustomization.yaml + kubectl apply -k dir/ + + # Apply the JSON passed into stdin to a pod + cat pod.json | kubectl apply -f - + + # Apply the configuration from all files that end with '.json' + kubectl apply -f '*.json' + + # Note: --prune is still in Alpha + # Apply the configuration in manifest.yaml that matches label app=nginx and delete all other resources that are not in the file and match label app=nginx + kubectl apply --prune -f manifest.yaml -l app=nginx + + # Apply the configuration in manifest.yaml and delete all the other config maps that are not in the file + kubectl apply --prune -f manifest.yaml --all --prune-allowlist=core/v1/ConfigMap`)) + + warningNoLastAppliedConfigAnnotation = "Warning: resource %[1]s is missing the %[2]s annotation which is required by %[3]s apply. %[3]s apply should only be used on resources created declaratively by either %[3]s create --save-config or %[3]s apply. The missing annotation will be patched automatically.\n" + warningChangesOnDeletingResource = "Warning: Detected changes to resource %[1]s which is currently being deleted.\n" + warningMigrationLastAppliedFailed = "Warning: failed to migrate kubectl.kubernetes.io/last-applied-configuration for Server-Side Apply. This is non-fatal and will be retried next time you apply. Error: %[1]s\n" + warningMigrationPatchFailed = "Warning: server rejected managed fields migration to Server-Side Apply. This is non-fatal and will be retried next time you apply. Error: %[1]s\n" + warningMigrationReapplyFailed = "Warning: failed to re-apply configuration after performing Server-Side Apply migration. This is non-fatal and will be retried next time you apply. 
Error: %[1]s\n" +) + +var ApplySetToolVersion = version.Get().GitVersion + +// NewApplyFlags returns a default ApplyFlags +func NewApplyFlags(streams genericiooptions.IOStreams) *ApplyFlags { + return &ApplyFlags{ + RecordFlags: genericclioptions.NewRecordFlags(), + DeleteFlags: cmddelete.NewDeleteFlags("The files that contain the configurations to apply."), + PrintFlags: genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme), + + Overwrite: true, + OpenAPIPatch: true, + + IOStreams: streams, + } +} + +// NewCmdApply creates the `apply` command +func NewCmdApply(baseName string, f cmdutil.Factory, ioStreams genericiooptions.IOStreams) *cobra.Command { + flags := NewApplyFlags(ioStreams) + + cmd := &cobra.Command{ + Use: "apply (-f FILENAME | -k DIRECTORY)", + DisableFlagsInUseLine: true, + Short: i18n.T("Apply a configuration to a resource by file name or stdin"), + Long: applyLong, + Example: applyExample, + Run: func(cmd *cobra.Command, args []string) { + o, err := flags.ToOptions(f, cmd, baseName, args) + cmdutil.CheckErr(err) + cmdutil.CheckErr(o.Validate()) + cmdutil.CheckErr(o.Run()) + }, + } + + flags.AddFlags(cmd) + + // apply subcommands + cmd.AddCommand(NewCmdApplyViewLastApplied(f, flags.IOStreams)) + cmd.AddCommand(NewCmdApplySetLastApplied(f, flags.IOStreams)) + cmd.AddCommand(NewCmdApplyEditLastApplied(f, flags.IOStreams)) + + return cmd +} + +// AddFlags registers flags for a cli +func (flags *ApplyFlags) AddFlags(cmd *cobra.Command) { + // bind flag structs + flags.DeleteFlags.AddFlags(cmd) + flags.RecordFlags.AddFlags(cmd) + flags.PrintFlags.AddFlags(cmd) + + cmdutil.AddValidateFlags(cmd) + cmdutil.AddDryRunFlag(cmd) + cmdutil.AddServerSideApplyFlags(cmd) + cmdutil.AddFieldManagerFlagVar(cmd, &flags.FieldManager, FieldManagerClientSideApply) + cmdutil.AddLabelSelectorFlagVar(cmd, &flags.Selector) + cmdutil.AddPruningFlags(cmd, &flags.Prune, &flags.PruneAllowlist, &flags.All, &flags.ApplySetRef) + 
cmd.Flags().BoolVar(&flags.Overwrite, "overwrite", flags.Overwrite, "Automatically resolve conflicts between the modified and live configuration by using values from the modified configuration") + cmd.Flags().BoolVar(&flags.OpenAPIPatch, "openapi-patch", flags.OpenAPIPatch, "If true, use openapi to calculate diff when the openapi presents and the resource can be found in the openapi spec. Otherwise, fall back to use baked-in types.") + cmdutil.AddSubresourceFlags(cmd, &flags.Subresource, "If specified, apply will operate on the subresource of the requested object. Only allowed when using --server-side.") +} + +// ToOptions converts from CLI inputs to runtime inputs +func (flags *ApplyFlags) ToOptions(f cmdutil.Factory, cmd *cobra.Command, baseName string, args []string) (*ApplyOptions, error) { + if len(args) != 0 { + return nil, cmdutil.UsageErrorf(cmd, "Unexpected args: %v", args) + } + + serverSideApply := cmdutil.GetServerSideApplyFlag(cmd) + forceConflicts := cmdutil.GetForceConflictsFlag(cmd) + dryRunStrategy, err := cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return nil, err + } + + dynamicClient, err := f.DynamicClient() + if err != nil { + return nil, err + } + + fieldManager := GetApplyFieldManagerFlag(cmd, serverSideApply) + + // allow for a success message operation to be specified at print time + toPrinter := func(operation string) (printers.ResourcePrinter, error) { + flags.PrintFlags.NamePrintFlags.Operation = operation + cmdutil.PrintFlagsWithDryRunStrategy(flags.PrintFlags, dryRunStrategy) + return flags.PrintFlags.ToPrinter() + } + + flags.RecordFlags.Complete(cmd) + recorder, err := flags.RecordFlags.ToRecorder() + if err != nil { + return nil, err + } + + deleteOptions, err := flags.DeleteFlags.ToOptions(dynamicClient, flags.IOStreams) + if err != nil { + return nil, err + } + + err = deleteOptions.FilenameOptions.RequireFilenameOrKustomize() + if err != nil { + return nil, err + } + + var openAPIV3Root openapi3.Root + if 
!cmdutil.OpenAPIV3Patch.IsDisabled() { + openAPIV3Client, err := f.OpenAPIV3Client() + if err == nil { + openAPIV3Root = openapi3.NewRoot(openAPIV3Client) + } else { + klog.V(4).Infof("warning: OpenAPI V3 Patch is enabled but is unable to be loaded. Will fall back to OpenAPI V2") + } + } + + validationDirective, err := cmdutil.GetValidationDirective(cmd) + if err != nil { + return nil, err + } + validator, err := f.Validator(validationDirective) + if err != nil { + return nil, err + } + builder := f.NewBuilder() + mapper, err := f.ToRESTMapper() + if err != nil { + return nil, err + } + + namespace, enforceNamespace, err := f.ToRawKubeConfigLoader().Namespace() + if err != nil { + return nil, err + } + + var applySet *ApplySet + if flags.ApplySetRef != "" { + parent, err := ParseApplySetParentRef(flags.ApplySetRef, mapper) + if err != nil { + return nil, fmt.Errorf("invalid parent reference %q: %w", flags.ApplySetRef, err) + } + // ApplySet uses the namespace value from the flag, but not from the kubeconfig or defaults + // This means the namespace flag is required when using a namespaced parent. + if enforceNamespace && parent.IsNamespaced() { + parent.Namespace = namespace + } + tooling := ApplySetTooling{Name: baseName, Version: ApplySetToolVersion} + restClient, err := f.UnstructuredClientForMapping(parent.RESTMapping) + if err != nil { + return nil, fmt.Errorf("failed to initialize RESTClient for ApplySet: %w", err) + } + if restClient == nil { + return nil, fmt.Errorf("could not build RESTClient for ApplySet") + } + applySet = NewApplySet(parent, tooling, mapper, restClient) + } + if flags.Prune { + flags.PruneResources, err = prune.ParseResources(mapper, flags.PruneAllowlist) + if err != nil { + return nil, err + } + } + + o := &ApplyOptions{ + // Store baseName for use in printing warnings / messages involving the base command name. + // This is useful for downstream command that wrap this one. 
+ cmdBaseName: baseName, + + PrintFlags: flags.PrintFlags, + + DeleteOptions: deleteOptions, + ToPrinter: toPrinter, + ServerSideApply: serverSideApply, + ForceConflicts: forceConflicts, + FieldManager: fieldManager, + Selector: flags.Selector, + DryRunStrategy: dryRunStrategy, + Prune: flags.Prune, + PruneResources: flags.PruneResources, + All: flags.All, + Overwrite: flags.Overwrite, + OpenAPIPatch: flags.OpenAPIPatch, + Subresource: flags.Subresource, + + Recorder: recorder, + Namespace: namespace, + EnforceNamespace: enforceNamespace, + Validator: validator, + ValidationDirective: validationDirective, + Builder: builder, + Mapper: mapper, + DynamicClient: dynamicClient, + OpenAPIGetter: f, + OpenAPIV3Root: openAPIV3Root, + + IOStreams: flags.IOStreams, + + objects: []*resource.Info{}, + objectsCached: false, + + VisitedUids: sets.New[types.UID](), + VisitedNamespaces: sets.New[string](), + + ApplySet: applySet, + } + + o.PostProcessorFn = o.PrintAndPrunePostProcessor() + + return o, nil +} + +// Validate verifies if ApplyOptions are valid and without conflicts. 
+func (o *ApplyOptions) Validate() error { + if o.ForceConflicts && !o.ServerSideApply { + return fmt.Errorf("--force-conflicts only works with --server-side") + } + + if o.DryRunStrategy == cmdutil.DryRunClient && o.ServerSideApply { + return fmt.Errorf("--dry-run=client doesn't work with --server-side (did you mean --dry-run=server instead?)") + } + + if o.ServerSideApply && o.DeleteOptions.ForceDeletion { + return fmt.Errorf("--force cannot be used with --server-side") + } + + if o.DryRunStrategy == cmdutil.DryRunServer && o.DeleteOptions.ForceDeletion { + return fmt.Errorf("--dry-run=server cannot be used with --force") + } + + if o.All && len(o.Selector) > 0 { + return fmt.Errorf("cannot set --all and --selector at the same time") + } + + if o.ApplySet != nil { + if !o.Prune { + return fmt.Errorf("--applyset requires --prune") + } + if err := o.ApplySet.Validate(context.TODO(), o.DynamicClient); err != nil { + return err + } + } + if o.Prune { + // Do not force the recreation of an object(s) if we're pruning; this can cause + // undefined behavior since object UID's change. + if o.DeleteOptions.ForceDeletion { + return fmt.Errorf("--force cannot be used with --prune") + } + + if o.ApplySet != nil { + if o.All { + return fmt.Errorf("--all is incompatible with --applyset") + } else if o.Selector != "" { + return fmt.Errorf("--selector is incompatible with --applyset") + } else if len(o.PruneResources) > 0 { + return fmt.Errorf("--prune-allowlist is incompatible with --applyset") + } + } else { + if !o.All && o.Selector == "" { + return fmt.Errorf("all resources selected for prune without explicitly passing --all. To prune all resources, pass the --all flag. 
If you did not mean to prune all resources, specify a label selector") + } + if o.ServerSideApply { + return fmt.Errorf("--prune is in alpha and doesn't currently work on objects created by server-side apply") + } + } + } + if len(o.Subresource) > 0 && !o.ServerSideApply { + return fmt.Errorf("--subresource can only be specified for --server-side") + } + + return nil +} + +func isIncompatibleServerError(err error) bool { + // 415: Unsupported media type means we're talking to a server which doesn't + // support server-side apply. + if _, ok := err.(*errors.StatusError); !ok { + // Non-StatusError means the error isn't because the server is incompatible. + return false + } + return err.(*errors.StatusError).Status().Code == http.StatusUnsupportedMediaType +} + +// GetObjects returns a (possibly cached) version of all the valid objects to apply +// as a slice of pointer to resource.Info and an error if one or more occurred. +// IMPORTANT: This function can return both valid objects AND an error, since +// "ContinueOnError" is set on the builder. This function should not be called +// until AFTER the "complete" and "validate" methods have been called to ensure that +// the ApplyOptions is filled in and valid. +func (o *ApplyOptions) GetObjects() ([]*resource.Info, error) { + var err error = nil + if !o.objectsCached { + r := o.Builder. + Unstructured(). + Schema(o.Validator). + ContinueOnError(). + NamespaceParam(o.Namespace).DefaultNamespace(). + FilenameParam(o.EnforceNamespace, &o.DeleteOptions.FilenameOptions). + LabelSelectorParam(o.Selector). + Flatten(). + Do() + + o.objects, err = r.Infos() + + if o.ApplySet != nil { + if err := o.ApplySet.AddLabels(o.objects...); err != nil { + return nil, err + } + } + + o.objectsCached = true + } + return o.objects, err +} + +// SetObjects stores the set of objects (as resource.Info) to be +// subsequently applied. 
+func (o *ApplyOptions) SetObjects(infos []*resource.Info) { + o.objects = infos + o.objectsCached = true +} + +// Run executes the `apply` command. +func (o *ApplyOptions) Run() error { + if o.PreProcessorFn != nil { + klog.V(4).Infof("Running apply pre-processor function") + if err := o.PreProcessorFn(); err != nil { + return err + } + } + + // Enforce CLI specified namespace on server request. + if o.EnforceNamespace { + o.VisitedNamespaces.Insert(o.Namespace) + } + + // Generates the objects using the resource builder if they have not + // already been stored by calling "SetObjects()" in the pre-processor. + errs := []error{} + infos, err := o.GetObjects() + if err != nil { + errs = append(errs, err) + } + if len(infos) == 0 && len(errs) == 0 { + return fmt.Errorf("no objects passed to apply") + } + + if o.ApplySet != nil { + if err := o.ApplySet.BeforeApply(infos, o.DryRunStrategy, o.ValidationDirective); err != nil { + return err + } + } + + // Iterate through all objects, applying each one. + for _, info := range infos { + if err := o.applyOneObject(info); err != nil { + errs = append(errs, err) + } + } + // If any errors occurred during apply, then return error (or + // aggregate of errors). 
+ if len(errs) == 1 { + return errs[0] + } + if len(errs) > 1 { + return utilerrors.NewAggregate(errs) + } + + if o.PostProcessorFn != nil { + klog.V(4).Infof("Running apply post-processor function") + if err := o.PostProcessorFn(); err != nil { + return err + } + } + + return nil +} + +func (o *ApplyOptions) applyOneObject(info *resource.Info) error { + o.MarkNamespaceVisited(info) + + if err := o.Recorder.Record(info.Object); err != nil { + klog.V(4).Infof("error recording current command: %v", err) + } + + if len(info.Name) == 0 { + metadata, _ := meta.Accessor(info.Object) + generatedName := metadata.GetGenerateName() + if len(generatedName) > 0 { + return fmt.Errorf("from %s: cannot use generate name with apply", generatedName) + } + } + + helper := resource.NewHelper(info.Client, info.Mapping). + DryRun(o.DryRunStrategy == cmdutil.DryRunServer). + WithFieldManager(o.FieldManager). + WithFieldValidation(o.ValidationDirective) + + if o.ServerSideApply { + // Send the full object to be applied on the server side. + data, err := runtime.Encode(unstructured.UnstructuredJSONScheme, info.Object) + if err != nil { + return cmdutil.AddSourceToErr("serverside-apply", info.Source, err) + } + + options := metav1.PatchOptions{ + Force: &o.ForceConflicts, + } + obj, err := helper. + WithSubresource(o.Subresource). + Patch( + info.Namespace, + info.Name, + types.ApplyPatchType, + data, + &options, + ) + if err != nil { + if isIncompatibleServerError(err) { + err = fmt.Errorf("Server-side apply not available on the server: (%v)", err) + } + if errors.IsConflict(err) { + err = fmt.Errorf(`%v +Please review the fields above--they currently have other managers. Here +are the ways you can resolve this warning: +* If you intend to manage all of these fields, please re-run the apply + command with the `+"`--force-conflicts`"+` flag. 
+* If you do not intend to manage all of the fields, please edit your + manifest to remove references to the fields that should keep their + current managers. +* You may co-own fields by updating your manifest to match the existing + value; in this case, you'll become the manager if the other manager(s) + stop managing the field (remove it from their configuration). +See https://kubernetes.io/docs/reference/using-api/server-side-apply/#conflicts`, err) + } + return err + } + + info.Refresh(obj, true) + + // Migrate managed fields if necessary. + // + // By checking afterward instead of fetching the object beforehand and + // unconditionally fetching we can make 3 network requests in the rare + // case of migration and 1 request if migration is unnecessary. + // + // To check beforehand means 2 requests for most operations, and 3 + // requests in worst case. + if err = o.saveLastApplyAnnotationIfNecessary(helper, info); err != nil { + fmt.Fprintf(o.ErrOut, warningMigrationLastAppliedFailed, err.Error()) + } else if performedMigration, err := o.migrateToSSAIfNecessary(helper, info); err != nil { + // Print-error as a warning. + // This is a non-fatal error because object was successfully applied + // above, but it might have issues since migration failed. + // + // This migration will be re-attempted if necessary upon next + // apply. 
+ fmt.Fprintf(o.ErrOut, warningMigrationPatchFailed, err.Error()) + } else if performedMigration { + if obj, err = helper.Patch( + info.Namespace, + info.Name, + types.ApplyPatchType, + data, + &options, + ); err != nil { + // Re-send original SSA patch (this will allow dropped fields to + // finally be removed) + fmt.Fprintf(o.ErrOut, warningMigrationReapplyFailed, err.Error()) + } else { + info.Refresh(obj, false) + } + } + + WarnIfDeleting(info.Object, o.ErrOut) + + if err := o.MarkObjectVisited(info); err != nil { + return err + } + + if o.shouldPrintObject() { + return nil + } + + printer, err := o.ToPrinter("serverside-applied") + if err != nil { + return err + } + + if err = printer.PrintObj(info.Object, o.Out); err != nil { + return err + } + return nil + } + + // Get the modified configuration of the object. Embed the result + // as an annotation in the modified configuration, so that it will appear + // in the patch sent to the server. + modified, err := util.GetModifiedConfiguration(info.Object, true, unstructured.UnstructuredJSONScheme) + if err != nil { + return cmdutil.AddSourceToErr(fmt.Sprintf("retrieving modified configuration from:\n%s\nfor:", info.String()), info.Source, err) + } + + if err := info.Get(); err != nil { + if !errors.IsNotFound(err) { + return cmdutil.AddSourceToErr(fmt.Sprintf("retrieving current configuration of:\n%s\nfrom server for:", info.String()), info.Source, err) + } + + // Create the resource if it doesn't exist + // First, update the annotation used by kubectl apply + if err := util.CreateApplyAnnotation(info.Object, unstructured.UnstructuredJSONScheme); err != nil { + return cmdutil.AddSourceToErr("creating", info.Source, err) + } + + // prune nulls when client-side apply does a create to match what will happen when client-side applying an update. + // do this after CreateApplyAnnotation so the annotation matches what will be persisted on an update apply of the same manifest. 
+ if u, ok := info.Object.(runtime.Unstructured); ok { + pruneNullsFromMap(u.UnstructuredContent()) + } + + if o.DryRunStrategy != cmdutil.DryRunClient { + // Then create the resource and skip the three-way merge + obj, err := helper.Create(info.Namespace, true, info.Object) + if err != nil { + return cmdutil.AddSourceToErr("creating", info.Source, err) + } + info.Refresh(obj, true) + } + + if err := o.MarkObjectVisited(info); err != nil { + return err + } + + if o.shouldPrintObject() { + return nil + } + + printer, err := o.ToPrinter("created") + if err != nil { + return err + } + if err = printer.PrintObj(info.Object, o.Out); err != nil { + return err + } + return nil + } + + if err := o.MarkObjectVisited(info); err != nil { + return err + } + + if o.DryRunStrategy != cmdutil.DryRunClient { + metadata, _ := meta.Accessor(info.Object) + annotationMap := metadata.GetAnnotations() + if _, ok := annotationMap[corev1.LastAppliedConfigAnnotation]; !ok { + fmt.Fprintf(o.ErrOut, warningNoLastAppliedConfigAnnotation, info.ObjectName(), corev1.LastAppliedConfigAnnotation, o.cmdBaseName) + } + + patcher, err := newPatcher(o, info, helper) + if err != nil { + return err + } + patchBytes, patchedObject, err := patcher.Patch(info.Object, modified, info.Source, info.Namespace, info.Name, o.ErrOut) + if err != nil { + return cmdutil.AddSourceToErr(fmt.Sprintf("applying patch:\n%s\nto:\n%v\nfor:", patchBytes, info), info.Source, err) + } + + info.Refresh(patchedObject, true) + + WarnIfDeleting(info.Object, o.ErrOut) + + if string(patchBytes) == "{}" && !o.shouldPrintObject() { + printer, err := o.ToPrinter("unchanged") + if err != nil { + return err + } + if err = printer.PrintObj(info.Object, o.Out); err != nil { + return err + } + return nil + } + } + + if o.shouldPrintObject() { + return nil + } + + printer, err := o.ToPrinter("configured") + if err != nil { + return err + } + if err = printer.PrintObj(info.Object, o.Out); err != nil { + return err + } + + return nil +} + 
+func pruneNullsFromMap(data map[string]interface{}) { + for k, v := range data { + if v == nil { + delete(data, k) + } else { + pruneNulls(v) + } + } +} +func pruneNullsFromSlice(data []interface{}) { + for _, v := range data { + pruneNulls(v) + } +} +func pruneNulls(v interface{}) { + switch v := v.(type) { + case map[string]interface{}: + pruneNullsFromMap(v) + case []interface{}: + pruneNullsFromSlice(v) + } +} + +// Saves the last-applied-configuration annotation in a separate SSA field manager +// to prevent it from being dropped by users who have transitioned to SSA. +// +// If this operation is not performed, then the last-applied-configuration annotation +// would be removed from the object upon the first SSA usage. We want to keep it +// around for a few releases since it is required to downgrade to +// SSA per [1] and [2]. This code should be removed once the annotation is +// deprecated. +// +// - [1] https://kubernetes.io/docs/reference/using-api/server-side-apply/#downgrading-from-server-side-apply-to-client-side-apply +// - [2] https://github.com/kubernetes/kubernetes/pull/90187 +// +// If the annotation is not already present, or if it is already managed by the +// separate SSA fieldmanager, this is a no-op. +func (o *ApplyOptions) saveLastApplyAnnotationIfNecessary( + helper *resource.Helper, + info *resource.Info, +) error { + if o.FieldManager != fieldManagerServerSideApply { + // There is no point in preserving the annotation if the field manager + // will not remain default. This is because the server will not keep + // the annotation up to date. + return nil + } + + // Send an apply patch with the last-applied-annotation + // so that it is not orphaned by SSA in the following patch: + accessor, err := meta.Accessor(info.Object) + if err != nil { + return err + } + + // Get the current annotations from the object. 
+ annots := accessor.GetAnnotations() + if annots == nil { + annots = map[string]string{} + } + + fieldManager := fieldManagerLastAppliedAnnotation + originalAnnotation, hasAnnotation := annots[corev1.LastAppliedConfigAnnotation] + + // If the annotation does not already exist, we do not do anything + if !hasAnnotation { + return nil + } + + // If there is already an SSA field manager which owns the field, then there + // is nothing to do here. + if owners := csaupgrade.FindFieldsOwners( + accessor.GetManagedFields(), + metav1.ManagedFieldsOperationApply, + lastAppliedAnnotationFieldPath, + ); len(owners) > 0 { + return nil + } + + justAnnotation := &unstructured.Unstructured{} + justAnnotation.SetGroupVersionKind(info.Mapping.GroupVersionKind) + justAnnotation.SetName(accessor.GetName()) + justAnnotation.SetNamespace(accessor.GetNamespace()) + justAnnotation.SetAnnotations(map[string]string{ + corev1.LastAppliedConfigAnnotation: originalAnnotation, + }) + + modified, err := runtime.Encode(unstructured.UnstructuredJSONScheme, justAnnotation) + if err != nil { + return nil + } + + helperCopy := *helper + newObj, err := helperCopy.WithFieldManager(fieldManager).Patch( + info.Namespace, + info.Name, + types.ApplyPatchType, + modified, + nil, + ) + + if err != nil { + return err + } + + return info.Refresh(newObj, false) +} + +// Check if the returned object needs to have its kubectl-client-side-apply +// managed fields migrated server-side-apply. +// +// field ownership metadata is stored in three places: +// - server-side managed fields +// - client-side managed fields +// - and the last_applied_configuration annotation. +// +// The migration merges the client-side-managed fields into the +// server-side-managed fields, leaving the last_applied_configuration +// annotation in place. Server will keep the annotation up to date +// after every server-side-apply where the following conditions are met: +// +// 1. field manager is 'kubectl' +// 2. 
annotation already exists +func (o *ApplyOptions) migrateToSSAIfNecessary( + helper *resource.Helper, + info *resource.Info, +) (migrated bool, err error) { + accessor, err := meta.Accessor(info.Object) + if err != nil { + return false, err + } + + // To determine which field managers were used by kubectl for client-side-apply + // we search for a manager used in `Update` operations which owns the + // last-applied-annotation. + // + // This is the last client-side-apply manager which changed the field. + // + // There may be multiple owners if multiple managers wrote the same exact + // configuration. In this case there are multiple owners, we want to migrate + // them all. + csaManagers := csaupgrade.FindFieldsOwners( + accessor.GetManagedFields(), + metav1.ManagedFieldsOperationUpdate, + lastAppliedAnnotationFieldPath) + + managerNames := sets.New[string]() + for _, entry := range csaManagers { + managerNames.Insert(entry.Manager) + } + + // Re-attempt patch as many times as it is conflicting due to ResourceVersion + // test failing + for i := 0; i < maxPatchRetry; i++ { + var patchData []byte + var obj runtime.Object + + patchData, err = csaupgrade.UpgradeManagedFieldsPatch( + info.Object, managerNames, o.FieldManager) + + if err != nil { + // If patch generation failed there was likely a bug. + return false, err + } else if patchData == nil { + // nil patch data means nothing to do - object is already migrated + return false, nil + } + + // Send the patch to upgrade the managed fields if it is non-nil + obj, err = helper.Patch( + info.Namespace, + info.Name, + types.JSONPatchType, + patchData, + nil, + ) + + if err == nil { + // Stop retrying upon success. 
+ info.Refresh(obj, false) + return true, nil + } else if !errors.IsConflict(err) { + // Only retry if there was a conflict + return false, err + } + + // Refresh the object for next iteration + err = info.Get() + if err != nil { + // If there was an error fetching, return error + return false, err + } + } + + // Reaching this point with non-nil error means there was a conflict and + // max retries was hit + // Return the last error witnessed (which will be a conflict) + return false, err +} + +func (o *ApplyOptions) shouldPrintObject() bool { + // Print object only if output format other than "name" is specified + shouldPrint := false + output := *o.PrintFlags.OutputFormat + shortOutput := output == "name" + if len(output) > 0 && !shortOutput { + shouldPrint = true + } + return shouldPrint +} + +func (o *ApplyOptions) printObjects() error { + + if !o.shouldPrintObject() { + return nil + } + + infos, err := o.GetObjects() + if err != nil { + return err + } + + if len(infos) > 0 { + printer, err := o.ToPrinter("") + if err != nil { + return err + } + + objToPrint := infos[0].Object + if len(infos) > 1 { + objs := []runtime.Object{} + for _, info := range infos { + objs = append(objs, info.Object) + } + list := &corev1.List{ + TypeMeta: metav1.TypeMeta{ + Kind: "List", + APIVersion: "v1", + }, + ListMeta: metav1.ListMeta{}, + } + if err := meta.SetList(list, objs); err != nil { + return err + } + + objToPrint = list + } + if err := printer.PrintObj(objToPrint, o.Out); err != nil { + return err + } + } + + return nil +} + +// MarkNamespaceVisited keeps track of which namespaces the applied +// objects belong to. Used for pruning. +func (o *ApplyOptions) MarkNamespaceVisited(info *resource.Info) { + if info.Namespaced() { + o.VisitedNamespaces.Insert(info.Namespace) + } +} + +// MarkObjectVisited keeps track of UIDs of the applied +// objects. Used for pruning. 
+func (o *ApplyOptions) MarkObjectVisited(info *resource.Info) error { + metadata, err := meta.Accessor(info.Object) + if err != nil { + return err + } + o.VisitedUids.Insert(metadata.GetUID()) + + return nil +} + +// PrintAndPrunePostProcessor returns a function which meets the PostProcessorFn +// function signature. This returned function prints all the +// objects as a list (if configured for that), and prunes the +// objects not applied. The returned function is the standard +// apply post processor. +func (o *ApplyOptions) PrintAndPrunePostProcessor() func() error { + + return func() error { + ctx := context.TODO() + if err := o.printObjects(); err != nil { + return err + } + + if o.Prune { + if cmdutil.ApplySet.IsEnabled() && o.ApplySet != nil { + if err := o.ApplySet.Prune(ctx, o); err != nil { + // Do not update the ApplySet. If pruning failed, we want to keep the superset + // of the previous and current resources in the ApplySet, so that the pruning + // step of the next apply will be able to clean up the set correctly. + return err + } + } else { + p := newPruner(o) + return p.pruneAll(o) + } + } + + return nil + } +} + +const ( + // FieldManagerClientSideApply is the default client-side apply field manager. + // + // The default field manager is not `kubectl-apply` to distinguish from + // server-side apply. + FieldManagerClientSideApply = "kubectl-client-side-apply" + // The default server-side apply field manager is `kubectl` + // instead of a field manager like `kubectl-server-side-apply` + // for backward compatibility to not conflict with old versions + // of kubectl server-side apply where `kubectl` has already been the field manager. 
+ fieldManagerServerSideApply = "kubectl" + + fieldManagerLastAppliedAnnotation = "kubectl-last-applied" +) + +var ( + lastAppliedAnnotationFieldPath = fieldpath.NewSet( + fieldpath.MakePathOrDie( + "metadata", "annotations", + corev1.LastAppliedConfigAnnotation), + ) +) + +// GetApplyFieldManagerFlag gets the field manager for kubectl apply +// if it is not set. +// +// The default field manager is not `kubectl-apply` to distinguish between +// client-side and server-side apply. +func GetApplyFieldManagerFlag(cmd *cobra.Command, serverSide bool) string { + // The field manager flag was set + if cmd.Flag("field-manager").Changed { + return cmdutil.GetFlagString(cmd, "field-manager") + } + + if serverSide { + return fieldManagerServerSideApply + } + + return FieldManagerClientSideApply +} + +// WarnIfDeleting prints a warning if a resource is being deleted +func WarnIfDeleting(obj runtime.Object, stderr io.Writer) { + metadata, _ := meta.Accessor(obj) + if metadata != nil && metadata.GetDeletionTimestamp() != nil { + // just warn the user about the conflict + fmt.Fprintf(stderr, warningChangesOnDeletingResource, metadata.GetName()) + } +} diff --git a/pkg/kubectl/apply/apply_edit_last_applied.go b/pkg/kubectl/apply/apply_edit_last_applied.go new file mode 100644 index 00000000..ec3fcadb --- /dev/null +++ b/pkg/kubectl/apply/apply_edit_last_applied.go @@ -0,0 +1,89 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package apply + +import ( + "github.com/spf13/cobra" + + "k8s.io/cli-runtime/pkg/genericiooptions" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/cmd/util/editor" + "k8s.io/kubectl/pkg/util/completion" + "k8s.io/kubectl/pkg/util/i18n" + "k8s.io/kubectl/pkg/util/templates" +) + +var ( + applyEditLastAppliedLong = templates.LongDesc(i18n.T(` + Edit the latest last-applied-configuration annotations of resources from the default editor. + + The edit-last-applied command allows you to directly edit any API resource you can retrieve via the + command-line tools. It will open the editor defined by your KUBE_EDITOR, or EDITOR + environment variables, or fall back to 'vi' for Linux or 'notepad' for Windows. + You can edit multiple objects, although changes are applied one at a time. The command + accepts file names as well as command-line arguments, although the files you point to must + be previously saved versions of resources. + + The default format is YAML. To edit in JSON, specify "-o json". + + The flag --windows-line-endings can be used to force Windows line endings, + otherwise the default for your operating system will be used. + + In the event an error occurs while updating, a temporary file will be created on disk + that contains your unapplied changes. The most common error when updating a resource + is another editor changing the resource on the server. When this occurs, you will have + to apply your changes to the newer version of the resource, or update your temporary + saved copy to include the latest resource version.`)) + + applyEditLastAppliedExample = templates.Examples(` + # Edit the last-applied-configuration annotations by type/name in YAML + kubectl apply edit-last-applied deployment/nginx + + # Edit the last-applied-configuration annotations by file in JSON + kubectl apply edit-last-applied -f deploy.yaml -o json`) +) + +// NewCmdApplyEditLastApplied created the cobra CLI command for the `apply edit-last-applied` command. 
+func NewCmdApplyEditLastApplied(f cmdutil.Factory, ioStreams genericiooptions.IOStreams) *cobra.Command { + o := editor.NewEditOptions(editor.ApplyEditMode, ioStreams) + + cmd := &cobra.Command{ + Use: "edit-last-applied (RESOURCE/NAME | -f FILENAME)", + DisableFlagsInUseLine: true, + Short: i18n.T("Edit latest last-applied-configuration annotations of a resource/object"), + Long: applyEditLastAppliedLong, + Example: applyEditLastAppliedExample, + ValidArgsFunction: completion.ResourceTypeAndNameCompletionFunc(f), + Run: func(cmd *cobra.Command, args []string) { + cmdutil.CheckErr(o.Complete(f, args, cmd)) + cmdutil.CheckErr(o.Run()) + }, + } + + // bind flag structs + o.RecordFlags.AddFlags(cmd) + o.PrintFlags.AddFlags(cmd) + + usage := "to use to edit the resource" + cmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, usage) + cmd.Flags().BoolVar(&o.WindowsLineEndings, "windows-line-endings", o.WindowsLineEndings, + "Defaults to the line ending native to your platform.") + cmdutil.AddFieldManagerFlagVar(cmd, &o.FieldManager, FieldManagerClientSideApply) + cmdutil.AddValidateFlags(cmd) + + return cmd +} diff --git a/pkg/kubectl/apply/apply_set_last_applied.go b/pkg/kubectl/apply/apply_set_last_applied.go new file mode 100644 index 00000000..02e54cd0 --- /dev/null +++ b/pkg/kubectl/apply/apply_set_last_applied.go @@ -0,0 +1,219 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package apply + +import ( + "bytes" + "fmt" + + "github.com/spf13/cobra" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/genericiooptions" + "k8s.io/cli-runtime/pkg/printers" + "k8s.io/cli-runtime/pkg/resource" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/cmd/util/editor" + "k8s.io/kubectl/pkg/scheme" + "k8s.io/kubectl/pkg/util" + "k8s.io/kubectl/pkg/util/i18n" + "k8s.io/kubectl/pkg/util/templates" +) + +// SetLastAppliedOptions defines options for the `apply set-last-applied` command.` +type SetLastAppliedOptions struct { + CreateAnnotation bool + + PrintFlags *genericclioptions.PrintFlags + PrintObj printers.ResourcePrinterFunc + + FilenameOptions resource.FilenameOptions + + infoList []*resource.Info + namespace string + enforceNamespace bool + dryRunStrategy cmdutil.DryRunStrategy + shortOutput bool + output string + patchBufferList []PatchBuffer + builder *resource.Builder + unstructuredClientForMapping func(mapping *meta.RESTMapping) (resource.RESTClient, error) + + genericiooptions.IOStreams +} + +// PatchBuffer caches changes that are to be applied. +type PatchBuffer struct { + Patch []byte + PatchType types.PatchType +} + +var ( + applySetLastAppliedLong = templates.LongDesc(i18n.T(` + Set the latest last-applied-configuration annotations by setting it to match the contents of a file. 
+ This results in the last-applied-configuration being updated as though 'kubectl apply -f ' was run, + without updating any other parts of the object.`)) + + applySetLastAppliedExample = templates.Examples(i18n.T(` + # Set the last-applied-configuration of a resource to match the contents of a file + kubectl apply set-last-applied -f deploy.yaml + + # Execute set-last-applied against each configuration file in a directory + kubectl apply set-last-applied -f path/ + + # Set the last-applied-configuration of a resource to match the contents of a file; will create the annotation if it does not already exist + kubectl apply set-last-applied -f deploy.yaml --create-annotation=true + `)) +) + +// NewSetLastAppliedOptions takes option arguments from a CLI stream and returns it at SetLastAppliedOptions type. +func NewSetLastAppliedOptions(ioStreams genericiooptions.IOStreams) *SetLastAppliedOptions { + return &SetLastAppliedOptions{ + PrintFlags: genericclioptions.NewPrintFlags("configured").WithTypeSetter(scheme.Scheme), + IOStreams: ioStreams, + } +} + +// NewCmdApplySetLastApplied creates the cobra CLI `apply` subcommand `set-last-applied`.` +func NewCmdApplySetLastApplied(f cmdutil.Factory, ioStreams genericiooptions.IOStreams) *cobra.Command { + o := NewSetLastAppliedOptions(ioStreams) + cmd := &cobra.Command{ + Use: "set-last-applied -f FILENAME", + DisableFlagsInUseLine: true, + Short: i18n.T("Set the last-applied-configuration annotation on a live object to match the contents of a file"), + Long: applySetLastAppliedLong, + Example: applySetLastAppliedExample, + Run: func(cmd *cobra.Command, args []string) { + cmdutil.CheckErr(o.Complete(f, cmd)) + cmdutil.CheckErr(o.Validate()) + cmdutil.CheckErr(o.RunSetLastApplied()) + }, + } + + o.PrintFlags.AddFlags(cmd) + + cmdutil.AddDryRunFlag(cmd) + cmd.Flags().BoolVar(&o.CreateAnnotation, "create-annotation", o.CreateAnnotation, "Will create 'last-applied-configuration' annotations if current objects doesn't have one") + 
cmdutil.AddJsonFilenameFlag(cmd.Flags(), &o.FilenameOptions.Filenames, "Filename, directory, or URL to files that contains the last-applied-configuration annotations") + + return cmd +} + +// Complete populates dry-run and output flag options. +func (o *SetLastAppliedOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) error { + var err error + o.dryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return err + } + o.output = cmdutil.GetFlagString(cmd, "output") + o.shortOutput = o.output == "name" + + o.namespace, o.enforceNamespace, err = f.ToRawKubeConfigLoader().Namespace() + if err != nil { + return err + } + o.builder = f.NewBuilder() + o.unstructuredClientForMapping = f.UnstructuredClientForMapping + + cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.dryRunStrategy) + printer, err := o.PrintFlags.ToPrinter() + if err != nil { + return err + } + o.PrintObj = printer.PrintObj + + return nil +} + +// Validate checks SetLastAppliedOptions for validity. +func (o *SetLastAppliedOptions) Validate() error { + r := o.builder. + Unstructured(). + NamespaceParam(o.namespace).DefaultNamespace(). + FilenameParam(o.enforceNamespace, &o.FilenameOptions). + Flatten(). + Do() + + err := r.Visit(func(info *resource.Info, err error) error { + if err != nil { + return err + } + patchBuf, diffBuf, patchType, err := editor.GetApplyPatch(info.Object.(runtime.Unstructured)) + if err != nil { + return err + } + + // Verify the object exists in the cluster before trying to patch it. 
+ if err := info.Get(); err != nil { + if errors.IsNotFound(err) { + return err + } + return cmdutil.AddSourceToErr(fmt.Sprintf("retrieving current configuration of:\n%s\nfrom server for:", info.String()), info.Source, err) + } + originalBuf, err := util.GetOriginalConfiguration(info.Object) + if err != nil { + return cmdutil.AddSourceToErr(fmt.Sprintf("retrieving current configuration of:\n%s\nfrom server for:", info.String()), info.Source, err) + } + if originalBuf == nil && !o.CreateAnnotation { + return fmt.Errorf("no last-applied-configuration annotation found on resource: %s, to create the annotation, run the command with --create-annotation", info.Name) + } + + //only add to PatchBufferList when changed + if !bytes.Equal(cmdutil.StripComments(originalBuf), cmdutil.StripComments(diffBuf)) { + p := PatchBuffer{Patch: patchBuf, PatchType: patchType} + o.patchBufferList = append(o.patchBufferList, p) + o.infoList = append(o.infoList, info) + } else { + fmt.Fprintf(o.Out, "set-last-applied %s: no changes required.\n", info.Name) + } + + return nil + }) + return err +} + +// RunSetLastApplied executes the `set-last-applied` command according to SetLastAppliedOptions. +func (o *SetLastAppliedOptions) RunSetLastApplied() error { + for i, patch := range o.patchBufferList { + info := o.infoList[i] + finalObj := info.Object + + if o.dryRunStrategy != cmdutil.DryRunClient { + mapping := info.ResourceMapping() + client, err := o.unstructuredClientForMapping(mapping) + if err != nil { + return err + } + helper := resource. + NewHelper(client, mapping). 
+ DryRun(o.dryRunStrategy == cmdutil.DryRunServer) + finalObj, err = helper.Patch(info.Namespace, info.Name, patch.PatchType, patch.Patch, nil) + if err != nil { + return err + } + } + if err := o.PrintObj(finalObj, o.Out); err != nil { + return err + } + } + return nil +} diff --git a/pkg/kubectl/apply/apply_view_last_applied.go b/pkg/kubectl/apply/apply_view_last_applied.go new file mode 100644 index 00000000..bd2f6a8b --- /dev/null +++ b/pkg/kubectl/apply/apply_view_last_applied.go @@ -0,0 +1,174 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apply + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" + "k8s.io/cli-runtime/pkg/genericiooptions" + "k8s.io/cli-runtime/pkg/resource" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/util" + "k8s.io/kubectl/pkg/util/completion" + "k8s.io/kubectl/pkg/util/i18n" + "k8s.io/kubectl/pkg/util/templates" + "sigs.k8s.io/yaml" +) + +// ViewLastAppliedOptions defines options for the `apply view-last-applied` command.` +type ViewLastAppliedOptions struct { + FilenameOptions resource.FilenameOptions + Selector string + LastAppliedConfigurationList []string + OutputFormat string + All bool + Factory cmdutil.Factory + + genericiooptions.IOStreams +} + +var ( + applyViewLastAppliedLong = templates.LongDesc(i18n.T(` + View the latest last-applied-configuration annotations by type/name or file. + + The default output will be printed to stdout in YAML format. 
You can use the -o option + to change the output format.`)) + + applyViewLastAppliedExample = templates.Examples(i18n.T(` + # View the last-applied-configuration annotations by type/name in YAML + kubectl apply view-last-applied deployment/nginx + + # View the last-applied-configuration annotations by file in JSON + kubectl apply view-last-applied -f deploy.yaml -o json`)) +) + +// NewViewLastAppliedOptions takes option arguments from a CLI stream and returns it at ViewLastAppliedOptions type. +func NewViewLastAppliedOptions(ioStreams genericiooptions.IOStreams) *ViewLastAppliedOptions { + return &ViewLastAppliedOptions{ + OutputFormat: "yaml", + + IOStreams: ioStreams, + } +} + +// NewCmdApplyViewLastApplied creates the cobra CLI `apply` subcommand `view-last-applied`.` +func NewCmdApplyViewLastApplied(f cmdutil.Factory, ioStreams genericiooptions.IOStreams) *cobra.Command { + options := NewViewLastAppliedOptions(ioStreams) + + cmd := &cobra.Command{ + Use: "view-last-applied (TYPE [NAME | -l label] | TYPE/NAME | -f FILENAME)", + DisableFlagsInUseLine: true, + Short: i18n.T("View the latest last-applied-configuration annotations of a resource/object"), + Long: applyViewLastAppliedLong, + Example: applyViewLastAppliedExample, + ValidArgsFunction: completion.ResourceTypeAndNameCompletionFunc(f), + Run: func(cmd *cobra.Command, args []string) { + cmdutil.CheckErr(options.Complete(cmd, f, args)) + cmdutil.CheckErr(options.Validate()) + cmdutil.CheckErr(options.RunApplyViewLastApplied(cmd)) + }, + } + + cmd.Flags().StringVarP(&options.OutputFormat, "output", "o", options.OutputFormat, `Output format. 
Must be one of (yaml, json)`) + cmd.Flags().BoolVar(&options.All, "all", options.All, "Select all resources in the namespace of the specified resource types") + usage := "that contains the last-applied-configuration annotations" + cmdutil.AddFilenameOptionFlags(cmd, &options.FilenameOptions, usage) + cmdutil.AddLabelSelectorFlagVar(cmd, &options.Selector) + + return cmd +} + +// Complete checks an object for last-applied-configuration annotations. +func (o *ViewLastAppliedOptions) Complete(cmd *cobra.Command, f cmdutil.Factory, args []string) error { + cmdNamespace, enforceNamespace, err := f.ToRawKubeConfigLoader().Namespace() + if err != nil { + return err + } + + r := f.NewBuilder(). + Unstructured(). + NamespaceParam(cmdNamespace).DefaultNamespace(). + FilenameParam(enforceNamespace, &o.FilenameOptions). + ResourceTypeOrNameArgs(enforceNamespace, args...). + SelectAllParam(o.All). + LabelSelectorParam(o.Selector). + Latest(). + Flatten(). + Do() + err = r.Err() + if err != nil { + return err + } + + err = r.Visit(func(info *resource.Info, err error) error { + if err != nil { + return err + } + + configString, err := util.GetOriginalConfiguration(info.Object) + if err != nil { + return err + } + if configString == nil { + return cmdutil.AddSourceToErr(fmt.Sprintf("no last-applied-configuration annotation found on resource: %s\n", info.Name), info.Source, err) + } + o.LastAppliedConfigurationList = append(o.LastAppliedConfigurationList, string(configString)) + return nil + }) + + if err != nil { + return err + } + + return nil +} + +// Validate checks ViewLastAppliedOptions for validity. +func (o *ViewLastAppliedOptions) Validate() error { + return nil +} + +// RunApplyViewLastApplied executes the `view-last-applied` command according to ViewLastAppliedOptions. 
+func (o *ViewLastAppliedOptions) RunApplyViewLastApplied(cmd *cobra.Command) error { + for _, str := range o.LastAppliedConfigurationList { + switch o.OutputFormat { + case "json": + jsonBuffer := &bytes.Buffer{} + err := json.Indent(jsonBuffer, []byte(str), "", " ") + if err != nil { + return err + } + fmt.Fprint(o.Out, string(jsonBuffer.Bytes())) + case "yaml": + yamlOutput, err := yaml.JSONToYAML([]byte(str)) + if err != nil { + return err + } + fmt.Fprint(o.Out, string(yamlOutput)) + default: + return cmdutil.UsageErrorf( + cmd, + "Unexpected -o output mode: %s, the flag 'output' must be one of yaml|json", + o.OutputFormat) + } + } + + return nil +} diff --git a/pkg/kubectl/apply/applyset.go b/pkg/kubectl/apply/applyset.go new file mode 100644 index 00000000..4fd6dd8e --- /dev/null +++ b/pkg/kubectl/apply/applyset.go @@ -0,0 +1,607 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package apply + +import ( + "context" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "fmt" + "sort" + "strings" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/cli-runtime/pkg/resource" + "k8s.io/client-go/dynamic" + "k8s.io/klog/v2" + cmdutil "k8s.io/kubectl/pkg/cmd/util" +) + +// Label and annotation keys from the ApplySet specification. +// https://git.k8s.io/enhancements/keps/sig-cli/3659-kubectl-apply-prune#design-details-applyset-specification +const ( + // ApplySetToolingAnnotation is the key of the label that indicates which tool is used to manage this ApplySet. + // Tooling should refuse to mutate ApplySets belonging to other tools. + // The value must be in the format /. + // Example value: "kubectl/v1.27" or "helm/v3" or "kpt/v1.0.0" + ApplySetToolingAnnotation = "applyset.kubernetes.io/tooling" + + // ApplySetAdditionalNamespacesAnnotation annotation extends the scope of the ApplySet beyond the parent + // object's own namespace (if any) to include the listed namespaces. The value is a comma-separated + // list of the names of namespaces other than the parent's namespace in which objects are found + // Example value: "kube-system,ns1,ns2". + ApplySetAdditionalNamespacesAnnotation = "applyset.kubernetes.io/additional-namespaces" + + // Deprecated: ApplySetGRsAnnotation is a list of group-resources used to optimize listing of ApplySet member objects. + // It is optional in the ApplySet specification, as tools can perform discovery or use a different optimization. + // However, it is currently required in kubectl. 
+ // When present, the value of this annotation must be a comma separated list of the group-resources, + // in the fully-qualified name format, i.e. .. + // Example value: "certificates.cert-manager.io,configmaps,deployments.apps,secrets,services" + // Deprecated and replaced by ApplySetGKsAnnotation, support for this can be removed in applyset beta or GA. + DeprecatedApplySetGRsAnnotation = "applyset.kubernetes.io/contains-group-resources" + + // ApplySetGKsAnnotation is a list of group-kinds used to optimize listing of ApplySet member objects. + // It is optional in the ApplySet specification, as tools can perform discovery or use a different optimization. + // However, it is currently required in kubectl. + // When present, the value of this annotation must be a comma separated list of the group-kinds, + // in the fully-qualified name format, i.e. .. + // Example value: "Certificate.cert-manager.io,ConfigMap,deployments.apps,Secret,Service" + ApplySetGKsAnnotation = "applyset.kubernetes.io/contains-group-kinds" + + // ApplySetParentIDLabel is the key of the label that makes object an ApplySet parent object. + // Its value MUST use the format specified in V1ApplySetIdFormat below + ApplySetParentIDLabel = "applyset.kubernetes.io/id" + + // V1ApplySetIdFormat is the format required for the value of ApplySetParentIDLabel (and ApplysetPartOfLabel). + // The %s segment is the unique ID of the object itself, which MUST be the base64 encoding + // (using the URL safe encoding of RFC4648) of the hash of the GKNN of the object it is on, in the form: + // base64(sha256(...)). + V1ApplySetIdFormat = "applyset-%s-v1" + + // ApplysetPartOfLabel is the key of the label which indicates that the object is a member of an ApplySet. + // The value of the label MUST match the value of ApplySetParentIDLabel on the parent object. 
+ ApplysetPartOfLabel = "applyset.kubernetes.io/part-of" + + // ApplysetParentCRDLabel is the key of the label that can be set on a CRD to identify + // the custom resource type it defines (not the CRD itself) as an allowed parent for an ApplySet. + ApplysetParentCRDLabel = "applyset.kubernetes.io/is-parent-type" +) + +var defaultApplySetParentGVR = schema.GroupVersionResource{Version: "v1", Resource: "secrets"} + +// ApplySet tracks the information about an applyset apply/prune +type ApplySet struct { + // parentRef is a reference to the parent object that is used to track the applyset. + parentRef *ApplySetParentRef + + // toolingID is the value to be used and validated in the applyset.kubernetes.io/tooling annotation. + toolingID ApplySetTooling + + // currentResources is the set of resources that are part of the sever-side set as of when the current operation started. + currentResources map[schema.GroupKind]*kindInfo + + // currentNamespaces is the set of namespaces that contain objects in this applyset as of when the current operation started. + currentNamespaces sets.Set[string] + + // updatedResources is the set of resources that will be part of the set as of when the current operation completes. + updatedResources map[schema.GroupKind]*kindInfo + + // updatedNamespaces is the set of namespaces that will contain objects in this applyset as of when the current operation completes. + updatedNamespaces sets.Set[string] + + restMapper meta.RESTMapper + + // client is a client specific to the ApplySet parent object's type + client resource.RESTClient +} + +var builtinApplySetParentGVRs = sets.New[schema.GroupVersionResource]( + defaultApplySetParentGVR, + schema.GroupVersionResource{Version: "v1", Resource: "configmaps"}, +) + +// ApplySetParentRef stores object and type meta for the parent object that is used to track the applyset. 
+type ApplySetParentRef struct { + Name string + Namespace string + *meta.RESTMapping +} + +func (p ApplySetParentRef) IsNamespaced() bool { + return p.Scope.Name() == meta.RESTScopeNameNamespace +} + +// String returns the string representation of the parent object using the same format +// that we expect to receive in the --applyset flag on the CLI. +func (p ApplySetParentRef) String() string { + return fmt.Sprintf("%s.%s/%s", p.Resource.Resource, p.Resource.Group, p.Name) +} + +type ApplySetTooling struct { + Name string + Version string +} + +func (t ApplySetTooling) String() string { + return fmt.Sprintf("%s/%s", t.Name, t.Version) +} + +// NewApplySet creates a new ApplySet object tracked by the given parent object. +func NewApplySet(parent *ApplySetParentRef, tooling ApplySetTooling, mapper meta.RESTMapper, client resource.RESTClient) *ApplySet { + return &ApplySet{ + currentResources: make(map[schema.GroupKind]*kindInfo), + currentNamespaces: make(sets.Set[string]), + updatedResources: make(map[schema.GroupKind]*kindInfo), + updatedNamespaces: make(sets.Set[string]), + parentRef: parent, + toolingID: tooling, + restMapper: mapper, + client: client, + } +} + +const applySetIDPartDelimiter = "." + +// ID is the label value that we are using to identify this applyset. +// Format: base64(sha256(...)), using the URL safe encoding of RFC4648. + +func (a ApplySet) ID() string { + unencoded := strings.Join([]string{a.parentRef.Name, a.parentRef.Namespace, a.parentRef.GroupVersionKind.Kind, a.parentRef.GroupVersionKind.Group}, applySetIDPartDelimiter) + hashed := sha256.Sum256([]byte(unencoded)) + b64 := base64.RawURLEncoding.EncodeToString(hashed[:]) + // Label values must start and end with alphanumeric values, so add a known-safe prefix and suffix. + return fmt.Sprintf(V1ApplySetIdFormat, b64) +} + +// Validate imposes restrictions on the parent object that is used to track the applyset. 
+func (a ApplySet) Validate(ctx context.Context, client dynamic.Interface) error { + var errors []error + if a.parentRef.IsNamespaced() && a.parentRef.Namespace == "" { + errors = append(errors, fmt.Errorf("namespace is required to use namespace-scoped ApplySet")) + } + if !builtinApplySetParentGVRs.Has(a.parentRef.Resource) { + // Determine which custom resource types are allowed as ApplySet parents. + // Optimization: Since this makes requests, we only do this if they aren't using a default type. + permittedCRParents, err := a.getAllowedCustomResourceParents(ctx, client) + if err != nil { + errors = append(errors, fmt.Errorf("identifying allowed custom resource parent types: %w", err)) + } + parentRefResourceIgnoreVersion := a.parentRef.Resource.GroupResource().WithVersion("") + if !permittedCRParents.Has(parentRefResourceIgnoreVersion) { + errors = append(errors, fmt.Errorf("resource %q is not permitted as an ApplySet parent", a.parentRef.Resource)) + } + } + return utilerrors.NewAggregate(errors) +} + +func (a *ApplySet) labelForCustomParentCRDs() *metav1.LabelSelector { + return &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{{ + Key: ApplysetParentCRDLabel, + Operator: metav1.LabelSelectorOpExists, + }}, + } +} + +func (a *ApplySet) getAllowedCustomResourceParents(ctx context.Context, client dynamic.Interface) (sets.Set[schema.GroupVersionResource], error) { + opts := metav1.ListOptions{ + LabelSelector: metav1.FormatLabelSelector(a.labelForCustomParentCRDs()), + } + list, err := client.Resource(schema.GroupVersionResource{ + Group: "apiextensions.k8s.io", + Version: "v1", + Resource: "customresourcedefinitions", + }).List(ctx, opts) + if err != nil { + return nil, err + } + set := sets.New[schema.GroupVersionResource]() + for i := range list.Items { + // Custom resources must be named `.` + // and are served under `/apis///.../` + gr := schema.ParseGroupResource(list.Items[i].GetName()) + set.Insert(gr.WithVersion("")) + } + 
return set, nil +} + +func (a *ApplySet) LabelsForMember() map[string]string { + return map[string]string{ + ApplysetPartOfLabel: a.ID(), + } +} + +// addLabels sets our tracking labels on each object; this should be called as part of loading the objects. +func (a *ApplySet) AddLabels(objects ...*resource.Info) error { + applysetLabels := a.LabelsForMember() + for _, obj := range objects { + accessor, err := meta.Accessor(obj.Object) + if err != nil { + return fmt.Errorf("getting accessor: %w", err) + } + labels := accessor.GetLabels() + if labels == nil { + labels = make(map[string]string) + } + for k, v := range applysetLabels { + if _, found := labels[k]; found { + return fmt.Errorf("ApplySet label %q already set in input data", k) + } + labels[k] = v + } + accessor.SetLabels(labels) + } + + return nil +} + +func (a *ApplySet) fetchParent() error { + helper := resource.NewHelper(a.client, a.parentRef.RESTMapping) + obj, err := helper.Get(a.parentRef.Namespace, a.parentRef.Name) + if errors.IsNotFound(err) { + if !builtinApplySetParentGVRs.Has(a.parentRef.Resource) { + return fmt.Errorf("custom resource ApplySet parents cannot be created automatically") + } + return nil + } else if err != nil { + return fmt.Errorf("failed to fetch ApplySet parent object %q: %w", a.parentRef, err) + } else if obj == nil { + return fmt.Errorf("failed to fetch ApplySet parent object %q", a.parentRef) + } + + labels, annotations, err := getLabelsAndAnnotations(obj) + if err != nil { + return fmt.Errorf("getting metadata from parent object %q: %w", a.parentRef, err) + } + + toolAnnotation, hasToolAnno := annotations[ApplySetToolingAnnotation] + if !hasToolAnno { + return fmt.Errorf("ApplySet parent object %q already exists and is missing required annotation %q", a.parentRef, ApplySetToolingAnnotation) + } + if managedBy := toolingBaseName(toolAnnotation); managedBy != a.toolingID.Name { + return fmt.Errorf("ApplySet parent object %q already exists and is managed by tooling %q instead 
of %q", a.parentRef, managedBy, a.toolingID.Name) + } + + idLabel, hasIDLabel := labels[ApplySetParentIDLabel] + if !hasIDLabel { + return fmt.Errorf("ApplySet parent object %q exists and does not have required label %s", a.parentRef, ApplySetParentIDLabel) + } + if idLabel != a.ID() { + return fmt.Errorf("ApplySet parent object %q exists and has incorrect value for label %q (got: %s, want: %s)", a.parentRef, ApplySetParentIDLabel, idLabel, a.ID()) + } + + if a.currentResources, err = parseKindAnnotation(annotations, a.restMapper); err != nil { + // TODO: handle GVRs for now-deleted CRDs + return fmt.Errorf("parsing ApplySet annotation on %q: %w", a.parentRef, err) + } + a.currentNamespaces = parseNamespacesAnnotation(annotations) + if a.parentRef.IsNamespaced() { + a.currentNamespaces.Insert(a.parentRef.Namespace) + } + return nil +} +func (a *ApplySet) LabelSelectorForMembers() string { + return metav1.FormatLabelSelector(&metav1.LabelSelector{ + MatchLabels: a.LabelsForMember(), + }) +} + +// AllPrunableResources returns the list of all resources that should be considered for pruning. +// This is potentially a superset of the resources types that actually contain resources. +func (a *ApplySet) AllPrunableResources() []*kindInfo { + var ret []*kindInfo + for _, m := range a.currentResources { + ret = append(ret, m) + } + return ret +} + +// AllPrunableNamespaces returns the list of all namespaces that should be considered for pruning. +// This is potentially a superset of the namespaces that actually contain resources. 
+func (a *ApplySet) AllPrunableNamespaces() []string { + var ret []string + for ns := range a.currentNamespaces { + ret = append(ret, ns) + } + return ret +} + +func getLabelsAndAnnotations(obj runtime.Object) (map[string]string, map[string]string, error) { + accessor, err := meta.Accessor(obj) + if err != nil { + return nil, nil, err + } + return accessor.GetLabels(), accessor.GetAnnotations(), nil +} + +func toolingBaseName(toolAnnotation string) string { + parts := strings.Split(toolAnnotation, "/") + if len(parts) >= 2 { + return strings.Join(parts[:len(parts)-1], "/") + } + return toolAnnotation +} + +// kindInfo holds type information about a particular resource type. +type kindInfo struct { + restMapping *meta.RESTMapping +} + +func parseKindAnnotation(annotations map[string]string, mapper meta.RESTMapper) (map[schema.GroupKind]*kindInfo, error) { + annotation, ok := annotations[ApplySetGKsAnnotation] + if !ok { + if annotations[DeprecatedApplySetGRsAnnotation] != "" { + return parseDeprecatedResourceAnnotation(annotations[DeprecatedApplySetGRsAnnotation], mapper) + } + + // The spec does not require this annotation. However, 'missing' means 'perform discovery'. + // We return an error because we do not currently support dynamic discovery in kubectl apply. + return nil, fmt.Errorf("kubectl requires the %q annotation to be set on all ApplySet parent objects", ApplySetGKsAnnotation) + } + mappings := make(map[schema.GroupKind]*kindInfo) + // Annotation present but empty means that this is currently an empty set. 
+ if annotation == "" { + return mappings, nil + } + for _, gkString := range strings.Split(annotation, ",") { + gk := schema.ParseGroupKind(gkString) + restMapping, err := mapper.RESTMapping(gk) + if err != nil { + return nil, fmt.Errorf("could not find mapping for kind in %q annotation: %w", ApplySetGKsAnnotation, err) + } + mappings[gk] = &kindInfo{ + restMapping: restMapping, + } + } + + return mappings, nil +} + +func parseDeprecatedResourceAnnotation(annotation string, mapper meta.RESTMapper) (map[schema.GroupKind]*kindInfo, error) { + mappings := make(map[schema.GroupKind]*kindInfo) + // Annotation present but empty means that this is currently an empty set. + if annotation == "" { + return mappings, nil + } + for _, grString := range strings.Split(annotation, ",") { + gr := schema.ParseGroupResource(grString) + gvk, err := mapper.KindFor(gr.WithVersion("")) + if err != nil { + return nil, fmt.Errorf("invalid group resource in %q annotation: %w", DeprecatedApplySetGRsAnnotation, err) + } + restMapping, err := mapper.RESTMapping(gvk.GroupKind()) + if err != nil { + return nil, fmt.Errorf("could not find kind for resource in %q annotation: %w", DeprecatedApplySetGRsAnnotation, err) + } + mappings[gvk.GroupKind()] = &kindInfo{ + restMapping: restMapping, + } + } + return mappings, nil +} + +func parseNamespacesAnnotation(annotations map[string]string) sets.Set[string] { + annotation, ok := annotations[ApplySetAdditionalNamespacesAnnotation] + if !ok { // this annotation is completely optional + return sets.Set[string]{} + } + // Don't include an empty namespace + if annotation == "" { + return sets.Set[string]{} + } + return sets.New(strings.Split(annotation, ",")...) +} + +// addResource registers the given resource and namespace as being part of the updated set of +// resources being applied by the current operation. 
+func (a *ApplySet) addResource(restMapping *meta.RESTMapping, namespace string) { + gk := restMapping.GroupVersionKind.GroupKind() + if _, found := a.updatedResources[gk]; !found { + a.updatedResources[gk] = &kindInfo{ + restMapping: restMapping, + } + } + if restMapping.Scope == meta.RESTScopeNamespace && namespace != "" { + a.updatedNamespaces.Insert(namespace) + } +} + +type ApplySetUpdateMode string + +var updateToLatestSet ApplySetUpdateMode = "latest" +var updateToSuperset ApplySetUpdateMode = "superset" + +func (a *ApplySet) updateParent(mode ApplySetUpdateMode, dryRun cmdutil.DryRunStrategy, validation string) error { + data, err := json.Marshal(a.buildParentPatch(mode)) + if err != nil { + return fmt.Errorf("failed to encode patch for ApplySet parent: %w", err) + } + // Note that because we are using SSA, we will remove any annotations we don't specify, + // which is how we remove the deprecated contains-group-resources annotation. + err = serverSideApplyRequest(a, data, dryRun, validation, false) + if err != nil && errors.IsConflict(err) { + // Try again with conflicts forced + klog.Warningf("WARNING: failed to update ApplySet: %s\nApplySet field manager %s should own these fields. Retrying with conflicts forced.", err.Error(), a.FieldManager()) + err = serverSideApplyRequest(a, data, dryRun, validation, true) + } + if err != nil { + return fmt.Errorf("failed to update ApplySet: %w", err) + } + return nil +} + +func serverSideApplyRequest(a *ApplySet, data []byte, dryRun cmdutil.DryRunStrategy, validation string, forceConficts bool) error { + if dryRun == cmdutil.DryRunClient { + return nil + } + helper := resource.NewHelper(a.client, a.parentRef.RESTMapping). + DryRun(dryRun == cmdutil.DryRunServer). + WithFieldManager(a.FieldManager()). 
+ WithFieldValidation(validation) + + options := metav1.PatchOptions{ + Force: &forceConficts, + } + _, err := helper.Patch( + a.parentRef.Namespace, + a.parentRef.Name, + types.ApplyPatchType, + data, + &options, + ) + return err +} + +func (a *ApplySet) buildParentPatch(mode ApplySetUpdateMode) *metav1.PartialObjectMetadata { + var newGKsAnnotation, newNsAnnotation string + switch mode { + case updateToSuperset: + // If the apply succeeded but pruning failed, the set of group resources that + // the ApplySet should track is the superset of the previous and current resources. + // This ensures that the resources that failed to be pruned are not orphaned from the set. + grSuperset := sets.KeySet(a.currentResources).Union(sets.KeySet(a.updatedResources)) + newGKsAnnotation = generateKindsAnnotation(grSuperset) + newNsAnnotation = generateNamespacesAnnotation(a.currentNamespaces.Union(a.updatedNamespaces), a.parentRef.Namespace) + case updateToLatestSet: + newGKsAnnotation = generateKindsAnnotation(sets.KeySet(a.updatedResources)) + newNsAnnotation = generateNamespacesAnnotation(a.updatedNamespaces, a.parentRef.Namespace) + } + + return &metav1.PartialObjectMetadata{ + TypeMeta: metav1.TypeMeta{ + Kind: a.parentRef.GroupVersionKind.Kind, + APIVersion: a.parentRef.GroupVersionKind.GroupVersion().String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: a.parentRef.Name, + Namespace: a.parentRef.Namespace, + Annotations: map[string]string{ + ApplySetToolingAnnotation: a.toolingID.String(), + ApplySetGKsAnnotation: newGKsAnnotation, + ApplySetAdditionalNamespacesAnnotation: newNsAnnotation, + }, + Labels: map[string]string{ + ApplySetParentIDLabel: a.ID(), + }, + }, + } +} + +func generateNamespacesAnnotation(namespaces sets.Set[string], skip string) string { + nsList := namespaces.Clone().Delete(skip).UnsortedList() + sort.Strings(nsList) + return strings.Join(nsList, ",") +} + +func generateKindsAnnotation(resources sets.Set[schema.GroupKind]) string { + var gks 
[]string + for gk := range resources { + gks = append(gks, gk.String()) + } + sort.Strings(gks) + return strings.Join(gks, ",") +} + +func (a ApplySet) FieldManager() string { + return fmt.Sprintf("%s-applyset", a.toolingID.Name) +} + +// ParseApplySetParentRef creates a new ApplySetParentRef from a parent reference in the format [RESOURCE][.GROUP]/NAME +func ParseApplySetParentRef(parentRefStr string, mapper meta.RESTMapper) (*ApplySetParentRef, error) { + var gvr schema.GroupVersionResource + var name string + + if groupRes, nameSuffix, hasTypeInfo := strings.Cut(parentRefStr, "/"); hasTypeInfo { + name = nameSuffix + gvr = schema.ParseGroupResource(groupRes).WithVersion("") + } else { + name = parentRefStr + gvr = defaultApplySetParentGVR + } + + if name == "" { + return nil, fmt.Errorf("name cannot be blank") + } + + gvk, err := mapper.KindFor(gvr) + if err != nil { + return nil, err + } + mapping, err := mapper.RESTMapping(gvk.GroupKind()) + if err != nil { + return nil, err + } + return &ApplySetParentRef{Name: name, RESTMapping: mapping}, nil +} + +// Prune deletes any objects from the apiserver that are no longer in the applyset. +func (a *ApplySet) Prune(ctx context.Context, o *ApplyOptions) error { + printer, err := o.ToPrinter("pruned") + if err != nil { + return err + } + opt := &ApplySetDeleteOptions{ + CascadingStrategy: o.DeleteOptions.CascadingStrategy, + DryRunStrategy: o.DryRunStrategy, + GracePeriod: o.DeleteOptions.GracePeriod, + + Printer: printer, + + IOStreams: o.IOStreams, + } + + if err := a.pruneAll(ctx, o.DynamicClient, o.VisitedUids, opt); err != nil { + return err + } + + if err := a.updateParent(updateToLatestSet, o.DryRunStrategy, o.ValidationDirective); err != nil { + return fmt.Errorf("apply and prune succeeded, but ApplySet update failed: %w", err) + } + + return nil +} + +// BeforeApply should be called before applying the objects. +// It pre-updates the parent object so that it covers the resources that will be applied. 
+// In this way, even if we are interrupted, we will not leak objects. +func (a *ApplySet) BeforeApply(objects []*resource.Info, dryRunStrategy cmdutil.DryRunStrategy, validationDirective string) error { + if err := a.fetchParent(); err != nil { + return err + } + // Update the live parent object to the superset of the current and previous resources. + // Doing this before the actual apply and prune operations improves behavior by ensuring + // the live object contains the superset on failure. This may cause the next pruning + // operation to make a larger number of GET requests than strictly necessary, but it prevents + // object leakage from the set. The superset will automatically be reduced to the correct + // set by the next successful operation. + for _, info := range objects { + a.addResource(info.ResourceMapping(), info.Namespace) + } + if err := a.updateParent(updateToSuperset, dryRunStrategy, validationDirective); err != nil { + return err + } + return nil +} diff --git a/pkg/kubectl/apply/applyset_pruner.go b/pkg/kubectl/apply/applyset_pruner.go new file mode 100644 index 00000000..3c064af3 --- /dev/null +++ b/pkg/kubectl/apply/applyset_pruner.go @@ -0,0 +1,195 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package apply + +import ( + "context" + "fmt" + "sync" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/cli-runtime/pkg/genericiooptions" + "k8s.io/cli-runtime/pkg/printers" + "k8s.io/client-go/dynamic" + "k8s.io/klog/v2" + cmdutil "k8s.io/kubectl/pkg/cmd/util" +) + +type ApplySetDeleteOptions struct { + CascadingStrategy metav1.DeletionPropagation + DryRunStrategy cmdutil.DryRunStrategy + GracePeriod int + + Printer printers.ResourcePrinter + + IOStreams genericiooptions.IOStreams +} + +// PruneObject is an apiserver object that should be deleted as part of prune. +type PruneObject struct { + Name string + Namespace string + Mapping *meta.RESTMapping + Object runtime.Object +} + +// String returns a human-readable name of the object, for use in debug messages. +func (p *PruneObject) String() string { + s := p.Mapping.GroupVersionKind.GroupKind().String() + + if p.Namespace != "" { + s += " " + p.Namespace + "/" + p.Name + } else { + s += " " + p.Name + } + return s +} + +// FindAllObjectsToPrune returns the list of objects that will be pruned. +// Calling this instead of Prune can be useful for dry-run / diff behaviour. 
+func (a *ApplySet) FindAllObjectsToPrune(ctx context.Context, dynamicClient dynamic.Interface, visitedUids sets.Set[types.UID]) ([]PruneObject, error) { + type task struct { + namespace string + restMapping *meta.RESTMapping + + err error + results []PruneObject + } + var tasks []*task + + // We run discovery in parallel, in as many goroutines as priority and fairness will allow + // (We don't expect many requests in real-world scenarios - maybe tens, unlikely to be hundreds) + for gvk, resource := range a.AllPrunableResources() { + scope := resource.restMapping.Scope + + switch scope.Name() { + case meta.RESTScopeNameNamespace: + for _, namespace := range a.AllPrunableNamespaces() { + if namespace == "" { + // Just double-check because otherwise we get cryptic error messages + return nil, fmt.Errorf("unexpectedly encountered empty namespace during prune of namespace-scoped resource %v", gvk) + } + tasks = append(tasks, &task{ + namespace: namespace, + restMapping: resource.restMapping, + }) + } + + case meta.RESTScopeNameRoot: + tasks = append(tasks, &task{ + restMapping: resource.restMapping, + }) + + default: + return nil, fmt.Errorf("unhandled scope %q", scope.Name()) + } + } + + var wg sync.WaitGroup + + for i := range tasks { + task := tasks[i] + wg.Add(1) + go func() { + defer wg.Done() + + results, err := a.findObjectsToPrune(ctx, dynamicClient, visitedUids, task.namespace, task.restMapping) + if err != nil { + task.err = fmt.Errorf("listing %v objects for pruning: %w", task.restMapping.GroupVersionKind.String(), err) + } else { + task.results = results + } + }() + } + // Wait for all the goroutines to finish + wg.Wait() + + var allObjects []PruneObject + for _, task := range tasks { + if task.err != nil { + return nil, task.err + } + allObjects = append(allObjects, task.results...) 
+ } + return allObjects, nil +} + +func (a *ApplySet) pruneAll(ctx context.Context, dynamicClient dynamic.Interface, visitedUids sets.Set[types.UID], deleteOptions *ApplySetDeleteOptions) error { + allObjects, err := a.FindAllObjectsToPrune(ctx, dynamicClient, visitedUids) + if err != nil { + return err + } + + return a.deleteObjects(ctx, dynamicClient, allObjects, deleteOptions) +} + +func (a *ApplySet) findObjectsToPrune(ctx context.Context, dynamicClient dynamic.Interface, visitedUids sets.Set[types.UID], namespace string, mapping *meta.RESTMapping) ([]PruneObject, error) { + applysetLabelSelector := a.LabelSelectorForMembers() + + opt := metav1.ListOptions{ + LabelSelector: applysetLabelSelector, + } + + klog.V(2).Infof("listing objects for pruning; namespace=%q, resource=%v", namespace, mapping.Resource) + objects, err := dynamicClient.Resource(mapping.Resource).Namespace(namespace).List(ctx, opt) + if err != nil { + return nil, err + } + + var pruneObjects []PruneObject + for i := range objects.Items { + obj := &objects.Items[i] + + uid := obj.GetUID() + if visitedUids.Has(uid) { + continue + } + name := obj.GetName() + pruneObjects = append(pruneObjects, PruneObject{ + Name: name, + Namespace: namespace, + Mapping: mapping, + Object: obj, + }) + + } + return pruneObjects, nil +} + +func (a *ApplySet) deleteObjects(ctx context.Context, dynamicClient dynamic.Interface, pruneObjects []PruneObject, opt *ApplySetDeleteOptions) error { + for i := range pruneObjects { + pruneObject := &pruneObjects[i] + + name := pruneObject.Name + namespace := pruneObject.Namespace + mapping := pruneObject.Mapping + + if opt.DryRunStrategy != cmdutil.DryRunClient { + if err := runDelete(ctx, namespace, name, mapping, dynamicClient, opt.CascadingStrategy, opt.GracePeriod, opt.DryRunStrategy == cmdutil.DryRunServer); err != nil { + return fmt.Errorf("pruning %v: %w", pruneObject.String(), err) + } + } + + opt.Printer.PrintObj(pruneObject.Object, opt.IOStreams.Out) + + } + return nil 
+} diff --git a/pkg/kubectl/apply/patcher.go b/pkg/kubectl/apply/patcher.go new file mode 100644 index 00000000..5f2f1423 --- /dev/null +++ b/pkg/kubectl/apply/patcher.go @@ -0,0 +1,431 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apply + +import ( + "context" + "encoding/json" + "fmt" + "io" + "time" + + "github.com/pkg/errors" + + "github.com/jonboulle/clockwork" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/jsonmergepatch" + "k8s.io/apimachinery/pkg/util/mergepatch" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/cli-runtime/pkg/resource" + "k8s.io/client-go/openapi3" + "k8s.io/klog/v2" + "k8s.io/kube-openapi/pkg/validation/spec" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/scheme" + "k8s.io/kubectl/pkg/util" + "k8s.io/kubectl/pkg/util/openapi" +) + +const ( + // maxPatchRetry is the maximum number of conflicts retry for during a patch operation before returning failure + maxPatchRetry = 5 + // how many times we can retry before back off + triesBeforeBackOff = 1 + // groupVersionKindExtensionKey is the key used to lookup the + // GroupVersionKind value for an object definition 
from the + // definition's "extensions" map. + groupVersionKindExtensionKey = "x-kubernetes-group-version-kind" +) + +// patchRetryBackOffPeriod is the period to back off when apply patch results in error. +var patchRetryBackOffPeriod = 1 * time.Second + +var createPatchErrFormat = "creating patch with:\noriginal:\n%s\nmodified:\n%s\ncurrent:\n%s\nfor:" + +// Patcher defines options to patch OpenAPI objects. +type Patcher struct { + Mapping *meta.RESTMapping + Helper *resource.Helper + + Overwrite bool + BackOff clockwork.Clock + + Force bool + CascadingStrategy metav1.DeletionPropagation + Timeout time.Duration + GracePeriod int + + // If set, forces the patch against a specific resourceVersion + ResourceVersion *string + + // Number of retries to make if the patch fails with conflict + Retries int + + OpenAPIGetter openapi.OpenAPIResourcesGetter + OpenAPIV3Root openapi3.Root +} + +func newPatcher(o *ApplyOptions, info *resource.Info, helper *resource.Helper) (*Patcher, error) { + var openAPIGetter openapi.OpenAPIResourcesGetter + var openAPIV3Root openapi3.Root + + if o.OpenAPIPatch { + openAPIGetter = o.OpenAPIGetter + openAPIV3Root = o.OpenAPIV3Root + } + + return &Patcher{ + Mapping: info.Mapping, + Helper: helper, + Overwrite: o.Overwrite, + BackOff: clockwork.NewRealClock(), + Force: o.DeleteOptions.ForceDeletion, + CascadingStrategy: o.DeleteOptions.CascadingStrategy, + Timeout: o.DeleteOptions.Timeout, + GracePeriod: o.DeleteOptions.GracePeriod, + OpenAPIGetter: openAPIGetter, + OpenAPIV3Root: openAPIV3Root, + Retries: maxPatchRetry, + }, nil +} + +func (p *Patcher) delete(namespace, name string) error { + options := asDeleteOptions(p.CascadingStrategy, p.GracePeriod) + _, err := p.Helper.DeleteWithOptions(namespace, name, &options) + return err +} + +func (p *Patcher) patchSimple(obj runtime.Object, modified []byte, namespace, name string, errOut io.Writer) ([]byte, runtime.Object, error) { + // Serialize the current configuration of the object from the 
server. + current, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) + if err != nil { + return nil, nil, errors.Wrapf(err, "serializing current configuration from:\n%v\nfor:", obj) + } + + // Retrieve the original configuration of the object from the annotation. + original, err := util.GetOriginalConfiguration(obj) + if err != nil { + return nil, nil, errors.Wrapf(err, "retrieving original configuration from:\n%v\nfor:", obj) + } + + var patchType types.PatchType + var patch []byte + + if p.OpenAPIV3Root != nil { + gvkSupported, err := p.gvkSupportsPatchOpenAPIV3(p.Mapping.GroupVersionKind) + if err != nil { + // Realistically this error logging is not needed (not present in V2), + // but would help us in debugging if users encounter a problem + // with OpenAPI V3 not present in V2. + klog.V(5).Infof("warning: OpenAPI V3 path does not exist - group: %s, version %s, kind %s\n", + p.Mapping.GroupVersionKind.Group, p.Mapping.GroupVersionKind.Version, p.Mapping.GroupVersionKind.Kind) + } else if gvkSupported { + patch, err = p.buildStrategicMergePatchFromOpenAPIV3(original, modified, current) + if err != nil { + // Fall back to OpenAPI V2 if there is a problem + // We should remove the fallback in the future, + // but for the first release it might be beneficial + // to fall back to OpenAPI V2 while logging the error + // and seeing if we get any bug reports. 
+ fmt.Fprintf(errOut, "warning: error calculating patch from openapi v3 spec: %v\n", err) + } else { + patchType = types.StrategicMergePatchType + } + } else { + klog.V(5).Infof("warning: OpenAPI V3 path does not support strategic merge patch - group: %s, version %s, kind %s\n", + p.Mapping.GroupVersionKind.Group, p.Mapping.GroupVersionKind.Version, p.Mapping.GroupVersionKind.Kind) + } + } + + if patch == nil && p.OpenAPIGetter != nil { + if openAPISchema, err := p.OpenAPIGetter.OpenAPISchema(); err == nil && openAPISchema != nil { + // if openapischema is used, we'll try to get required patch type for this GVK from Open API. + // if it fails or could not find any patch type, fall back to baked-in patch type determination. + if patchType, err = p.getPatchTypeFromOpenAPI(openAPISchema, p.Mapping.GroupVersionKind); err == nil && patchType == types.StrategicMergePatchType { + patch, err = p.buildStrategicMergeFromOpenAPI(openAPISchema, original, modified, current) + if err != nil { + // Warn user about problem and continue strategic merge patching using builtin types. 
+ fmt.Fprintf(errOut, "warning: error calculating patch from openapi spec: %v\n", err) + } + } + } + } + + if patch == nil { + versionedObj, err := scheme.Scheme.New(p.Mapping.GroupVersionKind) + if err == nil { + patchType = types.StrategicMergePatchType + patch, err = p.buildStrategicMergeFromBuiltins(versionedObj, original, modified, current) + if err != nil { + return nil, nil, errors.Wrapf(err, createPatchErrFormat, original, modified, current) + } + } else { + if !runtime.IsNotRegisteredError(err) { + return nil, nil, errors.Wrapf(err, "getting instance of versioned object for %v:", p.Mapping.GroupVersionKind) + } + + patchType = types.MergePatchType + patch, err = p.buildMergePatch(original, modified, current) + if err != nil { + return nil, nil, errors.Wrapf(err, createPatchErrFormat, original, modified, current) + } + } + } + + if string(patch) == "{}" { + return patch, obj, nil + } + + if p.ResourceVersion != nil { + patch, err = addResourceVersion(patch, *p.ResourceVersion) + if err != nil { + return nil, nil, errors.Wrap(err, "Failed to insert resourceVersion in patch") + } + } + + patchedObj, err := p.Helper.Patch(namespace, name, patchType, patch, nil) + return patch, patchedObj, err +} + +// buildMergePatch builds patch according to the JSONMergePatch which is used for +// custom resource definitions. +func (p *Patcher) buildMergePatch(original, modified, current []byte) ([]byte, error) { + preconditions := []mergepatch.PreconditionFunc{mergepatch.RequireKeyUnchanged("apiVersion"), + mergepatch.RequireKeyUnchanged("kind"), mergepatch.RequireMetadataKeyUnchanged("name")} + patch, err := jsonmergepatch.CreateThreeWayJSONMergePatch(original, modified, current, preconditions...) 
+ if err != nil { + if mergepatch.IsPreconditionFailed(err) { + return nil, fmt.Errorf("%s", "At least one of apiVersion, kind and name was changed") + } + return nil, err + } + + return patch, nil +} + +// gvkSupportsPatchOpenAPIV3 checks if a particular GVK supports the patch operation. +// It returns an error if the OpenAPI V3 could not be downloaded. +func (p *Patcher) gvkSupportsPatchOpenAPIV3(gvk schema.GroupVersionKind) (bool, error) { + gvSpec, err := p.OpenAPIV3Root.GVSpec(schema.GroupVersion{ + Group: p.Mapping.GroupVersionKind.Group, + Version: p.Mapping.GroupVersionKind.Version, + }) + if err != nil { + return false, err + } + if gvSpec == nil || gvSpec.Paths == nil || gvSpec.Paths.Paths == nil { + return false, fmt.Errorf("gvk group: %s, version: %s, kind: %s does not exist for OpenAPI V3", gvk.Group, gvk.Version, gvk.Kind) + } + for _, path := range gvSpec.Paths.Paths { + if path.Patch != nil { + if gvkMatchesSingle(p.Mapping.GroupVersionKind, path.Patch.Extensions) { + if path.Patch.RequestBody == nil || path.Patch.RequestBody.Content == nil { + // GVK exists but does not support requestBody. Indication of malformed OpenAPI. + return false, nil + } + if _, ok := path.Patch.RequestBody.Content["application/strategic-merge-patch+json"]; ok { + return true, nil + } + // GVK exists but strategic-merge-patch is not supported. Likely to be a CRD or aggregated resource. 
+ return false, nil + } + } + } + return false, nil +} + +func gvkMatchesArray(targetGVK schema.GroupVersionKind, ext spec.Extensions) bool { + var gvkList []map[string]string + err := ext.GetObject(groupVersionKindExtensionKey, &gvkList) + if err != nil { + return false + } + for _, gvkMap := range gvkList { + if gvkMap["group"] == targetGVK.Group && + gvkMap["version"] == targetGVK.Version && + gvkMap["kind"] == targetGVK.Kind { + return true + } + } + return false +} + +func gvkMatchesSingle(targetGVK schema.GroupVersionKind, ext spec.Extensions) bool { + var gvkMap map[string]string + err := ext.GetObject(groupVersionKindExtensionKey, &gvkMap) + if err != nil { + return false + } + return gvkMap["group"] == targetGVK.Group && + gvkMap["version"] == targetGVK.Version && + gvkMap["kind"] == targetGVK.Kind +} + +func (p *Patcher) buildStrategicMergePatchFromOpenAPIV3(original, modified, current []byte) ([]byte, error) { + gvSpec, err := p.OpenAPIV3Root.GVSpec(schema.GroupVersion{ + Group: p.Mapping.GroupVersionKind.Group, + Version: p.Mapping.GroupVersionKind.Version, + }) + if err != nil { + return nil, err + } + if gvSpec == nil || gvSpec.Components == nil { + return nil, fmt.Errorf("OpenAPI V3 Components is nil") + } + for _, c := range gvSpec.Components.Schemas { + if !gvkMatchesArray(p.Mapping.GroupVersionKind, c.Extensions) { + continue + } + lookupPatchMeta := strategicpatch.PatchMetaFromOpenAPIV3{Schema: c, SchemaList: gvSpec.Components.Schemas} + if openapiv3Patch, err := strategicpatch.CreateThreeWayMergePatch(original, modified, current, lookupPatchMeta, p.Overwrite); err != nil { + return nil, err + } else { + return openapiv3Patch, nil + } + + } + return nil, nil +} + +// buildStrategicMergeFromOpenAPI builds patch from OpenAPI if it is enabled. +// This is used for core types which is published in openapi. 
+func (p *Patcher) buildStrategicMergeFromOpenAPI(openAPISchema openapi.Resources, original, modified, current []byte) ([]byte, error) { + schema := openAPISchema.LookupResource(p.Mapping.GroupVersionKind) + if schema == nil { + // Missing schema returns nil patch; also no error. + return nil, nil + } + lookupPatchMeta := strategicpatch.PatchMetaFromOpenAPI{Schema: schema} + if openapiPatch, err := strategicpatch.CreateThreeWayMergePatch(original, modified, current, lookupPatchMeta, p.Overwrite); err != nil { + return nil, err + } else { + return openapiPatch, nil + } +} + +// getPatchTypeFromOpenAPI looks up patch types supported by given GroupVersionKind in Open API. +func (p *Patcher) getPatchTypeFromOpenAPI(openAPISchema openapi.Resources, gvk schema.GroupVersionKind) (types.PatchType, error) { + if pc := openAPISchema.GetConsumes(p.Mapping.GroupVersionKind, "PATCH"); pc != nil { + for _, c := range pc { + if c == string(types.StrategicMergePatchType) { + return types.StrategicMergePatchType, nil + } + } + + return types.MergePatchType, nil + } + + return types.MergePatchType, fmt.Errorf("unable to find any patch type for %s in Open API", gvk) +} + +// buildStrategicMergeFromStruct builds patch from struct. This is used when +// openapi endpoint is not working or user disables it by setting openapi-patch flag +// to false. +func (p *Patcher) buildStrategicMergeFromBuiltins(versionedObj runtime.Object, original, modified, current []byte) ([]byte, error) { + lookupPatchMeta, err := strategicpatch.NewPatchMetaFromStruct(versionedObj) + if err != nil { + return nil, err + } + patch, err := strategicpatch.CreateThreeWayMergePatch(original, modified, current, lookupPatchMeta, p.Overwrite) + if err != nil { + return nil, err + } + + return patch, nil +} + +// Patch tries to patch an OpenAPI resource. On success, returns the merge patch as well +// the final patched object. On failure, returns an error. 
+func (p *Patcher) Patch(current runtime.Object, modified []byte, source, namespace, name string, errOut io.Writer) ([]byte, runtime.Object, error) { + var getErr error + patchBytes, patchObject, err := p.patchSimple(current, modified, namespace, name, errOut) + if p.Retries == 0 { + p.Retries = maxPatchRetry + } + for i := 1; i <= p.Retries && apierrors.IsConflict(err); i++ { + if i > triesBeforeBackOff { + p.BackOff.Sleep(patchRetryBackOffPeriod) + } + current, getErr = p.Helper.Get(namespace, name) + if getErr != nil { + return nil, nil, getErr + } + patchBytes, patchObject, err = p.patchSimple(current, modified, namespace, name, errOut) + } + if err != nil { + if (apierrors.IsConflict(err) || apierrors.IsInvalid(err)) && p.Force { + patchBytes, patchObject, err = p.deleteAndCreate(current, modified, namespace, name) + } else { + err = cmdutil.AddSourceToErr("patching", source, err) + } + } + return patchBytes, patchObject, err +} + +func (p *Patcher) deleteAndCreate(original runtime.Object, modified []byte, namespace, name string) ([]byte, runtime.Object, error) { + if err := p.delete(namespace, name); err != nil { + return modified, nil, err + } + // TODO: use wait + if err := wait.PollUntilContextTimeout(context.Background(), 1*time.Second, p.Timeout, true, func(ctx context.Context) (bool, error) { + if _, err := p.Helper.Get(namespace, name); !apierrors.IsNotFound(err) { + return false, err + } + return true, nil + }); err != nil { + return modified, nil, err + } + versionedObject, _, err := unstructured.UnstructuredJSONScheme.Decode(modified, nil, nil) + if err != nil { + return modified, nil, err + } + createdObject, err := p.Helper.Create(namespace, true, versionedObject) + if err != nil { + // restore the original object if we fail to create the new one + // but still propagate and advertise error to user + recreated, recreateErr := p.Helper.Create(namespace, true, original) + if recreateErr != nil { + err = fmt.Errorf("An error occurred force-replacing 
the existing object with the newly provided one:\n\n%v.\n\nAdditionally, an error occurred attempting to restore the original object:\n\n%v", err, recreateErr) + } else { + createdObject = recreated + } + } + return modified, createdObject, err +} + +func addResourceVersion(patch []byte, rv string) ([]byte, error) { + var patchMap map[string]interface{} + err := json.Unmarshal(patch, &patchMap) + if err != nil { + return nil, err + } + u := unstructured.Unstructured{Object: patchMap} + a, err := meta.Accessor(&u) + if err != nil { + return nil, err + } + a.SetResourceVersion(rv) + + return json.Marshal(patchMap) +} diff --git a/pkg/kubectl/apply/prune.go b/pkg/kubectl/apply/prune.go new file mode 100644 index 00000000..98ac19d8 --- /dev/null +++ b/pkg/kubectl/apply/prune.go @@ -0,0 +1,162 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package apply + +import ( + "context" + "fmt" + "io" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/cli-runtime/pkg/printers" + "k8s.io/client-go/dynamic" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/util/prune" +) + +type pruner struct { + mapper meta.RESTMapper + dynamicClient dynamic.Interface + + visitedUids sets.Set[types.UID] + visitedNamespaces sets.Set[string] + labelSelector string + fieldSelector string + + cascadingStrategy metav1.DeletionPropagation + dryRunStrategy cmdutil.DryRunStrategy + gracePeriod int + + toPrinter func(string) (printers.ResourcePrinter, error) + + out io.Writer +} + +func newPruner(o *ApplyOptions) pruner { + return pruner{ + mapper: o.Mapper, + dynamicClient: o.DynamicClient, + + labelSelector: o.Selector, + visitedUids: o.VisitedUids, + visitedNamespaces: o.VisitedNamespaces, + + cascadingStrategy: o.DeleteOptions.CascadingStrategy, + dryRunStrategy: o.DryRunStrategy, + gracePeriod: o.DeleteOptions.GracePeriod, + + toPrinter: o.ToPrinter, + + out: o.Out, + } +} + +func (p *pruner) pruneAll(o *ApplyOptions) error { + + namespacedRESTMappings, nonNamespacedRESTMappings, err := prune.GetRESTMappings(o.Mapper, o.PruneResources, o.Namespace != "") + if err != nil { + return fmt.Errorf("error retrieving RESTMappings to prune: %v", err) + } + + for n := range p.visitedNamespaces { + for _, m := range namespacedRESTMappings { + if err := p.prune(n, m); err != nil { + return fmt.Errorf("error pruning namespaced object %v: %v", m.GroupVersionKind, err) + } + } + } + + for _, m := range nonNamespacedRESTMappings { + if err := p.prune(metav1.NamespaceNone, m); err != nil { + return fmt.Errorf("error pruning nonNamespaced object %v: %v", m.GroupVersionKind, err) + } + } + + return nil +} + +func (p *pruner) prune(namespace string, mapping *meta.RESTMapping) error { + 
objList, err := p.dynamicClient.Resource(mapping.Resource). + Namespace(namespace). + List(context.TODO(), metav1.ListOptions{ + LabelSelector: p.labelSelector, + FieldSelector: p.fieldSelector, + }) + if err != nil { + return err + } + + objs, err := meta.ExtractList(objList) + if err != nil { + return err + } + + for _, obj := range objs { + metadata, err := meta.Accessor(obj) + if err != nil { + return err + } + annots := metadata.GetAnnotations() + if _, ok := annots[corev1.LastAppliedConfigAnnotation]; !ok { + // don't prune resources not created with apply + continue + } + uid := metadata.GetUID() + if p.visitedUids.Has(uid) { + continue + } + name := metadata.GetName() + if p.dryRunStrategy != cmdutil.DryRunClient { + if err := p.delete(namespace, name, mapping); err != nil { + return err + } + } + + printer, err := p.toPrinter("pruned") + if err != nil { + return err + } + printer.PrintObj(obj, p.out) + } + return nil +} + +func (p *pruner) delete(namespace, name string, mapping *meta.RESTMapping) error { + ctx := context.TODO() + return runDelete(ctx, namespace, name, mapping, p.dynamicClient, p.cascadingStrategy, p.gracePeriod, p.dryRunStrategy == cmdutil.DryRunServer) +} + +func runDelete(ctx context.Context, namespace, name string, mapping *meta.RESTMapping, c dynamic.Interface, cascadingStrategy metav1.DeletionPropagation, gracePeriod int, serverDryRun bool) error { + options := asDeleteOptions(cascadingStrategy, gracePeriod) + if serverDryRun { + options.DryRun = []string{metav1.DryRunAll} + } + return c.Resource(mapping.Resource).Namespace(namespace).Delete(ctx, name, options) +} + +func asDeleteOptions(cascadingStrategy metav1.DeletionPropagation, gracePeriod int) metav1.DeleteOptions { + options := metav1.DeleteOptions{} + if gracePeriod >= 0 { + options = *metav1.NewDeleteOptions(int64(gracePeriod)) + } + options.PropagationPolicy = &cascadingStrategy + return options +} diff --git a/pkg/kubectl/cli-runtime/resource/helper.go 
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Vendored from k8s.io/cli-runtime/pkg/resource. Keep token-identical to
// upstream where possible to ease future re-syncs.
package resource

import (
	"context"
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/watch"
)

// metadataAccessor provides generic access to object metadata
// (resourceVersion, continue token, ...) for arbitrary runtime.Objects.
var metadataAccessor = meta.NewAccessor()

// Helper provides methods for retrieving or mutating a RESTful
// resource.
type Helper struct {
	// The name of this resource as the server would recognize it
	Resource string
	// The name of the subresource as the server would recognize it
	Subresource string
	// A RESTClient capable of mutating this resource.
	RESTClient RESTClient
	// True if the resource type is scoped to namespaces
	NamespaceScoped bool
	// If true, then use server-side dry-run to not persist changes to storage
	// for verbs and resources that support server-side dry-run.
	//
	// Note this should only be used against an apiserver with dry-run enabled,
	// and on resources that support dry-run. If the apiserver or the resource
	// does not support dry-run, then the change will be persisted to storage.
	ServerDryRun bool

	// FieldManager is the name associated with the actor or entity that is making
	// changes.
	FieldManager string

	// FieldValidation is the directive used to indicate how the server should perform
	// field validation (Ignore, Warn, or Strict)
	FieldValidation string
}

// NewHelper creates a Helper from a ResourceMapping
func NewHelper(client RESTClient, mapping *meta.RESTMapping) *Helper {
	return &Helper{
		Resource:        mapping.Resource.Resource,
		RESTClient:      client,
		NamespaceScoped: mapping.Scope.Name() == meta.RESTScopeNameNamespace,
	}
}

// DryRun, if true, will use server-side dry-run to not persist changes to storage.
// Otherwise, changes will be persisted to storage.
func (m *Helper) DryRun(dryRun bool) *Helper {
	m.ServerDryRun = dryRun
	return m
}

// WithFieldManager sets the field manager option to indicate the actor or entity
// that is making changes in a create or update operation.
func (m *Helper) WithFieldManager(fieldManager string) *Helper {
	m.FieldManager = fieldManager
	return m
}

// WithFieldValidation sets the field validation option to indicate
// how the server should perform field validation (Ignore, Warn, or Strict).
func (m *Helper) WithFieldValidation(validationDirective string) *Helper {
	m.FieldValidation = validationDirective
	return m
}

// WithSubresource sets the helper to access the subresource path
// (<resource>/[ns/<namespace>/]<name>/<subresource>).
func (m *Helper) WithSubresource(subresource string) *Helper {
	m.Subresource = subresource
	return m
}

// Get retrieves the named object (or its subresource, if one is set) from the server.
func (m *Helper) Get(namespace, name string) (runtime.Object, error) {
	req := m.RESTClient.Get().
		NamespaceIfScoped(namespace, m.NamespaceScoped).
		Resource(m.Resource).
		Name(name).
		SubResource(m.Subresource)
	return req.Do(context.TODO()).Get()
}

// List returns the objects of the helper's resource in the given namespace
// that match options.
// NOTE(review): the apiVersion parameter is not referenced in the body; it is
// kept only for signature compatibility with upstream callers.
func (m *Helper) List(namespace, apiVersion string, options *metav1.ListOptions) (runtime.Object, error) {
	req := m.RESTClient.Get().
		NamespaceIfScoped(namespace, m.NamespaceScoped).
		Resource(m.Resource).
		VersionedParams(options, metav1.ParameterCodec)
	return req.Do(context.TODO()).Get()
}

// FollowContinue handles the continue parameter returned by the API server when using list
// chunking. To take advantage of this, the initial ListOptions provided by the consumer
// should include a non-zero Limit parameter. listFunc is invoked repeatedly, with the
// Continue token from each returned list fed into the next call, until the server stops
// returning a token.
func FollowContinue(initialOpts *metav1.ListOptions,
	listFunc func(metav1.ListOptions) (runtime.Object, error)) error {
	opts := initialOpts
	for {
		list, err := listFunc(*opts)
		if err != nil {
			return err
		}
		// Error from the accessor is deliberately ignored: a missing continue
		// token simply ends the loop.
		nextContinueToken, _ := metadataAccessor.Continue(list)
		if len(nextContinueToken) == 0 {
			return nil
		}
		opts.Continue = nextContinueToken
	}
}

// EnhanceListError augments errors typically returned by List operations with additional context,
// making sure to retain the StatusError type when applicable.
// NOTE(review): errors are formatted with %v rather than %w (matching upstream
// kubectl), so the original error cannot be recovered with errors.Unwrap.
func EnhanceListError(err error, opts metav1.ListOptions, subj string) error {
	// ResourceExpired errors carry continue-token semantics; pass them through untouched.
	if apierrors.IsResourceExpired(err) {
		return err
	}
	if apierrors.IsBadRequest(err) || apierrors.IsNotFound(err) {
		if se, ok := err.(*apierrors.StatusError); ok {
			// modify the message without hiding this is an API error
			if len(opts.LabelSelector) == 0 && len(opts.FieldSelector) == 0 {
				se.ErrStatus.Message = fmt.Sprintf("Unable to list %q: %v", subj,
					se.ErrStatus.Message)
			} else {
				se.ErrStatus.Message = fmt.Sprintf(
					"Unable to find %q that match label selector %q, field selector %q: %v", subj,
					opts.LabelSelector,
					opts.FieldSelector, se.ErrStatus.Message)
			}
			return se
		}
		if len(opts.LabelSelector) == 0 && len(opts.FieldSelector) == 0 {
			return fmt.Errorf("Unable to list %q: %v", subj, err)
		}
		return fmt.Errorf("Unable to find %q that match label selector %q, field selector %q: %v",
			subj, opts.LabelSelector, opts.FieldSelector, err)
	}
	return err
}

// Watch begins watching the resource in the given namespace, forcing
// options.Watch to true before issuing the request.
func (m *Helper) Watch(namespace, apiVersion string, options *metav1.ListOptions) (watch.Interface, error) {
	options.Watch = true
	return m.RESTClient.Get().
		NamespaceIfScoped(namespace, m.NamespaceScoped).
		Resource(m.Resource).
		VersionedParams(options, metav1.ParameterCodec).
		Watch(context.TODO())
}

// WatchSingle watches a single named object starting at resourceVersion,
// using a metadata.name field selector to restrict the stream.
func (m *Helper) WatchSingle(namespace, name, resourceVersion string) (watch.Interface, error) {
	return m.RESTClient.Get().
		NamespaceIfScoped(namespace, m.NamespaceScoped).
		Resource(m.Resource).
		VersionedParams(&metav1.ListOptions{
			ResourceVersion: resourceVersion,
			Watch:           true,
			FieldSelector:   fields.OneTermEqualSelector("metadata.name", name).String(),
		}, metav1.ParameterCodec).
		Watch(context.TODO())
}

// Delete deletes the named object with default delete options.
func (m *Helper) Delete(namespace, name string) (runtime.Object, error) {
	return m.DeleteWithOptions(namespace, name, nil)
}

// DeleteWithOptions deletes the named object using the supplied options
// (nil is treated as empty). When ServerDryRun is set, DryRunAll is forced.
func (m *Helper) DeleteWithOptions(namespace, name string, options *metav1.DeleteOptions) (runtime.Object, error) {
	if options == nil {
		options = &metav1.DeleteOptions{}
	}
	if m.ServerDryRun {
		options.DryRun = []string{metav1.DryRunAll}
	}

	return m.RESTClient.Delete().
		NamespaceIfScoped(namespace, m.NamespaceScoped).
		Resource(m.Resource).
		Name(name).
		Body(options).
		Do(context.TODO()).
		Get()
}

// Create creates the object with default create options. If modify is true,
// the object's resourceVersion is cleared client-side before sending.
func (m *Helper) Create(namespace string, modify bool, obj runtime.Object) (runtime.Object, error) {
	return m.CreateWithOptions(namespace, modify, obj, nil)
}

// CreateWithOptions creates the object using the supplied options (nil is
// treated as empty), applying the helper's dry-run, field-manager and
// field-validation settings. If modify is true, the object's resourceVersion
// is cleared before the request (a create must not carry a version).
func (m *Helper) CreateWithOptions(namespace string, modify bool, obj runtime.Object, options *metav1.CreateOptions) (runtime.Object, error) {
	if options == nil {
		options = &metav1.CreateOptions{}
	}
	if m.ServerDryRun {
		options.DryRun = []string{metav1.DryRunAll}
	}
	if m.FieldManager != "" {
		options.FieldManager = m.FieldManager
	}
	if m.FieldValidation != "" {
		options.FieldValidation = m.FieldValidation
	}
	if modify {
		// Attempt to version the object based on client logic.
		version, err := metadataAccessor.ResourceVersion(obj)
		if err != nil {
			// We don't know how to clear the version on this object, so send it to the server as is
			return m.createResource(m.RESTClient, m.Resource, namespace, obj, options)
		}
		if version != "" {
			if err := metadataAccessor.SetResourceVersion(obj, ""); err != nil {
				return nil, err
			}
		}
	}

	return m.createResource(m.RESTClient, m.Resource, namespace, obj, options)
}

// createResource issues the POST that actually creates obj on the server.
func (m *Helper) createResource(c RESTClient, resource, namespace string, obj runtime.Object, options *metav1.CreateOptions) (runtime.Object, error) {
	return c.Post().
		NamespaceIfScoped(namespace, m.NamespaceScoped).
		Resource(resource).
		VersionedParams(options, metav1.ParameterCodec).
		Body(obj).
		Do(context.TODO()).
		Get()
}

// Patch applies the given patch (of type pt) to the named object or its
// subresource, honoring the helper's dry-run, field-manager and
// field-validation settings.
func (m *Helper) Patch(namespace, name string, pt types.PatchType, data []byte, options *metav1.PatchOptions) (runtime.Object, error) {
	if options == nil {
		options = &metav1.PatchOptions{}
	}
	if m.ServerDryRun {
		options.DryRun = []string{metav1.DryRunAll}
	}
	if m.FieldManager != "" {
		options.FieldManager = m.FieldManager
	}
	if m.FieldValidation != "" {
		options.FieldValidation = m.FieldValidation
	}
	return m.RESTClient.Patch(pt).
		NamespaceIfScoped(namespace, m.NamespaceScoped).
		Resource(m.Resource).
		Name(name).
		SubResource(m.Subresource).
		VersionedParams(options, metav1.ParameterCodec).
		Body(data).
		Do(context.TODO()).
		Get()
}

// Replace PUTs obj over the named server object. When obj carries no
// resourceVersion and overwrite is true, the current server version is
// fetched and copied onto obj so the update is not rejected as a conflict.
func (m *Helper) Replace(namespace, name string, overwrite bool, obj runtime.Object) (runtime.Object, error) {
	c := m.RESTClient
	var options = &metav1.UpdateOptions{}
	if m.ServerDryRun {
		options.DryRun = []string{metav1.DryRunAll}
	}
	if m.FieldManager != "" {
		options.FieldManager = m.FieldManager
	}
	if m.FieldValidation != "" {
		options.FieldValidation = m.FieldValidation
	}

	// Attempt to version the object based on client logic.
	version, err := metadataAccessor.ResourceVersion(obj)
	if err != nil {
		// We don't know how to version this object, so send it to the server as is
		return m.replaceResource(c, m.Resource, namespace, name, obj, options)
	}
	if version == "" && overwrite {
		// Retrieve the current version of the object to overwrite the server object
		serverObj, err := c.Get().NamespaceIfScoped(namespace, m.NamespaceScoped).Resource(m.Resource).Name(name).SubResource(m.Subresource).Do(context.TODO()).Get()
		if err != nil {
			// The object does not exist, but we want it to be created
			return m.replaceResource(c, m.Resource, namespace, name, obj, options)
		}
		serverVersion, err := metadataAccessor.ResourceVersion(serverObj)
		if err != nil {
			return nil, err
		}
		if err := metadataAccessor.SetResourceVersion(obj, serverVersion); err != nil {
			return nil, err
		}
	}

	return m.replaceResource(c, m.Resource, namespace, name, obj, options)
}

// replaceResource issues the PUT that actually replaces the named object.
func (m *Helper) replaceResource(c RESTClient, resource, namespace, name string, obj runtime.Object, options *metav1.UpdateOptions) (runtime.Object, error) {
	return c.Put().
		NamespaceIfScoped(namespace, m.NamespaceScoped).
		Resource(resource).
		Name(name).
		SubResource(m.Subresource).
		VersionedParams(options, metav1.ParameterCodec).
		Body(obj).
		Do(context.TODO()).
		Get()
}
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Vendored from k8s.io/cli-runtime/pkg/resource. Keep token-identical to
// upstream where possible to ease future re-syncs.
package resource

import (
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/restmapper"
)

// RESTClientGetter supplies the building blocks needed to construct REST
// clients: a rest.Config, a cached discovery client, and a RESTMapper.
type RESTClientGetter interface {
	ToRESTConfig() (*rest.Config, error)
	ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error)
	ToRESTMapper() (meta.RESTMapper, error)
}

// ClientConfigFunc lazily produces a rest.Config.
type ClientConfigFunc func() (*rest.Config, error)

// RESTMapperFunc lazily produces a RESTMapper.
type RESTMapperFunc func() (meta.RESTMapper, error)

// CategoryExpanderFunc lazily produces a CategoryExpander.
type CategoryExpanderFunc func() (restmapper.CategoryExpander, error)

// RESTClient is a client helper for dealing with RESTful resources
// in a generic way. It is the minimal verb set Helper needs.
type RESTClient interface {
	Get() *rest.Request
	Post() *rest.Request
	Patch(types.PatchType) *rest.Request
	Delete() *rest.Request
	Put() *rest.Request
}

// RequestTransform is a function that is given a chance to modify the outgoing request.
type RequestTransform func(*rest.Request)

// NewClientWithOptions wraps the provided RESTClient and invokes each transform on each
// newly created request. With no transforms the client is returned unwrapped.
func NewClientWithOptions(c RESTClient, transforms ...RequestTransform) RESTClient {
	if len(transforms) == 0 {
		return c
	}
	return &clientOptions{c: c, transforms: transforms}
}

// clientOptions decorates a RESTClient, running every registered transform
// over each request the underlying client creates.
type clientOptions struct {
	c          RESTClient
	transforms []RequestTransform
}

// modify applies every transform to req (in registration order) and returns it.
func (c *clientOptions) modify(req *rest.Request) *rest.Request {
	for _, transform := range c.transforms {
		transform(req)
	}
	return req
}

func (c *clientOptions) Get() *rest.Request {
	return c.modify(c.c.Get())
}

func (c *clientOptions) Post() *rest.Request {
	return c.modify(c.c.Post())
}

func (c *clientOptions) Patch(t types.PatchType) *rest.Request {
	return c.modify(c.c.Patch(t))
}

func (c *clientOptions) Delete() *rest.Request {
	return c.modify(c.c.Delete())
}

func (c *clientOptions) Put() *rest.Request {
	return c.modify(c.c.Put())
}

// ContentValidator is an interface that knows how to validate an API object serialized to a byte array.
type ContentValidator interface {
	ValidateBytes(data []byte) error
}