diff --git a/Makefile b/Makefile
index 07075a860..44fae1330 100644
--- a/Makefile
+++ b/Makefile
@@ -118,7 +118,7 @@ $(CTLPTL):
 CLUSTERCTL := $(abspath $(TOOLS_BIN_DIR)/clusterctl)
 clusterctl: $(CLUSTERCTL) ## Build a local copy of clusterctl
 $(CLUSTERCTL):
-	curl -sSLf https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.6.0/clusterctl-$$(go env GOOS)-$$(go env GOARCH) -o $(CLUSTERCTL)
+	curl -sSLf https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.6.2/clusterctl-$$(go env GOOS)-$$(go env GOARCH) -o $(CLUSTERCTL)
 	chmod a+rx $(CLUSTERCTL)

 HELM := $(abspath $(TOOLS_BIN_DIR)/helm)
diff --git a/Tiltfile b/Tiltfile
index 88e8064ec..7a6867904 100644
--- a/Tiltfile
+++ b/Tiltfile
@@ -20,7 +20,7 @@ settings = {
     "deploy_observability": False,
     "preload_images_for_kind": True,
     "kind_cluster_name": "caph",
-    "capi_version": "v1.6.0",
+    "capi_version": "v1.6.2",
     "cabpt_version": "v0.5.6",
     "cacppt_version": "v0.4.11",
     "cert_manager_version": "v1.11.0",
diff --git a/docs/developers/tilt.md b/docs/developers/tilt.md
index 7fba61ebb..4f7f002d0 100644
--- a/docs/developers/tilt.md
+++ b/docs/developers/tilt.md
@@ -7,7 +7,7 @@
   "deploy_observability": False,
   "preload_images_for_kind": True,
   "kind_cluster_name": "caph",
-  "capi_version": "v1.6.0",
+  "capi_version": "v1.6.2",
   "cabpt_version": "v0.5.5",
   "cacppt_version": "v0.4.10",
   "cert_manager_version": "v1.11.0",
@@ -33,7 +33,7 @@
 | deploy_observability | bool | false | no | If true, installs grafana, loki and promtail in the dev cluster. Grafana UI will be accessible via a link in the tilt console. Important! This feature requires the `helm` command to be available in the user's path |
 | preload_images_for_kind | bool | true | no | If set to true, uses `kind load docker-image` to preload images into a kind cluster |
 | kind_cluster_name | []object | "caph" | no | The name of the kind cluster to use when preloading images |
-| capi_version | string | "v1.6.0" | no | Version of CAPI |
+| capi_version | string | "v1.6.2" | no | Version of CAPI |
 | cert_manager_version | string | "v1.11.0" | no | Version of cert manager |
 | kustomize_substitutions | map[string]string | { "HCLOUD_REGION": "fsn1",
diff --git a/go.mod b/go.mod
index 2ac7cc2a8..190431e7f 100644
--- a/go.mod
+++ b/go.mod
@@ -23,8 +23,8 @@ require (
 	k8s.io/klog/v2 v2.120.1
 	k8s.io/kubectl v0.28.4
 	k8s.io/utils v0.0.0-20240310230437-4693a0247e57
-	sigs.k8s.io/cluster-api v1.6.0
-	sigs.k8s.io/cluster-api/test v1.6.0
+	sigs.k8s.io/cluster-api v1.6.2
+	sigs.k8s.io/cluster-api/test v1.6.2
 	sigs.k8s.io/controller-runtime v0.16.5
 	sigs.k8s.io/kind v0.22.0
 )
diff --git a/go.sum b/go.sum
index 96168189a..59ce94603 100644
--- a/go.sum
+++ b/go.sum
@@ -765,10 +765,12 @@ k8s.io/utils v0.0.0-20240310230437-4693a0247e57/go.mod h1:OLgZIPagt7ERELqWJFomSt
 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
 rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
 rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/cluster-api v1.6.0 h1:2bhVSnUbtWI8taCjd9lGiHExsRUpKf7Z1fXqi/IwYx4=
-sigs.k8s.io/cluster-api v1.6.0/go.mod h1:LB7u/WxiWj4/bbpHNOa1oQ8nq0MQ5iYlD0pGfRSBGLI=
+sigs.k8s.io/cluster-api v1.6.2 h1:ruUi4q/9jXFuI+hmnDjo9izHgrBk4bjfQXLKx678PQE=
+sigs.k8s.io/cluster-api v1.6.2/go.mod h1:Anm4cA6R/AIP6KdIuVje8CdFc/TdGl+382bi5oPawRc=
 sigs.k8s.io/cluster-api/test v1.6.0 h1:hvqUpSYxXCvs4FiEfsDpFZAfZ7i4kkP/59mVdFHlzSI=
 sigs.k8s.io/cluster-api/test v1.6.0/go.mod h1:DJtbkrnrH77cd3PnXeKCQDMtCGVCrHZHPOjMvEsLB2U=
+sigs.k8s.io/cluster-api/test v1.6.2 h1:RjrYL8Ag9vBxfv++RWEKy/vgTukQYeYVJBMkWvylASc=
+sigs.k8s.io/cluster-api/test v1.6.2/go.mod h1:qfkWBqONPAyOwsPFlS8tsrAq7pjKH55pCpKtjhEbUrk=
 sigs.k8s.io/controller-runtime v0.16.5 h1:yr1cEJbX08xsTW6XEIzT13KHHmIyX8Umvme2cULvFZw=
 sigs.k8s.io/controller-runtime v0.16.5/go.mod h1:j7bialYoSn142nv9sCOJmQgDXQXxnroFU4VnX/brVJ0=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
diff --git a/test/e2e/config/hetzner-ci.yaml b/test/e2e/config/hetzner-ci.yaml
index 7ebc2a057..add44ce35 100644
--- a/test/e2e/config/hetzner-ci.yaml
+++ b/test/e2e/config/hetzner-ci.yaml
@@ -13,8 +13,8 @@ providers:
   - name: cluster-api
     type: CoreProvider
     versions:
-      - name: v1.6.0
-        value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.6.0/core-components.yaml"
+      - name: v1.6.2
+        value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.6.2/core-components.yaml"
         type: "url"
         contract: v1beta1
         files:
@@ -30,8 +30,8 @@ providers:
   - name: kubeadm
     type: BootstrapProvider
     versions:
-      - name: v1.6.0
-        value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.6.0/bootstrap-components.yaml"
+      - name: v1.6.2
+        value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.6.2/bootstrap-components.yaml"
         type: "url"
         contract: "v1beta1"
         files:
@@ -47,8 +47,8 @@ providers:
   - name: kubeadm
     type: ControlPlaneProvider
     versions:
-      - name: v1.6.0
-        value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.6.0/control-plane-components.yaml"
+      - name: v1.6.2
+        value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.6.2/control-plane-components.yaml"
         type: "url"
         files:
           - sourcePath: "../data/shared/v1beta1/metadata.yaml"
@@ -120,7 +120,7 @@ variables:
   # NOTE: INIT_WITH_BINARY and INIT_WITH_KUBERNETES_VERSION are only used by the clusterctl upgrade test to initialize
   # the management cluster to be upgraded.
-  INIT_WITH_BINARY: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.6.0/clusterctl-linux-amd64"
+  INIT_WITH_BINARY: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.6.2/clusterctl-linux-amd64"
   INIT_WITH_PROVIDERS_CONTRACT: "v1beta1"
   INIT_WITH_KUBERNETES_VERSION: "v1.28.4"
   INIT_WITH_INFRASTRUCTURE_PROVIDER_VERSION: ${CAPH_LATEST_VERSION:-}
diff --git a/test/e2e/config/hetzner.yaml b/test/e2e/config/hetzner.yaml
index 01ebb5b56..b0f9fc71b 100644
--- a/test/e2e/config/hetzner.yaml
+++ b/test/e2e/config/hetzner.yaml
@@ -13,8 +13,8 @@ providers:
   - name: cluster-api
     type: CoreProvider
     versions:
-      - name: v1.6.0
-        value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.6.0/core-components.yaml"
+      - name: v1.6.2
+        value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.6.2/core-components.yaml"
         type: "url"
         contract: v1beta1
         files:
@@ -30,8 +30,8 @@ providers:
   - name: kubeadm
     type: BootstrapProvider
     versions:
-      - name: v1.6.0
-        value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.6.0/bootstrap-components.yaml"
+      - name: v1.6.2
+        value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.6.2/bootstrap-components.yaml"
         type: "url"
         contract: "v1beta1"
         files:
@@ -47,8 +47,8 @@ providers:
   - name: kubeadm
     type: ControlPlaneProvider
     versions:
-      - name: v1.6.0
-        value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.6.0/control-plane-components.yaml"
+      - name: v1.6.2
+        value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.6.2/control-plane-components.yaml"
         type: "url"
         files:
           - sourcePath: "../data/shared/v1beta1/metadata.yaml"
@@ -126,7 +126,7 @@ variables:
   # NOTE: INIT_WITH_BINARY and INIT_WITH_KUBERNETES_VERSION are only used by the clusterctl upgrade test to initialize
   # the management cluster to be upgraded.
-  INIT_WITH_BINARY: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.6.0/clusterctl-linux-amd64"
+  INIT_WITH_BINARY: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.6.2/clusterctl-linux-amd64"
   INIT_WITH_PROVIDERS_CONTRACT: "v1beta1"
   INIT_WITH_KUBERNETES_VERSION: "v1.28.4"
   INIT_WITH_INFRASTRUCTURE_PROVIDER_VERSION: ${CAPH_LATEST_VERSION:-}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 0331af8fa..321842198 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1066,7 +1066,7 @@ k8s.io/utils/pointer
 k8s.io/utils/ptr
 k8s.io/utils/strings/slices
 k8s.io/utils/trace
-# sigs.k8s.io/cluster-api v1.6.0
+# sigs.k8s.io/cluster-api v1.6.2
 ## explicit; go 1.20
 sigs.k8s.io/cluster-api/api/v1alpha4
 sigs.k8s.io/cluster-api/api/v1beta1
diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster/components.go b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster/components.go
index 3bc6a00e0..6dcbfc35c 100644
--- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster/components.go
+++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster/components.go
@@ -23,9 +23,11 @@ import (

 	"github.com/pkg/errors"
 	corev1 "k8s.io/api/core/v1"
+	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime/schema"
 	kerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -66,6 +68,9 @@ type ComponentsClient interface {
 	// DeleteWebhookNamespace deletes the core provider webhook namespace (eg. capi-webhook-system).
 	// This is required when upgrading to v1alpha4 where webhooks are included in the controller itself.
 	DeleteWebhookNamespace(ctx context.Context) error
+
+	// ValidateNoObjectsExist checks if custom resources of the custom resource definitions exist and returns an error if so.
+	ValidateNoObjectsExist(ctx context.Context, provider clusterctlv1.Provider) error
 }

 // providerComponents implements ComponentsClient.
@@ -257,6 +262,59 @@ func (p *providerComponents) DeleteWebhookNamespace(ctx context.Context) error {
 	return nil
 }

+func (p *providerComponents) ValidateNoObjectsExist(ctx context.Context, provider clusterctlv1.Provider) error {
+	log := logf.Log
+	log.Info("Checking for CRs", "Provider", provider.Name, "Version", provider.Version, "Namespace", provider.Namespace)
+
+	proxyClient, err := p.proxy.NewClient()
+	if err != nil {
+		return err
+	}
+
+	// Fetch all the components belonging to a provider.
+	// We want that the delete operation is able to clean-up everything.
+	labels := map[string]string{
+		clusterctlv1.ClusterctlLabel: "",
+		clusterv1.ProviderNameLabel:  provider.ManifestLabel(),
+	}
+
+	customResources := &apiextensionsv1.CustomResourceDefinitionList{}
+	if err := proxyClient.List(ctx, customResources, client.MatchingLabels(labels)); err != nil {
+		return err
+	}
+
+	// Filter the resources according to the delete options
+	crsHavingObjects := []string{}
+	for _, crd := range customResources.Items {
+		crd := crd
+		storageVersion, err := storageVersionForCRD(&crd)
+		if err != nil {
+			return err
+		}
+
+		list := &unstructured.UnstructuredList{}
+		list.SetGroupVersionKind(schema.GroupVersionKind{
+			Group:   crd.Spec.Group,
+			Version: storageVersion,
+			Kind:    crd.Spec.Names.ListKind,
+		})
+
+		if err := proxyClient.List(ctx, list); err != nil {
+			return err
+		}
+
+		if len(list.Items) > 0 {
+			crsHavingObjects = append(crsHavingObjects, crd.Kind)
+		}
+	}
+
+	if len(crsHavingObjects) > 0 {
+		return fmt.Errorf("found existing objects for provider CRDs %q: [%s]. Please delete these objects first before running clusterctl delete with --include-crd", provider.GetName(), strings.Join(crsHavingObjects, ", "))
+	}
+
+	return nil
+}
+
 // newComponentsClient returns a providerComponents.
 func newComponentsClient(proxy Proxy) *providerComponents {
 	return &providerComponents{
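
Note on the new `ValidateNoObjectsExist` above: the guard reduces to two list operations per CRD — find the provider's CRDs by label, then probe each CRD's storage version for remaining custom resources. Below is a condensed, standalone sketch of that pattern built directly on controller-runtime. The hardcoded label keys and the inlined storage-version lookup stand in for clusterctl's internal constants and its `storageVersionForCRD` helper, so treat the names as illustrative, not as the vendored implementation.

```go
package sketch

import (
	"context"
	"fmt"
	"strings"

	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// validateNoObjectsExist lists the CRDs labeled as belonging to one provider
// and returns an error if any of them still has instances in the cluster.
func validateNoObjectsExist(ctx context.Context, c client.Client, providerLabel string) error {
	// The label keys mirror clusterctl's ClusterctlLabel / ProviderNameLabel convention.
	crds := &apiextensionsv1.CustomResourceDefinitionList{}
	labels := client.MatchingLabels{
		"clusterctl.cluster.x-k8s.io": "",
		"cluster.x-k8s.io/provider":   providerLabel,
	}
	if err := c.List(ctx, crds, labels); err != nil {
		return err
	}

	crdsWithObjects := []string{}
	for i := range crds.Items {
		crd := crds.Items[i]

		// Inlined stand-in for clusterctl's storageVersionForCRD helper:
		// pick the version the API server persists objects in.
		storageVersion := ""
		for _, v := range crd.Spec.Versions {
			if v.Storage {
				storageVersion = v.Name
				break
			}
		}
		if storageVersion == "" {
			return fmt.Errorf("CRD %q has no storage version", crd.Name)
		}

		// List instances generically via the GVK recorded in the CRD.
		list := &unstructured.UnstructuredList{}
		list.SetGroupVersionKind(schema.GroupVersionKind{
			Group:   crd.Spec.Group,
			Version: storageVersion,
			Kind:    crd.Spec.Names.ListKind,
		})
		if err := c.List(ctx, list); err != nil {
			return err
		}
		if len(list.Items) > 0 {
			crdsWithObjects = append(crdsWithObjects, crd.Name)
		}
	}

	if len(crdsWithObjects) > 0 {
		return fmt.Errorf("objects still exist for CRDs [%s]; delete them before removing the CRDs",
			strings.Join(crdsWithObjects, ", "))
	}
	return nil
}
```

Listing via `unstructured.UnstructuredList` keeps the check generic: the caller never needs the provider's typed API, only the group/version/kind recorded in each CRD.
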
diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/client/config/cert_manager_client.go b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/client/config/cert_manager_client.go
index e640ae3d5..55cbf6bb0 100644
--- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/client/config/cert_manager_client.go
+++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/client/config/cert_manager_client.go
@@ -29,7 +29,7 @@ const (
 	CertManagerConfigKey = "cert-manager"

 	// CertManagerDefaultVersion defines the default cert-manager version to be used by clusterctl.
-	CertManagerDefaultVersion = "v1.13.2"
+	CertManagerDefaultVersion = "v1.14.2"

 	// CertManagerDefaultURL defines the default cert-manager repository url to be used by clusterctl.
 	// NOTE: At runtime CertManagerDefaultVersion may be replaced with the
diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/client/config/providers_client.go b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/client/config/providers_client.go
index 94fa74bc6..3adad5719 100644
--- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/client/config/providers_client.go
+++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/client/config/providers_client.go
@@ -64,6 +64,7 @@ const (
 	VirtinkProviderName           = "virtink"
 	CoxEdgeProviderName           = "coxedge"
 	ProxmoxProviderName           = "proxmox"
+	K0smotronProviderName         = "k0sproject-k0smotron"
 )

 // Bootstrap providers.
@@ -74,6 +75,7 @@ const (
 	OracleCloudNativeBootstrapProviderName = "ocne"
 	KubeKeyK3sBootstrapProviderName        = "kubekey-k3s"
 	RKE2BootstrapProviderName              = "rke2"
+	K0smotronBootstrapProviderName         = "k0sproject-k0smotron"
 )

 // ControlPlane providers.
@@ -86,6 +88,7 @@ const (
 	KubeKeyK3sControlPlaneProviderName = "kubekey-k3s"
 	KamajiControlPlaneProviderName     = "kamaji"
 	RKE2ControlPlaneProviderName       = "rke2"
+	K0smotronControlPlaneProviderName  = "k0sproject-k0smotron"
 )

 // Add-on providers.
@@ -276,6 +279,11 @@ func (p *providersClient) defaults() []Provider {
 			url:          "https://github.com/ionos-cloud/cluster-api-provider-proxmox/releases/latest/infrastructure-components.yaml",
 			providerType: clusterctlv1.InfrastructureProviderType,
 		},
+		&provider{
+			name:         K0smotronProviderName,
+			url:          "https://github.com/k0sproject/k0smotron/releases/latest/infrastructure-components.yaml",
+			providerType: clusterctlv1.InfrastructureProviderType,
+		},

 		// Bootstrap providers
 		&provider{
@@ -308,6 +316,11 @@ func (p *providersClient) defaults() []Provider {
 			url:          "https://github.com/rancher-sandbox/cluster-api-provider-rke2/releases/latest/bootstrap-components.yaml",
 			providerType: clusterctlv1.BootstrapProviderType,
 		},
+		&provider{
+			name:         K0smotronBootstrapProviderName,
+			url:          "https://github.com/k0sproject/k0smotron/releases/latest/bootstrap-components.yaml",
+			providerType: clusterctlv1.BootstrapProviderType,
+		},

 		// ControlPlane providers
 		&provider{
@@ -350,6 +363,11 @@ func (p *providersClient) defaults() []Provider {
 			url:          "https://github.com/rancher-sandbox/cluster-api-provider-rke2/releases/latest/control-plane-components.yaml",
 			providerType: clusterctlv1.ControlPlaneProviderType,
 		},
+		&provider{
+			name:         K0smotronControlPlaneProviderName,
+			url:          "https://github.com/k0sproject/k0smotron/releases/latest/control-plane-components.yaml",
+			providerType: clusterctlv1.ControlPlaneProviderType,
+		},

 		// Add-on providers
 		&provider{
diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/client/delete.go b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/client/delete.go
index f59f44354..68b124b48 100644
--- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/client/delete.go
+++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/client/delete.go
@@ -21,6 +21,7 @@ import (

 	"github.com/pkg/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	kerrors "k8s.io/apimachinery/pkg/util/errors"

 	clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3"
 	"sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster"
@@ -156,6 +157,19 @@ func (c *clusterctlClient) Delete(ctx context.Context, options DeleteOptions) er
 		}
 	}

+	if options.IncludeCRDs {
+		errList := []error{}
+		for _, provider := range providersToDelete {
+			err = clusterClient.ProviderComponents().ValidateNoObjectsExist(ctx, provider)
+			if err != nil {
+				errList = append(errList, err)
+			}
+		}
+		if len(errList) > 0 {
+			return kerrors.NewAggregate(errList)
+		}
+	}
+
 	// Delete the selected providers.
 	for _, provider := range providersToDelete {
 		if err := clusterClient.ProviderComponents().Delete(ctx, cluster.DeleteOptions{Provider: provider, IncludeNamespace: options.IncludeNamespace, IncludeCRDs: options.IncludeCRDs, SkipInventory: options.SkipInventory}); err != nil {
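
The `Delete` change above wires that guard in as a pre-flight step: when `--include-crd` is set, every selected provider is validated before anything is deleted, and the individual failures are folded into one aggregate error so the user sees all blocking providers at once. A minimal sketch of that validate-all-then-fail shape, with `checkProvider` as a hypothetical stand-in for the `ValidateNoObjectsExist` call:

```go
package sketch

import (
	"context"

	kerrors "k8s.io/apimachinery/pkg/util/errors"
)

// runPreflight mirrors the shape of the new --include-crd guard in Delete:
// validate every provider first, collect all failures, and only let the
// caller proceed to deletion when the aggregate is empty.
func runPreflight(ctx context.Context, providers []string, checkProvider func(context.Context, string) error) error {
	errList := []error{}
	for _, p := range providers {
		if err := checkProvider(ctx, p); err != nil {
			errList = append(errList, err)
		}
	}
	// NewAggregate returns nil for an empty list, so it can be returned directly.
	return kerrors.NewAggregate(errList)
}
```

`kerrors.NewAggregate` returns nil when the list is empty, which is why the vendored code can equivalently guard on `len(errList) > 0` before returning.
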
diff --git a/vendor/sigs.k8s.io/cluster-api/controllers/remote/cluster_cache_tracker.go b/vendor/sigs.k8s.io/cluster-api/controllers/remote/cluster_cache_tracker.go
index 9bd40ceab..3c1c67248 100644
--- a/vendor/sigs.k8s.io/cluster-api/controllers/remote/cluster_cache_tracker.go
+++ b/vendor/sigs.k8s.io/cluster-api/controllers/remote/cluster_cache_tracker.go
@@ -29,6 +29,7 @@ import (
 	"github.com/pkg/errors"
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/serializer"
@@ -129,6 +130,9 @@ func setDefaultOptions(opts *ClusterCacheTrackerOptions) {
 		opts.Log = &l
 	}

+	l := opts.Log.WithValues("component", "remote/clustercachetracker")
+	opts.Log = &l
+
 	if len(opts.ClientUncachedObjects) == 0 {
 		opts.ClientUncachedObjects = []client.Object{
 			&corev1.ConfigMap{},
@@ -295,8 +299,14 @@ func (t *ClusterCacheTracker) newClusterAccessor(ctx context.Context, cluster cl
 		return nil, errors.Wrapf(err, "error fetching REST client config for remote cluster %q", cluster.String())
 	}

-	// Create a client and a cache for the cluster.
-	c, uncachedClient, cache, err := t.createClient(ctx, config, cluster, indexes)
+	// Create a http client and a mapper for the cluster.
+	httpClient, mapper, err := t.createHTTPClientAndMapper(config, cluster)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error creating http client and mapper for remote cluster %q", cluster.String())
+	}
+
+	// Create an uncached client for the cluster.
+	uncachedClient, err := t.createUncachedClient(config, cluster, httpClient, mapper)
 	if err != nil {
 		return nil, err
 	}
@@ -321,16 +331,23 @@ func (t *ClusterCacheTracker) newClusterAccessor(ctx context.Context, cluster cl
 		config.CAFile = inClusterConfig.CAFile
 		config.Host = inClusterConfig.Host

-		// Create a new client and overwrite the previously created client.
-		c, _, cache, err = t.createClient(ctx, config, cluster, indexes)
+		// Update the http client and the mapper to use in-cluster config.
+		httpClient, mapper, err = t.createHTTPClientAndMapper(config, cluster)
 		if err != nil {
-			return nil, errors.Wrap(err, "error creating client for self-hosted cluster")
+			return nil, errors.Wrapf(err, "error creating http client and mapper (using in-cluster config) for remote cluster %q", cluster.String())
 		}
+
 		log.Info(fmt.Sprintf("Creating cluster accessor for cluster %q with in-cluster service %q", cluster.String(), config.Host))
 	} else {
 		log.Info(fmt.Sprintf("Creating cluster accessor for cluster %q with the regular apiserver endpoint %q", cluster.String(), config.Host))
 	}

+	// Create a client and a cache for the cluster.
+	cachedClient, err := t.createCachedClient(ctx, config, cluster, httpClient, mapper, indexes)
+	if err != nil {
+		return nil, err
+	}
+
 	// Generating a new private key to be used for generating temporary certificates to connect to
 	// etcd on the target cluster.
 	// NOTE: Generating a private key is an expensive operation, so we store it in the cluster accessor.
@@ -340,9 +357,9 @@ func (t *ClusterCacheTracker) newClusterAccessor(ctx context.Context, cluster cl
 	}

 	return &clusterAccessor{
-		cache:                    cache,
+		cache:                    cachedClient.Cache,
 		config:                   config,
-		client:                   c,
+		client:                   cachedClient.Client,
 		watches:                  sets.Set[string]{},
 		etcdClientCertificateKey: etcdKey,
 	}, nil
@@ -374,18 +391,18 @@ func (t *ClusterCacheTracker) runningOnWorkloadCluster(ctx context.Context, c cl
 	return t.controllerPodMetadata.UID == pod.UID, nil
 }

-// createClient creates a cached client, and uncached client and a mapper based on a rest.Config.
-func (t *ClusterCacheTracker) createClient(ctx context.Context, config *rest.Config, cluster client.ObjectKey, indexes []Index) (client.Client, client.Client, *stoppableCache, error) {
+// createHTTPClientAndMapper creates a http client and a dynamic rest mapper for the given cluster, based on the rest.Config.
+func (t *ClusterCacheTracker) createHTTPClientAndMapper(config *rest.Config, cluster client.ObjectKey) (*http.Client, meta.RESTMapper, error) {
 	// Create a http client for the cluster.
 	httpClient, err := rest.HTTPClientFor(config)
 	if err != nil {
-		return nil, nil, nil, errors.Wrapf(err, "error creating client for remote cluster %q: error creating http client", cluster.String())
+		return nil, nil, errors.Wrapf(err, "error creating client for remote cluster %q: error creating http client", cluster.String())
 	}

 	// Create a mapper for it
 	mapper, err := apiutil.NewDynamicRESTMapper(config, httpClient)
 	if err != nil {
-		return nil, nil, nil, errors.Wrapf(err, "error creating client for remote cluster %q: error creating dynamic rest mapper", cluster.String())
+		return nil, nil, errors.Wrapf(err, "error creating client for remote cluster %q: error creating dynamic rest mapper", cluster.String())
 	}

 	// Verify if we can get a rest mapping from the workload cluster apiserver.
@@ -393,9 +410,34 @@ func (t *ClusterCacheTracker) createClient(ctx context.Context, config *rest.Con
 	// to avoid further effort creating a cache and a client and to produce a clearer error message.
 	_, err = mapper.RESTMapping(corev1.SchemeGroupVersion.WithKind("Node").GroupKind(), corev1.SchemeGroupVersion.Version)
 	if err != nil {
-		return nil, nil, nil, errors.Wrapf(err, "error creating client for remote cluster %q: error getting rest mapping", cluster.String())
+		return nil, nil, errors.Wrapf(err, "error creating client for remote cluster %q: error getting rest mapping", cluster.String())
 	}

+	return httpClient, mapper, nil
+}
+
+// createUncachedClient creates an uncached client for the given cluster, based on the rest.Config.
+func (t *ClusterCacheTracker) createUncachedClient(config *rest.Config, cluster client.ObjectKey, httpClient *http.Client, mapper meta.RESTMapper) (client.Client, error) {
+	// Create the uncached client for the remote cluster
+	uncachedClient, err := client.New(config, client.Options{
+		Scheme:     t.scheme,
+		Mapper:     mapper,
+		HTTPClient: httpClient,
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "error creating uncached client for remote cluster %q", cluster.String())
+	}
+
+	return uncachedClient, nil
+}
+
+type cachedClientOutput struct {
+	Client client.Client
+	Cache  *stoppableCache
+}
+
+// createCachedClient creates a cached client for the given cluster, based on a rest.Config.
+func (t *ClusterCacheTracker) createCachedClient(ctx context.Context, config *rest.Config, cluster client.ObjectKey, httpClient *http.Client, mapper meta.RESTMapper, indexes []Index) (*cachedClientOutput, error) {
 	// Create the cache for the remote cluster
 	cacheOptions := cache.Options{
 		HTTPClient: httpClient,
@@ -404,7 +446,7 @@ func (t *ClusterCacheTracker) createClient(ctx context.Context, config *rest.Con
 	}
 	remoteCache, err := cache.New(config, cacheOptions)
 	if err != nil {
-		return nil, nil, nil, errors.Wrapf(err, "error creating client for remote cluster %q: error creating cache", cluster.String())
+		return nil, errors.Wrapf(err, "error creating cached client for remote cluster %q: error creating cache", cluster.String())
 	}

 	cacheCtx, cacheCtxCancel := context.WithCancel(ctx)
@@ -417,7 +459,7 @@ func (t *ClusterCacheTracker) createClient(ctx context.Context, config *rest.Con

 	for _, index := range indexes {
 		if err := cache.IndexField(ctx, index.Object, index.Field, index.ExtractValue); err != nil {
-			return nil, nil, nil, errors.Wrapf(err, "error adding index for field %q to cache for remote cluster %q", index.Field, cluster.String())
+			return nil, errors.Wrapf(err, "error creating cached client for remote cluster %q: error adding index for field %q to cache", cluster.String(), index.Field)
 		}
 	}

@@ -433,19 +475,9 @@ func (t *ClusterCacheTracker) createClient(ctx context.Context, config *rest.Con
 		},
 	})
 	if err != nil {
-		return nil, nil, nil, errors.Wrapf(err, "error creating client for remote cluster %q", cluster.String())
+		return nil, errors.Wrapf(err, "error creating cached client for remote cluster %q", cluster.String())
 	}

-	// Create an uncached client. This is used in `runningOnWorkloadCluster` to ensure we don't continuously cache
-	// pods in the client.
-	uncachedClient, err := client.New(config, client.Options{
-		Scheme:     t.scheme,
-		Mapper:     mapper,
-		HTTPClient: httpClient,
-	})
-	if err != nil {
-		return nil, nil, nil, errors.Wrapf(err, "error creating uncached client for remote cluster %q", cluster.String())
-	}
-
 	// Start the cache!!!
 	go cache.Start(cacheCtx) //nolint:errcheck
@@ -454,7 +486,7 @@ func (t *ClusterCacheTracker) createClient(ctx context.Context, config *rest.Con
 	defer cacheSyncCtxCancel()
 	if !cache.WaitForCacheSync(cacheSyncCtx) {
 		cache.Stop()
-		return nil, nil, nil, fmt.Errorf("failed waiting for cache for remote cluster %v to sync: %w", cluster, cacheCtx.Err())
+		return nil, fmt.Errorf("failed waiting for cache for remote cluster %v to sync: %w", cluster, cacheCtx.Err())
 	}

 	// Wrap the cached client with a client that sets timeouts on all Get and List calls
@@ -471,7 +503,10 @@ func (t *ClusterCacheTracker) createClient(ctx context.Context, config *rest.Con
 		httpClient: httpClient,
 	})

-	return cachedClient, uncachedClient, cache, nil
+	return &cachedClientOutput{
+		Client: cachedClient,
+		Cache:  cache,
+	}, nil
 }

 // deleteAccessor stops a clusterAccessor's cache and removes the clusterAccessor from the tracker.
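
The `ClusterCacheTracker` change is a behavior-preserving decomposition: the old `createClient`, which returned the cached client, the uncached client, and the cache in one call, is split into `createHTTPClientAndMapper`, `createUncachedClient`, and `createCachedClient`, so the self-hosted branch of `newClusterAccessor` only re-runs the cheap HTTP-client/mapper step against the in-cluster config instead of constructing and discarding a second cache. The construction order can be sketched with the same controller-runtime calls the vendored code uses; the `newUncachedClient` wrapper and the caller-supplied `scheme` are assumptions for illustration:

```go
package sketch

import (
	"net/http"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)

// newUncachedClient follows the same order as the refactored tracker:
// first an HTTP client and a dynamic REST mapper from the rest.Config
// (both cheap to rebuild if the config is later swapped for the
// in-cluster one), then a client constructed on top of them.
func newUncachedClient(config *rest.Config, scheme *runtime.Scheme) (client.Client, *http.Client, meta.RESTMapper, error) {
	// HTTP client derived from the rest.Config.
	httpClient, err := rest.HTTPClientFor(config)
	if err != nil {
		return nil, nil, nil, err
	}

	// Dynamic REST mapper that discovers API resources lazily.
	mapper, err := apiutil.NewDynamicRESTMapper(config, httpClient)
	if err != nil {
		return nil, nil, nil, err
	}

	// Uncached client reusing the HTTP client and mapper.
	c, err := client.New(config, client.Options{
		Scheme:     scheme,
		Mapper:     mapper,
		HTTPClient: httpClient,
	})
	if err != nil {
		return nil, nil, nil, err
	}
	return c, httpClient, mapper, nil
}
```

In the vendored version the cached client is deliberately built last, once the config is final — that ordering is what removes the duplicate cache construction the old code paid in the self-hosted case.
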