[WIP] ✨ [Don't Review] Introduce new ClusterCache #11247

Open · wants to merge 4 commits into base: main
6 changes: 3 additions & 3 deletions bootstrap/kubeadm/controllers/alias.go
@@ -25,7 +25,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/controller"

kubeadmbootstrapcontrollers "sigs.k8s.io/cluster-api/bootstrap/kubeadm/internal/controllers"
"sigs.k8s.io/cluster-api/controllers/remote"
"sigs.k8s.io/cluster-api/controllers/clustercache"
)

// Following types provides access to reconcilers implemented in internal/controllers, thus
@@ -41,7 +41,7 @@ type KubeadmConfigReconciler struct {
Client client.Client
SecretCachingClient client.Client

Tracker *remote.ClusterCacheTracker
ClusterCache clustercache.ClusterCache

// WatchFilterValue is the label value used to filter events prior to reconciliation.
WatchFilterValue string
@@ -55,7 +55,7 @@ func (r *KubeadmConfigReconciler) SetupWithManager(ctx context.Context, mgr ctrl
return (&kubeadmbootstrapcontrollers.KubeadmConfigReconciler{
Client: r.Client,
SecretCachingClient: r.SecretCachingClient,
Tracker: r.Tracker,
ClusterCache: r.ClusterCache,
WatchFilterValue: r.WatchFilterValue,
TokenTTL: r.TokenTTL,
}).SetupWithManager(ctx, mgr, options)
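For consumers of the exported alias types, the change is mechanical: the Tracker *remote.ClusterCacheTracker field becomes ClusterCache clustercache.ClusterCache. A minimal wiring sketch under that assumption follows; the package name, helper name, and surrounding variables are illustrative, not part of this PR.

package wiring

import (
	"context"
	"os"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller"

	kubeadmbootstrapcontrollers "sigs.k8s.io/cluster-api/bootstrap/kubeadm/controllers"
	"sigs.k8s.io/cluster-api/controllers/clustercache"
)

// setupKubeadmConfigReconciler sketches how a caller wires the exported reconciler
// after this change: the ClusterCache field simply replaces the former Tracker field.
func setupKubeadmConfigReconciler(ctx context.Context, mgr ctrl.Manager, secretCachingClient client.Client, clusterCache clustercache.ClusterCache) {
	if err := (&kubeadmbootstrapcontrollers.KubeadmConfigReconciler{
		Client:              mgr.GetClient(),
		SecretCachingClient: secretCachingClient,
		ClusterCache:        clusterCache, // previously: Tracker: tracker
	}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: 10}); err != nil {
		ctrl.Log.Error(err, "unable to create controller", "controller", "KubeadmConfig")
		os.Exit(1)
	}
}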
(Next changed file; path not captured in this view.)
@@ -50,6 +50,7 @@ import (
"sigs.k8s.io/cluster-api/bootstrap/kubeadm/internal/locking"
kubeadmtypes "sigs.k8s.io/cluster-api/bootstrap/kubeadm/types"
bsutil "sigs.k8s.io/cluster-api/bootstrap/util"
"sigs.k8s.io/cluster-api/controllers/clustercache"
"sigs.k8s.io/cluster-api/controllers/remote"
expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
"sigs.k8s.io/cluster-api/feature"
@@ -83,7 +84,7 @@ type InitLocker interface {
type KubeadmConfigReconciler struct {
Client client.Client
SecretCachingClient client.Client
Tracker *remote.ClusterCacheTracker
ClusterCache clustercache.ClusterCache
KubeadmInitLock InitLocker

// WatchFilterValue is the label value used to filter events prior to reconciliation.
@@ -320,7 +321,7 @@ func (r *KubeadmConfigReconciler) refreshBootstrapTokenIfNeeded(ctx context.Cont
log := ctrl.LoggerFrom(ctx)
token := config.Spec.JoinConfiguration.Discovery.BootstrapToken.Token

remoteClient, err := r.Tracker.GetClient(ctx, util.ObjectKey(cluster))
remoteClient, err := r.ClusterCache.GetClient(ctx, util.ObjectKey(cluster))
if err != nil {
return ctrl.Result{}, err
}
@@ -367,7 +368,7 @@ func (r *KubeadmConfigReconciler) refreshBootstrapTokenIfNeeded(ctx context.Cont
func (r *KubeadmConfigReconciler) rotateMachinePoolBootstrapToken(ctx context.Context, config *bootstrapv1.KubeadmConfig, cluster *clusterv1.Cluster, scope *Scope) (ctrl.Result, error) {
log := ctrl.LoggerFrom(ctx)
log.V(2).Info("Config is owned by a MachinePool, checking if token should be rotated")
remoteClient, err := r.Tracker.GetClient(ctx, util.ObjectKey(cluster))
remoteClient, err := r.ClusterCache.GetClient(ctx, util.ObjectKey(cluster))
if err != nil {
return ctrl.Result{}, err
}
@@ -1087,7 +1088,7 @@ func (r *KubeadmConfigReconciler) reconcileDiscovery(ctx context.Context, cluste

// if BootstrapToken already contains a token, respect it; otherwise create a new bootstrap token for the node to join
if config.Spec.JoinConfiguration.Discovery.BootstrapToken.Token == "" {
remoteClient, err := r.Tracker.GetClient(ctx, util.ObjectKey(cluster))
remoteClient, err := r.ClusterCache.GetClient(ctx, util.ObjectKey(cluster))
if err != nil {
return ctrl.Result{}, err
}
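Within the reconciler, the call sites shown above change uniformly: the workload-cluster client is now requested from the ClusterCache instead of the Tracker, and any error is returned so the request is requeued. A small sketch of that pattern, assuming only the GetClient signature visible in this diff; the helper itself is illustrative, not part of the PR.

package controllers

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/controllers/clustercache"
	"sigs.k8s.io/cluster-api/util"
)

// getWorkloadClusterClient mirrors the call sites above: ask the ClusterCache for a
// client to the workload cluster and bubble the error up so the reconcile is retried.
func getWorkloadClusterClient(ctx context.Context, cc clustercache.ClusterCache, cluster *clusterv1.Cluster) (client.Client, error) {
	remoteClient, err := cc.GetClient(ctx, util.ObjectKey(cluster))
	if err != nil {
		return nil, err
	}
	return remoteClient, nil
}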
(Next changed file; path not captured in this view.)
@@ -24,7 +24,6 @@ import (
"time"

ignition "github.com/flatcar/ignition/config/v2_3"
"github.com/go-logr/logr"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -34,14 +33,13 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/yaml"

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
bootstrapbuilder "sigs.k8s.io/cluster-api/bootstrap/kubeadm/internal/builder"
"sigs.k8s.io/cluster-api/controllers/remote"
"sigs.k8s.io/cluster-api/controllers/clustercache"
expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
"sigs.k8s.io/cluster-api/feature"
"sigs.k8s.io/cluster-api/internal/test/builder"
@@ -509,7 +507,7 @@ func TestKubeadmConfigReconciler_Reconcile_GenerateCloudConfigData(t *testing.T)
k := &KubeadmConfigReconciler{
Client: myclient,
SecretCachingClient: myclient,
Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, myclient, myclient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}),
ClusterCache: clustercache.NewFakeClusterCache(myclient, client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}),
KubeadmInitLock: &myInitLocker{},
}

@@ -571,7 +569,7 @@ func TestKubeadmConfigReconciler_Reconcile_ErrorIfJoiningControlPlaneHasInvalidC
k := &KubeadmConfigReconciler{
Client: myclient,
SecretCachingClient: myclient,
Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, myclient, myclient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}),
ClusterCache: clustercache.NewFakeClusterCache(myclient, client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}),
KubeadmInitLock: &myInitLocker{},
}

@@ -693,7 +691,7 @@ func TestReconcileIfJoinCertificatesAvailableConditioninNodesAndControlPlaneIsRe
k := &KubeadmConfigReconciler{
Client: myclient,
SecretCachingClient: myclient,
Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, myclient, myclient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}),
ClusterCache: clustercache.NewFakeClusterCache(myclient, client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}),
KubeadmInitLock: &myInitLocker{},
}

@@ -770,7 +768,7 @@ func TestReconcileIfJoinNodePoolsAndControlPlaneIsReady(t *testing.T) {
k := &KubeadmConfigReconciler{
Client: myclient,
SecretCachingClient: myclient,
Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, myclient, myclient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}),
ClusterCache: clustercache.NewFakeClusterCache(myclient, client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}),
KubeadmInitLock: &myInitLocker{},
}

@@ -871,7 +869,7 @@ func TestBootstrapDataFormat(t *testing.T) {
k := &KubeadmConfigReconciler{
Client: myclient,
SecretCachingClient: myclient,
Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, myclient, myclient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}),
ClusterCache: clustercache.NewFakeClusterCache(myclient, client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}),
KubeadmInitLock: &myInitLocker{},
}
request := ctrl.Request{
@@ -952,7 +950,7 @@ func TestKubeadmConfigSecretCreatedStatusNotPatched(t *testing.T) {
k := &KubeadmConfigReconciler{
Client: myclient,
SecretCachingClient: myclient,
Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, myclient, myclient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}),
ClusterCache: clustercache.NewFakeClusterCache(myclient, client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}),
KubeadmInitLock: &myInitLocker{},
}
request := ctrl.Request{
@@ -1033,7 +1031,7 @@ func TestBootstrapTokenTTLExtension(t *testing.T) {
SecretCachingClient: myclient,
KubeadmInitLock: &myInitLocker{},
TokenTTL: DefaultTokenTTL,
Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, remoteClient, remoteClient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}),
ClusterCache: clustercache.NewFakeClusterCache(remoteClient, client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}),
}
request := ctrl.Request{
NamespacedName: client.ObjectKey{
@@ -1279,7 +1277,7 @@ func TestBootstrapTokenRotationMachinePool(t *testing.T) {
SecretCachingClient: myclient,
KubeadmInitLock: &myInitLocker{},
TokenTTL: DefaultTokenTTL,
Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, remoteClient, remoteClient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}),
ClusterCache: clustercache.NewFakeClusterCache(remoteClient, client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}),
}
request := ctrl.Request{
NamespacedName: client.ObjectKey{
@@ -1602,7 +1600,7 @@ func TestKubeadmConfigReconciler_Reconcile_DiscoveryReconcileBehaviors(t *testin
k := &KubeadmConfigReconciler{
Client: fakeClient,
SecretCachingClient: fakeClient,
Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), fakeClient, fakeClient, fakeClient.Scheme(), client.ObjectKey{Name: tc.cluster.Name, Namespace: tc.cluster.Namespace}),
ClusterCache: clustercache.NewFakeClusterCache(fakeClient, client.ObjectKey{Name: tc.cluster.Name, Namespace: tc.cluster.Namespace}),
KubeadmInitLock: &myInitLocker{},
}

@@ -1827,7 +1825,7 @@ func TestKubeadmConfigReconciler_Reconcile_AlwaysCheckCAVerificationUnlessReques
reconciler := KubeadmConfigReconciler{
Client: myclient,
SecretCachingClient: myclient,
Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, myclient, myclient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}),
ClusterCache: clustercache.NewFakeClusterCache(myclient, client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}),
KubeadmInitLock: &myInitLocker{},
}

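In the tests, remote.NewTestClusterCacheTracker(logger, client, remoteClient, scheme, key) is replaced by clustercache.NewFakeClusterCache(remoteClient, key), which, judging from the hunks above, drops the logger and scheme arguments. A sketch of a test constructed this way; the test name, fake objects, and assertion are illustrative, not from this PR.

package controllers

import (
	"testing"

	. "github.com/onsi/gomega"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"

	"sigs.k8s.io/cluster-api/controllers/clustercache"
)

// TestReconcilerUsesFakeClusterCache sketches the new test wiring: the fake cluster
// cache only needs the workload-cluster client and the cluster's ObjectKey.
func TestReconcilerUsesFakeClusterCache(t *testing.T) {
	g := NewWithT(t)

	myclient := fake.NewClientBuilder().Build()
	key := client.ObjectKey{Name: "my-cluster", Namespace: "default"}

	k := &KubeadmConfigReconciler{
		Client:              myclient,
		SecretCachingClient: myclient,
		ClusterCache:        clustercache.NewFakeClusterCache(myclient, key),
		KubeadmInitLock:     &myInitLocker{},
	}

	g.Expect(k.ClusterCache).ToNot(BeNil())
}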
98 changes: 48 additions & 50 deletions bootstrap/kubeadm/main.go
@@ -47,6 +47,7 @@ import (
bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
kubeadmbootstrapcontrollers "sigs.k8s.io/cluster-api/bootstrap/kubeadm/controllers"
"sigs.k8s.io/cluster-api/bootstrap/kubeadm/internal/webhooks"
"sigs.k8s.io/cluster-api/controllers/clustercache"
"sigs.k8s.io/cluster-api/controllers/remote"
expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
"sigs.k8s.io/cluster-api/feature"
@@ -63,31 +64,31 @@ var (
controllerName = "cluster-api-kubeadm-bootstrap-manager"

// flags.
enableLeaderElection bool
leaderElectionLeaseDuration time.Duration
leaderElectionRenewDeadline time.Duration
leaderElectionRetryPeriod time.Duration
watchFilterValue string
watchNamespace string
profilerAddress string
enableContentionProfiling bool
syncPeriod time.Duration
restConfigQPS float32
restConfigBurst int
clusterCacheTrackerClientQPS float32
clusterCacheTrackerClientBurst int
webhookPort int
webhookCertDir string
webhookCertName string
webhookKeyName string
healthAddr string
managerOptions = flags.ManagerOptions{}
logOptions = logs.NewOptions()
enableLeaderElection bool
leaderElectionLeaseDuration time.Duration
leaderElectionRenewDeadline time.Duration
leaderElectionRetryPeriod time.Duration
watchFilterValue string
watchNamespace string
profilerAddress string
enableContentionProfiling bool
syncPeriod time.Duration
restConfigQPS float32
restConfigBurst int
clusterCacheClientQPS float32
clusterCacheClientBurst int
webhookPort int
webhookCertDir string
webhookCertName string
webhookKeyName string
healthAddr string
managerOptions = flags.ManagerOptions{}
logOptions = logs.NewOptions()
// CABPK specific flags.
clusterConcurrency int
clusterCacheTrackerConcurrency int
kubeadmConfigConcurrency int
tokenTTL time.Duration
clusterConcurrency int
clusterCacheConcurrency int
kubeadmConfigConcurrency int
tokenTTL time.Duration
)

func init() {
@@ -131,7 +132,7 @@ func InitFlags(fs *pflag.FlagSet) {
"Number of clusters to process simultaneously")
_ = fs.MarkDeprecated("cluster-concurrency", "This flag has no function anymore and is going to be removed in a next release. Use \"--clustercachetracker-concurrency\" instead.")

fs.IntVar(&clusterCacheTrackerConcurrency, "clustercachetracker-concurrency", 10,
fs.IntVar(&clusterCacheConcurrency, "clustercache-concurrency", 100,
"Number of clusters to process simultaneously")

fs.IntVar(&kubeadmConfigConcurrency, "kubeadmconfig-concurrency", 10,
@@ -146,11 +147,11 @@ func InitFlags(fs *pflag.FlagSet) {
fs.IntVar(&restConfigBurst, "kube-api-burst", 30,
"Maximum number of queries that should be allowed in one burst from the controller client to the Kubernetes API server.")

fs.Float32Var(&clusterCacheTrackerClientQPS, "clustercachetracker-client-qps", 20,
"Maximum queries per second from the cluster cache tracker clients to the Kubernetes API server of workload clusters.")
fs.Float32Var(&clusterCacheClientQPS, "clustercache-client-qps", 20,
"Maximum queries per second from the cluster cache clients to the Kubernetes API server of workload clusters.")

fs.IntVar(&clusterCacheTrackerClientBurst, "clustercachetracker-client-burst", 30,
"Maximum number of queries that should be allowed in one burst from the cluster cache tracker clients to the Kubernetes API server of workload clusters.")
fs.IntVar(&clusterCacheClientBurst, "clustercache-client-burst", 30,
"Maximum number of queries that should be allowed in one burst from the cluster cache clients to the Kubernetes API server of workload clusters.")

fs.DurationVar(&tokenTTL, "bootstrap-token-ttl", kubeadmbootstrapcontrollers.DefaultTokenTTL,
"The amount of time the bootstrap token will be valid")
@@ -312,35 +313,32 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) {
os.Exit(1)
}

// Set up a ClusterCacheTracker and ClusterCacheReconciler to provide to controllers
// requiring a connection to a remote cluster
tracker, err := remote.NewClusterCacheTracker(
mgr,
remote.ClusterCacheTrackerOptions{
SecretCachingClient: secretCachingClient,
ControllerName: controllerName,
Log: &ctrl.Log,
ClientQPS: clusterCacheTrackerClientQPS,
ClientBurst: clusterCacheTrackerClientBurst,
clusterCache, err := clustercache.SetupWithManager(ctx, mgr, clustercache.Options{
SecretClient: secretCachingClient,
Cache: clustercache.CacheOptions{},
Client: clustercache.ClientOptions{
QPS: clusterCacheClientQPS,
Burst: clusterCacheClientBurst,
UserAgent: remote.DefaultClusterAPIUserAgent(controllerName),
Cache: clustercache.ClientCacheOptions{
DisableFor: []client.Object{
// Don't cache ConfigMaps & Secrets.
&corev1.ConfigMap{},
&corev1.Secret{},
},
},
},
)
if err != nil {
setupLog.Error(err, "unable to create cluster cache tracker")
os.Exit(1)
}
if err := (&remote.ClusterCacheReconciler{
Client: mgr.GetClient(),
Tracker: tracker,
WatchFilterValue: watchFilterValue,
}).SetupWithManager(ctx, mgr, concurrency(clusterCacheTrackerConcurrency)); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "ClusterCacheReconciler")
}, concurrency(clusterCacheConcurrency))
if err != nil {
setupLog.Error(err, "Unable to create ClusterCache")
os.Exit(1)
}

if err := (&kubeadmbootstrapcontrollers.KubeadmConfigReconciler{
Client: mgr.GetClient(),
SecretCachingClient: secretCachingClient,
Tracker: tracker,
ClusterCache: clusterCache,
WatchFilterValue: watchFilterValue,
TokenTTL: tokenTTL,
}).SetupWithManager(ctx, mgr, concurrency(kubeadmConfigConcurrency)); err != nil {
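Taken together, main.go now calls clustercache.SetupWithManager once, which creates the ClusterCache and registers its internal reconciler, replacing the previous remote.NewClusterCacheTracker plus remote.ClusterCacheReconciler pair; the returned value is then passed to the KubeadmConfigReconciler as shown above. A condensed sketch of that flow, with the option fields and flag defaults taken from the hunks above; the package-level variables come from this file and the wrapper function is illustrative, not a drop-in replacement for setupReconcilers.

// setupClusterCache condenses the setup flow from this file after the change.
func setupClusterCache(ctx context.Context, mgr ctrl.Manager, secretCachingClient client.Client) clustercache.ClusterCache {
	clusterCache, err := clustercache.SetupWithManager(ctx, mgr, clustercache.Options{
		SecretClient: secretCachingClient,
		Cache:        clustercache.CacheOptions{},
		Client: clustercache.ClientOptions{
			QPS:       clusterCacheClientQPS,   // --clustercache-client-qps, default 20
			Burst:     clusterCacheClientBurst, // --clustercache-client-burst, default 30
			UserAgent: remote.DefaultClusterAPIUserAgent(controllerName),
			Cache: clustercache.ClientCacheOptions{
				// Don't cache ConfigMaps & Secrets on the workload cluster.
				DisableFor: []client.Object{&corev1.ConfigMap{}, &corev1.Secret{}},
			},
		},
	}, concurrency(clusterCacheConcurrency)) // --clustercache-concurrency, default 100
	if err != nil {
		setupLog.Error(err, "Unable to create ClusterCache")
		os.Exit(1)
	}
	// clusterCache is then assigned to KubeadmConfigReconciler.ClusterCache (see above).
	return clusterCache
}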