From 6d0ac8c561a7ac66c21e4ee7bd1976c2ecedbf32 Mon Sep 17 00:00:00 2001
From: Alvaro Aleman
Date: Sun, 28 Apr 2024 18:26:18 +0200
Subject: [PATCH] Use the generic/typed workqueue throughout

This change makes us use the generic workqueue throughout the project in
order to improve type safety and readability of the code.
---
 pkg/controller/bootstrap/bootstrapsigner.go | 9 ++-
 pkg/controller/bootstrap/tokencleaner.go | 11 ++-
 .../certificates/certificate_controller.go | 19 +++-
 .../rootcacertpublisher/publisher.go | 11 ++-
 .../clusterroleaggregation_controller.go | 11 ++-
 .../cronjob/cronjob_controllerv2.go | 13 +++-
 .../cronjob/cronjob_controllerv2_test.go | 11 ++-
 pkg/controller/daemon/daemon_controller.go | 11 ++-
 .../daemon/daemon_controller_test.go | 32 ++++----
 .../deployment/deployment_controller.go | 15 ++--
 .../deployment/deployment_controller_test.go | 24 +++---
 pkg/controller/deployment/progress_test.go | 7 +-
 pkg/controller/disruption/disruption.go | 41 +++++++---
 pkg/controller/disruption/disruption_test.go | 2 +-
 .../endpoint/endpoints_controller.go | 17 ++--
 .../endpointslice/endpointslice_controller.go | 23 +++---
 .../endpointslicemirroring_controller.go | 18 +++--
 .../garbagecollector/garbagecollector.go | 6 +-
 .../garbagecollector/garbagecollector_test.go | 78 +++++++++----------
 .../garbagecollector/graph_builder.go | 39 ++++++----
 pkg/controller/job/job_controller.go | 12 +--
 pkg/controller/job/job_controller_test.go | 38 ++++-----
 .../namespace/namespace_controller.go | 19 +++--
 .../node_lifecycle_controller.go | 17 ++--
 pkg/controller/podautoscaler/horizontal.go | 27 ++++---
 pkg/controller/podautoscaler/rate_limiters.go | 12 +--
 pkg/controller/podgc/gc_controller.go | 6 +-
 pkg/controller/podgc/gc_controller_test.go | 4 +-
 pkg/controller/replicaset/replica_set.go | 9 ++-
 pkg/controller/replicaset/replica_set_test.go | 2 +-
 pkg/controller/resourceclaim/controller.go | 11 ++-
 .../resource_quota_controller.go | 22 ++++--
 .../resourcequota/resource_quota_monitor.go | 19 +++--
 .../serviceaccounts_controller.go | 9 ++-
 .../serviceaccount/tokens_controller.go | 26 ++++---
 .../servicecidrs/servicecidrs_controller.go | 14 ++--
 pkg/controller/statefulset/stateful_set.go | 13 ++--
 .../statefulset/stateful_set_test.go | 36 +++------
 .../storageversiongc/gc_controller.go | 18 +++--
 .../storageversionmigrator/resourceversion.go | 12 +--
 .../storageversionmigrator.go | 12 +--
 .../tainteviction/taint_eviction.go | 18 ++---
 pkg/controller/ttl/ttl_controller.go | 9 ++-
 pkg/controller/ttl/ttl_controller_test.go | 2 +-
 .../ttlafterfinished_controller.go | 11 ++-
 .../controller.go | 15 ++--
 .../attachdetach/attach_detach_controller.go | 11 ++-
 pkg/controller/volume/ephemeral/controller.go | 9 ++-
 .../volume/expand/expand_controller.go | 15 ++--
 .../volume/persistentvolume/pv_controller.go | 4 +-
 .../persistentvolume/pv_controller_base.go | 16 ++--
 .../pvc_protection_controller.go | 9 ++-
 .../pvprotection/pv_protection_controller.go | 9 ++-
 ...cluster_authentication_trust_controller.go | 15 ++--
 .../crdregistration_controller.go | 9 ++-
 .../legacytokentracking/controller.go | 9 ++-
 pkg/kubelet/cm/dra/plugin/noderesources.go | 15 ++--
 pkg/kubelet/logs/container_log_manager.go | 15 ++--
 .../logs/container_log_manager_test.go | 37 +++++----
 .../core/service/ipallocator/cidrallocator.go | 13 ++--
 .../ipallocator/controller/repairip.go | 43 ++++++----
 .../apiapproval/apiapproval_controller.go | 15 ++--
 .../establish/establishing_controller.go | 9 ++-
 .../pkg/controller/finalizer/crd_finalizer.go |
9 ++- .../nonstructuralschema_controller.go | 15 ++-- .../pkg/controller/openapi/controller.go | 15 ++-- .../pkg/controller/openapiv3/controller.go | 15 ++-- .../controller/status/naming_controller.go | 9 ++- .../policy/internal/generic/controller.go | 22 ++---- .../plugin/resourcequota/controller.go | 7 +- .../headerrequest/requestheader_controller.go | 9 ++- .../configmap_cafile_content.go | 7 +- .../dynamic_cafile_content.go | 7 +- .../dynamic_serving_content.go | 7 +- .../server/dynamiccertificates/tlsconfig.go | 7 +- .../encryptionconfig/controller/controller.go | 11 ++- .../controller/controller_test.go | 16 ++-- .../pkg/util/flowcontrol/apf_controller.go | 9 ++- .../client-go/examples/workqueue/main.go | 10 +-- .../client-go/transport/cert_rotation.go | 7 +- .../controllers/node/node_controller.go | 21 ++--- .../controllers/service/controller.go | 18 +++-- .../controllers/service/controller_test.go | 29 ++++--- .../controller/controller.go | 10 ++- .../pkg/apiserver/apiservice_controller.go | 9 ++- .../pkg/apiserver/handler_discovery.go | 11 ++- .../pkg/apiserver/handler_discovery_test.go | 24 +++--- .../autoregister/autoregister_controller.go | 9 ++- .../autoregister_controller_test.go | 5 +- .../pkg/controllers/openapi/controller.go | 10 +-- .../pkg/controllers/openapiv3/controller.go | 14 ++-- .../status/available_controller.go | 11 +-- .../status/available_controller_test.go | 7 +- .../k8s.io/sample-controller/controller.go | 29 ++----- 94 files changed, 830 insertions(+), 603 deletions(-) diff --git a/pkg/controller/bootstrap/bootstrapsigner.go b/pkg/controller/bootstrap/bootstrapsigner.go index 95be04bd76829..b0472eddeee78 100644 --- a/pkg/controller/bootstrap/bootstrapsigner.go +++ b/pkg/controller/bootstrap/bootstrapsigner.go @@ -82,7 +82,7 @@ type Signer struct { // have one item (Named ) in this queue. We are using it // serializes and collapses updates as they can come from both the ConfigMap // and Secrets controllers. - syncQueue workqueue.RateLimitingInterface + syncQueue workqueue.TypedRateLimitingInterface[string] secretLister corelisters.SecretLister secretSynced cache.InformerSynced @@ -103,7 +103,12 @@ func NewSigner(cl clientset.Interface, secrets informers.SecretInformer, configM secretSynced: secrets.Informer().HasSynced, configMapLister: configMaps.Lister(), configMapSynced: configMaps.Informer().HasSynced, - syncQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "bootstrap_signer_queue"), + syncQueue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{ + Name: "bootstrap_signer_queue", + }, + ), } configMaps.Informer().AddEventHandlerWithResyncPeriod( diff --git a/pkg/controller/bootstrap/tokencleaner.go b/pkg/controller/bootstrap/tokencleaner.go index 5a08296557f7b..52fa81bd49e6a 100644 --- a/pkg/controller/bootstrap/tokencleaner.go +++ b/pkg/controller/bootstrap/tokencleaner.go @@ -68,7 +68,7 @@ type TokenCleaner struct { // secretSynced returns true if the secret shared informer has been synced at least once. secretSynced cache.InformerSynced - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] } // NewTokenCleaner returns a new *NewTokenCleaner. 
@@ -78,7 +78,12 @@ func NewTokenCleaner(cl clientset.Interface, secrets coreinformers.SecretInforme secretLister: secrets.Lister(), secretSynced: secrets.Informer().HasSynced, tokenSecretNamespace: options.TokenSecretNamespace, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "token_cleaner"), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{ + Name: "token_cleaner", + }, + ), } secrets.Informer().AddEventHandlerWithResyncPeriod( @@ -144,7 +149,7 @@ func (tc *TokenCleaner) processNextWorkItem(ctx context.Context) bool { } defer tc.queue.Done(key) - if err := tc.syncFunc(ctx, key.(string)); err != nil { + if err := tc.syncFunc(ctx, key); err != nil { tc.queue.AddRateLimited(key) utilruntime.HandleError(fmt.Errorf("Sync %v failed with : %v", key, err)) return true diff --git a/pkg/controller/certificates/certificate_controller.go b/pkg/controller/certificates/certificate_controller.go index 5e1e051e1009b..1306eef892b9e 100644 --- a/pkg/controller/certificates/certificate_controller.go +++ b/pkg/controller/certificates/certificate_controller.go @@ -49,7 +49,7 @@ type CertificateController struct { handler func(context.Context, *certificates.CertificateSigningRequest) error - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] } func NewCertificateController( @@ -63,11 +63,16 @@ func NewCertificateController( cc := &CertificateController{ name: name, kubeClient: kubeClient, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.NewMaxOfRateLimiter( - workqueue.NewItemExponentialFailureRateLimiter(200*time.Millisecond, 1000*time.Second), - // 10 qps, 100 bucket size. This is only for retry speed and its only the overall factor (not per item) - &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, - ), "certificate"), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.NewTypedMaxOfRateLimiter[string]( + workqueue.NewTypedItemExponentialFailureRateLimiter[string](200*time.Millisecond, 1000*time.Second), + // 10 qps, 100 bucket size. 
This is only for retry speed and its only the overall factor (not per item) + &workqueue.TypedBucketRateLimiter[string]{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, + ), + workqueue.TypedRateLimitingQueueConfig[string]{ + Name: "certificate", + }, + ), handler: handler, } @@ -140,7 +145,7 @@ func (cc *CertificateController) processNextWorkItem(ctx context.Context) bool { } defer cc.queue.Done(cKey) - if err := cc.syncFunc(ctx, cKey.(string)); err != nil { + if err := cc.syncFunc(ctx, cKey); err != nil { cc.queue.AddRateLimited(cKey) if _, ignorable := err.(ignorableError); !ignorable { utilruntime.HandleError(fmt.Errorf("Sync %v failed with : %v", cKey, err)) diff --git a/pkg/controller/certificates/rootcacertpublisher/publisher.go b/pkg/controller/certificates/rootcacertpublisher/publisher.go index de19dd267f8d7..36127e883e3ad 100644 --- a/pkg/controller/certificates/rootcacertpublisher/publisher.go +++ b/pkg/controller/certificates/rootcacertpublisher/publisher.go @@ -55,7 +55,12 @@ func NewPublisher(cmInformer coreinformers.ConfigMapInformer, nsInformer coreinf e := &Publisher{ client: cl, rootCA: rootCA, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "root_ca_cert_publisher"), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{ + Name: "root_ca_cert_publisher", + }, + ), } cmInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -90,7 +95,7 @@ type Publisher struct { nsListerSynced cache.InformerSynced - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] } // Run starts process @@ -164,7 +169,7 @@ func (c *Publisher) processNextWorkItem(ctx context.Context) bool { } defer c.queue.Done(key) - if err := c.syncHandler(ctx, key.(string)); err != nil { + if err := c.syncHandler(ctx, key); err != nil { utilruntime.HandleError(fmt.Errorf("syncing %q failed: %v", key, err)) c.queue.AddRateLimited(key) return true diff --git a/pkg/controller/clusterroleaggregation/clusterroleaggregation_controller.go b/pkg/controller/clusterroleaggregation/clusterroleaggregation_controller.go index b2cdcb7aa3445..bebc45160ebaa 100644 --- a/pkg/controller/clusterroleaggregation/clusterroleaggregation_controller.go +++ b/pkg/controller/clusterroleaggregation/clusterroleaggregation_controller.go @@ -48,7 +48,7 @@ type ClusterRoleAggregationController struct { clusterRolesSynced cache.InformerSynced syncHandler func(ctx context.Context, key string) error - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] } // NewClusterRoleAggregation creates a new controller @@ -58,7 +58,12 @@ func NewClusterRoleAggregation(clusterRoleInformer rbacinformers.ClusterRoleInfo clusterRoleLister: clusterRoleInformer.Lister(), clusterRolesSynced: clusterRoleInformer.Informer().HasSynced, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ClusterRoleAggregator"), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{ + Name: "ClusterRoleAggregator", + }, + ), } c.syncHandler = c.syncClusterRole @@ -212,7 +217,7 @@ func (c *ClusterRoleAggregationController) processNextWorkItem(ctx context.Conte } defer c.queue.Done(dsKey) - err := c.syncHandler(ctx, dsKey.(string)) + err := c.syncHandler(ctx, dsKey) if err == nil { c.queue.Forget(dsKey) return true 
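Every controller hunk above applies the same mechanical change: the untyped workqueue.RateLimitingInterface field becomes workqueue.TypedRateLimitingInterface[string], construction goes through workqueue.NewTypedRateLimitingQueueWithConfig with a DefaultTypedControllerRateLimiter[string], and the key.(string) assertions after Get disappear because the queue now hands back string keys directly. A minimal, self-contained sketch of that pattern follows; the queue name "example" and the key "default/foo" are illustrative placeholders, not values taken from this patch.

package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	// The element type is fixed at construction time, so the compiler
	// enforces what may be enqueued.
	queue := workqueue.NewTypedRateLimitingQueueWithConfig(
		workqueue.DefaultTypedControllerRateLimiter[string](),
		workqueue.TypedRateLimitingQueueConfig[string]{Name: "example"},
	)
	defer queue.ShutDown()

	queue.Add("default/foo")

	// Get returns a string directly; the key.(string) assertion required
	// by the old interface{}-based queue is gone.
	key, shutdown := queue.Get()
	if shutdown {
		return
	}
	defer queue.Done(key)

	fmt.Println("syncing", key)
	queue.Forget(key)
}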
diff --git a/pkg/controller/cronjob/cronjob_controllerv2.go b/pkg/controller/cronjob/cronjob_controllerv2.go index b83cf78836eaa..c43578ff40d6b 100644 --- a/pkg/controller/cronjob/cronjob_controllerv2.go +++ b/pkg/controller/cronjob/cronjob_controllerv2.go @@ -60,7 +60,7 @@ var ( // ControllerV2 is a controller for CronJobs. // Refactored Cronjob controller that uses DelayingQueue and informers type ControllerV2 struct { - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] kubeClient clientset.Interface recorder record.EventRecorder @@ -85,7 +85,12 @@ func NewControllerV2(ctx context.Context, jobInformer batchv1informers.JobInform eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx)) jm := &ControllerV2{ - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "cronjob"), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{ + Name: "cronjob", + }, + ), kubeClient: kubeClient, broadcaster: eventBroadcaster, recorder: eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "cronjob-controller"}), @@ -162,10 +167,10 @@ func (jm *ControllerV2) processNextWorkItem(ctx context.Context) bool { } defer jm.queue.Done(key) - requeueAfter, err := jm.sync(ctx, key.(string)) + requeueAfter, err := jm.sync(ctx, key) switch { case err != nil: - utilruntime.HandleError(fmt.Errorf("error syncing CronJobController %v, requeuing: %v", key.(string), err)) + utilruntime.HandleError(fmt.Errorf("error syncing CronJobController %v, requeuing: %w", key, err)) jm.queue.AddRateLimited(key) case requeueAfter != nil: jm.queue.Forget(key) diff --git a/pkg/controller/cronjob/cronjob_controllerv2_test.go b/pkg/controller/cronjob/cronjob_controllerv2_test.go index ed1436713ad93..6859f5187e958 100644 --- a/pkg/controller/cronjob/cronjob_controllerv2_test.go +++ b/pkg/controller/cronjob/cronjob_controllerv2_test.go @@ -1375,12 +1375,12 @@ func TestControllerV2SyncCronJob(t *testing.T) { } type fakeQueue struct { - workqueue.RateLimitingInterface + workqueue.TypedRateLimitingInterface[string] delay time.Duration key interface{} } -func (f *fakeQueue) AddAfter(key interface{}, delay time.Duration) { +func (f *fakeQueue) AddAfter(key string, delay time.Duration) { f.delay = delay f.key = key } @@ -1593,7 +1593,12 @@ func TestControllerV2UpdateCronJob(t *testing.T) { return } jm.now = justASecondBeforeTheHour - queue := &fakeQueue{RateLimitingInterface: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test-update-cronjob")} + queue := &fakeQueue{TypedRateLimitingInterface: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{ + Name: "test-update-cronjob", + }, + )} jm.queue = queue jm.jobControl = &fakeJobControl{} jm.cronJobControl = &fakeCJControl{} diff --git a/pkg/controller/daemon/daemon_controller.go b/pkg/controller/daemon/daemon_controller.go index 1f1de0b856fd5..0076ca2411a95 100644 --- a/pkg/controller/daemon/daemon_controller.go +++ b/pkg/controller/daemon/daemon_controller.go @@ -123,7 +123,7 @@ type DaemonSetsController struct { nodeStoreSynced cache.InformerSynced // DaemonSet keys that need to be synced. 
- queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] failedPodsBackoff *flowcontrol.Backoff } @@ -153,7 +153,12 @@ func NewDaemonSetsController( }, burstReplicas: BurstReplicas, expectations: controller.NewControllerExpectations(), - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "daemonset"), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{ + Name: "daemonset", + }, + ), } daemonSetInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -315,7 +320,7 @@ func (dsc *DaemonSetsController) processNextWorkItem(ctx context.Context) bool { } defer dsc.queue.Done(dsKey) - err := dsc.syncHandler(ctx, dsKey.(string)) + err := dsc.syncHandler(ctx, dsKey) if err == nil { dsc.queue.Forget(dsKey) return true diff --git a/pkg/controller/daemon/daemon_controller_test.go b/pkg/controller/daemon/daemon_controller_test.go index 1779077ac97ac..7aa832d2cb84d 100644 --- a/pkg/controller/daemon/daemon_controller_test.go +++ b/pkg/controller/daemon/daemon_controller_test.go @@ -474,7 +474,7 @@ func TestDeleteFinalStateUnknown(t *testing.T) { // DeletedFinalStateUnknown should queue the embedded DS if found. manager.deleteDaemonset(logger, cache.DeletedFinalStateUnknown{Key: "foo", Obj: ds}) enqueuedKey, _ := manager.queue.Get() - if enqueuedKey.(string) != "default/foo" { + if enqueuedKey != "default/foo" { t.Errorf("expected delete of DeletedFinalStateUnknown to enqueue the daemonset but found: %#v", enqueuedKey) } } @@ -2890,7 +2890,7 @@ func TestAddNode(t *testing.T) { t.Fatalf("queue.Len() = %v, want %v", got, want) } key, done := manager.queue.Get() - if key == nil || done { + if key == "" || done { t.Fatalf("failed to enqueue controller for node %v", node2.Name) } } @@ -2920,11 +2920,11 @@ func TestAddPod(t *testing.T) { t.Fatalf("queue.Len() = %v, want %v", got, want) } key, done := manager.queue.Get() - if key == nil || done { + if key == "" || done { t.Fatalf("failed to enqueue controller for pod %v", pod1.Name) } expectedKey, _ := controller.KeyFunc(ds1) - if got, want := key.(string), expectedKey; got != want { + if got, want := key, expectedKey; got != want { t.Errorf("queue.Get() = %v, want %v", got, want) } @@ -2934,11 +2934,11 @@ func TestAddPod(t *testing.T) { t.Fatalf("queue.Len() = %v, want %v", got, want) } key, done = manager.queue.Get() - if key == nil || done { + if key == "" || done { t.Fatalf("failed to enqueue controller for pod %v", pod2.Name) } expectedKey, _ = controller.KeyFunc(ds2) - if got, want := key.(string), expectedKey; got != want { + if got, want := key, expectedKey; got != want { t.Errorf("queue.Get() = %v, want %v", got, want) } } @@ -3011,11 +3011,11 @@ func TestUpdatePod(t *testing.T) { t.Fatalf("queue.Len() = %v, want %v", got, want) } key, done := manager.queue.Get() - if key == nil || done { + if key == "" || done { t.Fatalf("failed to enqueue controller for pod %v", pod1.Name) } expectedKey, _ := controller.KeyFunc(ds1) - if got, want := key.(string), expectedKey; got != want { + if got, want := key, expectedKey; got != want { t.Errorf("queue.Get() = %v, want %v", got, want) } @@ -3027,11 +3027,11 @@ func TestUpdatePod(t *testing.T) { t.Fatalf("queue.Len() = %v, want %v", got, want) } key, done = manager.queue.Get() - if key == nil || done { + if key == "" || done { t.Fatalf("failed to enqueue controller for pod %v", pod2.Name) } expectedKey, _ = 
controller.KeyFunc(ds2) - if got, want := key.(string), expectedKey; got != want { + if got, want := key, expectedKey; got != want { t.Errorf("queue.Get() = %v, want %v", got, want) } } @@ -3189,11 +3189,11 @@ func TestDeletePod(t *testing.T) { t.Fatalf("queue.Len() = %v, want %v", got, want) } key, done := manager.queue.Get() - if key == nil || done { + if key == "" || done { t.Fatalf("failed to enqueue controller for pod %v", pod1.Name) } expectedKey, _ := controller.KeyFunc(ds1) - if got, want := key.(string), expectedKey; got != want { + if got, want := key, expectedKey; got != want { t.Errorf("queue.Get() = %v, want %v", got, want) } @@ -3203,11 +3203,11 @@ func TestDeletePod(t *testing.T) { t.Fatalf("queue.Len() = %v, want %v", got, want) } key, done = manager.queue.Get() - if key == nil || done { + if key == "" || done { t.Fatalf("failed to enqueue controller for pod %v", pod2.Name) } expectedKey, _ = controller.KeyFunc(ds2) - if got, want := key.(string), expectedKey; got != want { + if got, want := key, expectedKey; got != want { t.Errorf("queue.Get() = %v, want %v", got, want) } } @@ -3255,7 +3255,7 @@ func bumpResourceVersion(obj metav1.Object) { // getQueuedKeys returns a sorted list of keys in the queue. // It can be used to quickly check that multiple keys are in there. -func getQueuedKeys(queue workqueue.RateLimitingInterface) []string { +func getQueuedKeys(queue workqueue.TypedRateLimitingInterface[string]) []string { var keys []string count := queue.Len() for i := 0; i < count; i++ { @@ -3263,7 +3263,7 @@ func getQueuedKeys(queue workqueue.RateLimitingInterface) []string { if done { return keys } - keys = append(keys, key.(string)) + keys = append(keys, key) } sort.Strings(keys) return keys diff --git a/pkg/controller/deployment/deployment_controller.go b/pkg/controller/deployment/deployment_controller.go index a266bed2c8584..ec63afd24fbe2 100644 --- a/pkg/controller/deployment/deployment_controller.go +++ b/pkg/controller/deployment/deployment_controller.go @@ -94,7 +94,7 @@ type DeploymentController struct { podListerSynced cache.InformerSynced // Deployments that need to be synced - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] } // NewDeploymentController creates a new DeploymentController. 
@@ -105,7 +105,12 @@ func NewDeploymentController(ctx context.Context, dInformer appsinformers.Deploy client: client, eventBroadcaster: eventBroadcaster, eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "deployment-controller"}), - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "deployment"), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{ + Name: "deployment", + }, + ), } dc.rsControl = controller.RealRSControl{ KubeClient: client, @@ -486,19 +491,19 @@ func (dc *DeploymentController) processNextWorkItem(ctx context.Context) bool { } defer dc.queue.Done(key) - err := dc.syncHandler(ctx, key.(string)) + err := dc.syncHandler(ctx, key) dc.handleErr(ctx, err, key) return true } -func (dc *DeploymentController) handleErr(ctx context.Context, err error, key interface{}) { +func (dc *DeploymentController) handleErr(ctx context.Context, err error, key string) { logger := klog.FromContext(ctx) if err == nil || errors.HasStatusCause(err, v1.NamespaceTerminatingCause) { dc.queue.Forget(key) return } - ns, name, keyErr := cache.SplitMetaNamespaceKey(key.(string)) + ns, name, keyErr := cache.SplitMetaNamespaceKey(key) if keyErr != nil { logger.Error(err, "Failed to split meta namespace cache key", "cacheKey", key) } diff --git a/pkg/controller/deployment/deployment_controller_test.go b/pkg/controller/deployment/deployment_controller_test.go index 9e30b7841fb4d..b4481d06a7e16 100644 --- a/pkg/controller/deployment/deployment_controller_test.go +++ b/pkg/controller/deployment/deployment_controller_test.go @@ -716,11 +716,11 @@ func TestAddReplicaSet(t *testing.T) { t.Fatalf("queue.Len() = %v, want %v", got, want) } key, done := dc.queue.Get() - if key == nil || done { + if key == "" || done { t.Fatalf("failed to enqueue controller for rs %v", rs1.Name) } expectedKey, _ := controller.KeyFunc(d1) - if got, want := key.(string), expectedKey; got != want { + if got, want := key, expectedKey; got != want { t.Errorf("queue.Get() = %v, want %v", got, want) } @@ -729,11 +729,11 @@ func TestAddReplicaSet(t *testing.T) { t.Fatalf("queue.Len() = %v, want %v", got, want) } key, done = dc.queue.Get() - if key == nil || done { + if key == "" || done { t.Fatalf("failed to enqueue controller for rs %v", rs2.Name) } expectedKey, _ = controller.KeyFunc(d2) - if got, want := key.(string), expectedKey; got != want { + if got, want := key, expectedKey; got != want { t.Errorf("queue.Get() = %v, want %v", got, want) } } @@ -801,11 +801,11 @@ func TestUpdateReplicaSet(t *testing.T) { t.Fatalf("queue.Len() = %v, want %v", got, want) } key, done := dc.queue.Get() - if key == nil || done { + if key == "" || done { t.Fatalf("failed to enqueue controller for rs %v", rs1.Name) } expectedKey, _ := controller.KeyFunc(d1) - if got, want := key.(string), expectedKey; got != want { + if got, want := key, expectedKey; got != want { t.Errorf("queue.Get() = %v, want %v", got, want) } @@ -817,11 +817,11 @@ func TestUpdateReplicaSet(t *testing.T) { t.Fatalf("queue.Len() = %v, want %v", got, want) } key, done = dc.queue.Get() - if key == nil || done { + if key == "" || done { t.Fatalf("failed to enqueue controller for rs %v", rs2.Name) } expectedKey, _ = controller.KeyFunc(d2) - if got, want := key.(string), expectedKey; got != want { + if got, want := key, expectedKey; got != want { t.Errorf("queue.Get() = %v, want %v", got, want) } } @@ -953,11 +953,11 @@ 
func TestDeleteReplicaSet(t *testing.T) { t.Fatalf("queue.Len() = %v, want %v", got, want) } key, done := dc.queue.Get() - if key == nil || done { + if key == "" || done { t.Fatalf("failed to enqueue controller for rs %v", rs1.Name) } expectedKey, _ := controller.KeyFunc(d1) - if got, want := key.(string), expectedKey; got != want { + if got, want := key, expectedKey; got != want { t.Errorf("queue.Get() = %v, want %v", got, want) } @@ -966,11 +966,11 @@ func TestDeleteReplicaSet(t *testing.T) { t.Fatalf("queue.Len() = %v, want %v", got, want) } key, done = dc.queue.Get() - if key == nil || done { + if key == "" || done { t.Fatalf("failed to enqueue controller for rs %v", rs2.Name) } expectedKey, _ = controller.KeyFunc(d2) - if got, want := key.(string), expectedKey; got != want { + if got, want := key, expectedKey; got != want { t.Errorf("queue.Get() = %v, want %v", got, want) } } diff --git a/pkg/controller/deployment/progress_test.go b/pkg/controller/deployment/progress_test.go index 53197d721527a..81f3bbebcf80f 100644 --- a/pkg/controller/deployment/progress_test.go +++ b/pkg/controller/deployment/progress_test.go @@ -168,7 +168,12 @@ func TestRequeueStuckDeployment(t *testing.T) { } dc := &DeploymentController{ - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "doesnt_matter"), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{ + Name: "doesnt_matter", + }, + ), } dc.enqueueDeployment = dc.enqueue diff --git a/pkg/controller/disruption/disruption.go b/pkg/controller/disruption/disruption.go index b32608077201d..edaff5af2bd0e 100644 --- a/pkg/controller/disruption/disruption.go +++ b/pkg/controller/disruption/disruption.go @@ -103,11 +103,11 @@ type DisruptionController struct { ssListerSynced cache.InformerSynced // PodDisruptionBudget keys that need to be synced. - queue workqueue.RateLimitingInterface - recheckQueue workqueue.DelayingInterface + queue workqueue.TypedRateLimitingInterface[string] + recheckQueue workqueue.TypedDelayingInterface[string] // pod keys that need to be synced due to a stale DisruptionTarget condition. 
- stalePodDisruptionQueue workqueue.RateLimitingInterface + stalePodDisruptionQueue workqueue.TypedRateLimitingInterface[string] stalePodDisruptionTimeout time.Duration broadcaster record.EventBroadcaster @@ -177,10 +177,29 @@ func NewDisruptionControllerInternal(ctx context.Context, ) *DisruptionController { logger := klog.FromContext(ctx) dc := &DisruptionController{ - kubeClient: kubeClient, - queue: workqueue.NewRateLimitingQueueWithDelayingInterface(workqueue.NewDelayingQueueWithCustomClock(clock, "disruption"), workqueue.DefaultControllerRateLimiter()), - recheckQueue: workqueue.NewDelayingQueueWithCustomClock(clock, "disruption_recheck"), - stalePodDisruptionQueue: workqueue.NewRateLimitingQueueWithDelayingInterface(workqueue.NewDelayingQueueWithCustomClock(clock, "stale_pod_disruption"), workqueue.DefaultControllerRateLimiter()), + kubeClient: kubeClient, + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{ + DelayingQueue: workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[string]{ + Clock: clock, + Name: "disruption", + }), + }, + ), + recheckQueue: workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[string]{ + Clock: clock, + Name: "disruption_recheck", + }), + stalePodDisruptionQueue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{ + DelayingQueue: workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[string]{ + Clock: clock, + Name: "stale_pod_disruption", + }), + }, + ), broadcaster: record.NewBroadcaster(record.WithContext(ctx)), stalePodDisruptionTimeout: stalePodDisruptionTimeout, } @@ -617,13 +636,13 @@ func (dc *DisruptionController) processNextWorkItem(ctx context.Context) bool { } defer dc.queue.Done(dKey) - err := dc.sync(ctx, dKey.(string)) + err := dc.sync(ctx, dKey) if err == nil { dc.queue.Forget(dKey) return true } - utilruntime.HandleError(fmt.Errorf("Error syncing PodDisruptionBudget %v, requeuing: %v", dKey.(string), err)) + utilruntime.HandleError(fmt.Errorf("Error syncing PodDisruptionBudget %v, requeuing: %w", dKey, err)) //nolint:stylecheck dc.queue.AddRateLimited(dKey) return true @@ -655,12 +674,12 @@ func (dc *DisruptionController) processNextStalePodDisruptionWorkItem(ctx contex return false } defer dc.stalePodDisruptionQueue.Done(key) - err := dc.syncStalePodDisruption(ctx, key.(string)) + err := dc.syncStalePodDisruption(ctx, key) if err == nil { dc.stalePodDisruptionQueue.Forget(key) return true } - utilruntime.HandleError(fmt.Errorf("error syncing Pod %v to clear DisruptionTarget condition, requeueing: %v", key.(string), err)) + utilruntime.HandleError(fmt.Errorf("error syncing Pod %v to clear DisruptionTarget condition, requeueing: %w", key, err)) dc.stalePodDisruptionQueue.AddRateLimited(key) return true } diff --git a/pkg/controller/disruption/disruption_test.go b/pkg/controller/disruption/disruption_test.go index 084216614cc7b..977e1b8d9e5d6 100644 --- a/pkg/controller/disruption/disruption_test.go +++ b/pkg/controller/disruption/disruption_test.go @@ -1029,7 +1029,7 @@ func TestPDBNotExist(t *testing.T) { func TestUpdateDisruptedPods(t *testing.T) { _, ctx := ktesting.NewTestContext(t) dc, ps := newFakeDisruptionController(ctx) - dc.recheckQueue = workqueue.NewNamedDelayingQueue("pdb_queue") + dc.recheckQueue = 
workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[string]{Name: "pdb_queue"}) pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromInt32(1)) currentTime := dc.clock.Now() pdb.Status.DisruptedPods = map[string]metav1.Time{ diff --git a/pkg/controller/endpoint/endpoints_controller.go b/pkg/controller/endpoint/endpoints_controller.go index 2d72f186aef29..96e00dbecb021 100644 --- a/pkg/controller/endpoint/endpoints_controller.go +++ b/pkg/controller/endpoint/endpoints_controller.go @@ -76,8 +76,13 @@ func NewEndpointController(ctx context.Context, podInformer coreinformers.PodInf recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "endpoint-controller"}) e := &Controller{ - client: client, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "endpoint"), + client: client, + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{ + Name: "endpoint", + }, + ), workerLoopPeriod: time.Second, } @@ -146,7 +151,7 @@ type Controller struct { // more often than services with few pods; it also would cause a // service that's inserted multiple times to be processed more than // necessary. - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] // workerLoopPeriod is the time between worker runs. The workers process the queue of service and pod changes. workerLoopPeriod time.Duration @@ -324,19 +329,19 @@ func (e *Controller) processNextWorkItem(ctx context.Context) bool { defer e.queue.Done(eKey) logger := klog.FromContext(ctx) - err := e.syncService(ctx, eKey.(string)) + err := e.syncService(ctx, eKey) e.handleErr(logger, err, eKey) return true } -func (e *Controller) handleErr(logger klog.Logger, err error, key interface{}) { +func (e *Controller) handleErr(logger klog.Logger, err error, key string) { if err == nil { e.queue.Forget(key) return } - ns, name, keyErr := cache.SplitMetaNamespaceKey(key.(string)) + ns, name, keyErr := cache.SplitMetaNamespaceKey(key) if keyErr != nil { logger.Error(err, "Failed to split meta namespace cache key", "key", key) } diff --git a/pkg/controller/endpointslice/endpointslice_controller.go b/pkg/controller/endpointslice/endpointslice_controller.go index 2e137fafdda89..9fd569f526112 100644 --- a/pkg/controller/endpointslice/endpointslice_controller.go +++ b/pkg/controller/endpointslice/endpointslice_controller.go @@ -99,12 +99,17 @@ func NewController(ctx context.Context, podInformer coreinformers.PodInformer, // such as an update to a Service or Deployment. A more significant // rate limit back off here helps ensure that the Controller does not // overwhelm the API Server. - queue: workqueue.NewNamedRateLimitingQueue(workqueue.NewMaxOfRateLimiter( - workqueue.NewItemExponentialFailureRateLimiter(defaultSyncBackOff, maxSyncBackOff), - // 10 qps, 100 bucket size. This is only for retry speed and its - // only the overall factor (not per item). - &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, - ), "endpoint_slice"), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.NewTypedMaxOfRateLimiter( + workqueue.NewTypedItemExponentialFailureRateLimiter[string](defaultSyncBackOff, maxSyncBackOff), + // 10 qps, 100 bucket size. This is only for retry speed and its + // only the overall factor (not per item). 
+ &workqueue.TypedBucketRateLimiter[string]{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, + ), + workqueue.TypedRateLimitingQueueConfig[string]{ + Name: "endpoint_slice", + }, + ), workerLoopPeriod: time.Second, } @@ -231,7 +236,7 @@ type Controller struct { // more often than services with few pods; it also would cause a // service that's inserted multiple times to be processed more than // necessary. - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] // maxEndpointsPerSlice references the maximum number of endpoints that // should be added to an EndpointSlice @@ -293,13 +298,13 @@ func (c *Controller) processNextWorkItem(logger klog.Logger) bool { } defer c.queue.Done(cKey) - err := c.syncService(logger, cKey.(string)) + err := c.syncService(logger, cKey) c.handleErr(logger, err, cKey) return true } -func (c *Controller) handleErr(logger klog.Logger, err error, key interface{}) { +func (c *Controller) handleErr(logger klog.Logger, err error, key string) { trackSync(err) if err == nil { diff --git a/pkg/controller/endpointslicemirroring/endpointslicemirroring_controller.go b/pkg/controller/endpointslicemirroring/endpointslicemirroring_controller.go index 492329b6cf597..bdb0a02d84f5c 100644 --- a/pkg/controller/endpointslicemirroring/endpointslicemirroring_controller.go +++ b/pkg/controller/endpointslicemirroring/endpointslicemirroring_controller.go @@ -88,12 +88,16 @@ func NewController(ctx context.Context, endpointsInformer coreinformers.Endpoint // processes events that can require significant EndpointSlice changes. // A more significant rate limit back off here helps ensure that the // Controller does not overwhelm the API Server. - queue: workqueue.NewNamedRateLimitingQueue(workqueue.NewMaxOfRateLimiter( - workqueue.NewItemExponentialFailureRateLimiter(defaultSyncBackOff, maxSyncBackOff), + queue: workqueue.NewTypedRateLimitingQueueWithConfig(workqueue.NewTypedMaxOfRateLimiter( + workqueue.NewTypedItemExponentialFailureRateLimiter[string](defaultSyncBackOff, maxSyncBackOff), // 10 qps, 100 bucket size. This is only for retry speed and its // only the overall factor (not per item). - &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, - ), "endpoint_slice_mirroring"), + &workqueue.TypedBucketRateLimiter[string]{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, + ), + workqueue.TypedRateLimitingQueueConfig[string]{ + Name: "endpoint_slice_mirroring", + }, + ), workerLoopPeriod: time.Second, } @@ -192,7 +196,7 @@ type Controller struct { // more often than Endpoints with few addresses; it also would cause an // Endpoints resource that's inserted multiple times to be processed more // than necessary. - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] // maxEndpointsPerSubset references the maximum number of endpoints that // should be added to an EndpointSlice for an EndpointSubset. 
@@ -251,13 +255,13 @@ func (c *Controller) processNextWorkItem(logger klog.Logger) bool { } defer c.queue.Done(cKey) - err := c.syncEndpoints(logger, cKey.(string)) + err := c.syncEndpoints(logger, cKey) c.handleErr(logger, err, cKey) return true } -func (c *Controller) handleErr(logger klog.Logger, err error, key interface{}) { +func (c *Controller) handleErr(logger klog.Logger, err error, key string) { if err == nil { c.queue.Forget(key) return diff --git a/pkg/controller/garbagecollector/garbagecollector.go b/pkg/controller/garbagecollector/garbagecollector.go index 7ca4191026562..9874de251a05b 100644 --- a/pkg/controller/garbagecollector/garbagecollector.go +++ b/pkg/controller/garbagecollector/garbagecollector.go @@ -20,7 +20,6 @@ import ( "context" goerrors "errors" "fmt" - "k8s.io/controller-manager/pkg/informerfactory" "reflect" "sync" "time" @@ -42,6 +41,7 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" "k8s.io/controller-manager/controller" + "k8s.io/controller-manager/pkg/informerfactory" "k8s.io/klog/v2" c "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/garbagecollector/metrics" @@ -65,9 +65,9 @@ type GarbageCollector struct { restMapper meta.ResettableRESTMapper metadataClient metadata.Interface // garbage collector attempts to delete the items in attemptToDelete queue when the time is ripe. - attemptToDelete workqueue.RateLimitingInterface + attemptToDelete workqueue.TypedRateLimitingInterface[*node] // garbage collector attempts to orphan the dependents of the items in the attemptToOrphan queue, then deletes the items. - attemptToOrphan workqueue.RateLimitingInterface + attemptToOrphan workqueue.TypedRateLimitingInterface[*node] dependencyGraphBuilder *GraphBuilder // GC caches the owners that do not exist according to the API server. 
absentOwnerCache *ReferenceCache diff --git a/pkg/controller/garbagecollector/garbagecollector_test.go b/pkg/controller/garbagecollector/garbagecollector_test.go index 85338e2276ea2..86e0afd96691c 100644 --- a/pkg/controller/garbagecollector/garbagecollector_test.go +++ b/pkg/controller/garbagecollector/garbagecollector_test.go @@ -414,12 +414,12 @@ func TestProcessEvent(t *testing.T) { dependencyGraphBuilder := &GraphBuilder{ informersStarted: alwaysStarted, - graphChanges: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()), + graphChanges: workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedControllerRateLimiter[*event]()), uidToNode: &concurrentUIDToNode{ uidToNodeLock: sync.RWMutex{}, uidToNode: make(map[types.UID]*node), }, - attemptToDelete: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()), + attemptToDelete: workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedControllerRateLimiter[*node]()), absentOwnerCache: NewReferenceCache(2), } for i := 0; i < len(scenario.events); i++ { @@ -2318,9 +2318,9 @@ func TestConflictingData(t *testing.T) { restMapper := &testRESTMapper{meta.MultiRESTMapper{tweakableRM, testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)}} // set up our workqueues - attemptToDelete := newTrackingWorkqueue() - attemptToOrphan := newTrackingWorkqueue() - graphChanges := newTrackingWorkqueue() + attemptToDelete := newTrackingWorkqueue[*node]() + attemptToOrphan := newTrackingWorkqueue[*node]() + graphChanges := newTrackingWorkqueue[*event]() gc := &GarbageCollector{ metadataClient: metadataClient, @@ -2459,9 +2459,9 @@ type stepContext struct { gc *GarbageCollector eventRecorder *record.FakeRecorder metadataClient *fakemetadata.FakeMetadataClient - attemptToDelete *trackingWorkqueue - attemptToOrphan *trackingWorkqueue - graphChanges *trackingWorkqueue + attemptToDelete *trackingWorkqueue[*node] + attemptToOrphan *trackingWorkqueue[*node] + graphChanges *trackingWorkqueue[*event] } type step struct { @@ -2521,7 +2521,7 @@ func insertEvent(e *event) step { check: func(ctx stepContext) { ctx.t.Helper() // drain queue into items - var items []interface{} + var items []*event for ctx.gc.dependencyGraphBuilder.graphChanges.Len() > 0 { item, _ := ctx.gc.dependencyGraphBuilder.graphChanges.Get() ctx.gc.dependencyGraphBuilder.graphChanges.Done(item) @@ -2711,7 +2711,7 @@ func assertState(s state) step { break } - a := ctx.graphChanges.pendingList[i].(*event) + a := ctx.graphChanges.pendingList[i] if !reflect.DeepEqual(e, a) { objectDiff := "" if !reflect.DeepEqual(e.obj, a.obj) { @@ -2739,18 +2739,18 @@ func assertState(s state) step { ctx.t.Errorf("attemptToDelete: expected %d events, got %d", len(s.pendingAttemptToDelete), ctx.attemptToDelete.Len()) break } - a := ctx.attemptToDelete.pendingList[i].(*node).identity - a_virtual := ctx.attemptToDelete.pendingList[i].(*node).virtual + a := ctx.attemptToDelete.pendingList[i].identity + aVirtual := ctx.attemptToDelete.pendingList[i].virtual if !reflect.DeepEqual(e, a) { ctx.t.Errorf("attemptToDelete[%d]: expected %v, got %v", i, e, a) } - if e_virtual != a_virtual { + if e_virtual != aVirtual { ctx.t.Errorf("attemptToDelete[%d]: expected virtual node %v, got non-virtual node %v", i, e, a) } } if ctx.attemptToDelete.Len() > len(s.pendingAttemptToDelete) { for i, a := range ctx.attemptToDelete.pendingList[len(s.pendingAttemptToDelete):] { - ctx.t.Errorf("attemptToDelete[%d]: unexpected node: %v", len(s.pendingAttemptToDelete)+i, a.(*node).identity) + 
ctx.t.Errorf("attemptToDelete[%d]: unexpected node: %v", len(s.pendingAttemptToDelete)+i, a.identity) } } } @@ -2762,14 +2762,14 @@ func assertState(s state) step { ctx.t.Errorf("attemptToOrphan: expected %d events, got %d", len(s.pendingAttemptToOrphan), ctx.attemptToOrphan.Len()) break } - a := ctx.attemptToOrphan.pendingList[i].(*node).identity + a := ctx.attemptToOrphan.pendingList[i].identity if !reflect.DeepEqual(e, a) { ctx.t.Errorf("attemptToOrphan[%d]: expected %v, got %v", i, e, a) } } if ctx.attemptToOrphan.Len() > len(s.pendingAttemptToOrphan) { for i, a := range ctx.attemptToOrphan.pendingList[len(s.pendingAttemptToOrphan):] { - ctx.t.Errorf("attemptToOrphan[%d]: unexpected node: %v", len(s.pendingAttemptToOrphan)+i, a.(*node).identity) + ctx.t.Errorf("attemptToOrphan[%d]: unexpected node: %v", len(s.pendingAttemptToOrphan)+i, a.identity) } } } @@ -2782,46 +2782,46 @@ func assertState(s state) step { // allows introspection of the items in the queue, // and treats AddAfter and AddRateLimited the same as Add // so they are always synchronous. -type trackingWorkqueue struct { - limiter workqueue.RateLimitingInterface - pendingList []interface{} - pendingMap map[interface{}]struct{} +type trackingWorkqueue[T comparable] struct { + limiter workqueue.TypedRateLimitingInterface[T] + pendingList []T + pendingMap map[T]struct{} } -var _ = workqueue.RateLimitingInterface(&trackingWorkqueue{}) +var _ = workqueue.TypedRateLimitingInterface[string](&trackingWorkqueue[string]{}) -func newTrackingWorkqueue() *trackingWorkqueue { - return &trackingWorkqueue{ - limiter: workqueue.NewRateLimitingQueue(&workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Inf, 100)}), - pendingMap: map[interface{}]struct{}{}, +func newTrackingWorkqueue[T comparable]() *trackingWorkqueue[T] { + return &trackingWorkqueue[T]{ + limiter: workqueue.NewTypedRateLimitingQueue[T](&workqueue.TypedBucketRateLimiter[T]{Limiter: rate.NewLimiter(rate.Inf, 100)}), + pendingMap: map[T]struct{}{}, } } -func (t *trackingWorkqueue) Add(item interface{}) { +func (t *trackingWorkqueue[T]) Add(item T) { t.queue(item) t.limiter.Add(item) } -func (t *trackingWorkqueue) AddAfter(item interface{}, duration time.Duration) { +func (t *trackingWorkqueue[T]) AddAfter(item T, duration time.Duration) { t.Add(item) } -func (t *trackingWorkqueue) AddRateLimited(item interface{}) { +func (t *trackingWorkqueue[T]) AddRateLimited(item T) { t.Add(item) } -func (t *trackingWorkqueue) Get() (interface{}, bool) { +func (t *trackingWorkqueue[T]) Get() (T, bool) { item, shutdown := t.limiter.Get() t.dequeue(item) return item, shutdown } -func (t *trackingWorkqueue) Done(item interface{}) { +func (t *trackingWorkqueue[T]) Done(item T) { t.limiter.Done(item) } -func (t *trackingWorkqueue) Forget(item interface{}) { +func (t *trackingWorkqueue[T]) Forget(item T) { t.limiter.Forget(item) } -func (t *trackingWorkqueue) NumRequeues(item interface{}) int { +func (t *trackingWorkqueue[T]) NumRequeues(item T) int { return 0 } -func (t *trackingWorkqueue) Len() int { +func (t *trackingWorkqueue[T]) Len() int { if e, a := len(t.pendingList), len(t.pendingMap); e != a { panic(fmt.Errorf("pendingList != pendingMap: %d / %d", e, a)) } @@ -2830,17 +2830,17 @@ func (t *trackingWorkqueue) Len() int { } return len(t.pendingList) } -func (t *trackingWorkqueue) ShutDown() { +func (t *trackingWorkqueue[T]) ShutDown() { t.limiter.ShutDown() } -func (t *trackingWorkqueue) ShutDownWithDrain() { +func (t *trackingWorkqueue[T]) ShutDownWithDrain() { 
t.limiter.ShutDownWithDrain() } -func (t *trackingWorkqueue) ShuttingDown() bool { +func (t *trackingWorkqueue[T]) ShuttingDown() bool { return t.limiter.ShuttingDown() } -func (t *trackingWorkqueue) queue(item interface{}) { +func (t *trackingWorkqueue[T]) queue(item T) { if _, queued := t.pendingMap[item]; queued { // fmt.Printf("already queued: %#v\n", item) return @@ -2848,13 +2848,13 @@ func (t *trackingWorkqueue) queue(item interface{}) { t.pendingMap[item] = struct{}{} t.pendingList = append(t.pendingList, item) } -func (t *trackingWorkqueue) dequeue(item interface{}) { +func (t *trackingWorkqueue[T]) dequeue(item T) { if _, queued := t.pendingMap[item]; !queued { // fmt.Printf("not queued: %#v\n", item) return } delete(t.pendingMap, item) - newPendingList := []interface{}{} + newPendingList := []T{} for _, p := range t.pendingList { if p == item { continue diff --git a/pkg/controller/garbagecollector/graph_builder.go b/pkg/controller/garbagecollector/graph_builder.go index 24d8d9e321e09..239bc34ebd578 100644 --- a/pkg/controller/garbagecollector/graph_builder.go +++ b/pkg/controller/garbagecollector/graph_builder.go @@ -103,13 +103,13 @@ type GraphBuilder struct { metadataClient metadata.Interface // monitors are the producer of the graphChanges queue, graphBuilder alters // the in-memory graph according to the changes. - graphChanges workqueue.RateLimitingInterface + graphChanges workqueue.TypedRateLimitingInterface[*event] // uidToNode doesn't require a lock to protect, because only the // single-threaded GraphBuilder.processGraphChanges() reads/writes it. uidToNode *concurrentUIDToNode // GraphBuilder is the producer of attemptToDelete and attemptToOrphan, GC is the consumer. - attemptToDelete workqueue.RateLimitingInterface - attemptToOrphan workqueue.RateLimitingInterface + attemptToDelete workqueue.TypedRateLimitingInterface[*node] + attemptToOrphan workqueue.TypedRateLimitingInterface[*node] // GraphBuilder and GC share the absentOwnerCache. Objects that are known to // be non-existent are added to the cached. 
absentOwnerCache *ReferenceCache @@ -145,8 +145,18 @@ func NewDependencyGraphBuilder( ) *GraphBuilder { eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx)) - attemptToDelete := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "garbage_collector_attempt_to_delete") - attemptToOrphan := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "garbage_collector_attempt_to_orphan") + attemptToDelete := workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[*node](), + workqueue.TypedRateLimitingQueueConfig[*node]{ + Name: "garbage_collector_attempt_to_delete", + }, + ) + attemptToOrphan := workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[*node](), + workqueue.TypedRateLimitingQueueConfig[*node]{ + Name: "garbage_collector_attempt_to_orphan", + }, + ) absentOwnerCache := NewReferenceCache(500) graphBuilder := &GraphBuilder{ eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "garbage-collector-controller"}), @@ -154,7 +164,12 @@ func NewDependencyGraphBuilder( metadataClient: metadataClient, informersStarted: informersStarted, restMapper: mapper, - graphChanges: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "garbage_collector_graph_changes"), + graphChanges: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[*event](), + workqueue.TypedRateLimitingQueueConfig[*event]{ + Name: "garbage_collector_graph_changes", + }, + ), uidToNode: &concurrentUIDToNode{ uidToNode: make(map[types.UID]*node), }, @@ -666,12 +681,8 @@ func (gb *GraphBuilder) processGraphChanges(logger klog.Logger) bool { return false } defer gb.graphChanges.Done(item) - event, ok := item.(*event) - if !ok { - utilruntime.HandleError(fmt.Errorf("expect a *event, got %v", item)) - return true - } - obj := event.obj + event := item + obj := item.obj accessor, err := meta.Accessor(obj) if err != nil { utilruntime.HandleError(fmt.Errorf("cannot access obj: %v", err)) @@ -971,8 +982,8 @@ func getAlternateOwnerIdentity(deps []*node, verifiedAbsentIdentity objectRefere } func (gb *GraphBuilder) GetGraphResources() ( - attemptToDelete workqueue.RateLimitingInterface, - attemptToOrphan workqueue.RateLimitingInterface, + attemptToDelete workqueue.TypedRateLimitingInterface[*node], + attemptToOrphan workqueue.TypedRateLimitingInterface[*node], absentOwnerCache *ReferenceCache, ) { return gb.attemptToDelete, gb.attemptToOrphan, gb.absentOwnerCache diff --git a/pkg/controller/job/job_controller.go b/pkg/controller/job/job_controller.go index 0b121a7a7a494..14da1adcb14d1 100644 --- a/pkg/controller/job/job_controller.go +++ b/pkg/controller/job/job_controller.go @@ -109,10 +109,10 @@ type Controller struct { podStore corelisters.PodLister // Jobs that need to be updated - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] // Orphan deleted pods that still have a Job tracking finalizer to be removed - orphanQueue workqueue.RateLimitingInterface + orphanQueue workqueue.TypedRateLimitingInterface[string] broadcaster record.EventBroadcaster recorder record.EventRecorder @@ -159,8 +159,8 @@ func newControllerWithClock(ctx context.Context, podInformer coreinformers.PodIn }, expectations: controller.NewControllerExpectations(), finalizerExpectations: newUIDTrackingExpectations(), - queue: 
workqueue.NewRateLimitingQueueWithConfig(workqueue.NewItemExponentialFailureRateLimiter(DefaultJobApiBackOff, MaxJobApiBackOff), workqueue.RateLimitingQueueConfig{Name: "job", Clock: clock}), - orphanQueue: workqueue.NewRateLimitingQueueWithConfig(workqueue.NewItemExponentialFailureRateLimiter(DefaultJobApiBackOff, MaxJobApiBackOff), workqueue.RateLimitingQueueConfig{Name: "job_orphan_pod", Clock: clock}), + queue: workqueue.NewTypedRateLimitingQueueWithConfig(workqueue.NewTypedItemExponentialFailureRateLimiter[string](DefaultJobApiBackOff, MaxJobApiBackOff), workqueue.TypedRateLimitingQueueConfig[string]{Name: "job", Clock: clock}), + orphanQueue: workqueue.NewTypedRateLimitingQueueWithConfig(workqueue.NewTypedItemExponentialFailureRateLimiter[string](DefaultJobApiBackOff, MaxJobApiBackOff), workqueue.TypedRateLimitingQueueConfig[string]{Name: "job_orphan_pod", Clock: clock}), broadcaster: eventBroadcaster, recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "job-controller"}), clock: clock, @@ -590,7 +590,7 @@ func (jm *Controller) processNextWorkItem(ctx context.Context) bool { } defer jm.queue.Done(key) - err := jm.syncHandler(ctx, key.(string)) + err := jm.syncHandler(ctx, key) if err == nil { jm.queue.Forget(key) return true @@ -613,7 +613,7 @@ func (jm *Controller) processNextOrphanPod(ctx context.Context) bool { return false } defer jm.orphanQueue.Done(key) - err := jm.syncOrphanPod(ctx, key.(string)) + err := jm.syncOrphanPod(ctx, key) if err != nil { utilruntime.HandleError(fmt.Errorf("Error syncing orphan pod: %v", err)) jm.orphanQueue.AddRateLimited(key) diff --git a/pkg/controller/job/job_controller_test.go b/pkg/controller/job/job_controller_test.go index f02f8601dff60..dc94d2644da99 100644 --- a/pkg/controller/job/job_controller_test.go +++ b/pkg/controller/job/job_controller_test.go @@ -5214,22 +5214,22 @@ func TestAddPod(t *testing.T) { jm.addPod(logger, pod1) verifyEmptyQueueAndAwaitForQueueLen(ctx, t, jm, 1) key, done := jm.queue.Get() - if key == nil || done { + if key == "" || done { t.Fatalf("failed to enqueue controller for pod %v", pod1.Name) } expectedKey, _ := controller.KeyFunc(job1) - if got, want := key.(string), expectedKey; got != want { + if got, want := key, expectedKey; got != want { t.Errorf("queue.Get() = %v, want %v", got, want) } jm.addPod(logger, pod2) verifyEmptyQueueAndAwaitForQueueLen(ctx, t, jm, 1) key, done = jm.queue.Get() - if key == nil || done { + if key == "" || done { t.Fatalf("failed to enqueue controller for pod %v", pod2.Name) } expectedKey, _ = controller.KeyFunc(job2) - if got, want := key.(string), expectedKey; got != want { + if got, want := key, expectedKey; got != want { t.Errorf("queue.Get() = %v, want %v", got, want) } } @@ -5290,11 +5290,11 @@ func TestUpdatePod(t *testing.T) { jm.updatePod(logger, &prev, pod1) verifyEmptyQueueAndAwaitForQueueLen(ctx, t, jm, 1) key, done := jm.queue.Get() - if key == nil || done { + if key == "" || done { t.Fatalf("failed to enqueue controller for pod %v", pod1.Name) } expectedKey, _ := controller.KeyFunc(job1) - if got, want := key.(string), expectedKey; got != want { + if got, want := key, expectedKey; got != want { t.Errorf("queue.Get() = %v, want %v", got, want) } @@ -5303,11 +5303,11 @@ func TestUpdatePod(t *testing.T) { jm.updatePod(logger, &prev, pod2) verifyEmptyQueueAndAwaitForQueueLen(ctx, t, jm, 1) key, done = jm.queue.Get() - if key == nil || done { + if key == "" || done { t.Fatalf("failed to enqueue controller for pod %v", pod2.Name) } expectedKey, _ = 
controller.KeyFunc(job2) - if got, want := key.(string), expectedKey; got != want { + if got, want := key, expectedKey; got != want { t.Errorf("queue.Get() = %v, want %v", got, want) } } @@ -5420,22 +5420,22 @@ func TestDeletePod(t *testing.T) { jm.deletePod(logger, pod1, true) verifyEmptyQueueAndAwaitForQueueLen(ctx, t, jm, 1) key, done := jm.queue.Get() - if key == nil || done { + if key == "" || done { t.Fatalf("failed to enqueue controller for pod %v", pod1.Name) } expectedKey, _ := controller.KeyFunc(job1) - if got, want := key.(string), expectedKey; got != want { + if got, want := key, expectedKey; got != want { t.Errorf("queue.Get() = %v, want %v", got, want) } jm.deletePod(logger, pod2, true) verifyEmptyQueueAndAwaitForQueueLen(ctx, t, jm, 1) key, done = jm.queue.Get() - if key == nil || done { + if key == "" || done { t.Fatalf("failed to enqueue controller for pod %v", pod2.Name) } expectedKey, _ = controller.KeyFunc(job2) - if got, want := key.(string), expectedKey; got != want { + if got, want := key, expectedKey; got != want { t.Errorf("queue.Get() = %v, want %v", got, want) } } @@ -5725,23 +5725,23 @@ func TestJobApiBackoffReset(t *testing.T) { verifyEmptyQueue(ctx, t, manager) } -var _ workqueue.RateLimitingInterface = &fakeRateLimitingQueue{} +var _ workqueue.TypedRateLimitingInterface[string] = &fakeRateLimitingQueue{} type fakeRateLimitingQueue struct { - workqueue.Interface + workqueue.TypedInterface[string] requeues int - item interface{} + item string duration time.Duration } -func (f *fakeRateLimitingQueue) AddRateLimited(item interface{}) {} -func (f *fakeRateLimitingQueue) Forget(item interface{}) { +func (f *fakeRateLimitingQueue) AddRateLimited(item string) {} +func (f *fakeRateLimitingQueue) Forget(item string) { f.requeues = 0 } -func (f *fakeRateLimitingQueue) NumRequeues(item interface{}) int { +func (f *fakeRateLimitingQueue) NumRequeues(item string) int { return f.requeues } -func (f *fakeRateLimitingQueue) AddAfter(item interface{}, duration time.Duration) { +func (f *fakeRateLimitingQueue) AddAfter(item string, duration time.Duration) { f.item = item f.duration = duration } diff --git a/pkg/controller/namespace/namespace_controller.go b/pkg/controller/namespace/namespace_controller.go index c5f80a4aed704..9f2c0d85e972f 100644 --- a/pkg/controller/namespace/namespace_controller.go +++ b/pkg/controller/namespace/namespace_controller.go @@ -57,7 +57,7 @@ type NamespaceController struct { // returns true when the namespace cache is ready listerSynced cache.InformerSynced // namespaces that have been queued up for processing by workers - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] // helper to delete all resources in the namespace when the namespace is deleted. 
namespacedResourcesDeleter deletion.NamespacedResourcesDeleterInterface } @@ -74,7 +74,12 @@ func NewNamespaceController( // create the controller so we can inject the enqueue function namespaceController := &NamespaceController{ - queue: workqueue.NewNamedRateLimitingQueue(nsControllerRateLimiter(), "namespace"), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + nsControllerRateLimiter(), + workqueue.TypedRateLimitingQueueConfig[string]{ + Name: "namespace", + }, + ), namespacedResourcesDeleter: deletion.NewNamespacedResourcesDeleter(ctx, kubeClient.CoreV1().Namespaces(), metadataClient, kubeClient.CoreV1(), discoverResourcesFn, finalizerToken), } @@ -101,12 +106,12 @@ func NewNamespaceController( // nsControllerRateLimiter is tuned for a faster than normal recycle time with default backoff speed and default overall // requeing speed. We do this so that namespace cleanup is reliably faster and we know that the number of namespaces being // deleted is smaller than total number of other namespace scoped resources in a cluster. -func nsControllerRateLimiter() workqueue.RateLimiter { - return workqueue.NewMaxOfRateLimiter( +func nsControllerRateLimiter() workqueue.TypedRateLimiter[string] { + return workqueue.NewTypedMaxOfRateLimiter( // this ensures that we retry namespace deletion at least every minute, never longer. - workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 60*time.Second), + workqueue.NewTypedItemExponentialFailureRateLimiter[string](5*time.Millisecond, 60*time.Second), // 10 qps, 100 bucket size. This is only for retry speed and its only the overall factor (not per item) - &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, + &workqueue.TypedBucketRateLimiter[string]{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, ) } @@ -142,7 +147,7 @@ func (nm *NamespaceController) worker(ctx context.Context) { } defer nm.queue.Done(key) - err := nm.syncNamespaceFromKey(ctx, key.(string)) + err := nm.syncNamespaceFromKey(ctx, key) if err == nil { // no error, forget this entry and return nm.queue.Forget(key) diff --git a/pkg/controller/nodelifecycle/node_lifecycle_controller.go b/pkg/controller/nodelifecycle/node_lifecycle_controller.go index e7f6b627de28f..b642e1bd4213f 100644 --- a/pkg/controller/nodelifecycle/node_lifecycle_controller.go +++ b/pkg/controller/nodelifecycle/node_lifecycle_controller.go @@ -297,8 +297,8 @@ type Controller struct { largeClusterThreshold int32 unhealthyZoneThreshold float32 - nodeUpdateQueue workqueue.Interface - podUpdateQueue workqueue.RateLimitingInterface + nodeUpdateQueue workqueue.TypedInterface[string] + podUpdateQueue workqueue.TypedRateLimitingInterface[podUpdateItem] } // NewNodeLifecycleController returns a new taint controller. 
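Aside, not part of the patch: nsControllerRateLimiter above composes two typed limiters with NewTypedMaxOfRateLimiter. A self-contained sketch of the same composition, assuming the generic client-go API; newComposedLimiter is an illustrative name:

package example

import (
	"time"

	"golang.org/x/time/rate"
	"k8s.io/client-go/util/workqueue"
)

// newComposedLimiter retries each failing key with exponential backoff capped
// at one minute, while a shared token bucket bounds the overall requeue rate.
func newComposedLimiter() workqueue.TypedRateLimiter[string] {
	return workqueue.NewTypedMaxOfRateLimiter(
		workqueue.NewTypedItemExponentialFailureRateLimiter[string](5*time.Millisecond, 60*time.Second),
		&workqueue.TypedBucketRateLimiter[string]{Limiter: rate.NewLimiter(rate.Limit(10), 100)},
	)
}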
@@ -344,8 +344,13 @@ func NewNodeLifecycleController( secondaryEvictionLimiterQPS: secondaryEvictionLimiterQPS, largeClusterThreshold: largeClusterThreshold, unhealthyZoneThreshold: unhealthyZoneThreshold, - nodeUpdateQueue: workqueue.NewTypedWithConfig[any](workqueue.TypedQueueConfig[any]{Name: "node_lifecycle_controller"}), - podUpdateQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "node_lifecycle_controller_pods"), + nodeUpdateQueue: workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[string]{Name: "node_lifecycle_controller"}), + podUpdateQueue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[podUpdateItem](), + workqueue.TypedRateLimitingQueueConfig[podUpdateItem]{ + Name: "node_lifecycle_controller_pods", + }, + ), } nc.enterPartialDisruptionFunc = nc.ReducedQPSFunc @@ -515,7 +520,7 @@ func (nc *Controller) doNodeProcessingPassWorker(ctx context.Context) { if shutdown { return } - nodeName := obj.(string) + nodeName := obj if err := nc.doNoScheduleTaintingPass(ctx, nodeName); err != nil { logger.Error(err, "Failed to taint NoSchedule on node, requeue it", "node", klog.KRef("", nodeName)) // TODO(k82cn): Add nodeName back to the queue @@ -1096,7 +1101,7 @@ func (nc *Controller) doPodProcessingWorker(ctx context.Context) { return } - podItem := obj.(podUpdateItem) + podItem := obj nc.processPod(ctx, podItem) } } diff --git a/pkg/controller/podautoscaler/horizontal.go b/pkg/controller/podautoscaler/horizontal.go index 3b179a89077d0..425bfe639870f 100644 --- a/pkg/controller/podautoscaler/horizontal.go +++ b/pkg/controller/podautoscaler/horizontal.go @@ -104,7 +104,7 @@ type HorizontalController struct { podListerSynced cache.InformerSynced // Controllers that need to be synced - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] // Latest unstabilized recommendations for each autoscaler. 
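Aside, not part of the patch: as the podUpdateQueue above shows, the typed queue accepts any comparable key type, not just strings. A minimal sketch with a made-up struct key (podRef is illustrative; the real controller keys its queue on podUpdateItem):

package example

import "k8s.io/client-go/util/workqueue"

// podRef is a hypothetical comparable key type.
type podRef struct {
	namespace string
	name      string
}

func processOne() {
	q := workqueue.NewTypedRateLimitingQueueWithConfig(
		workqueue.DefaultTypedControllerRateLimiter[podRef](),
		workqueue.TypedRateLimitingQueueConfig[podRef]{Name: "example_pods"},
	)
	defer q.ShutDown()

	q.Add(podRef{namespace: "default", name: "nginx"})

	item, shutdown := q.Get() // item is already a podRef; no type assertion
	if shutdown {
		return
	}
	defer q.Done(item)
	_ = item.name // handle the pod here
}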
recommendations map[string][]timestampedRecommendation @@ -148,15 +148,20 @@ func NewHorizontalController( hpaNamespacer: hpaNamespacer, downscaleStabilisationWindow: downscaleStabilisationWindow, monitor: monitor.New(), - queue: workqueue.NewNamedRateLimitingQueue(NewDefaultHPARateLimiter(resyncPeriod), "horizontalpodautoscaler"), - mapper: mapper, - recommendations: map[string][]timestampedRecommendation{}, - recommendationsLock: sync.Mutex{}, - scaleUpEvents: map[string][]timestampedScaleEvent{}, - scaleUpEventsLock: sync.RWMutex{}, - scaleDownEvents: map[string][]timestampedScaleEvent{}, - scaleDownEventsLock: sync.RWMutex{}, - hpaSelectors: selectors.NewBiMultimap(), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + NewDefaultHPARateLimiter(resyncPeriod), + workqueue.TypedRateLimitingQueueConfig[string]{ + Name: "horizontalpodautoscaler", + }, + ), + mapper: mapper, + recommendations: map[string][]timestampedRecommendation{}, + recommendationsLock: sync.Mutex{}, + scaleUpEvents: map[string][]timestampedScaleEvent{}, + scaleUpEventsLock: sync.RWMutex{}, + scaleDownEvents: map[string][]timestampedScaleEvent{}, + scaleDownEventsLock: sync.RWMutex{}, + hpaSelectors: selectors.NewBiMultimap(), } hpaInformer.Informer().AddEventHandlerWithResyncPeriod( @@ -265,7 +270,7 @@ func (a *HorizontalController) processNextWorkItem(ctx context.Context) bool { } defer a.queue.Done(key) - deleted, err := a.reconcileKey(ctx, key.(string)) + deleted, err := a.reconcileKey(ctx, key) if err != nil { utilruntime.HandleError(err) } diff --git a/pkg/controller/podautoscaler/rate_limiters.go b/pkg/controller/podautoscaler/rate_limiters.go index eb97db4435135..e7062923b395a 100644 --- a/pkg/controller/podautoscaler/rate_limiters.go +++ b/pkg/controller/podautoscaler/rate_limiters.go @@ -27,31 +27,31 @@ type FixedItemIntervalRateLimiter struct { interval time.Duration } -var _ workqueue.RateLimiter = &FixedItemIntervalRateLimiter{} +var _ workqueue.TypedRateLimiter[string] = &FixedItemIntervalRateLimiter{} // NewFixedItemIntervalRateLimiter creates a new instance of a RateLimiter using a fixed interval -func NewFixedItemIntervalRateLimiter(interval time.Duration) workqueue.RateLimiter { +func NewFixedItemIntervalRateLimiter(interval time.Duration) workqueue.TypedRateLimiter[string] { return &FixedItemIntervalRateLimiter{ interval: interval, } } // When returns the interval of the rate limiter -func (r *FixedItemIntervalRateLimiter) When(item interface{}) time.Duration { +func (r *FixedItemIntervalRateLimiter) When(item string) time.Duration { return r.interval } // NumRequeues returns back how many failures the item has had -func (r *FixedItemIntervalRateLimiter) NumRequeues(item interface{}) int { +func (r *FixedItemIntervalRateLimiter) NumRequeues(item string) int { return 1 } // Forget indicates that an item is finished being retried. 
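Aside, not part of the patch: the HPA limiter above now implements the generic TypedRateLimiter contract instead of the untyped one. A standalone sketch of a fixed-interval limiter written against that interface (fixedIntervalLimiter is an illustrative name):

package example

import (
	"time"

	"k8s.io/client-go/util/workqueue"
)

type fixedIntervalLimiter struct {
	interval time.Duration
}

var _ workqueue.TypedRateLimiter[string] = &fixedIntervalLimiter{}

// When returns the same delay for every item, regardless of history.
func (r *fixedIntervalLimiter) When(item string) time.Duration { return r.interval }

// NumRequeues is constant because no per-item state is kept.
func (r *fixedIntervalLimiter) NumRequeues(item string) int { return 1 }

// Forget is a no-op for the same reason.
func (r *fixedIntervalLimiter) Forget(item string) {}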
-func (r *FixedItemIntervalRateLimiter) Forget(item interface{}) { +func (r *FixedItemIntervalRateLimiter) Forget(item string) { } // NewDefaultHPARateLimiter creates a rate limiter which limits overall (as per the // default controller rate limiter), as well as per the resync interval -func NewDefaultHPARateLimiter(interval time.Duration) workqueue.RateLimiter { +func NewDefaultHPARateLimiter(interval time.Duration) workqueue.TypedRateLimiter[string] { return NewFixedItemIntervalRateLimiter(interval) } diff --git a/pkg/controller/podgc/gc_controller.go b/pkg/controller/podgc/gc_controller.go index 476c764bdc0d7..a856af96dcb33 100644 --- a/pkg/controller/podgc/gc_controller.go +++ b/pkg/controller/podgc/gc_controller.go @@ -61,7 +61,7 @@ type PodGCController struct { nodeLister corelisters.NodeLister nodeListerSynced cache.InformerSynced - nodeQueue workqueue.DelayingInterface + nodeQueue workqueue.TypedDelayingInterface[string] terminatedPodThreshold int gcCheckPeriod time.Duration @@ -83,7 +83,7 @@ func NewPodGCInternal(ctx context.Context, kubeClient clientset.Interface, podIn podListerSynced: podInformer.Informer().HasSynced, nodeLister: nodeInformer.Lister(), nodeListerSynced: nodeInformer.Informer().HasSynced, - nodeQueue: workqueue.NewNamedDelayingQueue("orphaned_pods_nodes"), + nodeQueue: workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[string]{Name: "orphaned_pods_nodes"}), gcCheckPeriod: gcCheckPeriod, quarantineTime: quarantineTime, } @@ -270,7 +270,7 @@ func (gcc *PodGCController) discoverDeletedNodes(ctx context.Context, existingNo if quit { return nil, true } - nodeName := item.(string) + nodeName := item if !existingNodeNames.Has(nodeName) { exists, err := gcc.checkIfNodeExists(ctx, nodeName) switch { diff --git a/pkg/controller/podgc/gc_controller_test.go b/pkg/controller/podgc/gc_controller_test.go index bf15c69b4d9e2..12b4ffa1ae740 100644 --- a/pkg/controller/podgc/gc_controller_test.go +++ b/pkg/controller/podgc/gc_controller_test.go @@ -198,7 +198,7 @@ func makePod(name string, nodeName string, phase v1.PodPhase) *v1.Pod { } } -func waitForAdded(q workqueue.DelayingInterface, depth int) error { +func waitForAdded(q workqueue.TypedDelayingInterface[string], depth int) error { return wait.Poll(1*time.Millisecond, 10*time.Second, func() (done bool, err error) { if q.Len() == depth { return true, nil @@ -380,7 +380,7 @@ func TestGCOrphaned(t *testing.T) { // Overwrite queue fakeClock := testingclock.NewFakeClock(time.Now()) gcc.nodeQueue.ShutDown() - gcc.nodeQueue = workqueue.NewDelayingQueueWithCustomClock(fakeClock, "podgc_test_queue") + gcc.nodeQueue = workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[string]{Clock: fakeClock, Name: "podgc_test_queue"}) // First GC of orphaned pods gcc.gc(ctx) diff --git a/pkg/controller/replicaset/replica_set.go b/pkg/controller/replicaset/replica_set.go index 648d07266bb80..a730fa19dae76 100644 --- a/pkg/controller/replicaset/replica_set.go +++ b/pkg/controller/replicaset/replica_set.go @@ -111,7 +111,7 @@ type ReplicaSetController struct { podListerSynced cache.InformerSynced // Controllers that need to be synced - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] } // NewReplicaSetController configures a replica set controller with the specified event recorder @@ -145,7 +145,10 @@ func NewBaseController(logger klog.Logger, rsInformer appsinformers.ReplicaSetIn eventBroadcaster: eventBroadcaster, burstReplicas: burstReplicas, expectations: 
controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()), - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), queueName), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: queueName}, + ), } rsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -548,7 +551,7 @@ func (rsc *ReplicaSetController) processNextWorkItem(ctx context.Context) bool { } defer rsc.queue.Done(key) - err := rsc.syncHandler(ctx, key.(string)) + err := rsc.syncHandler(ctx, key) if err == nil { rsc.queue.Forget(key) return true diff --git a/pkg/controller/replicaset/replica_set_test.go b/pkg/controller/replicaset/replica_set_test.go index c8ec14e60f454..681d0e0cc0bfe 100644 --- a/pkg/controller/replicaset/replica_set_test.go +++ b/pkg/controller/replicaset/replica_set_test.go @@ -875,7 +875,7 @@ func TestControllerUpdateRequeue(t *testing.T) { manager.podControl = &fakePodControl // Enqueue once. Then process it. Disable rate-limiting for this. - manager.queue = workqueue.NewRateLimitingQueue(workqueue.NewMaxOfRateLimiter()) + manager.queue = workqueue.NewTypedRateLimitingQueue(workqueue.NewTypedMaxOfRateLimiter[string]()) manager.enqueueRS(rs) manager.processNextWorkItem(ctx) // It should have been requeued. diff --git a/pkg/controller/resourceclaim/controller.go b/pkg/controller/resourceclaim/controller.go index 148ad7d57dbd7..efb12cfef821e 100644 --- a/pkg/controller/resourceclaim/controller.go +++ b/pkg/controller/resourceclaim/controller.go @@ -109,7 +109,7 @@ type Controller struct { // recorder is used to record events in the API server recorder record.EventRecorder - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] // The deletedObjects cache keeps track of Pods for which we know that // they have existed and have been removed. 
For those we can be sure @@ -142,8 +142,11 @@ func NewController( claimsSynced: claimInformer.Informer().HasSynced, templateLister: templateInformer.Lister(), templatesSynced: templateInformer.Informer().HasSynced, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "resource_claim"), - deletedObjects: newUIDCache(maxUIDCacheEntries), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "resource_claim"}, + ), + deletedObjects: newUIDCache(maxUIDCacheEntries), } metrics.RegisterMetrics() @@ -424,7 +427,7 @@ func (ec *Controller) processNextWorkItem(ctx context.Context) bool { } defer ec.queue.Done(key) - err := ec.syncHandler(ctx, key.(string)) + err := ec.syncHandler(ctx, key) if err == nil { ec.queue.Forget(key) return true diff --git a/pkg/controller/resourcequota/resource_quota_controller.go b/pkg/controller/resourcequota/resource_quota_controller.go index 8eee7ef48c43d..dcfd6c6a88ef0 100644 --- a/pkg/controller/resourcequota/resource_quota_controller.go +++ b/pkg/controller/resourcequota/resource_quota_controller.go @@ -85,9 +85,9 @@ type Controller struct { // A list of functions that return true when their caches have synced informerSyncedFuncs []cache.InformerSynced // ResourceQuota objects that need to be synchronized - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] // missingUsageQueue holds objects that are missing the initial usage information - missingUsageQueue workqueue.RateLimitingInterface + missingUsageQueue workqueue.TypedRateLimitingInterface[string] // To allow injection of syncUsage for testing. syncHandler func(ctx context.Context, key string) error // function that controls full recalculation of quota usage @@ -109,10 +109,16 @@ func NewController(ctx context.Context, options *ControllerOptions) (*Controller rqClient: options.QuotaClient, rqLister: options.ResourceQuotaInformer.Lister(), informerSyncedFuncs: []cache.InformerSynced{options.ResourceQuotaInformer.Informer().HasSynced}, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "resourcequota_primary"), - missingUsageQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "resourcequota_priority"), - resyncPeriod: options.ResyncPeriod, - registry: options.Registry, + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "resourcequota_primary"}, + ), + missingUsageQueue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "resourcequota_priority"}, + ), + resyncPeriod: options.ResyncPeriod, + registry: options.Registry, } // set the synchronization handler rq.syncHandler = rq.syncResourceQuotaFromKey @@ -246,7 +252,7 @@ func (rq *Controller) addQuota(logger klog.Logger, obj interface{}) { } // worker runs a worker thread that just dequeues items, processes them, and marks them done. 
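Aside, not part of the patch: once the queue is typed, most workers in this change collapse to the same shape, because Get hands back a string directly. A generic sketch of that loop (processNextWorkItem and sync are illustrative names):

package example

import (
	"context"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/client-go/util/workqueue"
)

func processNextWorkItem(ctx context.Context, queue workqueue.TypedRateLimitingInterface[string], sync func(context.Context, string) error) bool {
	key, quit := queue.Get()
	if quit {
		return false
	}
	defer queue.Done(key)

	if err := sync(ctx, key); err != nil {
		utilruntime.HandleError(err)
		queue.AddRateLimited(key) // retry this key with backoff
		return true
	}
	queue.Forget(key) // success: reset the per-key backoff
	return true
}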
-func (rq *Controller) worker(queue workqueue.RateLimitingInterface) func(context.Context) { +func (rq *Controller) worker(queue workqueue.TypedRateLimitingInterface[string]) func(context.Context) { workFunc := func(ctx context.Context) bool { key, quit := queue.Get() if quit { @@ -261,7 +267,7 @@ func (rq *Controller) worker(queue workqueue.RateLimitingInterface) func(context logger = klog.LoggerWithValues(logger, "queueKey", key) ctx = klog.NewContext(ctx, logger) - err := rq.syncHandler(ctx, key.(string)) + err := rq.syncHandler(ctx, key) if err == nil { queue.Forget(key) return false diff --git a/pkg/controller/resourcequota/resource_quota_monitor.go b/pkg/controller/resourcequota/resource_quota_monitor.go index fc2c6b6382f35..d0d0f30b97551 100644 --- a/pkg/controller/resourcequota/resource_quota_monitor.go +++ b/pkg/controller/resourcequota/resource_quota_monitor.go @@ -83,7 +83,7 @@ type QuotaMonitor struct { running bool // monitors are the producer of the resourceChanges queue - resourceChanges workqueue.RateLimitingInterface + resourceChanges workqueue.TypedRateLimitingInterface[*event] // interfaces with informers informerFactory informerfactory.InformerFactory @@ -106,10 +106,13 @@ type QuotaMonitor struct { // NewMonitor creates a new instance of a QuotaMonitor func NewMonitor(informersStarted <-chan struct{}, informerFactory informerfactory.InformerFactory, ignoredResources map[schema.GroupResource]struct{}, resyncPeriod controller.ResyncPeriodFunc, replenishmentFunc ReplenishmentFunc, registry quota.Registry, updateFilter UpdateFilter) *QuotaMonitor { return &QuotaMonitor{ - informersStarted: informersStarted, - informerFactory: informerFactory, - ignoredResources: ignoredResources, - resourceChanges: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "resource_quota_controller_resource_changes"), + informersStarted: informersStarted, + informerFactory: informerFactory, + ignoredResources: ignoredResources, + resourceChanges: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[*event](), + workqueue.TypedRateLimitingQueueConfig[*event]{Name: "resource_quota_controller_resource_changes"}, + ), resyncPeriod: resyncPeriod, replenishmentFunc: replenishmentFunc, registry: registry, @@ -351,11 +354,7 @@ func (qm *QuotaMonitor) processResourceChanges(ctx context.Context) bool { return false } defer qm.resourceChanges.Done(item) - event, ok := item.(*event) - if !ok { - utilruntime.HandleError(fmt.Errorf("expect a *event, got %v", item)) - return true - } + event := item obj := event.obj accessor, err := meta.Accessor(obj) if err != nil { diff --git a/pkg/controller/serviceaccount/serviceaccounts_controller.go b/pkg/controller/serviceaccount/serviceaccounts_controller.go index 43589f6633d61..4395136d066dd 100644 --- a/pkg/controller/serviceaccount/serviceaccounts_controller.go +++ b/pkg/controller/serviceaccount/serviceaccounts_controller.go @@ -65,7 +65,10 @@ func NewServiceAccountsController(saInformer coreinformers.ServiceAccountInforme e := &ServiceAccountsController{ client: cl, serviceAccountsToEnsure: options.ServiceAccounts, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "serviceaccount"), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "serviceaccount"}, + ), } saHandler, _ := 
saInformer.Informer().AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{ @@ -100,7 +103,7 @@ type ServiceAccountsController struct { nsLister corelisters.NamespaceLister nsListerSynced cache.InformerSynced - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] } // Run runs the ServiceAccountsController blocks until receiving signal from stopCh. @@ -165,7 +168,7 @@ func (c *ServiceAccountsController) processNextWorkItem(ctx context.Context) boo } defer c.queue.Done(key) - err := c.syncHandler(ctx, key.(string)) + err := c.syncHandler(ctx, key) if err == nil { c.queue.Forget(key) return true diff --git a/pkg/controller/serviceaccount/tokens_controller.go b/pkg/controller/serviceaccount/tokens_controller.go index 6641202eeaa9a..dfacea8acfaf7 100644 --- a/pkg/controller/serviceaccount/tokens_controller.go +++ b/pkg/controller/serviceaccount/tokens_controller.go @@ -80,8 +80,14 @@ func NewTokensController(serviceAccounts informers.ServiceAccountInformer, secre token: options.TokenGenerator, rootCA: options.RootCA, - syncServiceAccountQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "serviceaccount_tokens_service"), - syncSecretQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "serviceaccount_tokens_secret"), + syncServiceAccountQueue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[serviceAccountQueueKey](), + workqueue.TypedRateLimitingQueueConfig[serviceAccountQueueKey]{Name: "serviceaccount_tokens_service"}, + ), + syncSecretQueue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[secretQueueKey](), + workqueue.TypedRateLimitingQueueConfig[secretQueueKey]{Name: "serviceaccount_tokens_secret"}, + ), maxRetries: maxRetries, } @@ -143,14 +149,14 @@ type TokensController struct { // syncServiceAccountQueue handles service account events: // * ensures tokens are removed for service accounts which no longer exist // key is "//" - syncServiceAccountQueue workqueue.RateLimitingInterface + syncServiceAccountQueue workqueue.TypedRateLimitingInterface[serviceAccountQueueKey] // syncSecretQueue handles secret events: // * deletes tokens whose service account no longer exists // * updates tokens with missing token or namespace data, or mismatched ca data // * ensures service account secret references are removed for tokens which are deleted // key is a secretQueueKey{} - syncSecretQueue workqueue.RateLimitingInterface + syncSecretQueue workqueue.TypedRateLimitingInterface[secretQueueKey] maxRetries int } @@ -189,14 +195,14 @@ func (e *TokensController) queueServiceAccountUpdateSync(oldObj interface{}, new } // complete optionally requeues key, then calls queue.Done(key) -func (e *TokensController) retryOrForget(logger klog.Logger, queue workqueue.RateLimitingInterface, key interface{}, requeue bool) { +func retryOrForget[T comparable](logger klog.Logger, queue workqueue.TypedRateLimitingInterface[T], key T, requeue bool, maxRetries int) { if !requeue { queue.Forget(key) return } requeueCount := queue.NumRequeues(key) - if requeueCount < e.maxRetries { + if requeueCount < maxRetries { queue.AddRateLimited(key) return } @@ -227,7 +233,7 @@ func (e *TokensController) syncServiceAccount(ctx context.Context) { retry := false defer func() { - e.retryOrForget(logger, e.syncServiceAccountQueue, key, retry) + retryOrForget(logger, e.syncServiceAccountQueue, key, retry, e.maxRetries) }() saInfo, err :=
parseServiceAccountKey(key) @@ -263,7 +269,7 @@ func (e *TokensController) syncSecret(ctx context.Context) { // Track whether or not we should retry this sync retry := false defer func() { - e.retryOrForget(logger, e.syncSecretQueue, key, retry) + retryOrForget(logger, e.syncSecretQueue, key, retry, e.maxRetries) }() secretInfo, err := parseSecretQueueKey(key) @@ -571,7 +577,7 @@ type serviceAccountQueueKey struct { uid types.UID } -func makeServiceAccountKey(sa *v1.ServiceAccount) interface{} { +func makeServiceAccountKey(sa *v1.ServiceAccount) serviceAccountQueueKey { return serviceAccountQueueKey{ namespace: sa.Namespace, name: sa.Name, @@ -599,7 +605,7 @@ type secretQueueKey struct { saUID types.UID } -func makeSecretQueueKey(secret *v1.Secret) interface{} { +func makeSecretQueueKey(secret *v1.Secret) secretQueueKey { return secretQueueKey{ namespace: secret.Namespace, name: secret.Name, diff --git a/pkg/controller/servicecidrs/servicecidrs_controller.go b/pkg/controller/servicecidrs/servicecidrs_controller.go index 40fdbf0884c11..ee40847f0b605 100644 --- a/pkg/controller/servicecidrs/servicecidrs_controller.go +++ b/pkg/controller/servicecidrs/servicecidrs_controller.go @@ -75,8 +75,11 @@ func NewController( broadcaster := record.NewBroadcaster(record.WithContext(ctx)) recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: controllerName}) c := &Controller{ - client: client, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ipaddresses"), + client: client, + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "ipaddresses"}, + ), tree: iptree.New[sets.Set[string]](), workerLoopPeriod: time.Second, } @@ -115,7 +118,7 @@ type Controller struct { ipAddressLister networkinglisters.IPAddressLister ipAddressSynced cache.InformerSynced - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] // workerLoopPeriod is the time between worker runs. The workers process the queue of service and ipRange changes. workerLoopPeriod time.Duration @@ -264,13 +267,12 @@ func (c *Controller) worker(ctx context.Context) { } func (c *Controller) processNext(ctx context.Context) bool { - eKey, quit := c.queue.Get() + key, quit := c.queue.Get() if quit { return false } - defer c.queue.Done(eKey) + defer c.queue.Done(key) - key := eKey.(string) err := c.sync(ctx, key) if err == nil { c.queue.Forget(key) diff --git a/pkg/controller/statefulset/stateful_set.go b/pkg/controller/statefulset/stateful_set.go index 3f67ef15da055..09ecc52c1c318 100644 --- a/pkg/controller/statefulset/stateful_set.go +++ b/pkg/controller/statefulset/stateful_set.go @@ -71,7 +71,7 @@ type StatefulSetController struct { // revListerSynced returns true if the rev shared informer has synced at least once revListerSynced cache.InformerSynced // StatefulSets that need to be synced. - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] // eventBroadcaster is the core of event processing pipeline. 
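Aside, not part of the patch: the retryOrForget helper above becomes a free function generic over the key type, so the service-account and secret queues can share it despite having different key types. A minimal sketch without the logger (the names mirror the patch, the body is illustrative):

package example

import "k8s.io/client-go/util/workqueue"

func retryOrForget[T comparable](queue workqueue.TypedRateLimitingInterface[T], key T, requeue bool, maxRetries int) {
	if !requeue {
		queue.Forget(key)
		return
	}
	if queue.NumRequeues(key) < maxRetries {
		queue.AddRateLimited(key) // requeue with backoff
		return
	}
	queue.Forget(key) // give up and stop tracking failure history for this key
}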
eventBroadcaster record.EventBroadcaster } @@ -101,8 +101,11 @@ func NewStatefulSetController( ), pvcListerSynced: pvcInformer.Informer().HasSynced, revListerSynced: revInformer.Informer().HasSynced, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "statefulset"), - podControl: controller.RealPodControl{KubeClient: kubeClient, Recorder: recorder}, + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "statefulset"}, + ), + podControl: controller.RealPodControl{KubeClient: kubeClient, Recorder: recorder}, eventBroadcaster: eventBroadcaster, } @@ -428,8 +431,8 @@ func (ssc *StatefulSetController) processNextWorkItem(ctx context.Context) bool return false } defer ssc.queue.Done(key) - if err := ssc.sync(ctx, key.(string)); err != nil { - utilruntime.HandleError(fmt.Errorf("error syncing StatefulSet %v, requeuing: %v", key.(string), err)) + if err := ssc.sync(ctx, key); err != nil { + utilruntime.HandleError(fmt.Errorf("error syncing StatefulSet %v, requeuing: %w", key, err)) ssc.queue.AddRateLimited(key) } else { ssc.queue.Forget(key) diff --git a/pkg/controller/statefulset/stateful_set_test.go b/pkg/controller/statefulset/stateful_set_test.go index 70f50f5520732..c40766b39070d 100644 --- a/pkg/controller/statefulset/stateful_set_test.go +++ b/pkg/controller/statefulset/stateful_set_test.go @@ -297,10 +297,8 @@ func TestStatefulSetControllerAddPod(t *testing.T) { ssc.addPod(logger, pod1) key, done := ssc.queue.Get() - if key == nil || done { + if key == "" || done { t.Error("failed to enqueue StatefulSet") - } else if key, ok := key.(string); !ok { - t.Error("key is not a string") } else if expectedKey, _ := controller.KeyFunc(set1); expectedKey != key { t.Errorf("expected StatefulSet key %s found %s", expectedKey, key) } @@ -308,10 +306,8 @@ func TestStatefulSetControllerAddPod(t *testing.T) { ssc.addPod(logger, pod2) key, done = ssc.queue.Get() - if key == nil || done { + if key == "" || done { t.Error("failed to enqueue StatefulSet") - } else if key, ok := key.(string); !ok { - t.Error("key is not a string") } else if expectedKey, _ := controller.KeyFunc(set2); expectedKey != key { t.Errorf("expected StatefulSet key %s found %s", expectedKey, key) } @@ -348,7 +344,7 @@ func TestStatefulSetControllerAddPodNoSet(t *testing.T) { ssc.addPod(logger, pod) ssc.queue.ShutDown() key, _ := ssc.queue.Get() - if key != nil { + if key != "" { t.Errorf("StatefulSet enqueued key for Pod with no Set %s", key) } } @@ -368,10 +364,8 @@ func TestStatefulSetControllerUpdatePod(t *testing.T) { fakeResourceVersion(pod1) ssc.updatePod(logger, &prev, pod1) key, done := ssc.queue.Get() - if key == nil || done { + if key == "" || done { t.Error("failed to enqueue StatefulSet") - } else if key, ok := key.(string); !ok { - t.Error("key is not a string") } else if expectedKey, _ := controller.KeyFunc(set1); expectedKey != key { t.Errorf("expected StatefulSet key %s found %s", expectedKey, key) } @@ -380,10 +374,8 @@ func TestStatefulSetControllerUpdatePod(t *testing.T) { fakeResourceVersion(pod2) ssc.updatePod(logger, &prev, pod2) key, done = ssc.queue.Get() - if key == nil || done { + if key == "" || done { t.Error("failed to enqueue StatefulSet") - } else if key, ok := key.(string); !ok { - t.Error("key is not a string") } else if expectedKey, _ := controller.KeyFunc(set2); expectedKey != key { t.Errorf("expected StatefulSet key %s found %s", expectedKey, key) } @@ 
-399,7 +391,7 @@ func TestStatefulSetControllerUpdatePodWithNoSet(t *testing.T) { ssc.updatePod(logger, &prev, pod) ssc.queue.ShutDown() key, _ := ssc.queue.Get() - if key != nil { + if key != "" { t.Errorf("StatefulSet enqueued key for Pod with no Set %s", key) } } @@ -413,7 +405,7 @@ func TestStatefulSetControllerUpdatePodWithSameVersion(t *testing.T) { ssc.updatePod(logger, pod, pod) ssc.queue.ShutDown() key, _ := ssc.queue.Get() - if key != nil { + if key != "" { t.Errorf("StatefulSet enqueued key for Pod with no Set %s", key) } } @@ -487,20 +479,16 @@ func TestStatefulSetControllerDeletePod(t *testing.T) { ssc.deletePod(logger, pod1) key, done := ssc.queue.Get() - if key == nil || done { + if key == "" || done { t.Error("failed to enqueue StatefulSet") - } else if key, ok := key.(string); !ok { - t.Error("key is not a string") } else if expectedKey, _ := controller.KeyFunc(set1); expectedKey != key { t.Errorf("expected StatefulSet key %s found %s", expectedKey, key) } ssc.deletePod(logger, pod2) key, done = ssc.queue.Get() - if key == nil || done { + if key == "" || done { t.Error("failed to enqueue StatefulSet") - } else if key, ok := key.(string); !ok { - t.Error("key is not a string") } else if expectedKey, _ := controller.KeyFunc(set2); expectedKey != key { t.Errorf("expected StatefulSet key %s found %s", expectedKey, key) } @@ -533,10 +521,8 @@ func TestStatefulSetControllerDeletePodTombstone(t *testing.T) { tombstone := cache.DeletedFinalStateUnknown{Key: tombstoneKey, Obj: pod} ssc.deletePod(logger, tombstone) key, done := ssc.queue.Get() - if key == nil || done { + if key == "" || done { t.Error("failed to enqueue StatefulSet") - } else if key, ok := key.(string); !ok { - t.Error("key is not a string") } else if expectedKey, _ := controller.KeyFunc(set); expectedKey != key { t.Errorf("expected StatefulSet key %s found %s", expectedKey, key) } @@ -952,7 +938,7 @@ func newFakeStatefulSetController(ctx context.Context, initialObjects ...runtime func fakeWorker(ssc *StatefulSetController) { if obj, done := ssc.queue.Get(); !done { - ssc.sync(context.TODO(), obj.(string)) + _ = ssc.sync(context.TODO(), obj) ssc.queue.Done(obj) } } diff --git a/pkg/controller/storageversiongc/gc_controller.go b/pkg/controller/storageversiongc/gc_controller.go index b7f4b936afcd0..3356a25e22dd7 100644 --- a/pkg/controller/storageversiongc/gc_controller.go +++ b/pkg/controller/storageversiongc/gc_controller.go @@ -50,8 +50,8 @@ type Controller struct { storageVersionSynced cache.InformerSynced - leaseQueue workqueue.RateLimitingInterface - storageVersionQueue workqueue.RateLimitingInterface + leaseQueue workqueue.TypedRateLimitingInterface[string] + storageVersionQueue workqueue.TypedRateLimitingInterface[string] } // NewStorageVersionGC creates a new Controller. 
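Aside, not part of the patch: the key == "" checks in the tests above rely on the typed Get returning the key type's zero value once the queue has been shut down and drained, which is why the old nil comparisons no longer apply. A tiny sketch of that behavior (drainedKeyIsZero is an illustrative name):

package example

import "k8s.io/client-go/util/workqueue"

func drainedKeyIsZero() bool {
	q := workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[string]{Name: "example"})
	q.ShutDown()

	key, shutdown := q.Get() // nothing was queued, so Get reports shutdown
	return key == "" && shutdown
}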
@@ -61,8 +61,14 @@ func NewStorageVersionGC(ctx context.Context, clientset kubernetes.Interface, le leaseLister: leaseInformer.Lister(), leasesSynced: leaseInformer.Informer().HasSynced, storageVersionSynced: storageVersionInformer.Informer().HasSynced, - leaseQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "storage_version_garbage_collector_leases"), - storageVersionQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "storage_version_garbage_collector_storageversions"), + leaseQueue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "storage_version_garbage_collector_leases"}, + ), + storageVersionQueue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "storage_version_garbage_collector_storageversions"}, + ), } logger := klog.FromContext(ctx) leaseInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -121,7 +127,7 @@ func (c *Controller) processNextLease(ctx context.Context) bool { } defer c.leaseQueue.Done(key) - err := c.processDeletedLease(ctx, key.(string)) + err := c.processDeletedLease(ctx, key) if err == nil { c.leaseQueue.Forget(key) return true @@ -144,7 +150,7 @@ func (c *Controller) processNextStorageVersion(ctx context.Context) bool { } defer c.storageVersionQueue.Done(key) - err := c.syncStorageVersion(ctx, key.(string)) + err := c.syncStorageVersion(ctx, key) if err == nil { c.storageVersionQueue.Forget(key) return true diff --git a/pkg/controller/storageversionmigrator/resourceversion.go b/pkg/controller/storageversionmigrator/resourceversion.go index a6ab4c2b5ad1b..169615bfdc070 100644 --- a/pkg/controller/storageversionmigrator/resourceversion.go +++ b/pkg/controller/storageversionmigrator/resourceversion.go @@ -54,7 +54,7 @@ type ResourceVersionController struct { metadataClient metadata.Interface svmListers svmlisters.StorageVersionMigrationLister svmSynced cache.InformerSynced - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] kubeClient clientset.Interface mapper meta.ResettableRESTMapper } @@ -76,7 +76,10 @@ func NewResourceVersionController( svmListers: svmInformer.Lister(), svmSynced: svmInformer.Informer().HasSynced, mapper: mapper, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), ResourceVersionControllerName), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: ResourceVersionControllerName}, + ), } _, _ = svmInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -137,13 +140,12 @@ func (rv *ResourceVersionController) worker(ctx context.Context) { } func (rv *ResourceVersionController) processNext(ctx context.Context) bool { - eKey, quit := rv.queue.Get() + key, quit := rv.queue.Get() if quit { return false } - defer rv.queue.Done(eKey) + defer rv.queue.Done(key) - key := eKey.(string) err := rv.sync(ctx, key) if err == nil { rv.queue.Forget(key) diff --git a/pkg/controller/storageversionmigrator/storageversionmigrator.go b/pkg/controller/storageversionmigrator/storageversionmigrator.go index 2b2d420b028a4..a1284f75b2164 100644 --- a/pkg/controller/storageversionmigrator/storageversionmigrator.go +++ 
b/pkg/controller/storageversionmigrator/storageversionmigrator.go @@ -55,7 +55,7 @@ type SVMController struct { dynamicClient *dynamic.DynamicClient svmListers svmlisters.StorageVersionMigrationLister svmSynced cache.InformerSynced - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] restMapper meta.RESTMapper dependencyGraphBuilder *garbagecollector.GraphBuilder } @@ -79,7 +79,10 @@ func NewSVMController( svmSynced: svmInformer.Informer().HasSynced, restMapper: mapper, dependencyGraphBuilder: dependencyGraphBuilder, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), controllerName), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: controllerName}, + ), } _, _ = svmInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -146,13 +149,12 @@ func (svmc *SVMController) worker(ctx context.Context) { } func (svmc *SVMController) processNext(ctx context.Context) bool { - svmKey, quit := svmc.queue.Get() + key, quit := svmc.queue.Get() if quit { return false } - defer svmc.queue.Done(svmKey) + defer svmc.queue.Done(key) - key := svmKey.(string) err := svmc.sync(ctx, key) if err == nil { svmc.queue.Forget(key) diff --git a/pkg/controller/tainteviction/taint_eviction.go b/pkg/controller/tainteviction/taint_eviction.go index fb83abcfa67e4..18a0bfdda7c65 100644 --- a/pkg/controller/tainteviction/taint_eviction.go +++ b/pkg/controller/tainteviction/taint_eviction.go @@ -102,8 +102,8 @@ type Controller struct { nodeUpdateChannels []chan nodeUpdateItem podUpdateChannels []chan podUpdateItem - nodeUpdateQueue workqueue.Interface - podUpdateQueue workqueue.Interface + nodeUpdateQueue workqueue.TypedInterface[nodeUpdateItem] + podUpdateQueue workqueue.TypedInterface[podUpdateItem] } func deletePodHandler(c clientset.Interface, emitEventFunc func(types.NamespacedName), controllerName string) func(ctx context.Context, fireAt time.Time, args *WorkArgs) error { @@ -220,8 +220,8 @@ func New(ctx context.Context, c clientset.Interface, podInformer corev1informers }, taintedNodes: make(map[string][]v1.Taint), - nodeUpdateQueue: workqueue.NewWithConfig(workqueue.QueueConfig{Name: "noexec_taint_node"}), - podUpdateQueue: workqueue.NewWithConfig(workqueue.QueueConfig{Name: "noexec_taint_pod"}), + nodeUpdateQueue: workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[nodeUpdateItem]{Name: "noexec_taint_node"}), + podUpdateQueue: workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[podUpdateItem]{Name: "noexec_taint_pod"}), } tm.taintEvictionQueue = CreateWorkerQueue(deletePodHandler(c, tm.emitPodDeletionEvent, tm.name)) @@ -312,15 +312,14 @@ func (tc *Controller) Run(ctx context.Context) { // into channels. 
go func(stopCh <-chan struct{}) { for { - item, shutdown := tc.nodeUpdateQueue.Get() + nodeUpdate, shutdown := tc.nodeUpdateQueue.Get() if shutdown { break } - nodeUpdate := item.(nodeUpdateItem) hash := hash(nodeUpdate.nodeName, UpdateWorkerSize) select { case <-stopCh: - tc.nodeUpdateQueue.Done(item) + tc.nodeUpdateQueue.Done(nodeUpdate) return case tc.nodeUpdateChannels[hash] <- nodeUpdate: // tc.nodeUpdateQueue.Done is called by the nodeUpdateChannels worker @@ -330,7 +329,7 @@ func (tc *Controller) Run(ctx context.Context) { go func(stopCh <-chan struct{}) { for { - item, shutdown := tc.podUpdateQueue.Get() + podUpdate, shutdown := tc.podUpdateQueue.Get() if shutdown { break } @@ -338,11 +337,10 @@ func (tc *Controller) Run(ctx context.Context) { // between node worker setting tc.taintedNodes and pod worker reading this to decide // whether to delete pod. // It's possible that even without this assumption this code is still correct. - podUpdate := item.(podUpdateItem) hash := hash(podUpdate.nodeName, UpdateWorkerSize) select { case <-stopCh: - tc.podUpdateQueue.Done(item) + tc.podUpdateQueue.Done(podUpdate) return case tc.podUpdateChannels[hash] <- podUpdate: // tc.podUpdateQueue.Done is called by the podUpdateChannels worker diff --git a/pkg/controller/ttl/ttl_controller.go b/pkg/controller/ttl/ttl_controller.go index 417f3e903140d..c85758bcf6820 100644 --- a/pkg/controller/ttl/ttl_controller.go +++ b/pkg/controller/ttl/ttl_controller.go @@ -60,7 +60,7 @@ type Controller struct { nodeStore listers.NodeLister // Nodes that need to be synced. - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] // Returns true if all underlying informers are synced. hasSynced func() bool @@ -81,7 +81,10 @@ type Controller struct { func NewTTLController(ctx context.Context, nodeInformer informers.NodeInformer, kubeClient clientset.Interface) *Controller { ttlc := &Controller{ kubeClient: kubeClient, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ttlcontroller"), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "ttlcontroller"}, + ), } logger := klog.FromContext(ctx) nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -216,7 +219,7 @@ func (ttlc *Controller) processItem(ctx context.Context) bool { } defer ttlc.queue.Done(key) - err := ttlc.updateNodeIfNeeded(ctx, key.(string)) + err := ttlc.updateNodeIfNeeded(ctx, key) if err == nil { ttlc.queue.Forget(key) return true diff --git a/pkg/controller/ttl/ttl_controller_test.go b/pkg/controller/ttl/ttl_controller_test.go index 991fb5a906528..356f9d7134db8 100644 --- a/pkg/controller/ttl/ttl_controller_test.go +++ b/pkg/controller/ttl/ttl_controller_test.go @@ -230,7 +230,7 @@ func TestDesiredTTL(t *testing.T) { for i, testCase := range testCases { ttlController := &Controller{ - queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()), + queue: workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedControllerRateLimiter[string]()), nodeCount: testCase.nodeCount, desiredTTLSeconds: testCase.desiredTTL, boundaryStep: testCase.boundaryStep, diff --git a/pkg/controller/ttlafterfinished/ttlafterfinished_controller.go b/pkg/controller/ttlafterfinished/ttlafterfinished_controller.go index f4659a7bbe7ba..b22c74f117e86 100644 --- a/pkg/controller/ttlafterfinished/ttlafterfinished_controller.go +++ 
b/pkg/controller/ttlafterfinished/ttlafterfinished_controller.go @@ -62,7 +62,7 @@ type Controller struct { jListerSynced cache.InformerSynced // Jobs that the controller will check its TTL and attempt to delete when the TTL expires. - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] // The clock for tracking time clock clock.Clock @@ -79,7 +79,10 @@ func New(ctx context.Context, jobInformer batchinformers.JobInformer, client cli tc := &Controller{ client: client, recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "ttl-after-finished-controller"}), - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ttl_jobs_to_delete"), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "ttl_jobs_to_delete"}, + ), } logger := klog.FromContext(ctx) @@ -172,13 +175,13 @@ func (tc *Controller) processNextWorkItem(ctx context.Context) bool { } defer tc.queue.Done(key) - err := tc.processJob(ctx, key.(string)) + err := tc.processJob(ctx, key) tc.handleErr(err, key) return true } -func (tc *Controller) handleErr(err error, key interface{}) { +func (tc *Controller) handleErr(err error, key string) { if err == nil { tc.queue.Forget(key) return diff --git a/pkg/controller/validatingadmissionpolicystatus/controller.go b/pkg/controller/validatingadmissionpolicystatus/controller.go index 776d2cdf47616..4e9bf280c387b 100644 --- a/pkg/controller/validatingadmissionpolicystatus/controller.go +++ b/pkg/controller/validatingadmissionpolicystatus/controller.go @@ -41,7 +41,7 @@ const ControllerName = "validatingadmissionpolicy-status" // This controller runs type checks against referred types for each policy definition. 
type Controller struct { policyInformer informerv1.ValidatingAdmissionPolicyInformer - policyQueue workqueue.RateLimitingInterface + policyQueue workqueue.TypedRateLimitingInterface[string] policySynced cache.InformerSynced policyClient admissionregistrationv1.ValidatingAdmissionPolicyInterface @@ -69,9 +69,12 @@ func (c *Controller) Run(ctx context.Context, workers int) { func NewController(policyInformer informerv1.ValidatingAdmissionPolicyInformer, policyClient admissionregistrationv1.ValidatingAdmissionPolicyInterface, typeChecker *validatingadmissionpolicy.TypeChecker) (*Controller, error) { c := &Controller{ policyInformer: policyInformer, - policyQueue: workqueue.NewRateLimitingQueueWithConfig(workqueue.DefaultControllerRateLimiter(), workqueue.RateLimitingQueueConfig{Name: ControllerName}), - policyClient: policyClient, - typeChecker: typeChecker, + policyQueue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: ControllerName}, + ), + policyClient: policyClient, + typeChecker: typeChecker, } reg, err := policyInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { @@ -112,10 +115,6 @@ func (c *Controller) processNextWorkItem(ctx context.Context) bool { defer c.policyQueue.Done(key) err := func() error { - key, ok := key.(string) - if !ok { - return fmt.Errorf("expect a string but got %v", key) - } policy, err := c.policyInformer.Lister().Get(key) if err != nil { if kerrors.IsNotFound(err) { diff --git a/pkg/controller/volume/attachdetach/attach_detach_controller.go b/pkg/controller/volume/attachdetach/attach_detach_controller.go index e7abfef21bfb2..d09550febed6c 100644 --- a/pkg/controller/volume/attachdetach/attach_detach_controller.go +++ b/pkg/controller/volume/attachdetach/attach_detach_controller.go @@ -133,7 +133,10 @@ func NewAttachDetachController( podIndexer: podInformer.Informer().GetIndexer(), nodeLister: nodeInformer.Lister(), nodesSynced: nodeInformer.Informer().HasSynced, - pvcQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvcs"), + pvcQueue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "pvcs"}, + ), } adc.csiNodeLister = csiNodeInformer.Lister() @@ -313,7 +316,7 @@ type attachDetachController struct { broadcaster record.EventBroadcaster // pvcQueue is used to queue pvc objects - pvcQueue workqueue.RateLimitingInterface + pvcQueue workqueue.TypedRateLimitingInterface[string] // csiMigratedPluginManager detects in-tree plugins that have been migrated to CSI csiMigratedPluginManager csimigration.PluginManager @@ -600,11 +603,11 @@ func (adc *attachDetachController) processNextItem(logger klog.Logger) bool { } defer adc.pvcQueue.Done(keyObj) - if err := adc.syncPVCByKey(logger, keyObj.(string)); err != nil { + if err := adc.syncPVCByKey(logger, keyObj); err != nil { // Rather than wait for a full resync, re-add the key to the // queue to be processed. 
adc.pvcQueue.AddRateLimited(keyObj) - runtime.HandleError(fmt.Errorf("Failed to sync pvc %q, will retry again: %v", keyObj.(string), err)) + runtime.HandleError(fmt.Errorf("failed to sync pvc %q, will retry again: %w", keyObj, err)) return true } diff --git a/pkg/controller/volume/ephemeral/controller.go b/pkg/controller/volume/ephemeral/controller.go index d0160ae1e0c3b..c4ff709dd3cb1 100644 --- a/pkg/controller/volume/ephemeral/controller.go +++ b/pkg/controller/volume/ephemeral/controller.go @@ -71,7 +71,7 @@ type ephemeralController struct { // recorder is used to record events in the API server recorder record.EventRecorder - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] } // NewController creates an ephemeral volume controller. @@ -88,7 +88,10 @@ func NewController( podSynced: podInformer.Informer().HasSynced, pvcLister: pvcInformer.Lister(), pvcsSynced: pvcInformer.Informer().HasSynced, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ephemeral_volume"), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "ephemeral_volume"}, + ), } ephemeralvolumemetrics.RegisterMetrics() @@ -193,7 +196,7 @@ func (ec *ephemeralController) processNextWorkItem(ctx context.Context) bool { } defer ec.queue.Done(key) - err := ec.syncHandler(ctx, key.(string)) + err := ec.syncHandler(ctx, key) if err == nil { ec.queue.Forget(key) return true diff --git a/pkg/controller/volume/expand/expand_controller.go b/pkg/controller/volume/expand/expand_controller.go index d173192d182ce..735bc388bdc2b 100644 --- a/pkg/controller/volume/expand/expand_controller.go +++ b/pkg/controller/volume/expand/expand_controller.go @@ -87,7 +87,7 @@ type expandController struct { operationGenerator operationexecutor.OperationGenerator - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] translator CSINameTranslator @@ -104,10 +104,13 @@ func NewExpandController( csiMigratedPluginManager csimigration.PluginManager) (ExpandController, error) { expc := &expandController{ - kubeClient: kubeClient, - pvcLister: pvcInformer.Lister(), - pvcsSynced: pvcInformer.Informer().HasSynced, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "volume_expand"), + kubeClient: kubeClient, + pvcLister: pvcInformer.Lister(), + pvcsSynced: pvcInformer.Informer().HasSynced, + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "volume_expand"}, + ), translator: translator, csiMigratedPluginManager: csiMigratedPluginManager, } @@ -180,7 +183,7 @@ func (expc *expandController) processNextWorkItem(ctx context.Context) bool { } defer expc.queue.Done(key) - err := expc.syncHandler(ctx, key.(string)) + err := expc.syncHandler(ctx, key) if err == nil { expc.queue.Forget(key) return true diff --git a/pkg/controller/volume/persistentvolume/pv_controller.go b/pkg/controller/volume/persistentvolume/pv_controller.go index fd3c0ebc96b99..526e6179a0497 100644 --- a/pkg/controller/volume/persistentvolume/pv_controller.go +++ b/pkg/controller/volume/persistentvolume/pv_controller.go @@ -188,8 +188,8 @@ type PersistentVolumeController struct { // version errors in API server and other checks in this controller), // however overall speed of multi-worker controller would be lower than if // 
it runs single thread only. - claimQueue *workqueue.Type - volumeQueue *workqueue.Type + claimQueue *workqueue.Typed[string] + volumeQueue *workqueue.Typed[string] // Map of scheduled/running operations. runningOperations goroutinemap.GoRoutineMap diff --git a/pkg/controller/volume/persistentvolume/pv_controller_base.go b/pkg/controller/volume/persistentvolume/pv_controller_base.go index 4d4a912e0d887..15392ca626316 100644 --- a/pkg/controller/volume/persistentvolume/pv_controller_base.go +++ b/pkg/controller/volume/persistentvolume/pv_controller_base.go @@ -89,8 +89,8 @@ func NewController(ctx context.Context, p ControllerParameters) (*PersistentVolu clusterName: p.ClusterName, createProvisionedPVRetryCount: createProvisionedPVRetryCount, createProvisionedPVInterval: createProvisionedPVInterval, - claimQueue: workqueue.NewTypedWithConfig[any](workqueue.TypedQueueConfig[any]{Name: "claims"}), - volumeQueue: workqueue.NewTypedWithConfig[any](workqueue.TypedQueueConfig[any]{Name: "volumes"}), + claimQueue: workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[string]{Name: "claims"}), + volumeQueue: workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[string]{Name: "volumes"}), resyncPeriod: p.SyncPeriod, operationTimestamps: metrics.NewOperationStartTimeCache(), } @@ -171,7 +171,7 @@ func (ctrl *PersistentVolumeController) initializeCaches(logger klog.Logger, vol } // enqueueWork adds volume or claim to given work queue. -func (ctrl *PersistentVolumeController) enqueueWork(ctx context.Context, queue workqueue.Interface, obj interface{}) { +func (ctrl *PersistentVolumeController) enqueueWork(ctx context.Context, queue workqueue.TypedInterface[string], obj interface{}) { // Beware of "xxx deleted" events logger := klog.FromContext(ctx) if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil { @@ -489,12 +489,11 @@ func updateMigrationAnnotations(logger klog.Logger, cmpm CSIMigratedPluginManage func (ctrl *PersistentVolumeController) volumeWorker(ctx context.Context) { logger := klog.FromContext(ctx) workFunc := func(ctx context.Context) bool { - keyObj, quit := ctrl.volumeQueue.Get() + key, quit := ctrl.volumeQueue.Get() if quit { return true } - defer ctrl.volumeQueue.Done(keyObj) - key := keyObj.(string) + defer ctrl.volumeQueue.Done(key) logger.V(5).Info("volumeWorker", "volumeKey", key) _, name, err := cache.SplitMetaNamespaceKey(key) @@ -548,12 +547,11 @@ func (ctrl *PersistentVolumeController) volumeWorker(ctx context.Context) { func (ctrl *PersistentVolumeController) claimWorker(ctx context.Context) { logger := klog.FromContext(ctx) workFunc := func() bool { - keyObj, quit := ctrl.claimQueue.Get() + key, quit := ctrl.claimQueue.Get() if quit { return true } - defer ctrl.claimQueue.Done(keyObj) - key := keyObj.(string) + defer ctrl.claimQueue.Done(key) logger.V(5).Info("claimWorker", "claimKey", key) namespace, name, err := cache.SplitMetaNamespaceKey(key) diff --git a/pkg/controller/volume/pvcprotection/pvc_protection_controller.go b/pkg/controller/volume/pvcprotection/pvc_protection_controller.go index ccf42db187519..9e8614bc0cf65 100644 --- a/pkg/controller/volume/pvcprotection/pvc_protection_controller.go +++ b/pkg/controller/volume/pvcprotection/pvc_protection_controller.go @@ -51,14 +51,17 @@ type Controller struct { podListerSynced cache.InformerSynced podIndexer cache.Indexer - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] } // NewPVCProtectionController returns a new instance of PVCProtectionController. 
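Aside, not part of the patch: the PV controller's claim and volume queues above are plain typed FIFOs (*workqueue.Typed[string]) rather than rate-limited queues. A minimal enqueue/drain sketch against that type (drainVolumes and the key value are illustrative):

package example

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func drainVolumes() {
	volumeQueue := workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[string]{Name: "volumes"})

	volumeQueue.Add("pv-0001")
	volumeQueue.ShutDown() // no more adds; Get keeps returning queued items until empty

	for {
		key, quit := volumeQueue.Get()
		if quit {
			return
		}
		fmt.Println("processing volume", key) // key is already a string
		volumeQueue.Done(key)
	}
}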
func NewPVCProtectionController(logger klog.Logger, pvcInformer coreinformers.PersistentVolumeClaimInformer, podInformer coreinformers.PodInformer, cl clientset.Interface) (*Controller, error) { e := &Controller{ client: cl, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvcprotection"), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "pvcprotection"}, + ), } e.pvcLister = pvcInformer.Lister() @@ -126,7 +129,7 @@ func (c *Controller) processNextWorkItem(ctx context.Context) bool { } defer c.queue.Done(pvcKey) - pvcNamespace, pvcName, err := cache.SplitMetaNamespaceKey(pvcKey.(string)) + pvcNamespace, pvcName, err := cache.SplitMetaNamespaceKey(pvcKey) if err != nil { utilruntime.HandleError(fmt.Errorf("error parsing PVC key %q: %v", pvcKey, err)) return true diff --git a/pkg/controller/volume/pvprotection/pv_protection_controller.go b/pkg/controller/volume/pvprotection/pv_protection_controller.go index d45b87190d204..8f29f383f9c2e 100644 --- a/pkg/controller/volume/pvprotection/pv_protection_controller.go +++ b/pkg/controller/volume/pvprotection/pv_protection_controller.go @@ -45,14 +45,17 @@ type Controller struct { pvLister corelisters.PersistentVolumeLister pvListerSynced cache.InformerSynced - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] } // NewPVProtectionController returns a new *Controller. func NewPVProtectionController(logger klog.Logger, pvInformer coreinformers.PersistentVolumeInformer, cl clientset.Interface) *Controller { e := &Controller{ client: cl, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvprotection"), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "pvprotection"}, + ), } e.pvLister = pvInformer.Lister() @@ -102,7 +105,7 @@ func (c *Controller) processNextWorkItem(ctx context.Context) bool { } defer c.queue.Done(pvKey) - pvName := pvKey.(string) + pvName := pvKey err := c.processPV(ctx, pvName) if err == nil { diff --git a/pkg/controlplane/controller/clusterauthenticationtrust/cluster_authentication_trust_controller.go b/pkg/controlplane/controller/clusterauthenticationtrust/cluster_authentication_trust_controller.go index e816ed2436434..4953917139157 100644 --- a/pkg/controlplane/controller/clusterauthenticationtrust/cluster_authentication_trust_controller.go +++ b/pkg/controlplane/controller/clusterauthenticationtrust/cluster_authentication_trust_controller.go @@ -61,7 +61,7 @@ type Controller struct { // queue is where incoming work is placed to de-dup and to allow "easy" rate limited requeues on errors. 
// we only ever place one entry in here, but it is keyed as usual: namespace/name - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] // kubeSystemConfigMapInformer is tracked so that we can start these on Run kubeSystemConfigMapInformer cache.SharedIndexInformer @@ -94,11 +94,14 @@ func NewClusterAuthenticationTrustController(requiredAuthenticationData ClusterA kubeSystemConfigMapInformer := corev1informers.NewConfigMapInformer(kubeClient, configMapNamespace, 12*time.Hour, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) c := &Controller{ - requiredAuthenticationData: requiredAuthenticationData, - configMapLister: corev1listers.NewConfigMapLister(kubeSystemConfigMapInformer.GetIndexer()), - configMapClient: kubeClient.CoreV1(), - namespaceClient: kubeClient.CoreV1(), - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "cluster_authentication_trust_controller"), + requiredAuthenticationData: requiredAuthenticationData, + configMapLister: corev1listers.NewConfigMapLister(kubeSystemConfigMapInformer.GetIndexer()), + configMapClient: kubeClient.CoreV1(), + namespaceClient: kubeClient.CoreV1(), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "cluster_authentication_trust_controller"}, + ), preRunCaches: []cache.InformerSynced{kubeSystemConfigMapInformer.HasSynced}, kubeSystemConfigMapInformer: kubeSystemConfigMapInformer, } diff --git a/pkg/controlplane/controller/crdregistration/crdregistration_controller.go b/pkg/controlplane/controller/crdregistration/crdregistration_controller.go index 5ee06f6a74581..578a196aa37e9 100644 --- a/pkg/controlplane/controller/crdregistration/crdregistration_controller.go +++ b/pkg/controlplane/controller/crdregistration/crdregistration_controller.go @@ -56,7 +56,7 @@ type crdRegistrationController struct { // queue is where incoming work is placed to de-dup and to allow "easy" rate limited requeues on errors // this is actually keyed by a groupVersion - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[schema.GroupVersion] } // NewCRDRegistrationController returns a controller which will register CRD GroupVersions with the auto APIService registration @@ -67,7 +67,10 @@ func NewCRDRegistrationController(crdinformer crdinformers.CustomResourceDefinit crdSynced: crdinformer.Informer().HasSynced, apiServiceRegistration: apiServiceRegistration, syncedInitialSet: make(chan struct{}), - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "crd_autoregistration_controller"), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[schema.GroupVersion](), + workqueue.TypedRateLimitingQueueConfig[schema.GroupVersion]{Name: "crd_autoregistration_controller"}, + ), } c.syncHandler = c.handleVersionUpdate @@ -164,7 +167,7 @@ func (c *crdRegistrationController) processNextWorkItem() bool { defer c.queue.Done(key) // do your work on the key. This method will contains your "do stuff" logic - err := c.syncHandler(key.(schema.GroupVersion)) + err := c.syncHandler(key) if err == nil { // if you had no error, tell the queue to stop tracking history for your key. 
This will // reset things like failure counts for per-item rate limiting diff --git a/pkg/controlplane/controller/legacytokentracking/controller.go b/pkg/controlplane/controller/legacytokentracking/controller.go index c45fdfcb35f23..512db25096acb 100644 --- a/pkg/controlplane/controller/legacytokentracking/controller.go +++ b/pkg/controlplane/controller/legacytokentracking/controller.go @@ -58,7 +58,7 @@ type Controller struct { configMapInformer cache.SharedIndexInformer configMapCache cache.Indexer configMapSynced cache.InformerSynced - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] // rate limiter controls the rate limit of the creation of the configmap. // this is useful in multi-apiserver cluster to prevent config existing in a @@ -80,8 +80,11 @@ func newController(cs kubernetes.Interface, cl clock.Clock, limiter *rate.Limite }) c := &Controller{ - configMapClient: cs.CoreV1(), - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "legacy_token_tracking_controller"), + configMapClient: cs.CoreV1(), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "legacy_token_tracking_controller"}, + ), configMapInformer: informer, configMapCache: informer.GetIndexer(), configMapSynced: informer.HasSynced, diff --git a/pkg/kubelet/cm/dra/plugin/noderesources.go b/pkg/kubelet/cm/dra/plugin/noderesources.go index 0a02af6983485..3da0aecfa854c 100644 --- a/pkg/kubelet/cm/dra/plugin/noderesources.go +++ b/pkg/kubelet/cm/dra/plugin/noderesources.go @@ -56,7 +56,7 @@ type nodeResourcesController struct { kubeClient kubernetes.Interface getNode func() (*v1.Node, error) wg sync.WaitGroup - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] sliceStore cache.Store mutex sync.RWMutex @@ -96,10 +96,13 @@ func startNodeResourcesController(ctx context.Context, kubeClient kubernetes.Int ctx = klog.NewContext(ctx, logger) c := &nodeResourcesController{ - ctx: ctx, - kubeClient: kubeClient, - getNode: getNode, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "node_resource_slices"), + ctx: ctx, + kubeClient: kubeClient, + getNode: getNode, + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "node_resource_slices"}, + ), activePlugins: make(map[string]*activePlugin), } @@ -347,7 +350,7 @@ func (c *nodeResourcesController) processNextWorkItem(ctx context.Context) bool } defer c.queue.Done(key) - driverName := key.(string) + driverName := key // Panics are caught and treated like errors. 
var err error diff --git a/pkg/kubelet/logs/container_log_manager.go b/pkg/kubelet/logs/container_log_manager.go index aff93d97e95e3..fdb6fe0abd3d3 100644 --- a/pkg/kubelet/logs/container_log_manager.go +++ b/pkg/kubelet/logs/container_log_manager.go @@ -146,7 +146,7 @@ type containerLogManager struct { policy LogRotatePolicy clock clock.Clock mutex sync.Mutex - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] maxWorkers int monitoringPeriod metav1.Duration } @@ -172,10 +172,13 @@ func NewContainerLogManager(runtimeService internalapi.RuntimeService, osInterfa MaxSize: parsedMaxSize, MaxFiles: maxFiles, }, - clock: clock.RealClock{}, - mutex: sync.Mutex{}, - maxWorkers: maxWorkers, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "kubelet_log_rotate_manager"), + clock: clock.RealClock{}, + mutex: sync.Mutex{}, + maxWorkers: maxWorkers, + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "kubelet_log_rotate_manager"}, + ), monitoringPeriod: monitorInterval, }, nil } @@ -264,7 +267,7 @@ func (c *containerLogManager) processContainer(ctx context.Context, worker int) }() // Always default the return to true to keep the processing of Queue ongoing ok = true - id := key.(string) + id := key resp, err := c.runtimeService.ContainerStatus(ctx, id, false) if err != nil { diff --git a/pkg/kubelet/logs/container_log_manager_test.go b/pkg/kubelet/logs/container_log_manager_test.go index 130723a068ef8..561efeeecca33 100644 --- a/pkg/kubelet/logs/container_log_manager_test.go +++ b/pkg/kubelet/logs/container_log_manager_test.go @@ -96,10 +96,13 @@ func TestRotateLogs(t *testing.T) { MaxSize: testMaxSize, MaxFiles: testMaxFiles, }, - osInterface: container.RealOS{}, - clock: testingclock.NewFakeClock(now), - mutex: sync.Mutex{}, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "kubelet_log_rotate_manager"), + osInterface: container.RealOS{}, + clock: testingclock.NewFakeClock(now), + mutex: sync.Mutex{}, + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "kubelet_log_rotate_manager"}, + ), maxWorkers: 10, monitoringPeriod: v1.Duration{Duration: 10 * time.Second}, } @@ -204,10 +207,13 @@ func TestClean(t *testing.T) { MaxSize: testMaxSize, MaxFiles: testMaxFiles, }, - osInterface: container.RealOS{}, - clock: testingclock.NewFakeClock(now), - mutex: sync.Mutex{}, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "kubelet_log_rotate_manager"), + osInterface: container.RealOS{}, + clock: testingclock.NewFakeClock(now), + mutex: sync.Mutex{}, + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "kubelet_log_rotate_manager"}, + ), maxWorkers: 10, monitoringPeriod: v1.Duration{Duration: 10 * time.Second}, } @@ -411,12 +417,15 @@ func TestRotateLatestLog(t *testing.T) { now := time.Now() f := critest.NewFakeRuntimeService() c := &containerLogManager{ - runtimeService: f, - policy: LogRotatePolicy{MaxFiles: test.maxFiles}, - osInterface: container.RealOS{}, - clock: testingclock.NewFakeClock(now), - mutex: sync.Mutex{}, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), 
"kubelet_log_rotate_manager"), + runtimeService: f, + policy: LogRotatePolicy{MaxFiles: test.maxFiles}, + osInterface: container.RealOS{}, + clock: testingclock.NewFakeClock(now), + mutex: sync.Mutex{}, + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "kubelet_log_rotate_manager"}, + ), maxWorkers: 10, monitoringPeriod: v1.Duration{Duration: 10 * time.Second}, } diff --git a/pkg/registry/core/service/ipallocator/cidrallocator.go b/pkg/registry/core/service/ipallocator/cidrallocator.go index 2c2f0ad08c97d..968c6c123e88a 100644 --- a/pkg/registry/core/service/ipallocator/cidrallocator.go +++ b/pkg/registry/core/service/ipallocator/cidrallocator.go @@ -57,7 +57,7 @@ type MetaAllocator struct { ipAddressLister networkingv1alpha1listers.IPAddressLister ipAddressSynced cache.InformerSynced ipAddressInformer networkingv1alpha1informers.IPAddressInformer - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] internalStopCh chan struct{} @@ -92,10 +92,13 @@ func NewMetaAllocator( ipAddressLister: ipAddressInformer.Lister(), ipAddressSynced: ipAddressInformer.Informer().HasSynced, ipAddressInformer: ipAddressInformer, - queue: workqueue.NewRateLimitingQueueWithConfig(workqueue.DefaultControllerRateLimiter(), workqueue.RateLimitingQueueConfig{Name: ControllerName}), - internalStopCh: make(chan struct{}), - tree: iptree.New[*Allocator](), - ipFamily: family, + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: ControllerName}, + ), + internalStopCh: make(chan struct{}), + tree: iptree.New[*Allocator](), + ipFamily: family, } _, _ = serviceCIDRInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ diff --git a/pkg/registry/core/service/ipallocator/controller/repairip.go b/pkg/registry/core/service/ipallocator/controller/repairip.go index a38e613e4e24c..062002e7c2b00 100644 --- a/pkg/registry/core/service/ipallocator/controller/repairip.go +++ b/pkg/registry/core/service/ipallocator/controller/repairip.go @@ -100,9 +100,9 @@ type RepairIPAddress struct { ipAddressLister networkinglisters.IPAddressLister ipAddressSynced cache.InformerSynced - cidrQueue workqueue.RateLimitingInterface - svcQueue workqueue.RateLimitingInterface - ipQueue workqueue.RateLimitingInterface + cidrQueue workqueue.TypedRateLimitingInterface[string] + svcQueue workqueue.TypedRateLimitingInterface[string] + ipQueue workqueue.TypedRateLimitingInterface[string] workerLoopPeriod time.Duration muTree sync.Mutex @@ -132,14 +132,23 @@ func NewRepairIPAddress(interval time.Duration, serviceCIDRSynced: serviceCIDRInformer.Informer().HasSynced, ipAddressLister: ipAddressInformer.Lister(), ipAddressSynced: ipAddressInformer.Informer().HasSynced, - cidrQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "servicecidrs"), - svcQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "services"), - ipQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ipaddresses"), - tree: iptree.New[string](), - workerLoopPeriod: time.Second, - broadcaster: eventBroadcaster, - recorder: recorder, - clock: clock.RealClock{}, + cidrQueue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + 
workqueue.TypedRateLimitingQueueConfig[string]{Name: "servicecidrs"}, + ), + svcQueue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "services"}, + ), + ipQueue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "ipaddresses"}, + ), + tree: iptree.New[string](), + workerLoopPeriod: time.Second, + broadcaster: eventBroadcaster, + recorder: recorder, + clock: clock.RealClock{}, } _, _ = serviceInformer.Informer().AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{ @@ -310,13 +319,13 @@ func (r *RepairIPAddress) processNextWorkSvc() bool { } defer r.svcQueue.Done(eKey) - err := r.syncService(eKey.(string)) + err := r.syncService(eKey) r.handleSvcErr(err, eKey) return true } -func (r *RepairIPAddress) handleSvcErr(err error, key interface{}) { +func (r *RepairIPAddress) handleSvcErr(err error, key string) { if err == nil { r.svcQueue.Forget(key) return @@ -458,13 +467,13 @@ func (r *RepairIPAddress) processNextWorkIp() bool { } defer r.ipQueue.Done(eKey) - err := r.syncIPAddress(eKey.(string)) - r.handleIpErr(err, eKey) + err := r.syncIPAddress(eKey) + r.handleIPErr(err, eKey) return true } -func (r *RepairIPAddress) handleIpErr(err error, key interface{}) { +func (r *RepairIPAddress) handleIPErr(err error, key string) { if err == nil { r.ipQueue.Forget(key) return @@ -566,7 +575,7 @@ func (r *RepairIPAddress) processNextWorkCIDR() bool { return true } -func (r *RepairIPAddress) handleCIDRErr(err error, key interface{}) { +func (r *RepairIPAddress) handleCIDRErr(err error, key string) { if err == nil { r.cidrQueue.Forget(key) return diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/apiapproval/apiapproval_controller.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/apiapproval/apiapproval_controller.go index f3cd027bc48f0..ac1d46e4db85a 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/apiapproval/apiapproval_controller.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/apiapproval/apiapproval_controller.go @@ -46,7 +46,7 @@ type KubernetesAPIApprovalPolicyConformantConditionController struct { // To allow injection for testing. 
syncFn func(key string) error - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] // last protectedAnnotation value this controller updated the condition per CRD name (to avoid two // different version of the apiextensions-apiservers in HA to fight for the right message) @@ -60,10 +60,13 @@ func NewKubernetesAPIApprovalPolicyConformantConditionController( crdClient client.CustomResourceDefinitionsGetter, ) *KubernetesAPIApprovalPolicyConformantConditionController { c := &KubernetesAPIApprovalPolicyConformantConditionController{ - crdClient: crdClient, - crdLister: crdInformer.Lister(), - crdSynced: crdInformer.Informer().HasSynced, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "kubernetes_api_approval_conformant_condition_controller"), + crdClient: crdClient, + crdLister: crdInformer.Lister(), + crdSynced: crdInformer.Informer().HasSynced, + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "kubernetes_api_approval_conformant_condition_controller"}, + ), lastSeenProtectedAnnotation: map[string]string{}, } @@ -210,7 +213,7 @@ func (c *KubernetesAPIApprovalPolicyConformantConditionController) processNextWo } defer c.queue.Done(key) - err := c.syncFn(key.(string)) + err := c.syncFn(key) if err == nil { c.queue.Forget(key) return true diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/establish/establishing_controller.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/establish/establishing_controller.go index c2d7d55718fca..4d5acb4c9707e 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/establish/establishing_controller.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/establish/establishing_controller.go @@ -45,7 +45,7 @@ type EstablishingController struct { // To allow injection for testing. syncFn func(key string) error - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] } // NewEstablishingController creates new EstablishingController. @@ -55,7 +55,10 @@ func NewEstablishingController(crdInformer informers.CustomResourceDefinitionInf crdClient: crdClient, crdLister: crdInformer.Lister(), crdSynced: crdInformer.Informer().HasSynced, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "crdEstablishing"), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "crdEstablishing"}, + ), } ec.syncFn = ec.sync @@ -100,7 +103,7 @@ func (ec *EstablishingController) processNextWorkItem() bool { } defer ec.queue.Done(key) - err := ec.syncFn(key.(string)) + err := ec.syncFn(key) if err == nil { ec.queue.Forget(key) return true diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/finalizer/crd_finalizer.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/finalizer/crd_finalizer.go index d559dca45d8f4..b5d55953f3f2c 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/finalizer/crd_finalizer.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/finalizer/crd_finalizer.go @@ -66,7 +66,7 @@ type CRDFinalizer struct { // To allow injection for testing. 
syncFn func(key string) error - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] } // ListerCollectionDeleter combines rest.Lister and rest.CollectionDeleter. @@ -93,7 +93,10 @@ func NewCRDFinalizer( crdLister: crdInformer.Lister(), crdSynced: crdInformer.Informer().HasSynced, crClientGetter: crClientGetter, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "crd_finalizer"), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "crd_finalizer"}, + ), } crdInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -290,7 +293,7 @@ func (c *CRDFinalizer) processNextWorkItem() bool { } defer c.queue.Done(key) - err := c.syncFn(key.(string)) + err := c.syncFn(key) if err == nil { c.queue.Forget(key) return true diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/nonstructuralschema/nonstructuralschema_controller.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/nonstructuralschema/nonstructuralschema_controller.go index 229d194b87880..55e467d9c0549 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/nonstructuralschema/nonstructuralschema_controller.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/nonstructuralschema/nonstructuralschema_controller.go @@ -50,7 +50,7 @@ type ConditionController struct { // To allow injection for testing. syncFn func(key string) error - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] // last generation this controller updated the condition per CRD name (to avoid two // different version of the apiextensions-apiservers in HA to fight for the right message) @@ -64,10 +64,13 @@ func NewConditionController( crdClient client.CustomResourceDefinitionsGetter, ) *ConditionController { c := &ConditionController{ - crdClient: crdClient, - crdLister: crdInformer.Lister(), - crdSynced: crdInformer.Informer().HasSynced, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "non_structural_schema_condition_controller"), + crdClient: crdClient, + crdLister: crdInformer.Lister(), + crdSynced: crdInformer.Informer().HasSynced, + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "non_structural_schema_condition_controller"}, + ), lastSeenGeneration: map[string]int64{}, } @@ -216,7 +219,7 @@ func (c *ConditionController) processNextWorkItem() bool { } defer c.queue.Done(key) - err := c.syncFn(key.(string)) + err := c.syncFn(key) if err == nil { c.queue.Forget(key) return true diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/openapi/controller.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/openapi/controller.go index 54255de099b23..a83d298f8f94c 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/openapi/controller.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/openapi/controller.go @@ -51,7 +51,7 @@ type Controller struct { // To allow injection for testing. 
syncFn func(string) error - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] staticSpec *spec.Swagger @@ -114,9 +114,12 @@ func createSpecCache(crd *apiextensionsv1.CustomResourceDefinition) *specCache { // NewController creates a new Controller with input CustomResourceDefinition informer func NewController(crdInformer informers.CustomResourceDefinitionInformer) *Controller { c := &Controller{ - crdLister: crdInformer.Lister(), - crdsSynced: crdInformer.Informer().HasSynced, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "crd_openapi_controller"), + crdLister: crdInformer.Lister(), + crdsSynced: crdInformer.Informer().HasSynced, + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "crd_openapi_controller"}, + ), specsByName: map[string]*specCache{}, } @@ -183,11 +186,11 @@ func (c *Controller) processNextWorkItem() bool { defer func() { elapsed := time.Since(start) if elapsed > time.Second { - klog.Warningf("slow openapi aggregation of %q: %s", key.(string), elapsed) + klog.Warningf("slow openapi aggregation of %q: %s", key, elapsed) } }() - err := c.syncFn(key.(string)) + err := c.syncFn(key) if err == nil { c.queue.Forget(key) return true diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/openapiv3/controller.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/openapiv3/controller.go index c9cc4b302c35c..7e072d3855864 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/openapiv3/controller.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/openapiv3/controller.go @@ -50,7 +50,7 @@ type Controller struct { // To allow injection for testing. 
syncFn func(string) error - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] openAPIV3Service *handler3.OpenAPIService @@ -62,9 +62,12 @@ type Controller struct { // NewController creates a new Controller with input CustomResourceDefinition informer func NewController(crdInformer informers.CustomResourceDefinitionInformer) *Controller { c := &Controller{ - crdLister: crdInformer.Lister(), - crdsSynced: crdInformer.Informer().HasSynced, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "crd_openapi_v3_controller"), + crdLister: crdInformer.Lister(), + crdsSynced: crdInformer.Informer().HasSynced, + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "crd_openapi_v3_controller"}, + ), specsByGVandName: map[schema.GroupVersion]map[string]*spec3.OpenAPI{}, } @@ -133,11 +136,11 @@ func (c *Controller) processNextWorkItem() bool { defer func() { elapsed := time.Since(start) if elapsed > time.Second { - klog.Warningf("slow openapi aggregation of %q: %s", key.(string), elapsed) + klog.Warningf("slow openapi aggregation of %q: %s", key, elapsed) } }() - err := c.syncFn(key.(string)) + err := c.syncFn(key) if err == nil { c.queue.Forget(key) return true diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/naming_controller.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/naming_controller.go index d4165a0945774..1a019152032fe 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/naming_controller.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/naming_controller.go @@ -58,7 +58,7 @@ type NamingConditionController struct { // To allow injection for testing. 
syncFn func(key string) error - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] } func NewNamingConditionController( @@ -69,7 +69,10 @@ func NewNamingConditionController( crdClient: crdClient, crdLister: crdInformer.Lister(), crdSynced: crdInformer.Informer().HasSynced, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "crd_naming_condition_controller"), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "crd_naming_condition_controller"}, + ), } informerIndexer := crdInformer.Informer().GetIndexer() @@ -314,7 +317,7 @@ func (c *NamingConditionController) processNextWorkItem() bool { } defer c.queue.Done(key) - err := c.syncFn(key.(string)) + err := c.syncFn(key) if err == nil { c.queue.Forget(key) return true diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/policy/internal/generic/controller.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/policy/internal/generic/controller.go index 4334c0dd82c18..f08e1cc559837 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/policy/internal/generic/controller.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/policy/internal/generic/controller.go @@ -40,7 +40,7 @@ var _ Controller[runtime.Object] = &controller[runtime.Object]{} type controller[T runtime.Object] struct { informer Informer[T] - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] // Returns an error if there was a transient error during reconciliation // and the object should be tried again later. @@ -99,7 +99,10 @@ func (c *controller[T]) Run(ctx context.Context) error { klog.Infof("starting %s", c.options.Name) defer klog.Infof("stopping %s", c.options.Name) - c.queue = workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), c.options.Name) + c.queue = workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: c.options.Name}, + ) // Forcefully shutdown workqueue. Drop any enqueued items. // Important to do this in a `defer` at the start of `Run`. @@ -219,7 +222,7 @@ func (c *controller[T]) runWorker() { } // We wrap this block in a func so we can defer c.workqueue.Done. - err := func(obj interface{}) error { + err := func(obj string) error { // We call Done here so the workqueue knows we have finished // processing this item. We also must remember to call Forget if we // do not want this work item being re-queued. For example, we do @@ -227,19 +230,6 @@ func (c *controller[T]) runWorker() { // put back on the workqueue and attempted again after a back-off // period. defer c.queue.Done(obj) - var key string - var ok bool - // We expect strings to come off the workqueue. These are of the - // form namespace/name. We do this as the delayed nature of the - // workqueue means the items in the informer cache may actually be - // more up to date that when the item was initially put onto the - // workqueue. - if key, ok = obj.(string); !ok { - // How did an incorrectly formatted key get in the workqueue? - // Done is sufficient. 
(Forget resets rate limiter for the key, - // but the key is invalid so there is no point in doing that) - return fmt.Errorf("expected string in workqueue but got %#v", obj) - } defer c.hasProcessed.Finished(key) if err := c.reconcile(key); err != nil { diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/resourcequota/controller.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/resourcequota/controller.go index 8301de72053d1..9a54c40b24fcd 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/resourcequota/controller.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/resourcequota/controller.go @@ -61,7 +61,7 @@ type quotaEvaluator struct { // The technique is valuable for rollup activities to avoid fanout and reduce resource contention. // We could move this into a library if another component needed it. // queue is indexed by namespace, so that we bundle up on a per-namespace basis - queue *workqueue.Type + queue *workqueue.Typed[string] workLock sync.Mutex work map[string][]*admissionWaiter dirtyWork map[string][]*admissionWaiter @@ -122,7 +122,7 @@ func NewQuotaEvaluator(quotaAccessor QuotaAccessor, ignoredResources map[schema. ignoredResources: ignoredResources, registry: quotaRegistry, - queue: workqueue.NewTypedWithConfig[any](workqueue.TypedQueueConfig[any]{Name: "admission_quota_controller"}), + queue: workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[string]{Name: "admission_quota_controller"}), work: map[string][]*admissionWaiter{}, dirtyWork: map[string][]*admissionWaiter{}, inProgress: sets.String{}, @@ -666,11 +666,10 @@ func (e *quotaEvaluator) completeWork(ns string) { // returned namespace (regardless of whether the work item list is // empty). func (e *quotaEvaluator) getWork() (string, []*admissionWaiter, bool) { - uncastNS, shutdown := e.queue.Get() + ns, shutdown := e.queue.Get() if shutdown { return "", []*admissionWaiter{}, shutdown } - ns := uncastNS.(string) e.workLock.Lock() defer e.workLock.Unlock() diff --git a/staging/src/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader_controller.go b/staging/src/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader_controller.go index d8c4090b12a5a..dc844ee73bff8 100644 --- a/staging/src/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader_controller.go +++ b/staging/src/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader_controller.go @@ -20,6 +20,7 @@ import ( "context" "encoding/json" "fmt" + "sync/atomic" "time" corev1 "k8s.io/api/core/v1" @@ -35,7 +36,6 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" - "sync/atomic" ) const ( @@ -74,7 +74,7 @@ type RequestHeaderAuthRequestController struct { configmapInformer cache.SharedIndexInformer configmapInformerSynced cache.InformerSynced - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] // exportedRequestHeaderBundle is a requestHeaderBundle that contains the last read, non-zero length content of the configmap exportedRequestHeaderBundle atomic.Value @@ -104,7 +104,10 @@ func NewRequestHeaderAuthRequestController( extraHeaderPrefixesKey: extraHeaderPrefixesKey, allowedClientNamesKey: allowedClientNamesKey, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "RequestHeaderAuthRequestController"), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + 
workqueue.TypedRateLimitingQueueConfig[string]{Name: "RequestHeaderAuthRequestController"}, + ), } // we construct our own informer because we need such a small subset of the information available. Just one namespace. diff --git a/staging/src/k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go b/staging/src/k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go index 428fd66bae7da..845a45fab312a 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go +++ b/staging/src/k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go @@ -54,7 +54,7 @@ type ConfigMapCAController struct { listeners []Listener - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] // preRunCaches are the caches to sync before starting the work of this control loop preRunCaches []cache.InformerSynced } @@ -94,7 +94,10 @@ func NewDynamicCAFromConfigMapController(purpose, namespace, name, key string, k configmapLister: configmapLister, configMapInformer: uncastConfigmapInformer, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), fmt.Sprintf("DynamicConfigMapCABundle-%s", purpose)), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: fmt.Sprintf("DynamicConfigMapCABundle-%s", purpose)}, + ), preRunCaches: []cache.InformerSynced{uncastConfigmapInformer.HasSynced}, } diff --git a/staging/src/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_cafile_content.go b/staging/src/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_cafile_content.go index 75bc49e993146..1f32adf9e467d 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_cafile_content.go +++ b/staging/src/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_cafile_content.go @@ -60,7 +60,7 @@ type DynamicFileCAContent struct { listeners []Listener // queue only ever has one item, but it has nice error handling backoff/retry semantics - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] } var _ Notifier = &DynamicFileCAContent{} @@ -82,7 +82,10 @@ func NewDynamicCAContentFromFile(purpose, filename string) (*DynamicFileCAConten ret := &DynamicFileCAContent{ name: name, filename: filename, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), fmt.Sprintf("DynamicCABundle-%s", purpose)), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: fmt.Sprintf("DynamicCABundle-%s", purpose)}, + ), } if err := ret.loadCABundle(); err != nil { return nil, err diff --git a/staging/src/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_serving_content.go b/staging/src/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_serving_content.go index 62aef4992c56d..e0dd0474b1cc7 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_serving_content.go +++ b/staging/src/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_serving_content.go @@ -47,7 +47,7 @@ type DynamicCertKeyPairContent struct { listeners []Listener // queue only ever has one item, but it has nice error handling backoff/retry semantics - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] } var _ CertKeyContentProvider = 
&DynamicCertKeyPairContent{} @@ -64,7 +64,10 @@ func NewDynamicServingContentFromFiles(purpose, certFile, keyFile string) (*Dyna name: name, certFile: certFile, keyFile: keyFile, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), fmt.Sprintf("DynamicCABundle-%s", purpose)), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: fmt.Sprintf("DynamicCABundle-%s", purpose)}, + ), } if err := ret.loadCertKeyPair(); err != nil { return nil, err diff --git a/staging/src/k8s.io/apiserver/pkg/server/dynamiccertificates/tlsconfig.go b/staging/src/k8s.io/apiserver/pkg/server/dynamiccertificates/tlsconfig.go index 56e7ffd275533..61bd6fded6ab5 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/dynamiccertificates/tlsconfig.go +++ b/staging/src/k8s.io/apiserver/pkg/server/dynamiccertificates/tlsconfig.go @@ -56,7 +56,7 @@ type DynamicServingCertificateController struct { currentServingTLSConfig atomic.Value // queue only ever has one item, but it has nice error handling backoff/retry semantics - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] eventRecorder events.EventRecorder } @@ -76,7 +76,10 @@ func NewDynamicServingCertificateController( servingCert: servingCert, sniCerts: sniCerts, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "DynamicServingCertificateController"), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "DynamicServingCertificateController"}, + ), eventRecorder: eventRecorder, } diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/controller/controller.go b/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/controller/controller.go index cde6a379ecd58..fd783f41a44b4 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/controller/controller.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/controller/controller.go @@ -49,7 +49,7 @@ type DynamicEncryptionConfigContent struct { lastLoadedEncryptionConfigHash string // queue for processing changes in encryption config file. - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] // dynamicTransformers updates the transformers when encryption config file changes. 
dynamicTransformers *encryptionconfig.DynamicTransformers @@ -78,8 +78,11 @@ func NewDynamicEncryptionConfiguration( filePath: filePath, lastLoadedEncryptionConfigHash: configContentHash, dynamicTransformers: dynamicTransformers, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), name), - apiServerID: apiServerID, + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: name}, + ), + apiServerID: apiServerID, getEncryptionConfigHash: func(_ context.Context, filepath string) (string, error) { return encryptionconfig.GetEncryptionConfigHash(filepath) }, @@ -150,7 +153,7 @@ func (d *DynamicEncryptionConfigContent) processNextWorkItem(serverCtx context.C return true } -func (d *DynamicEncryptionConfigContent) processWorkItem(serverCtx context.Context, workqueueKey interface{}) { +func (d *DynamicEncryptionConfigContent) processWorkItem(serverCtx context.Context, workqueueKey string) { var ( updatedEffectiveConfig bool err error diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/controller/controller_test.go b/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/controller/controller_test.go index 7b7af79ca7c5a..c68ed064e1d2e 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/controller/controller_test.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/controller/controller_test.go @@ -349,7 +349,7 @@ apiserver_encryption_config_controller_automatic_reloads_total{apiserver_id_hash } type mockWorkQueue struct { - workqueue.RateLimitingInterface // will panic if any unexpected method is called + workqueue.TypedRateLimitingInterface[string] // will panic if any unexpected method is called closeOnce sync.Once addCalled chan struct{} @@ -362,33 +362,33 @@ type mockWorkQueue struct { addRateLimitedCount atomic.Uint64 } -func (m *mockWorkQueue) Done(item interface{}) { +func (m *mockWorkQueue) Done(item string) { m.count.Add(1) m.wasCanceled = m.ctx.Err() != nil m.cancel() } -func (m *mockWorkQueue) Get() (item interface{}, shutdown bool) { +func (m *mockWorkQueue) Get() (item string, shutdown bool) { <-m.addCalled switch m.count.Load() { case 0: - return nil, false + return "", false case 1: - return nil, true + return "", true default: panic("too many calls to Get") } } -func (m *mockWorkQueue) Add(item interface{}) { +func (m *mockWorkQueue) Add(item string) { m.closeOnce.Do(func() { close(m.addCalled) }) } -func (m *mockWorkQueue) ShutDown() {} -func (m *mockWorkQueue) AddRateLimited(item interface{}) { m.addRateLimitedCount.Add(1) } +func (m *mockWorkQueue) ShutDown() {} +func (m *mockWorkQueue) AddRateLimited(item string) { m.addRateLimitedCount.Add(1) } type mockHealthChecker struct { pluginName string diff --git a/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go b/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go index d40cae509d227..44a2aed084ef8 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go +++ b/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go @@ -135,7 +135,7 @@ type configController struct { // configQueue holds `(interface{})(0)` when the configuration // objects need to be reprocessed. 
- configQueue workqueue.RateLimitingInterface + configQueue workqueue.TypedRateLimitingInterface[int] plLister flowcontrollister.PriorityLevelConfigurationLister plInformerSynced cache.InformerSynced @@ -292,7 +292,10 @@ func newTestableController(config TestableConfig) *configController { klog.V(2).Infof("NewTestableController %q with serverConcurrencyLimit=%d, name=%s, asFieldManager=%q", cfgCtlr.name, cfgCtlr.serverConcurrencyLimit, cfgCtlr.name, cfgCtlr.asFieldManager) // Start with longish delay because conflicts will be between // different processes, so take some time to go away. - cfgCtlr.configQueue = workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(200*time.Millisecond, 8*time.Hour), "priority_and_fairness_config_queue") + cfgCtlr.configQueue = workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.NewTypedItemExponentialFailureRateLimiter[int](200*time.Millisecond, 8*time.Hour), + workqueue.TypedRateLimitingQueueConfig[int]{Name: "priority_and_fairness_config_queue"}, + ) // ensure the data structure reflects the mandatory config cfgCtlr.lockAndDigestConfigObjects(nil, nil) fci := config.InformerFactory.Flowcontrol().V1() @@ -474,7 +477,7 @@ func (cfgCtlr *configController) processNextWorkItem() bool { return false } - func(obj interface{}) { + func(obj int) { defer cfgCtlr.configQueue.Done(obj) specificDelay, err := cfgCtlr.syncOne() switch { diff --git a/staging/src/k8s.io/client-go/examples/workqueue/main.go b/staging/src/k8s.io/client-go/examples/workqueue/main.go index e854840aed4c2..b8825dc1e2323 100644 --- a/staging/src/k8s.io/client-go/examples/workqueue/main.go +++ b/staging/src/k8s.io/client-go/examples/workqueue/main.go @@ -37,12 +37,12 @@ import ( // Controller demonstrates how to implement a controller with client-go. type Controller struct { indexer cache.Indexer - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] informer cache.Controller } // NewController creates a new Controller. -func NewController(queue workqueue.RateLimitingInterface, indexer cache.Indexer, informer cache.Controller) *Controller { +func NewController(queue workqueue.TypedRateLimitingInterface[string], indexer cache.Indexer, informer cache.Controller) *Controller { return &Controller{ informer: informer, indexer: indexer, @@ -62,7 +62,7 @@ func (c *Controller) processNextItem() bool { defer c.queue.Done(key) // Invoke the method containing the business logic - err := c.syncToStdout(key.(string)) + err := c.syncToStdout(key) // Handle the error if something went wrong during the execution of the business logic c.handleErr(err, key) return true @@ -90,7 +90,7 @@ func (c *Controller) syncToStdout(key string) error { } // handleErr checks if an error happened and makes sure we will retry later. -func (c *Controller) handleErr(err error, key interface{}) { +func (c *Controller) handleErr(err error, key string) { if err == nil { // Forget about the #AddRateLimited history of the key on every successful synchronization. 
// This ensures that future processing of updates for this key is not delayed because of @@ -168,7 +168,7 @@ func main() { podListWatcher := cache.NewListWatchFromClient(clientset.CoreV1().RESTClient(), "pods", v1.NamespaceDefault, fields.Everything()) // create the workqueue - queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()) + queue := workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedControllerRateLimiter[string]()) // Bind the workqueue to a cache with the help of an informer. This way we make sure that // whenever the cache is updated, the pod key is added to the workqueue. diff --git a/staging/src/k8s.io/client-go/transport/cert_rotation.go b/staging/src/k8s.io/client-go/transport/cert_rotation.go index dc22b6ec4cc23..e76f65812d3ae 100644 --- a/staging/src/k8s.io/client-go/transport/cert_rotation.go +++ b/staging/src/k8s.io/client-go/transport/cert_rotation.go @@ -47,14 +47,17 @@ type dynamicClientCert struct { connDialer *connrotation.Dialer // queue only ever has one item, but it has nice error handling backoff/retry semantics - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] } func certRotatingDialer(reload reloadFunc, dial utilnet.DialFunc) *dynamicClientCert { d := &dynamicClientCert{ reload: reload, connDialer: connrotation.NewDialer(connrotation.DialFunc(dial)), - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "DynamicClientCertificate"), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "DynamicClientCertificate"}, + ), } return d diff --git a/staging/src/k8s.io/cloud-provider/controllers/node/node_controller.go b/staging/src/k8s.io/cloud-provider/controllers/node/node_controller.go index 07c8d23abd482..699047bcc21b0 100644 --- a/staging/src/k8s.io/cloud-provider/controllers/node/node_controller.go +++ b/staging/src/k8s.io/cloud-provider/controllers/node/node_controller.go @@ -109,7 +109,7 @@ type CloudNodeController struct { nodesLister corelisters.NodeLister nodesSynced cache.InformerSynced - workqueue workqueue.RateLimitingInterface + workqueue workqueue.TypedRateLimitingInterface[string] } // NewCloudNodeController creates a CloudNodeController object @@ -134,7 +134,10 @@ func NewCloudNodeController( workerCount: workerCount, nodesLister: nodeInformer.Lister(), nodesSynced: nodeInformer.Informer().HasSynced, - workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "Nodes"), + workqueue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "Nodes"}, + ), } // Use shared informer to listen to add/update of nodes. Note that any nodes @@ -219,16 +222,8 @@ func (cnc *CloudNodeController) processNextWorkItem(ctx context.Context) bool { } // We wrap this block in a func so we can defer cnc.workqueue.Done. - err := func(obj interface{}) error { - defer cnc.workqueue.Done(obj) - - var key string - var ok bool - if key, ok = obj.(string); !ok { - cnc.workqueue.Forget(obj) - utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj)) - return nil - } + err := func(key string) error { + defer cnc.workqueue.Done(key) // Run the syncHandler, passing it the key of the // Node resource to be synced. 
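The hunk above shows the pattern this change applies to every worker loop: once the queue is a workqueue.TypedRateLimitingInterface[string], Get hands back a string directly, so the obj.(string) assertion and its "expected string in workqueue" error branch disappear. A minimal, self-contained sketch of that shape follows; it is illustrative only, and runWorker, syncHandler and the queue name "example" are placeholder names, not identifiers taken from any file touched here.

// Sketch only, not part of the patch: a worker loop against the typed queue.
// The item type is string (namespace/name keys).
package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func runWorker(queue workqueue.TypedRateLimitingInterface[string], syncHandler func(key string) error) {
	for {
		key, shutdown := queue.Get()
		if shutdown {
			return
		}
		func() {
			// Done must be called for every key handed out by Get.
			defer queue.Done(key)
			// key is already a string; no key.(string) assertion is needed.
			if err := syncHandler(key); err != nil {
				queue.AddRateLimited(key)
				fmt.Printf("error syncing %q, requeued: %v\n", key, err)
				return
			}
			queue.Forget(key)
		}()
	}
}

func main() {
	queue := workqueue.NewTypedRateLimitingQueueWithConfig(
		workqueue.DefaultTypedControllerRateLimiter[string](),
		workqueue.TypedRateLimitingQueueConfig[string]{Name: "example"},
	)
	queue.Add("default/example")
	// Drain the single queued item, then let Get report shutdown so the sketch exits.
	go queue.ShutDownWithDrain()
	runWorker(queue, func(key string) error {
		fmt.Println("synced", key)
		return nil
	})
}

Because Add, Done, Forget and AddRateLimited now also take the typed key, handing the queue the wrong kind of item fails at compile time instead of only surfacing at runtime.
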
@@ -241,7 +236,7 @@ func (cnc *CloudNodeController) processNextWorkItem(ctx context.Context) bool { // Finally, if no error occurs we Forget this item so it does not // get queued again until another change happens. - cnc.workqueue.Forget(obj) + cnc.workqueue.Forget(key) return nil }(obj) diff --git a/staging/src/k8s.io/cloud-provider/controllers/service/controller.go b/staging/src/k8s.io/cloud-provider/controllers/service/controller.go index dde3c36c5ddc8..0bb5fd50961cc 100644 --- a/staging/src/k8s.io/cloud-provider/controllers/service/controller.go +++ b/staging/src/k8s.io/cloud-provider/controllers/service/controller.go @@ -90,8 +90,8 @@ type Controller struct { nodeLister corelisters.NodeLister nodeListerSynced cache.InformerSynced // services and nodes that need to be synced - serviceQueue workqueue.RateLimitingInterface - nodeQueue workqueue.RateLimitingInterface + serviceQueue workqueue.TypedRateLimitingInterface[string] + nodeQueue workqueue.TypedRateLimitingInterface[string] // lastSyncedNodes is used when reconciling node state and keeps track of // the last synced set of nodes per service key. This is accessed from the // service and node controllers, hence it is protected by a lock. @@ -117,9 +117,15 @@ func New( cache: &serviceCache{serviceMap: make(map[string]*cachedService)}, nodeLister: nodeInformer.Lister(), nodeListerSynced: nodeInformer.Informer().HasSynced, - serviceQueue: workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(minRetryDelay, maxRetryDelay), "service"), - nodeQueue: workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(minRetryDelay, maxRetryDelay), "node"), - lastSyncedNodes: make(map[string][]*v1.Node), + serviceQueue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.NewTypedItemExponentialFailureRateLimiter[string](minRetryDelay, maxRetryDelay), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "service"}, + ), + nodeQueue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.NewTypedItemExponentialFailureRateLimiter[string](minRetryDelay, maxRetryDelay), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "node"}, + ), + lastSyncedNodes: make(map[string][]*v1.Node), } serviceInformer.Informer().AddEventHandlerWithResyncPeriod( @@ -282,7 +288,7 @@ func (c *Controller) processNextServiceItem(ctx context.Context) bool { } defer c.serviceQueue.Done(key) - err := c.syncService(ctx, key.(string)) + err := c.syncService(ctx, key) if err == nil { c.serviceQueue.Forget(key) return true diff --git a/staging/src/k8s.io/cloud-provider/controllers/service/controller_test.go b/staging/src/k8s.io/cloud-provider/controllers/service/controller_test.go index fa30e389467e9..4c710a7a6d588 100644 --- a/staging/src/k8s.io/cloud-provider/controllers/service/controller_test.go +++ b/staging/src/k8s.io/cloud-provider/controllers/service/controller_test.go @@ -175,9 +175,15 @@ func newController(ctx context.Context, objects ...runtime.Object) (*Controller, serviceListerSynced: serviceInformer.Informer().HasSynced, nodeLister: nodeInformer.Lister(), nodeListerSynced: nodeInformer.Informer().HasSynced, - serviceQueue: workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(minRetryDelay, maxRetryDelay), "service"), - nodeQueue: workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(minRetryDelay, maxRetryDelay), "node"), - lastSyncedNodes: make(map[string][]*v1.Node), + serviceQueue: workqueue.NewTypedRateLimitingQueueWithConfig( + 
workqueue.NewTypedItemExponentialFailureRateLimiter[string](minRetryDelay, maxRetryDelay), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "service"}, + ), + nodeQueue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.NewTypedItemExponentialFailureRateLimiter[string](minRetryDelay, maxRetryDelay), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "node"}, + ), + lastSyncedNodes: make(map[string][]*v1.Node), } informerFactory.Start(stopCh) @@ -897,8 +903,8 @@ func TestProcessServiceCreateOrUpdate(t *testing.T) { if quit { t.Fatalf("get no queue element") } - if keyExpected != keyGot.(string) { - t.Fatalf("get service key error, expected: %s, got: %s", keyExpected, keyGot.(string)) + if keyExpected != keyGot { + t.Fatalf("get service key error, expected: %s, got: %s", keyExpected, keyGot) } newService := svc.DeepCopy() @@ -2314,7 +2320,10 @@ func TestServiceQueueDelay(t *testing.T) { ctx, cancel := context.WithCancel(ctx) defer cancel() controller, cloud, client := newController(ctx) - queue := &spyWorkQueue{RateLimitingInterface: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test-service-queue-delay")} + queue := &spyWorkQueue{TypedRateLimitingInterface: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "test-service-queue-delay"}, + )} controller.serviceQueue = queue cloud.Err = tc.lbCloudErr @@ -2400,26 +2409,26 @@ func (l *fakeNodeLister) Get(name string) (*v1.Node, error) { // spyWorkQueue implements a work queue and adds the ability to inspect processed // items for testing purposes. type spyWorkQueue struct { - workqueue.RateLimitingInterface + workqueue.TypedRateLimitingInterface[string] items []spyQueueItem } // spyQueueItem represents an item that was being processed. type spyQueueItem struct { - Key interface{} + Key string // Delay represents the delayed duration if and only if AddAfter was invoked. Delay time.Duration } // AddAfter is like workqueue.RateLimitingInterface.AddAfter but records the // added key and delay internally. -func (f *spyWorkQueue) AddAfter(key interface{}, delay time.Duration) { +func (f *spyWorkQueue) AddAfter(key string, delay time.Duration) { f.items = append(f.items, spyQueueItem{ Key: key, Delay: delay, }) - f.RateLimitingInterface.AddAfter(key, delay) + f.TypedRateLimitingInterface.AddAfter(key, delay) } // getItems returns all items that were recorded. diff --git a/staging/src/k8s.io/dynamic-resource-allocation/controller/controller.go b/staging/src/k8s.io/dynamic-resource-allocation/controller/controller.go index 2aeb3660b045e..473e613cfe679 100644 --- a/staging/src/k8s.io/dynamic-resource-allocation/controller/controller.go +++ b/staging/src/k8s.io/dynamic-resource-allocation/controller/controller.go @@ -163,7 +163,7 @@ type controller struct { setReservedFor bool kubeClient kubernetes.Interface claimNameLookup *resourceclaim.Lookup - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] eventRecorder record.EventRecorder rcLister resourcev1alpha2listers.ResourceClassLister rcSynced cache.InformerSynced @@ -208,8 +208,10 @@ func New( v1.EventSource{Component: fmt.Sprintf("resource driver %s", name)}) // The work queue contains either keys for claims or PodSchedulingContext objects. 
- queue := workqueue.NewNamedRateLimitingQueue( - workqueue.DefaultControllerRateLimiter(), fmt.Sprintf("%s-queue", name)) + queue := workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: fmt.Sprintf("%s-queue", name)}, + ) // The mutation cache acts as an additional layer for the informer // cache and after an update made by the controller returns a more @@ -371,7 +373,7 @@ func (ctrl *controller) sync() { logger := klog.LoggerWithValues(ctrl.logger, "key", key) ctx := klog.NewContext(ctrl.ctx, logger) logger.V(4).Info("processing") - obj, err := ctrl.syncKey(ctx, key.(string)) + obj, err := ctrl.syncKey(ctx, key) switch err { case nil: logger.V(5).Info("completed") diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apiserver/apiservice_controller.go b/staging/src/k8s.io/kube-aggregator/pkg/apiserver/apiservice_controller.go index 52df3cb25fad2..d70e906f9853a 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/apiserver/apiservice_controller.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/apiserver/apiservice_controller.go @@ -51,7 +51,7 @@ type APIServiceRegistrationController struct { // To allow injection for testing. syncFn func(key string) error - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] } var _ dynamiccertificates.Listener = &APIServiceRegistrationController{} @@ -62,7 +62,10 @@ func NewAPIServiceRegistrationController(apiServiceInformer informers.APIService apiHandlerManager: apiHandlerManager, apiServiceLister: apiServiceInformer.Lister(), apiServiceSynced: apiServiceInformer.Informer().HasSynced, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "APIServiceRegistrationController"), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "APIServiceRegistrationController"}, + ), } apiServiceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -143,7 +146,7 @@ func (c *APIServiceRegistrationController) processNextWorkItem() bool { } defer c.queue.Done(key) - err := c.syncFn(key.(string)) + err := c.syncFn(key) if err == nil { c.queue.Forget(key) return true diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apiserver/handler_discovery.go b/staging/src/k8s.io/kube-aggregator/pkg/apiserver/handler_discovery.go index c65454e7e548a..68199b6b30ce7 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/apiserver/handler_discovery.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/apiserver/handler_discovery.go @@ -103,7 +103,7 @@ type discoveryManager struct { // It is important that the reconciler for this queue does not excessively // contact the apiserver if a key was enqueued before the server was last // contacted. 
- dirtyAPIServiceQueue workqueue.RateLimitingInterface + dirtyAPIServiceQueue workqueue.TypedRateLimitingInterface[string] // Merged handler which stores all known groupversions mergedDiscoveryHandler discoveryendpoint.ResourceManager @@ -197,8 +197,11 @@ func NewDiscoveryManager( mergedDiscoveryHandler: target, apiServices: make(map[string]groupVersionInfo), cachedResults: make(map[serviceKey]cachedResult), - dirtyAPIServiceQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "discovery-manager"), - codecs: codecs, + dirtyAPIServiceQueue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "discovery-manager"}, + ), + codecs: codecs, } } @@ -488,7 +491,7 @@ func (dm *discoveryManager) Run(stopCh <-chan struct{}, discoverySyncedCh chan<- func() { defer dm.dirtyAPIServiceQueue.Done(next) - if err := dm.syncAPIService(next.(string)); err != nil { + if err := dm.syncAPIService(next); err != nil { dm.dirtyAPIServiceQueue.AddRateLimited(next) } else { dm.dirtyAPIServiceQueue.Forget(next) diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apiserver/handler_discovery_test.go b/staging/src/k8s.io/kube-aggregator/pkg/apiserver/handler_discovery_test.go index 863ac49c566b1..bf970290e6f9c 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/apiserver/handler_discovery_test.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/apiserver/handler_discovery_test.go @@ -1072,39 +1072,39 @@ func fetchPath(handler http.Handler, etag string) (*http.Response, []byte, *apid // isComplete type completerWorkqueue struct { lock sync.Mutex - workqueue.RateLimitingInterface - processing map[interface{}]struct{} + workqueue.TypedRateLimitingInterface[string] + processing map[string]struct{} } -var _ = workqueue.RateLimitingInterface(&completerWorkqueue{}) +var _ = workqueue.TypedRateLimitingInterface[string](&completerWorkqueue{}) -func newCompleterWorkqueue(wq workqueue.RateLimitingInterface) *completerWorkqueue { +func newCompleterWorkqueue(wq workqueue.TypedRateLimitingInterface[string]) *completerWorkqueue { return &completerWorkqueue{ - RateLimitingInterface: wq, - processing: make(map[interface{}]struct{}), + TypedRateLimitingInterface: wq, + processing: make(map[string]struct{}), } } -func (q *completerWorkqueue) Add(item interface{}) { +func (q *completerWorkqueue) Add(item string) { q.lock.Lock() defer q.lock.Unlock() q.processing[item] = struct{}{} - q.RateLimitingInterface.Add(item) + q.TypedRateLimitingInterface.Add(item) } -func (q *completerWorkqueue) AddAfter(item interface{}, duration time.Duration) { +func (q *completerWorkqueue) AddAfter(item string, duration time.Duration) { q.Add(item) } -func (q *completerWorkqueue) AddRateLimited(item interface{}) { +func (q *completerWorkqueue) AddRateLimited(item string) { q.Add(item) } -func (q *completerWorkqueue) Done(item interface{}) { +func (q *completerWorkqueue) Done(item string) { q.lock.Lock() defer q.lock.Unlock() delete(q.processing, item) - q.RateLimitingInterface.Done(item) + q.TypedRateLimitingInterface.Done(item) } func (q *completerWorkqueue) isComplete() bool { diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller.go b/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller.go index 01914585977c0..843adeb33f7c5 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller.go +++ 
b/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller.go @@ -81,7 +81,7 @@ type autoRegisterController struct { apiServicesAtStart map[string]bool // queue is where incoming work is placed to de-dup and to allow "easy" rate limited requeues on errors - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] } // NewAutoRegisterController creates a new autoRegisterController. @@ -97,7 +97,10 @@ func NewAutoRegisterController(apiServiceInformer informers.APIServiceInformer, syncedSuccessfullyLock: &sync.RWMutex{}, syncedSuccessfully: map[string]bool{}, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "autoregister"), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "autoregister"}, + ), } c.syncHandler = c.checkAPIService @@ -182,7 +185,7 @@ func (c *autoRegisterController) processNextWorkItem() bool { defer c.queue.Done(key) // do your work on the key. This method will contains your "do stuff" logic - err := c.syncHandler(key.(string)) + err := c.syncHandler(key) if err == nil { // if you had no error, tell the queue to stop tracking history for your key. This will // reset things like failure counts for per-item rate limiting diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller_test.go b/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller_test.go index ab4a01b925137..dbde154236edb 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller_test.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller_test.go @@ -316,7 +316,10 @@ func TestSync(t *testing.T) { apiServiceClient: fakeClient.ApiregistrationV1(), apiServiceLister: listers.NewAPIServiceLister(apiServiceIndexer), apiServicesToSync: map[string]*apiregistrationv1.APIService{}, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "autoregister"), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "autoregister"}, + ), syncedSuccessfullyLock: &sync.RWMutex{}, syncedSuccessfully: alreadySynced, diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapi/controller.go b/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapi/controller.go index 69f32f4aa8622..3643df8003ec1 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapi/controller.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapi/controller.go @@ -47,7 +47,7 @@ const ( // them if necessary. type AggregationController struct { openAPIAggregationManager aggregator.SpecAggregator - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] downloader *aggregator.Downloader // To allow injection for testing. 
@@ -58,9 +58,9 @@ type AggregationController struct { func NewAggregationController(downloader *aggregator.Downloader, openAPIAggregationManager aggregator.SpecAggregator) *AggregationController { c := &AggregationController{ openAPIAggregationManager: openAPIAggregationManager, - queue: workqueue.NewNamedRateLimitingQueue( - workqueue.NewItemExponentialFailureRateLimiter(successfulUpdateDelay, failedUpdateMaxExpDelay), - "open_api_aggregation_controller", + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.NewTypedItemExponentialFailureRateLimiter[string](successfulUpdateDelay, failedUpdateMaxExpDelay), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "open_api_aggregation_controller"}, ), downloader: downloader, } @@ -97,7 +97,7 @@ func (c *AggregationController) processNextWorkItem() bool { } klog.V(4).Infof("OpenAPI AggregationController: Processing item %s", key) - action, err := c.syncHandler(key.(string)) + action, err := c.syncHandler(key) if err != nil { utilruntime.HandleError(fmt.Errorf("loading OpenAPI spec for %q failed with: %v", key, err)) } diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapiv3/controller.go b/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapiv3/controller.go index 0f5e37c4522f0..d91222824c199 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapiv3/controller.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapiv3/controller.go @@ -46,7 +46,7 @@ const ( // AggregationController periodically checks the list of group-versions handled by each APIService and updates the discovery page periodically type AggregationController struct { openAPIAggregationManager aggregator.SpecProxier - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] // To allow injection for testing. 
syncHandler func(key string) (syncAction, error) @@ -56,9 +56,9 @@ type AggregationController struct { func NewAggregationController(openAPIAggregationManager aggregator.SpecProxier) *AggregationController { c := &AggregationController{ openAPIAggregationManager: openAPIAggregationManager, - queue: workqueue.NewNamedRateLimitingQueue( - workqueue.NewItemExponentialFailureRateLimiter(successfulUpdateDelay, failedUpdateMaxExpDelay), - "open_api_v3_aggregation_controller", + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.NewTypedItemExponentialFailureRateLimiter[string](successfulUpdateDelay, failedUpdateMaxExpDelay), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "open_api_v3_aggregation_controller"}, ), } @@ -98,7 +98,7 @@ func (c *AggregationController) processNextWorkItem() bool { return false } - if aggregator.IsLocalAPIService(key.(string)) { + if aggregator.IsLocalAPIService(key) { // for local delegation targets that are aggregated once per second, log at // higher level to avoid flooding the log klog.V(6).Infof("OpenAPI AggregationController: Processing item %s", key) @@ -106,7 +106,7 @@ func (c *AggregationController) processNextWorkItem() bool { klog.V(4).Infof("OpenAPI AggregationController: Processing item %s", key) } - action, err := c.syncHandler(key.(string)) + action, err := c.syncHandler(key) if err == nil { c.queue.Forget(key) } else { @@ -115,7 +115,7 @@ func (c *AggregationController) processNextWorkItem() bool { switch action { case syncRequeue: - if aggregator.IsLocalAPIService(key.(string)) { + if aggregator.IsLocalAPIService(key) { klog.V(7).Infof("OpenAPI AggregationController: action for local item %s: Requeue after %s.", key, successfulUpdateDelayLocal) c.queue.AddAfter(key, successfulUpdateDelayLocal) } else { diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go b/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go index 6277a81a4af36..14cd194aec468 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go @@ -81,7 +81,7 @@ type AvailableConditionController struct { // To allow injection for testing. syncFn func(key string) error - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] // map from service-namespace -> service-name -> apiservice names cache map[string]map[string][]string // this lock protects operations on the above cache @@ -107,12 +107,13 @@ func NewAvailableConditionController( serviceLister: serviceInformer.Lister(), endpointsLister: endpointsInformer.Lister(), serviceResolver: serviceResolver, - queue: workqueue.NewNamedRateLimitingQueue( + queue: workqueue.NewTypedRateLimitingQueueWithConfig( // We want a fairly tight requeue time. The controller listens to the API, but because it relies on the routability of the // service network, it is possible for an external, non-watchable factor to affect availability. This keeps // the maximum disruption time to a minimum, but it does prevent hot loops. 
- workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 30*time.Second), - "AvailableConditionController"), + workqueue.NewTypedItemExponentialFailureRateLimiter[string](5*time.Millisecond, 30*time.Second), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "AvailableConditionController"}, + ), proxyTransportDial: proxyTransportDial, proxyCurrentCertKeyContent: proxyCurrentCertKeyContent, metrics: newAvailabilityMetrics(), @@ -451,7 +452,7 @@ func (c *AvailableConditionController) processNextWorkItem() bool { } defer c.queue.Done(key) - err := c.syncFn(key.(string)) + err := c.syncFn(key) if err == nil { c.queue.Forget(key) return true diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller_test.go b/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller_test.go index dc9fbdd35410e..d95005b0116e3 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller_test.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller_test.go @@ -126,12 +126,13 @@ func setupAPIServices(apiServices []*apiregistration.APIService) (*AvailableCond serviceLister: v1listers.NewServiceLister(serviceIndexer), endpointsLister: v1listers.NewEndpointsLister(endpointsIndexer), serviceResolver: &fakeServiceResolver{url: testServer.URL}, - queue: workqueue.NewNamedRateLimitingQueue( + queue: workqueue.NewTypedRateLimitingQueueWithConfig( // We want a fairly tight requeue time. The controller listens to the API, but because it relies on the routability of the // service network, it is possible for an external, non-watchable factor to affect availability. This keeps // the maximum disruption time to a minimum, but it does prevent hot loops. - workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 30*time.Second), - "AvailableConditionController"), + workqueue.NewTypedItemExponentialFailureRateLimiter[string](5*time.Millisecond, 30*time.Second), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "AvailableConditionController"}, + ), metrics: newAvailabilityMetrics(), } for _, svc := range apiServices { diff --git a/staging/src/k8s.io/sample-controller/controller.go b/staging/src/k8s.io/sample-controller/controller.go index 3c7936aeae105..f2e66c301ab65 100644 --- a/staging/src/k8s.io/sample-controller/controller.go +++ b/staging/src/k8s.io/sample-controller/controller.go @@ -80,7 +80,7 @@ type Controller struct { // means we can ensure we only process a fixed amount of resources at a // time, and makes it easy to ensure we are never processing the same item // simultaneously in two different workers. - workqueue workqueue.RateLimitingInterface + workqueue workqueue.TypedRateLimitingInterface[string] // recorder is an event recorder for recording Event resources to the // Kubernetes API. 
recorder record.EventRecorder @@ -105,9 +105,9 @@ func NewController( eventBroadcaster.StartStructuredLogging(0) eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeclientset.CoreV1().Events("")}) recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName}) - ratelimiter := workqueue.NewMaxOfRateLimiter( - workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second), - &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(50), 300)}, + ratelimiter := workqueue.NewTypedMaxOfRateLimiter( + workqueue.NewTypedItemExponentialFailureRateLimiter[string](5*time.Millisecond, 1000*time.Second), + &workqueue.TypedBucketRateLimiter[string]{Limiter: rate.NewLimiter(rate.Limit(50), 300)}, ) controller := &Controller{ @@ -117,7 +117,7 @@ func NewController( deploymentsSynced: deploymentInformer.Informer().HasSynced, foosLister: fooInformer.Lister(), foosSynced: fooInformer.Informer().HasSynced, - workqueue: workqueue.NewRateLimitingQueue(ratelimiter), + workqueue: workqueue.NewTypedRateLimitingQueue(ratelimiter), recorder: recorder, } @@ -204,29 +204,14 @@ func (c *Controller) processNextWorkItem(ctx context.Context) bool { } // We wrap this block in a func so we can defer c.workqueue.Done. - err := func(obj interface{}) error { + err := func(key string) error { // We call Done here so the workqueue knows we have finished // processing this item. We also must remember to call Forget if we // do not want this work item being re-queued. For example, we do // not call Forget if a transient error occurs, instead the item is // put back on the workqueue and attempted again after a back-off // period. - defer c.workqueue.Done(obj) - var key string - var ok bool - // We expect strings to come off the workqueue. These are of the - // form namespace/name. We do this as the delayed nature of the - // workqueue means the items in the informer cache may actually be - // more up to date that when the item was initially put onto the - // workqueue. - if key, ok = obj.(string); !ok { - // As the item in the workqueue is actually invalid, we call - // Forget here else we'd go into a loop of attempting to - // process a work item that is invalid. - c.workqueue.Forget(obj) - utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj)) - return nil - } + defer c.workqueue.Done(key) // Run the syncHandler, passing it the namespace/name string of the // Foo resource to be synced. if err := c.syncHandler(ctx, key); err != nil {
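
Every conversion in this patch follows the same shape: the queue field becomes workqueue.TypedRateLimitingInterface[string], construction moves to NewTypedRateLimitingQueueWithConfig with a typed rate limiter and a TypedRateLimitingQueueConfig carrying the queue name, and the worker loop drops its key.(string) assertions because Get already returns a string. A minimal, self-contained sketch of the resulting pattern is below; the fooController, newFooController, syncHandler, and the "foo" queue name are hypothetical and exist only to illustrate the API usage shown in the hunks above, they are not part of this patch.

package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

// fooController is a hypothetical controller used only to illustrate the
// typed-workqueue pattern this patch converges on.
type fooController struct {
	// The queue is parameterized with the key type, so Get returns a string
	// and no type assertion is needed in the worker.
	queue workqueue.TypedRateLimitingInterface[string]
}

func newFooController() *fooController {
	return &fooController{
		queue: workqueue.NewTypedRateLimitingQueueWithConfig(
			workqueue.DefaultTypedControllerRateLimiter[string](),
			workqueue.TypedRateLimitingQueueConfig[string]{Name: "foo"},
		),
	}
}

// syncHandler stands in for the per-controller reconcile function.
func (c *fooController) syncHandler(key string) error {
	fmt.Println("syncing", key)
	return nil
}

// processNextWorkItem mirrors the rewritten worker loops: Get yields a
// string key directly, which is passed straight to the sync handler.
func (c *fooController) processNextWorkItem() bool {
	key, shutdown := c.queue.Get()
	if shutdown {
		return false
	}
	defer c.queue.Done(key)

	if err := c.syncHandler(key); err != nil {
		// Requeue with per-item back-off on error, as before.
		c.queue.AddRateLimited(key)
		return true
	}
	// On success, reset the item's failure history.
	c.queue.Forget(key)
	return true
}

func main() {
	c := newFooController()
	c.queue.Add("default/example")
	c.processNextWorkItem()
	c.queue.ShutDown()
}

Because the key comes off the queue already typed, the defensive obj.(string) check and the Forget-on-invalid-item branch that the sample-controller hunk above removes have no equivalent here; the compiler enforces the key type instead.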