From 00e7281a4a687766f2b8a43acfafec693d0214a6 Mon Sep 17 00:00:00 2001 From: "dom.bozzuto" Date: Mon, 24 Jun 2024 17:05:11 -0400 Subject: [PATCH] [local] Add support for doing refresh before IncreaseSize --- .../cloudprovider/azure/README.md | 8 +- .../cloudprovider/azure/azure_config.go | 10 +++ .../cloudprovider/azure/azure_scale_set.go | 26 +++++- .../azure/azure_scale_set_test.go | 80 ++++++++++++++++++- 4 files changed, 117 insertions(+), 7 deletions(-) diff --git a/cluster-autoscaler/cloudprovider/azure/README.md b/cluster-autoscaler/cloudprovider/azure/README.md index 427fc980b634..466106c349a0 100644 --- a/cluster-autoscaler/cloudprovider/azure/README.md +++ b/cluster-autoscaler/cloudprovider/azure/README.md @@ -173,10 +173,12 @@ When using K8s versions older than v1.18, we recommend using at least **v.1.17.5 As for CA versions older than 1.18, we recommend using at least **v.1.17.2, v1.16.5, v1.15.6**. In addition, cluster-autoscaler exposes a `AZURE_VMSS_CACHE_TTL` environment variable which controls the rate of `GetVMScaleSet` being made. By default, this is 15 seconds but setting this to a higher value such as 60 seconds can protect against API throttling. The caches used are proactively incremented and decremented with the scale up and down operations and this higher value doesn't have any noticeable impact on performance. **Note that the value is in seconds** +`AZURE_VMSS_CACHE_FORCE_REFRESH` can also be set to always refresh the cache before an upscale. 
-| Config Name | Default | Environment Variable | Cloud Config File | -| ----------- | ------- | -------------------- | ----------------- | -| VmssCacheTTL | 60 | AZURE_VMSS_CACHE_TTL | vmssCacheTTL | +| Config Name | Default | Environment Variable | Cloud Config File | +|-----------------------|--------|--------------------------------|------------------------| +| VmssCacheTTL | 60 | AZURE_VMSS_CACHE_TTL | vmssCacheTTL | +| VmssCacheForceRefresh | false | AZURE_VMSS_CACHE_FORCE_REFRESH | vmssCacheForceRefresh | The `AZURE_VMSS_VMS_CACHE_TTL` environment variable affects the `GetScaleSetVms` (VMSS VM List) calls rate. The default value is 300 seconds. A configurable jitter (`AZURE_VMSS_VMS_CACHE_JITTER` environment variable, default 0) expresses the maximum number of second that will be subtracted from that initial VMSS cache TTL after a new VMSS is discovered by the cluster-autoscaler: this can prevent a dogpile effect on clusters having many VMSS. diff --git a/cluster-autoscaler/cloudprovider/azure/azure_config.go b/cluster-autoscaler/cloudprovider/azure/azure_config.go index 810884dfc691..182673549f1e 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_config.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_config.go @@ -119,6 +119,9 @@ type Config struct { // VMSS metadata cache TTL in seconds, only applies for vmss type VmssCacheTTL int64 `json:"vmssCacheTTL" yaml:"vmssCacheTTL"` + // VMSS cache option to force refresh of the vmss capacity before an upscale + VmssCacheForceRefresh bool `json:"vmssCacheForceRefresh" yaml:"vmssCacheForceRefresh"` + // VMSS instances cache TTL in seconds, only applies for vmss type VmssVmsCacheTTL int64 `json:"vmssVmsCacheTTL" yaml:"vmssVmsCacheTTL"` @@ -215,6 +218,13 @@ func BuildAzureConfig(configReader io.Reader) (*Config, error) { } } + if vmssCacheForceRefresh := os.Getenv("AZURE_VMSS_CACHE_FORCE_REFRESH"); vmssCacheForceRefresh != "" { + cfg.VmssCacheForceRefresh, err = strconv.ParseBool(vmssCacheForceRefresh) + 
if err != nil { + return nil, fmt.Errorf("failed to parse AZURE_VMSS_CACHE_FORCE_REFRESH %q: %v", vmssCacheForceRefresh, err) + } + } + if vmssVmsCacheTTL := os.Getenv("AZURE_VMSS_VMS_CACHE_TTL"); vmssVmsCacheTTL != "" { cfg.VmssVmsCacheTTL, err = strconv.ParseInt(vmssVmsCacheTTL, 10, 0) if err != nil { diff --git a/cluster-autoscaler/cloudprovider/azure/azure_scale_set.go b/cluster-autoscaler/cloudprovider/azure/azure_scale_set.go index 3b7978c5868b..032f9585741f 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_scale_set.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_scale_set.go @@ -231,11 +231,22 @@ func (scaleSet *ScaleSet) SetScaleSetSize(size int64) error { return err } - // Update the new capacity to cache. + // Update the new capacity to cache, if the size increase is a net increase vmssSizeMutex.Lock() - vmssInfo.Sku.Capacity = &size + rejectIncrease := false + lastCapacity := *vmssInfo.Sku.Capacity + if size < lastCapacity { + rejectIncrease = true + } else { + vmssInfo.Sku.Capacity = &size + } vmssSizeMutex.Unlock() + if rejectIncrease { + klog.Warningf("VMSS %s: rejecting size decrease from %d to %d", scaleSet.Name, lastCapacity, size) + return fmt.Errorf("vmss %s: rejected size decrease from %d to %d", scaleSet.Name, lastCapacity, size) + } + // Compose a new VMSS for updating. op := compute.VirtualMachineScaleSet{ Name: vmssInfo.Name, @@ -254,6 +265,7 @@ func (scaleSet *ScaleSet) SetScaleSetSize(size int64) error { // Proactively set the VMSS size so autoscaler makes better decisions. 
scaleSet.curSize = size scaleSet.lastSizeRefresh = time.Now() + klog.V(3).Infof("VMSS %s: requested size increase from %d to %d", scaleSet.Name, lastCapacity, size) go scaleSet.updateVMSSCapacity(future) return nil @@ -272,6 +284,15 @@ func (scaleSet *ScaleSet) IncreaseSize(delta int) error { return fmt.Errorf("size increase must be positive") } + if scaleSet.manager.config.VmssCacheForceRefresh { + if err := scaleSet.manager.forceRefresh(); err != nil { + klog.Errorf("VMSS %s: force refresh of VMSSs failed: %v", scaleSet.Name, err) + return err + } + klog.V(4).Infof("VMSS: %s, forced refreshed before checking size", scaleSet.Name) + scaleSet.invalidateLastSizeRefreshWithLock() + } + size, err := scaleSet.GetScaleSetSize() if err != nil { return err @@ -438,6 +459,7 @@ func (scaleSet *ScaleSet) DeleteInstances(instances []*azureRef, hasUnregistered scaleSet.sizeMutex.Lock() scaleSet.curSize -= int64(len(instanceIDs)) scaleSet.lastSizeRefresh = time.Now() + klog.V(3).Infof("VMSS %s: had unregistered nodes, current size decremented by %d to %d", scaleSet.Name, len(instanceIDs), scaleSet.curSize) scaleSet.sizeMutex.Unlock() } diff --git a/cluster-autoscaler/cloudprovider/azure/azure_scale_set_test.go b/cluster-autoscaler/cloudprovider/azure/azure_scale_set_test.go index 0f14a75a5b93..4a640e404a04 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_scale_set_test.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_scale_set_test.go @@ -177,6 +177,7 @@ func TestIncreaseSize(t *testing.T) { provider := newTestProvider(t) expectedScaleSets := newTestVMSSList(3, "test-asg", "eastus", orchMode) + provider.azureManager.explicitlyConfigured["test-asg"] = true mockVMSSClient := mockvmssclient.NewMockInterface(ctrl) mockVMSSClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup).Return(expectedScaleSets, nil).AnyTimes() @@ -263,7 +264,7 @@ func TestIncreaseSizeOnVMProvisioningFailed(t *testing.T) { } mockVMSSClient := 
mockvmssclient.NewMockInterface(ctrl) - mockVMSSClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup).Return(expectedScaleSets, nil) + mockVMSSClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup).Return(expectedScaleSets, nil).AnyTimes() mockVMSSClient.EXPECT().CreateOrUpdateAsync(gomock.Any(), manager.config.ResourceGroup, vmssName, gomock.Any()).Return(nil, nil) mockVMSSClient.EXPECT().WaitForCreateOrUpdateResult(gomock.Any(), gomock.Any(), manager.config.ResourceGroup).Return(&http.Response{StatusCode: http.StatusOK}, nil).AnyTimes() manager.azClient.virtualMachineScaleSetsClient = mockVMSSClient @@ -322,7 +323,7 @@ func TestIncreaseSizeOnVMSSUpdating(t *testing.T) { expectedVMSSVMs := newTestVMSSVMList(3) mockVMSSClient := mockvmssclient.NewMockInterface(ctrl) - mockVMSSClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup).Return(expectedScaleSets, nil) + mockVMSSClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup).Return(expectedScaleSets, nil).AnyTimes() mockVMSSClient.EXPECT().CreateOrUpdateAsync(gomock.Any(), manager.config.ResourceGroup, vmssName, gomock.Any()).Return(nil, nil) mockVMSSClient.EXPECT().WaitForCreateOrUpdateResult(gomock.Any(), gomock.Any(), manager.config.ResourceGroup).Return(&http.Response{StatusCode: http.StatusOK}, nil).AnyTimes() manager.azClient.virtualMachineScaleSetsClient = mockVMSSClient @@ -344,6 +345,81 @@ func TestIncreaseSizeOnVMSSUpdating(t *testing.T) { assert.NoError(t, err) } +func TestIncreaseSizeWithForceRefresh(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + orchestrationModes := [2]compute.OrchestrationMode{compute.Uniform, compute.Flexible} + + expectedVMSSVMs := newTestVMSSVMList(3) + expectedVMs := newTestVMList(3) + + for _, orchMode := range orchestrationModes { + + provider := newTestProvider(t) + expectedScaleSets := newTestVMSSList(3, "test-asg", "eastus", orchMode) + provider.azureManager.explicitlyConfigured["test-asg"] = true + 
provider.azureManager.config.VmssCacheForceRefresh = true + + mockVMSSClient := mockvmssclient.NewMockInterface(ctrl) + mockVMSSClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup).Return(expectedScaleSets, nil).Times(2) + mockVMSSClient.EXPECT().CreateOrUpdateAsync(gomock.Any(), provider.azureManager.config.ResourceGroup, "test-asg", gomock.Any()).Return(nil, nil) + mockVMSSClient.EXPECT().WaitForCreateOrUpdateResult(gomock.Any(), gomock.Any(), provider.azureManager.config.ResourceGroup).Return(&http.Response{StatusCode: http.StatusOK}, nil).AnyTimes() + provider.azureManager.azClient.virtualMachineScaleSetsClient = mockVMSSClient + + if orchMode == compute.Uniform { + mockVMSSVMClient := mockvmssvmclient.NewMockInterface(ctrl) + mockVMSSVMClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup, "test-asg", gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes() + provider.azureManager.azClient.virtualMachineScaleSetVMsClient = mockVMSSVMClient + } else { + + provider.azureManager.config.EnableVmssFlex = true + mockVMClient := mockvmclient.NewMockInterface(ctrl) + mockVMClient.EXPECT().ListVmssFlexVMsWithoutInstanceView(gomock.Any(), "test-asg").Return(expectedVMs, nil).AnyTimes() + provider.azureManager.azClient.virtualMachinesClient = mockVMClient + } + err := provider.azureManager.forceRefresh() + assert.NoError(t, err) + + ss := newTestScaleSet(provider.azureManager, "test-asg-doesnt-exist") + err = ss.IncreaseSize(100) + expectedErr := fmt.Errorf("could not find vmss: test-asg-doesnt-exist") + assert.Equal(t, expectedErr, err) + + registered := provider.azureManager.RegisterNodeGroup( + newTestScaleSet(provider.azureManager, "test-asg")) + assert.True(t, registered) + assert.Equal(t, len(provider.NodeGroups()), 1) + + // current target size is 3. 
+ targetSize, err := provider.NodeGroups()[0].TargetSize() + assert.NoError(t, err) + assert.Equal(t, 3, targetSize) + + // Simulate missing instances + mockVMSSClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup).Return(newTestVMSSList(1, "test-asg", "eastus", orchMode), nil).AnyTimes() + if orchMode == compute.Uniform { + mockVMSSVMClient := mockvmssvmclient.NewMockInterface(ctrl) + mockVMSSVMClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup, "test-asg", gomock.Any()).Return(newTestVMSSVMList(1), nil).AnyTimes() + provider.azureManager.azClient.virtualMachineScaleSetVMsClient = mockVMSSVMClient + } else { + provider.azureManager.config.EnableVmssFlex = true + mockVMClient := mockvmclient.NewMockInterface(ctrl) + mockVMClient.EXPECT().ListVmssFlexVMsWithoutInstanceView(gomock.Any(), "test-asg").Return(newTestVMList(1), nil).AnyTimes() + provider.azureManager.azClient.virtualMachinesClient = mockVMClient + } + + // Update cache on IncreaseSize and increase back to 3 nodes. + err = provider.NodeGroups()[0].IncreaseSize(2) + assert.NoError(t, err) + + // new target size should be 3. + targetSize, err = provider.NodeGroups()[0].TargetSize() + assert.NoError(t, err) + assert.Equal(t, 3, targetSize) + } +} + func TestBelongs(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish()