diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/azure_vmssvmclient.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/azure_vmssvmclient.go index cdc6a169d4..73c57eccd0 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/azure_vmssvmclient.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/azure_vmssvmclient.go @@ -22,6 +22,7 @@ import ( "strings" "time" + armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" @@ -602,3 +603,52 @@ func (c *Client) parseResp( } return errors, retryIDs } + +// AttachDetachDataDisks attaches or detaches a list of managed data disks to/from a VM. +func (c *Client) AttachDetachDataDisks(ctx context.Context, resourceGroupName, VMScaleSetName, instanceID string, parameters armcompute.AttachDetachDataDisksRequest, source string) (*armcompute.VirtualMachinesClientAttachDetachDataDisksResponse, *retry.Error) { + mc := metrics.NewMetricContext("vmssvm", "AttachDetachDataDisks", resourceGroupName, c.subscriptionID, source) + + // Report errors if the client is rate limited. + if !c.rateLimiterWriter.TryAccept() { + mc.RateLimitedCount() + return nil, retry.GetRateLimitError(true, "VMSSVMAttachDetachDataDisks") + } + + // Report errors if the client is throttled. 
+ if c.RetryAfterWriter.After(time.Now()) { + mc.ThrottledCount() + rerr := retry.GetThrottlingError("VMSSVMAttachDetachDataDisks", "client throttled", c.RetryAfterWriter) + return nil, rerr + } + + resourceID := armclient.GetChildResourceID( + c.subscriptionID, + resourceGroupName, + vmssResourceType, + VMScaleSetName, + vmResourceType, + instanceID, + ) + + response, rerr := c.armClient.PostResource(ctx, resourceID, "attachDetachDataDisks", parameters, map[string]interface{}{}) + mc.Observe(rerr) + defer c.armClient.CloseResponse(ctx, response) + if rerr != nil { + if rerr.IsThrottled() { + // Update RetryAfterWriter so that no more requests would be sent until RetryAfter expires. + c.RetryAfterWriter = rerr.RetryAfter + } + return nil, rerr + } + + result := armcompute.VirtualMachinesClientAttachDetachDataDisksResponse{} + err := autorest.Respond( + response, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result)) + if err != nil { + klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "attachDetachDataDisks.respond", resourceID, err) + return &result, retry.GetError(response, err) + } + return &result, nil +} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/interface.go index 352c3aa642..6283be1159 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/interface.go @@ -19,6 +19,7 @@ package vmssvmclient import ( "context" + armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" "github.com/Azure/go-autorest/autorest/azure" @@ -27,7 +28,7 @@ import ( const ( // APIVersion is the API version for VMSS. 
- APIVersion = "2022-03-01" + APIVersion = "2024-07-01" // AzureStackCloudAPIVersion is the API version for Azure Stack AzureStackCloudAPIVersion = "2019-07-01" // AzureStackCloudName is the cloud name of Azure Stack @@ -54,4 +55,7 @@ type Interface interface { // UpdateVMs updates a list of VirtualMachineScaleSetVM from map[instanceID]compute.VirtualMachineScaleSetVM. UpdateVMs(ctx context.Context, resourceGroupName string, VMScaleSetName string, instances map[string]compute.VirtualMachineScaleSetVM, source string, batchSize int) *retry.Error + + // AttachDetachDataDisks attaches or detaches a list of managed data disks to/from a VM. + AttachDetachDataDisks(ctx context.Context, resourceGroupName, VMScaleSetName, instanceID string, parameters armcompute.AttachDetachDataDisksRequest, source string) (*armcompute.VirtualMachinesClientAttachDetachDataDisksResponse, *retry.Error) } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/mockvmssvmclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/mockvmssvmclient/interface.go index 0bc207ca6a..9f0f97a3ad 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/mockvmssvmclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/mockvmssvmclient/interface.go @@ -30,6 +30,7 @@ import ( context "context" reflect "reflect" + armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6" compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" azure "github.com/Azure/go-autorest/autorest/azure" gomock "go.uber.org/mock/gomock" @@ -147,3 +148,18 @@ func (mr *MockInterfaceMockRecorder) WaitForUpdateResult(ctx, future, resourceGr mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForUpdateResult", reflect.TypeOf((*MockInterface)(nil).WaitForUpdateResult), ctx, future, resourceGroupName, source) } + +// 
AttachDetachDataDisks mocks base method. +func (m *MockInterface) AttachDetachDataDisks(ctx context.Context, resourceGroupName, VMScaleSetName, instanceID string, parameters armcompute.AttachDetachDataDisksRequest, source string) (*armcompute.VirtualMachinesClientAttachDetachDataDisksResponse, *retry.Error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AttachDetachDataDisks", ctx, resourceGroupName, VMScaleSetName, instanceID, parameters, source) + ret0, _ := ret[0].(*armcompute.VirtualMachinesClientAttachDetachDataDisksResponse) + ret1, _ := ret[1].(*retry.Error) + return ret0, ret1 +} + +// AttachDetachDataDisks indicates an expected call of AttachDetachDataDisks. +func (mr *MockInterfaceMockRecorder) AttachDetachDataDisks(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters, source any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AttachDetachDataDisks", reflect.TypeOf((*MockInterface)(nil).AttachDetachDataDisks), ctx, resourceGroupName, VMScaleSetName, instanceID, parameters, source) +} \ No newline at end of file diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go index 48e4185048..9453d21ab3 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go @@ -22,7 +22,7 @@ import ( "net/http" "strings" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6" + armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" "github.com/Azure/go-autorest/autorest/azure" @@ -37,6 +37,10 @@ import ( // AttachDisk attaches a disk to vm func (ss *ScaleSet) AttachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]*AttachDiskOptions) 
error { + if len(diskMap) == 0 { + return fmt.Errorf("no disk to attach") + } + vmName := mapNodeNameToVMName(nodeName) vm, err := ss.getVmssVM(ctx, vmName, azcache.CacheReadTypeDefault) if err != nil { @@ -48,15 +52,9 @@ func (ss *ScaleSet) AttachDisk(ctx context.Context, nodeName types.NodeName, dis return err } - var disks []compute.DataDisk - + var dataDisksToAttach []*armcompute.DataDisksToAttach storageProfile := vm.AsVirtualMachineScaleSetVM().StorageProfile - if storageProfile != nil && storageProfile.DataDisks != nil { - disks = make([]compute.DataDisk, len(*storageProfile.DataDisks)) - copy(disks, *storageProfile.DataDisks) - } - for k, v := range diskMap { diskURI := k opt := v @@ -76,7 +74,13 @@ func (ss *ScaleSet) AttachDisk(ctx context.Context, nodeName types.NodeName, dis continue } - managedDisk := &compute.ManagedDiskParameters{ID: &diskURI} + cachingMode := armcompute.CachingTypes(opt.CachingMode) + dataDisk := armcompute.DataDisksToAttach{ + DiskID: &diskURI, + Lun: &opt.Lun, + Caching: &cachingMode, + WriteAcceleratorEnabled: ptr.To(opt.WriteAcceleratorEnabled), + } if opt.DiskEncryptionSetID == "" { if storageProfile.OsDisk != nil && storageProfile.OsDisk.ManagedDisk != nil && @@ -87,44 +91,27 @@ func (ss *ScaleSet) AttachDisk(ctx context.Context, nodeName types.NodeName, dis } } if opt.DiskEncryptionSetID != "" { - managedDisk.DiskEncryptionSet = &compute.DiskEncryptionSetParameters{ID: &opt.DiskEncryptionSetID} + dataDisk.DiskEncryptionSet = &armcompute.DiskEncryptionSetParameters{ID: &opt.DiskEncryptionSetID} } - disks = append(disks, - compute.DataDisk{ - Name: &opt.DiskName, - Lun: &opt.Lun, - Caching: opt.CachingMode, - CreateOption: "attach", - ManagedDisk: managedDisk, - WriteAcceleratorEnabled: ptr.To(opt.WriteAcceleratorEnabled), - }) + dataDisksToAttach = append(dataDisksToAttach, &dataDisk) } - newVM := compute.VirtualMachineScaleSetVM{ - VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{ - 
StorageProfile: &compute.StorageProfile{ - DataDisks: &disks, - }, - }, + attachDataDisksRequest := armcompute.AttachDetachDataDisksRequest{ + DataDisksToAttach: dataDisksToAttach, } - klog.V(2).Infof("azureDisk - update: rg(%s) vm(%s) - attach disk list(%+v)", nodeResourceGroup, nodeName, diskMap) - future, rerr := ss.VirtualMachineScaleSetVMsClient.UpdateAsync(ctx, nodeResourceGroup, vm.VMSSName, vm.InstanceID, newVM, "attach_disk") - if rerr != nil { - klog.Errorf("azureDisk - attach disk list(%+v) on rg(%s) vm(%s) failed, err: %v", diskMap, nodeResourceGroup, nodeName, rerr) - if rerr.HTTPStatusCode == http.StatusNotFound { - klog.Errorf("azureDisk - begin to filterNonExistingDisks(%v) on rg(%s) vm(%s)", diskMap, nodeResourceGroup, nodeName) - disks := FilterNonExistingDisks(ctx, ss.DisksClient, *newVM.VirtualMachineScaleSetVMProperties.StorageProfile.DataDisks) - newVM.VirtualMachineScaleSetVMProperties.StorageProfile.DataDisks = &disks - future, rerr = ss.VirtualMachineScaleSetVMsClient.UpdateAsync(ctx, nodeResourceGroup, vm.VMSSName, vm.InstanceID, newVM, "attach_disk") - } - } + defer func() { + _ = ss.DeleteCacheForNode(ctx, vmName) + }() - klog.V(2).Infof("azureDisk - update: rg(%s) vm(%s) - attach disk list(%+v) returned with %v", nodeResourceGroup, nodeName, diskMap, rerr) - if rerr != nil { - return rerr.Error() + klog.V(2).Infof("azureDisk - update: rg(%s) vm(%s) - attach disk list(%+v)", nodeResourceGroup, nodeName, diskMap) + // todo: update cache with result + _, retryErr := ss.VirtualMachineScaleSetVMsClient.AttachDetachDataDisks(ctx, nodeResourceGroup, vm.VMSSName, vm.InstanceID, attachDataDisksRequest, "attach_disk") + klog.V(2).Infof("azureDisk - update: rg(%s) vm(%s) - attach disk list(%+v) returned with %v", nodeResourceGroup, nodeName, diskMap, retryErr) + if retryErr != nil { + return retryErr.Error() } - return ss.WaitForUpdateResult(ctx, future, nodeName, "attach_disk") + return nil } // WaitForUpdateResult waits for the response of the 
update request