From 7047504a22ff9472c46026cf7d05d30b7c049444 Mon Sep 17 00:00:00 2001
From: Matt Boersma
Date: Mon, 10 Nov 2025 09:02:50 -0700
Subject: [PATCH] Update Azure SDK to v2

---
 .../cloudprovider/azure/azure_agent_pool.go | 187 +++--
 .../azure/azure_agent_pool_test.go | 135 ++--
 .../cloudprovider/azure/azure_cache.go | 81 ++-
 .../cloudprovider/azure/azure_client.go | 160 +++-
 .../azure/azure_cloud_provider_test.go | 134 ++--
 .../cloudprovider/azure/azure_fakes.go | 168 ++++-
 .../azure/azure_force_delete_scale_set.go | 29 +-
 .../azure/azure_instance_gpu_sku.go | 2 +-
 .../cloudprovider/azure/azure_manager.go | 6 +-
 .../cloudprovider/azure/azure_manager_test.go | 179 +++--
 .../azure/azure_mock_agentpool_client.go | 108 ---
 .../cloudprovider/azure/azure_mock_clients.go | 684 ++++++++++++++++++
 .../cloudprovider/azure/azure_scale_set.go | 198 +++--
 .../azure/azure_scale_set_instance_cache.go | 38 +-
 .../azure_scale_set_instance_cache_test.go | 28 +-
 .../azure/azure_scale_set_test.go | 603 +++++++++------
 .../cloudprovider/azure/azure_template.go | 40 +-
 .../azure/azure_template_test.go | 69 +-
 .../cloudprovider/azure/azure_test_helpers.go | 90 +++
 .../cloudprovider/azure/azure_util.go | 102 ++-
 .../cloudprovider/azure/azure_util_test.go | 6 +-
 .../cloudprovider/azure/azure_vms_pool.go | 16 +-
 .../azure/azure_vms_pool_test.go | 49 +-
 cluster-autoscaler/go.mod | 44 +-
 cluster-autoscaler/go.sum | 94 ++-
 25 files changed, 2244 insertions(+), 1006 deletions(-)
 delete mode 100644 cluster-autoscaler/cloudprovider/azure/azure_mock_agentpool_client.go
 create mode 100644 cluster-autoscaler/cloudprovider/azure/azure_mock_clients.go
 create mode 100644 cluster-autoscaler/cloudprovider/azure/azure_test_helpers.go

diff --git a/cluster-autoscaler/cloudprovider/azure/azure_agent_pool.go b/cluster-autoscaler/cloudprovider/azure/azure_agent_pool.go
index cd13369a81df..2d074060d041 100644
--- a/cluster-autoscaler/cloudprovider/azure/azure_agent_pool.go
+++ b/cluster-autoscaler/cloudprovider/azure/azure_agent_pool.go
@@ -17,16 +17,20 @@ limitations under the License.
 package azure
 
 import (
+	"errors"
 	"fmt"
 	"math/rand"
+	"net/http"
 	"sort"
 	"strings"
 	"sync"
 	"time"
 
-	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute"
-	"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources"
-	azStorage "github.com/Azure/azure-sdk-for-go/storage"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7"
+	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
 	"k8s.io/utils/ptr"
 
 	apiv1 "k8s.io/api/core/v1"
@@ -82,10 +86,10 @@ func (as *AgentPool) initialize() error {
 	ctx, cancel := getContextWithCancel()
 	defer cancel()
 
-	template, err := as.manager.azClient.deploymentClient.ExportTemplate(ctx, as.manager.config.ResourceGroup, as.manager.config.Deployment)
+	template, err := as.manager.azClient.deploymentClient.ExportTemplate(ctx, as.manager.config.ResourceGroup, as.manager.config.Deployment, nil)
 	if err != nil {
 		klog.Errorf("deploymentClient.ExportTemplate(%s, %s) failed: %v", as.manager.config.ResourceGroup, as.manager.config.Deployment, err)
-		return err.Error()
+		return err
 	}
 
 	as.template = template.Template.(map[string]interface{})
@@ -136,10 +140,10 @@ func (as *AgentPool) Id() string {
 	return as.Name
 }
 
-func (as *AgentPool) getVMsFromCache() ([]compute.VirtualMachine, error) {
+func (as *AgentPool) getVMsFromCache() ([]armcompute.VirtualMachine, error) {
 	allVMs := as.manager.azureCache.getVirtualMachines()
 	if _, exists := allVMs[as.Name]; !exists {
-		return []compute.VirtualMachine{}, fmt.Errorf("could not find VMs with poolName: %s", as.Name)
+		return []armcompute.VirtualMachine{}, fmt.Errorf("could not find VMs with poolName: %s", as.Name)
 	}
 	return allVMs[as.Name], nil
 }
@@ -157,7 +161,7 @@ func (as *AgentPool) GetVMIndexes() ([]int, map[int]string, error) {
 	indexes := make([]int, 0)
 	indexToVM := make(map[int]string)
 	for _, instance := range instances {
-		index, err := GetVMNameIndex(instance.StorageProfile.OsDisk.OsType, *instance.Name)
+		index, err := GetVMNameIndex(ptr.Deref(instance.Properties.StorageProfile.OSDisk.OSType, armcompute.OperatingSystemTypesLinux), *instance.Name)
 		if err != nil {
 			return nil, nil, err
 		}
@@ -211,27 +215,28 @@ func (as *AgentPool) TargetSize() (int, error) {
 	return int(size), nil
 }
 
-func (as *AgentPool) getAllSucceededAndFailedDeployments() ([]resources.DeploymentExtended, error) {
+func (as *AgentPool) getAllSucceededAndFailedDeployments() ([]armresources.DeploymentExtended, error) {
 	ctx, cancel := getContextWithCancel()
 	defer cancel()
 
-	allDeployments, rerr := as.manager.azClient.deploymentClient.List(ctx, as.manager.config.ResourceGroup)
-	if rerr != nil {
-		klog.Errorf("getAllSucceededAndFailedDeployments: failed to list deployments with error: %v", rerr.Error())
-		return nil, rerr.Error()
-	}
-
-	result := make([]resources.DeploymentExtended, 0)
-	for _, deployment := range allDeployments {
-		if deployment.Properties == nil || deployment.Properties.ProvisioningState == nil {
-			continue
+	var deployments []armresources.DeploymentExtended
+	pager := as.manager.azClient.deploymentClient.NewListByResourceGroupPager(as.manager.config.ResourceGroup, nil)
+	for pager.More() {
+		page, rerr := pager.NextPage(ctx)
+		if rerr != nil {
+			klog.Errorf("getAllSucceededAndFailedDeployments: failed to list deployments with error: %v", rerr.Error())
+			return nil, rerr
 		}
-		if
*deployment.Properties.ProvisioningState == "Succeeded" || *deployment.Properties.ProvisioningState == "Failed" { - result = append(result, deployment) + for _, deployment := range page.Value { + if deployment.Properties == nil || deployment.Properties.ProvisioningState == nil { + continue + } + if *deployment.Properties.ProvisioningState == "Succeeded" || *deployment.Properties.ProvisioningState == "Failed" { + deployments = append(deployments, *deployment) + } } } - - return result, rerr.Error() + return deployments, nil } // deleteOutdatedDeployments keeps the newest deployments in the resource group and delete others, @@ -256,7 +261,7 @@ func (as *AgentPool) deleteOutdatedDeployments() (err error) { } sort.Slice(deployments, func(i, j int) bool { - return deployments[i].Properties.Timestamp.Time.After(deployments[j].Properties.Timestamp.Time) + return deployments[i].Properties.Timestamp.After(*deployments[j].Properties.Timestamp) }) toBeDeleted := deployments[as.manager.config.MaxDeploymentsCount:] @@ -266,10 +271,18 @@ func (as *AgentPool) deleteOutdatedDeployments() (err error) { errList := make([]error, 0) for _, deployment := range toBeDeleted { - klog.V(4).Infof("deleteOutdatedDeployments: starts deleting outdated deployment (%s)", *deployment.Name) - rerr := as.manager.azClient.deploymentClient.Delete(ctx, as.manager.config.ResourceGroup, *deployment.Name) + klog.V(4).Infof("deleteOutdatedDeployments: start deleting outdated deployment (%s)", *deployment.Name) + poller, rerr := as.manager.azClient.deploymentClient.BeginDelete(ctx, as.manager.config.ResourceGroup, *deployment.Name, nil) if rerr != nil { - errList = append(errList, rerr.Error()) + klog.Errorf("deleteOutdatedDeployments: failed to begin deleting deployment (%s) with error: %v", *deployment.Name, rerr.Error()) + errList = append(errList, rerr) + continue + } + _, rerr = poller.PollUntilDone(ctx, &runtime.PollUntilDoneOptions{Frequency: 30 * time.Second}) + if rerr != nil { + klog.Errorf("deleteOutdatedDeployments: failed to delete deployment (%s) with error: %v", *deployment.Name, rerr.Error()) + errList = append(errList, rerr) + continue } } @@ -313,26 +326,39 @@ func (as *AgentPool) IncreaseSize(delta int) error { if highestUsedIndex != 0 { countForTemplate += highestUsedIndex + 1 - curSize } - as.parameters[as.Name+"Count"] = map[string]int{"value": countForTemplate} - as.parameters[as.Name+"Offset"] = map[string]int{"value": highestUsedIndex + 1} + as.parameters[as.Name+"Count"] = map[string]interface{}{"value": countForTemplate} + as.parameters[as.Name+"Offset"] = map[string]interface{}{"value": highestUsedIndex + 1} + + // Convert parameters to DeploymentParameter format + deploymentParams := make(map[string]*armresources.DeploymentParameter, len(as.parameters)) + for key, value := range as.parameters { + deploymentParams[key] = &armresources.DeploymentParameter{ + Value: value, + } + } newDeploymentName := fmt.Sprintf("cluster-autoscaler-%d", rand.New(rand.NewSource(time.Now().UnixNano())).Int31()) - newDeployment := resources.Deployment{ - Properties: &resources.DeploymentProperties{ - Template: &as.template, - Parameters: &as.parameters, - Mode: resources.Incremental, + newDeployment := armresources.Deployment{ + Properties: &armresources.DeploymentProperties{ + Template: as.template, + Parameters: deploymentParams, + Mode: ptr.To(armresources.DeploymentModeIncremental), }, } ctx, cancel := getContextWithCancel() defer cancel() klog.V(3).Infof("Waiting for deploymentClient.CreateOrUpdate(%s, %s, %v)", 
as.manager.config.ResourceGroup, newDeploymentName, newDeployment) - rerr := as.manager.azClient.deploymentClient.CreateOrUpdate(ctx, as.manager.config.ResourceGroup, newDeploymentName, newDeployment, "") + poller, rerr := as.manager.azClient.deploymentClient.BeginCreateOrUpdate(ctx, as.manager.config.ResourceGroup, newDeploymentName, newDeployment, nil) + if rerr != nil { + klog.Errorf("deploymentClient.BeginCreateOrUpdate for deployment %q failed: %v", newDeploymentName, rerr.Error()) + return rerr + } + resp, rerr := poller.PollUntilDone(ctx, &runtime.PollUntilDoneOptions{Frequency: 30 * time.Second}) if rerr != nil { klog.Errorf("deploymentClient.CreateOrUpdate for deployment %q failed: %v", newDeploymentName, rerr.Error()) - return rerr.Error() + return rerr } - klog.V(3).Infof("deploymentClient.CreateOrUpdate(%s, %s, %v) success", as.manager.config.ResourceGroup, newDeploymentName, newDeployment) + klog.V(3).Infof("deploymentClient.CreateOrUpdate(%s, %s, %v) success", as.manager.config.ResourceGroup, *resp.DeploymentExtended.Name, resp.DeploymentExtended) // Update cache after scale success. as.curSize = int64(expectedSize) @@ -515,22 +541,34 @@ func (as *AgentPool) deleteBlob(accountName, vhdContainer, vhdBlob string) error ctx, cancel := getContextWithCancel() defer cancel() - storageKeysResult, rerr := as.manager.azClient.storageAccountsClient.ListKeys(ctx, as.manager.config.SubscriptionID, as.manager.config.ResourceGroup, accountName) + storageKeysResult, rerr := as.manager.azClient.storageAccountsClient.ListKeys(ctx, as.manager.config.ResourceGroup, accountName, nil) if rerr != nil { - return rerr.Error() + return rerr + } + + keys := storageKeysResult.Keys + if len(keys) == 0 { + return fmt.Errorf("no storage keys found for account %s", accountName) } - keys := *storageKeysResult.Keys - client, err := azStorage.NewBasicClientOnSovereignCloud(accountName, ptr.Deref(keys[0].Value, ""), as.manager.env) + // Construct service URL using the storage account endpoint + serviceURL := fmt.Sprintf("https://%s.blob.%s", accountName, as.manager.env.StorageEndpointSuffix) + + // Create a SharedKeyCredential + credential, err := azblob.NewSharedKeyCredential(accountName, ptr.Deref(keys[0].Value, "")) if err != nil { - return err + return fmt.Errorf("failed to create shared key credential: %w", err) } - bs := client.GetBlobService() - containerRef := bs.GetContainerReference(vhdContainer) - blobRef := containerRef.GetBlobReference(vhdBlob) + // Create a service client + serviceClient, err := azblob.NewClientWithSharedKeyCredential(serviceURL, credential, nil) + if err != nil { + return fmt.Errorf("failed to create service client: %w", err) + } - return blobRef.Delete(&azStorage.DeleteBlobOptions{}) + // Delete the blob + _, err = serviceClient.DeleteBlob(ctx, vhdContainer, vhdBlob, nil) + return err } // deleteVirtualMachine deletes a VM and any associated OS disk @@ -538,27 +576,29 @@ func (as *AgentPool) deleteVirtualMachine(name string) error { ctx, cancel := getContextWithCancel() defer cancel() - vm, rerr := as.manager.azClient.virtualMachinesClient.Get(ctx, as.manager.config.ResourceGroup, name, "") + vm, rerr := as.manager.azClient.virtualMachinesClient.Get(ctx, as.manager.config.ResourceGroup, name, nil) if rerr != nil { - if exists, _ := checkResourceExistsFromRetryError(rerr); !exists { + // Check if it's a 404 error indicating resource doesn't exist + var respErr *azcore.ResponseError + if errors.As(rerr, &respErr) && respErr.StatusCode == http.StatusNotFound { 
klog.V(2).Infof("VirtualMachine %s/%s has already been removed", as.manager.config.ResourceGroup, name) return nil } klog.Errorf("failed to get VM: %s/%s: %s", as.manager.config.ResourceGroup, name, rerr.Error()) - return rerr.Error() + return rerr } - vhd := vm.VirtualMachineProperties.StorageProfile.OsDisk.Vhd - managedDisk := vm.VirtualMachineProperties.StorageProfile.OsDisk.ManagedDisk + vhd := vm.Properties.StorageProfile.OSDisk.Vhd + managedDisk := vm.Properties.StorageProfile.OSDisk.ManagedDisk if vhd == nil && managedDisk == nil { klog.Errorf("failed to get a valid os disk URI for VM: %s/%s", as.manager.config.ResourceGroup, name) return fmt.Errorf("os disk does not have a VHD URI") } - osDiskName := vm.VirtualMachineProperties.StorageProfile.OsDisk.Name + osDiskName := vm.Properties.StorageProfile.OSDisk.Name var nicName string - nicID := (*vm.VirtualMachineProperties.NetworkProfile.NetworkInterfaces)[0].ID + nicID := (vm.Properties.NetworkProfile.NetworkInterfaces)[0].ID if nicID == nil { klog.Warningf("NIC ID is not set for VM (%s/%s)", as.manager.config.ResourceGroup, name) } else { @@ -574,10 +614,15 @@ func (as *AgentPool) deleteVirtualMachine(name string) error { defer deleteCancel() klog.Infof("waiting for VirtualMachine deletion: %s/%s", as.manager.config.ResourceGroup, name) - rerr = as.manager.azClient.virtualMachinesClient.Delete(deleteCtx, as.manager.config.ResourceGroup, name) - _, realErr := checkResourceExistsFromRetryError(rerr) - if realErr != nil { - return realErr + poller, rerr := as.manager.azClient.virtualMachinesClient.BeginDelete(deleteCtx, as.manager.config.ResourceGroup, name, nil) + if rerr != nil { + klog.Errorf("failed to begin deleting VM: %s/%s: %s", as.manager.config.ResourceGroup, name, rerr.Error()) + return rerr + } + _, rerr = poller.PollUntilDone(deleteCtx, &runtime.PollUntilDoneOptions{Frequency: 30 * time.Second}) + if rerr != nil { + klog.Errorf("failed to delete VM: %s/%s: %s", as.manager.config.ResourceGroup, name, rerr.Error()) + return rerr } klog.V(2).Infof("VirtualMachine %s/%s removed", as.manager.config.ResourceGroup, name) @@ -585,11 +630,16 @@ func (as *AgentPool) deleteVirtualMachine(name string) error { klog.Infof("deleting nic: %s/%s", as.manager.config.ResourceGroup, nicName) interfaceCtx, interfaceCancel := getContextWithCancel() defer interfaceCancel() - rerr := as.manager.azClient.interfacesClient.Delete(interfaceCtx, as.manager.config.ResourceGroup, nicName) - klog.Infof("waiting for nic deletion: %s/%s", as.manager.config.ResourceGroup, nicName) - _, realErr := checkResourceExistsFromRetryError(rerr) - if realErr != nil { - return realErr + + poller, rerr := as.manager.azClient.interfacesClient.BeginDelete(interfaceCtx, as.manager.config.ResourceGroup, nicName, nil) + if rerr != nil { + klog.Errorf("failed to begin deleting nic: %s/%s: %s", as.manager.config.ResourceGroup, nicName, rerr.Error()) + return rerr + } + _, rerr = poller.PollUntilDone(interfaceCtx, &runtime.PollUntilDoneOptions{Frequency: 30 * time.Second}) + if rerr != nil { + klog.Errorf("failed to delete nic: %s/%s: %s", as.manager.config.ResourceGroup, nicName, rerr.Error()) + return rerr } klog.V(2).Infof("interface %s/%s removed", as.manager.config.ResourceGroup, nicName) } @@ -617,10 +667,15 @@ func (as *AgentPool) deleteVirtualMachine(name string) error { klog.Infof("deleting managed disk: %s/%s", as.manager.config.ResourceGroup, *osDiskName) disksCtx, disksCancel := getContextWithCancel() defer disksCancel() - rerr := 
as.manager.azClient.disksClient.Delete(disksCtx, as.manager.config.SubscriptionID, as.manager.config.ResourceGroup, *osDiskName) - _, realErr := checkResourceExistsFromRetryError(rerr) - if realErr != nil { - return realErr + poller, rerr := as.manager.azClient.disksClient.BeginDelete(disksCtx, as.manager.config.ResourceGroup, *osDiskName, nil) + if rerr != nil { + klog.Errorf("failed to begin deleting managed disk: %s/%s: %s", as.manager.config.ResourceGroup, *osDiskName, rerr.Error()) + return rerr + } + _, rerr = poller.PollUntilDone(disksCtx, &runtime.PollUntilDoneOptions{Frequency: 30 * time.Second}) + if rerr != nil { + klog.Errorf("failed to delete managed disk: %s/%s: %s", as.manager.config.ResourceGroup, *osDiskName, rerr.Error()) + return rerr } klog.V(2).Infof("disk %s/%s removed", as.manager.config.ResourceGroup, *osDiskName) } diff --git a/cluster-autoscaler/cloudprovider/azure/azure_agent_pool_test.go b/cluster-autoscaler/cloudprovider/azure/azure_agent_pool_test.go index 9eb1b195defa..457e83d6483f 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_agent_pool_test.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_agent_pool_test.go @@ -19,28 +19,22 @@ package azure import ( "context" "fmt" - "net/http" "testing" "time" apiv1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" - "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/storageaccountclient/mockstorageaccountclient" - "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/mockvmclient" providerazureconsts "sigs.k8s.io/cloud-provider-azure/pkg/consts" - "sigs.k8s.io/cloud-provider-azure/pkg/retry" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" - "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources" - "github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" "github.com/stretchr/testify/assert" "go.uber.org/mock/gomock" ) var ( - rerrTooManyReqs = retry.Error{HTTPStatusCode: http.StatusTooManyRequests} - rerrInternalErr = retry.Error{HTTPStatusCode: http.StatusInternalServerError} testValidProviderID0 = "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/as-vm-0" testValidProviderID1 = "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/as-vm-1" testInvalidProviderID = "/subscriptions/sub/resourceGroups/rg/providers/provider/virtualMachines/as-vm-0/" @@ -59,21 +53,21 @@ func newTestAgentPool(manager *AzureManager, name string) *AgentPool { } } -func getExpectedVMs() []compute.VirtualMachine { - expectedVMs := []compute.VirtualMachine{ +func getExpectedVMs() []armcompute.VirtualMachine { + expectedVMs := []armcompute.VirtualMachine{ { Name: ptr.To("000-0-00000000-0"), ID: ptr.To("/subscriptions/sub/resourceGroups/rg/providers/provider/0"), Tags: map[string]*string{"poolName": ptr.To("as")}, - VirtualMachineProperties: &compute.VirtualMachineProperties{ - StorageProfile: &compute.StorageProfile{ - OsDisk: &compute.OSDisk{ - OsType: compute.OperatingSystemTypesLinux, - Vhd: &compute.VirtualHardDisk{URI: ptr.To("https://foo.blob/vhds/bar.vhd")}, + Properties: &armcompute.VirtualMachineProperties{ + StorageProfile: &armcompute.StorageProfile{ + OSDisk: &armcompute.OSDisk{ + OSType: ptr.To(armcompute.OperatingSystemTypesLinux), + Vhd: 
&armcompute.VirtualHardDisk{URI: ptr.To("https://foo.blob/vhds/bar.vhd")}, }, }, - NetworkProfile: &compute.NetworkProfile{ - NetworkInterfaces: &[]compute.NetworkInterfaceReference{ + NetworkProfile: &armcompute.NetworkProfile{ + NetworkInterfaces: []*armcompute.NetworkInterfaceReference{ {}, }, }, @@ -83,9 +77,9 @@ func getExpectedVMs() []compute.VirtualMachine { Name: ptr.To("00000000001"), ID: ptr.To("/subscriptions/sub/resourceGroups/rg/providers/provider/0"), Tags: map[string]*string{"poolName": ptr.To("as")}, - VirtualMachineProperties: &compute.VirtualMachineProperties{ - StorageProfile: &compute.StorageProfile{ - OsDisk: &compute.OSDisk{OsType: compute.OperatingSystemTypesWindows}, + Properties: &armcompute.VirtualMachineProperties{ + StorageProfile: &armcompute.StorageProfile{ + OSDisk: &armcompute.OSDisk{OSType: ptr.To(armcompute.OperatingSystemTypesWindows)}, }, }, }, @@ -107,39 +101,39 @@ func TestDeleteOutdatedDeployments(t *testing.T) { timeBenchMark, _ := time.Parse(timeLayout, "2000-01-01 00:00:00") testCases := []struct { - deployments map[string]resources.DeploymentExtended + deployments map[string]armresources.DeploymentExtended expectedDeploymentsNames map[string]bool expectedErr error desc string }{ { - deployments: map[string]resources.DeploymentExtended{ + deployments: map[string]armresources.DeploymentExtended{ "non-cluster-autoscaler-0000": { Name: ptr.To("non-cluster-autoscaler-0000"), - Properties: &resources.DeploymentPropertiesExtended{ - ProvisioningState: ptr.To("Succeeded"), - Timestamp: &date.Time{Time: timeBenchMark.Add(2 * time.Minute)}, + Properties: &armresources.DeploymentPropertiesExtended{ + ProvisioningState: ptr.To(armresources.ProvisioningStateSucceeded), + Timestamp: ptr.To(timeBenchMark.Add(2 * time.Minute)), }, }, "cluster-autoscaler-0000": { Name: ptr.To("cluster-autoscaler-0000"), - Properties: &resources.DeploymentPropertiesExtended{ - ProvisioningState: ptr.To("Succeeded"), - Timestamp: &date.Time{Time: timeBenchMark}, + Properties: &armresources.DeploymentPropertiesExtended{ + ProvisioningState: ptr.To(armresources.ProvisioningStateSucceeded), + Timestamp: ptr.To(timeBenchMark), }, }, "cluster-autoscaler-0001": { Name: ptr.To("cluster-autoscaler-0001"), - Properties: &resources.DeploymentPropertiesExtended{ - ProvisioningState: ptr.To("Succeeded"), - Timestamp: &date.Time{Time: timeBenchMark.Add(time.Minute)}, + Properties: &armresources.DeploymentPropertiesExtended{ + ProvisioningState: ptr.To(armresources.ProvisioningStateSucceeded), + Timestamp: ptr.To(timeBenchMark.Add(time.Minute)), }, }, "cluster-autoscaler-0002": { Name: ptr.To("cluster-autoscaler-0002"), - Properties: &resources.DeploymentPropertiesExtended{ - ProvisioningState: ptr.To("Succeeded"), - Timestamp: &date.Time{Time: timeBenchMark.Add(2 * time.Minute)}, + Properties: &armresources.DeploymentPropertiesExtended{ + ProvisioningState: ptr.To(armresources.ProvisioningStateSucceeded), + Timestamp: ptr.To(timeBenchMark.Add(2 * time.Minute)), }, }, }, @@ -155,13 +149,14 @@ func TestDeleteOutdatedDeployments(t *testing.T) { for _, test := range testCases { testAS := newTestAgentPool(newTestAzureManager(t), "testAS") - testAS.manager.azClient.deploymentClient = &DeploymentClientMock{ + mockDeploymentClient := &DeploymentClientMock{ FakeStore: test.deployments, } + testAS.manager.azClient.deploymentClient = mockDeploymentClient err := testAS.deleteOutdatedDeployments() assert.Equal(t, test.expectedErr, err, test.desc) - existedDeployments, _ := 
testAS.manager.azClient.deploymentClient.List(context.Background(), "") + existedDeployments, _ := mockDeploymentClient.List(context.Background(), "") existedDeploymentsNames := make(map[string]bool) for _, deployment := range existedDeployments { existedDeploymentsNames[*deployment.Name] = true @@ -175,15 +170,15 @@ func TestAgentPoolGetVMsFromCache(t *testing.T) { defer ctrl.Finish() testAS := newTestAgentPool(newTestAzureManager(t), "testAS") - expectedVMs := []compute.VirtualMachine{ + expectedVMs := []armcompute.VirtualMachine{ { Tags: map[string]*string{"poolName": ptr.To("testAS")}, }, } - mockVMClient := mockvmclient.NewMockInterface(ctrl) + mockVMClient := NewMockVirtualMachinesClient(ctrl) testAS.manager.azClient.virtualMachinesClient = mockVMClient - mockVMClient.EXPECT().List(gomock.Any(), testAS.manager.config.ResourceGroup).Return(expectedVMs, nil) + mockVMClient.EXPECT().NewListPager(testAS.manager.config.ResourceGroup, gomock.Any()).Return(getFakeVMListPager(expectedVMs)) testAS.manager.config.VMType = providerazureconsts.VMTypeStandard ac, err := newAzureCache(testAS.manager.azClient, refreshInterval, *testAS.manager.config) assert.NoError(t, err) @@ -200,9 +195,9 @@ func TestGetVMIndexes(t *testing.T) { as := newTestAgentPool(newTestAzureManager(t), "as") expectedVMs := getExpectedVMs() - mockVMClient := mockvmclient.NewMockInterface(ctrl) + mockVMClient := NewMockVirtualMachinesClient(ctrl) as.manager.azClient.virtualMachinesClient = mockVMClient - mockVMClient.EXPECT().List(gomock.Any(), as.manager.config.ResourceGroup).Return(expectedVMs, nil) + mockVMClient.EXPECT().NewListPager(as.manager.config.ResourceGroup, gomock.Any()).Return(getFakeVMListPager(expectedVMs)) as.manager.config.VMType = providerazureconsts.VMTypeStandard ac, err := newAzureCache(as.manager.azClient, refreshInterval, *as.manager.config) assert.NoError(t, err) @@ -214,7 +209,7 @@ func TestGetVMIndexes(t *testing.T) { assert.Equal(t, 2, len(indexToVM)) expectedVMs[0].ID = ptr.To("foo") - mockVMClient.EXPECT().List(gomock.Any(), as.manager.config.ResourceGroup).Return(expectedVMs, nil) + mockVMClient.EXPECT().NewListPager(as.manager.config.ResourceGroup, gomock.Any()).Return(getFakeVMListPager(expectedVMs)) err = as.manager.forceRefresh() assert.NoError(t, err) sortedIndexes, indexToVM, err = as.GetVMIndexes() @@ -224,7 +219,7 @@ func TestGetVMIndexes(t *testing.T) { assert.Nil(t, indexToVM) expectedVMs[0].Name = ptr.To("foo") - mockVMClient.EXPECT().List(gomock.Any(), as.manager.config.ResourceGroup).Return(expectedVMs, nil) + mockVMClient.EXPECT().NewListPager(as.manager.config.ResourceGroup, gomock.Any()).Return(getFakeVMListPager(expectedVMs)) err = as.manager.forceRefresh() sortedIndexes, indexToVM, err = as.GetVMIndexes() expectedErr = fmt.Errorf("resource name was missing from identifier") @@ -240,9 +235,9 @@ func TestGetCurSize(t *testing.T) { as := newTestAgentPool(newTestAzureManager(t), "as") as.curSize = 1 expectedVMs := getExpectedVMs() - mockVMClient := mockvmclient.NewMockInterface(ctrl) + mockVMClient := NewMockVirtualMachinesClient(ctrl) as.manager.azClient.virtualMachinesClient = mockVMClient - mockVMClient.EXPECT().List(gomock.Any(), as.manager.config.ResourceGroup).Return(expectedVMs, nil) + mockVMClient.EXPECT().NewListPager(as.manager.config.ResourceGroup, gomock.Any()).Return(getFakeVMListPager(expectedVMs)) as.manager.config.VMType = providerazureconsts.VMTypeStandard ac, err := newAzureCache(as.manager.azClient, refreshInterval, *as.manager.config) assert.NoError(t, err) @@ 
-264,10 +259,10 @@ func TestAgentPoolTargetSize(t *testing.T) { defer ctrl.Finish() as := newTestAgentPool(newTestAzureManager(t), "as") - mockVMClient := mockvmclient.NewMockInterface(ctrl) + mockVMClient := NewMockVirtualMachinesClient(ctrl) as.manager.azClient.virtualMachinesClient = mockVMClient expectedVMs := getExpectedVMs() - mockVMClient.EXPECT().List(gomock.Any(), as.manager.config.ResourceGroup).Return(expectedVMs, nil) + mockVMClient.EXPECT().NewListPager(as.manager.config.ResourceGroup, gomock.Any()).Return(getFakeVMListPager(expectedVMs)) as.manager.config.VMType = providerazureconsts.VMTypeStandard ac, err := newAzureCache(as.manager.azClient, refreshInterval, *as.manager.config) assert.NoError(t, err) @@ -284,10 +279,12 @@ func TestAgentPoolIncreaseSize(t *testing.T) { defer ctrl.Finish() as := newTestAgentPool(newTestAzureManager(t), "as") - mockVMClient := mockvmclient.NewMockInterface(ctrl) + mockVMClient := NewMockVirtualMachinesClient(ctrl) as.manager.azClient.virtualMachinesClient = mockVMClient expectedVMs := getExpectedVMs() - mockVMClient.EXPECT().List(gomock.Any(), as.manager.config.ResourceGroup).Return(expectedVMs, nil).MaxTimes(2) + mockVMClient.EXPECT().NewListPager(as.manager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachinesClientListResponse] { + return getFakeVMListPager(expectedVMs) + }).MaxTimes(2) as.manager.config.VMType = providerazureconsts.VMTypeStandard ac, err := newAzureCache(as.manager.azClient, refreshInterval, *as.manager.config) assert.NoError(t, err) @@ -297,7 +294,9 @@ func TestAgentPoolIncreaseSize(t *testing.T) { expectedErr := fmt.Errorf("size increase must be positive") assert.Equal(t, expectedErr, err) - mockVMClient.EXPECT().List(gomock.Any(), as.manager.config.ResourceGroup).Return(expectedVMs, nil).MaxTimes(2) + mockVMClient.EXPECT().NewListPager(as.manager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachinesClientListResponse] { + return getFakeVMListPager(expectedVMs) + }).MaxTimes(2) err = as.manager.Refresh() assert.NoError(t, err) err = as.IncreaseSize(4) @@ -313,10 +312,12 @@ func TestAgentPoolDecreaseTargetSize(t *testing.T) { as := newTestAgentPool(newTestAzureManager(t), "as") as.curSize = 3 - mockVMClient := mockvmclient.NewMockInterface(ctrl) + mockVMClient := NewMockVirtualMachinesClient(ctrl) as.manager.azClient.virtualMachinesClient = mockVMClient expectedVMs := getExpectedVMs() - mockVMClient.EXPECT().List(gomock.Any(), as.manager.config.ResourceGroup).Return(expectedVMs, nil).MaxTimes(3) + mockVMClient.EXPECT().NewListPager(as.manager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachinesClientListResponse] { + return getFakeVMListPager(expectedVMs) + }).MaxTimes(3) as.manager.config.VMType = providerazureconsts.VMTypeStandard ac, err := newAzureCache(as.manager.azClient, refreshInterval, *as.manager.config) assert.NoError(t, err) @@ -326,7 +327,9 @@ func TestAgentPoolDecreaseTargetSize(t *testing.T) { assert.NoError(t, err) assert.Equal(t, int64(2), as.curSize) - mockVMClient.EXPECT().List(gomock.Any(), as.manager.config.ResourceGroup).Return(expectedVMs, nil).MaxTimes(2) + mockVMClient.EXPECT().NewListPager(as.manager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachinesClientListResponse] { + return getFakeVMListPager(expectedVMs) + }).MaxTimes(2) err = 
as.manager.Refresh() assert.NoError(t, err) err = as.DecreaseTargetSize(-1) @@ -370,10 +373,10 @@ func TestDeleteInstances(t *testing.T) { as.manager.azureCache.instanceToNodeGroup[azureRef{Name: testValidProviderID1}] = as1 as.manager.azureCache.instanceToNodeGroup[azureRef{Name: testInvalidProviderID}] = as - mockVMClient := mockvmclient.NewMockInterface(ctrl) + mockVMClient := NewMockVirtualMachinesClient(ctrl) as.manager.azClient.virtualMachinesClient = mockVMClient - mockSAClient := mockstorageaccountclient.NewMockInterface(ctrl) + mockSAClient := NewMockStorageAccountsClient(ctrl) as.manager.azClient.storageAccountsClient = mockSAClient err := as.DeleteInstances([]*azureRef{}) @@ -420,10 +423,10 @@ func TestForceDeleteNodes(t *testing.T) { as.manager.azureCache.instanceToNodeGroup[azureRef{Name: testValidProviderID1}] = as1 as.manager.azureCache.instanceToNodeGroup[azureRef{Name: testInvalidProviderID}] = as - mockVMClient := mockvmclient.NewMockInterface(ctrl) + mockVMClient := NewMockVirtualMachinesClient(ctrl) as.manager.azClient.virtualMachinesClient = mockVMClient - mockSAClient := mockstorageaccountclient.NewMockInterface(ctrl) + mockSAClient := NewMockStorageAccountsClient(ctrl) as.manager.azClient.storageAccountsClient = mockSAClient err := as.ForceDeleteNodes([]*apiv1.Node{}) @@ -457,11 +460,11 @@ func TestAgentPoolDeleteNodes(t *testing.T) { as := newTestAgentPool(newTestAzureManager(t), "as") as.manager.azureCache.instanceToNodeGroup[azureRef{Name: testValidProviderID0}] = as expectedVMs := getExpectedVMs() - mockVMClient := mockvmclient.NewMockInterface(ctrl) + mockVMClient := NewMockVirtualMachinesClient(ctrl) as.manager.azClient.virtualMachinesClient = mockVMClient - mockSAClient := mockstorageaccountclient.NewMockInterface(ctrl) + mockSAClient := NewMockStorageAccountsClient(ctrl) as.manager.azClient.storageAccountsClient = mockSAClient - mockVMClient.EXPECT().List(gomock.Any(), as.manager.config.ResourceGroup).Return(expectedVMs, nil) + mockVMClient.EXPECT().NewListPager(as.manager.config.ResourceGroup, gomock.Any()).Return(getFakeVMListPager(expectedVMs)) as.manager.config.VMType = providerazureconsts.VMTypeStandard ac, err := newAzureCache(as.manager.azClient, refreshInterval, *as.manager.config) as.manager.config.VMType = providerazureconsts.VMTypeVMSS @@ -499,7 +502,7 @@ func TestAgentPoolNodes(t *testing.T) { defer ctrl.Finish() as := newTestAgentPool(newTestAzureManager(t), "as") - expectedVMs := []compute.VirtualMachine{ + expectedVMs := []armcompute.VirtualMachine{ { Tags: map[string]*string{"poolName": ptr.To("as")}, ID: ptr.To(""), @@ -510,9 +513,9 @@ func TestAgentPoolNodes(t *testing.T) { }, } - mockVMClient := mockvmclient.NewMockInterface(ctrl) + mockVMClient := NewMockVirtualMachinesClient(ctrl) as.manager.azClient.virtualMachinesClient = mockVMClient - mockVMClient.EXPECT().List(gomock.Any(), as.manager.config.ResourceGroup).Return(expectedVMs, nil) + mockVMClient.EXPECT().NewListPager(as.manager.config.ResourceGroup, gomock.Any()).Return(getFakeVMListPager(expectedVMs)) as.manager.config.VMType = providerazureconsts.VMTypeStandard ac, err := newAzureCache(as.manager.azClient, refreshInterval, *as.manager.config) assert.NoError(t, err) @@ -522,13 +525,13 @@ func TestAgentPoolNodes(t *testing.T) { assert.NoError(t, err) assert.Equal(t, 1, len(nodes)) - expectedVMs = []compute.VirtualMachine{ + expectedVMs = []armcompute.VirtualMachine{ { Tags: map[string]*string{"poolName": ptr.To("as")}, ID: ptr.To("foo"), }, } - 
mockVMClient.EXPECT().List(gomock.Any(), as.manager.config.ResourceGroup).Return(expectedVMs, nil) + mockVMClient.EXPECT().NewListPager(as.manager.config.ResourceGroup, gomock.Any()).Return(getFakeVMListPager(expectedVMs)) err = as.manager.forceRefresh() assert.NoError(t, err) nodes, err = as.Nodes() diff --git a/cluster-autoscaler/cloudprovider/azure/azure_cache.go b/cluster-autoscaler/cloudprovider/azure/azure_cache.go index 29d2d63eb752..0b61b4500dce 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_cache.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_cache.go @@ -25,10 +25,11 @@ import ( "sync" "time" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v5" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" - "github.com/Azure/skewer" + "github.com/Azure/skewer/v2" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" + "k8s.io/utils/ptr" providerazureconsts "sigs.k8s.io/cloud-provider-azure/pkg/consts" "k8s.io/klog/v2" @@ -82,10 +83,10 @@ type azureCache struct { // scaleSets keeps the set of all known scalesets in the resource group, populated/refreshed via VMSS.List() call. // It is only used/populated if vmType is vmTypeVMSS (default). - scaleSets map[string]compute.VirtualMachineScaleSet + scaleSets map[string]armcompute.VirtualMachineScaleSet // virtualMachines keeps the set of all VMs in the resource group. // It is only used/populated if vmType is vmTypeStandard. - virtualMachines map[string][]compute.VirtualMachine + virtualMachines map[string][]armcompute.VirtualMachine // registeredNodeGroups represents all known NodeGroups. registeredNodeGroups []cloudprovider.NodeGroup @@ -122,8 +123,8 @@ func newAzureCache(client *azClient, cacheTTL time.Duration, config Config) (*az enableVMsAgentPool: config.EnableVMsAgentPool, vmType: config.VMType, vmsPoolMap: make(map[string]armcontainerservice.AgentPool), - scaleSets: make(map[string]compute.VirtualMachineScaleSet), - virtualMachines: make(map[string][]compute.VirtualMachine), + scaleSets: make(map[string]armcompute.VirtualMachineScaleSet), + virtualMachines: make(map[string][]armcompute.VirtualMachine), registeredNodeGroups: make([]cloudprovider.NodeGroup, 0), instanceToNodeGroup: make(map[azureRef]cloudprovider.NodeGroup), unownedInstances: make(map[azureRef]bool), @@ -151,14 +152,14 @@ func (m *azureCache) getVMsPoolMap() map[string]armcontainerservice.AgentPool { return m.vmsPoolMap } -func (m *azureCache) getVirtualMachines() map[string][]compute.VirtualMachine { +func (m *azureCache) getVirtualMachines() map[string][]armcompute.VirtualMachine { m.mutex.Lock() defer m.mutex.Unlock() return m.virtualMachines } -func (m *azureCache) getScaleSets() map[string]compute.VirtualMachineScaleSet { +func (m *azureCache) getScaleSets() map[string]armcompute.VirtualMachineScaleSet { m.mutex.Lock() defer m.mutex.Unlock() @@ -272,32 +273,33 @@ const ( ) // fetchVirtualMachines returns the updated list of virtual machines in the config resource group using the Azure API. 
-func (m *azureCache) fetchVirtualMachines() (map[string][]compute.VirtualMachine, error) { +func (m *azureCache) fetchVirtualMachines() (map[string][]armcompute.VirtualMachine, error) { ctx, cancel := getContextWithCancel() defer cancel() - result, err := m.azClient.virtualMachinesClient.List(ctx, m.resourceGroup) - if err != nil { - klog.Errorf("VirtualMachinesClient.List in resource group %q failed: %v", m.resourceGroup, err) - return nil, err.Error() - } - - instances := make(map[string][]compute.VirtualMachine) - for _, instance := range result { - if instance.Tags == nil { - continue - } - - tags := instance.Tags - vmPoolName := tags[agentpoolNameTag] - // fall back to legacy tag name if not found - if vmPoolName == nil { - vmPoolName = tags[legacyAgentpoolNameTag] + instances := make(map[string][]armcompute.VirtualMachine) + pager := m.azClient.virtualMachinesClient.NewListPager(m.resourceGroup, nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + klog.Errorf("VirtualMachinesClient.pager.NextPage in resource group %q failed: %v", m.resourceGroup, err) + return nil, err } - if vmPoolName != nil { - instances[*vmPoolName] = append(instances[*vmPoolName], instance) + for _, instance := range page.Value { + if instance.Tags != nil { + tags := instance.Tags + vmPoolName := tags[agentpoolNameTag] + // fall back to legacy tag name if not found + if vmPoolName == nil { + vmPoolName = tags[legacyAgentpoolNameTag] + } + if vmPoolName != nil { + instances[*vmPoolName] = append(instances[*vmPoolName], *instance) + } + } } } + return instances, nil } @@ -337,20 +339,23 @@ func (m *azureCache) fetchVMsPools() (map[string]armcontainerservice.AgentPool, } // fetchScaleSets returns the updated list of scale sets in the config resource group using the Azure API. -func (m *azureCache) fetchScaleSets() (map[string]compute.VirtualMachineScaleSet, error) { +func (m *azureCache) fetchScaleSets() (map[string]armcompute.VirtualMachineScaleSet, error) { ctx, cancel := getContextWithTimeout(vmssContextTimeout) defer cancel() - result, err := m.azClient.virtualMachineScaleSetsClient.List(ctx, m.resourceGroup) - if err != nil { - klog.Errorf("VirtualMachineScaleSetsClient.List in resource group %q failed: %v", m.resourceGroup, err) - return nil, err.Error() + sets := make(map[string]armcompute.VirtualMachineScaleSet) + pager := m.azClient.virtualMachineScaleSetsClient.NewListPager(m.resourceGroup, nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + klog.Errorf("VirtualMachineScaleSetsClient.pager.NextPage in resource group %q failed: %v", m.resourceGroup, err) + return nil, err + } + for _, vmss := range page.Value { + sets[*vmss.Name] = *vmss + } } - sets := make(map[string]compute.VirtualMachineScaleSet) - for _, vmss := range result { - sets[*vmss.Name] = vmss - } return sets, nil } @@ -514,7 +519,7 @@ func (m *azureCache) FindForInstance(instance *azureRef, vmType string) (cloudpr // isAllScaleSetsAreUniform determines if all the scale set autoscaler is monitoring are Uniform or not. 
func (m *azureCache) areAllScaleSetsUniform() bool { for _, scaleSet := range m.scaleSets { - if scaleSet.VirtualMachineScaleSetProperties.OrchestrationMode == compute.Flexible { + if ptr.Deref(scaleSet.Properties.OrchestrationMode, "") == armcompute.OrchestrationModeFlexible { return false } } diff --git a/cluster-autoscaler/cloudprovider/azure/azure_client.go b/cluster-autoscaler/cloudprovider/azure/azure_client.go index 8f0be7184a1f..68f5b13a8157 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_client.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_client.go @@ -31,31 +31,84 @@ import ( azurecore_policy "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v5" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v7" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" "github.com/Azure/go-autorest/autorest/azure/auth" klog "k8s.io/klog/v2" - "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/deploymentclient" - "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient" - "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/interfaceclient" - "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/storageaccountclient" - "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient" - "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient" - "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient" providerazureconfig "sigs.k8s.io/cloud-provider-azure/pkg/provider/config" ) -//go:generate sh -c "mockgen -source=azure_client.go -destination azure_mock_agentpool_client.go -package azure -exclude_interfaces DeploymentsClient" +//go:generate sh -c "mockgen -source=azure_client.go -destination azure_mock_clients.go -package azure" const ( vmsContextTimeout = 5 * time.Minute vmsAsyncContextTimeout = 30 * time.Minute ) +// VirtualMachineScaleSetsClient interface for armcompute.VirtualMachineScaleSetsClient +type VirtualMachineScaleSetsClient interface { + Get(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *armcompute.VirtualMachineScaleSetsClientGetOptions) (armcompute.VirtualMachineScaleSetsClientGetResponse, error) + NewListPager(resourceGroupName string, options *armcompute.VirtualMachineScaleSetsClientListOptions) *runtime.Pager[armcompute.VirtualMachineScaleSetsClientListResponse] + BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, vmScaleSetName string, parameters armcompute.VirtualMachineScaleSet, options *armcompute.VirtualMachineScaleSetsClientBeginCreateOrUpdateOptions) (*runtime.Poller[armcompute.VirtualMachineScaleSetsClientCreateOrUpdateResponse], error) + BeginDelete(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *armcompute.VirtualMachineScaleSetsClientBeginDeleteOptions) (*runtime.Poller[armcompute.VirtualMachineScaleSetsClientDeleteResponse], error) + BeginDeleteInstances(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmInstanceIDs armcompute.VirtualMachineScaleSetVMInstanceRequiredIDs, 
options *armcompute.VirtualMachineScaleSetsClientBeginDeleteInstancesOptions) (*runtime.Poller[armcompute.VirtualMachineScaleSetsClientDeleteInstancesResponse], error) +} + +// VirtualMachineScaleSetVMsClient interface for armcompute.VirtualMachineScaleSetVMsClient +type VirtualMachineScaleSetVMsClient interface { + Get(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *armcompute.VirtualMachineScaleSetVMsClientGetOptions) (armcompute.VirtualMachineScaleSetVMsClientGetResponse, error) + NewListPager(resourceGroupName string, virtualMachineScaleSetName string, options *armcompute.VirtualMachineScaleSetVMsClientListOptions) *runtime.Pager[armcompute.VirtualMachineScaleSetVMsClientListResponse] + BeginUpdate(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, parameters armcompute.VirtualMachineScaleSetVM, options *armcompute.VirtualMachineScaleSetVMsClientBeginUpdateOptions) (*runtime.Poller[armcompute.VirtualMachineScaleSetVMsClientUpdateResponse], error) + BeginDelete(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *armcompute.VirtualMachineScaleSetVMsClientBeginDeleteOptions) (*runtime.Poller[armcompute.VirtualMachineScaleSetVMsClientDeleteResponse], error) +} + +// VirtualMachinesClient interface for armcompute.VirtualMachinesClient +type VirtualMachinesClient interface { + Get(ctx context.Context, resourceGroupName string, vmName string, options *armcompute.VirtualMachinesClientGetOptions) (armcompute.VirtualMachinesClientGetResponse, error) + NewListPager(resourceGroupName string, options *armcompute.VirtualMachinesClientListOptions) *runtime.Pager[armcompute.VirtualMachinesClientListResponse] + BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, vmName string, parameters armcompute.VirtualMachine, options *armcompute.VirtualMachinesClientBeginCreateOrUpdateOptions) (*runtime.Poller[armcompute.VirtualMachinesClientCreateOrUpdateResponse], error) + BeginDelete(ctx context.Context, resourceGroupName string, vmName string, options *armcompute.VirtualMachinesClientBeginDeleteOptions) (*runtime.Poller[armcompute.VirtualMachinesClientDeleteResponse], error) +} + +// DeploymentsClient interface for armresources.DeploymentsClient +type DeploymentsClient interface { + Get(ctx context.Context, resourceGroupName string, deploymentName string, options *armresources.DeploymentsClientGetOptions) (armresources.DeploymentsClientGetResponse, error) + ExportTemplate(ctx context.Context, resourceGroupName string, deploymentName string, options *armresources.DeploymentsClientExportTemplateOptions) (armresources.DeploymentsClientExportTemplateResponse, error) + BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, deploymentName string, parameters armresources.Deployment, options *armresources.DeploymentsClientBeginCreateOrUpdateOptions) (*runtime.Poller[armresources.DeploymentsClientCreateOrUpdateResponse], error) + BeginDelete(ctx context.Context, resourceGroupName string, deploymentName string, options *armresources.DeploymentsClientBeginDeleteOptions) (*runtime.Poller[armresources.DeploymentsClientDeleteResponse], error) + NewListByResourceGroupPager(resourceGroupName string, options *armresources.DeploymentsClientListByResourceGroupOptions) *runtime.Pager[armresources.DeploymentsClientListByResourceGroupResponse] +} + +// InterfacesClient interface for armnetwork.InterfacesClient +type InterfacesClient interface { + Get(ctx context.Context, 
resourceGroupName string, networkInterfaceName string, options *armnetwork.InterfacesClientGetOptions) (armnetwork.InterfacesClientGetResponse, error) + BeginDelete(ctx context.Context, resourceGroupName string, networkInterfaceName string, options *armnetwork.InterfacesClientBeginDeleteOptions) (*runtime.Poller[armnetwork.InterfacesClientDeleteResponse], error) +} + +// DisksClient interface for armcompute.DisksClient +type DisksClient interface { + Get(ctx context.Context, resourceGroupName string, diskName string, options *armcompute.DisksClientGetOptions) (armcompute.DisksClientGetResponse, error) + BeginDelete(ctx context.Context, resourceGroupName string, diskName string, options *armcompute.DisksClientBeginDeleteOptions) (*runtime.Poller[armcompute.DisksClientDeleteResponse], error) +} + +// StorageAccountsClient interface for armstorage.AccountsClient +type StorageAccountsClient interface { + GetProperties(ctx context.Context, resourceGroupName string, accountName string, options *armstorage.AccountsClientGetPropertiesOptions) (armstorage.AccountsClientGetPropertiesResponse, error) + ListKeys(ctx context.Context, resourceGroupName string, accountName string, options *armstorage.AccountsClientListKeysOptions) (armstorage.AccountsClientListKeysResponse, error) +} + +// ResourceSKUsClient interface for armcompute.ResourceSKUsClient +type ResourceSKUsClient interface { + NewListPager(options *armcompute.ResourceSKUsClientListOptions) *runtime.Pager[armcompute.ResourceSKUsClientListResponse] +} + // AgentPoolsClient interface defines the methods needed for scaling vms pool. // it is implemented by track2 sdk armcontainerservice.AgentPoolsClient type AgentPoolsClient interface { @@ -185,14 +238,14 @@ func newAgentpoolClientWithConfig(subscriptionID string, cred azcore.TokenCreden } type azClient struct { - virtualMachineScaleSetsClient vmssclient.Interface - virtualMachineScaleSetVMsClient vmssvmclient.Interface - virtualMachinesClient vmclient.Interface - deploymentClient deploymentclient.Interface - interfacesClient interfaceclient.Interface - disksClient diskclient.Interface - storageAccountsClient storageaccountclient.Interface - skuClient compute.ResourceSkusClient + virtualMachineScaleSetsClient VirtualMachineScaleSetsClient + virtualMachineScaleSetVMsClient VirtualMachineScaleSetVMsClient + virtualMachinesClient VirtualMachinesClient + deploymentClient DeploymentsClient + interfacesClient InterfacesClient + disksClient DisksClient + storageAccountsClient StorageAccountsClient + skuClient ResourceSKUsClient agentPoolClient AgentPoolsClient } @@ -212,47 +265,74 @@ func newAuthorizer(config *Config, env *azure.Environment) (autorest.Authorizer, } func newAzClient(cfg *Config, env *azure.Environment) (*azClient, error) { - authorizer, err := newAuthorizer(cfg, env) + // Get v2 credentials for all Azure SDK v2 clients + cred, err := getAgentpoolClientCredentials(cfg) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to get Azure credentials: %v", err) } - azClientConfig := cfg.getAzureClientConfig(authorizer, env) - azClientConfig.UserAgent = getUserAgentExtension() + // Create common client options for all v2 clients + clientOptions := &policy.ClientOptions{ + ClientOptions: azurecore_policy.ClientOptions{ + Cloud: cloud.Configuration{ + Services: map[cloud.ServiceName]cloud.ServiceConfiguration{ + cloud.ResourceManager: { + Endpoint: env.ResourceManagerEndpoint, + Audience: env.TokenAudience, + }, + }, + }, + Telemetry: 
azextensions.DefaultTelemetryOpts(getUserAgentExtension()), + Transport: azextensions.DefaultHTTPClient(), + }, + } - vmssClientConfig := azClientConfig.WithRateLimiter(cfg.VirtualMachineScaleSetRateLimit) - scaleSetsClient := vmssclient.New(vmssClientConfig) + scaleSetsClient, err := armcompute.NewVirtualMachineScaleSetsClient(cfg.SubscriptionID, cred, clientOptions) + if err != nil { + return nil, fmt.Errorf("failed to create VMSS client: %v", err) + } klog.V(5).Infof("Created scale set client with authorizer: %v", scaleSetsClient) - vmssVMClientConfig := azClientConfig.WithRateLimiter(cfg.VirtualMachineScaleSetRateLimit) - scaleSetVMsClient := vmssvmclient.New(vmssVMClientConfig) + scaleSetVMsClient, err := armcompute.NewVirtualMachineScaleSetVMsClient(cfg.SubscriptionID, cred, clientOptions) + if err != nil { + return nil, fmt.Errorf("failed to create VMSS VMs client: %v", err) + } klog.V(5).Infof("Created scale set vm client with authorizer: %v", scaleSetVMsClient) - vmClientConfig := azClientConfig.WithRateLimiter(cfg.VirtualMachineRateLimit) - virtualMachinesClient := vmclient.New(vmClientConfig) + virtualMachinesClient, err := armcompute.NewVirtualMachinesClient(cfg.SubscriptionID, cred, clientOptions) + if err != nil { + return nil, fmt.Errorf("failed to create VMs client: %v", err) + } klog.V(5).Infof("Created vm client with authorizer: %v", virtualMachinesClient) - deploymentConfig := azClientConfig.WithRateLimiter(cfg.DeploymentRateLimit) - deploymentClient := deploymentclient.New(deploymentConfig) + deploymentClient, err := armresources.NewDeploymentsClient(cfg.SubscriptionID, cred, clientOptions) + if err != nil { + return nil, fmt.Errorf("failed to create deployments client: %v", err) + } klog.V(5).Infof("Created deployments client with authorizer: %v", deploymentClient) - interfaceClientConfig := azClientConfig.WithRateLimiter(cfg.InterfaceRateLimit) - interfacesClient := interfaceclient.New(interfaceClientConfig) + interfacesClient, err := armnetwork.NewInterfacesClient(cfg.SubscriptionID, cred, clientOptions) + if err != nil { + return nil, fmt.Errorf("failed to create interfaces client: %v", err) + } klog.V(5).Infof("Created interfaces client with authorizer: %v", interfacesClient) - accountClientConfig := azClientConfig.WithRateLimiter(cfg.StorageAccountRateLimit) - storageAccountsClient := storageaccountclient.New(accountClientConfig) + storageAccountsClient, err := armstorage.NewAccountsClient(cfg.SubscriptionID, cred, clientOptions) + if err != nil { + return nil, fmt.Errorf("failed to create storage accounts client: %v", err) + } klog.V(5).Infof("Created storage accounts client with authorizer: %v", storageAccountsClient) - diskClientConfig := azClientConfig.WithRateLimiter(cfg.DiskRateLimit) - disksClient := diskclient.New(diskClientConfig) + disksClient, err := armcompute.NewDisksClient(cfg.SubscriptionID, cred, clientOptions) + if err != nil { + return nil, fmt.Errorf("failed to create disks client: %v", err) + } klog.V(5).Infof("Created disks client with authorizer: %v", disksClient) - // Reference on why selecting ResourceManagerEndpoint as baseURI - - // https://github.com/Azure/go-autorest/blob/main/autorest/azure/environments.go - skuClient := compute.NewResourceSkusClientWithBaseURI(azClientConfig.ResourceManagerEndpoint, cfg.SubscriptionID) - skuClient.Authorizer = azClientConfig.Authorizer - skuClient.UserAgent = azClientConfig.UserAgent + skuClient, err := armcompute.NewResourceSKUsClient(cfg.SubscriptionID, cred, clientOptions) + if err != nil { + 
return nil, fmt.Errorf("failed to create SKU client: %v", err) + } klog.V(5).Infof("Created sku client with authorizer: %v", skuClient) agentPoolClient, err := newAgentpoolClient(cfg) diff --git a/cluster-autoscaler/cloudprovider/azure/azure_cloud_provider_test.go b/cluster-autoscaler/cloudprovider/azure/azure_cloud_provider_test.go index d15e7984ae40..de97224aca70 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_cloud_provider_test.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_cloud_provider_test.go @@ -20,18 +20,16 @@ import ( "fmt" "testing" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v5" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" - "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" - "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/mockvmclient" - "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/mockvmssclient" - "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/mockvmssvmclient" providerazureconsts "sigs.k8s.io/cloud-provider-azure/pkg/consts" providerazure "sigs.k8s.io/cloud-provider-azure/pkg/provider" @@ -42,17 +40,23 @@ import ( func newTestAzureManager(t *testing.T) *AzureManager { ctrl := gomock.NewController(t) - defer ctrl.Finish() + // Note: Not calling defer ctrl.Finish() here - let the test function handle it - expectedScaleSets := newTestVMSSList(3, "test-asg", "eastus", compute.Uniform) + expectedScaleSets := newTestVMSSList(3, "test-asg", "eastus", armcompute.OrchestrationModeUniform) expectedVMSSVMs := newTestVMSSVMList(3) - mockVMSSClient := mockvmssclient.NewMockInterface(ctrl) - mockVMSSClient.EXPECT().List(gomock.Any(), "rg").Return(expectedScaleSets, nil).AnyTimes() - mockVMSSVMClient := mockvmssvmclient.NewMockInterface(ctrl) - mockVMSSVMClient.EXPECT().List(gomock.Any(), "rg", "test-asg", gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes() - mockVMClient := mockvmclient.NewMockInterface(ctrl) + mockVMSSClient := NewMockVirtualMachineScaleSetsClient(ctrl) + mockVMSSClient.EXPECT().NewListPager("rg", gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetsClientListResponse] { + return getFakeVMSSListPager(expectedScaleSets) + }).AnyTimes() + mockVMSSVMClient := NewMockVirtualMachineScaleSetVMsClient(ctrl) + mockVMSSVMClient.EXPECT().NewListPager("rg", "test-asg", gomock.Any()).DoAndReturn(func(string, string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetVMsClientListResponse] { + return getFakeVMSSVMListPager(expectedVMSSVMs) + }).AnyTimes() + mockVMClient := NewMockVirtualMachinesClient(ctrl) expectedVMs := newTestVMList(3) - mockVMClient.EXPECT().List(gomock.Any(), "rg").Return(expectedVMs, nil).AnyTimes() + mockVMClient.EXPECT().NewListPager("rg", gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachinesClientListResponse] { + return getFakeVMListPager(expectedVMs) + }).AnyTimes() manager := &AzureManager{ env: azure.PublicCloud, @@ -72,19 +76,20 @@ func newTestAzureManager(t *testing.T) *AzureManager { virtualMachineScaleSetVMsClient: 
mockVMSSVMClient, virtualMachinesClient: mockVMClient, deploymentClient: &DeploymentClientMock{ - FakeStore: map[string]resources.DeploymentExtended{ + FakeStore: map[string]armresources.DeploymentExtended{ "deployment": { - Name: ptr.To("deployment"), - Properties: &resources.DeploymentPropertiesExtended{Template: map[string]interface{}{ - resourcesFieldName: []interface{}{ - map[string]interface{}{ - typeFieldName: nsgResourceType, - }, - map[string]interface{}{ - typeFieldName: rtResourceType, - }, + Name: ptr.To("deployment"), + Properties: &armresources.DeploymentPropertiesExtended{}, + }, + }, + Templates: map[string]interface{}{ + "deployment": map[string]interface{}{ + "resources": []interface{}{ + map[string]interface{}{ + "type": "Microsoft.Network/networkSecurityGroups", + "name": "test-nsg", }, - }}, + }, }, }, }, @@ -144,9 +149,9 @@ func TestHasInstance(t *testing.T) { defer ctrl.Finish() provider := newTestProvider(t) - mockVMSSClient := mockvmssclient.NewMockInterface(ctrl) - mockVMClient := mockvmclient.NewMockInterface(ctrl) - mockVMSSVMClient := mockvmssvmclient.NewMockInterface(ctrl) + mockVMSSClient := NewMockVirtualMachineScaleSetsClient(ctrl) + mockVMClient := NewMockVirtualMachinesClient(ctrl) + mockVMSSVMClient := NewMockVirtualMachineScaleSetVMsClient(ctrl) mockAgentpoolclient := NewMockAgentPoolsClient(ctrl) provider.azureManager.azClient.virtualMachinesClient = mockVMClient provider.azureManager.azClient.virtualMachineScaleSetsClient = mockVMSSClient @@ -157,13 +162,19 @@ func TestHasInstance(t *testing.T) { provider.azureManager.azureCache.enableVMsAgentPool = true // enable VMs agent pool to support mixed node group types // Simulate node groups and instances - expectedScaleSets := newTestVMSSList(3, "test-asg", "eastus", compute.Uniform) + expectedScaleSets := newTestVMSSList(3, "test-asg", "eastus", armcompute.OrchestrationModeUniform) expectedVMsPoolVMs := newTestVMsPoolVMList(3) expectedVMSSVMs := newTestVMSSVMList(3) - mockVMSSClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup).Return(expectedScaleSets, nil).AnyTimes() - mockVMClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup).Return(expectedVMsPoolVMs, nil).AnyTimes() - mockVMSSVMClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup, "test-asg", gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes() + mockVMSSClient.EXPECT().NewListPager(provider.azureManager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetsClientListResponse] { + return getFakeVMSSListPager(expectedScaleSets) + }).AnyTimes() + mockVMClient.EXPECT().NewListPager(provider.azureManager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachinesClientListResponse] { + return getFakeVMListPager(expectedVMsPoolVMs) + }).AnyTimes() + mockVMSSVMClient.EXPECT().NewListPager(provider.azureManager.config.ResourceGroup, "test-asg", gomock.Any()).DoAndReturn(func(string, string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetVMsClientListResponse] { + return getFakeVMSSVMListPager(expectedVMSSVMs) + }).AnyTimes() vmssType := armcontainerservice.AgentPoolTypeVirtualMachineScaleSets vmssPool := armcontainerservice.AgentPool{ Name: ptr.To("test-asg"), @@ -198,7 +209,7 @@ func TestHasInstance(t *testing.T) { provider.azureManager.forceRefresh() // Test HasInstance for a node from the VMSS pool - node := newApiNode(compute.Uniform, 0) + 
node := newApiNode(armcompute.OrchestrationModeUniform, 0) hasInstance, err := provider.azureManager.azureCache.HasInstance(node.Spec.ProviderID) assert.True(t, hasInstance) assert.NoError(t, err) @@ -216,9 +227,9 @@ func TestUnownedInstancesFallbackToDeletionTaint(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() provider := newTestProvider(t) - mockVMSSClient := mockvmssclient.NewMockInterface(ctrl) - mockVMClient := mockvmclient.NewMockInterface(ctrl) - mockVMSSVMClient := mockvmssvmclient.NewMockInterface(ctrl) + mockVMSSClient := NewMockVirtualMachineScaleSetsClient(ctrl) + mockVMClient := NewMockVirtualMachinesClient(ctrl) + mockVMSSVMClient := NewMockVirtualMachineScaleSetVMsClient(ctrl) provider.azureManager.azClient.virtualMachinesClient = mockVMClient provider.azureManager.azClient.virtualMachineScaleSetsClient = mockVMSSClient provider.azureManager.azClient.virtualMachineScaleSetVMsClient = mockVMSSVMClient @@ -234,7 +245,9 @@ func TestUnownedInstancesFallbackToDeletionTaint(t *testing.T) { } // Mock responses to simulate that the instance belongs to a VMSS not in any registered ASG expectedVMSSVMs := newTestVMSSVMList(1) - mockVMSSVMClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup, "unregistered-vmss-instance-id", gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes() + mockVMSSVMClient.EXPECT().NewListPager(provider.azureManager.config.ResourceGroup, "unregistered-vmss-instance-id", gomock.Any()).DoAndReturn(func(string, string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetVMsClientListResponse] { + return getFakeVMSSVMListPager(expectedVMSSVMs) + }).AnyTimes() // Call HasInstance and check the result hasInstance, err := provider.azureManager.azureCache.HasInstance(unregisteredVMSSInstance.Spec.ProviderID) @@ -281,9 +294,9 @@ func TestHasInstanceProviderIDErrorValidation(t *testing.T) { func TestMixedNodeGroups(t *testing.T) { ctrl := gomock.NewController(t) provider := newTestProvider(t) - mockVMSSClient := mockvmssclient.NewMockInterface(ctrl) - mockVMClient := mockvmclient.NewMockInterface(ctrl) - mockVMSSVMClient := mockvmssvmclient.NewMockInterface(ctrl) + mockVMSSClient := NewMockVirtualMachineScaleSetsClient(ctrl) + mockVMClient := NewMockVirtualMachinesClient(ctrl) + mockVMSSVMClient := NewMockVirtualMachineScaleSetVMsClient(ctrl) mockAgentpoolclient := NewMockAgentPoolsClient(ctrl) provider.azureManager.azClient.virtualMachinesClient = mockVMClient provider.azureManager.azClient.virtualMachineScaleSetsClient = mockVMSSClient @@ -293,13 +306,19 @@ func TestMixedNodeGroups(t *testing.T) { provider.azureManager.azureCache.enableVMsAgentPool = true // enable VMs agent pool to support mixed node group types provider.azureManager.azClient.agentPoolClient = mockAgentpoolclient - expectedScaleSets := newTestVMSSList(3, "test-asg", "eastus", compute.Uniform) + expectedScaleSets := newTestVMSSList(3, "test-asg", "eastus", armcompute.OrchestrationModeUniform) expectedVMsPoolVMs := newTestVMsPoolVMList(3) expectedVMSSVMs := newTestVMSSVMList(3) - mockVMSSClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup).Return(expectedScaleSets, nil).AnyTimes() - mockVMClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup).Return(expectedVMsPoolVMs, nil).AnyTimes() - mockVMSSVMClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup, "test-asg", gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes() + 
mockVMSSClient.EXPECT().NewListPager(provider.azureManager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetsClientListResponse] { + return getFakeVMSSListPager(expectedScaleSets) + }).AnyTimes() + mockVMClient.EXPECT().NewListPager(provider.azureManager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachinesClientListResponse] { + return getFakeVMListPager(expectedVMsPoolVMs) + }).AnyTimes() + mockVMSSVMClient.EXPECT().NewListPager(provider.azureManager.config.ResourceGroup, "test-asg", gomock.Any()).DoAndReturn(func(string, string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetVMsClientListResponse] { + return getFakeVMSSVMListPager(expectedVMSSVMs) + }).AnyTimes() vmssType := armcontainerservice.AgentPoolTypeVirtualMachineScaleSets vmssPool := armcontainerservice.AgentPool{ @@ -332,7 +351,7 @@ func TestMixedNodeGroups(t *testing.T) { provider.azureManager.forceRefresh() // node from vmss pool - node := newApiNode(compute.Uniform, 0) + node := newApiNode(armcompute.OrchestrationModeUniform, 0) group, err := provider.NodeGroupForNode(node) assert.NoError(t, err) assert.NotNil(t, group, "Group should not be nil") @@ -353,7 +372,7 @@ func TestMixedNodeGroups(t *testing.T) { func TestNodeGroupForNode(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - orchestrationModes := []compute.OrchestrationMode{compute.Uniform, compute.Flexible} + orchestrationModes := []armcompute.OrchestrationMode{armcompute.OrchestrationModeUniform, armcompute.OrchestrationModeFlexible} expectedVMSSVMs := newTestVMSSVMList(3) expectedVMs := newTestVMList(3) @@ -362,20 +381,27 @@ func TestNodeGroupForNode(t *testing.T) { t.Run(fmt.Sprintf("OrchestrationMode_%v", orchMode), func(t *testing.T) { expectedScaleSets := newTestVMSSList(3, "test-asg", "eastus", orchMode) provider := newTestProvider(t) - mockVMSSClient := mockvmssclient.NewMockInterface(ctrl) - mockVMSSClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup).Return(expectedScaleSets, nil) + mockVMSSClient := NewMockVirtualMachineScaleSetsClient(ctrl) + mockVMSSClient.EXPECT().NewListPager(provider.azureManager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetsClientListResponse] { + return getFakeVMSSListPager(expectedScaleSets) + }) provider.azureManager.azClient.virtualMachineScaleSetsClient = mockVMSSClient - mockVMClient := mockvmclient.NewMockInterface(ctrl) + mockVMClient := NewMockVirtualMachinesClient(ctrl) provider.azureManager.azClient.virtualMachinesClient = mockVMClient - mockVMClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup).Return(expectedVMs, nil).AnyTimes() - - if orchMode == compute.Uniform { - mockVMSSVMClient := mockvmssvmclient.NewMockInterface(ctrl) - mockVMSSVMClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup, "test-asg", gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes() + mockVMClient.EXPECT().NewListPager(provider.azureManager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachinesClientListResponse] { + return getFakeVMListPager(expectedVMs) + }).AnyTimes() + + if orchMode == armcompute.OrchestrationModeUniform { + mockVMSSVMClient := NewMockVirtualMachineScaleSetVMsClient(ctrl) + 
mockVMSSVMClient.EXPECT().NewListPager(provider.azureManager.config.ResourceGroup, "test-asg", gomock.Any()).DoAndReturn(func(string, string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetVMsClientListResponse] { + return getFakeVMSSVMListPager(expectedVMSSVMs) + }).AnyTimes() provider.azureManager.azClient.virtualMachineScaleSetVMsClient = mockVMSSVMClient } else { provider.azureManager.config.EnableVmssFlexNodes = true - mockVMClient.EXPECT().ListVmssFlexVMsWithoutInstanceView(gomock.Any(), "test-asg").Return(expectedVMs, nil).AnyTimes() + // TODO: Need to implement ListVmssFlexVMsWithoutInstanceView mock or handle it differently + // mockVMClient.EXPECT().ListVmssFlexVMsWithoutInstanceView(gomock.Any(), "test-asg").Return(expectedVMs, nil).AnyTimes() } registered := provider.azureManager.RegisterNodeGroup( diff --git a/cluster-autoscaler/cloudprovider/azure/azure_fakes.go b/cluster-autoscaler/cloudprovider/azure/azure_fakes.go index adc38e80b39a..5a4325bee4fe 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_fakes.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_fakes.go @@ -19,12 +19,14 @@ package azure import ( "context" "fmt" + "net/http" "sync" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" - "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" "github.com/stretchr/testify/mock" - "sigs.k8s.io/cloud-provider-azure/pkg/retry" + "k8s.io/utils/ptr" ) const ( @@ -37,89 +39,193 @@ type DeploymentClientMock struct { mock.Mock mutex sync.Mutex - FakeStore map[string]resources.DeploymentExtended + FakeStore map[string]armresources.DeploymentExtended + // Store templates separately since DeploymentPropertiesExtended doesn't have Template field in SDK v2 + Templates map[string]interface{} } // Get gets the DeploymentExtended by deploymentName. -func (m *DeploymentClientMock) Get(ctx context.Context, resourceGroupName string, deploymentName string) (result resources.DeploymentExtended, err *retry.Error) { +func (m *DeploymentClientMock) Get(ctx context.Context, resourceGroupName string, deploymentName string, options *armresources.DeploymentsClientGetOptions) (armresources.DeploymentsClientGetResponse, error) { m.mutex.Lock() defer m.mutex.Unlock() deploy, ok := m.FakeStore[deploymentName] if !ok { - return result, retry.NewError(false, fmt.Errorf("deployment not found")) + return armresources.DeploymentsClientGetResponse{}, fmt.Errorf("deployment not found") } - return deploy, nil + return armresources.DeploymentsClientGetResponse{ + DeploymentExtended: deploy, + }, nil } // ExportTemplate exports the deployment's template. 
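Reviewer note, outside the diff: with the track 2 SDK the mock's Get mirrors the real client by returning a response envelope plus a plain error instead of the old (model, *retry.Error) pair. A minimal sketch of how calling code unwraps that envelope; the helper name and wiring are illustrative assumptions, not code from this change:

package azure // sketch only

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2"
)

// deploymentProvisioningState is a hypothetical helper showing the SDK v2 call shape:
// the model is embedded in the DeploymentsClientGetResponse envelope and errors are plain errors.
func deploymentProvisioningState(ctx context.Context, client *armresources.DeploymentsClient, rg, name string) (string, error) {
	resp, err := client.Get(ctx, rg, name, nil)
	if err != nil {
		return "", err
	}
	if resp.Properties == nil || resp.Properties.ProvisioningState == nil {
		return "", nil
	}
	return string(*resp.Properties.ProvisioningState), nil
}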
-func (m *DeploymentClientMock) ExportTemplate(ctx context.Context, resourceGroupName string, deploymentName string) (result resources.DeploymentExportResult, err *retry.Error) { +func (m *DeploymentClientMock) ExportTemplate(ctx context.Context, resourceGroupName string, deploymentName string, options *armresources.DeploymentsClientExportTemplateOptions) (armresources.DeploymentsClientExportTemplateResponse, error) { m.mutex.Lock() defer m.mutex.Unlock() - deploy, ok := m.FakeStore[deploymentName] + _, ok := m.FakeStore[deploymentName] if !ok { - return result, retry.NewError(false, fmt.Errorf("deployment not found")) + return armresources.DeploymentsClientExportTemplateResponse{}, fmt.Errorf("deployment not found") + } + + template, templateOk := m.Templates[deploymentName] + if !templateOk { + template = make(map[string]interface{}) } - return resources.DeploymentExportResult{ - Template: deploy.Properties.Template, + return armresources.DeploymentsClientExportTemplateResponse{ + DeploymentExportResult: armresources.DeploymentExportResult{ + Template: template, + }, }, nil } -// CreateOrUpdate creates or updates the Deployment. -func (m *DeploymentClientMock) CreateOrUpdate(ctx context.Context, resourceGroupName string, deploymentName string, parameters resources.Deployment, etag string) (err *retry.Error) { +// BeginCreateOrUpdate creates or updates the Deployment. +func (m *DeploymentClientMock) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, deploymentName string, parameters armresources.Deployment, options *armresources.DeploymentsClientBeginCreateOrUpdateOptions) (*runtime.Poller[armresources.DeploymentsClientCreateOrUpdateResponse], error) { m.mutex.Lock() defer m.mutex.Unlock() + if m.Templates == nil { + m.Templates = make(map[string]interface{}) + } + deploy, ok := m.FakeStore[deploymentName] if !ok { - deploy = resources.DeploymentExtended{ - Properties: &resources.DeploymentPropertiesExtended{}, + deploy = armresources.DeploymentExtended{ + Name: ptr.To(deploymentName), + Properties: &armresources.DeploymentPropertiesExtended{}, } - m.FakeStore[deploymentName] = deploy } deploy.Properties.Parameters = parameters.Properties.Parameters - deploy.Properties.Template = parameters.Properties.Template - return nil + deploy.Properties.TemplateLink = parameters.Properties.TemplateLink + + // Store the template separately if provided + if parameters.Properties.Template != nil { + m.Templates[deploymentName] = parameters.Properties.Template + } + + m.FakeStore[deploymentName] = deploy + + // Return a fake poller for the create/update operation + result := armresources.DeploymentsClientCreateOrUpdateResponse{ + DeploymentExtended: deploy, + } + handler := &fakePollerHandler[armresources.DeploymentsClientCreateOrUpdateResponse]{ + done: true, + result: result, + } + + return runtime.NewPoller( + &http.Response{StatusCode: http.StatusAccepted}, + runtime.Pipeline{}, + &runtime.NewPollerOptions[armresources.DeploymentsClientCreateOrUpdateResponse]{ + Handler: handler, + }, + ) } -// List gets all the deployments for a resource group. -func (m *DeploymentClientMock) List(ctx context.Context, resourceGroupName string) (result []resources.DeploymentExtended, err *retry.Error) { +// NewListByResourceGroupPager gets all the deployments for a resource group. 
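Reviewer note, outside the diff: BeginCreateOrUpdate above hands back a runtime.Poller built from a handler that already reports itself done, so tests can drive it exactly like a real long-running operation. A hedged usage sketch; the function name is illustrative:

package azure // sketch only

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2"
)

// waitForFakeDeployment consumes the poller returned by the mock; with a pre-completed
// handler, PollUntilDone returns the stored result on the first poll without sleeping.
func waitForFakeDeployment(ctx context.Context, poller *runtime.Poller[armresources.DeploymentsClientCreateOrUpdateResponse]) (armresources.DeploymentsClientCreateOrUpdateResponse, error) {
	return poller.PollUntilDone(ctx, nil)
}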
+func (m *DeploymentClientMock) NewListByResourceGroupPager(resourceGroupName string, options *armresources.DeploymentsClientListByResourceGroupOptions) *runtime.Pager[armresources.DeploymentsClientListByResourceGroupResponse] { m.mutex.Lock() defer m.mutex.Unlock() - result = make([]resources.DeploymentExtended, 0) + result := make([]*armresources.DeploymentExtended, 0) for i := range m.FakeStore { - result = append(result, m.FakeStore[i]) + deploy := m.FakeStore[i] + result = append(result, &deploy) } - return result, nil + // Create a fake pager that returns all deployments + return runtime.NewPager(runtime.PagingHandler[armresources.DeploymentsClientListByResourceGroupResponse]{ + More: func(page armresources.DeploymentsClientListByResourceGroupResponse) bool { + return false + }, + Fetcher: func(ctx context.Context, page *armresources.DeploymentsClientListByResourceGroupResponse) (armresources.DeploymentsClientListByResourceGroupResponse, error) { + return armresources.DeploymentsClientListByResourceGroupResponse{ + DeploymentListResult: armresources.DeploymentListResult{ + Value: result, + }, + }, nil + }, + }) } -// Delete deletes the given deployment -func (m *DeploymentClientMock) Delete(ctx context.Context, resourceGroupName, deploymentName string) (err *retry.Error) { +// BeginDelete deletes the given deployment +func (m *DeploymentClientMock) BeginDelete(ctx context.Context, resourceGroupName, deploymentName string, options *armresources.DeploymentsClientBeginDeleteOptions) (*runtime.Poller[armresources.DeploymentsClientDeleteResponse], error) { m.mutex.Lock() defer m.mutex.Unlock() if _, ok := m.FakeStore[deploymentName]; !ok { - return retry.NewError(false, fmt.Errorf("there is no such a deployment with name %s", deploymentName)) + return nil, fmt.Errorf("there is no such a deployment with name %s", deploymentName) } delete(m.FakeStore, deploymentName) + delete(m.Templates, deploymentName) + + // Create a fake poller using NewPoller with a proper handler + handler := &fakePollerHandler[armresources.DeploymentsClientDeleteResponse]{ + done: true, + result: armresources.DeploymentsClientDeleteResponse{}, + } + + return runtime.NewPoller( + &http.Response{StatusCode: http.StatusAccepted}, + runtime.Pipeline{}, + &runtime.NewPollerOptions[armresources.DeploymentsClientDeleteResponse]{ + Handler: handler, + }, + ) +} + +// fakePollerHandler is a fake poller handler for testing +type fakePollerHandler[T any] struct { + mu sync.Mutex + done bool + result T +} - return +func (f *fakePollerHandler[T]) Done() bool { + f.mu.Lock() + defer f.mu.Unlock() + return f.done +} + +func (f *fakePollerHandler[T]) Poll(ctx context.Context) (*http.Response, error) { + f.mu.Lock() + defer f.mu.Unlock() + f.done = true + return &http.Response{StatusCode: http.StatusOK}, nil +} + +func (f *fakePollerHandler[T]) Result(ctx context.Context, out *T) error { + f.mu.Lock() + defer f.mu.Unlock() + *out = f.result + return nil +} + +// List is a helper method for tests that returns deployments as a slice. 
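Reviewer note, outside the diff: the single-page fake pager above is consumed through the standard More/NextPage loop, so production code that lists deployments works unchanged against either the real client or this mock. A minimal sketch; the helper name is an assumption:

package azure // sketch only

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2"
)

// collectDeployments drains a pager into a slice using the SDK v2 paging idiom.
func collectDeployments(ctx context.Context, pager *runtime.Pager[armresources.DeploymentsClientListByResourceGroupResponse]) ([]*armresources.DeploymentExtended, error) {
	var all []*armresources.DeploymentExtended
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			return nil, err
		}
		all = append(all, page.Value...)
	}
	return all, nil
}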
+func (m *DeploymentClientMock) List(ctx context.Context, resourceGroupName string) ([]armresources.DeploymentExtended, error) { + m.mutex.Lock() + defer m.mutex.Unlock() + + result := make([]armresources.DeploymentExtended, 0) + for i := range m.FakeStore { + result = append(result, m.FakeStore[i]) + } + + return result, nil } -func fakeVMSSWithTags(vmssName string, tags map[string]*string) compute.VirtualMachineScaleSet { +func fakeVMSSWithTags(vmssName string, tags map[string]*string) armcompute.VirtualMachineScaleSet { skuName := "Standard_D4_v2" var vmssCapacity int64 = 3 - return compute.VirtualMachineScaleSet{ + return armcompute.VirtualMachineScaleSet{ Name: &vmssName, - Sku: &compute.Sku{ + SKU: &armcompute.SKU{ Capacity: &vmssCapacity, Name: &skuName, }, diff --git a/cluster-autoscaler/cloudprovider/azure/azure_force_delete_scale_set.go b/cluster-autoscaler/cloudprovider/azure/azure_force_delete_scale_set.go index ee83119084e1..a3308656b512 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_force_delete_scale_set.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_force_delete_scale_set.go @@ -20,8 +20,8 @@ import ( "context" "strings" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" - "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" "k8s.io/klog/v2" "sigs.k8s.io/cloud-provider-azure/pkg/retry" @@ -62,19 +62,32 @@ var isolatedVMSizes = map[string]bool{ strings.ToLower("Standard_M128ms"): true, } -func (scaleSet *ScaleSet) deleteInstances(ctx context.Context, requiredIds *compute.VirtualMachineScaleSetVMInstanceRequiredIDs, commonAsgId string) (*azure.Future, *retry.Error) { +func (scaleSet *ScaleSet) deleteInstances(ctx context.Context, requiredIds *armcompute.VirtualMachineScaleSetVMInstanceRequiredIDs, commonAsgId string) (*runtime.Poller[armcompute.VirtualMachineScaleSetsClientDeleteInstancesResponse], *retry.Error) { scaleSet.instanceMutex.Lock() defer scaleSet.instanceMutex.Unlock() skuName := scaleSet.getSKU() resourceGroup := scaleSet.manager.config.ResourceGroup forceDelete := shouldForceDelete(skuName, scaleSet) - future, rerr := scaleSet.manager.azClient.virtualMachineScaleSetsClient.DeleteInstancesAsync(ctx, resourceGroup, commonAsgId, *requiredIds, forceDelete) - if forceDelete && isOperationNotAllowed(rerr) { - klog.Infof("falling back to normal delete for instances %v for %s", requiredIds.InstanceIds, scaleSet.Name) - return scaleSet.manager.azClient.virtualMachineScaleSetsClient.DeleteInstancesAsync(ctx, resourceGroup, commonAsgId, *requiredIds, false) + future, err := scaleSet.manager.azClient.virtualMachineScaleSetsClient.BeginDeleteInstances(ctx, resourceGroup, commonAsgId, *requiredIds, &armcompute.VirtualMachineScaleSetsClientBeginDeleteInstancesOptions{ + ForceDeletion: &forceDelete, + }) + if err != nil { + rerr := &retry.Error{RawError: err} + if forceDelete && isOperationNotAllowed(rerr) { + klog.Infof("falling back to normal delete for instances %v for %s", requiredIds.InstanceIDs, scaleSet.Name) + normalForceDelete := false + future2, err2 := scaleSet.manager.azClient.virtualMachineScaleSetsClient.BeginDeleteInstances(ctx, resourceGroup, commonAsgId, *requiredIds, &armcompute.VirtualMachineScaleSetsClientBeginDeleteInstancesOptions{ + ForceDeletion: &normalForceDelete, + }) + if err2 != nil { + return future2, &retry.Error{RawError: err2} + } + return future2, nil + } + return future, 
rerr } - return future, rerr + return future, nil } func shouldForceDelete(skuName string, scaleSet *ScaleSet) bool { diff --git a/cluster-autoscaler/cloudprovider/azure/azure_instance_gpu_sku.go b/cluster-autoscaler/cloudprovider/azure/azure_instance_gpu_sku.go index 577bf47845e0..77550866bdad 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_instance_gpu_sku.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_instance_gpu_sku.go @@ -19,7 +19,7 @@ package azure import ( "strings" - "github.com/Azure/skewer" + "github.com/Azure/skewer/v2" "github.com/pkg/errors" ) diff --git a/cluster-autoscaler/cloudprovider/azure/azure_manager.go b/cluster-autoscaler/cloudprovider/azure/azure_manager.go index 4ea25d1eb1b3..0b33595a9dfa 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_manager.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_manager.go @@ -401,11 +401,11 @@ func (m *AzureManager) getFilteredScaleSets(filter []labelAutoDiscoveryConfig) ( } curSize := int64(-1) - if scaleSet.Sku != nil && scaleSet.Sku.Capacity != nil { - curSize = *scaleSet.Sku.Capacity + if scaleSet.SKU != nil && scaleSet.SKU.Capacity != nil { + curSize = *scaleSet.SKU.Capacity } - dedicatedHost := scaleSet.VirtualMachineScaleSetProperties != nil && scaleSet.VirtualMachineScaleSetProperties.HostGroup != nil + dedicatedHost := scaleSet.Properties != nil && scaleSet.Properties.HostGroup != nil vmss, err := NewScaleSet(spec, m, curSize, dedicatedHost) if err != nil { diff --git a/cluster-autoscaler/cloudprovider/azure/azure_manager_test.go b/cluster-autoscaler/cloudprovider/azure/azure_manager_test.go index 4fe5273f0cda..746adb433859 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_manager_test.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_manager_test.go @@ -24,12 +24,10 @@ import ( "testing" "time" - "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/mockvmclient" - "sigs.k8s.io/cloud-provider-azure/pkg/retry" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" - "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources" - "github.com/Azure/go-autorest/autorest/date" "github.com/stretchr/testify/assert" "go.uber.org/mock/gomock" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" @@ -37,8 +35,6 @@ import ( "k8s.io/utils/ptr" azclient "sigs.k8s.io/cloud-provider-azure/pkg/azclient" azclients "sigs.k8s.io/cloud-provider-azure/pkg/azureclients" - "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/mockvmssclient" - "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/mockvmssvmclient" providerazureconsts "sigs.k8s.io/cloud-provider-azure/pkg/consts" providerazure "sigs.k8s.io/cloud-provider-azure/pkg/provider" providerazureconfig "sigs.k8s.io/cloud-provider-azure/pkg/provider/config" @@ -221,10 +217,14 @@ func TestCreateAzureManagerValidConfig(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - mockVMClient := mockvmclient.NewMockInterface(ctrl) - mockVMSSClient := mockvmssclient.NewMockInterface(ctrl) - mockVMSSClient.EXPECT().List(gomock.Any(), "fakeId").Return([]compute.VirtualMachineScaleSet{}, nil).Times(2) - mockVMClient.EXPECT().List(gomock.Any(), "fakeId").Return([]compute.VirtualMachine{}, nil).Times(2) + mockVMClient := NewMockVirtualMachinesClient(ctrl) + 
mockVMSSClient := NewMockVirtualMachineScaleSetsClient(ctrl) + mockVMSSClient.EXPECT().NewListPager("fakeId", gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetsClientListResponse] { + return getFakeVMSSListPager([]armcompute.VirtualMachineScaleSet{}) + }).Times(2) + mockVMClient.EXPECT().NewListPager("fakeId", gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachinesClientListResponse] { + return getFakeVMListPager([]armcompute.VirtualMachine{}) + }).Times(2) mockAzClient := &azClient{ virtualMachinesClient: mockVMClient, virtualMachineScaleSetsClient: mockVMSSClient, @@ -312,10 +312,14 @@ func TestCreateAzureManagerLegacyConfig(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - mockVMClient := mockvmclient.NewMockInterface(ctrl) - mockVMSSClient := mockvmssclient.NewMockInterface(ctrl) - mockVMSSClient.EXPECT().List(gomock.Any(), "fakeId").Return([]compute.VirtualMachineScaleSet{}, nil).Times(2) - mockVMClient.EXPECT().List(gomock.Any(), "fakeId").Return([]compute.VirtualMachine{}, nil).Times(2) + mockVMClient := NewMockVirtualMachinesClient(ctrl) + mockVMSSClient := NewMockVirtualMachineScaleSetsClient(ctrl) + mockVMSSClient.EXPECT().NewListPager("fakeId", gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetsClientListResponse] { + return getFakeVMSSListPager([]armcompute.VirtualMachineScaleSet{}) + }).Times(2) + mockVMClient.EXPECT().NewListPager("fakeId", gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachinesClientListResponse] { + return getFakeVMListPager([]armcompute.VirtualMachine{}) + }).Times(2) mockAzClient := &azClient{ virtualMachinesClient: mockVMClient, virtualMachineScaleSetsClient: mockVMSSClient, @@ -400,10 +404,14 @@ func TestCreateAzureManagerValidConfigForStandardVMType(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - mockVMClient := mockvmclient.NewMockInterface(ctrl) - mockVMClient.EXPECT().List(gomock.Any(), "fakeId").Return([]compute.VirtualMachine{}, nil).Times(2) - mockVMSSClient := mockvmssclient.NewMockInterface(ctrl) - mockVMSSClient.EXPECT().List(gomock.Any(), "fakeId").Return([]compute.VirtualMachineScaleSet{}, nil).Times(2) + mockVMClient := NewMockVirtualMachinesClient(ctrl) + mockVMClient.EXPECT().NewListPager("fakeId", gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachinesClientListResponse] { + return getFakeVMListPager([]armcompute.VirtualMachine{}) + }).Times(2) + mockVMSSClient := NewMockVirtualMachineScaleSetsClient(ctrl) + mockVMSSClient.EXPECT().NewListPager("fakeId", gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetsClientListResponse] { + return getFakeVMSSListPager([]armcompute.VirtualMachineScaleSet{}) + }).Times(2) mockAzClient := &azClient{ virtualMachinesClient: mockVMClient, virtualMachineScaleSetsClient: mockVMSSClient, @@ -523,10 +531,14 @@ func TestCreateAzureManagerValidConfigForVMsPool(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - mockVMClient := mockvmclient.NewMockInterface(ctrl) - mockVMSSClient := mockvmssclient.NewMockInterface(ctrl) - mockVMSSClient.EXPECT().List(gomock.Any(), "fakeId").Return([]compute.VirtualMachineScaleSet{}, nil).Times(2) - mockVMClient.EXPECT().List(gomock.Any(), "fakeId").Return([]compute.VirtualMachine{}, nil).Times(2) + mockVMClient := NewMockVirtualMachinesClient(ctrl) + 
mockVMSSClient := NewMockVirtualMachineScaleSetsClient(ctrl) + mockVMSSClient.EXPECT().NewListPager("fakeId", gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetsClientListResponse] { + return getFakeVMSSListPager([]armcompute.VirtualMachineScaleSet{}) + }).Times(2) + mockVMClient.EXPECT().NewListPager("fakeId", gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachinesClientListResponse] { + return getFakeVMListPager([]armcompute.VirtualMachine{}) + }).Times(2) mockAzClient := &azClient{ virtualMachinesClient: mockVMClient, virtualMachineScaleSetsClient: mockVMSSClient, @@ -615,10 +627,14 @@ func TestCreateAzureManagerWithNilConfig(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - mockVMClient := mockvmclient.NewMockInterface(ctrl) - mockVMSSClient := mockvmssclient.NewMockInterface(ctrl) - mockVMSSClient.EXPECT().List(gomock.Any(), "resourceGroup").Return([]compute.VirtualMachineScaleSet{}, nil).AnyTimes() - mockVMClient.EXPECT().List(gomock.Any(), "resourceGroup").Return([]compute.VirtualMachine{}, nil).AnyTimes() + mockVMClient := NewMockVirtualMachinesClient(ctrl) + mockVMSSClient := NewMockVirtualMachineScaleSetsClient(ctrl) + mockVMSSClient.EXPECT().NewListPager("resourceGroup", gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetsClientListResponse] { + return getFakeVMSSListPager([]armcompute.VirtualMachineScaleSet{}) + }).AnyTimes() + mockVMClient.EXPECT().NewListPager("resourceGroup", gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachinesClientListResponse] { + return getFakeVMListPager([]armcompute.VirtualMachine{}) + }).AnyTimes() mockAgentpoolclient := NewMockAgentPoolsClient(ctrl) vmspool := getTestVMsAgentPool(false) fakeAPListPager := getFakeAgentpoolListPager(&vmspool) @@ -874,10 +890,14 @@ func TestCreateAzureManagerWithEnvOverridingConfig(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - mockVMClient := mockvmclient.NewMockInterface(ctrl) - mockVMSSClient := mockvmssclient.NewMockInterface(ctrl) - mockVMSSClient.EXPECT().List(gomock.Any(), "resourceGroup").Return([]compute.VirtualMachineScaleSet{}, nil).AnyTimes() - mockVMClient.EXPECT().List(gomock.Any(), "resourceGroup").Return([]compute.VirtualMachine{}, nil).AnyTimes() + mockVMClient := NewMockVirtualMachinesClient(ctrl) + mockVMSSClient := NewMockVirtualMachineScaleSetsClient(ctrl) + mockVMSSClient.EXPECT().NewListPager("resourceGroup", gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetsClientListResponse] { + return getFakeVMSSListPager([]armcompute.VirtualMachineScaleSet{}) + }).AnyTimes() + mockVMClient.EXPECT().NewListPager("resourceGroup", gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachinesClientListResponse] { + return getFakeVMListPager([]armcompute.VirtualMachine{}) + }).AnyTimes() mockAzClient := &azClient{ virtualMachinesClient: mockVMClient, virtualMachineScaleSetsClient: mockVMSSClient, @@ -1028,28 +1048,35 @@ func TestFetchExplicitNodeGroups(t *testing.T) { }, } - orchestrationModes := [2]compute.OrchestrationMode{compute.Uniform, compute.Flexible} + orchestrationModes := [2]armcompute.OrchestrationMode{armcompute.OrchestrationModeUniform, armcompute.OrchestrationModeFlexible} expectedVMSSVMs := newTestVMSSVMList(3) expectedVMs := newTestVMList(3) for _, orchMode := range orchestrationModes { manager := 
newTestAzureManager(t) - expectedScaleSets := newTestVMSSList(3, "test-asg", "eastus", compute.Uniform) + expectedScaleSets := newTestVMSSList(3, "test-asg", "eastus", armcompute.OrchestrationModeUniform) - mockVMSSClient := mockvmssclient.NewMockInterface(ctrl) - mockVMSSClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup).Return(expectedScaleSets, nil).AnyTimes() + mockVMSSClient := NewMockVirtualMachineScaleSetsClient(ctrl) + mockVMSSClient.EXPECT().NewListPager(manager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetsClientListResponse] { + return getFakeVMSSListPager(expectedScaleSets) + }).AnyTimes() manager.azClient.virtualMachineScaleSetsClient = mockVMSSClient - if orchMode == compute.Uniform { + if orchMode == armcompute.OrchestrationModeUniform { - mockVMSSVMClient := mockvmssvmclient.NewMockInterface(ctrl) - mockVMSSVMClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup, "test-asg", gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes() + mockVMSSVMClient := NewMockVirtualMachineScaleSetVMsClient(ctrl) + mockVMSSVMClient.EXPECT().NewListPager(manager.config.ResourceGroup, "test-asg", gomock.Any()).DoAndReturn(func(string, string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetVMsClientListResponse] { + return getFakeVMSSVMListPager(expectedVMSSVMs) + }).AnyTimes() manager.azClient.virtualMachineScaleSetVMsClient = mockVMSSVMClient } else { - mockVMClient := mockvmclient.NewMockInterface(ctrl) + mockVMClient := NewMockVirtualMachinesClient(ctrl) manager.config.EnableVmssFlexNodes = true - mockVMClient.EXPECT().ListVmssFlexVMsWithoutInstanceView(gomock.Any(), "test-asg").Return(expectedVMs, nil).AnyTimes() + // TODO: ListVmssFlexVMsWithoutInstanceView needs to be added to interface or use NewListPager + mockVMClient.EXPECT().NewListPager("", gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachinesClientListResponse] { + return getFakeVMListPager(expectedVMs) + }).AnyTimes() manager.azClient.virtualMachinesClient = mockVMClient } @@ -1067,19 +1094,19 @@ func TestFetchExplicitNodeGroups(t *testing.T) { timeLayout := "2006-01-02 15:04:05" timeBenchMark, _ := time.Parse(timeLayout, "2000-01-01 00:00:00") testAS.manager.azClient.deploymentClient = &DeploymentClientMock{ - FakeStore: map[string]resources.DeploymentExtended{ + FakeStore: map[string]armresources.DeploymentExtended{ "cluster-autoscaler-0001": { Name: ptr.To("cluster-autoscaler-0001"), - Properties: &resources.DeploymentPropertiesExtended{ - ProvisioningState: ptr.To("Succeeded"), - Timestamp: &date.Time{Time: timeBenchMark.Add(2 * time.Minute)}, + Properties: &armresources.DeploymentPropertiesExtended{ + ProvisioningState: ptr.To(armresources.ProvisioningStateSucceeded), + Timestamp: ptr.To(timeBenchMark.Add(2 * time.Minute)), }, }, }, } testAS.manager.config.VMType = providerazureconsts.VMTypeStandard err := testAS.manager.fetchExplicitNodeGroups([]string{"1:5:testAS"}) - expectedErr := fmt.Errorf("failed to parse node group spec: %v", retry.NewError(false, fmt.Errorf("deployment not found")).Error()) + expectedErr := fmt.Errorf("failed to parse node group spec: %v", fmt.Errorf("deployment not found")) assert.Equal(t, expectedErr, err, "testAS.manager.fetchExplicitNodeGroups return error does not match, expected: %v, actual: %v", expectedErr, err) err = testAS.manager.fetchExplicitNodeGroups(nil) assert.NoError(t, err) @@ -1114,9 +1141,11 @@ func 
TestGetFilteredAutoscalingGroupsVmss(t *testing.T) { } manager := newTestAzureManager(t) - expectedScaleSets := []compute.VirtualMachineScaleSet{fakeVMSSWithTags(vmssName, map[string]*string{vmssTag: &vmssTagValue, "min": &min, "max": &max})} - mockVMSSClient := mockvmssclient.NewMockInterface(ctrl) - mockVMSSClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup).Return(expectedScaleSets, nil).AnyTimes() + expectedScaleSets := []armcompute.VirtualMachineScaleSet{fakeVMSSWithTags(vmssName, map[string]*string{vmssTag: &vmssTagValue, "min": &min, "max": &max})} + mockVMSSClient := NewMockVirtualMachineScaleSetsClient(ctrl) + mockVMSSClient.EXPECT().NewListPager(manager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetsClientListResponse] { + return getFakeVMSSListPager(expectedScaleSets) + }).AnyTimes() manager.azClient.virtualMachineScaleSetsClient = mockVMSSClient err := manager.forceRefresh() assert.NoError(t, err) @@ -1167,9 +1196,11 @@ func TestGetFilteredAutoscalingGroupsVmssWithConfiguredSizes(t *testing.T) { } manager := newTestAzureManager(t) - expectedScaleSets := []compute.VirtualMachineScaleSet{fakeVMSSWithTags(vmssName, map[string]*string{vmssTag: &vmssTagValue, vmssTag2: &vmssTagValue2})} - mockVMSSClient := mockvmssclient.NewMockInterface(ctrl) - mockVMSSClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup).Return(expectedScaleSets, nil).AnyTimes() + expectedScaleSets := []armcompute.VirtualMachineScaleSet{fakeVMSSWithTags(vmssName, map[string]*string{vmssTag: &vmssTagValue, vmssTag2: &vmssTagValue2})} + mockVMSSClient := NewMockVirtualMachineScaleSetsClient(ctrl) + mockVMSSClient.EXPECT().NewListPager(manager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetsClientListResponse] { + return getFakeVMSSListPager(expectedScaleSets) + }).AnyTimes() manager.azClient.virtualMachineScaleSetsClient = mockVMSSClient err := manager.forceRefresh() assert.NoError(t, err) @@ -1209,9 +1240,11 @@ func TestGetFilteredAutoscalingGroupsWithInvalidVMType(t *testing.T) { } manager := newTestAzureManager(t) - expectedScaleSets := []compute.VirtualMachineScaleSet{} - mockVMSSClient := mockvmssclient.NewMockInterface(ctrl) - mockVMSSClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup).Return(expectedScaleSets, nil).AnyTimes() + expectedScaleSets := []armcompute.VirtualMachineScaleSet{} + mockVMSSClient := NewMockVirtualMachineScaleSetsClient(ctrl) + mockVMSSClient.EXPECT().NewListPager(manager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetsClientListResponse] { + return getFakeVMSSListPager(expectedScaleSets) + }).AnyTimes() manager.azClient.virtualMachineScaleSetsClient = mockVMSSClient manager.config.VMType = "invalidVMType" @@ -1245,19 +1278,25 @@ func TestFetchAutoAsgsVmss(t *testing.T) { NodeGroupAutoDiscoverySpecs: []string{fmt.Sprintf("label:%s=%s", vmssTag, vmssTagValue)}, } - expectedScaleSets := []compute.VirtualMachineScaleSet{fakeVMSSWithTags(vmssName, map[string]*string{vmssTag: &vmssTagValue, "min": &minString, "max": &maxString})} + expectedScaleSets := []armcompute.VirtualMachineScaleSet{fakeVMSSWithTags(vmssName, map[string]*string{vmssTag: &vmssTagValue, "min": &minString, "max": &maxString})} expectedVMSSVMs := newTestVMSSVMList(1) manager := newTestAzureManager(t) - mockVMSSClient := mockvmssclient.NewMockInterface(ctrl) - 
mockVMSSClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup).Return(expectedScaleSets, nil).AnyTimes() + mockVMSSClient := NewMockVirtualMachineScaleSetsClient(ctrl) + mockVMSSClient.EXPECT().NewListPager(manager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetsClientListResponse] { + return getFakeVMSSListPager(expectedScaleSets) + }).AnyTimes() manager.azClient.virtualMachineScaleSetsClient = mockVMSSClient - mockVMSSVMClient := mockvmssvmclient.NewMockInterface(ctrl) - mockVMSSVMClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup, vmssName, gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes() + mockVMSSVMClient := NewMockVirtualMachineScaleSetVMsClient(ctrl) + mockVMSSVMClient.EXPECT().NewListPager(manager.config.ResourceGroup, vmssName, gomock.Any()).DoAndReturn(func(string, string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetVMsClientListResponse] { + return getFakeVMSSVMListPager(expectedVMSSVMs) + }).AnyTimes() manager.azClient.virtualMachineScaleSetVMsClient = mockVMSSVMClient - mockVMClient := mockvmclient.NewMockInterface(ctrl) + mockVMClient := NewMockVirtualMachinesClient(ctrl) manager.azClient.virtualMachinesClient = mockVMClient - mockVMClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup).Return([]compute.VirtualMachine{}, nil).AnyTimes() + mockVMClient.EXPECT().NewListPager(manager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachinesClientListResponse] { + return getFakeVMListPager([]armcompute.VirtualMachine{}) + }).AnyTimes() err := manager.forceRefresh() assert.NoError(t, err) @@ -1360,18 +1399,24 @@ func TestVMSSNotFound(t *testing.T) { // client setup ctrl := gomock.NewController(t) defer ctrl.Finish() - mockVMSSClient := mockvmssclient.NewMockInterface(ctrl) - mockVMClient := mockvmclient.NewMockInterface(ctrl) - mockVMSSVMClient := mockvmssvmclient.NewMockInterface(ctrl) + mockVMSSClient := NewMockVirtualMachineScaleSetsClient(ctrl) + mockVMClient := NewMockVirtualMachinesClient(ctrl) + mockVMSSVMClient := NewMockVirtualMachineScaleSetVMsClient(ctrl) client := azClient{} client.virtualMachineScaleSetsClient = mockVMSSClient client.virtualMachinesClient = mockVMClient client.virtualMachineScaleSetVMsClient = mockVMSSVMClient // Expect that no vmss are present in the vmss client - mockVMSSVMClient.EXPECT().List(gomock.Any(), "fakeId", testASG, gomock.Any()).Return([]compute.VirtualMachineScaleSetVM{}, nil).AnyTimes() - mockVMClient.EXPECT().List(gomock.Any(), "fakeId").Return([]compute.VirtualMachine{}, nil).AnyTimes() - mockVMSSClient.EXPECT().List(gomock.Any(), "fakeId").Return([]compute.VirtualMachineScaleSet{}, nil).AnyTimes() + mockVMSSVMClient.EXPECT().NewListPager("fakeId", testASG, gomock.Any()).DoAndReturn(func(string, string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetVMsClientListResponse] { + return getFakeVMSSVMListPager([]armcompute.VirtualMachineScaleSetVM{}) + }).AnyTimes() + mockVMClient.EXPECT().NewListPager("fakeId", gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachinesClientListResponse] { + return getFakeVMListPager([]armcompute.VirtualMachine{}) + }).AnyTimes() + mockVMSSClient.EXPECT().NewListPager("fakeId", gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetsClientListResponse] { + return getFakeVMSSListPager([]armcompute.VirtualMachineScaleSet{}) + 
}).AnyTimes() // Add explicit node group to look for during init ngdo := cloudprovider.NodeGroupDiscoveryOptions{ diff --git a/cluster-autoscaler/cloudprovider/azure/azure_mock_agentpool_client.go b/cluster-autoscaler/cloudprovider/azure/azure_mock_agentpool_client.go deleted file mode 100644 index 0e63d30b6465..000000000000 --- a/cluster-autoscaler/cloudprovider/azure/azure_mock_agentpool_client.go +++ /dev/null @@ -1,108 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package azure - -import ( - context "context" - reflect "reflect" - - runtime "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" - armcontainerservice "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v5" - gomock "go.uber.org/mock/gomock" -) - -// MockAgentPoolsClient is a mock of AgentPoolsClient interface. -type MockAgentPoolsClient struct { - ctrl *gomock.Controller - recorder *MockAgentPoolsClientMockRecorder -} - -// MockAgentPoolsClientMockRecorder is the mock recorder for MockAgentPoolsClient. -type MockAgentPoolsClientMockRecorder struct { - mock *MockAgentPoolsClient -} - -// NewMockAgentPoolsClient creates a new mock instance. -func NewMockAgentPoolsClient(ctrl *gomock.Controller) *MockAgentPoolsClient { - mock := &MockAgentPoolsClient{ctrl: ctrl} - mock.recorder = &MockAgentPoolsClientMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockAgentPoolsClient) EXPECT() *MockAgentPoolsClientMockRecorder { - return m.recorder -} - -// BeginCreateOrUpdate mocks base method. -func (m *MockAgentPoolsClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName, resourceName, agentPoolName string, parameters armcontainerservice.AgentPool, options *armcontainerservice.AgentPoolsClientBeginCreateOrUpdateOptions) (*runtime.Poller[armcontainerservice.AgentPoolsClientCreateOrUpdateResponse], error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "BeginCreateOrUpdate", ctx, resourceGroupName, resourceName, agentPoolName, parameters, options) - ret0, _ := ret[0].(*runtime.Poller[armcontainerservice.AgentPoolsClientCreateOrUpdateResponse]) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// BeginCreateOrUpdate indicates an expected call of BeginCreateOrUpdate. -func (mr *MockAgentPoolsClientMockRecorder) BeginCreateOrUpdate(ctx, resourceGroupName, resourceName, agentPoolName, parameters, options any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeginCreateOrUpdate", reflect.TypeOf((*MockAgentPoolsClient)(nil).BeginCreateOrUpdate), ctx, resourceGroupName, resourceName, agentPoolName, parameters, options) -} - -// BeginDeleteMachines mocks base method. 
-func (m *MockAgentPoolsClient) BeginDeleteMachines(ctx context.Context, resourceGroupName, resourceName, agentPoolName string, machines armcontainerservice.AgentPoolDeleteMachinesParameter, options *armcontainerservice.AgentPoolsClientBeginDeleteMachinesOptions) (*runtime.Poller[armcontainerservice.AgentPoolsClientDeleteMachinesResponse], error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "BeginDeleteMachines", ctx, resourceGroupName, resourceName, agentPoolName, machines, options) - ret0, _ := ret[0].(*runtime.Poller[armcontainerservice.AgentPoolsClientDeleteMachinesResponse]) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// BeginDeleteMachines indicates an expected call of BeginDeleteMachines. -func (mr *MockAgentPoolsClientMockRecorder) BeginDeleteMachines(ctx, resourceGroupName, resourceName, agentPoolName, machines, options any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeginDeleteMachines", reflect.TypeOf((*MockAgentPoolsClient)(nil).BeginDeleteMachines), ctx, resourceGroupName, resourceName, agentPoolName, machines, options) -} - -// Get mocks base method. -func (m *MockAgentPoolsClient) Get(ctx context.Context, resourceGroupName, resourceName, agentPoolName string, options *armcontainerservice.AgentPoolsClientGetOptions) (armcontainerservice.AgentPoolsClientGetResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, resourceName, agentPoolName, options) - ret0, _ := ret[0].(armcontainerservice.AgentPoolsClientGetResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Get indicates an expected call of Get. -func (mr *MockAgentPoolsClientMockRecorder) Get(ctx, resourceGroupName, resourceName, agentPoolName, options any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockAgentPoolsClient)(nil).Get), ctx, resourceGroupName, resourceName, agentPoolName, options) -} - -// NewListPager mocks base method. -func (m *MockAgentPoolsClient) NewListPager(resourceGroupName, resourceName string, options *armcontainerservice.AgentPoolsClientListOptions) *runtime.Pager[armcontainerservice.AgentPoolsClientListResponse] { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewListPager", resourceGroupName, resourceName, options) - ret0, _ := ret[0].(*runtime.Pager[armcontainerservice.AgentPoolsClientListResponse]) - return ret0 -} - -// NewListPager indicates an expected call of NewListPager. -func (mr *MockAgentPoolsClientMockRecorder) NewListPager(resourceGroupName, resourceName, options any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewListPager", reflect.TypeOf((*MockAgentPoolsClient)(nil).NewListPager), resourceGroupName, resourceName, options) -} diff --git a/cluster-autoscaler/cloudprovider/azure/azure_mock_clients.go b/cluster-autoscaler/cloudprovider/azure/azure_mock_clients.go new file mode 100644 index 000000000000..42ffd5bca208 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/azure/azure_mock_clients.go @@ -0,0 +1,684 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by MockGen. DO NOT EDIT. +// Source: azure_client.go +// +// Generated by this command: +// +// mockgen -source=azure_client.go -destination azure_mock_clients.go -package azure +// + +// Package azure is a generated GoMock package. +package azure + +import ( + context "context" + reflect "reflect" + + runtime "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" + armcontainerservice "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v5" + armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v7" + armresources "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" + armstorage "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage" + gomock "go.uber.org/mock/gomock" +) + +// MockVirtualMachineScaleSetsClient is a mock of VirtualMachineScaleSetsClient interface. +type MockVirtualMachineScaleSetsClient struct { + ctrl *gomock.Controller + recorder *MockVirtualMachineScaleSetsClientMockRecorder + isgomock struct{} +} + +// MockVirtualMachineScaleSetsClientMockRecorder is the mock recorder for MockVirtualMachineScaleSetsClient. +type MockVirtualMachineScaleSetsClientMockRecorder struct { + mock *MockVirtualMachineScaleSetsClient +} + +// NewMockVirtualMachineScaleSetsClient creates a new mock instance. +func NewMockVirtualMachineScaleSetsClient(ctrl *gomock.Controller) *MockVirtualMachineScaleSetsClient { + mock := &MockVirtualMachineScaleSetsClient{ctrl: ctrl} + mock.recorder = &MockVirtualMachineScaleSetsClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockVirtualMachineScaleSetsClient) EXPECT() *MockVirtualMachineScaleSetsClientMockRecorder { + return m.recorder +} + +// BeginCreateOrUpdate mocks base method. +func (m *MockVirtualMachineScaleSetsClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName, vmScaleSetName string, parameters armcompute.VirtualMachineScaleSet, options *armcompute.VirtualMachineScaleSetsClientBeginCreateOrUpdateOptions) (*runtime.Poller[armcompute.VirtualMachineScaleSetsClientCreateOrUpdateResponse], error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BeginCreateOrUpdate", ctx, resourceGroupName, vmScaleSetName, parameters, options) + ret0, _ := ret[0].(*runtime.Poller[armcompute.VirtualMachineScaleSetsClientCreateOrUpdateResponse]) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BeginCreateOrUpdate indicates an expected call of BeginCreateOrUpdate. +func (mr *MockVirtualMachineScaleSetsClientMockRecorder) BeginCreateOrUpdate(ctx, resourceGroupName, vmScaleSetName, parameters, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeginCreateOrUpdate", reflect.TypeOf((*MockVirtualMachineScaleSetsClient)(nil).BeginCreateOrUpdate), ctx, resourceGroupName, vmScaleSetName, parameters, options) +} + +// BeginDelete mocks base method. 
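Reviewer note, outside the diff: these mocks are generated by mockgen from interfaces declared in azure_client.go. Inferred from the mocked methods, the scale sets interface presumably looks roughly like the sketch below; the authoritative definition lives in azure_client.go, so treat this as an approximation:

package azure // sketch only

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7"
)

// vmssClientShape approximates the interface the generated mock satisfies.
type vmssClientShape interface {
	Get(ctx context.Context, resourceGroupName, vmScaleSetName string, options *armcompute.VirtualMachineScaleSetsClientGetOptions) (armcompute.VirtualMachineScaleSetsClientGetResponse, error)
	NewListPager(resourceGroupName string, options *armcompute.VirtualMachineScaleSetsClientListOptions) *runtime.Pager[armcompute.VirtualMachineScaleSetsClientListResponse]
	BeginCreateOrUpdate(ctx context.Context, resourceGroupName, vmScaleSetName string, parameters armcompute.VirtualMachineScaleSet, options *armcompute.VirtualMachineScaleSetsClientBeginCreateOrUpdateOptions) (*runtime.Poller[armcompute.VirtualMachineScaleSetsClientCreateOrUpdateResponse], error)
	BeginDelete(ctx context.Context, resourceGroupName, vmScaleSetName string, options *armcompute.VirtualMachineScaleSetsClientBeginDeleteOptions) (*runtime.Poller[armcompute.VirtualMachineScaleSetsClientDeleteResponse], error)
	BeginDeleteInstances(ctx context.Context, resourceGroupName, vmScaleSetName string, vmInstanceIDs armcompute.VirtualMachineScaleSetVMInstanceRequiredIDs, options *armcompute.VirtualMachineScaleSetsClientBeginDeleteInstancesOptions) (*runtime.Poller[armcompute.VirtualMachineScaleSetsClientDeleteInstancesResponse], error)
}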
+func (m *MockVirtualMachineScaleSetsClient) BeginDelete(ctx context.Context, resourceGroupName, vmScaleSetName string, options *armcompute.VirtualMachineScaleSetsClientBeginDeleteOptions) (*runtime.Poller[armcompute.VirtualMachineScaleSetsClientDeleteResponse], error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BeginDelete", ctx, resourceGroupName, vmScaleSetName, options) + ret0, _ := ret[0].(*runtime.Poller[armcompute.VirtualMachineScaleSetsClientDeleteResponse]) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BeginDelete indicates an expected call of BeginDelete. +func (mr *MockVirtualMachineScaleSetsClientMockRecorder) BeginDelete(ctx, resourceGroupName, vmScaleSetName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeginDelete", reflect.TypeOf((*MockVirtualMachineScaleSetsClient)(nil).BeginDelete), ctx, resourceGroupName, vmScaleSetName, options) +} + +// BeginDeleteInstances mocks base method. +func (m *MockVirtualMachineScaleSetsClient) BeginDeleteInstances(ctx context.Context, resourceGroupName, vmScaleSetName string, vmInstanceIDs armcompute.VirtualMachineScaleSetVMInstanceRequiredIDs, options *armcompute.VirtualMachineScaleSetsClientBeginDeleteInstancesOptions) (*runtime.Poller[armcompute.VirtualMachineScaleSetsClientDeleteInstancesResponse], error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BeginDeleteInstances", ctx, resourceGroupName, vmScaleSetName, vmInstanceIDs, options) + ret0, _ := ret[0].(*runtime.Poller[armcompute.VirtualMachineScaleSetsClientDeleteInstancesResponse]) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BeginDeleteInstances indicates an expected call of BeginDeleteInstances. +func (mr *MockVirtualMachineScaleSetsClientMockRecorder) BeginDeleteInstances(ctx, resourceGroupName, vmScaleSetName, vmInstanceIDs, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeginDeleteInstances", reflect.TypeOf((*MockVirtualMachineScaleSetsClient)(nil).BeginDeleteInstances), ctx, resourceGroupName, vmScaleSetName, vmInstanceIDs, options) +} + +// Get mocks base method. +func (m *MockVirtualMachineScaleSetsClient) Get(ctx context.Context, resourceGroupName, vmScaleSetName string, options *armcompute.VirtualMachineScaleSetsClientGetOptions) (armcompute.VirtualMachineScaleSetsClientGetResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, vmScaleSetName, options) + ret0, _ := ret[0].(armcompute.VirtualMachineScaleSetsClientGetResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockVirtualMachineScaleSetsClientMockRecorder) Get(ctx, resourceGroupName, vmScaleSetName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockVirtualMachineScaleSetsClient)(nil).Get), ctx, resourceGroupName, vmScaleSetName, options) +} + +// NewListPager mocks base method. +func (m *MockVirtualMachineScaleSetsClient) NewListPager(resourceGroupName string, options *armcompute.VirtualMachineScaleSetsClientListOptions) *runtime.Pager[armcompute.VirtualMachineScaleSetsClientListResponse] { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewListPager", resourceGroupName, options) + ret0, _ := ret[0].(*runtime.Pager[armcompute.VirtualMachineScaleSetsClientListResponse]) + return ret0 +} + +// NewListPager indicates an expected call of NewListPager. 
+func (mr *MockVirtualMachineScaleSetsClientMockRecorder) NewListPager(resourceGroupName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewListPager", reflect.TypeOf((*MockVirtualMachineScaleSetsClient)(nil).NewListPager), resourceGroupName, options) +} + +// MockVirtualMachineScaleSetVMsClient is a mock of VirtualMachineScaleSetVMsClient interface. +type MockVirtualMachineScaleSetVMsClient struct { + ctrl *gomock.Controller + recorder *MockVirtualMachineScaleSetVMsClientMockRecorder + isgomock struct{} +} + +// MockVirtualMachineScaleSetVMsClientMockRecorder is the mock recorder for MockVirtualMachineScaleSetVMsClient. +type MockVirtualMachineScaleSetVMsClientMockRecorder struct { + mock *MockVirtualMachineScaleSetVMsClient +} + +// NewMockVirtualMachineScaleSetVMsClient creates a new mock instance. +func NewMockVirtualMachineScaleSetVMsClient(ctrl *gomock.Controller) *MockVirtualMachineScaleSetVMsClient { + mock := &MockVirtualMachineScaleSetVMsClient{ctrl: ctrl} + mock.recorder = &MockVirtualMachineScaleSetVMsClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockVirtualMachineScaleSetVMsClient) EXPECT() *MockVirtualMachineScaleSetVMsClientMockRecorder { + return m.recorder +} + +// BeginDelete mocks base method. +func (m *MockVirtualMachineScaleSetVMsClient) BeginDelete(ctx context.Context, resourceGroupName, vmScaleSetName, instanceID string, options *armcompute.VirtualMachineScaleSetVMsClientBeginDeleteOptions) (*runtime.Poller[armcompute.VirtualMachineScaleSetVMsClientDeleteResponse], error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BeginDelete", ctx, resourceGroupName, vmScaleSetName, instanceID, options) + ret0, _ := ret[0].(*runtime.Poller[armcompute.VirtualMachineScaleSetVMsClientDeleteResponse]) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BeginDelete indicates an expected call of BeginDelete. +func (mr *MockVirtualMachineScaleSetVMsClientMockRecorder) BeginDelete(ctx, resourceGroupName, vmScaleSetName, instanceID, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeginDelete", reflect.TypeOf((*MockVirtualMachineScaleSetVMsClient)(nil).BeginDelete), ctx, resourceGroupName, vmScaleSetName, instanceID, options) +} + +// BeginUpdate mocks base method. +func (m *MockVirtualMachineScaleSetVMsClient) BeginUpdate(ctx context.Context, resourceGroupName, vmScaleSetName, instanceID string, parameters armcompute.VirtualMachineScaleSetVM, options *armcompute.VirtualMachineScaleSetVMsClientBeginUpdateOptions) (*runtime.Poller[armcompute.VirtualMachineScaleSetVMsClientUpdateResponse], error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BeginUpdate", ctx, resourceGroupName, vmScaleSetName, instanceID, parameters, options) + ret0, _ := ret[0].(*runtime.Poller[armcompute.VirtualMachineScaleSetVMsClientUpdateResponse]) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BeginUpdate indicates an expected call of BeginUpdate. +func (mr *MockVirtualMachineScaleSetVMsClientMockRecorder) BeginUpdate(ctx, resourceGroupName, vmScaleSetName, instanceID, parameters, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeginUpdate", reflect.TypeOf((*MockVirtualMachineScaleSetVMsClient)(nil).BeginUpdate), ctx, resourceGroupName, vmScaleSetName, instanceID, parameters, options) +} + +// Get mocks base method. 
+func (m *MockVirtualMachineScaleSetVMsClient) Get(ctx context.Context, resourceGroupName, vmScaleSetName, instanceID string, options *armcompute.VirtualMachineScaleSetVMsClientGetOptions) (armcompute.VirtualMachineScaleSetVMsClientGetResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, vmScaleSetName, instanceID, options) + ret0, _ := ret[0].(armcompute.VirtualMachineScaleSetVMsClientGetResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockVirtualMachineScaleSetVMsClientMockRecorder) Get(ctx, resourceGroupName, vmScaleSetName, instanceID, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockVirtualMachineScaleSetVMsClient)(nil).Get), ctx, resourceGroupName, vmScaleSetName, instanceID, options) +} + +// NewListPager mocks base method. +func (m *MockVirtualMachineScaleSetVMsClient) NewListPager(resourceGroupName, virtualMachineScaleSetName string, options *armcompute.VirtualMachineScaleSetVMsClientListOptions) *runtime.Pager[armcompute.VirtualMachineScaleSetVMsClientListResponse] { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewListPager", resourceGroupName, virtualMachineScaleSetName, options) + ret0, _ := ret[0].(*runtime.Pager[armcompute.VirtualMachineScaleSetVMsClientListResponse]) + return ret0 +} + +// NewListPager indicates an expected call of NewListPager. +func (mr *MockVirtualMachineScaleSetVMsClientMockRecorder) NewListPager(resourceGroupName, virtualMachineScaleSetName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewListPager", reflect.TypeOf((*MockVirtualMachineScaleSetVMsClient)(nil).NewListPager), resourceGroupName, virtualMachineScaleSetName, options) +} + +// MockVirtualMachinesClient is a mock of VirtualMachinesClient interface. +type MockVirtualMachinesClient struct { + ctrl *gomock.Controller + recorder *MockVirtualMachinesClientMockRecorder + isgomock struct{} +} + +// MockVirtualMachinesClientMockRecorder is the mock recorder for MockVirtualMachinesClient. +type MockVirtualMachinesClientMockRecorder struct { + mock *MockVirtualMachinesClient +} + +// NewMockVirtualMachinesClient creates a new mock instance. +func NewMockVirtualMachinesClient(ctrl *gomock.Controller) *MockVirtualMachinesClient { + mock := &MockVirtualMachinesClient{ctrl: ctrl} + mock.recorder = &MockVirtualMachinesClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockVirtualMachinesClient) EXPECT() *MockVirtualMachinesClientMockRecorder { + return m.recorder +} + +// BeginCreateOrUpdate mocks base method. +func (m *MockVirtualMachinesClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName, vmName string, parameters armcompute.VirtualMachine, options *armcompute.VirtualMachinesClientBeginCreateOrUpdateOptions) (*runtime.Poller[armcompute.VirtualMachinesClientCreateOrUpdateResponse], error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BeginCreateOrUpdate", ctx, resourceGroupName, vmName, parameters, options) + ret0, _ := ret[0].(*runtime.Poller[armcompute.VirtualMachinesClientCreateOrUpdateResponse]) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BeginCreateOrUpdate indicates an expected call of BeginCreateOrUpdate. 
+func (mr *MockVirtualMachinesClientMockRecorder) BeginCreateOrUpdate(ctx, resourceGroupName, vmName, parameters, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeginCreateOrUpdate", reflect.TypeOf((*MockVirtualMachinesClient)(nil).BeginCreateOrUpdate), ctx, resourceGroupName, vmName, parameters, options) +} + +// BeginDelete mocks base method. +func (m *MockVirtualMachinesClient) BeginDelete(ctx context.Context, resourceGroupName, vmName string, options *armcompute.VirtualMachinesClientBeginDeleteOptions) (*runtime.Poller[armcompute.VirtualMachinesClientDeleteResponse], error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BeginDelete", ctx, resourceGroupName, vmName, options) + ret0, _ := ret[0].(*runtime.Poller[armcompute.VirtualMachinesClientDeleteResponse]) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BeginDelete indicates an expected call of BeginDelete. +func (mr *MockVirtualMachinesClientMockRecorder) BeginDelete(ctx, resourceGroupName, vmName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeginDelete", reflect.TypeOf((*MockVirtualMachinesClient)(nil).BeginDelete), ctx, resourceGroupName, vmName, options) +} + +// Get mocks base method. +func (m *MockVirtualMachinesClient) Get(ctx context.Context, resourceGroupName, vmName string, options *armcompute.VirtualMachinesClientGetOptions) (armcompute.VirtualMachinesClientGetResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, vmName, options) + ret0, _ := ret[0].(armcompute.VirtualMachinesClientGetResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockVirtualMachinesClientMockRecorder) Get(ctx, resourceGroupName, vmName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockVirtualMachinesClient)(nil).Get), ctx, resourceGroupName, vmName, options) +} + +// NewListPager mocks base method. +func (m *MockVirtualMachinesClient) NewListPager(resourceGroupName string, options *armcompute.VirtualMachinesClientListOptions) *runtime.Pager[armcompute.VirtualMachinesClientListResponse] { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewListPager", resourceGroupName, options) + ret0, _ := ret[0].(*runtime.Pager[armcompute.VirtualMachinesClientListResponse]) + return ret0 +} + +// NewListPager indicates an expected call of NewListPager. +func (mr *MockVirtualMachinesClientMockRecorder) NewListPager(resourceGroupName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewListPager", reflect.TypeOf((*MockVirtualMachinesClient)(nil).NewListPager), resourceGroupName, options) +} + +// MockDeploymentsClient is a mock of DeploymentsClient interface. +type MockDeploymentsClient struct { + ctrl *gomock.Controller + recorder *MockDeploymentsClientMockRecorder + isgomock struct{} +} + +// MockDeploymentsClientMockRecorder is the mock recorder for MockDeploymentsClient. +type MockDeploymentsClientMockRecorder struct { + mock *MockDeploymentsClient +} + +// NewMockDeploymentsClient creates a new mock instance. 
+func NewMockDeploymentsClient(ctrl *gomock.Controller) *MockDeploymentsClient { + mock := &MockDeploymentsClient{ctrl: ctrl} + mock.recorder = &MockDeploymentsClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockDeploymentsClient) EXPECT() *MockDeploymentsClientMockRecorder { + return m.recorder +} + +// BeginCreateOrUpdate mocks base method. +func (m *MockDeploymentsClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName, deploymentName string, parameters armresources.Deployment, options *armresources.DeploymentsClientBeginCreateOrUpdateOptions) (*runtime.Poller[armresources.DeploymentsClientCreateOrUpdateResponse], error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BeginCreateOrUpdate", ctx, resourceGroupName, deploymentName, parameters, options) + ret0, _ := ret[0].(*runtime.Poller[armresources.DeploymentsClientCreateOrUpdateResponse]) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BeginCreateOrUpdate indicates an expected call of BeginCreateOrUpdate. +func (mr *MockDeploymentsClientMockRecorder) BeginCreateOrUpdate(ctx, resourceGroupName, deploymentName, parameters, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeginCreateOrUpdate", reflect.TypeOf((*MockDeploymentsClient)(nil).BeginCreateOrUpdate), ctx, resourceGroupName, deploymentName, parameters, options) +} + +// BeginDelete mocks base method. +func (m *MockDeploymentsClient) BeginDelete(ctx context.Context, resourceGroupName, deploymentName string, options *armresources.DeploymentsClientBeginDeleteOptions) (*runtime.Poller[armresources.DeploymentsClientDeleteResponse], error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BeginDelete", ctx, resourceGroupName, deploymentName, options) + ret0, _ := ret[0].(*runtime.Poller[armresources.DeploymentsClientDeleteResponse]) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BeginDelete indicates an expected call of BeginDelete. +func (mr *MockDeploymentsClientMockRecorder) BeginDelete(ctx, resourceGroupName, deploymentName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeginDelete", reflect.TypeOf((*MockDeploymentsClient)(nil).BeginDelete), ctx, resourceGroupName, deploymentName, options) +} + +// ExportTemplate mocks base method. +func (m *MockDeploymentsClient) ExportTemplate(ctx context.Context, resourceGroupName, deploymentName string, options *armresources.DeploymentsClientExportTemplateOptions) (armresources.DeploymentsClientExportTemplateResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ExportTemplate", ctx, resourceGroupName, deploymentName, options) + ret0, _ := ret[0].(armresources.DeploymentsClientExportTemplateResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ExportTemplate indicates an expected call of ExportTemplate. +func (mr *MockDeploymentsClientMockRecorder) ExportTemplate(ctx, resourceGroupName, deploymentName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExportTemplate", reflect.TypeOf((*MockDeploymentsClient)(nil).ExportTemplate), ctx, resourceGroupName, deploymentName, options) +} + +// Get mocks base method. 
+func (m *MockDeploymentsClient) Get(ctx context.Context, resourceGroupName, deploymentName string, options *armresources.DeploymentsClientGetOptions) (armresources.DeploymentsClientGetResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, deploymentName, options) + ret0, _ := ret[0].(armresources.DeploymentsClientGetResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockDeploymentsClientMockRecorder) Get(ctx, resourceGroupName, deploymentName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockDeploymentsClient)(nil).Get), ctx, resourceGroupName, deploymentName, options) +} + +// NewListByResourceGroupPager mocks base method. +func (m *MockDeploymentsClient) NewListByResourceGroupPager(resourceGroupName string, options *armresources.DeploymentsClientListByResourceGroupOptions) *runtime.Pager[armresources.DeploymentsClientListByResourceGroupResponse] { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewListByResourceGroupPager", resourceGroupName, options) + ret0, _ := ret[0].(*runtime.Pager[armresources.DeploymentsClientListByResourceGroupResponse]) + return ret0 +} + +// NewListByResourceGroupPager indicates an expected call of NewListByResourceGroupPager. +func (mr *MockDeploymentsClientMockRecorder) NewListByResourceGroupPager(resourceGroupName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewListByResourceGroupPager", reflect.TypeOf((*MockDeploymentsClient)(nil).NewListByResourceGroupPager), resourceGroupName, options) +} + +// MockInterfacesClient is a mock of InterfacesClient interface. +type MockInterfacesClient struct { + ctrl *gomock.Controller + recorder *MockInterfacesClientMockRecorder + isgomock struct{} +} + +// MockInterfacesClientMockRecorder is the mock recorder for MockInterfacesClient. +type MockInterfacesClientMockRecorder struct { + mock *MockInterfacesClient +} + +// NewMockInterfacesClient creates a new mock instance. +func NewMockInterfacesClient(ctrl *gomock.Controller) *MockInterfacesClient { + mock := &MockInterfacesClient{ctrl: ctrl} + mock.recorder = &MockInterfacesClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockInterfacesClient) EXPECT() *MockInterfacesClientMockRecorder { + return m.recorder +} + +// BeginDelete mocks base method. +func (m *MockInterfacesClient) BeginDelete(ctx context.Context, resourceGroupName, networkInterfaceName string, options *armnetwork.InterfacesClientBeginDeleteOptions) (*runtime.Poller[armnetwork.InterfacesClientDeleteResponse], error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BeginDelete", ctx, resourceGroupName, networkInterfaceName, options) + ret0, _ := ret[0].(*runtime.Poller[armnetwork.InterfacesClientDeleteResponse]) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BeginDelete indicates an expected call of BeginDelete. +func (mr *MockInterfacesClientMockRecorder) BeginDelete(ctx, resourceGroupName, networkInterfaceName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeginDelete", reflect.TypeOf((*MockInterfacesClient)(nil).BeginDelete), ctx, resourceGroupName, networkInterfaceName, options) +} + +// Get mocks base method. 
+func (m *MockInterfacesClient) Get(ctx context.Context, resourceGroupName, networkInterfaceName string, options *armnetwork.InterfacesClientGetOptions) (armnetwork.InterfacesClientGetResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, networkInterfaceName, options) + ret0, _ := ret[0].(armnetwork.InterfacesClientGetResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockInterfacesClientMockRecorder) Get(ctx, resourceGroupName, networkInterfaceName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterfacesClient)(nil).Get), ctx, resourceGroupName, networkInterfaceName, options) +} + +// MockDisksClient is a mock of DisksClient interface. +type MockDisksClient struct { + ctrl *gomock.Controller + recorder *MockDisksClientMockRecorder + isgomock struct{} +} + +// MockDisksClientMockRecorder is the mock recorder for MockDisksClient. +type MockDisksClientMockRecorder struct { + mock *MockDisksClient +} + +// NewMockDisksClient creates a new mock instance. +func NewMockDisksClient(ctrl *gomock.Controller) *MockDisksClient { + mock := &MockDisksClient{ctrl: ctrl} + mock.recorder = &MockDisksClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockDisksClient) EXPECT() *MockDisksClientMockRecorder { + return m.recorder +} + +// BeginDelete mocks base method. +func (m *MockDisksClient) BeginDelete(ctx context.Context, resourceGroupName, diskName string, options *armcompute.DisksClientBeginDeleteOptions) (*runtime.Poller[armcompute.DisksClientDeleteResponse], error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BeginDelete", ctx, resourceGroupName, diskName, options) + ret0, _ := ret[0].(*runtime.Poller[armcompute.DisksClientDeleteResponse]) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BeginDelete indicates an expected call of BeginDelete. +func (mr *MockDisksClientMockRecorder) BeginDelete(ctx, resourceGroupName, diskName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeginDelete", reflect.TypeOf((*MockDisksClient)(nil).BeginDelete), ctx, resourceGroupName, diskName, options) +} + +// Get mocks base method. +func (m *MockDisksClient) Get(ctx context.Context, resourceGroupName, diskName string, options *armcompute.DisksClientGetOptions) (armcompute.DisksClientGetResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, diskName, options) + ret0, _ := ret[0].(armcompute.DisksClientGetResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockDisksClientMockRecorder) Get(ctx, resourceGroupName, diskName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockDisksClient)(nil).Get), ctx, resourceGroupName, diskName, options) +} + +// MockStorageAccountsClient is a mock of StorageAccountsClient interface. +type MockStorageAccountsClient struct { + ctrl *gomock.Controller + recorder *MockStorageAccountsClientMockRecorder + isgomock struct{} +} + +// MockStorageAccountsClientMockRecorder is the mock recorder for MockStorageAccountsClient. 
+type MockStorageAccountsClientMockRecorder struct { + mock *MockStorageAccountsClient +} + +// NewMockStorageAccountsClient creates a new mock instance. +func NewMockStorageAccountsClient(ctrl *gomock.Controller) *MockStorageAccountsClient { + mock := &MockStorageAccountsClient{ctrl: ctrl} + mock.recorder = &MockStorageAccountsClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockStorageAccountsClient) EXPECT() *MockStorageAccountsClientMockRecorder { + return m.recorder +} + +// GetProperties mocks base method. +func (m *MockStorageAccountsClient) GetProperties(ctx context.Context, resourceGroupName, accountName string, options *armstorage.AccountsClientGetPropertiesOptions) (armstorage.AccountsClientGetPropertiesResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetProperties", ctx, resourceGroupName, accountName, options) + ret0, _ := ret[0].(armstorage.AccountsClientGetPropertiesResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetProperties indicates an expected call of GetProperties. +func (mr *MockStorageAccountsClientMockRecorder) GetProperties(ctx, resourceGroupName, accountName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProperties", reflect.TypeOf((*MockStorageAccountsClient)(nil).GetProperties), ctx, resourceGroupName, accountName, options) +} + +// ListKeys mocks base method. +func (m *MockStorageAccountsClient) ListKeys(ctx context.Context, resourceGroupName, accountName string, options *armstorage.AccountsClientListKeysOptions) (armstorage.AccountsClientListKeysResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListKeys", ctx, resourceGroupName, accountName, options) + ret0, _ := ret[0].(armstorage.AccountsClientListKeysResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListKeys indicates an expected call of ListKeys. +func (mr *MockStorageAccountsClientMockRecorder) ListKeys(ctx, resourceGroupName, accountName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListKeys", reflect.TypeOf((*MockStorageAccountsClient)(nil).ListKeys), ctx, resourceGroupName, accountName, options) +} + +// MockResourceSKUsClient is a mock of ResourceSKUsClient interface. +type MockResourceSKUsClient struct { + ctrl *gomock.Controller + recorder *MockResourceSKUsClientMockRecorder + isgomock struct{} +} + +// MockResourceSKUsClientMockRecorder is the mock recorder for MockResourceSKUsClient. +type MockResourceSKUsClientMockRecorder struct { + mock *MockResourceSKUsClient +} + +// NewMockResourceSKUsClient creates a new mock instance. +func NewMockResourceSKUsClient(ctrl *gomock.Controller) *MockResourceSKUsClient { + mock := &MockResourceSKUsClient{ctrl: ctrl} + mock.recorder = &MockResourceSKUsClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockResourceSKUsClient) EXPECT() *MockResourceSKUsClientMockRecorder { + return m.recorder +} + +// NewListPager mocks base method. 
+func (m *MockResourceSKUsClient) NewListPager(options *armcompute.ResourceSKUsClientListOptions) *runtime.Pager[armcompute.ResourceSKUsClientListResponse] { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewListPager", options) + ret0, _ := ret[0].(*runtime.Pager[armcompute.ResourceSKUsClientListResponse]) + return ret0 +} + +// NewListPager indicates an expected call of NewListPager. +func (mr *MockResourceSKUsClientMockRecorder) NewListPager(options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewListPager", reflect.TypeOf((*MockResourceSKUsClient)(nil).NewListPager), options) +} + +// MockAgentPoolsClient is a mock of AgentPoolsClient interface. +type MockAgentPoolsClient struct { + ctrl *gomock.Controller + recorder *MockAgentPoolsClientMockRecorder + isgomock struct{} +} + +// MockAgentPoolsClientMockRecorder is the mock recorder for MockAgentPoolsClient. +type MockAgentPoolsClientMockRecorder struct { + mock *MockAgentPoolsClient +} + +// NewMockAgentPoolsClient creates a new mock instance. +func NewMockAgentPoolsClient(ctrl *gomock.Controller) *MockAgentPoolsClient { + mock := &MockAgentPoolsClient{ctrl: ctrl} + mock.recorder = &MockAgentPoolsClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockAgentPoolsClient) EXPECT() *MockAgentPoolsClientMockRecorder { + return m.recorder +} + +// BeginCreateOrUpdate mocks base method. +func (m *MockAgentPoolsClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName, resourceName, agentPoolName string, parameters armcontainerservice.AgentPool, options *armcontainerservice.AgentPoolsClientBeginCreateOrUpdateOptions) (*runtime.Poller[armcontainerservice.AgentPoolsClientCreateOrUpdateResponse], error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BeginCreateOrUpdate", ctx, resourceGroupName, resourceName, agentPoolName, parameters, options) + ret0, _ := ret[0].(*runtime.Poller[armcontainerservice.AgentPoolsClientCreateOrUpdateResponse]) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BeginCreateOrUpdate indicates an expected call of BeginCreateOrUpdate. +func (mr *MockAgentPoolsClientMockRecorder) BeginCreateOrUpdate(ctx, resourceGroupName, resourceName, agentPoolName, parameters, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeginCreateOrUpdate", reflect.TypeOf((*MockAgentPoolsClient)(nil).BeginCreateOrUpdate), ctx, resourceGroupName, resourceName, agentPoolName, parameters, options) +} + +// BeginDeleteMachines mocks base method. +func (m *MockAgentPoolsClient) BeginDeleteMachines(ctx context.Context, resourceGroupName, resourceName, agentPoolName string, machines armcontainerservice.AgentPoolDeleteMachinesParameter, options *armcontainerservice.AgentPoolsClientBeginDeleteMachinesOptions) (*runtime.Poller[armcontainerservice.AgentPoolsClientDeleteMachinesResponse], error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BeginDeleteMachines", ctx, resourceGroupName, resourceName, agentPoolName, machines, options) + ret0, _ := ret[0].(*runtime.Poller[armcontainerservice.AgentPoolsClientDeleteMachinesResponse]) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BeginDeleteMachines indicates an expected call of BeginDeleteMachines. 
+func (mr *MockAgentPoolsClientMockRecorder) BeginDeleteMachines(ctx, resourceGroupName, resourceName, agentPoolName, machines, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeginDeleteMachines", reflect.TypeOf((*MockAgentPoolsClient)(nil).BeginDeleteMachines), ctx, resourceGroupName, resourceName, agentPoolName, machines, options) +} + +// Get mocks base method. +func (m *MockAgentPoolsClient) Get(ctx context.Context, resourceGroupName, resourceName, agentPoolName string, options *armcontainerservice.AgentPoolsClientGetOptions) (armcontainerservice.AgentPoolsClientGetResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, resourceName, agentPoolName, options) + ret0, _ := ret[0].(armcontainerservice.AgentPoolsClientGetResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockAgentPoolsClientMockRecorder) Get(ctx, resourceGroupName, resourceName, agentPoolName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockAgentPoolsClient)(nil).Get), ctx, resourceGroupName, resourceName, agentPoolName, options) +} + +// NewListPager mocks base method. +func (m *MockAgentPoolsClient) NewListPager(resourceGroupName, resourceName string, options *armcontainerservice.AgentPoolsClientListOptions) *runtime.Pager[armcontainerservice.AgentPoolsClientListResponse] { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewListPager", resourceGroupName, resourceName, options) + ret0, _ := ret[0].(*runtime.Pager[armcontainerservice.AgentPoolsClientListResponse]) + return ret0 +} + +// NewListPager indicates an expected call of NewListPager. +func (mr *MockAgentPoolsClientMockRecorder) NewListPager(resourceGroupName, resourceName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewListPager", reflect.TypeOf((*MockAgentPoolsClient)(nil).NewListPager), resourceGroupName, resourceName, options) +} diff --git a/cluster-autoscaler/cloudprovider/azure/azure_scale_set.go b/cluster-autoscaler/cloudprovider/azure/azure_scale_set.go index 34fe9097eb18..7c4bcd20360c 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_scale_set.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_scale_set.go @@ -32,8 +32,9 @@ import ( "k8s.io/utils/ptr" "sigs.k8s.io/cloud-provider-azure/pkg/retry" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" - "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" ) var ( @@ -67,14 +68,14 @@ type ScaleSet struct { // Current Size (Number of VMs) // curSize tracks (and caches) the number of VMs in this ScaleSet. - // It is periodically updated from vmss.Sku.Capacity, with VMSS itself coming + // It is periodically updated from vmss.SKU.Capacity, with VMSS itself coming // either from azure.Cache (which periodically does VMSS.List) // or from direct VMSS.Get (always used for Spot). curSize int64 - // sizeRefreshPeriod is how often curSize is refreshed from vmss.Sku.Capacity. + // sizeRefreshPeriod is how often curSize is refreshed from vmss.SKU.Capacity. 
// (Set from azureCache.refreshInterval = VmssCacheTTL or [defaultMetadataCache]refreshInterval = 1min) sizeRefreshPeriod time.Duration - // lastSizeRefresh is the time curSize was last refreshed from vmss.Sku.Capacity. + // lastSizeRefresh is the time curSize was last refreshed from vmss.SKU.Capacity. // Together with sizeRefreshPeriod, it is used to determine if it is time to refresh curSize. lastSizeRefresh time.Time // getVmssSizeRefreshPeriod is how often curSize should be refreshed in case VMSS.Get call is used. @@ -185,11 +186,11 @@ func (scaleSet *ScaleSet) MaxSize() int { return scaleSet.maxSize } -func (scaleSet *ScaleSet) getVMSSFromCache() (compute.VirtualMachineScaleSet, error) { +func (scaleSet *ScaleSet) getVMSSFromCache() (armcompute.VirtualMachineScaleSet, error) { allVMSS := scaleSet.manager.azureCache.getScaleSets() if _, exists := allVMSS[scaleSet.Name]; !exists { - return compute.VirtualMachineScaleSet{}, fmt.Errorf("could not find vmss: %s", scaleSet.Name) + return armcompute.VirtualMachineScaleSet{}, fmt.Errorf("could not find vmss: %s", scaleSet.Name) } return allVMSS[scaleSet.Name], nil @@ -209,14 +210,14 @@ func (scaleSet *ScaleSet) getCurSize() (int64, *GetVMSSFailedError) { // // If VMSS state is updating, return the currentSize which would've been proactively incremented or decremented by CA // // unless it's -1. In that case, its better to initialize it. // if scaleSet.curSize != -1 && set.VirtualMachineScaleSetProperties != nil && - // strings.EqualFold(ptr.Deref(set.VirtualMachineScaleSetProperties.ProvisioningState, ""), string(compute.GalleryProvisioningStateUpdating)) { + // strings.EqualFold(ptr.Deref(set.VirtualMachineScaleSetProperties.ProvisioningState, ""), string(armcompute.GalleryProvisioningStateUpdating)) { // klog.V(3).Infof("VMSS %q is in updating state, returning cached size: %d", scaleSet.Name, scaleSet.curSize) // return scaleSet.curSize, nil // } effectiveSizeRefreshPeriod := scaleSet.sizeRefreshPeriod - // If the scale set is Spot, we want to have a more fresh view of the Sku.Capacity field. + // If the scale set is Spot, we want to have a more fresh view of the SKU.Capacity field. // This is because evictions can happen // at any given point in time, even before VMs are materialized as // nodes. We should be able to react to those and have the autoscaler @@ -235,16 +236,16 @@ func (scaleSet *ScaleSet) getCurSize() (int64, *GetVMSSFailedError) { ctx, cancel := getContextWithCancel() defer cancel() - var rerr *retry.Error - set, rerr = scaleSet.manager.azClient.virtualMachineScaleSetsClient.Get(ctx, scaleSet.manager.config.ResourceGroup, scaleSet.Name) - if rerr != nil { - klog.Errorf("failed to get information for VMSS: %s, error: %v", scaleSet.Name, rerr) - return -1, newGetVMSSFailedError(rerr.Error(), rerr.IsNotFound()) + resp, err := scaleSet.manager.azClient.virtualMachineScaleSetsClient.Get(ctx, scaleSet.manager.config.ResourceGroup, scaleSet.Name, nil) + if err != nil { + klog.Errorf("failed to get information for VMSS: %s, error: %v", scaleSet.Name, err) + return -1, newGetVMSSFailedError(err, false) } + set = resp.VirtualMachineScaleSet } vmssSizeMutex.Lock() - curSize := *set.Sku.Capacity + curSize := *set.SKU.Capacity vmssSizeMutex.Unlock() if scaleSet.curSize != curSize { @@ -270,8 +271,8 @@ func (scaleSet *ScaleSet) getScaleSetSize() (int64, error) { return size, nil } -// waitForCreateOrUpdate waits for the outcome of VMSS capacity update initiated via CreateOrUpdateAsync. 
-func (scaleSet *ScaleSet) waitForCreateOrUpdateInstances(future *azure.Future) { +// waitForCreateOrUpdate waits for the outcome of VMSS capacity update initiated via BeginCreateOrUpdate. +func (scaleSet *ScaleSet) waitForCreateOrUpdateInstances(poller *runtime.Poller[armcompute.VirtualMachineScaleSetsClientCreateOrUpdateResponse]) { var err error defer func() { @@ -288,11 +289,10 @@ func (scaleSet *ScaleSet) waitForCreateOrUpdateInstances(future *azure.Future) { ctx, cancel := getContextWithTimeout(asyncContextTimeout) defer cancel() - klog.V(3).Infof("Calling virtualMachineScaleSetsClient.WaitForCreateOrUpdateResult(%s)", scaleSet.Name) - httpResponse, err := scaleSet.manager.azClient.virtualMachineScaleSetsClient.WaitForCreateOrUpdateResult(ctx, future, scaleSet.manager.config.ResourceGroup) + klog.V(3).Infof("Calling poller.PollUntilDone for VMSS %s", scaleSet.Name) + _, err = poller.PollUntilDone(ctx, nil) - isSuccess, err := isSuccessHTTPResponse(httpResponse, err) - if isSuccess { + if err == nil { klog.V(3).Infof("waitForCreateOrUpdateInstances(%s) success", scaleSet.Name) return } @@ -359,25 +359,38 @@ func (scaleSet *ScaleSet) AtomicIncreaseSize(delta int) error { } // GetScaleSetVms returns list of nodes for the given scale set. -func (scaleSet *ScaleSet) GetScaleSetVms() ([]compute.VirtualMachineScaleSetVM, *retry.Error) { +func (scaleSet *ScaleSet) GetScaleSetVms() ([]armcompute.VirtualMachineScaleSetVM, *retry.Error) { ctx, cancel := getContextWithTimeout(vmssContextTimeout) defer cancel() - vmList, rerr := scaleSet.manager.azClient.virtualMachineScaleSetVMsClient.List(ctx, scaleSet.manager.config.ResourceGroup, - scaleSet.Name, string(compute.InstanceViewTypesInstanceView)) - - klog.V(4).Infof("GetScaleSetVms: scaleSet.Name: %s, vmList: %v", scaleSet.Name, vmList) + pager := scaleSet.manager.azClient.virtualMachineScaleSetVMsClient.NewListPager( + scaleSet.manager.config.ResourceGroup, + scaleSet.Name, + &armcompute.VirtualMachineScaleSetVMsClientListOptions{ + Expand: to.Ptr(string(armcompute.InstanceViewTypesInstanceView)), + }) - if rerr != nil { - klog.Errorf("VirtualMachineScaleSetVMsClient.List failed for %s: %v", scaleSet.Name, rerr) - return nil, rerr + var vmList []armcompute.VirtualMachineScaleSetVM + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + klog.Errorf("VirtualMachineScaleSetVMsClient.List failed for %s: %v", scaleSet.Name, err) + return nil, &retry.Error{RawError: err} + } + for _, vm := range page.Value { + if vm != nil { + vmList = append(vmList, *vm) + } + } } + klog.V(4).Infof("GetScaleSetVms: scaleSet.Name: %s, vmList count: %d", scaleSet.Name, len(vmList)) + return vmList, nil } // GetFlexibleScaleSetVms returns list of nodes for flexible scale set. 
-func (scaleSet *ScaleSet) GetFlexibleScaleSetVms() ([]compute.VirtualMachine, *retry.Error) { +func (scaleSet *ScaleSet) GetFlexibleScaleSetVms() ([]armcompute.VirtualMachine, *retry.Error) { klog.V(4).Infof("GetScaleSetVms: starts") ctx, cancel := getContextWithTimeout(vmssContextTimeout) defer cancel() @@ -392,12 +405,28 @@ func (scaleSet *ScaleSet) GetFlexibleScaleSetVms() ([]compute.VirtualMachine, *r } return nil, rerr } - vmList, rerr := scaleSet.manager.azClient.virtualMachinesClient.ListVmssFlexVMsWithoutInstanceView(ctx, *vmssInfo.ID) - if rerr != nil { - klog.Errorf("VirtualMachineScaleSetVMsClient.List failed for %s: %v", scaleSet.Name, rerr) - return nil, rerr + + // Use NewListPager with filter to get VMs belonging to this VMSS + filter := fmt.Sprintf("virtualMachineScaleSet/id eq '%s'", *vmssInfo.ID) + pager := scaleSet.manager.azClient.virtualMachinesClient.NewListPager(scaleSet.manager.config.ResourceGroup, &armcompute.VirtualMachinesClientListOptions{ + Filter: &filter, + }) + + var vmList []armcompute.VirtualMachine + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + klog.Errorf("VirtualMachinesClient.List failed for %s: %v", scaleSet.Name, err) + return nil, &retry.Error{RawError: err} + } + for _, vm := range page.Value { + if vm != nil { + vmList = append(vmList, *vm) + } + } } - klog.V(4).Infof("GetFlexibleScaleSetVms: scaleSet.Name: %s, vmList: %v", scaleSet.Name, vmList) + + klog.V(4).Infof("GetFlexibleScaleSetVms: scaleSet.Name: %s, vmList count: %d", scaleSet.Name, len(vmList)) return vmList, nil } @@ -438,7 +467,7 @@ func (scaleSet *ScaleSet) Belongs(node *apiv1.Node) (bool, error) { return true, nil } -func (scaleSet *ScaleSet) createOrUpdateInstances(vmssInfo *compute.VirtualMachineScaleSet, newSize int64) error { +func (scaleSet *ScaleSet) createOrUpdateInstances(vmssInfo *armcompute.VirtualMachineScaleSet, newSize int64) error { if vmssInfo == nil { return fmt.Errorf("vmssInfo cannot be nil while increating scaleSet capacity") } @@ -448,18 +477,18 @@ func (scaleSet *ScaleSet) createOrUpdateInstances(vmssInfo *compute.VirtualMachi // Update the new capacity to cache. vmssSizeMutex.Lock() - vmssInfo.Sku.Capacity = &newSize + vmssInfo.SKU.Capacity = &newSize vmssSizeMutex.Unlock() // Compose a new VMSS for updating. 
- op := compute.VirtualMachineScaleSet{ + op := armcompute.VirtualMachineScaleSet{ Name: vmssInfo.Name, - Sku: vmssInfo.Sku, + SKU: vmssInfo.SKU, Location: vmssInfo.Location, } if vmssInfo.ExtendedLocation != nil { - op.ExtendedLocation = &compute.ExtendedLocation{ + op.ExtendedLocation = &armcompute.ExtendedLocation{ Name: vmssInfo.ExtendedLocation.Name, Type: vmssInfo.ExtendedLocation.Type, } @@ -469,18 +498,18 @@ func (scaleSet *ScaleSet) createOrUpdateInstances(vmssInfo *compute.VirtualMachi ctx, cancel := getContextWithTimeout(vmssContextTimeout) defer cancel() - klog.V(3).Infof("Waiting for virtualMachineScaleSetsClient.CreateOrUpdateAsync(%s)", scaleSet.Name) - future, rerr := scaleSet.manager.azClient.virtualMachineScaleSetsClient.CreateOrUpdateAsync(ctx, scaleSet.manager.config.ResourceGroup, scaleSet.Name, op) - if rerr != nil { - klog.Errorf("virtualMachineScaleSetsClient.CreateOrUpdate for scale set %q failed: %+v", scaleSet.Name, rerr) - return rerr.Error() + klog.V(3).Infof("Waiting for virtualMachineScaleSetsClient.BeginCreateOrUpdate(%s)", scaleSet.Name) + poller, err := scaleSet.manager.azClient.virtualMachineScaleSetsClient.BeginCreateOrUpdate(ctx, scaleSet.manager.config.ResourceGroup, scaleSet.Name, op, nil) + if err != nil { + klog.Errorf("virtualMachineScaleSetsClient.BeginCreateOrUpdate for scale set %q failed: %+v", scaleSet.Name, err) + return err } // Proactively set the VMSS size so autoscaler makes better decisions. scaleSet.curSize = newSize scaleSet.lastSizeRefresh = time.Now() - go scaleSet.waitForCreateOrUpdateInstances(future) + go scaleSet.waitForCreateOrUpdateInstances(poller) return nil } @@ -518,18 +547,18 @@ func (scaleSet *ScaleSet) DeleteInstances(instances []*azureRef, hasUnregistered return nil } - instanceIDs := []string{} + var instanceIDs []*string for _, instance := range instancesToDelete { instanceID, err := getLastSegment(instance.Name) if err != nil { klog.Errorf("getLastSegment failed with error: %v", err) return err } - instanceIDs = append(instanceIDs, instanceID) + instanceIDs = append(instanceIDs, to.Ptr(instanceID)) } - requiredIds := &compute.VirtualMachineScaleSetVMInstanceRequiredIDs{ - InstanceIds: &instanceIDs, + requiredIds := &armcompute.VirtualMachineScaleSetVMInstanceRequiredIDs{ + InstanceIDs: instanceIDs, } ctx, cancel := getContextWithTimeout(vmssContextTimeout) @@ -537,7 +566,7 @@ func (scaleSet *ScaleSet) DeleteInstances(instances []*azureRef, hasUnregistered future, rerr := scaleSet.deleteInstances(ctx, requiredIds, commonAsg.Id()) if rerr != nil { - klog.Errorf("virtualMachineScaleSetsClient.DeleteInstancesAsync for instances %v for %s failed: %+v", requiredIds.InstanceIds, scaleSet.Name, rerr) + klog.Errorf("virtualMachineScaleSetsClient.DeleteInstancesAsync for instances %v for %s failed: %+v", requiredIds.InstanceIDs, scaleSet.Name, rerr) return rerr.Error() } @@ -563,15 +592,14 @@ func (scaleSet *ScaleSet) DeleteInstances(instances []*azureRef, hasUnregistered return nil } -func (scaleSet *ScaleSet) waitForDeleteInstances(future *azure.Future, requiredIds *compute.VirtualMachineScaleSetVMInstanceRequiredIDs) { +func (scaleSet *ScaleSet) waitForDeleteInstances(poller *runtime.Poller[armcompute.VirtualMachineScaleSetsClientDeleteInstancesResponse], requiredIds *armcompute.VirtualMachineScaleSetVMInstanceRequiredIDs) { ctx, cancel := getContextWithTimeout(asyncContextTimeout) defer cancel() - klog.V(3).Infof("Calling virtualMachineScaleSetsClient.WaitForDeleteInstancesResult(%v) for %s", requiredIds.InstanceIds, 
scaleSet.Name) - httpResponse, err := scaleSet.manager.azClient.virtualMachineScaleSetsClient.WaitForDeleteInstancesResult(ctx, future, scaleSet.manager.config.ResourceGroup) - isSuccess, err := isSuccessHTTPResponse(httpResponse, err) - if isSuccess { - klog.V(3).Infof(".WaitForDeleteInstancesResult(%v) for %s success", requiredIds.InstanceIds, scaleSet.Name) + klog.V(3).Infof("Calling poller.PollUntilDone for deleting instances %v for %s", requiredIds.InstanceIDs, scaleSet.Name) + _, err := poller.PollUntilDone(ctx, nil) + if err == nil { + klog.V(3).Infof("Delete instances (%v) for %s success", requiredIds.InstanceIDs, scaleSet.Name) if scaleSet.manager.config.StrictCacheUpdates { if err := scaleSet.manager.forceRefresh(); err != nil { klog.Errorf("forceRefresh failed with error: %v", err) @@ -584,7 +612,7 @@ func (scaleSet *ScaleSet) waitForDeleteInstances(future *azure.Future, requiredI // On failure, invalidate the instanceCache - cannot have instances in deletingState scaleSet.invalidateInstanceCache() } - klog.Errorf("WaitForDeleteInstancesResult(%v) for %s failed with error: %v", requiredIds.InstanceIds, scaleSet.Name, err) + klog.Errorf("Delete instances (%v) for %s failed with error: %v", requiredIds.InstanceIDs, scaleSet.Name, err) } // DeleteNodes deletes the nodes from the group. @@ -690,7 +718,7 @@ func (scaleSet *ScaleSet) Nodes() ([]cloudprovider.Instance, error) { return scaleSet.instanceCache, nil } -// buildScaleSetCacheForFlex is used by orchestrationMode == compute.Flexible +// buildScaleSetCacheForFlex is used by orchestrationMode == armcompute.Flexible func (scaleSet *ScaleSet) buildScaleSetCacheForFlex() error { klog.V(3).Infof("buildScaleSetCacheForFlex: resetting instance Cache for scaleSet %s", scaleSet.Name) @@ -765,21 +793,31 @@ func (scaleSet *ScaleSet) buildScaleSetCacheForUniform() error { // Note that the GetScaleSetVms() results is not used directly because for the List endpoint, // their resource ID format is not consistent with Get endpoint -// buildInstanceCacheForFlex used by orchestrationMode == compute.Flexible -func buildInstanceCacheForFlex(vms []compute.VirtualMachine, enableFastDeleteOnFailedProvisioning bool) []cloudprovider.Instance { +// buildInstanceCacheForFlex used by orchestrationMode == armcompute.Flexible +func buildInstanceCacheForFlex(vms []armcompute.VirtualMachine, enableFastDeleteOnFailedProvisioning bool) []cloudprovider.Instance { var instances []cloudprovider.Instance for _, vm := range vms { powerState := vmPowerStateRunning - if vm.InstanceView != nil && vm.InstanceView.Statuses != nil { - powerState = vmPowerStateFromStatuses(*vm.InstanceView.Statuses) + if vm.Properties != nil && vm.Properties.InstanceView != nil && vm.Properties.InstanceView.Statuses != nil { + statuses := make([]armcompute.InstanceViewStatus, 0) + for _, status := range vm.Properties.InstanceView.Statuses { + if status != nil { + statuses = append(statuses, *status) + } + } + powerState = vmPowerStateFromStatuses(statuses) + } + var provisioningState *string + if vm.Properties != nil { + provisioningState = vm.Properties.ProvisioningState } - addVMToCache(&instances, vm.ID, vm.ProvisioningState, powerState, enableFastDeleteOnFailedProvisioning) + addVMToCache(&instances, vm.ID, provisioningState, powerState, enableFastDeleteOnFailedProvisioning) } return instances } -// addVMToCache used by orchestrationMode == compute.Flexible +// addVMToCache used by orchestrationMode == armcompute.Flexible func addVMToCache(instances *[]cloudprovider.Instance, id, 
provisioningState *string, powerState string, enableFastDeleteOnFailedProvisioning bool) { // The resource ID is empty string, which indicates the instance may be in deleting state. if len(*id) == 0 { @@ -800,7 +838,7 @@ func addVMToCache(instances *[]cloudprovider.Instance, id, provisioningState *st } // instanceStatusFromProvisioningStateAndPowerState converts the VM provisioning state to cloudprovider.InstanceStatus -// instanceStatusFromProvisioningStateAndPowerState used by orchestrationMode == compute.Flexible +// instanceStatusFromProvisioningStateAndPowerState used by orchestrationMode == armcompute.Flexible // Suggestion: reunify this with scaleSet.instanceStatusFromVM() func instanceStatusFromProvisioningStateAndPowerState(resourceID string, provisioningState *string, powerState string, enableFastDeleteOnFailedProvisioning bool) *cloudprovider.InstanceStatus { if provisioningState == nil { @@ -843,10 +881,11 @@ func instanceStatusFromProvisioningStateAndPowerState(resourceID string, provisi return status } -func isSpot(vmss *compute.VirtualMachineScaleSet) bool { - return vmss != nil && vmss.VirtualMachineScaleSetProperties != nil && - vmss.VirtualMachineScaleSetProperties.VirtualMachineProfile != nil && - vmss.VirtualMachineScaleSetProperties.VirtualMachineProfile.Priority == compute.Spot +func isSpot(vmss *armcompute.VirtualMachineScaleSet) bool { + return vmss != nil && vmss.Properties != nil && + vmss.Properties.VirtualMachineProfile != nil && + vmss.Properties.VirtualMachineProfile.Priority != nil && + *vmss.Properties.VirtualMachineProfile.Priority == armcompute.VirtualMachinePriorityTypesSpot } func (scaleSet *ScaleSet) invalidateLastSizeRefreshWithLock() { @@ -855,23 +894,26 @@ func (scaleSet *ScaleSet) invalidateLastSizeRefreshWithLock() { scaleSet.sizeMutex.Unlock() } -func (scaleSet *ScaleSet) getOrchestrationMode() (compute.OrchestrationMode, error) { +func (scaleSet *ScaleSet) getOrchestrationMode() (armcompute.OrchestrationMode, error) { vmss, err := scaleSet.getVMSSFromCache() if err != nil { klog.Errorf("failed to get information for VMSS: %s, error: %v", scaleSet.Name, err) return "", err } - return vmss.OrchestrationMode, nil + if vmss.Properties != nil && vmss.Properties.OrchestrationMode != nil { + return *vmss.Properties.OrchestrationMode, nil + } + return "", fmt.Errorf("orchestration mode not found for VMSS: %s", scaleSet.Name) } -func (scaleSet *ScaleSet) cseErrors(extensions *[]compute.VirtualMachineExtensionInstanceView) ([]string, bool) { +func (scaleSet *ScaleSet) cseErrors(extensions []*armcompute.VirtualMachineExtensionInstanceView) ([]string, bool) { var errs []string failed := false if extensions != nil { - for _, extension := range *extensions { - if strings.EqualFold(ptr.Deref(extension.Name, ""), vmssCSEExtensionName) && extension.Statuses != nil { - for _, status := range *extension.Statuses { - if status.Level == "Error" { + for _, extension := range extensions { + if extension != nil && strings.EqualFold(ptr.Deref(extension.Name, ""), vmssCSEExtensionName) && extension.Statuses != nil { + for _, status := range extension.Statuses { + if status != nil && status.Level != nil && *status.Level == armcompute.StatusLevelTypesError { errs = append(errs, ptr.Deref(status.Message, "")) failed = true } @@ -888,7 +930,7 @@ func (scaleSet *ScaleSet) getSKU() string { klog.Errorf("Failed to get information for VMSS (%q): %v", scaleSet.Name, err) return "" } - return ptr.Deref(vmssInfo.Sku.Name, "") + return ptr.Deref(vmssInfo.SKU.Name, "") } func 
(scaleSet *ScaleSet) verifyNodeGroup(instance *azureRef, commonNgID string) error { diff --git a/cluster-autoscaler/cloudprovider/azure/azure_scale_set_instance_cache.go b/cluster-autoscaler/cloudprovider/azure/azure_scale_set_instance_cache.go index 5f003905e9e6..b32637933374 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_scale_set_instance_cache.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_scale_set_instance_cache.go @@ -22,7 +22,7 @@ import ( "sync" "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/klog/v2" @@ -106,12 +106,12 @@ func (scaleSet *ScaleSet) updateInstanceCache() error { return err } - if orchestrationMode == compute.Flexible { + if orchestrationMode == armcompute.OrchestrationModeFlexible { if scaleSet.manager.config.EnableVmssFlexNodes { return scaleSet.buildScaleSetCacheForFlex() } return fmt.Errorf("vmss - %q with Flexible orchestration detected but 'enableVmssFlexNodes' feature flag is turned off", scaleSet.Name) - } else if orchestrationMode == compute.Uniform { + } else if orchestrationMode == armcompute.OrchestrationModeUniform { return scaleSet.buildScaleSetCacheForUniform() } @@ -199,11 +199,15 @@ func (scaleSet *ScaleSet) setInstanceStatusByProviderID(providerID string, statu // instanceStatusFromVM converts the VM provisioning state to cloudprovider.InstanceStatus. // Suggestion: reunify this with instanceStatusFromProvisioningStateAndPowerState() in azure_scale_set.go -func (scaleSet *ScaleSet) instanceStatusFromVM(vm *compute.VirtualMachineScaleSetVM) *cloudprovider.InstanceStatus { +func (scaleSet *ScaleSet) instanceStatusFromVM(vm *armcompute.VirtualMachineScaleSetVM) *cloudprovider.InstanceStatus { // Prefer the proactive cache view of the instance state if we aren't in a terminal state // This is because the power state may be taking longer to update and we don't want // an unfortunate VM update (TTL 5 min) to reset that state to running. 
- if vm.ProvisioningState == nil || *vm.ProvisioningState == string(compute.GalleryProvisioningStateUpdating) { + var provisioningState *string + if vm.Properties != nil { + provisioningState = vm.Properties.ProvisioningState + } + if provisioningState == nil || *provisioningState == string(armcompute.GalleryProvisioningStateUpdating) { resourceID, _ := convertResourceGroupNameToLower(*vm.ID) providerID := azurePrefix + resourceID for _, instance := range scaleSet.instanceCache { @@ -214,17 +218,23 @@ func (scaleSet *ScaleSet) instanceStatusFromVM(vm *compute.VirtualMachineScaleSe return nil } powerState := vmPowerStateRunning - if vm.InstanceView != nil && vm.InstanceView.Statuses != nil { - powerState = vmPowerStateFromStatuses(*vm.InstanceView.Statuses) + if vm.Properties != nil && vm.Properties.InstanceView != nil && vm.Properties.InstanceView.Statuses != nil { + statuses := make([]armcompute.InstanceViewStatus, 0) + for _, status := range vm.Properties.InstanceView.Statuses { + if status != nil { + statuses = append(statuses, *status) + } + } + powerState = vmPowerStateFromStatuses(statuses) } status := &cloudprovider.InstanceStatus{} - switch *vm.ProvisioningState { - case string(compute.GalleryProvisioningStateDeleting): + switch *provisioningState { + case string(armcompute.GalleryProvisioningStateDeleting): status.State = cloudprovider.InstanceDeleting - case string(compute.GalleryProvisioningStateCreating): + case string(armcompute.GalleryProvisioningStateCreating): status.State = cloudprovider.InstanceCreating - case string(compute.GalleryProvisioningStateFailed): + case string(armcompute.GalleryProvisioningStateFailed): status.State = cloudprovider.InstanceRunning klog.V(3).Infof("VM %s reports failed provisioning state with power state: %s, eligible for fast delete: %s", ptr.Deref(vm.ID, ""), powerState, strconv.FormatBool(scaleSet.enableFastDeleteOnFailedProvisioning)) @@ -251,9 +261,9 @@ func (scaleSet *ScaleSet) instanceStatusFromVM(vm *compute.VirtualMachineScaleSe } // Add vmssCSE Provisioning Failed Message in error info body for vmssCSE Extensions if enableDetailedCSEMessage is true - if scaleSet.enableDetailedCSEMessage && vm.InstanceView != nil { - if err, failed := scaleSet.cseErrors(vm.InstanceView.Extensions); failed { - klog.V(3).Infof("VM %s reports CSE failure: %v, with provisioning state %s, power state %s", ptr.Deref(vm.ID, ""), err, ptr.Deref(vm.ProvisioningState, ""), powerState) + if scaleSet.enableDetailedCSEMessage && vm.Properties != nil && vm.Properties.InstanceView != nil { + if err, failed := scaleSet.cseErrors(vm.Properties.InstanceView.Extensions); failed { + klog.V(3).Infof("VM %s reports CSE failure: %v, with provisioning state %s, power state %s", ptr.Deref(vm.ID, ""), err, ptr.Deref(provisioningState, ""), powerState) status.State = cloudprovider.InstanceCreating errorInfo := &cloudprovider.InstanceErrorInfo{ ErrorClass: cloudprovider.OtherErrorClass, diff --git a/cluster-autoscaler/cloudprovider/azure/azure_scale_set_instance_cache_test.go b/cluster-autoscaler/cloudprovider/azure/azure_scale_set_instance_cache_test.go index 4d94556a5790..8e38b34c2067 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_scale_set_instance_cache_test.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_scale_set_instance_cache_test.go @@ -20,13 +20,13 @@ import ( "fmt" "testing" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" 
"github.com/stretchr/testify/assert" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" ) -func testGetInstanceCacheWithStates(t *testing.T, vms []compute.VirtualMachineScaleSetVM, +func testGetInstanceCacheWithStates(t *testing.T, vms []armcompute.VirtualMachineScaleSetVM, states []cloudprovider.InstanceState) []cloudprovider.Instance { assert.Equal(t, len(vms), len(states)) var instanceCacheTest []cloudprovider.Instance @@ -46,7 +46,7 @@ func TestInstanceStatusFromVM(t *testing.T) { scaleSet := newTestScaleSet(provider.azureManager, "testScaleSet") t.Run("provisioning state = failed, power state = starting", func(t *testing.T) { - vm := newVMObjectWithState(string(compute.GalleryProvisioningStateFailed), vmPowerStateStarting) + vm := newVMObjectWithState(string(armcompute.GalleryProvisioningStateFailed), vmPowerStateStarting) status := scaleSet.instanceStatusFromVM(vm) @@ -55,7 +55,7 @@ func TestInstanceStatusFromVM(t *testing.T) { }) t.Run("provisioning state = failed, power state = running", func(t *testing.T) { - vm := newVMObjectWithState(string(compute.GalleryProvisioningStateFailed), vmPowerStateRunning) + vm := newVMObjectWithState(string(armcompute.GalleryProvisioningStateFailed), vmPowerStateRunning) status := scaleSet.instanceStatusFromVM(vm) @@ -64,7 +64,7 @@ func TestInstanceStatusFromVM(t *testing.T) { }) t.Run("provisioning state = failed, power state = stopping", func(t *testing.T) { - vm := newVMObjectWithState(string(compute.GalleryProvisioningStateFailed), vmPowerStateStopping) + vm := newVMObjectWithState(string(armcompute.GalleryProvisioningStateFailed), vmPowerStateStopping) status := scaleSet.instanceStatusFromVM(vm) @@ -73,7 +73,7 @@ func TestInstanceStatusFromVM(t *testing.T) { }) t.Run("provisioning state = failed, power state = stopped", func(t *testing.T) { - vm := newVMObjectWithState(string(compute.GalleryProvisioningStateFailed), vmPowerStateStopped) + vm := newVMObjectWithState(string(armcompute.GalleryProvisioningStateFailed), vmPowerStateStopped) status := scaleSet.instanceStatusFromVM(vm) @@ -82,7 +82,7 @@ func TestInstanceStatusFromVM(t *testing.T) { }) t.Run("provisioning state = failed, power state = deallocated", func(t *testing.T) { - vm := newVMObjectWithState(string(compute.GalleryProvisioningStateFailed), vmPowerStateDeallocated) + vm := newVMObjectWithState(string(armcompute.GalleryProvisioningStateFailed), vmPowerStateDeallocated) status := scaleSet.instanceStatusFromVM(vm) @@ -91,7 +91,7 @@ func TestInstanceStatusFromVM(t *testing.T) { }) t.Run("provisioning state = failed, power state = unknown", func(t *testing.T) { - vm := newVMObjectWithState(string(compute.GalleryProvisioningStateFailed), vmPowerStateUnknown) + vm := newVMObjectWithState(string(armcompute.GalleryProvisioningStateFailed), vmPowerStateUnknown) status := scaleSet.instanceStatusFromVM(vm) @@ -105,7 +105,7 @@ func TestInstanceStatusFromVM(t *testing.T) { scaleSet := newTestScaleSetWithFastDelete(provider.azureManager, "testScaleSet") t.Run("provisioning state = failed, power state = starting", func(t *testing.T) { - vm := newVMObjectWithState(string(compute.GalleryProvisioningStateFailed), vmPowerStateStarting) + vm := newVMObjectWithState(string(armcompute.GalleryProvisioningStateFailed), vmPowerStateStarting) status := scaleSet.instanceStatusFromVM(vm) @@ -114,7 +114,7 @@ func TestInstanceStatusFromVM(t *testing.T) { }) t.Run("provisioning state = failed, power state = running", func(t *testing.T) { - vm := 
newVMObjectWithState(string(compute.GalleryProvisioningStateFailed), vmPowerStateRunning) + vm := newVMObjectWithState(string(armcompute.GalleryProvisioningStateFailed), vmPowerStateRunning) status := scaleSet.instanceStatusFromVM(vm) @@ -123,7 +123,7 @@ func TestInstanceStatusFromVM(t *testing.T) { }) t.Run("provisioning state = failed, power state = stopping", func(t *testing.T) { - vm := newVMObjectWithState(string(compute.GalleryProvisioningStateFailed), vmPowerStateStopping) + vm := newVMObjectWithState(string(armcompute.GalleryProvisioningStateFailed), vmPowerStateStopping) status := scaleSet.instanceStatusFromVM(vm) @@ -133,7 +133,7 @@ func TestInstanceStatusFromVM(t *testing.T) { }) t.Run("provisioning state = failed, power state = stopped", func(t *testing.T) { - vm := newVMObjectWithState(string(compute.GalleryProvisioningStateFailed), vmPowerStateStopped) + vm := newVMObjectWithState(string(armcompute.GalleryProvisioningStateFailed), vmPowerStateStopped) status := scaleSet.instanceStatusFromVM(vm) @@ -143,7 +143,7 @@ func TestInstanceStatusFromVM(t *testing.T) { }) t.Run("provisioning state = failed, power state = deallocated", func(t *testing.T) { - vm := newVMObjectWithState(string(compute.GalleryProvisioningStateFailed), vmPowerStateDeallocated) + vm := newVMObjectWithState(string(armcompute.GalleryProvisioningStateFailed), vmPowerStateDeallocated) status := scaleSet.instanceStatusFromVM(vm) @@ -153,7 +153,7 @@ func TestInstanceStatusFromVM(t *testing.T) { }) t.Run("provisioning state = failed, power state = unknown", func(t *testing.T) { - vm := newVMObjectWithState(string(compute.GalleryProvisioningStateFailed), vmPowerStateUnknown) + vm := newVMObjectWithState(string(armcompute.GalleryProvisioningStateFailed), vmPowerStateUnknown) status := scaleSet.instanceStatusFromVM(vm) diff --git a/cluster-autoscaler/cloudprovider/azure/azure_scale_set_test.go b/cluster-autoscaler/cloudprovider/azure/azure_scale_set_test.go index 25b0d4554962..13879881844e 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_scale_set_test.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_scale_set_test.go @@ -17,27 +17,35 @@ limitations under the License. 
package azure import ( + "context" "fmt" "net/http" "testing" "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" "github.com/stretchr/testify/assert" "go.uber.org/mock/gomock" apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/utils/ptr" - "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/mockvmclient" - "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/mockvmssclient" - "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/mockvmssvmclient" ) const ( testLocation = "eastus" ) +// convertToPointerSlice converts a slice of InstanceViewStatus to a slice of pointers +func convertToPointerSlice(statuses []armcompute.InstanceViewStatus) []*armcompute.InstanceViewStatus { + result := make([]*armcompute.InstanceViewStatus, len(statuses)) + for i := range statuses { + result[i] = &statuses[i] + } + return result +} + func newTestScaleSet(manager *AzureManager, name string) *ScaleSet { return &ScaleSet{ azureRef: azureRef{ @@ -75,16 +83,16 @@ func newTestScaleSetWithFastDelete(manager *AzureManager, name string) *ScaleSet } } -func newTestVMSSList(cap int64, name, loc string, orchmode compute.OrchestrationMode) []compute.VirtualMachineScaleSet { - return []compute.VirtualMachineScaleSet{ +func newTestVMSSList(cap int64, name, loc string, orchmode armcompute.OrchestrationMode) []armcompute.VirtualMachineScaleSet { + return []armcompute.VirtualMachineScaleSet{ { Name: ptr.To(name), - Sku: &compute.Sku{ + SKU: &armcompute.SKU{ Capacity: ptr.To(cap), Name: ptr.To("Standard_D4_v2"), }, - VirtualMachineScaleSetProperties: &compute.VirtualMachineScaleSetProperties{ - OrchestrationMode: orchmode, + Properties: &armcompute.VirtualMachineScaleSetProperties{ + OrchestrationMode: &orchmode, }, Location: ptr.To(loc), ID: ptr.To(name), @@ -92,29 +100,29 @@ func newTestVMSSList(cap int64, name, loc string, orchmode compute.Orchestration } } -func newTestVMSSListForEdgeZones(capacity int64, name string) *compute.VirtualMachineScaleSet { - return &compute.VirtualMachineScaleSet{ +func newTestVMSSListForEdgeZones(capacity int64, name string) *armcompute.VirtualMachineScaleSet { + return &armcompute.VirtualMachineScaleSet{ Name: ptr.To(name), - Sku: &compute.Sku{ + SKU: &armcompute.SKU{ Capacity: ptr.To(capacity), Name: ptr.To("Standard_D4_v2"), }, - VirtualMachineScaleSetProperties: &compute.VirtualMachineScaleSetProperties{}, - Location: ptr.To(testLocation), - ExtendedLocation: &compute.ExtendedLocation{ + Properties: &armcompute.VirtualMachineScaleSetProperties{}, + Location: ptr.To(testLocation), + ExtendedLocation: &armcompute.ExtendedLocation{ Name: ptr.To("losangeles"), - Type: compute.ExtendedLocationTypes("EdgeZone"), + Type: ptr.To(armcompute.ExtendedLocationTypes("EdgeZone")), }, } } -func newTestVMSSVMList(count int) []compute.VirtualMachineScaleSetVM { - var vmssVMList []compute.VirtualMachineScaleSetVM +func newTestVMSSVMList(count int) []armcompute.VirtualMachineScaleSetVM { + var vmssVMList []armcompute.VirtualMachineScaleSetVM for i := 0; i < count; i++ { - vmssVM := compute.VirtualMachineScaleSetVM{ + vmssVM := armcompute.VirtualMachineScaleSetVM{ ID: ptr.To(fmt.Sprintf(fakeVirtualMachineScaleSetVMID, i)), InstanceID: ptr.To(fmt.Sprintf("%d", i)), - VirtualMachineScaleSetVMProperties: 
&compute.VirtualMachineScaleSetVMProperties{ + Properties: &armcompute.VirtualMachineScaleSetVMProperties{ VMID: ptr.To(fmt.Sprintf("123E4567-E89B-12D3-A456-426655440000-%d", i)), }, } @@ -123,13 +131,18 @@ func newTestVMSSVMList(count int) []compute.VirtualMachineScaleSetVM { return vmssVMList } -func newTestVMList(count int) []compute.VirtualMachine { - var vmssVMList []compute.VirtualMachine +func newTestVMList(count int) []armcompute.VirtualMachine { + var vmssVMList []armcompute.VirtualMachine + // For flexible orchestration, VMs need to reference their VMSS + vmssID := "/subscriptions/test-subscription-id/resourceGroups/test-asg/providers/Microsoft.Compute/virtualMachineScaleSets/test-asg" for i := 0; i < count; i++ { - vmssVM := compute.VirtualMachine{ + vmssVM := armcompute.VirtualMachine{ ID: ptr.To(fmt.Sprintf(fakeVirtualMachineVMID, i)), - VirtualMachineProperties: &compute.VirtualMachineProperties{ + Properties: &armcompute.VirtualMachineProperties{ VMID: ptr.To(fmt.Sprintf("123E4567-E89B-12D3-A456-426655440000-%d", i)), + VirtualMachineScaleSet: &armcompute.SubResource{ + ID: ptr.To(vmssID), + }, }, } vmssVMList = append(vmssVMList, vmssVM) @@ -137,10 +150,10 @@ func newTestVMList(count int) []compute.VirtualMachine { return vmssVMList } -func newApiNode(orchmode compute.OrchestrationMode, vmID int64) *apiv1.Node { +func newApiNode(orchmode armcompute.OrchestrationMode, vmID int64) *apiv1.Node { providerId := fakeVirtualMachineScaleSetVMID - if orchmode == compute.Flexible { + if orchmode == armcompute.OrchestrationModeFlexible { providerId = fakeVirtualMachineVMID } @@ -182,10 +195,10 @@ func TestScaleSetTargetSize(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - orchestrationModes := [2]compute.OrchestrationMode{compute.Uniform, compute.Flexible} - expectedScaleSets := newTestVMSSList(3, "test-asg", "eastus", compute.Uniform) - spotScaleSet := newTestVMSSList(5, "spot-vmss", "eastus", compute.Uniform)[0] - spotScaleSet.VirtualMachineProfile = &compute.VirtualMachineScaleSetVMProfile{Priority: compute.Spot} + orchestrationModes := [2]armcompute.OrchestrationMode{armcompute.OrchestrationModeUniform, armcompute.OrchestrationModeFlexible} + expectedScaleSets := newTestVMSSList(3, "test-asg", "eastus", armcompute.OrchestrationModeUniform) + spotScaleSet := newTestVMSSList(5, "spot-vmss", "eastus", armcompute.OrchestrationModeUniform)[0] + spotScaleSet.Properties.VirtualMachineProfile = &armcompute.VirtualMachineScaleSetVMProfile{Priority: ptr.To(armcompute.VirtualMachinePriorityTypesSpot)} expectedScaleSets = append(expectedScaleSets, spotScaleSet) expectedVMSSVMs := newTestVMSSVMList(3) @@ -193,31 +206,42 @@ func TestScaleSetTargetSize(t *testing.T) { for _, orchMode := range orchestrationModes { provider := newTestProvider(t) - mockVMSSClient := mockvmssclient.NewMockInterface(ctrl) - mockVMSSClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup).Return(expectedScaleSets, nil).AnyTimes() + mockVMSSClient := NewMockVirtualMachineScaleSetsClient(ctrl) + mockVMSSClient.EXPECT().NewListPager(provider.azureManager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetsClientListResponse] { + return getFakeVMSSListPager(expectedScaleSets) + }).AnyTimes() provider.azureManager.azClient.virtualMachineScaleSetsClient = mockVMSSClient - mockVMClient := mockvmclient.NewMockInterface(ctrl) - mockVMClient.EXPECT().List(gomock.Any(), 
provider.azureManager.config.ResourceGroup).Return([]compute.VirtualMachine{}, nil).AnyTimes() + mockVMClient := NewMockVirtualMachinesClient(ctrl) + mockVMClient.EXPECT().NewListPager(provider.azureManager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachinesClientListResponse] { + return getFakeVMListPager([]armcompute.VirtualMachine{}) + }).AnyTimes() provider.azureManager.azClient.virtualMachinesClient = mockVMClient // return a different capacity from GET API - spotScaleSet.Sku.Capacity = ptr.To[int64](1) - mockVMSSClient.EXPECT().Get(gomock.Any(), provider.azureManager.config.ResourceGroup, "spot-vmss").Return(spotScaleSet, nil).Times(1) + spotScaleSet.SKU.Capacity = ptr.To[int64](1) + mockVMSSClient.EXPECT().Get(gomock.Any(), provider.azureManager.config.ResourceGroup, "spot-vmss", gomock.Any()).Return(armcompute.VirtualMachineScaleSetsClientGetResponse{VirtualMachineScaleSet: spotScaleSet}, nil).Times(1) provider.azureManager.azClient.virtualMachineScaleSetsClient = mockVMSSClient - mockVMSSVMClient := mockvmssvmclient.NewMockInterface(ctrl) + mockVMSSVMClient := NewMockVirtualMachineScaleSetVMsClient(ctrl) - mockVMSSVMClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup, "test-asg", gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes() + mockVMSSVMClient.EXPECT().NewListPager(provider.azureManager.config.ResourceGroup, "test-asg", gomock.Any()).DoAndReturn(func(string, string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetVMsClientListResponse] { + return getFakeVMSSVMListPager(expectedVMSSVMs) + }).AnyTimes() provider.azureManager.azClient.virtualMachineScaleSetVMsClient = mockVMSSVMClient err := provider.azureManager.forceRefresh() assert.NoError(t, err) - if orchMode == compute.Uniform { - mockVMSSVMClient := mockvmssvmclient.NewMockInterface(ctrl) - mockVMSSVMClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup, "test-asg", gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes() + if orchMode == armcompute.OrchestrationModeUniform { + mockVMSSVMClient := NewMockVirtualMachineScaleSetVMsClient(ctrl) + mockVMSSVMClient.EXPECT().NewListPager(provider.azureManager.config.ResourceGroup, "test-asg", gomock.Any()).DoAndReturn(func(string, string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetVMsClientListResponse] { + return getFakeVMSSVMListPager(expectedVMSSVMs) + }).AnyTimes() provider.azureManager.azClient.virtualMachineScaleSetVMsClient = mockVMSSVMClient } else { provider.azureManager.config.EnableVmssFlexNodes = true - mockVMClient.EXPECT().ListVmssFlexVMsWithoutInstanceView(gomock.Any(), "test-asg").Return(expectedVMs, nil).AnyTimes() + // TODO: Use appropriate method instead of ListVmssFlexVMsWithoutInstanceView + mockVMClient.EXPECT().NewListPager(provider.azureManager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachinesClientListResponse] { + return getFakeVMListPager(expectedVMs) + }).AnyTimes() } err = provider.azureManager.forceRefresh() @@ -252,7 +276,7 @@ func TestScaleSetIncreaseSize(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - orchestrationModes := [2]compute.OrchestrationMode{compute.Uniform, compute.Flexible} + orchestrationModes := [2]armcompute.OrchestrationMode{armcompute.OrchestrationModeUniform, armcompute.OrchestrationModeFlexible} for _, orchMode := range orchestrationModes { @@ -273,26 +297,32 @@ func TestScaleSetIncreaseSize(t 
*testing.T) { // expectedEdgeZoneMinZeroScaleSets := newTestVMSSListForEdgeZones(0, "edgezone-minzero-vmss") // expectedScaleSets = append(expectedScaleSets, *expectedEdgeZoneScaleSets, *expectedEdgeZoneMinZeroScaleSets) - mockVMSSClient := mockvmssclient.NewMockInterface(ctrl) - mockVMSSClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup).Return(expectedScaleSets, nil).AnyTimes() - mockVMSSClient.EXPECT().CreateOrUpdateAsync(gomock.Any(), provider.azureManager.config.ResourceGroup, testASG, gomock.Any()).Return(nil, nil) - // This should be Anytimes() because the parent function of this call - updateVMSSCapacity() is a goroutine - // and this test doesn't wait on goroutine, hence, it is difficult to write exact expected number (which is 3 here) - // before we return from this this. - // This is a future TODO: sync.WaitGroup should be used in actual code and make code easily testable - mockVMSSClient.EXPECT().WaitForCreateOrUpdateResult(gomock.Any(), gomock.Any(), provider.azureManager.config.ResourceGroup).Return(&http.Response{StatusCode: http.StatusOK}, nil).AnyTimes() + mockVMSSClient := NewMockVirtualMachineScaleSetsClient(ctrl) + mockVMSSClient.EXPECT().NewListPager(provider.azureManager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetsClientListResponse] { + return getFakeVMSSListPager(expectedScaleSets) + }).AnyTimes() + mockVMSSClient.EXPECT().BeginCreateOrUpdate(gomock.Any(), provider.azureManager.config.ResourceGroup, gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, rg string, name string, vmss armcompute.VirtualMachineScaleSet, opts *armcompute.VirtualMachineScaleSetsClientBeginCreateOrUpdateOptions) (*runtime.Poller[armcompute.VirtualMachineScaleSetsClientCreateOrUpdateResponse], error) { + return getFakeVMSSPoller() + }).AnyTimes() provider.azureManager.azClient.virtualMachineScaleSetsClient = mockVMSSClient - mockVMClient := mockvmclient.NewMockInterface(ctrl) - mockVMClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup).Return([]compute.VirtualMachine{}, nil).AnyTimes() + mockVMClient := NewMockVirtualMachinesClient(ctrl) + mockVMClient.EXPECT().NewListPager(provider.azureManager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachinesClientListResponse] { + return getFakeVMListPager([]armcompute.VirtualMachine{}) + }).AnyTimes() provider.azureManager.azClient.virtualMachinesClient = mockVMClient - if orchMode == compute.Uniform { - mockVMSSVMClient := mockvmssvmclient.NewMockInterface(ctrl) - mockVMSSVMClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup, "test-asg", gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes() + if orchMode == armcompute.OrchestrationModeUniform { + mockVMSSVMClient := NewMockVirtualMachineScaleSetVMsClient(ctrl) + mockVMSSVMClient.EXPECT().NewListPager(provider.azureManager.config.ResourceGroup, "test-asg", gomock.Any()).DoAndReturn(func(string, string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetVMsClientListResponse] { + return getFakeVMSSVMListPager(expectedVMSSVMs) + }).AnyTimes() provider.azureManager.azClient.virtualMachineScaleSetVMsClient = mockVMSSVMClient } else { provider.azureManager.config.EnableVmssFlexNodes = true - mockVMClient.EXPECT().ListVmssFlexVMsWithoutInstanceView(gomock.Any(), "test-asg").Return(expectedVMs, nil).AnyTimes() + // TODO: Use appropriate method instead of 
ListVmssFlexVMsWithoutInstanceView + mockVMClient.EXPECT().NewListPager(provider.azureManager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachinesClientListResponse] { + return getFakeVMListPager(expectedVMs) + }).AnyTimes() } err := provider.azureManager.forceRefresh() assert.NoError(t, err) @@ -331,8 +361,7 @@ func TestScaleSetIncreaseSize(t *testing.T) { assert.NoError(t, err) assert.Equal(t, 3, targetSizeForEdgeZone) - mockVMSSClient.EXPECT().CreateOrUpdateAsync(gomock.Any(), provider.azureManager.config.ResourceGroup, - "edgezone-vmss", gomock.Any()).Return(nil, nil) + // TODO SDK v2: Mock BeginCreateOrUpdate if needed err = provider.NodeGroups()[1].IncreaseSize(2) assert.NoError(t, err) @@ -351,8 +380,7 @@ func TestScaleSetIncreaseSize(t *testing.T) { assert.NoError(t, err) assert.Equal(t, 0, targetSizeForEdgeZoneMinZero) - mockVMSSClient.EXPECT().CreateOrUpdateAsync(gomock.Any(), provider.azureManager.config.ResourceGroup, - "edgezone-minzero-vmss", gomock.Any()).Return(nil, nil) + // TODO SDK v2: Mock BeginCreateOrUpdate if needed err = provider.NodeGroups()[2].IncreaseSize(2) assert.NoError(t, err) @@ -369,23 +397,23 @@ func TestScaleSetIncreaseSizeOnVMProvisioningFailed(t *testing.T) { testCases := map[string]struct { expectInstanceRunning bool isMissingInstanceView bool - statuses []compute.InstanceViewStatus + statuses []armcompute.InstanceViewStatus expectErrorInfoPopulated bool }{ "out of resources when no power state exists": { expectErrorInfoPopulated: false, }, "out of resources when VM is stopped": { - statuses: []compute.InstanceViewStatus{{Code: ptr.To(vmPowerStateStopped)}}, + statuses: []armcompute.InstanceViewStatus{{Code: ptr.To(vmPowerStateStopped)}}, expectErrorInfoPopulated: false, }, "out of resources when VM reports invalid power state": { - statuses: []compute.InstanceViewStatus{{Code: ptr.To("PowerState/invalid")}}, + statuses: []armcompute.InstanceViewStatus{{Code: ptr.To("PowerState/invalid")}}, expectErrorInfoPopulated: false, }, "instance running when power state is running": { expectInstanceRunning: true, - statuses: []compute.InstanceViewStatus{{Code: ptr.To(vmPowerStateRunning)}}, + statuses: []armcompute.InstanceViewStatus{{Code: ptr.To(vmPowerStateRunning)}}, expectErrorInfoPopulated: false, }, "instance running if instance view cannot be retrieved": { @@ -402,26 +430,33 @@ func TestScaleSetIncreaseSizeOnVMProvisioningFailed(t *testing.T) { manager := newTestAzureManager(t) vmssName := "vmss-failed-upscale" - expectedScaleSets := newTestVMSSList(3, "vmss-failed-upscale", "eastus", compute.Uniform) + expectedScaleSets := newTestVMSSList(3, "vmss-failed-upscale", "eastus", armcompute.OrchestrationModeUniform) expectedVMSSVMs := newTestVMSSVMList(3) // The failed state is important line of code here expectedVMs := newTestVMList(3) - expectedVMSSVMs[2].ProvisioningState = ptr.To(provisioningStateFailed) + expectedVMSSVMs[2].Properties.ProvisioningState = ptr.To(provisioningStateFailed) if !testCase.isMissingInstanceView { - expectedVMSSVMs[2].InstanceView = &compute.VirtualMachineScaleSetVMInstanceView{Statuses: &testCase.statuses} + expectedVMSSVMs[2].Properties.InstanceView = &armcompute.VirtualMachineScaleSetVMInstanceView{Statuses: convertToPointerSlice(testCase.statuses)} } - mockVMSSClient := mockvmssclient.NewMockInterface(ctrl) - mockVMSSClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup).Return(expectedScaleSets, nil) - 
mockVMSSClient.EXPECT().CreateOrUpdateAsync(gomock.Any(), manager.config.ResourceGroup, vmssName, gomock.Any()).Return(nil, nil) - mockVMSSClient.EXPECT().WaitForCreateOrUpdateResult(gomock.Any(), gomock.Any(), manager.config.ResourceGroup).Return(&http.Response{StatusCode: http.StatusOK}, nil).AnyTimes() + mockVMSSClient := NewMockVirtualMachineScaleSetsClient(ctrl) + mockVMSSClient.EXPECT().NewListPager(manager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetsClientListResponse] { + return getFakeVMSSListPager(expectedScaleSets) + }).AnyTimes() + mockVMSSClient.EXPECT().BeginCreateOrUpdate(gomock.Any(), manager.config.ResourceGroup, gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, rg string, name string, vmss armcompute.VirtualMachineScaleSet, opts *armcompute.VirtualMachineScaleSetsClientBeginCreateOrUpdateOptions) (*runtime.Poller[armcompute.VirtualMachineScaleSetsClientCreateOrUpdateResponse], error) { + return getFakeVMSSPoller() + }).AnyTimes() manager.azClient.virtualMachineScaleSetsClient = mockVMSSClient - mockVMSSVMClient := mockvmssvmclient.NewMockInterface(ctrl) - mockVMSSVMClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup, "vmss-failed-upscale", gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes() + mockVMSSVMClient := NewMockVirtualMachineScaleSetVMsClient(ctrl) + mockVMSSVMClient.EXPECT().NewListPager(manager.config.ResourceGroup, "vmss-failed-upscale", gomock.Any()).DoAndReturn(func(string, string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetVMsClientListResponse] { + return getFakeVMSSVMListPager(expectedVMSSVMs) + }).AnyTimes() manager.azClient.virtualMachineScaleSetVMsClient = mockVMSSVMClient - mockVMClient := mockvmclient.NewMockInterface(ctrl) - mockVMClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup).Return(expectedVMs, nil).AnyTimes() + mockVMClient := NewMockVirtualMachinesClient(ctrl) + mockVMClient.EXPECT().NewListPager(manager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachinesClientListResponse] { + return getFakeVMListPager(expectedVMs) + }).AnyTimes() manager.azClient.virtualMachinesClient = mockVMClient manager.explicitlyConfigured["vmss-failed-upscale"] = true @@ -458,23 +493,23 @@ func TestIncreaseSizeOnVMProvisioningFailedWithFastDelete(t *testing.T) { testCases := map[string]struct { expectInstanceRunning bool isMissingInstanceView bool - statuses []compute.InstanceViewStatus + statuses []armcompute.InstanceViewStatus expectErrorInfoPopulated bool }{ "out of resources when no power state exists": { expectErrorInfoPopulated: true, }, "out of resources when VM is stopped": { - statuses: []compute.InstanceViewStatus{{Code: ptr.To(vmPowerStateStopped)}}, + statuses: []armcompute.InstanceViewStatus{{Code: ptr.To(vmPowerStateStopped)}}, expectErrorInfoPopulated: true, }, "out of resources when VM reports invalid power state": { - statuses: []compute.InstanceViewStatus{{Code: ptr.To("PowerState/invalid")}}, + statuses: []armcompute.InstanceViewStatus{{Code: ptr.To("PowerState/invalid")}}, expectErrorInfoPopulated: true, }, "instance running when power state is running": { expectInstanceRunning: true, - statuses: []compute.InstanceViewStatus{{Code: ptr.To(vmPowerStateRunning)}}, + statuses: []armcompute.InstanceViewStatus{{Code: ptr.To(vmPowerStateRunning)}}, expectErrorInfoPopulated: false, }, "instance running if instance view cannot be retrieved": 
{ @@ -491,26 +526,33 @@ func TestIncreaseSizeOnVMProvisioningFailedWithFastDelete(t *testing.T) { manager := newTestAzureManager(t) vmssName := "vmss-failed-upscale" - expectedScaleSets := newTestVMSSList(3, "vmss-failed-upscale", "eastus", compute.Uniform) + expectedScaleSets := newTestVMSSList(3, "vmss-failed-upscale", "eastus", armcompute.OrchestrationModeUniform) expectedVMSSVMs := newTestVMSSVMList(3) // The failed state is important line of code here expectedVMs := newTestVMList(3) - expectedVMSSVMs[2].ProvisioningState = ptr.To(provisioningStateFailed) + expectedVMSSVMs[2].Properties.ProvisioningState = ptr.To(provisioningStateFailed) if !testCase.isMissingInstanceView { - expectedVMSSVMs[2].InstanceView = &compute.VirtualMachineScaleSetVMInstanceView{Statuses: &testCase.statuses} + expectedVMSSVMs[2].Properties.InstanceView = &armcompute.VirtualMachineScaleSetVMInstanceView{Statuses: convertToPointerSlice(testCase.statuses)} } - mockVMSSClient := mockvmssclient.NewMockInterface(ctrl) - mockVMSSClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup).Return(expectedScaleSets, nil) - mockVMSSClient.EXPECT().CreateOrUpdateAsync(gomock.Any(), manager.config.ResourceGroup, vmssName, gomock.Any()).Return(nil, nil) - mockVMSSClient.EXPECT().WaitForCreateOrUpdateResult(gomock.Any(), gomock.Any(), manager.config.ResourceGroup).Return(&http.Response{StatusCode: http.StatusOK}, nil).AnyTimes() + mockVMSSClient := NewMockVirtualMachineScaleSetsClient(ctrl) + mockVMSSClient.EXPECT().NewListPager(manager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetsClientListResponse] { + return getFakeVMSSListPager(expectedScaleSets) + }).AnyTimes() + mockVMSSClient.EXPECT().BeginCreateOrUpdate(gomock.Any(), manager.config.ResourceGroup, gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, rg string, name string, vmss armcompute.VirtualMachineScaleSet, opts *armcompute.VirtualMachineScaleSetsClientBeginCreateOrUpdateOptions) (*runtime.Poller[armcompute.VirtualMachineScaleSetsClientCreateOrUpdateResponse], error) { + return getFakeVMSSPoller() + }).AnyTimes() manager.azClient.virtualMachineScaleSetsClient = mockVMSSClient - mockVMSSVMClient := mockvmssvmclient.NewMockInterface(ctrl) - mockVMSSVMClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup, "vmss-failed-upscale", gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes() + mockVMSSVMClient := NewMockVirtualMachineScaleSetVMsClient(ctrl) + mockVMSSVMClient.EXPECT().NewListPager(manager.config.ResourceGroup, "vmss-failed-upscale", gomock.Any()).DoAndReturn(func(string, string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetVMsClientListResponse] { + return getFakeVMSSVMListPager(expectedVMSSVMs) + }).AnyTimes() manager.azClient.virtualMachineScaleSetVMsClient = mockVMSSVMClient - mockVMClient := mockvmclient.NewMockInterface(ctrl) - mockVMClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup).Return(expectedVMs, nil).AnyTimes() + mockVMClient := NewMockVirtualMachinesClient(ctrl) + mockVMClient.EXPECT().NewListPager(manager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachinesClientListResponse] { + return getFakeVMListPager(expectedVMs) + }).AnyTimes() manager.azClient.virtualMachinesClient = mockVMClient manager.explicitlyConfigured["vmss-failed-upscale"] = true @@ -551,30 +593,33 @@ func TestScaleSetIncreaseSizeOnVMSSUpdating(t *testing.T) { vmssName := 
"vmss-updating" var vmssCapacity int64 = 3 - expectedScaleSets := []compute.VirtualMachineScaleSet{ + expectedScaleSets := []armcompute.VirtualMachineScaleSet{ { Name: &vmssName, - Sku: &compute.Sku{ + SKU: &armcompute.SKU{ Capacity: &vmssCapacity, }, - VirtualMachineScaleSetProperties: &compute.VirtualMachineScaleSetProperties{ - ProvisioningState: ptr.To(string(compute.GalleryProvisioningStateUpdating)), - OrchestrationMode: compute.Uniform, + Properties: &armcompute.VirtualMachineScaleSetProperties{ + ProvisioningState: ptr.To(string(armcompute.GalleryProvisioningStateUpdating)), + OrchestrationMode: ptr.To(armcompute.OrchestrationModeUniform), }, }, } expectedVMSSVMs := newTestVMSSVMList(3) - mockVMSSClient := mockvmssclient.NewMockInterface(ctrl) - mockVMSSClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup).Return(expectedScaleSets, nil) - mockVMSSClient.EXPECT().CreateOrUpdateAsync(gomock.Any(), manager.config.ResourceGroup, vmssName, gomock.Any()).Return( - nil, nil) - mockVMSSClient.EXPECT().WaitForCreateOrUpdateResult(gomock.Any(), gomock.Any(), manager.config.ResourceGroup).Return( - &http.Response{StatusCode: http.StatusOK}, nil).AnyTimes() + mockVMSSClient := NewMockVirtualMachineScaleSetsClient(ctrl) + mockVMSSClient.EXPECT().NewListPager(manager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetsClientListResponse] { + return getFakeVMSSListPager(expectedScaleSets) + }) + mockVMSSClient.EXPECT().BeginCreateOrUpdate(gomock.Any(), manager.config.ResourceGroup, gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, rg string, name string, vmss armcompute.VirtualMachineScaleSet, opts *armcompute.VirtualMachineScaleSetsClientBeginCreateOrUpdateOptions) (*runtime.Poller[armcompute.VirtualMachineScaleSetsClientCreateOrUpdateResponse], error) { + return getFakeVMSSPoller() + }).AnyTimes() manager.azClient.virtualMachineScaleSetsClient = mockVMSSClient - mockVMSSVMClient := mockvmssvmclient.NewMockInterface(ctrl) - mockVMSSVMClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup, "vmss-updating", - gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes() + mockVMSSVMClient := NewMockVirtualMachineScaleSetVMsClient(ctrl) + mockVMSSVMClient.EXPECT().NewListPager(manager.config.ResourceGroup, "vmss-updating", + gomock.Any()).DoAndReturn(func(string, string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetVMsClientListResponse] { + return getFakeVMSSVMListPager(expectedVMSSVMs) + }).AnyTimes() manager.azClient.virtualMachineScaleSetVMsClient = mockVMSSVMClient manager.explicitlyConfigured["vmss-updating"] = true registered := manager.RegisterNodeGroup(newTestScaleSet(manager, vmssName)) @@ -597,27 +642,38 @@ func TestScaleSetBelongs(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - orchestrationModes := [2]compute.OrchestrationMode{compute.Uniform, compute.Flexible} + orchestrationModes := [2]armcompute.OrchestrationMode{armcompute.OrchestrationModeUniform, armcompute.OrchestrationModeFlexible} expectedVMSSVMs := newTestVMSSVMList(3) expectedVMs := newTestVMList(3) for _, orchMode := range orchestrationModes { expectedScaleSets := newTestVMSSList(3, "test-asg", "eastus", orchMode) provider := newTestProvider(t) - mockVMSSClient := mockvmssclient.NewMockInterface(ctrl) - mockVMSSClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup).Return(expectedScaleSets, nil) + mockVMSSClient := 
NewMockVirtualMachineScaleSetsClient(ctrl) + mockVMSSClient.EXPECT().NewListPager(provider.azureManager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetsClientListResponse] { + return getFakeVMSSListPager(expectedScaleSets) + }).AnyTimes() provider.azureManager.azClient.virtualMachineScaleSetsClient = mockVMSSClient - mockVMClient := mockvmclient.NewMockInterface(ctrl) - mockVMClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup).Return([]compute.VirtualMachine{}, nil).AnyTimes() + + mockVMClient := NewMockVirtualMachinesClient(ctrl) provider.azureManager.azClient.virtualMachinesClient = mockVMClient - if orchMode == compute.Uniform { - mockVMSSVMClient := mockvmssvmclient.NewMockInterface(ctrl) - mockVMSSVMClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup, "test-asg", gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes() + if orchMode == armcompute.OrchestrationModeUniform { + // For uniform mode, VMs are not listed, only VMSS VMs + mockVMClient.EXPECT().NewListPager(provider.azureManager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachinesClientListResponse] { + return getFakeVMListPager([]armcompute.VirtualMachine{}) + }).AnyTimes() + mockVMSSVMClient := NewMockVirtualMachineScaleSetVMsClient(ctrl) + mockVMSSVMClient.EXPECT().NewListPager(provider.azureManager.config.ResourceGroup, "test-asg", gomock.Any()).DoAndReturn(func(string, string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetVMsClientListResponse] { + return getFakeVMSSVMListPager(expectedVMSSVMs) + }).AnyTimes() provider.azureManager.azClient.virtualMachineScaleSetVMsClient = mockVMSSVMClient } else { provider.azureManager.config.EnableVmssFlexNodes = true - mockVMClient.EXPECT().ListVmssFlexVMsWithoutInstanceView(gomock.Any(), "test-asg").Return(expectedVMs, nil).AnyTimes() + // For flexible orchestration, VMs are listed with a filter + mockVMClient.EXPECT().NewListPager(provider.azureManager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachinesClientListResponse] { + return getFakeVMListPager(expectedVMs) + }).AnyTimes() } registered := provider.azureManager.RegisterNodeGroup( @@ -653,27 +709,27 @@ func TestScaleSetDeleteNodes(t *testing.T) { var vmssCapacity int64 = 3 cases := []struct { name string - orchestrationMode compute.OrchestrationMode + orchestrationMode armcompute.OrchestrationMode enableForceDelete bool }{ { name: "uniform, force delete enabled", - orchestrationMode: compute.Uniform, + orchestrationMode: armcompute.OrchestrationModeUniform, enableForceDelete: true, }, { name: "uniform, force delete disabled", - orchestrationMode: compute.Uniform, + orchestrationMode: armcompute.OrchestrationModeUniform, enableForceDelete: false, }, { name: "flexible, force delete enabled", - orchestrationMode: compute.Flexible, + orchestrationMode: armcompute.OrchestrationModeFlexible, enableForceDelete: true, }, { name: "flexible, force delete disabled", - orchestrationMode: compute.Flexible, + orchestrationMode: armcompute.OrchestrationModeFlexible, enableForceDelete: false, }, } @@ -690,23 +746,33 @@ func TestScaleSetDeleteNodes(t *testing.T) { expectedScaleSets := newTestVMSSList(vmssCapacity, vmssName, "eastus", orchMode) fmt.Printf("orchMode: %s, enableForceDelete: %t\n", orchMode, enableForceDelete) - mockVMSSClient := mockvmssclient.NewMockInterface(ctrl) - 
mockVMSSClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup).Return(expectedScaleSets, nil).Times(2) - mockVMSSClient.EXPECT().DeleteInstancesAsync(gomock.Any(), manager.config.ResourceGroup, gomock.Any(), gomock.Any(), enableForceDelete).Return(nil, nil) - mockVMSSClient.EXPECT().WaitForDeleteInstancesResult(gomock.Any(), gomock.Any(), manager.config.ResourceGroup).Return(&http.Response{StatusCode: http.StatusOK}, nil).AnyTimes() + mockVMSSClient := NewMockVirtualMachineScaleSetsClient(ctrl) + mockVMSSClient.EXPECT().NewListPager(manager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetsClientListResponse] { + return getFakeVMSSListPager(expectedScaleSets) + }).Times(2) + mockVMSSClient.EXPECT().BeginDeleteInstances(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, rg string, vmssName string, vmInstanceIDs armcompute.VirtualMachineScaleSetVMInstanceRequiredIDs, opts *armcompute.VirtualMachineScaleSetsClientBeginDeleteInstancesOptions) (*runtime.Poller[armcompute.VirtualMachineScaleSetsClientDeleteInstancesResponse], error) { + return getFakeVMSSDeleteInstancesPoller() + }).AnyTimes() manager.azClient.virtualMachineScaleSetsClient = mockVMSSClient - mockVMSSVMClient := mockvmssvmclient.NewMockInterface(ctrl) - mockVMClient := mockvmclient.NewMockInterface(ctrl) + mockVMSSVMClient := NewMockVirtualMachineScaleSetVMsClient(ctrl) + mockVMClient := NewMockVirtualMachinesClient(ctrl) manager.azClient.virtualMachinesClient = mockVMClient - mockVMClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup).Return(expectedVMs, nil).AnyTimes() - - if orchMode == compute.Uniform { - mockVMSSVMClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup, "test-asg", gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes() + mockVMClient.EXPECT().NewListPager(manager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachinesClientListResponse] { + return getFakeVMListPager(expectedVMs) + }).AnyTimes() + + if orchMode == armcompute.OrchestrationModeUniform { + mockVMSSVMClient.EXPECT().NewListPager(manager.config.ResourceGroup, "test-asg", gomock.Any()).DoAndReturn(func(string, string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetVMsClientListResponse] { + return getFakeVMSSVMListPager(expectedVMSSVMs) + }).AnyTimes() manager.azClient.virtualMachineScaleSetVMsClient = mockVMSSVMClient } else { manager.config.EnableVmssFlexNodes = true - mockVMClient.EXPECT().ListVmssFlexVMsWithoutInstanceView(gomock.Any(), "test-asg").Return(expectedVMs, nil).AnyTimes() + // TODO: Use appropriate method instead of ListVmssFlexVMsWithoutInstanceView + mockVMClient.EXPECT().NewListPager(manager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachinesClientListResponse] { + return getFakeVMListPager(expectedVMs) + }).AnyTimes() manager.azClient.virtualMachinesClient = mockVMClient } @@ -746,16 +812,23 @@ func TestScaleSetDeleteNodes(t *testing.T) { // create scale set with vmss capacity 1 expectedScaleSets = newTestVMSSList(1, vmssName, "eastus", orchMode) - mockVMSSClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup).Return(expectedScaleSets, nil).AnyTimes() + mockVMSSClient.EXPECT().NewListPager(manager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) 
*runtime.Pager[armcompute.VirtualMachineScaleSetsClientListResponse] { + return getFakeVMSSListPager(expectedScaleSets) + }).AnyTimes() - if orchMode == compute.Uniform { - expectedVMSSVMs[0].ProvisioningState = ptr.To(provisioningStateDeleting) - expectedVMSSVMs[2].ProvisioningState = ptr.To(provisioningStateDeleting) - mockVMSSVMClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup, "test-asg", gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes() + if orchMode == armcompute.OrchestrationModeUniform { + expectedVMSSVMs[0].Properties.ProvisioningState = ptr.To(provisioningStateDeleting) + expectedVMSSVMs[2].Properties.ProvisioningState = ptr.To(provisioningStateDeleting) + mockVMSSVMClient.EXPECT().NewListPager(manager.config.ResourceGroup, "test-asg", gomock.Any()).DoAndReturn(func(string, string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetVMsClientListResponse] { + return getFakeVMSSVMListPager(expectedVMSSVMs) + }).AnyTimes() } else { - expectedVMs[0].ProvisioningState = ptr.To(provisioningStateDeleting) - expectedVMs[2].ProvisioningState = ptr.To(provisioningStateDeleting) - mockVMClient.EXPECT().ListVmssFlexVMsWithoutInstanceView(gomock.Any(), "test-asg").Return(expectedVMs, nil).AnyTimes() + expectedVMs[0].Properties.ProvisioningState = ptr.To(provisioningStateDeleting) + expectedVMs[2].Properties.ProvisioningState = ptr.To(provisioningStateDeleting) + // TODO: Use appropriate method instead of ListVmssFlexVMsWithoutInstanceView + mockVMClient.EXPECT().NewListPager(manager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachinesClientListResponse] { + return getFakeVMListPager(expectedVMs) + }).AnyTimes() } err = manager.forceRefresh() @@ -792,27 +865,27 @@ func TestScaleSetDeleteNodeUnregistered(t *testing.T) { cases := []struct { name string - orchestrationMode compute.OrchestrationMode + orchestrationMode armcompute.OrchestrationMode enableForceDelete bool }{ { name: "uniform, force delete enabled", - orchestrationMode: compute.Uniform, + orchestrationMode: armcompute.OrchestrationModeUniform, enableForceDelete: true, }, { name: "uniform, force delete disabled", - orchestrationMode: compute.Uniform, + orchestrationMode: armcompute.OrchestrationModeUniform, enableForceDelete: false, }, { name: "flexible, force delete enabled", - orchestrationMode: compute.Flexible, + orchestrationMode: armcompute.OrchestrationModeFlexible, enableForceDelete: true, }, { name: "flexible, force delete disabled", - orchestrationMode: compute.Flexible, + orchestrationMode: armcompute.OrchestrationModeFlexible, enableForceDelete: false, }, } @@ -827,22 +900,32 @@ func TestScaleSetDeleteNodeUnregistered(t *testing.T) { manager.config.EnableForceDelete = enableForceDelete expectedScaleSets := newTestVMSSList(vmssCapacity, vmssName, "eastus", orchMode) - mockVMSSClient := mockvmssclient.NewMockInterface(ctrl) - mockVMSSClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup).Return(expectedScaleSets, nil).Times(2) - mockVMSSClient.EXPECT().DeleteInstancesAsync(gomock.Any(), manager.config.ResourceGroup, gomock.Any(), gomock.Any(), enableForceDelete).Return(nil, nil) - mockVMSSClient.EXPECT().WaitForDeleteInstancesResult(gomock.Any(), gomock.Any(), manager.config.ResourceGroup).Return(&http.Response{StatusCode: http.StatusOK}, nil).AnyTimes() + mockVMSSClient := NewMockVirtualMachineScaleSetsClient(ctrl) + mockVMSSClient.EXPECT().NewListPager(manager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, 
interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetsClientListResponse] { + return getFakeVMSSListPager(expectedScaleSets) + }).Times(2) + mockVMSSClient.EXPECT().BeginDeleteInstances(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, rg string, vmssName string, vmInstanceIDs armcompute.VirtualMachineScaleSetVMInstanceRequiredIDs, opts *armcompute.VirtualMachineScaleSetsClientBeginDeleteInstancesOptions) (*runtime.Poller[armcompute.VirtualMachineScaleSetsClientDeleteInstancesResponse], error) { + return getFakeVMSSDeleteInstancesPoller() + }).AnyTimes() manager.azClient.virtualMachineScaleSetsClient = mockVMSSClient - mockVMClient := mockvmclient.NewMockInterface(ctrl) - mockVMClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup).Return(expectedVMs, nil).AnyTimes() + mockVMClient := NewMockVirtualMachinesClient(ctrl) + mockVMClient.EXPECT().NewListPager(manager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachinesClientListResponse] { + return getFakeVMListPager(expectedVMs) + }).AnyTimes() manager.azClient.virtualMachinesClient = mockVMClient - if orchMode == compute.Uniform { - mockVMSSVMClient := mockvmssvmclient.NewMockInterface(ctrl) - mockVMSSVMClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup, "test-asg", gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes() + if orchMode == armcompute.OrchestrationModeUniform { + mockVMSSVMClient := NewMockVirtualMachineScaleSetVMsClient(ctrl) + mockVMSSVMClient.EXPECT().NewListPager(manager.config.ResourceGroup, "test-asg", gomock.Any()).DoAndReturn(func(string, string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetVMsClientListResponse] { + return getFakeVMSSVMListPager(expectedVMSSVMs) + }).AnyTimes() manager.azClient.virtualMachineScaleSetVMsClient = mockVMSSVMClient } else { manager.config.EnableVmssFlexNodes = true - mockVMClient.EXPECT().ListVmssFlexVMsWithoutInstanceView(gomock.Any(), "test-asg").Return(expectedVMs, nil).AnyTimes() + // TODO: Use appropriate method instead of ListVmssFlexVMsWithoutInstanceView + mockVMClient.EXPECT().NewListPager(manager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachinesClientListResponse] { + return getFakeVMListPager(expectedVMs) + }).AnyTimes() } err := manager.forceRefresh() assert.NoError(t, err) @@ -906,30 +989,35 @@ func TestScaleSetDeleteInstancesWithForceDeleteEnabled(t *testing.T) { vmssName := "test-asg" var vmssCapacity int64 = 3 //hostGroupId := "test-hostGroup" - //hostGroup := &compute.SubResource{ + //hostGroup := &armcompute.SubResource{ // ID: &hostGroupId, //} - expectedScaleSets := []compute.VirtualMachineScaleSet{ + expectedScaleSets := []armcompute.VirtualMachineScaleSet{ { Name: &vmssName, - Sku: &compute.Sku{ + SKU: &armcompute.SKU{ Capacity: &vmssCapacity, }, - VirtualMachineScaleSetProperties: &compute.VirtualMachineScaleSetProperties{ - OrchestrationMode: compute.Uniform, + Properties: &armcompute.VirtualMachineScaleSetProperties{ + OrchestrationMode: ptr.To(armcompute.OrchestrationModeUniform), }, }, } expectedVMSSVMs := newTestVMSSVMList(3) - mockVMSSClient := mockvmssclient.NewMockInterface(ctrl) - mockVMSSClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup).Return(expectedScaleSets, nil).Times(2) - mockVMSSClient.EXPECT().DeleteInstancesAsync(gomock.Any(), manager.config.ResourceGroup, gomock.Any(), gomock.Any(), true).Return(nil, nil) 
- mockVMSSClient.EXPECT().WaitForDeleteInstancesResult(gomock.Any(), gomock.Any(), manager.config.ResourceGroup).Return(&http.Response{StatusCode: http.StatusOK}, nil).AnyTimes() + mockVMSSClient := NewMockVirtualMachineScaleSetsClient(ctrl) + mockVMSSClient.EXPECT().NewListPager(manager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetsClientListResponse] { + return getFakeVMSSListPager(expectedScaleSets) + }).Times(2) + mockVMSSClient.EXPECT().BeginDeleteInstances(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, rg string, vmssName string, vmInstanceIDs armcompute.VirtualMachineScaleSetVMInstanceRequiredIDs, opts *armcompute.VirtualMachineScaleSetsClientBeginDeleteInstancesOptions) (*runtime.Poller[armcompute.VirtualMachineScaleSetsClientDeleteInstancesResponse], error) { + return getFakeVMSSDeleteInstancesPoller() + }).AnyTimes() manager.azClient.virtualMachineScaleSetsClient = mockVMSSClient - mockVMSSVMClient := mockvmssvmclient.NewMockInterface(ctrl) - mockVMSSVMClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup, "test-asg", gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes() + mockVMSSVMClient := NewMockVirtualMachineScaleSetVMsClient(ctrl) + mockVMSSVMClient.EXPECT().NewListPager(manager.config.ResourceGroup, "test-asg", gomock.Any()).DoAndReturn(func(string, string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetVMsClientListResponse] { + return getFakeVMSSVMListPager(expectedVMSSVMs) + }).AnyTimes() manager.azClient.virtualMachineScaleSetVMsClient = mockVMSSVMClient err := manager.forceRefresh() assert.NoError(t, err) @@ -970,21 +1058,25 @@ func TestScaleSetDeleteInstancesWithForceDeleteEnabled(t *testing.T) { err = scaleSet.DeleteNodes(nodesToDelete) assert.NoError(t, err) vmssCapacity = 1 - expectedScaleSets = []compute.VirtualMachineScaleSet{ + expectedScaleSets = []armcompute.VirtualMachineScaleSet{ { Name: &vmssName, - Sku: &compute.Sku{ + SKU: &armcompute.SKU{ Capacity: &vmssCapacity, }, - VirtualMachineScaleSetProperties: &compute.VirtualMachineScaleSetProperties{ - OrchestrationMode: compute.Uniform, + Properties: &armcompute.VirtualMachineScaleSetProperties{ + OrchestrationMode: ptr.To(armcompute.OrchestrationModeUniform), }, }, } - mockVMSSClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup).Return(expectedScaleSets, nil).AnyTimes() - expectedVMSSVMs[0].ProvisioningState = ptr.To(string(compute.GalleryProvisioningStateDeleting)) - expectedVMSSVMs[2].ProvisioningState = ptr.To(string(compute.GalleryProvisioningStateDeleting)) - mockVMSSVMClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup, "test-asg", gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes() + mockVMSSClient.EXPECT().NewListPager(manager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetsClientListResponse] { + return getFakeVMSSListPager(expectedScaleSets) + }).AnyTimes() + expectedVMSSVMs[0].Properties.ProvisioningState = ptr.To(string(armcompute.GalleryProvisioningStateDeleting)) + expectedVMSSVMs[2].Properties.ProvisioningState = ptr.To(string(armcompute.GalleryProvisioningStateDeleting)) + mockVMSSVMClient.EXPECT().NewListPager(manager.config.ResourceGroup, "test-asg", gomock.Any()).DoAndReturn(func(string, string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetVMsClientListResponse] { + return getFakeVMSSVMListPager(expectedVMSSVMs) + 
}).AnyTimes() err = manager.forceRefresh() assert.NoError(t, err) @@ -1019,33 +1111,37 @@ func TestScaleSetDeleteNoConflictRequest(t *testing.T) { manager := newTestAzureManager(t) - expectedVMSSVMs := []compute.VirtualMachineScaleSetVM{ + expectedVMSSVMs := []armcompute.VirtualMachineScaleSetVM{ { ID: ptr.To(fakeVirtualMachineScaleSetVMID), InstanceID: ptr.To("0"), - VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{ + Properties: &armcompute.VirtualMachineScaleSetVMProperties{ VMID: ptr.To("123E4567-E89B-12D3-A456-426655440000"), ProvisioningState: ptr.To("Deleting"), }, }, } - expectedScaleSets := []compute.VirtualMachineScaleSet{ + expectedScaleSets := []armcompute.VirtualMachineScaleSet{ { Name: &vmssName, - Sku: &compute.Sku{ + SKU: &armcompute.SKU{ Capacity: &vmssCapacity, }, - VirtualMachineScaleSetProperties: &compute.VirtualMachineScaleSetProperties{ - OrchestrationMode: compute.Uniform, + Properties: &armcompute.VirtualMachineScaleSetProperties{ + OrchestrationMode: ptr.To(armcompute.OrchestrationModeUniform), }, }, } - mockVMSSClient := mockvmssclient.NewMockInterface(ctrl) - mockVMSSClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup).Return(expectedScaleSets, nil).AnyTimes() + mockVMSSClient := NewMockVirtualMachineScaleSetsClient(ctrl) + mockVMSSClient.EXPECT().NewListPager(manager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetsClientListResponse] { + return getFakeVMSSListPager(expectedScaleSets) + }).AnyTimes() manager.azClient.virtualMachineScaleSetsClient = mockVMSSClient - mockVMSSVMClient := mockvmssvmclient.NewMockInterface(ctrl) - mockVMSSVMClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup, "test-asg", gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes() + mockVMSSVMClient := NewMockVirtualMachineScaleSetVMsClient(ctrl) + mockVMSSVMClient.EXPECT().NewListPager(manager.config.ResourceGroup, "test-asg", gomock.Any()).DoAndReturn(func(string, string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetVMsClientListResponse] { + return getFakeVMSSVMListPager(expectedVMSSVMs) + }).AnyTimes() manager.azClient.virtualMachineScaleSetVMsClient = mockVMSSVMClient resourceLimiter := cloudprovider.NewResourceLimiter( @@ -1093,7 +1189,7 @@ func TestAgentPoolDebug(t *testing.T) { func TestScaleSetNodes(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - orchestrationModes := [2]compute.OrchestrationMode{compute.Uniform, compute.Flexible} + orchestrationModes := [2]armcompute.OrchestrationMode{armcompute.OrchestrationModeUniform, armcompute.OrchestrationModeFlexible} expectedVMSSVMs := newTestVMSSVMList(3) expectedVMs := newTestVMList(3) @@ -1102,21 +1198,31 @@ func TestScaleSetNodes(t *testing.T) { expectedScaleSets := newTestVMSSList(3, "test-asg", "eastus", orchMode) provider := newTestProvider(t) - mockVMSSClient := mockvmssclient.NewMockInterface(ctrl) - mockVMSSClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup).Return(expectedScaleSets, nil).AnyTimes() + mockVMSSClient := NewMockVirtualMachineScaleSetsClient(ctrl) + mockVMSSClient.EXPECT().NewListPager(provider.azureManager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetsClientListResponse] { + return getFakeVMSSListPager(expectedScaleSets) + }).AnyTimes() provider.azureManager.azClient.virtualMachineScaleSetsClient = mockVMSSClient - mockVMClient := 
mockvmclient.NewMockInterface(ctrl) - mockVMClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup).Return([]compute.VirtualMachine{}, nil).AnyTimes() + + mockVMClient := NewMockVirtualMachinesClient(ctrl) provider.azureManager.azClient.virtualMachinesClient = mockVMClient - if orchMode == compute.Uniform { - mockVMSSVMClient := mockvmssvmclient.NewMockInterface(ctrl) - mockVMSSVMClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup, "test-asg", gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes() + if orchMode == armcompute.OrchestrationModeUniform { + mockVMClient.EXPECT().NewListPager(provider.azureManager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachinesClientListResponse] { + return getFakeVMListPager([]armcompute.VirtualMachine{}) + }).AnyTimes() + mockVMSSVMClient := NewMockVirtualMachineScaleSetVMsClient(ctrl) + mockVMSSVMClient.EXPECT().NewListPager(provider.azureManager.config.ResourceGroup, "test-asg", gomock.Any()).DoAndReturn(func(string, string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetVMsClientListResponse] { + return getFakeVMSSVMListPager(expectedVMSSVMs) + }).AnyTimes() provider.azureManager.azClient.virtualMachineScaleSetVMsClient = mockVMSSVMClient } else { provider.azureManager.config.EnableVmssFlexNodes = true - mockVMClient.EXPECT().ListVmssFlexVMsWithoutInstanceView(gomock.Any(), "test-asg").Return(expectedVMs, nil).AnyTimes() + // For flexible orchestration, VMs are listed with a filter + mockVMClient.EXPECT().NewListPager(provider.azureManager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachinesClientListResponse] { + return getFakeVMListPager(expectedVMs) + }).AnyTimes() } registered := provider.azureManager.RegisterNodeGroup( @@ -1144,7 +1250,7 @@ func TestScaleSetNodes(t *testing.T) { assert.NoError(t, err) assert.Equal(t, len(instances), 3) - if orchMode == compute.Uniform { + if orchMode == armcompute.OrchestrationModeUniform { assert.Equal(t, instances[0], cloudprovider.Instance{Id: azurePrefix + fmt.Sprintf(fakeVirtualMachineScaleSetVMID, 0)}) assert.Equal(t, instances[1], cloudprovider.Instance{Id: azurePrefix + fmt.Sprintf(fakeVirtualMachineScaleSetVMID, 1)}) @@ -1166,17 +1272,24 @@ func TestScaleSetEnableVmssFlexNodesFlag(t *testing.T) { defer ctrl.Finish() expectedVMs := newTestVMList(3) - expectedScaleSets := newTestVMSSList(3, "test-asg", "eastus", compute.Flexible) + expectedScaleSets := newTestVMSSList(3, "test-asg", "eastus", armcompute.OrchestrationModeFlexible) provider := newTestProvider(t) - mockVMSSClient := mockvmssclient.NewMockInterface(ctrl) - mockVMSSClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup).Return(expectedScaleSets, nil).AnyTimes() + mockVMSSClient := NewMockVirtualMachineScaleSetsClient(ctrl) + mockVMSSClient.EXPECT().NewListPager(provider.azureManager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetsClientListResponse] { + return getFakeVMSSListPager(expectedScaleSets) + }).AnyTimes() provider.azureManager.config.EnableVmssFlexNodes = false provider.azureManager.azClient.virtualMachineScaleSetsClient = mockVMSSClient - mockVMClient := mockvmclient.NewMockInterface(ctrl) - - mockVMClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup).Return(expectedVMs, nil).AnyTimes() - 
mockVMClient.EXPECT().ListVmssFlexVMsWithoutInstanceView(gomock.Any(), testASG).Return(expectedVMs, nil).AnyTimes() + mockVMClient := NewMockVirtualMachinesClient(ctrl) + + mockVMClient.EXPECT().NewListPager(provider.azureManager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachinesClientListResponse] { + return getFakeVMListPager(expectedVMs) + }).AnyTimes() + // TODO: Use appropriate method instead of ListVmssFlexVMsWithoutInstanceView + mockVMClient.EXPECT().NewListPager(provider.azureManager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachinesClientListResponse] { + return getFakeVMListPager(expectedVMs) + }).AnyTimes() provider.azureManager.azClient.virtualMachinesClient = mockVMClient provider.azureManager.RegisterNodeGroup( @@ -1195,11 +1308,13 @@ func TestScaleSetTemplateNodeInfo(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - expectedScaleSets := newTestVMSSList(3, "test-asg", "eastus", compute.Uniform) + expectedScaleSets := newTestVMSSList(3, "test-asg", "eastus", armcompute.OrchestrationModeUniform) provider := newTestProvider(t) - mockVMSSClient := mockvmssclient.NewMockInterface(ctrl) - mockVMSSClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup).Return(expectedScaleSets, nil).AnyTimes() + mockVMSSClient := NewMockVirtualMachineScaleSetsClient(ctrl) + mockVMSSClient.EXPECT().NewListPager(provider.azureManager.config.ResourceGroup, gomock.Any()).DoAndReturn(func(string, interface{}) *runtime.Pager[armcompute.VirtualMachineScaleSetsClientListResponse] { + return getFakeVMSSListPager(expectedScaleSets) + }).AnyTimes() provider.azureManager.azClient.virtualMachineScaleSetsClient = mockVMSSClient err := provider.azureManager.forceRefresh() assert.NoError(t, err) @@ -1319,19 +1434,19 @@ func TestScaleSetTemplateNodeInfo(t *testing.T) { } func TestScaleSetCseErrors(t *testing.T) { errorMessage := ptr.To("Error Message Test") - vmssVMs := compute.VirtualMachineScaleSetVM{ + vmssVMs := armcompute.VirtualMachineScaleSetVM{ Name: ptr.To("vmTest"), ID: ptr.To(fakeVirtualMachineScaleSetVMID), InstanceID: ptr.To("0"), - VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{ + Properties: &armcompute.VirtualMachineScaleSetVMProperties{ VMID: ptr.To("123E4567-E89B-12D3-A456-426655440000"), ProvisioningState: ptr.To("Succeeded"), - InstanceView: &compute.VirtualMachineScaleSetVMInstanceView{ - Extensions: &[]compute.VirtualMachineExtensionInstanceView{ + InstanceView: &armcompute.VirtualMachineScaleSetVMInstanceView{ + Extensions: []*armcompute.VirtualMachineExtensionInstanceView{ { - Statuses: &[]compute.InstanceViewStatus{ + Statuses: []*armcompute.InstanceViewStatus{ { - Level: "Error", + Level: ptr.To(armcompute.StatusLevelTypesError), Message: errorMessage, }, }, @@ -1353,26 +1468,26 @@ func TestScaleSetCseErrors(t *testing.T) { t.Run("getCSEErrorMessages test with CSE error in VM extensions", func(t *testing.T) { expectedCSEWErrorMessage := "Error Message Test" - (*vmssVMs.InstanceView.Extensions)[0].Name = ptr.To(vmssCSEExtensionName) - actualCSEErrorMessage, actualCSEFailureBool := scaleSet.cseErrors(vmssVMs.InstanceView.Extensions) + vmssVMs.Properties.InstanceView.Extensions[0].Name = ptr.To(vmssCSEExtensionName) + actualCSEErrorMessage, actualCSEFailureBool := scaleSet.cseErrors(vmssVMs.Properties.InstanceView.Extensions) assert.True(t, actualCSEFailureBool) assert.Equal(t, 
[]string{expectedCSEWErrorMessage}, actualCSEErrorMessage) }) t.Run("getCSEErrorMessages test with no CSE error in VM extensions", func(t *testing.T) { - (*vmssVMs.InstanceView.Extensions)[0].Name = ptr.To("notCSEExtension") - actualCSEErrorMessage, actualCSEFailureBool := scaleSet.cseErrors(vmssVMs.InstanceView.Extensions) + vmssVMs.Properties.InstanceView.Extensions[0].Name = ptr.To("notCSEExtension") + actualCSEErrorMessage, actualCSEFailureBool := scaleSet.cseErrors(vmssVMs.Properties.InstanceView.Extensions) assert.False(t, actualCSEFailureBool) assert.Equal(t, []string(nil), actualCSEErrorMessage) }) } -func newVMObjectWithState(provisioningState string, powerState string) *compute.VirtualMachineScaleSetVM { - return &compute.VirtualMachineScaleSetVM{ +func newVMObjectWithState(provisioningState string, powerState string) *armcompute.VirtualMachineScaleSetVM { + return &armcompute.VirtualMachineScaleSetVM{ ID: ptr.To("1"), // Beware; refactor if needed - VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{ + Properties: &armcompute.VirtualMachineScaleSetVMProperties{ ProvisioningState: ptr.To(provisioningState), - InstanceView: &compute.VirtualMachineScaleSetVMInstanceView{ - Statuses: &[]compute.InstanceViewStatus{ + InstanceView: &armcompute.VirtualMachineScaleSetVMInstanceView{ + Statuses: []*armcompute.InstanceViewStatus{ {Code: ptr.To(powerState)}, }, }, @@ -1384,21 +1499,21 @@ func newVMObjectWithState(provisioningState string, powerState string) *compute. func TestInstanceStatusFromProvisioningStateAndPowerState(t *testing.T) { t.Run("fast delete enablement = false", func(t *testing.T) { t.Run("provisioning state = failed, power state = starting", func(t *testing.T) { - status := instanceStatusFromProvisioningStateAndPowerState("1", ptr.To(string(compute.GalleryProvisioningStateFailed)), vmPowerStateStarting, false) + status := instanceStatusFromProvisioningStateAndPowerState("1", ptr.To(string(armcompute.GalleryProvisioningStateFailed)), vmPowerStateStarting, false) assert.NotNil(t, status) assert.Equal(t, cloudprovider.InstanceRunning, status.State) }) t.Run("provisioning state = failed, power state = running", func(t *testing.T) { - status := instanceStatusFromProvisioningStateAndPowerState("1", ptr.To(string(compute.GalleryProvisioningStateFailed)), vmPowerStateRunning, false) + status := instanceStatusFromProvisioningStateAndPowerState("1", ptr.To(string(armcompute.GalleryProvisioningStateFailed)), vmPowerStateRunning, false) assert.NotNil(t, status) assert.Equal(t, cloudprovider.InstanceRunning, status.State) }) t.Run("provisioning state = failed, power state = stopping", func(t *testing.T) { - status := instanceStatusFromProvisioningStateAndPowerState("1", ptr.To(string(compute.GalleryProvisioningStateFailed)), vmPowerStateStopping, false) + status := instanceStatusFromProvisioningStateAndPowerState("1", ptr.To(string(armcompute.GalleryProvisioningStateFailed)), vmPowerStateStopping, false) assert.NotNil(t, status) assert.Equal(t, cloudprovider.InstanceRunning, status.State) @@ -1406,21 +1521,21 @@ func TestInstanceStatusFromProvisioningStateAndPowerState(t *testing.T) { t.Run("provisioning state = failed, power state = stopped", func(t *testing.T) { - status := instanceStatusFromProvisioningStateAndPowerState("1", ptr.To(string(compute.GalleryProvisioningStateFailed)), vmPowerStateStopped, false) + status := instanceStatusFromProvisioningStateAndPowerState("1", ptr.To(string(armcompute.GalleryProvisioningStateFailed)), vmPowerStateStopped, 
false) assert.NotNil(t, status) assert.Equal(t, cloudprovider.InstanceRunning, status.State) }) t.Run("provisioning state = failed, power state = deallocated", func(t *testing.T) { - status := instanceStatusFromProvisioningStateAndPowerState("1", ptr.To(string(compute.GalleryProvisioningStateFailed)), vmPowerStateDeallocated, false) + status := instanceStatusFromProvisioningStateAndPowerState("1", ptr.To(string(armcompute.GalleryProvisioningStateFailed)), vmPowerStateDeallocated, false) assert.NotNil(t, status) assert.Equal(t, cloudprovider.InstanceRunning, status.State) }) t.Run("provisioning state = failed, power state = unknown", func(t *testing.T) { - status := instanceStatusFromProvisioningStateAndPowerState("1", ptr.To(string(compute.GalleryProvisioningStateFailed)), vmPowerStateUnknown, false) + status := instanceStatusFromProvisioningStateAndPowerState("1", ptr.To(string(armcompute.GalleryProvisioningStateFailed)), vmPowerStateUnknown, false) assert.NotNil(t, status) assert.Equal(t, cloudprovider.InstanceRunning, status.State) @@ -1429,21 +1544,21 @@ func TestInstanceStatusFromProvisioningStateAndPowerState(t *testing.T) { t.Run("fast delete enablement = true", func(t *testing.T) { t.Run("provisioning state = failed, power state = starting", func(t *testing.T) { - status := instanceStatusFromProvisioningStateAndPowerState("1", ptr.To(string(compute.GalleryProvisioningStateFailed)), vmPowerStateStarting, true) + status := instanceStatusFromProvisioningStateAndPowerState("1", ptr.To(string(armcompute.GalleryProvisioningStateFailed)), vmPowerStateStarting, true) assert.NotNil(t, status) assert.Equal(t, cloudprovider.InstanceRunning, status.State) }) t.Run("provisioning state = failed, power state = running", func(t *testing.T) { - status := instanceStatusFromProvisioningStateAndPowerState("1", ptr.To(string(compute.GalleryProvisioningStateFailed)), vmPowerStateRunning, true) + status := instanceStatusFromProvisioningStateAndPowerState("1", ptr.To(string(armcompute.GalleryProvisioningStateFailed)), vmPowerStateRunning, true) assert.NotNil(t, status) assert.Equal(t, cloudprovider.InstanceRunning, status.State) }) t.Run("provisioning state = failed, power state = stopping", func(t *testing.T) { - status := instanceStatusFromProvisioningStateAndPowerState("1", ptr.To(string(compute.GalleryProvisioningStateFailed)), vmPowerStateStopping, true) + status := instanceStatusFromProvisioningStateAndPowerState("1", ptr.To(string(armcompute.GalleryProvisioningStateFailed)), vmPowerStateStopping, true) assert.NotNil(t, status) assert.Equal(t, cloudprovider.InstanceCreating, status.State) @@ -1451,7 +1566,7 @@ func TestInstanceStatusFromProvisioningStateAndPowerState(t *testing.T) { }) t.Run("provisioning state = failed, power state = stopped", func(t *testing.T) { - status := instanceStatusFromProvisioningStateAndPowerState("1", ptr.To(string(compute.GalleryProvisioningStateFailed)), vmPowerStateStopped, true) + status := instanceStatusFromProvisioningStateAndPowerState("1", ptr.To(string(armcompute.GalleryProvisioningStateFailed)), vmPowerStateStopped, true) assert.NotNil(t, status) assert.Equal(t, cloudprovider.InstanceCreating, status.State) @@ -1459,7 +1574,7 @@ func TestInstanceStatusFromProvisioningStateAndPowerState(t *testing.T) { }) t.Run("provisioning state = failed, power state = deallocated", func(t *testing.T) { - status := instanceStatusFromProvisioningStateAndPowerState("1", ptr.To(string(compute.GalleryProvisioningStateFailed)), vmPowerStateDeallocated, true) + status := 
instanceStatusFromProvisioningStateAndPowerState("1", ptr.To(string(armcompute.GalleryProvisioningStateFailed)), vmPowerStateDeallocated, true) assert.NotNil(t, status) assert.Equal(t, cloudprovider.InstanceCreating, status.State) @@ -1467,7 +1582,7 @@ func TestInstanceStatusFromProvisioningStateAndPowerState(t *testing.T) { }) t.Run("provisioning state = failed, power state = unknown", func(t *testing.T) { - status := instanceStatusFromProvisioningStateAndPowerState("1", ptr.To(string(compute.GalleryProvisioningStateFailed)), vmPowerStateUnknown, true) + status := instanceStatusFromProvisioningStateAndPowerState("1", ptr.To(string(armcompute.GalleryProvisioningStateFailed)), vmPowerStateUnknown, true) assert.NotNil(t, status) assert.Equal(t, cloudprovider.InstanceCreating, status.State) @@ -1475,3 +1590,33 @@ func TestInstanceStatusFromProvisioningStateAndPowerState(t *testing.T) { }) }) } + +func getFakeVMSSPoller() (*runtime.Poller[armcompute.VirtualMachineScaleSetsClientCreateOrUpdateResponse], error) { + handler := &fakePollerHandler[armcompute.VirtualMachineScaleSetsClientCreateOrUpdateResponse]{ + done: true, + result: armcompute.VirtualMachineScaleSetsClientCreateOrUpdateResponse{}, + } + + return runtime.NewPoller( + &http.Response{StatusCode: http.StatusAccepted}, + runtime.Pipeline{}, + &runtime.NewPollerOptions[armcompute.VirtualMachineScaleSetsClientCreateOrUpdateResponse]{ + Handler: handler, + }, + ) +} + +func getFakeVMSSDeleteInstancesPoller() (*runtime.Poller[armcompute.VirtualMachineScaleSetsClientDeleteInstancesResponse], error) { + handler := &fakePollerHandler[armcompute.VirtualMachineScaleSetsClientDeleteInstancesResponse]{ + done: true, + result: armcompute.VirtualMachineScaleSetsClientDeleteInstancesResponse{}, + } + + return runtime.NewPoller( + &http.Response{StatusCode: http.StatusAccepted}, + runtime.Pipeline{}, + &runtime.NewPollerOptions[armcompute.VirtualMachineScaleSetsClientDeleteInstancesResponse]{ + Handler: handler, + }, + ) +} diff --git a/cluster-autoscaler/cloudprovider/azure/azure_template.go b/cluster-autoscaler/cloudprovider/azure/azure_template.go index bba2641a60cb..58724a880347 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_template.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_template.go @@ -24,8 +24,8 @@ import ( "strings" "time" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v5" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -99,7 +99,7 @@ type VMSSNodeTemplate struct { InputLabels map[string]string InputTaints string Tags map[string]*string - OSDisk *compute.VirtualMachineScaleSetOSDisk + OSDisk *armcompute.VirtualMachineScaleSetOSDisk } // NodeTemplate represents a template for an Azure node @@ -112,22 +112,22 @@ type NodeTemplate struct { VMSSNodeTemplate *VMSSNodeTemplate } -func buildNodeTemplateFromVMSS(vmss compute.VirtualMachineScaleSet, inputLabels map[string]string, inputTaints string) (NodeTemplate, error) { +func buildNodeTemplateFromVMSS(vmss armcompute.VirtualMachineScaleSet, inputLabels map[string]string, inputTaints string) (NodeTemplate, error) { instanceOS := cloudprovider.DefaultOS - if vmss.VirtualMachineProfile != nil && - vmss.VirtualMachineProfile.OsProfile != nil && - vmss.VirtualMachineProfile.OsProfile.WindowsConfiguration != nil 
{ + if vmss.Properties != nil && vmss.Properties.VirtualMachineProfile != nil && + vmss.Properties.VirtualMachineProfile.OSProfile != nil && + vmss.Properties.VirtualMachineProfile.OSProfile.WindowsConfiguration != nil { instanceOS = "windows" } - var osDisk *compute.VirtualMachineScaleSetOSDisk - if vmss.VirtualMachineProfile != nil && - vmss.VirtualMachineProfile.StorageProfile != nil && - vmss.VirtualMachineProfile.StorageProfile.OsDisk != nil { - osDisk = vmss.VirtualMachineProfile.StorageProfile.OsDisk + var osDisk *armcompute.VirtualMachineScaleSetOSDisk + if vmss.Properties != nil && vmss.Properties.VirtualMachineProfile != nil && + vmss.Properties.VirtualMachineProfile.StorageProfile != nil && + vmss.Properties.VirtualMachineProfile.StorageProfile.OSDisk != nil { + osDisk = vmss.Properties.VirtualMachineProfile.StorageProfile.OSDisk } - if vmss.Sku == nil || vmss.Sku.Name == nil { + if vmss.SKU == nil || vmss.SKU.Name == nil { return NodeTemplate{}, fmt.Errorf("VMSS %s has no SKU", ptr.Deref(vmss.Name, "")) } @@ -137,11 +137,15 @@ func buildNodeTemplateFromVMSS(vmss compute.VirtualMachineScaleSet, inputLabels zones := []string{} if vmss.Zones != nil { - zones = *vmss.Zones + for _, zone := range vmss.Zones { + if zone != nil { + zones = append(zones, *zone) + } + } } return NodeTemplate{ - SkuName: *vmss.Sku.Name, + SkuName: *vmss.SKU.Name, Location: *vmss.Location, Zones: zones, @@ -342,16 +346,16 @@ func processVMSSTemplate(template NodeTemplate, nodeName string, node apiv1.Node // Add the storage profile and storage tier labels for vmss node if template.VMSSNodeTemplate.OSDisk != nil { // ephemeral - if template.VMSSNodeTemplate.OSDisk.DiffDiskSettings != nil && template.VMSSNodeTemplate.OSDisk.DiffDiskSettings.Option == compute.Local { + if template.VMSSNodeTemplate.OSDisk.DiffDiskSettings != nil && template.VMSSNodeTemplate.OSDisk.DiffDiskSettings.Option != nil && *template.VMSSNodeTemplate.OSDisk.DiffDiskSettings.Option == armcompute.DiffDiskOptionsLocal { labels[legacyStorageProfileNodeLabelKey] = "ephemeral" labels[storageProfileNodeLabelKey] = "ephemeral" } else { labels[legacyStorageProfileNodeLabelKey] = "managed" labels[storageProfileNodeLabelKey] = "managed" } - if template.VMSSNodeTemplate.OSDisk.ManagedDisk != nil { - labels[legacyStorageTierNodeLabelKey] = string(template.VMSSNodeTemplate.OSDisk.ManagedDisk.StorageAccountType) - labels[storageTierNodeLabelKey] = string(template.VMSSNodeTemplate.OSDisk.ManagedDisk.StorageAccountType) + if template.VMSSNodeTemplate.OSDisk.ManagedDisk != nil && template.VMSSNodeTemplate.OSDisk.ManagedDisk.StorageAccountType != nil { + labels[legacyStorageTierNodeLabelKey] = string(*template.VMSSNodeTemplate.OSDisk.ManagedDisk.StorageAccountType) + labels[storageTierNodeLabelKey] = string(*template.VMSSNodeTemplate.OSDisk.ManagedDisk.StorageAccountType) } } diff --git a/cluster-autoscaler/cloudprovider/azure/azure_template_test.go b/cluster-autoscaler/cloudprovider/azure/azure_template_test.go index 99fc79e730a1..d70d712a52e1 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_template_test.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_template_test.go @@ -21,9 +21,8 @@ import ( "strings" "testing" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v5" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" - "github.com/Azure/go-autorest/autorest" 
"github.com/stretchr/testify/assert" apiv1 "k8s.io/api/core/v1" @@ -172,13 +171,12 @@ func TestExtractAllocatableResourcesFromScaleSet(t *testing.T) { func TestTopologyFromScaleSet(t *testing.T) { testNodeName := "test-node" testSkuName := "test-sku" - testVmss := compute.VirtualMachineScaleSet{ - Response: autorest.Response{}, - Sku: &compute.Sku{Name: &testSkuName}, - Plan: nil, - VirtualMachineScaleSetProperties: &compute.VirtualMachineScaleSetProperties{ - VirtualMachineProfile: &compute.VirtualMachineScaleSetVMProfile{OsProfile: nil}}, - Zones: &[]string{"1", "2", "3"}, + testVmss := armcompute.VirtualMachineScaleSet{ + SKU: &armcompute.SKU{Name: &testSkuName}, + Plan: nil, + Properties: &armcompute.VirtualMachineScaleSetProperties{ + VirtualMachineProfile: &armcompute.VirtualMachineScaleSetVMProfile{OSProfile: nil}}, + Zones: []*string{ptr.To("1"), ptr.To("2"), ptr.To("3")}, Location: ptr.To("westus"), } expectedZoneValues := []string{"westus-1", "westus-2", "westus-3"} @@ -200,12 +198,11 @@ func TestTopologyFromScaleSet(t *testing.T) { func TestEmptyTopologyFromScaleSet(t *testing.T) { testNodeName := "test-node" testSkuName := "test-sku" - testVmss := compute.VirtualMachineScaleSet{ - Response: autorest.Response{}, - Sku: &compute.Sku{Name: &testSkuName}, - Plan: nil, - VirtualMachineScaleSetProperties: &compute.VirtualMachineScaleSetProperties{ - VirtualMachineProfile: &compute.VirtualMachineScaleSetVMProfile{OsProfile: nil}}, + testVmss := armcompute.VirtualMachineScaleSet{ + SKU: &armcompute.SKU{Name: &testSkuName}, + Plan: nil, + Properties: &armcompute.VirtualMachineScaleSetProperties{ + VirtualMachineProfile: &armcompute.VirtualMachineScaleSetVMProfile{OSProfile: nil}}, Location: ptr.To("westus"), } @@ -297,17 +294,16 @@ func TestBuildNodeFromTemplateWithLabelPrediction(t *testing.T) { testSkuName := "Standard_DS2_v2" testNodeName := "test-node" - vmss := compute.VirtualMachineScaleSet{ - Response: autorest.Response{}, - Sku: &compute.Sku{Name: &testSkuName}, - Plan: nil, - VirtualMachineScaleSetProperties: &compute.VirtualMachineScaleSetProperties{ - VirtualMachineProfile: &compute.VirtualMachineScaleSetVMProfile{ - StorageProfile: &compute.VirtualMachineScaleSetStorageProfile{ - OsDisk: &compute.VirtualMachineScaleSetOSDisk{ + vmss := armcompute.VirtualMachineScaleSet{ + SKU: &armcompute.SKU{Name: &testSkuName}, + Plan: nil, + Properties: &armcompute.VirtualMachineScaleSetProperties{ + VirtualMachineProfile: &armcompute.VirtualMachineScaleSetVMProfile{ + StorageProfile: &armcompute.VirtualMachineScaleSetStorageProfile{ + OSDisk: &armcompute.VirtualMachineScaleSetOSDisk{ DiffDiskSettings: nil, // This makes it managed - ManagedDisk: &compute.VirtualMachineScaleSetManagedDiskParameters{ - StorageAccountType: compute.StorageAccountTypesPremiumLRS, + ManagedDisk: &armcompute.VirtualMachineScaleSetManagedDiskParameters{ + StorageAccountType: ptr.To(armcompute.StorageAccountTypesPremiumLRS), }, }, }, @@ -316,7 +312,7 @@ func TestBuildNodeFromTemplateWithLabelPrediction(t *testing.T) { Tags: map[string]*string{ "poolName": &poolName, }, - Zones: &[]string{"1", "2"}, + Zones: []*string{ptr.To("1"), ptr.To("2")}, Location: ptr.To("westus"), } @@ -341,18 +337,17 @@ func TestBuildNodeFromTemplateWithEphemeralStorage(t *testing.T) { testNodeName := "test-node" diskSizeGB := int32(128) - vmss := compute.VirtualMachineScaleSet{ - Response: autorest.Response{}, - Sku: &compute.Sku{Name: &testSkuName}, - Plan: nil, - VirtualMachineScaleSetProperties: 
&compute.VirtualMachineScaleSetProperties{ - VirtualMachineProfile: &compute.VirtualMachineScaleSetVMProfile{ - StorageProfile: &compute.VirtualMachineScaleSetStorageProfile{ - OsDisk: &compute.VirtualMachineScaleSetOSDisk{ + vmss := armcompute.VirtualMachineScaleSet{ + SKU: &armcompute.SKU{Name: &testSkuName}, + Plan: nil, + Properties: &armcompute.VirtualMachineScaleSetProperties{ + VirtualMachineProfile: &armcompute.VirtualMachineScaleSetVMProfile{ + StorageProfile: &armcompute.VirtualMachineScaleSetStorageProfile{ + OSDisk: &armcompute.VirtualMachineScaleSetOSDisk{ DiskSizeGB: &diskSizeGB, DiffDiskSettings: nil, // This makes it managed - ManagedDisk: &compute.VirtualMachineScaleSetManagedDiskParameters{ - StorageAccountType: compute.StorageAccountTypesPremiumLRS, + ManagedDisk: &armcompute.VirtualMachineScaleSetManagedDiskParameters{ + StorageAccountType: ptr.To(armcompute.StorageAccountTypesPremiumLRS), }, }, }, @@ -361,7 +356,7 @@ func TestBuildNodeFromTemplateWithEphemeralStorage(t *testing.T) { Tags: map[string]*string{ "poolName": &poolName, }, - Zones: &[]string{"1", "2"}, + Zones: []*string{ptr.To("1"), ptr.To("2")}, Location: ptr.To("westus"), } diff --git a/cluster-autoscaler/cloudprovider/azure/azure_test_helpers.go b/cluster-autoscaler/cloudprovider/azure/azure_test_helpers.go new file mode 100644 index 000000000000..ebd2d8be2d38 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/azure/azure_test_helpers.go @@ -0,0 +1,90 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package azure + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" +) + +// getFakeVMListPager creates a fake pager for listing VMs. +func getFakeVMListPager(vms []armcompute.VirtualMachine) *runtime.Pager[armcompute.VirtualMachinesClientListResponse] { + // Convert to pointers + vmPointers := make([]*armcompute.VirtualMachine, len(vms)) + for i := range vms { + vmPointers[i] = &vms[i] + } + + return runtime.NewPager(runtime.PagingHandler[armcompute.VirtualMachinesClientListResponse]{ + More: func(page armcompute.VirtualMachinesClientListResponse) bool { + return false + }, + Fetcher: func(ctx context.Context, page *armcompute.VirtualMachinesClientListResponse) (armcompute.VirtualMachinesClientListResponse, error) { + return armcompute.VirtualMachinesClientListResponse{ + VirtualMachineListResult: armcompute.VirtualMachineListResult{ + Value: vmPointers, + }, + }, nil + }, + }) +} + +// getFakeVMSSListPager creates a fake pager for listing VMSSs. 
+func getFakeVMSSListPager(vmsss []armcompute.VirtualMachineScaleSet) *runtime.Pager[armcompute.VirtualMachineScaleSetsClientListResponse] { + // Convert to pointers + vmssPointers := make([]*armcompute.VirtualMachineScaleSet, len(vmsss)) + for i := range vmsss { + vmssPointers[i] = &vmsss[i] + } + + return runtime.NewPager(runtime.PagingHandler[armcompute.VirtualMachineScaleSetsClientListResponse]{ + More: func(page armcompute.VirtualMachineScaleSetsClientListResponse) bool { + return false + }, + Fetcher: func(ctx context.Context, page *armcompute.VirtualMachineScaleSetsClientListResponse) (armcompute.VirtualMachineScaleSetsClientListResponse, error) { + return armcompute.VirtualMachineScaleSetsClientListResponse{ + VirtualMachineScaleSetListResult: armcompute.VirtualMachineScaleSetListResult{ + Value: vmssPointers, + }, + }, nil + }, + }) +} + +// getFakeVMSSVMListPager creates a fake pager for listing VMSS VMs. +func getFakeVMSSVMListPager(vms []armcompute.VirtualMachineScaleSetVM) *runtime.Pager[armcompute.VirtualMachineScaleSetVMsClientListResponse] { + // Convert to pointers + vmPointers := make([]*armcompute.VirtualMachineScaleSetVM, len(vms)) + for i := range vms { + vmPointers[i] = &vms[i] + } + + return runtime.NewPager(runtime.PagingHandler[armcompute.VirtualMachineScaleSetVMsClientListResponse]{ + More: func(page armcompute.VirtualMachineScaleSetVMsClientListResponse) bool { + return false + }, + Fetcher: func(ctx context.Context, page *armcompute.VirtualMachineScaleSetVMsClientListResponse) (armcompute.VirtualMachineScaleSetVMsClientListResponse, error) { + return armcompute.VirtualMachineScaleSetVMsClientListResponse{ + VirtualMachineScaleSetVMListResult: armcompute.VirtualMachineScaleSetVMListResult{ + Value: vmPointers, + }, + }, nil + }, + }) +} diff --git a/cluster-autoscaler/cloudprovider/azure/azure_util.go b/cluster-autoscaler/cloudprovider/azure/azure_util.go index 8e90464de6b0..e8e7749479b1 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_util.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_util.go @@ -19,6 +19,7 @@ package azure import ( "context" "encoding/json" + "errors" "fmt" "io/ioutil" "net/http" @@ -30,8 +31,10 @@ import ( "strings" "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" - azStorage "github.com/Azure/azure-sdk-for-go/storage" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" "github.com/Azure/go-autorest/autorest" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" @@ -117,22 +120,34 @@ func (util *AzUtil) DeleteBlob(accountName, vhdContainer, vhdBlob string) error ctx, cancel := getContextWithCancel() defer cancel() - storageKeysResult, rerr := util.manager.azClient.storageAccountsClient.ListKeys(ctx, util.manager.config.SubscriptionID, util.manager.config.ResourceGroup, accountName) + storageKeysResult, rerr := util.manager.azClient.storageAccountsClient.ListKeys(ctx, util.manager.config.ResourceGroup, accountName, nil) if rerr != nil { - return rerr.Error() + return rerr } - keys := *storageKeysResult.Keys - client, err := azStorage.NewBasicClientOnSovereignCloud(accountName, ptr.Deref(keys[0].Value, ""), util.manager.env) + keys := storageKeysResult.Keys + if len(keys) == 0 { + return fmt.Errorf("no storage keys found for account %s", accountName) + } + + // Construct service URL using the storage 
account endpoint + serviceURL := fmt.Sprintf("https://%s.blob.%s", accountName, util.manager.env.StorageEndpointSuffix) + + // Create a SharedKeyCredential + credential, err := azblob.NewSharedKeyCredential(accountName, ptr.Deref(keys[0].Value, "")) if err != nil { - return err + return fmt.Errorf("failed to create shared key credential: %w", err) } - bs := client.GetBlobService() - containerRef := bs.GetContainerReference(vhdContainer) - blobRef := containerRef.GetBlobReference(vhdBlob) + // Create a service client + serviceClient, err := azblob.NewClientWithSharedKeyCredential(serviceURL, credential, nil) + if err != nil { + return fmt.Errorf("failed to create service client: %w", err) + } - return blobRef.Delete(&azStorage.DeleteBlobOptions{}) + // Delete the blob + _, err = serviceClient.DeleteBlob(ctx, vhdContainer, vhdBlob, nil) + return err } // DeleteVirtualMachine deletes a VM and any associated OS disk @@ -140,28 +155,30 @@ func (util *AzUtil) DeleteVirtualMachine(rg string, name string) error { ctx, cancel := getContextWithCancel() defer cancel() - vm, rerr := util.manager.azClient.virtualMachinesClient.Get(ctx, rg, name, "") + vm, rerr := util.manager.azClient.virtualMachinesClient.Get(ctx, rg, name, nil) if rerr != nil { - if exists, _ := checkResourceExistsFromRetryError(rerr); !exists { + // Check if it's a 404 error indicating resource doesn't exist + var respErr *azcore.ResponseError + if errors.As(rerr, &respErr) && respErr.StatusCode == http.StatusNotFound { klog.V(2).Infof("VirtualMachine %s/%s has already been removed", rg, name) return nil } klog.Errorf("failed to get VM: %s/%s: %s", rg, name, rerr.Error()) - return rerr.Error() + return rerr } - vhd := vm.VirtualMachineProperties.StorageProfile.OsDisk.Vhd - managedDisk := vm.VirtualMachineProperties.StorageProfile.OsDisk.ManagedDisk + vhd := vm.Properties.StorageProfile.OSDisk.Vhd + managedDisk := vm.Properties.StorageProfile.OSDisk.ManagedDisk if vhd == nil && managedDisk == nil { klog.Errorf("failed to get a valid os disk URI for VM: %s/%s", rg, name) return fmt.Errorf("os disk does not have a VHD URI") } - osDiskName := vm.VirtualMachineProperties.StorageProfile.OsDisk.Name + osDiskName := vm.Properties.StorageProfile.OSDisk.Name var nicName string var err error - nicID := (*vm.VirtualMachineProperties.NetworkProfile.NetworkInterfaces)[0].ID + nicID := (vm.Properties.NetworkProfile.NetworkInterfaces)[0].ID if nicID == nil { klog.Warningf("NIC ID is not set for VM (%s/%s)", rg, name) } else { @@ -177,10 +194,13 @@ func (util *AzUtil) DeleteVirtualMachine(rg string, name string) error { defer deleteCancel() klog.Infof("waiting for VirtualMachine deletion: %s/%s", rg, name) - rerr = util.manager.azClient.virtualMachinesClient.Delete(deleteCtx, rg, name) - _, realErr := checkResourceExistsFromRetryError(rerr) - if realErr != nil { - return realErr + poller, rerr := util.manager.azClient.virtualMachinesClient.BeginDelete(deleteCtx, rg, name, nil) + if rerr != nil { + return rerr + } + _, rerr = poller.PollUntilDone(deleteCtx, &runtime.PollUntilDoneOptions{Frequency: 30 * time.Second}) + if rerr != nil { + return rerr } klog.V(2).Infof("VirtualMachine %s/%s removed", rg, name) @@ -189,10 +209,13 @@ func (util *AzUtil) DeleteVirtualMachine(rg string, name string) error { interfaceCtx, interfaceCancel := getContextWithCancel() defer interfaceCancel() klog.Infof("waiting for nic deletion: %s/%s", rg, nicName) - nicErr := util.manager.azClient.interfacesClient.Delete(interfaceCtx, rg, nicName) - _, realErr := 
checkResourceExistsFromRetryError(nicErr) - if realErr != nil { - return realErr + poller, nicErr := util.manager.azClient.interfacesClient.BeginDelete(interfaceCtx, rg, nicName, nil) + if nicErr != nil { + return nicErr + } + _, nicErr = poller.PollUntilDone(interfaceCtx, &runtime.PollUntilDoneOptions{Frequency: 30 * time.Second}) + if nicErr != nil { + return nicErr } klog.V(2).Infof("interface %s/%s removed", rg, nicName) } @@ -220,12 +243,15 @@ func (util *AzUtil) DeleteVirtualMachine(rg string, name string) error { klog.Infof("deleting managed disk: %s/%s", rg, *osDiskName) disksCtx, disksCancel := getContextWithCancel() defer disksCancel() - diskErr := util.manager.azClient.disksClient.Delete(disksCtx, util.manager.config.SubscriptionID, rg, *osDiskName) - _, realErr := checkResourceExistsFromRetryError(diskErr) - if realErr != nil { - return realErr + poller, diskErr := util.manager.azClient.disksClient.BeginDelete(disksCtx, rg, *osDiskName, nil) + if diskErr != nil { + return diskErr + } + _, diskErr = poller.PollUntilDone(disksCtx, &runtime.PollUntilDoneOptions{Frequency: 30 * time.Second}) + if diskErr != nil { + return diskErr } - klog.V(2).Infof("disk %s/%s removed", rg, *osDiskName) + klog.V(2).Infof("Managed disk %s/%s removed", rg, *osDiskName) } } return nil @@ -259,7 +285,7 @@ func normalizeForK8sVMASScalingUp(templateMap map[string]interface{}) error { if ok && resourceType == nsgResourceType { if nsgIndex != -1 { err := fmt.Errorf("found 2 resources with type %s in the template. There should only be 1", nsgResourceType) - klog.Errorf(err.Error()) + klog.Error(err.Error()) return err } nsgIndex = index @@ -267,7 +293,7 @@ func normalizeForK8sVMASScalingUp(templateMap map[string]interface{}) error { if ok && resourceType == rtResourceType { if rtIndex != -1 { err := fmt.Errorf("found 2 resources with type %s in the template. There should only be 1", rtResourceType) - klog.Warningf(err.Error()) + klog.Warning(err.Error()) return err } rtIndex = index @@ -296,7 +322,7 @@ func normalizeForK8sVMASScalingUp(templateMap map[string]interface{}) error { indexesToRemove := []int{} if nsgIndex == -1 { err := fmt.Errorf("found no resources with type %s in the template. There should have been 1", nsgResourceType) - klog.Errorf(err.Error()) + klog.Error(err.Error()) return err } if rtIndex == -1 { @@ -478,15 +504,15 @@ func windowsVMNameParts(vmName string) (poolPrefix string, orch string, poolInde } // GetVMNameIndex return the index of VM in the node pools. 
-func GetVMNameIndex(osType compute.OperatingSystemTypes, vmName string) (int, error) { +func GetVMNameIndex(osType armcompute.OperatingSystemTypes, vmName string) (int, error) { var agentIndex int var err error - if osType == compute.OperatingSystemTypesLinux { + if osType == armcompute.OperatingSystemTypesLinux { _, _, agentIndex, err = k8sLinuxVMNameParts(vmName) if err != nil { return 0, err } - } else if osType == compute.OperatingSystemTypesWindows { + } else if osType == armcompute.OperatingSystemTypesWindows { _, _, _, agentIndex, err = windowsVMNameParts(vmName) if err != nil { return 0, err @@ -625,7 +651,7 @@ func isKnownVmPowerState(powerState string) bool { return knownPowerStates[powerState] } -func vmPowerStateFromStatuses(statuses []compute.InstanceViewStatus) string { +func vmPowerStateFromStatuses(statuses []armcompute.InstanceViewStatus) string { for _, status := range statuses { if status.Code == nil || !isKnownVmPowerState(*status.Code) { continue diff --git a/cluster-autoscaler/cloudprovider/azure/azure_util_test.go b/cluster-autoscaler/cloudprovider/azure/azure_util_test.go index a1d5313c4797..89195a2cb844 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_util_test.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_util_test.go @@ -22,7 +22,7 @@ import ( "testing" "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" "github.com/stretchr/testify/assert" "sigs.k8s.io/cloud-provider-azure/pkg/retry" @@ -113,7 +113,7 @@ func TestWindowsVMNameParts(t *testing.T) { func TestGetVMNameIndexLinux(t *testing.T) { expectedAgentIndex := 65 - agentIndex, err := GetVMNameIndex(compute.OperatingSystemTypesLinux, "k8s-agentpool1-38988164-65") + agentIndex, err := GetVMNameIndex(armcompute.OperatingSystemTypesLinux, "k8s-agentpool1-38988164-65") if agentIndex != expectedAgentIndex { t.Fatalf("incorrect agentIndex. expected=%d actual=%d", expectedAgentIndex, agentIndex) } @@ -125,7 +125,7 @@ func TestGetVMNameIndexLinux(t *testing.T) { func TestGetVMNameIndexWindows(t *testing.T) { expectedAgentIndex := 20 - agentIndex, err := GetVMNameIndex(compute.OperatingSystemTypesWindows, "38988k8s90320") + agentIndex, err := GetVMNameIndex(armcompute.OperatingSystemTypesWindows, "38988k8s90320") if agentIndex != expectedAgentIndex { t.Fatalf("incorrect agentIndex. expected=%d actual=%d", expectedAgentIndex, agentIndex) } diff --git a/cluster-autoscaler/cloudprovider/azure/azure_vms_pool.go b/cluster-autoscaler/cloudprovider/azure/azure_vms_pool.go index 3a75f935ca64..508214e585f9 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_vms_pool.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_vms_pool.go @@ -22,8 +22,8 @@ import ( "strings" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v5" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" apiv1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" @@ -416,23 +416,23 @@ func (vmPool *VMPool) getSpotPoolSize() (int32, error) { // getVMsFromCache retrieves the list of virtual machines in this VMPool. // If excludeDeleting is true, it skips VMs in the "Deleting" state. 
// https://learn.microsoft.com/en-us/azure/virtual-machines/states-billing#provisioning-states -func (vmPool *VMPool) getVMsFromCache(op skipOption) ([]compute.VirtualMachine, error) { +func (vmPool *VMPool) getVMsFromCache(op skipOption) ([]armcompute.VirtualMachine, error) { vmsMap := vmPool.manager.azureCache.getVirtualMachines() - var filteredVMs []compute.VirtualMachine + var filteredVMs []armcompute.VirtualMachine for _, vm := range vmsMap[vmPool.agentPoolName] { - if vm.VirtualMachineProperties == nil || - vm.VirtualMachineProperties.HardwareProfile == nil || - !strings.EqualFold(string(vm.HardwareProfile.VMSize), vmPool.sku) { + if vm.Properties == nil || + vm.Properties.HardwareProfile == nil || + !strings.EqualFold(string(*vm.Properties.HardwareProfile.VMSize), vmPool.sku) { continue } - if op.skipDeleting && strings.Contains(ptr.Deref(vm.VirtualMachineProperties.ProvisioningState, ""), "Deleting") { + if op.skipDeleting && strings.Contains(ptr.Deref(vm.Properties.ProvisioningState, ""), "Deleting") { klog.V(4).Infof("Skipping VM %s in deleting state", ptr.Deref(vm.ID, "")) continue } - if op.skipFailed && strings.Contains(ptr.Deref(vm.VirtualMachineProperties.ProvisioningState, ""), "Failed") { + if op.skipFailed && strings.Contains(ptr.Deref(vm.Properties.ProvisioningState, ""), "Failed") { klog.V(4).Infof("Skipping VM %s in failed state", ptr.Deref(vm.ID, "")) continue } diff --git a/cluster-autoscaler/cloudprovider/azure/azure_vms_pool_test.go b/cluster-autoscaler/cloudprovider/azure/azure_vms_pool_test.go index 37e99e3b17dc..27732ec0a52f 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_vms_pool_test.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_vms_pool_test.go @@ -23,8 +23,8 @@ import ( "testing" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v5" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" "go.uber.org/mock/gomock" "github.com/stretchr/testify/assert" @@ -35,7 +35,6 @@ import ( "k8s.io/utils/ptr" "k8s.io/autoscaler/cluster-autoscaler/config/dynamic" - "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/mockvmclient" ) const ( @@ -59,16 +58,16 @@ func newTestVMsPool(manager *AzureManager) *VMPool { } } -func newTestVMsPoolVMList(count int) []compute.VirtualMachine { - var vmList []compute.VirtualMachine +func newTestVMsPoolVMList(count int) []armcompute.VirtualMachine { + var vmList []armcompute.VirtualMachine for i := 0; i < count; i++ { - vm := compute.VirtualMachine{ + vm := armcompute.VirtualMachine{ ID: ptr.To(fmt.Sprintf(fakeVMsPoolVMID, i)), - VirtualMachineProperties: &compute.VirtualMachineProperties{ + Properties: &armcompute.VirtualMachineProperties{ VMID: ptr.To(fmt.Sprintf("123E4567-E89B-12D3-A456-426655440000-%d", i)), - HardwareProfile: &compute.HardwareProfile{ - VMSize: compute.VirtualMachineSizeTypes(vmSku), + HardwareProfile: &armcompute.HardwareProfile{ + VMSize: ptr.To(armcompute.VirtualMachineSizeTypes(vmSku)), }, ProvisioningState: ptr.To("Succeeded"), }, @@ -258,7 +257,7 @@ func TestAtomicIncreaseSize(t *testing.T) { func TestGetVMsFromCache(t *testing.T) { manager := &AzureManager{ azureCache: &azureCache{ - virtualMachines: make(map[string][]compute.VirtualMachine), + virtualMachines: make(map[string][]armcompute.VirtualMachine), vmsPoolMap: make(map[string]armcontainerservice.AgentPool), }, } @@ -274,7 +273,7 @@ 
func TestGetVMsFromCache(t *testing.T) { assert.Len(t, vms, 0) // Test case 2 - when the vms pool is found in the cache but has no VMs - manager.azureCache.virtualMachines[vmsAgentPoolName] = []compute.VirtualMachine{} + manager.azureCache.virtualMachines[vmsAgentPoolName] = []armcompute.VirtualMachine{} vms, err = agentPool.getVMsFromCache(skipOption{}) assert.NoError(t, err) assert.Len(t, vms, 0) @@ -287,7 +286,7 @@ func TestGetVMsFromCache(t *testing.T) { // Test case 4 - should skip failed VMs vmList := newTestVMsPoolVMList(3) - vmList[0].VirtualMachineProperties.ProvisioningState = ptr.To("Failed") + vmList[0].Properties.ProvisioningState = ptr.To("Failed") manager.azureCache.virtualMachines[vmsAgentPoolName] = vmList vms, err = agentPool.getVMsFromCache(skipOption{skipFailed: true}) assert.NoError(t, err) @@ -295,7 +294,7 @@ func TestGetVMsFromCache(t *testing.T) { // Test case 5 - should skip deleting VMs vmList = newTestVMsPoolVMList(3) - vmList[0].VirtualMachineProperties.ProvisioningState = ptr.To("Deleting") + vmList[0].Properties.ProvisioningState = ptr.To("Deleting") manager.azureCache.virtualMachines[vmsAgentPoolName] = vmList vms, err = agentPool.getVMsFromCache(skipOption{skipDeleting: true}) assert.NoError(t, err) @@ -303,7 +302,7 @@ func TestGetVMsFromCache(t *testing.T) { // Test case 6 - should not skip deleting VMs vmList = newTestVMsPoolVMList(3) - vmList[0].VirtualMachineProperties.ProvisioningState = ptr.To("Deleting") + vmList[0].Properties.ProvisioningState = ptr.To("Deleting") manager.azureCache.virtualMachines[vmsAgentPoolName] = vmList vms, err = agentPool.getVMsFromCache(skipOption{skipFailed: true}) assert.NoError(t, err) @@ -324,12 +323,12 @@ func TestGetVMsFromCacheForVMsPool(t *testing.T) { ap := newTestVMsPool(newTestAzureManager(t)) expectedVMs := newTestVMsPoolVMList(2) - mockVMClient := mockvmclient.NewMockInterface(ctrl) + mockVMClient := NewMockVirtualMachinesClient(ctrl) ap.manager.azClient.virtualMachinesClient = mockVMClient ap.manager.config.EnableVMsAgentPool = true mockAgentpoolclient := NewMockAgentPoolsClient(ctrl) ap.manager.azClient.agentPoolClient = mockAgentpoolclient - mockVMClient.EXPECT().List(gomock.Any(), ap.manager.config.ResourceGroup).Return(expectedVMs, nil) + mockVMClient.EXPECT().NewListPager(ap.manager.config.ResourceGroup, gomock.Any()).Return(getFakeVMListPager(expectedVMs)) agentpool := getTestVMsAgentPool(false) fakeAPListPager := getFakeAgentpoolListPager(&agentpool) @@ -353,9 +352,9 @@ func TestNodes(t *testing.T) { ap := newTestVMsPool(newTestAzureManager(t)) expectedVMs := newTestVMsPoolVMList(2) - mockVMClient := mockvmclient.NewMockInterface(ctrl) + mockVMClient := NewMockVirtualMachinesClient(ctrl) ap.manager.azClient.virtualMachinesClient = mockVMClient - mockVMClient.EXPECT().List(gomock.Any(), ap.manager.config.ResourceGroup).Return(expectedVMs, nil) + mockVMClient.EXPECT().NewListPager(ap.manager.config.ResourceGroup, gomock.Any()).Return(getFakeVMListPager(expectedVMs)) ap.manager.config.EnableVMsAgentPool = true mockAgentpoolclient := NewMockAgentPoolsClient(ctrl) @@ -381,9 +380,9 @@ func TestGetCurSizeForVMsPool(t *testing.T) { ap := newTestVMsPool(newTestAzureManager(t)) expectedVMs := newTestVMsPoolVMList(3) - mockVMClient := mockvmclient.NewMockInterface(ctrl) + mockVMClient := NewMockVirtualMachinesClient(ctrl) ap.manager.azClient.virtualMachinesClient = mockVMClient - mockVMClient.EXPECT().List(gomock.Any(), ap.manager.config.ResourceGroup).Return(expectedVMs, nil) + 
mockVMClient.EXPECT().NewListPager(ap.manager.config.ResourceGroup, gomock.Any()).Return(getFakeVMListPager(expectedVMs)) ap.manager.config.EnableVMsAgentPool = true mockAgentpoolclient := NewMockAgentPoolsClient(ctrl) @@ -410,9 +409,9 @@ func TestVMsPoolIncreaseSize(t *testing.T) { ap := newTestVMsPool(manager) expectedVMs := newTestVMsPoolVMList(3) - mockVMClient := mockvmclient.NewMockInterface(ctrl) + mockVMClient := NewMockVirtualMachinesClient(ctrl) ap.manager.azClient.virtualMachinesClient = mockVMClient - mockVMClient.EXPECT().List(gomock.Any(), ap.manager.config.ResourceGroup).Return(expectedVMs, nil) + mockVMClient.EXPECT().NewListPager(ap.manager.config.ResourceGroup, gomock.Any()).Return(getFakeVMListPager(expectedVMs)) ap.manager.config.EnableVMsAgentPool = true mockAgentpoolclient := NewMockAgentPoolsClient(ctrl) @@ -467,7 +466,7 @@ func TestDeleteVMsPoolNodes_Failed(t *testing.T) { node := newVMsNode(0) expectedVMs := newTestVMsPoolVMList(3) - mockVMClient := mockvmclient.NewMockInterface(ctrl) + mockVMClient := NewMockVirtualMachinesClient(ctrl) ap.manager.azClient.virtualMachinesClient = mockVMClient ap.manager.config.EnableVMsAgentPool = true mockAgentpoolclient := NewMockAgentPoolsClient(ctrl) @@ -475,7 +474,7 @@ func TestDeleteVMsPoolNodes_Failed(t *testing.T) { ap.manager.azClient.agentPoolClient = mockAgentpoolclient fakeAPListPager := getFakeAgentpoolListPager(&agentpool) mockAgentpoolclient.EXPECT().NewListPager(gomock.Any(), gomock.Any(), nil).Return(fakeAPListPager) - mockVMClient.EXPECT().List(gomock.Any(), ap.manager.config.ResourceGroup).Return(expectedVMs, nil) + mockVMClient.EXPECT().NewListPager(ap.manager.config.ResourceGroup, gomock.Any()).Return(getFakeVMListPager(expectedVMs)) ap.manager.azureCache.enableVMsAgentPool = true registered := ap.manager.RegisterNodeGroup(ap) @@ -496,7 +495,7 @@ func TestDeleteVMsPoolNodes_Success(t *testing.T) { ap := newTestVMsPool(newTestAzureManager(t)) expectedVMs := newTestVMsPoolVMList(5) - mockVMClient := mockvmclient.NewMockInterface(ctrl) + mockVMClient := NewMockVirtualMachinesClient(ctrl) ap.manager.azClient.virtualMachinesClient = mockVMClient ap.manager.config.EnableVMsAgentPool = true mockAgentpoolclient := NewMockAgentPoolsClient(ctrl) @@ -504,7 +503,7 @@ func TestDeleteVMsPoolNodes_Success(t *testing.T) { ap.manager.azClient.agentPoolClient = mockAgentpoolclient fakeAPListPager := getFakeAgentpoolListPager(&agentpool) mockAgentpoolclient.EXPECT().NewListPager(gomock.Any(), gomock.Any(), nil).Return(fakeAPListPager) - mockVMClient.EXPECT().List(gomock.Any(), ap.manager.config.ResourceGroup).Return(expectedVMs, nil) + mockVMClient.EXPECT().NewListPager(ap.manager.config.ResourceGroup, gomock.Any()).Return(getFakeVMListPager(expectedVMs)) ap.manager.azureCache.enableVMsAgentPool = true registered := ap.manager.RegisterNodeGroup(ap) diff --git a/cluster-autoscaler/go.mod b/cluster-autoscaler/go.mod index 2a49b384b4af..63d279e11fc1 100644 --- a/cluster-autoscaler/go.mod +++ b/cluster-autoscaler/go.mod @@ -4,16 +4,19 @@ go 1.24.0 require ( cloud.google.com/go/compute/metadata v0.6.0 - github.com/Azure/azure-sdk-for-go v68.0.0+incompatible github.com/Azure/azure-sdk-for-go-extensions v0.1.6 - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.0 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7 v7.1.0 
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v5 v5.1.0-beta.2 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v7 v7.1.0 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2 v2.1.0 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1 + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0 github.com/Azure/go-autorest/autorest v0.11.29 github.com/Azure/go-autorest/autorest/adal v0.9.24 github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 - github.com/Azure/go-autorest/autorest/date v0.3.0 - github.com/Azure/skewer v0.0.19 + github.com/Azure/skewer/v2 v2.0.0 github.com/aws/aws-sdk-go v1.44.241 github.com/cenkalti/backoff/v4 v4.3.0 github.com/digitalocean/godo v1.27.0 @@ -28,13 +31,13 @@ require ( github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.22.0 github.com/spf13/pflag v1.0.6 - github.com/stretchr/testify v1.10.0 + github.com/stretchr/testify v1.11.1 github.com/vburenin/ifacemaker v1.2.1 go.uber.org/mock v0.4.0 - golang.org/x/crypto v0.36.0 - golang.org/x/net v0.38.0 + golang.org/x/crypto v0.43.0 + golang.org/x/net v0.46.0 golang.org/x/oauth2 v0.27.0 - golang.org/x/sys v0.31.0 + golang.org/x/sys v0.37.0 google.golang.org/api v0.151.0 google.golang.org/grpc v1.72.1 google.golang.org/protobuf v1.36.5 @@ -63,26 +66,27 @@ require ( require ( cel.dev/expr v0.24.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect + github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets v0.12.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1 // indirect - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry v1.2.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.8.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault v1.4.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns v1.2.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 // indirect github.com/Azure/go-armbalancer v0.0.2 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 // indirect + github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect github.com/Azure/go-autorest/autorest/mocks v0.4.2 // indirect github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 // indirect github.com/GoogleCloudPlatform/k8s-cloud-provider v1.25.0 // indirect github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab // 
indirect github.com/Microsoft/go-winio v0.6.2 // indirect @@ -122,10 +126,9 @@ require ( github.com/go-openapi/swag v0.23.0 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect - github.com/gofrs/uuid v4.4.0+incompatible // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.2 // indirect - github.com/golang-jwt/jwt/v5 v5.2.2 // indirect + github.com/golang-jwt/jwt/v5 v5.3.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/google/btree v1.1.3 // indirect github.com/google/cadvisor v0.52.1 // indirect @@ -192,12 +195,13 @@ require ( go.yaml.in/yaml/v2 v2.4.2 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect - golang.org/x/mod v0.21.0 // indirect - golang.org/x/sync v0.12.0 // indirect - golang.org/x/term v0.30.0 // indirect - golang.org/x/text v0.23.0 // indirect + golang.org/x/mod v0.28.0 // indirect + golang.org/x/sync v0.17.0 // indirect + golang.org/x/term v0.36.0 // indirect + golang.org/x/text v0.30.0 // indirect golang.org/x/time v0.9.0 // indirect - golang.org/x/tools v0.26.0 // indirect + golang.org/x/tools v0.37.0 // indirect + golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect diff --git a/cluster-autoscaler/go.sum b/cluster-autoscaler/go.sum index 8668deef6866..b7e741dd344c 100644 --- a/cluster-autoscaler/go.sum +++ b/cluster-autoscaler/go.sum @@ -8,18 +8,22 @@ github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0 github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go-extensions v0.1.6 h1:EXGvDcj54u98XfaI/Cy65Ds6vNsIJeGKYf0eNLB1y4Q= github.com/Azure/azure-sdk-for-go-extensions v0.1.6/go.mod h1:27StPiXJp6Xzkq2AQL7gPK7VC0hgmCnUKlco1dO1jaM= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 h1:GJHeeA2N7xrG3q30L2UXDyuWRzDM900/65j70wcM4Ww= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 h1:JXg2dwJUmPB9JmtVmdEB16APJ7jurfbY5jnfXpJoRMc= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.0 h1:KpMC6LFL7mqpExyMC9jVOYRiVhLmamjeZfRsUpB7l4s= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.0/go.mod h1:J7MUC/wtRpfGVbQ5sIItY5/FuVWmvzlY21WAOfQnq/I= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA= 
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI=
 github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets v0.12.0 h1:xnO4sFyG8UH2fElBkcqLTOZsAajvKfnSlgBBW8dXYjw=
 github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets v0.12.0/go.mod h1:XD3DIOOVgBCO03OleB1fHjgktVRFxlT++KwKgIOewdM=
 github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1 h1:FbH3BbSb4bvGluTesZZ+ttN/MDsnMmQP36OSnDuSXqw=
 github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1/go.mod h1:9V2j0jn9jDEkCkv8w/bKTNppX/d0FVA1ud77xCIP4KA=
-github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0 h1:ui3YNbxfW7J3tTFIZMH6LIGRjCngp+J+nIFlnizfNTE=
-github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0/go.mod h1:gZmgV+qBqygoznvqo2J9oKZAFziqhLZ2xE/WVUmzkHA=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7 v7.1.0 h1:DgBMWmMRpTwrwIctOVZVC/p9qro/EjIQgn7q7m9TvSM=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7 v7.1.0/go.mod h1:QbzbFyebUZ1eJam5EjIIUjCdSi2di3LTmsJEWcnmii0=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry v1.2.0 h1:DWlwvVV5r/Wy1561nZ3wrpI1/vDIBRY/Wd1HWaRBZWA=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry v1.2.0/go.mod h1:E7ltexgRDmeJ0fJWv0D/HLwY2xbDdN+uv+X2uZtOx3w=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v2 v2.4.0 h1:1u/K2BFv0MwkG6he8RYuUcbbeK22rkoZbg4lKa/msZU=
@@ -34,16 +38,22 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v3 v3.1.0 h1:2qsI
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v3 v3.1.0/go.mod h1:AW8VEadnhw9xox+VaVd9sP7NjzOAnaZBLRH6Tq3cJ38=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault v1.4.0 h1:HlZMUZW8S4P9oob1nCHxCCKrytxyLc+24nUJGssoEto=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault v1.4.0/go.mod h1:StGsLbuJh06Bd8IBfnAlIFV3fLb+gkczONWf15hpX2E=
-github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/managementgroups/armmanagementgroups v1.0.0 h1:pPvTJ1dY0sA35JOeFq6TsY2xj6Z85Yo23Pj4wCCvu4o=
-github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/managementgroups/armmanagementgroups v1.0.0/go.mod h1:mLfWfj8v3jfWKsL9G4eoBoXVcsqcIUTapmdKy7uGOp0=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/managementgroups/armmanagementgroups v1.2.0 h1:akP6VpxJGgQRpDR1P462piz/8OhYLRCreDj48AyNabc=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/managementgroups/armmanagementgroups v1.2.0/go.mod h1:8wzvopPfyZYPaQUoKW87Zfdul7jmJMDfp/k7YY3oJyA=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0/go.mod h1:Y/HgrePTmGy9HjdSGTqZNa+apUpTVIEVKXJyARP2lrk=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v7 v7.1.0 h1:IoByPFaDJu73ubiPSUYuWZIhX45x2pDYaC6uuDUcCxo=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v7 v7.1.0/go.mod h1:1CGS8iwsScDbgH5lg7S9ulkzoDNRJJPbIZXPtmTH+4k=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns v1.2.0 h1:9Eih8XcEeQnFD0ntMlUDleKMzfeCeUfa+VbnDCI4AZs=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns v1.2.0/go.mod h1:wGPyTi+aURdqPAGMZDQqnNs9IrShADF8w2WZb6bKeq0=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 h1:Dd+RhdJn0OTtVGaeDLZpcumkIVCtA/3/Fo42+eoYvVM=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0/go.mod h1:5kakwfW5CjC9KK+Q4wjXAg+ShuIm2mBMua0ZFj2C8PE=
-github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70=
-github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0/go.mod h1:T5RfihdXtBDxt1Ch2wobif3TvzTdumDy29kahv6AV9A=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2 v2.1.0 h1:seyVIpxalxYmfjoo8MB4rRzWaobMG+KJ2+MAUrEvDGU=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2 v2.1.0/go.mod h1:M3QD7IyKZBaC4uAKjitTOSOXdcPC6JS1A9oOW3hYjbQ=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1 h1:/Zt+cDPnpC3OVDm/JKLOs7M2DKmLRIIp3XIx9pHHiig=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1/go.mod h1:Ng3urmn6dYe8gnbCMoHHVl5APYz2txho3koEkV2o2HA=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0 h1:nVocQV40OQne5613EeLayJiRAJuKlBGy+m22qWG+WRg=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0/go.mod h1:7QJP7dr2wznCMeqIrhMgWGf7XpAQnVrJqDm9nvV3Cu4=
 github.com/Azure/go-armbalancer v0.0.2 h1:NVnxsTWHI5/fEzL6k6TjxPUfcB/3Si3+HFOZXOu0QtA=
 github.com/Azure/go-armbalancer v0.0.2/go.mod h1:yTg7MA/8YnfKQc9o97tzAJ7fbdVkod1xGsIvKmhYPRE=
 github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
@@ -72,10 +82,12 @@ github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+Z
 github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
 github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
 github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
-github.com/Azure/skewer v0.0.19 h1:+qA1z8isKmlNkhAwZErNS2wD2jaemSk9NszYKr8dddU=
-github.com/Azure/skewer v0.0.19/go.mod h1:LVH7jmduRKmPj8YcIz7V4f53xJEntjweL4aoLyChkwk=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
+github.com/Azure/skewer/v2 v2.0.0 h1:DH2nrzfiW+U3AczfmGXPf/rgvGOe0Nt/2xDBUToTIr0=
+github.com/Azure/skewer/v2 v2.0.0/go.mod h1:Lw55RMYaKg9UINXZLH6+Z7KAPcGHX4fQNIXnTBIjHCg=
+github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
+github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 h1:XkkQbfMyuH2jTSjQjSoihryI8GINRcs4xp8lNawg0FI=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/GoogleCloudPlatform/k8s-cloud-provider v1.25.0 h1:lwL1vLWmdBJ5h+StMEN6+GMz1J/Y0yUU3RDv+QBy+Q4=
 github.com/GoogleCloudPlatform/k8s-cloud-provider v1.25.0/go.mod h1:UTfhBnADaj2rybPT049NScSh7Eall3u2ib43wmz3deg=
@@ -140,8 +152,6 @@ github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi
 github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
 github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
 github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
-github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
 github.com/docker/docker v27.1.1+incompatible h1:hO/M4MtV36kzKldqnA37IWhebRA+LnqqcqDja6kVaKY=
 github.com/docker/docker v27.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
@@ -184,8 +194,6 @@ github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZ
 github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
 github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA=
-github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
 github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
@@ -193,8 +201,8 @@ github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzw
 github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
 github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI=
 github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
-github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
-github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
+github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
+github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
@@ -271,6 +279,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
 github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
 github.com/karrick/godirwalk v1.17.0 h1:b4kY7nqDdioR/6qnbHQyDvmA17u5G1cZ6J+CZXwSWoI=
 github.com/karrick/godirwalk v1.17.0/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk=
+github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU=
+github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
@@ -368,8 +378,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
-github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
+github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
 github.com/vburenin/ifacemaker v1.2.1 h1:3Vq8B/bfBgjWTkv+jDg4dVL1KHt3k1K4lO7XRxYA2sk=
@@ -443,8 +453,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
 golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
 golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
-golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
-golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
+golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
+golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
 golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
@@ -456,8 +466,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
 golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
-golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
+golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U=
+golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -475,8 +485,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug
 golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
 golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
 golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
-golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
-golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
+golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4=
+golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
@@ -490,8 +500,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
-golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
+golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -508,16 +518,16 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
-golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
+golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
 golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
 golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
-golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
-golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
+golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q=
+golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -526,8 +536,8 @@ golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
 golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
-golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
+golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
+golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
 golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
 golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -541,8 +551,12 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f
 golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
 golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
-golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
+golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE=
+golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w=
+golang.org/x/tools/go/expect v0.1.0-deprecated h1:jY2C5HGYR5lqex3gEniOQL0r7Dq5+VGVgY1nudX5lXY=
+golang.org/x/tools/go/expect v0.1.0-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY=
+golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM=
+golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated/go.mod h1:RVAQXBGNv1ib0J382/DPCRS/BPnsGebyM1Gj5VSDpG8=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=