Skip to content

Commit 1c689c2

Browse files
authored
Merge pull request kubernetes#77491 from andyzhangx/azuredisk-metrics
add operation name for vm/vmss update operations in prometheus metrics
2 parents 3bc595e + 00c972c commit 1c689c2

File tree

9 files changed

+63
-74
lines changed

9 files changed

+63
-74
lines changed

staging/src/k8s.io/legacy-cloud-providers/azure/azure_backoff.go

Lines changed: 2 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -550,25 +550,13 @@ func (az *Cloud) deleteRouteWithRetry(routeName string) error {
550550
})
551551
}
552552

553-
// CreateOrUpdateVMWithRetry invokes az.VirtualMachinesClient.CreateOrUpdate with exponential backoff retry
554-
func (az *Cloud) CreateOrUpdateVMWithRetry(resourceGroup, vmName string, newVM compute.VirtualMachine) error {
555-
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
556-
ctx, cancel := getContextWithCancel()
557-
defer cancel()
558-
559-
resp, err := az.VirtualMachinesClient.CreateOrUpdate(ctx, resourceGroup, vmName, newVM)
560-
klog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s): end", vmName)
561-
return az.processHTTPRetryResponse(nil, "", resp, err)
562-
})
563-
}
564-
565553
// UpdateVmssVMWithRetry invokes az.VirtualMachineScaleSetVMsClient.Update with exponential backoff retry
566-
func (az *Cloud) UpdateVmssVMWithRetry(resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM) error {
554+
func (az *Cloud) UpdateVmssVMWithRetry(resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM, source string) error {
567555
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
568556
ctx, cancel := getContextWithCancel()
569557
defer cancel()
570558

571-
resp, err := az.VirtualMachineScaleSetVMsClient.Update(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters)
559+
resp, err := az.VirtualMachineScaleSetVMsClient.Update(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters, source)
572560
klog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s,%s): end", VMScaleSetName, instanceID)
573561
return az.processHTTPRetryResponse(nil, "", resp, err)
574562
})

staging/src/k8s.io/legacy-cloud-providers/azure/azure_client.go

Lines changed: 45 additions & 45 deletions
Large diffs are not rendered by default.

staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_standard.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -84,7 +84,7 @@ func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI stri
8484
// Invalidate the cache right after updating
8585
defer as.cloud.vmCache.Delete(vmName)
8686

87-
_, err = as.VirtualMachinesClient.CreateOrUpdate(ctx, nodeResourceGroup, vmName, newVM)
87+
_, err = as.VirtualMachinesClient.CreateOrUpdate(ctx, nodeResourceGroup, vmName, newVM, "attach_disk")
8888
if err != nil {
8989
klog.Errorf("azureDisk - attach disk(%s, %s) failed, err: %v", diskName, diskURI, err)
9090
detail := err.Error()
@@ -151,7 +151,7 @@ func (as *availabilitySet) DetachDisk(diskName, diskURI string, nodeName types.N
151151
// Invalidate the cache right after updating
152152
defer as.cloud.vmCache.Delete(vmName)
153153

154-
return as.VirtualMachinesClient.CreateOrUpdate(ctx, nodeResourceGroup, vmName, newVM)
154+
return as.VirtualMachinesClient.CreateOrUpdate(ctx, nodeResourceGroup, vmName, newVM, "detach_disk")
155155
}
156156

157157
// GetDataDisks gets a list of data disks attached to the node.

staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_vmss.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -89,7 +89,7 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod
8989
defer ss.vmssVMCache.Delete(key)
9090

9191
klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk(%s, %s)", nodeResourceGroup, nodeName, diskName, diskURI)
92-
_, err = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM)
92+
_, err = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM, "attach_disk")
9393
if err != nil {
9494
detail := err.Error()
9595
if strings.Contains(detail, errLeaseFailed) || strings.Contains(detail, errDiskBlobNotFound) {
@@ -159,7 +159,7 @@ func (ss *scaleSet) DetachDisk(diskName, diskURI string, nodeName types.NodeName
159159
defer ss.vmssVMCache.Delete(key)
160160

161161
klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk(%s, %s)", nodeResourceGroup, nodeName, diskName, diskURI)
162-
return ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM)
162+
return ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM, "detach_disk")
163163
}
164164

165165
// GetDataDisks gets a list of data disks attached to the node.

staging/src/k8s.io/legacy-cloud-providers/azure/azure_fakes.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -290,7 +290,7 @@ func newFakeAzureVirtualMachinesClient() *fakeAzureVirtualMachinesClient {
290290
return fVMC
291291
}
292292

293-
func (fVMC *fakeAzureVirtualMachinesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachine) (resp *http.Response, err error) {
293+
func (fVMC *fakeAzureVirtualMachinesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachine, source string) (resp *http.Response, err error) {
294294
fVMC.mutex.Lock()
295295
defer fVMC.mutex.Unlock()
296296

@@ -550,7 +550,7 @@ func (fVMC *fakeVirtualMachineScaleSetVMsClient) GetInstanceView(ctx context.Con
550550
return result, nil
551551
}
552552

553-
func (fVMC *fakeVirtualMachineScaleSetVMsClient) Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM) (resp *http.Response, err error) {
553+
func (fVMC *fakeVirtualMachineScaleSetVMsClient) Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM, source string) (resp *http.Response, err error) {
554554
fVMC.mutex.Lock()
555555
defer fVMC.mutex.Unlock()
556556

staging/src/k8s.io/legacy-cloud-providers/azure/azure_metrics.go

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,7 @@ var (
3333
"request", // API function that is being invoked
3434
"resource_group", // Resource group of the resource being monitored
3535
"subscription_id", // Subscription ID of the resource being monitored
36+
"source", // Operation source (optional)
3637
}
3738

3839
apiMetrics = registerAPIMetrics(metricLabels...)
@@ -43,10 +44,10 @@ type metricContext struct {
4344
attributes []string
4445
}
4546

46-
func newMetricContext(prefix, request, resourceGroup, subscriptionID string) *metricContext {
47+
func newMetricContext(prefix, request, resourceGroup, subscriptionID, source string) *metricContext {
4748
return &metricContext{
4849
start: time.Now(),
49-
attributes: []string{prefix + "_" + request, strings.ToLower(resourceGroup), subscriptionID},
50+
attributes: []string{prefix + "_" + request, strings.ToLower(resourceGroup), subscriptionID, source},
5051
}
5152
}
5253

staging/src/k8s.io/legacy-cloud-providers/azure/azure_metrics_test.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -23,12 +23,12 @@ import (
2323
)
2424

2525
func TestAzureMetricLabelCardinality(t *testing.T) {
26-
mc := newMetricContext("test", "create", "resource_group", "subscription_id")
26+
mc := newMetricContext("test", "create", "resource_group", "subscription_id", "source")
2727
assert.Len(t, mc.attributes, len(metricLabels), "cardinalities of labels and values must match")
2828
}
2929

3030
func TestAzureMetricLabelPrefix(t *testing.T) {
31-
mc := newMetricContext("prefix", "request", "resource_group", "subscription_id")
31+
mc := newMetricContext("prefix", "request", "resource_group", "subscription_id", "source")
3232
found := false
3333
for _, attribute := range mc.attributes {
3434
if attribute == "prefix_request" {

staging/src/k8s.io/legacy-cloud-providers/azure/azure_test.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1108,7 +1108,7 @@ func getClusterResources(az *Cloud, vmCount int, availabilitySetCount int) (clus
11081108

11091109
vmCtx, vmCancel := getContextWithCancel()
11101110
defer vmCancel()
1111-
_, err := az.VirtualMachinesClient.CreateOrUpdate(vmCtx, az.Config.ResourceGroup, vmName, newVM)
1111+
_, err := az.VirtualMachinesClient.CreateOrUpdate(vmCtx, az.Config.ResourceGroup, vmName, newVM, "")
11121112
if err != nil {
11131113
}
11141114
// add to kubernetes

staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -712,10 +712,10 @@ func (ss *scaleSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeNam
712712
ctx, cancel := getContextWithCancel()
713713
defer cancel()
714714
klog.V(2).Infof("EnsureHostInPool begins to update vmssVM(%s) with new backendPoolID %s", vmName, backendPoolID)
715-
resp, err := ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM)
715+
resp, err := ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM, "network_update")
716716
if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
717717
klog.V(2).Infof("EnsureHostInPool update backing off vmssVM(%s) with new backendPoolID %s, err: %v", vmName, backendPoolID, err)
718-
retryErr := ss.UpdateVmssVMWithRetry(nodeResourceGroup, ssName, instanceID, newVM)
718+
retryErr := ss.UpdateVmssVMWithRetry(nodeResourceGroup, ssName, instanceID, newVM, "network_update")
719719
if retryErr != nil {
720720
err = retryErr
721721
klog.Errorf("EnsureHostInPool update abort backoff vmssVM(%s) with new backendPoolID %s, err: %v", vmName, backendPoolID, err)
@@ -841,10 +841,10 @@ func (ss *scaleSet) ensureBackendPoolDeletedFromNode(service *v1.Service, nodeNa
841841
ctx, cancel := getContextWithCancel()
842842
defer cancel()
843843
klog.V(2).Infof("ensureBackendPoolDeletedFromNode begins to update vmssVM(%s) with backendPoolID %s", nodeName, backendPoolID)
844-
resp, err := ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM)
844+
resp, err := ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM, "network_update")
845845
if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
846846
klog.V(2).Infof("ensureBackendPoolDeletedFromNode update backing off vmssVM(%s) with backendPoolID %s, err: %v", nodeName, backendPoolID, err)
847-
retryErr := ss.UpdateVmssVMWithRetry(nodeResourceGroup, ssName, instanceID, newVM)
847+
retryErr := ss.UpdateVmssVMWithRetry(nodeResourceGroup, ssName, instanceID, newVM, "network_update")
848848
if retryErr != nil {
849849
err = retryErr
850850
klog.Errorf("ensureBackendPoolDeletedFromNode update abort backoff vmssVM(%s) with backendPoolID %s, err: %v", nodeName, backendPoolID, err)

0 commit comments

Comments (0)