Skip to content

Commit 7b64836

Browse files
authored
Merge pull request #7613 from walidghallab/err
Refactor NewAutoscalerError function.
2 parents e4898a9 + 720f594 commit 7b64836

File tree

14 files changed

+44
-36
lines changed

14 files changed

+44
-36
lines changed

cluster-autoscaler/cloudprovider/gce/autoscaling_gce_client.go

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -247,7 +247,7 @@ func (client *autoscalingGceClientV1) FetchMigTargetSize(migRef GceRef) (int64,
247247
if err != nil {
248248
if err, ok := err.(*googleapi.Error); ok {
249249
if err.Code == http.StatusNotFound {
250-
return 0, errors.NewAutoscalerError(errors.NodeGroupDoesNotExistError, "%s", err.Error())
250+
return 0, errors.NewAutoscalerError(errors.NodeGroupDoesNotExistError, err.Error())
251251
}
252252
}
253253
return 0, err
@@ -262,7 +262,7 @@ func (client *autoscalingGceClientV1) FetchMigBasename(migRef GceRef) (string, e
262262
igm, err := client.gceService.InstanceGroupManagers.Get(migRef.Project, migRef.Zone, migRef.Name).Context(ctx).Do()
263263
if err != nil {
264264
if err, ok := err.(*googleapi.Error); ok && err.Code == http.StatusNotFound {
265-
return "", errors.NewAutoscalerError(errors.NodeGroupDoesNotExistError, "%s", err.Error())
265+
return "", errors.NewAutoscalerError(errors.NodeGroupDoesNotExistError, err.Error())
266266
}
267267
return "", err
268268
}
@@ -277,7 +277,7 @@ func (client *autoscalingGceClientV1) FetchListManagedInstancesResults(migRef Gc
277277
if err != nil {
278278
if err, ok := err.(*googleapi.Error); ok {
279279
if err.Code == http.StatusNotFound {
280-
return "", errors.NewAutoscalerError(errors.NodeGroupDoesNotExistError, "%s", err.Error())
280+
return "", errors.NewAutoscalerError(errors.NodeGroupDoesNotExistError, err.Error())
281281
}
282282
}
283283
return "", err
@@ -785,7 +785,7 @@ func (client *autoscalingGceClientV1) FetchMigTemplateName(migRef GceRef) (Insta
785785
if err != nil {
786786
if err, ok := err.(*googleapi.Error); ok {
787787
if err.Code == http.StatusNotFound {
788-
return InstanceTemplateName{}, errors.NewAutoscalerError(errors.NodeGroupDoesNotExistError, "%s", err.Error())
788+
return InstanceTemplateName{}, errors.NewAutoscalerError(errors.NodeGroupDoesNotExistError, err.Error())
789789
}
790790
}
791791
return InstanceTemplateName{}, err

cluster-autoscaler/core/scaledown/actuation/actuator.go

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -229,7 +229,7 @@ func (a *Actuator) taintNodesSync(NodeGroupViews []*budgets.NodeGroupView) (time
229229
if a.ctx.AutoscalingOptions.DynamicNodeDeleteDelayAfterTaintEnabled {
230230
close(updateLatencyTracker.AwaitOrStopChan)
231231
}
232-
return nodeDeleteDelayAfterTaint, errors.NewAutoscalerError(errors.ApiCallError, "couldn't taint %d nodes with ToBeDeleted", len(failedTaintedNodes))
232+
return nodeDeleteDelayAfterTaint, errors.NewAutoscalerErrorf(errors.ApiCallError, "couldn't taint %d nodes with ToBeDeleted", len(failedTaintedNodes))
233233
}
234234

235235
if a.ctx.AutoscalingOptions.DynamicNodeDeleteDelayAfterTaintEnabled {
@@ -287,7 +287,7 @@ func (a *Actuator) deleteNodesAsync(nodes []*apiv1.Node, nodeGroup cloudprovider
287287
clusterSnapshot, err := a.createSnapshot(nodes)
288288
if err != nil {
289289
klog.Errorf("Scale-down: couldn't create delete snapshot, err: %v", err)
290-
nodeDeleteResult := status.NodeDeleteResult{ResultType: status.NodeDeleteErrorInternal, Err: errors.NewAutoscalerError(errors.InternalError, "createSnapshot returned error %v", err)}
290+
nodeDeleteResult := status.NodeDeleteResult{ResultType: status.NodeDeleteErrorInternal, Err: errors.NewAutoscalerErrorf(errors.InternalError, "createSnapshot returned error %v", err)}
291291
for _, node := range nodes {
292292
a.nodeDeletionScheduler.AbortNodeDeletion(node, nodeGroup.Id(), drain, "failed to create delete snapshot", nodeDeleteResult)
293293
}
@@ -298,7 +298,7 @@ func (a *Actuator) deleteNodesAsync(nodes []*apiv1.Node, nodeGroup cloudprovider
298298
pdbs, err := a.ctx.PodDisruptionBudgetLister().List()
299299
if err != nil {
300300
klog.Errorf("Scale-down: couldn't fetch pod disruption budgets, err: %v", err)
301-
nodeDeleteResult := status.NodeDeleteResult{ResultType: status.NodeDeleteErrorInternal, Err: errors.NewAutoscalerError(errors.InternalError, "podDisruptionBudgetLister.List returned error %v", err)}
301+
nodeDeleteResult := status.NodeDeleteResult{ResultType: status.NodeDeleteErrorInternal, Err: errors.NewAutoscalerErrorf(errors.InternalError, "podDisruptionBudgetLister.List returned error %v", err)}
302302
for _, node := range nodes {
303303
a.nodeDeletionScheduler.AbortNodeDeletion(node, nodeGroup.Id(), drain, "failed to fetch pod disruption budgets", nodeDeleteResult)
304304
}
@@ -317,22 +317,22 @@ func (a *Actuator) deleteNodesAsync(nodes []*apiv1.Node, nodeGroup cloudprovider
317317
nodeInfo, err := clusterSnapshot.GetNodeInfo(node.Name)
318318
if err != nil {
319319
klog.Errorf("Scale-down: can't retrieve node %q from snapshot, err: %v", node.Name, err)
320-
nodeDeleteResult := status.NodeDeleteResult{ResultType: status.NodeDeleteErrorInternal, Err: errors.NewAutoscalerError(errors.InternalError, "nodeInfos.Get for %q returned error: %v", node.Name, err)}
320+
nodeDeleteResult := status.NodeDeleteResult{ResultType: status.NodeDeleteErrorInternal, Err: errors.NewAutoscalerErrorf(errors.InternalError, "nodeInfos.Get for %q returned error: %v", node.Name, err)}
321321
a.nodeDeletionScheduler.AbortNodeDeletion(node, nodeGroup.Id(), drain, "failed to get node info", nodeDeleteResult)
322322
continue
323323
}
324324

325325
podsToRemove, _, _, err := simulator.GetPodsToMove(nodeInfo, a.deleteOptions, a.drainabilityRules, registry, remainingPdbTracker, time.Now())
326326
if err != nil {
327327
klog.Errorf("Scale-down: couldn't delete node %q, err: %v", node.Name, err)
328-
nodeDeleteResult := status.NodeDeleteResult{ResultType: status.NodeDeleteErrorInternal, Err: errors.NewAutoscalerError(errors.InternalError, "GetPodsToMove for %q returned error: %v", node.Name, err)}
328+
nodeDeleteResult := status.NodeDeleteResult{ResultType: status.NodeDeleteErrorInternal, Err: errors.NewAutoscalerErrorf(errors.InternalError, "GetPodsToMove for %q returned error: %v", node.Name, err)}
329329
a.nodeDeletionScheduler.AbortNodeDeletion(node, nodeGroup.Id(), drain, "failed to get pods to move on node", nodeDeleteResult)
330330
continue
331331
}
332332

333333
if !drain && len(podsToRemove) != 0 {
334334
klog.Errorf("Scale-down: couldn't delete empty node %q, new pods got scheduled", node.Name)
335-
nodeDeleteResult := status.NodeDeleteResult{ResultType: status.NodeDeleteErrorInternal, Err: errors.NewAutoscalerError(errors.InternalError, "failed to delete empty node %q, new pods scheduled", node.Name)}
335+
nodeDeleteResult := status.NodeDeleteResult{ResultType: status.NodeDeleteErrorInternal, Err: errors.NewAutoscalerErrorf(errors.InternalError, "failed to delete empty node %q, new pods scheduled", node.Name)}
336336
a.nodeDeletionScheduler.AbortNodeDeletion(node, nodeGroup.Id(), drain, "node is not empty", nodeDeleteResult)
337337
continue
338338
}

cluster-autoscaler/core/scaledown/actuation/delete_in_batch.go

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -149,16 +149,16 @@ func (d *NodeDeletionBatcher) remove(nodeGroupId string) error {
149149
func deleteNodesFromCloudProvider(ctx *context.AutoscalingContext, scaleStateNotifier nodegroupchange.NodeGroupChangeObserver, nodes []*apiv1.Node) (cloudprovider.NodeGroup, error) {
150150
nodeGroup, err := ctx.CloudProvider.NodeGroupForNode(nodes[0])
151151
if err != nil {
152-
return nodeGroup, errors.NewAutoscalerError(errors.CloudProviderError, "failed to find node group for %s: %v", nodes[0].Name, err)
152+
return nodeGroup, errors.NewAutoscalerErrorf(errors.CloudProviderError, "failed to find node group for %s: %v", nodes[0].Name, err)
153153
}
154154
if nodeGroup == nil || reflect.ValueOf(nodeGroup).IsNil() {
155-
return nil, errors.NewAutoscalerError(errors.InternalError, "picked node that doesn't belong to a node group: %s", nodes[0].Name)
155+
return nil, errors.NewAutoscalerErrorf(errors.InternalError, "picked node that doesn't belong to a node group: %s", nodes[0].Name)
156156
}
157157
if err := nodeGroup.DeleteNodes(nodes); err != nil {
158158
scaleStateNotifier.RegisterFailedScaleDown(nodeGroup,
159159
string(errors.CloudProviderError),
160160
time.Now())
161-
return nodeGroup, errors.NewAutoscalerError(errors.CloudProviderError, "failed to delete nodes from group %s: %v", nodeGroup.Id(), err)
161+
return nodeGroup, errors.NewAutoscalerErrorf(errors.CloudProviderError, "failed to delete nodes from group %s: %v", nodeGroup.Id(), err)
162162
}
163163
return nodeGroup, nil
164164
}

cluster-autoscaler/core/scaledown/actuation/drain.go

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -102,7 +102,7 @@ func (e Evictor) drainNodeWithPodsBasedOnPodPriority(ctx *acontext.AutoscalingCo
102102
for _, group := range groups {
103103
for _, pod := range group.FullEvictionPods {
104104
evictionResults[pod.Name] = status.PodEvictionResult{Pod: pod, TimedOut: false,
105-
Err: errors.NewAutoscalerError(errors.UnexpectedScaleDownStateError, "Eviction did not attempted for the pod %s because some of the previous evictions failed", pod.Name)}
105+
Err: errors.NewAutoscalerErrorf(errors.UnexpectedScaleDownStateError, "Eviction did not attempted for the pod %s because some of the previous evictions failed", pod.Name)}
106106
}
107107
}
108108

@@ -163,7 +163,7 @@ func (e Evictor) waitPodsToDisappear(ctx *acontext.AutoscalingContext, node *api
163163
}
164164
}
165165

166-
return evictionResults, errors.NewAutoscalerError(errors.TransientError, "Failed to drain node %s/%s: pods remaining after timeout", node.Namespace, node.Name)
166+
return evictionResults, errors.NewAutoscalerErrorf(errors.TransientError, "Failed to drain node %s/%s: pods remaining after timeout", node.Namespace, node.Name)
167167
}
168168

169169
func (e Evictor) initiateEviction(ctx *acontext.AutoscalingContext, node *apiv1.Node, fullEvictionPods, bestEffortEvictionPods []*apiv1.Pod, evictionResults map[string]status.PodEvictionResult,
@@ -207,7 +207,7 @@ func (e Evictor) initiateEviction(ctx *acontext.AutoscalingContext, node *apiv1.
207207
}
208208
}
209209
if len(evictionErrs) != 0 {
210-
return evictionResults, errors.NewAutoscalerError(errors.ApiCallError, "Failed to drain node %s/%s, due to following errors: %v", node.Namespace, node.Name, evictionErrs)
210+
return evictionResults, errors.NewAutoscalerErrorf(errors.ApiCallError, "Failed to drain node %s/%s, due to following errors: %v", node.Namespace, node.Name, evictionErrs)
211211
}
212212
return evictionResults, nil
213213
}

cluster-autoscaler/core/scaledown/actuation/group_deletion_scheduler.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -79,7 +79,7 @@ func (ds *GroupDeletionScheduler) ResetAndReportMetrics() {
7979
func (ds *GroupDeletionScheduler) ScheduleDeletion(nodeInfo *framework.NodeInfo, nodeGroup cloudprovider.NodeGroup, batchSize int, drain bool) {
8080
opts, err := nodeGroup.GetOptions(ds.ctx.NodeGroupDefaults)
8181
if err != nil && err != cloudprovider.ErrNotImplemented {
82-
nodeDeleteResult := status.NodeDeleteResult{ResultType: status.NodeDeleteErrorInternal, Err: errors.NewAutoscalerError(errors.InternalError, "GetOptions returned error %v", err)}
82+
nodeDeleteResult := status.NodeDeleteResult{ResultType: status.NodeDeleteErrorInternal, Err: errors.NewAutoscalerErrorf(errors.InternalError, "GetOptions returned error %v", err)}
8383
ds.AbortNodeDeletion(nodeInfo.Node(), nodeGroup.Id(), drain, "failed to get autoscaling options for a node group", nodeDeleteResult)
8484
return
8585
}

cluster-autoscaler/core/scaleup/orchestrator/executor.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -253,7 +253,7 @@ func checkUniqueNodeGroups(scaleUpInfos []nodegroupset.ScaleUpInfo) errors.Autos
253253
uniqueGroups := make(map[string]bool)
254254
for _, info := range scaleUpInfos {
255255
if uniqueGroups[info.Group.Id()] {
256-
return errors.NewAutoscalerError(
256+
return errors.NewAutoscalerErrorf(
257257
errors.InternalError,
258258
"assertion failure: detected group double scaling: %s", info.Group.Id(),
259259
)

cluster-autoscaler/core/scaleup/orchestrator/orchestrator.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -603,7 +603,7 @@ func (o *ScaleUpOrchestrator) UpcomingNodes(nodeInfos map[string]*framework.Node
603603
for nodeGroup, numberOfNodes := range upcomingCounts {
604604
nodeTemplate, found := nodeInfos[nodeGroup]
605605
if !found {
606-
return nil, errors.NewAutoscalerError(errors.InternalError, "failed to find template node for node group %s", nodeGroup)
606+
return nil, errors.NewAutoscalerErrorf(errors.InternalError, "failed to find template node for node group %s", nodeGroup)
607607
}
608608
for i := 0; i < numberOfNodes; i++ {
609609
upcomingNodes = append(upcomingNodes, nodeTemplate)

cluster-autoscaler/core/scaleup/resource/manager.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -214,7 +214,7 @@ func (m *Manager) coresMemoryTotal(ctx *context.AutoscalingContext, nodeInfos ma
214214

215215
nodeInfo, found := nodeInfos[nodeGroup.Id()]
216216
if !found {
217-
return 0, 0, errors.NewAutoscalerError(errors.CloudProviderError, "No node info for: %s", nodeGroup.Id())
217+
return 0, 0, errors.NewAutoscalerErrorf(errors.CloudProviderError, "No node info for: %s", nodeGroup.Id())
218218
}
219219

220220
if currentSize > 0 {
@@ -243,7 +243,7 @@ func (m *Manager) customResourcesTotal(ctx *context.AutoscalingContext, nodeInfo
243243

244244
nodeInfo, found := nodeInfos[nodeGroup.Id()]
245245
if !found {
246-
return nil, errors.NewAutoscalerError(errors.CloudProviderError, "No node info for: %s", nodeGroup.Id())
246+
return nil, errors.NewAutoscalerErrorf(errors.CloudProviderError, "No node info for: %s", nodeGroup.Id())
247247
}
248248

249249
if currentSize > 0 {

cluster-autoscaler/expander/factory/expander_factory.go

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -58,18 +58,18 @@ func (f *Factory) Build(names []string) (expander.Strategy, errors.AutoscalerErr
5858
strategySeen := false
5959
for i, name := range names {
6060
if _, ok := seenExpanders[name]; ok {
61-
return nil, errors.NewAutoscalerError(errors.InternalError, "Expander %s was specified multiple times, each expander must not be specified more than once", name)
61+
return nil, errors.NewAutoscalerErrorf(errors.InternalError, "Expander %s was specified multiple times, each expander must not be specified more than once", name)
6262
}
6363
if strategySeen {
64-
return nil, errors.NewAutoscalerError(errors.InternalError, "Expander %s came after an expander %s that will always return only one result, this is not allowed since %s will never be used", name, names[i-1], name)
64+
return nil, errors.NewAutoscalerErrorf(errors.InternalError, "Expander %s came after an expander %s that will always return only one result, this is not allowed since %s will never be used", name, names[i-1], name)
6565
}
6666
seenExpanders[name] = struct{}{}
6767

6868
create, known := f.createFunc[name]
6969
if known {
7070
filters = append(filters, create())
7171
} else {
72-
return nil, errors.NewAutoscalerError(errors.InternalError, "Expander %s not supported", name)
72+
return nil, errors.NewAutoscalerErrorf(errors.InternalError, "Expander %s not supported", name)
7373
}
7474
if _, ok := filters[len(filters)-1].(expander.Strategy); ok {
7575
strategySeen = true

cluster-autoscaler/processors/nodegroupset/balancing_processor.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,7 @@ func (b *BalancingNodeGroupSetProcessor) FindSimilarNodeGroups(context *context.
4141
nodeGroupId := nodeGroup.Id()
4242
nodeInfo, found := nodeInfosForGroups[nodeGroupId]
4343
if !found {
44-
return []cloudprovider.NodeGroup{}, errors.NewAutoscalerError(
44+
return []cloudprovider.NodeGroup{}, errors.NewAutoscalerErrorf(
4545
errors.InternalError,
4646
"failed to find template node for node group %s",
4747
nodeGroupId)
@@ -88,7 +88,7 @@ func (b *BalancingNodeGroupSetProcessor) BalanceScaleUpBetweenGroups(context *co
8888
for _, ng := range groups {
8989
currentSize, err := ng.TargetSize()
9090
if err != nil {
91-
return []ScaleUpInfo{}, errors.NewAutoscalerError(
91+
return []ScaleUpInfo{}, errors.NewAutoscalerErrorf(
9292
errors.CloudProviderError,
9393
"failed to get node group size: %v", err)
9494
}

0 commit comments

Comments (0)