
Commit 95860cf

Fix Go vet errors for master golang
Co-authored-by: Rajalakshmi-Girish <[email protected]>
Co-authored-by: Abhishek Kr Srivastav <[email protected]>
1 parent e30d994 commit 95860cf
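
The changes below all address one family of go vet findings: the printf check in the in-development (master) Go toolchain reports printf-style calls whose format string is not a constant, for example t.Errorf(msg) or fmt.Errorf(errMsg). The sketch below is illustrative only, not code from this commit; it shows the diagnostic and the usual fix of switching to the non-formatting variant.

package main

import (
    "errors"
    "fmt"
)

// bad trips the vet printf check: msg is not a constant format string.
func bad(msg string) error {
    return fmt.Errorf(msg) // vet: non-constant format string in call to fmt.Errorf
}

// good carries the same message without treating it as a format string.
func good(msg string) error {
    return errors.New(msg)
}

func main() {
    msg := "disk is 80% full" // the '%' would be misparsed as a verb by bad()
    fmt.Println(bad(msg))
    fmt.Println(good(msg))
}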

111 files changed: +345, -318 lines


cmd/kubeadm/app/phases/certs/renewal/readwriter_test.go

Lines changed: 1 addition & 2 deletions
@@ -19,7 +19,6 @@ package renewal
 import (
     "crypto"
     "crypto/x509"
-    "fmt"
     "net"
     "os"
     "path/filepath"
@@ -265,7 +264,7 @@ func TestPKICertificateReadWriterExists(t *testing.T) {
         }
     }()
     filename := "testfile"
-    tmpfilepath := filepath.Join(tmpdir, fmt.Sprintf(filename+".crt"))
+    tmpfilepath := filepath.Join(tmpdir, filename+".crt")
     err = os.WriteFile(tmpfilepath, nil, 0644)
     if err != nil {
         t.Fatalf("Couldn't write file: %v", err)

pkg/apis/core/v1/defaults_test.go

Lines changed: 3 additions & 3 deletions
@@ -185,7 +185,7 @@ func testWorkloadDefaults(t *testing.T, featuresEnabled bool) {
         defaults := detectDefaults(t, rc, reflect.ValueOf(template))
         if !reflect.DeepEqual(expectedDefaults, defaults) {
             t.Errorf("Defaults for PodTemplateSpec changed. This can cause spurious rollouts of workloads on API server upgrade.")
-            t.Logf(cmp.Diff(expectedDefaults, defaults))
+            t.Log(cmp.Diff(expectedDefaults, defaults))
         }
     })
     t.Run("hostnet PodTemplateSpec with ports", func(t *testing.T) {
@@ -223,7 +223,7 @@ func testWorkloadDefaults(t *testing.T, featuresEnabled bool) {
         }()
         if !reflect.DeepEqual(expected, defaults) {
             t.Errorf("Defaults for PodTemplateSpec changed. This can cause spurious rollouts of workloads on API server upgrade.")
-            t.Logf(cmp.Diff(expected, defaults))
+            t.Log(cmp.Diff(expected, defaults))
         }
     })
 }
@@ -374,7 +374,7 @@ func testPodDefaults(t *testing.T, featuresEnabled bool) {
     defaults := detectDefaults(t, pod, reflect.ValueOf(pod))
     if !reflect.DeepEqual(expectedDefaults, defaults) {
         t.Errorf("Defaults for PodSpec changed. This can cause spurious restarts of containers on API server upgrade.")
-        t.Logf(cmp.Diff(expectedDefaults, defaults))
+        t.Log(cmp.Diff(expectedDefaults, defaults))
     }
 }

pkg/controller/endpoint/endpoints_controller_test.go

Lines changed: 1 addition & 1 deletion
@@ -2782,7 +2782,7 @@ func waitForChanReceive(t *testing.T, timeout time.Duration, receivingChan chan
     timer := time.NewTimer(timeout)
     select {
     case <-timer.C:
-        t.Errorf(errorMsg)
+        t.Error(errorMsg)
     case <-receivingChan:
     }
 }
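
The same rule covers the (*testing.T) printf wrappers: Logf, Errorf, and Fatalf treat their first argument as a format string, so a pre-built string, whether a cmp.Diff result or an error message variable, goes to Log, Error, or Fatal instead. A minimal sketch of the pattern; the test name and values are hypothetical:

package demo

import "testing"

func TestLogPrebuiltMessage(t *testing.T) {
    diff := "replicas: 50% -> 75%" // stands in for a cmp.Diff result or error message
    t.Log(diff) // the string is data, not a format
    // t.Logf(diff) // would be flagged by vet: non-constant format string
}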

pkg/controller/nodelifecycle/node_lifecycle_controller_test.go

Lines changed: 6 additions & 6 deletions
@@ -2450,7 +2450,7 @@ func TestApplyNoExecuteTaints(t *testing.T) {
     node2.Status = healthyNodeNewStatus
     _, err = fakeNodeHandler.UpdateStatus(ctx, node2, metav1.UpdateOptions{})
     if err != nil {
-        t.Errorf(err.Error())
+        t.Error(err.Error())
         return
     }
     if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
@@ -2479,7 +2479,7 @@ func TestApplyNoExecuteTaints(t *testing.T) {
     node3.Status = unhealthyNodeNewStatus
     _, err = fakeNodeHandler.UpdateStatus(ctx, node3, metav1.UpdateOptions{})
     if err != nil {
-        t.Errorf(err.Error())
+        t.Error(err.Error())
         return
     }
     if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
@@ -2492,7 +2492,7 @@ func TestApplyNoExecuteTaints(t *testing.T) {
     node3.Status.Conditions = overrideNodeNewStatusConditions
     _, err = fakeNodeHandler.UpdateStatus(ctx, node3, metav1.UpdateOptions{})
     if err != nil {
-        t.Errorf(err.Error())
+        t.Error(err.Error())
         return
     }
     if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
@@ -2638,7 +2638,7 @@ func TestApplyNoExecuteTaintsToNodesEnqueueTwice(t *testing.T) {
     node0.Status = healthyNodeNewStatus
     _, err = fakeNodeHandler.UpdateStatus(ctx, node0, metav1.UpdateOptions{})
     if err != nil {
-        t.Errorf(err.Error())
+        t.Error(err.Error())
         return
     }
@@ -2870,12 +2870,12 @@ func TestSwapUnreachableNotReadyTaints(t *testing.T) {
     node1.Status = healthyNodeNewStatus
     _, err = fakeNodeHandler.UpdateStatus(ctx, node0, metav1.UpdateOptions{})
     if err != nil {
-        t.Errorf(err.Error())
+        t.Error(err.Error())
         return
     }
     _, err = fakeNodeHandler.UpdateStatus(ctx, node1, metav1.UpdateOptions{})
     if err != nil {
-        t.Errorf(err.Error())
+        t.Error(err.Error())
         return
     }

pkg/controller/podautoscaler/horizontal.go

Lines changed: 16 additions & 16 deletions
@@ -338,7 +338,7 @@ func (a *HorizontalController) computeReplicasForMetrics(ctx context.Context, hp
     // return an error and set the condition of the hpa based on the first invalid metric.
     // Otherwise set the condition as scaling active as we're going to scale
     if invalidMetricsCount >= len(metricSpecs) || (invalidMetricsCount > 0 && replicas < specReplicas) {
-        setCondition(hpa, invalidMetricCondition.Type, invalidMetricCondition.Status, invalidMetricCondition.Reason, invalidMetricCondition.Message)
+        setCondition(hpa, invalidMetricCondition.Type, invalidMetricCondition.Status, invalidMetricCondition.Reason, "%s", invalidMetricCondition.Message)
         return -1, "", statuses, time.Time{}, invalidMetricError
     }
     setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionTrue, "ValidMetricFound", "the HPA was able to successfully calculate a replica count from %s", metric)
@@ -385,15 +385,15 @@ func (a *HorizontalController) validateAndParseSelector(hpa *autoscalingv2.Horiz
         errMsg := "selector is required"
         a.eventRecorder.Event(hpa, v1.EventTypeWarning, "SelectorRequired", errMsg)
         setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "InvalidSelector", "the HPA target's scale is missing a selector")
-        return nil, fmt.Errorf(errMsg)
+        return nil, errors.New(errMsg)
     }

     parsedSelector, err := labels.Parse(selector)
     if err != nil {
         errMsg := fmt.Sprintf("couldn't convert selector into a corresponding internal selector object: %v", err)
         a.eventRecorder.Event(hpa, v1.EventTypeWarning, "InvalidSelector", errMsg)
-        setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "InvalidSelector", errMsg)
-        return nil, fmt.Errorf(errMsg)
+        setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "InvalidSelector", "%s", errMsg)
+        return nil, errors.New(errMsg)
     }

     hpaKey := selectors.Key{Name: hpa.Name, Namespace: hpa.Namespace}
@@ -413,8 +413,8 @@ func (a *HorizontalController) validateAndParseSelector(hpa *autoscalingv2.Horiz
     if len(selectingHpas) > 1 {
         errMsg := fmt.Sprintf("pods by selector %v are controlled by multiple HPAs: %v", selector, selectingHpas)
         a.eventRecorder.Event(hpa, v1.EventTypeWarning, "AmbiguousSelector", errMsg)
-        setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "AmbiguousSelector", errMsg)
-        return nil, fmt.Errorf(errMsg)
+        setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "AmbiguousSelector", "%s", errMsg)
+        return nil, errors.New(errMsg)
     }

     return parsedSelector, nil
@@ -570,7 +570,7 @@ func (a *HorizontalController) computeStatusForObjectMetric(specReplicas, status
         return replicaCountProposal, timestampProposal, fmt.Sprintf("external metric %s(%+v)", metricSpec.Object.Metric.Name, metricSpec.Object.Metric.Selector), autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
     }
     errMsg := "invalid object metric source: neither a value target nor an average value target was set"
-    err = fmt.Errorf(errMsg)
+    err = errors.New(errMsg)
     condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetObjectMetric", err)
     return 0, time.Time{}, "", condition, err
 }
@@ -617,7 +617,7 @@ func (a *HorizontalController) computeStatusForResourceMetricGeneric(ctx context

     if target.AverageUtilization == nil {
         errMsg := "invalid resource metric source: neither an average utilization target nor an average value (usage) target was set"
-        return 0, nil, time.Time{}, "", condition, fmt.Errorf(errMsg)
+        return 0, nil, time.Time{}, "", condition, errors.New(errMsg)
     }

     targetUtilization := *target.AverageUtilization
@@ -719,9 +719,9 @@ func (a *HorizontalController) computeStatusForExternalMetric(specReplicas, stat
         return replicaCountProposal, timestampProposal, fmt.Sprintf("external metric %s(%+v)", metricSpec.External.Metric.Name, metricSpec.External.Metric.Selector), autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
     }
     errMsg := "invalid external metric source: neither a value target nor an average value target was set"
-    err = fmt.Errorf(errMsg)
+    err = errors.New(errMsg)
     condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetExternalMetric", err)
-    return 0, time.Time{}, "", condition, fmt.Errorf(errMsg)
+    return 0, time.Time{}, "", condition, errors.New(errMsg)
 }

 func (a *HorizontalController) recordInitialRecommendation(currentReplicas int32, key string) {
@@ -950,12 +950,12 @@ func (a *HorizontalController) normalizeDesiredReplicas(hpa *autoscalingv2.Horiz
         setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "ReadyForNewScale", "recommended size matches current size")
     }

-    desiredReplicas, condition, reason := convertDesiredReplicasWithRules(currentReplicas, stabilizedRecommendation, minReplicas, hpa.Spec.MaxReplicas)
+    desiredReplicas, reason, message := convertDesiredReplicasWithRules(currentReplicas, stabilizedRecommendation, minReplicas, hpa.Spec.MaxReplicas)

     if desiredReplicas == stabilizedRecommendation {
-        setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionFalse, condition, reason)
+        setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionFalse, reason, "%s", message)
     } else {
-        setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, condition, reason)
+        setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, reason, "%s", message)
     }

     return desiredReplicas
@@ -991,15 +991,15 @@ func (a *HorizontalController) normalizeDesiredReplicasWithBehaviors(hpa *autosc
     normalizationArg.DesiredReplicas = stabilizedRecommendation
     if stabilizedRecommendation != prenormalizedDesiredReplicas {
         // "ScaleUpStabilized" || "ScaleDownStabilized"
-        setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, reason, message)
+        setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, reason, "%s", message)
     } else {
         setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "ReadyForNewScale", "recommended size matches current size")
     }
     desiredReplicas, reason, message := a.convertDesiredReplicasWithBehaviorRate(normalizationArg)
     if desiredReplicas == stabilizedRecommendation {
-        setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionFalse, reason, message)
+        setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionFalse, reason, "%s", message)
     } else {
-        setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, reason, message)
+        setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, reason, "%s", message)
     }

     return desiredReplicas
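
setCondition (and setConditionInList in the test file below) end in printf-style message and argument parameters, so vet treats the message position as a format string. Pre-built messages are therefore passed through a constant "%s", which also keeps any literal '%' in the message intact, while plain error strings move from fmt.Errorf to errors.New. A rough sketch of the wrapper pattern; setConditionSketch is a hypothetical stand-in, not the controller's helper:

package main

import "fmt"

// setConditionSketch mimics a printf-style wrapper: the message is built
// with Sprintf from the trailing format and args.
func setConditionSketch(reason, messageFmt string, args ...interface{}) {
    fmt.Println(reason + ": " + fmt.Sprintf(messageFmt, args...))
}

func main() {
    msg := "couldn't compute replicas: 100% of metrics were invalid" // built elsewhere
    setConditionSketch("InvalidMetric", "%s", msg) // safe even though msg contains '%'
    // setConditionSketch("InvalidMetric", msg)    // vet: non-constant format string
}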

pkg/controller/podautoscaler/horizontal_test.go

Lines changed: 1 addition & 1 deletion
@@ -82,7 +82,7 @@ func statusOkWithOverrides(overrides ...autoscalingv2.HorizontalPodAutoscalerCon
     resv2 := make([]autoscalingv2.HorizontalPodAutoscalerCondition, len(statusOk))
     copy(resv2, statusOk)
     for _, override := range overrides {
-        resv2 = setConditionInList(resv2, override.Type, override.Status, override.Reason, override.Message)
+        resv2 = setConditionInList(resv2, override.Type, override.Status, override.Reason, "%s", override.Message)
     }

     // copy to a v1 slice

pkg/controller/statefulset/stateful_set_test.go

Lines changed: 19 additions & 19 deletions
@@ -824,35 +824,35 @@ func TestStaleOwnerRefOnScaleup(t *testing.T) {
         logger, ctx := ktesting.NewTestContext(t)
         ssc, spc, om, _ := newFakeStatefulSetController(ctx, set)
         if err := scaleUpStatefulSetController(logger, set, ssc, spc, om); err != nil {
-            t.Errorf(onPolicy("Failed to turn up StatefulSet : %s", err))
+            t.Error(onPolicy("Failed to turn up StatefulSet : %s", err))
         }
         var err error
         if set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name); err != nil {
-            t.Errorf(onPolicy("Could not get scaled up set: %v", err))
+            t.Error(onPolicy("Could not get scaled up set: %v", err))
         }
         if set.Status.Replicas != 3 {
-            t.Errorf(onPolicy("set.Status.Replicas = %v; want 3", set.Status.Replicas))
+            t.Error(onPolicy("set.Status.Replicas = %v; want 3", set.Status.Replicas))
         }
         *set.Spec.Replicas = 2
         if err := scaleDownStatefulSetController(logger, set, ssc, spc, om); err != nil {
-            t.Errorf(onPolicy("Failed to scale down StatefulSet : msg, %s", err))
+            t.Error(onPolicy("Failed to scale down StatefulSet : msg, %s", err))
         }
         set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name)
         if err != nil {
-            t.Errorf(onPolicy("Could not get scaled down StatefulSet: %v", err))
+            t.Error(onPolicy("Could not get scaled down StatefulSet: %v", err))
         }
         if set.Status.Replicas != 2 {
-            t.Errorf(onPolicy("Failed to scale statefulset to 2 replicas"))
+            t.Error(onPolicy("Failed to scale statefulset to 2 replicas"))
         }

         var claim *v1.PersistentVolumeClaim
         claim, err = om.claimsLister.PersistentVolumeClaims(set.Namespace).Get("datadir-foo-2")
         if err != nil {
-            t.Errorf(onPolicy("Could not find expected pvc datadir-foo-2"))
+            t.Error(onPolicy("Could not find expected pvc datadir-foo-2"))
         }
         refs := claim.GetOwnerReferences()
         if len(refs) != 1 {
-            t.Errorf(onPolicy("Expected only one refs: %v", refs))
+            t.Error(onPolicy("Expected only one refs: %v", refs))
         }
         // Make the pod ref stale.
         for i := range refs {
@@ -863,29 +863,29 @@ func TestStaleOwnerRefOnScaleup(t *testing.T) {
         }
         claim.SetOwnerReferences(refs)
         if err = om.claimsIndexer.Update(claim); err != nil {
-            t.Errorf(onPolicy("Could not update claim with new owner ref: %v", err))
+            t.Error(onPolicy("Could not update claim with new owner ref: %v", err))
         }

         *set.Spec.Replicas = 3
         // Until the stale PVC goes away, the scale up should never finish. Run 10 iterations, then delete the PVC.
         if err := scaleUpStatefulSetControllerBounded(logger, set, ssc, spc, om, 10); err != nil {
-            t.Errorf(onPolicy("Failed attempt to scale StatefulSet back up: %v", err))
+            t.Error(onPolicy("Failed attempt to scale StatefulSet back up: %v", err))
         }
         set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name)
         if err != nil {
-            t.Errorf(onPolicy("Could not get scaled down StatefulSet: %v", err))
+            t.Error(onPolicy("Could not get scaled down StatefulSet: %v", err))
         }
         if set.Status.Replicas != 2 {
-            t.Errorf(onPolicy("Expected set to stay at two replicas"))
+            t.Error(onPolicy("Expected set to stay at two replicas"))
         }

         claim, err = om.claimsLister.PersistentVolumeClaims(set.Namespace).Get("datadir-foo-2")
         if err != nil {
-            t.Errorf(onPolicy("Could not find expected pvc datadir-foo-2"))
+            t.Error(onPolicy("Could not find expected pvc datadir-foo-2"))
         }
         refs = claim.GetOwnerReferences()
         if len(refs) != 1 {
-            t.Errorf(onPolicy("Unexpected change to condemned pvc ownerRefs: %v", refs))
+            t.Error(onPolicy("Unexpected change to condemned pvc ownerRefs: %v", refs))
         }
         foundPodRef := false
         for i := range refs {
@@ -895,21 +895,21 @@ func TestStaleOwnerRefOnScaleup(t *testing.T) {
             }
         }
         if !foundPodRef {
-            t.Errorf(onPolicy("Claim ref unexpectedly changed: %v", refs))
+            t.Error(onPolicy("Claim ref unexpectedly changed: %v", refs))
         }
         if err = om.claimsIndexer.Delete(claim); err != nil {
-            t.Errorf(onPolicy("Could not delete stale pvc: %v", err))
+            t.Error(onPolicy("Could not delete stale pvc: %v", err))
         }

         if err := scaleUpStatefulSetController(logger, set, ssc, spc, om); err != nil {
-            t.Errorf(onPolicy("Failed to scale StatefulSet back up: %v", err))
+            t.Error(onPolicy("Failed to scale StatefulSet back up: %v", err))
         }
         set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name)
         if err != nil {
-            t.Errorf(onPolicy("Could not get scaled down StatefulSet: %v", err))
+            t.Error(onPolicy("Could not get scaled down StatefulSet: %v", err))
         }
         if set.Status.Replicas != 3 {
-            t.Errorf(onPolicy("Failed to scale set back up once PVC was deleted"))
+            t.Error(onPolicy("Failed to scale set back up once PVC was deleted"))
         }
     }
 }
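
In this test the message is already assembled by the onPolicy helper, which by its usage formats a message for the policy being exercised, so its result goes to t.Error rather than t.Errorf. A small sketch of the analogous pattern; onPolicySketch is a hypothetical stand-in, not the test's actual helper:

package demo

import (
    "fmt"
    "testing"
)

// onPolicySketch prefixes an already-formatted message with the policy name.
func onPolicySketch(policy, format string, args ...interface{}) string {
    return "[" + policy + "] " + fmt.Sprintf(format, args...)
}

func TestScaleSketch(t *testing.T) {
    gotReplicas, wantReplicas := 3, 3
    if gotReplicas != wantReplicas {
        // The helper's return value is a finished message, so use t.Error.
        t.Error(onPolicySketch("Retain", "set.Status.Replicas = %v; want %v", gotReplicas, wantReplicas))
    }
}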

pkg/controller/util/selectors/bimultimap_test.go

Lines changed: 2 additions & 2 deletions
@@ -235,7 +235,7 @@ func TestAssociations(t *testing.T) {
             // Run consistency check after every operation.
             err := consistencyCheck(multimap)
             if err != nil {
-                t.Fatalf(err.Error())
+                t.Fatal(err.Error())
             }
         }
         for _, expect := range tc.want {
@@ -261,7 +261,7 @@ func TestEfficientAssociation(t *testing.T) {

     err := forwardSelect(key("hpa-1"), key("pod-1"), key("pod-2"))(m)
     if err != nil {
-        t.Errorf(err.Error())
+        t.Error(err.Error())
     }
 }

pkg/controller/volume/attachdetach/cache/actual_state_of_world.go

Lines changed: 2 additions & 1 deletion
@@ -22,6 +22,7 @@ reference them.
 package cache

 import (
+    "errors"
     "fmt"
     "sync"
     "time"
@@ -524,7 +525,7 @@ func (asw *actualStateOfWorld) updateNodeStatusUpdateNeeded(nodeName types.NodeN
         // should not happen
         errMsg := fmt.Sprintf("Failed to set statusUpdateNeeded to needed %t, because nodeName=%q does not exist",
             needed, nodeName)
-        return fmt.Errorf(errMsg)
+        return errors.New(errMsg)
     }

     nodeToUpdate.statusUpdateNeeded = needed
