
Commit 5da9ba5

Merge pull request kubernetes#92750 from wawa0210/e2e-app-staticcheck
fix test/e2e/apps staticcheck
2 parents: af29f81 + 9d1948a

File tree: 9 files changed, +21 −18 lines


hack/.staticcheck_failures

Lines changed: 0 additions & 1 deletion
@@ -8,7 +8,6 @@ pkg/volume/azure_dd
 pkg/volume/gcepd
 pkg/volume/rbd
 pkg/volume/testing
-test/e2e/apps
 test/e2e/autoscaling
 test/e2e_node
 test/integration/examples

test/e2e/apps/cronjob.go

Lines changed: 1 addition & 1 deletion
@@ -453,7 +453,7 @@ func waitForJobReplaced(c clientset.Interface, ns, previousJobName string) error
 		// Ignore Jobs pending deletion, since deletion of Jobs is now asynchronous.
 		aliveJobs := filterNotDeletedJobs(jobs)
 		if len(aliveJobs) > 1 {
-			return false, fmt.Errorf("More than one job is running %+v", jobs.Items)
+			return false, fmt.Errorf("more than one job is running %+v", jobs.Items)
 		} else if len(aliveJobs) == 0 {
 			framework.Logf("Warning: Found 0 jobs in namespace %v", ns)
 			return false, nil
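
The change above follows the Go convention, enforced by staticcheck's error-string style check (ST1005, as I understand it), that error strings start lower-case and carry no trailing punctuation, since they are routinely wrapped into longer messages. A minimal sketch of why that matters, using hypothetical names:

package main

import (
	"errors"
	"fmt"
)

// findJob is a hypothetical helper, used only to show how error text reads once wrapped.
func findJob(name string) error {
	if name == "" {
		// Lower-case, no trailing period: composes cleanly when wrapped below.
		return errors.New("job name must not be empty")
	}
	return nil
}

func main() {
	if err := findJob(""); err != nil {
		// Prints "waiting for replacement job: job name must not be empty";
		// a capitalized inner message would sit awkwardly mid-sentence.
		fmt.Println(fmt.Errorf("waiting for replacement job: %w", err))
	}
}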

test/e2e/apps/daemon_set.go

Lines changed: 3 additions & 3 deletions
@@ -606,7 +606,7 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s
 		if err != nil {
 			return nil, err
 		} else if len(newLabels) != len(labels) {
-			return nil, fmt.Errorf("Could not set daemon set test labels as expected")
+			return nil, fmt.Errorf("could not set daemon set test labels as expected")
 		}

 		return newNode, nil
@@ -698,11 +698,11 @@ func checkRunningOnNoNodes(f *framework.Framework, ds *appsv1.DaemonSet) func()
 func checkDaemonStatus(f *framework.Framework, dsName string) error {
 	ds, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Get(context.TODO(), dsName, metav1.GetOptions{})
 	if err != nil {
-		return fmt.Errorf("Could not get daemon set from v1")
+		return fmt.Errorf("could not get daemon set from v1")
 	}
 	desired, scheduled, ready := ds.Status.DesiredNumberScheduled, ds.Status.CurrentNumberScheduled, ds.Status.NumberReady
 	if desired != scheduled && desired != ready {
-		return fmt.Errorf("Error in daemon status. DesiredScheduled: %d, CurrentScheduled: %d, Ready: %d", desired, scheduled, ready)
+		return fmt.Errorf("error in daemon status. DesiredScheduled: %d, CurrentScheduled: %d, Ready: %d", desired, scheduled, ready)
 	}
 	return nil
 }

test/e2e/apps/deployment.go

Lines changed: 4 additions & 2 deletions
@@ -378,7 +378,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
 	numPodCreation := 1
 	for {
 		select {
-		case event, _ := <-w.ResultChan():
+		case event := <-w.ResultChan():
 			if event.Type != watch.Added {
 				continue
 			}
@@ -455,6 +455,7 @@ func testRolloverDeployment(f *framework.Framework) {
 	framework.Logf("Make sure deployment %q performs scaling operations", deploymentName)
 	// Make sure the deployment starts to scale up and down replica sets by checking if its updated replicas >= 1
 	err = waitForDeploymentUpdatedReplicasGTE(c, ns, deploymentName, deploymentReplicas, deployment.Generation)
+	framework.ExpectNoError(err)
 	// Check if it's updated to revision 1 correctly
 	framework.Logf("Check revision of new replica set for deployment %q", deploymentName)
 	err = checkDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage)
@@ -626,6 +627,7 @@ func testIterativeDeployments(f *framework.Framework) {
 		deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
 			update.Spec.Paused = false
 		})
+		framework.ExpectNoError(err)
 	}

 	framework.Logf("Waiting for deployment %q to be observed by the controller", deploymentName)
@@ -799,7 +801,7 @@ func testProportionalScalingDeployment(f *framework.Framework) {
 	// Scale the deployment to 30 replicas.
 	newReplicas = int32(30)
 	framework.Logf("Scaling up the deployment %q from %d to %d", deploymentName, replicas, newReplicas)
-	deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
+	_, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
 		update.Spec.Replicas = &newReplicas
 	})
 	framework.ExpectNoError(err)
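
Two kinds of findings are addressed in deployment.go: a channel receive written as `event, _ := <-w.ResultChan()` carries a pointless blank identifier (a plain `event := <-w.ResultChan()` is equivalent), and an `err` or `deployment` that is assigned but never read is reported as a dead store, which the new framework.ExpectNoError calls and the `_, err =` assignment resolve. A small, self-contained sketch of both patterns, with made-up names:

package main

import (
	"errors"
	"fmt"
)

// update is a hypothetical stand-in for a call whose error used to be ignored.
func update() (string, error) { return "", errors.New("update failed") }

func main() {
	events := make(chan string, 1)
	events <- "ADDED"

	// A plain receive already ignores the second "ok" value, so
	// `event, _ := <-events` would only add noise.
	event := <-events
	fmt.Println("event:", event)

	// The returned error is read immediately instead of sitting as a dead store.
	if _, err := update(); err != nil {
		fmt.Println("error:", err)
	}
}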

test/e2e/apps/disruption.go

Lines changed: 2 additions & 0 deletions
@@ -396,9 +396,11 @@ func patchPDBOrDie(cs kubernetes.Interface, dc dynamic.Interface, ns string, nam
 	err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
 		old := getPDBStatusOrDie(dc, ns, name)
 		patchBytes, err := f(old)
+		framework.ExpectNoError(err)
 		if updated, err = cs.PolicyV1beta1().PodDisruptionBudgets(ns).Patch(context.TODO(), old.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, subresources...); err != nil {
 			return err
 		}
+		framework.ExpectNoError(err)
 		return nil
 	})
test/e2e/apps/network_partition.go

Lines changed: 1 addition & 1 deletion
@@ -123,7 +123,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {

 		// TODO(foxish): Re-enable testing on gce after kubernetes#56787 is fixed.
 		e2eskipper.SkipUnlessProviderIs("gke", "aws")
-		if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 {
+		if strings.Contains(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") {
 			framework.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
 		}
 	})
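
This is the usual staticcheck simplification (S1003, if I recall the check ID correctly): `strings.Index(s, sub) >= 0` and `strings.Contains(s, sub)` are equivalent, and the latter states the intent directly. A trivial demonstration:

package main

import (
	"fmt"
	"strings"
)

func main() {
	nodeGroups := "mig-a,mig-b" // hypothetical comma-separated instance group list

	// Both expressions report whether a comma is present; Contains reads better.
	fmt.Println(strings.Index(nodeGroups, ",") >= 0) // true
	fmt.Println(strings.Contains(nodeGroups, ","))   // true
}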

test/e2e/apps/rc.go

Lines changed: 3 additions & 2 deletions
@@ -251,6 +251,7 @@ var _ = SIGDescribe("ReplicationController", func() {
 			}
 			return true, nil
 		})
+		framework.ExpectNoError(err, "Failed to find updated ready replica count")
 		framework.ExpectEqual(eventFound, true, "Failed to find updated ready replica count")

 		ginkgo.By("fetching ReplicationController status")
@@ -445,9 +446,9 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri
 	if err != nil {
 		updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{})
 		if getErr == nil {
-			err = fmt.Errorf("Pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
+			err = fmt.Errorf("pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
 		} else {
-			err = fmt.Errorf("Pod %q never run: %v", pod.Name, err)
+			err = fmt.Errorf("pod %q never run: %v", pod.Name, err)
 		}
 	}
 	framework.ExpectNoError(err)

test/e2e/apps/replica_set.go

Lines changed: 2 additions & 2 deletions
@@ -148,9 +148,9 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
 	if err != nil {
 		updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{})
 		if getErr == nil {
-			err = fmt.Errorf("Pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
+			err = fmt.Errorf("pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
 		} else {
-			err = fmt.Errorf("Pod %q never run: %v", pod.Name, err)
+			err = fmt.Errorf("pod %q never run: %v", pod.Name, err)
 		}
 	}
 	framework.ExpectNoError(err)

test/e2e/apps/statefulset.go

Lines changed: 5 additions & 6 deletions
@@ -843,7 +843,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 		ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
 		framework.ExpectNoError(err)
 		e2estatefulset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
-		ss = waitForStatus(c, ss)
+		waitForStatus(c, ss)

 		ginkgo.By("getting scale subresource")
 		scale, err := c.AppsV1().StatefulSets(ns).GetScale(context.TODO(), ssName, metav1.GetOptions{})
@@ -1151,7 +1151,7 @@ func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) {
 	e2estatefulset.SortStatefulPods(pods)
 	err = breakPodHTTPProbe(ss, &pods.Items[1])
 	framework.ExpectNoError(err)
-	ss, pods = waitForPodNotReady(c, ss, pods.Items[1].Name)
+	ss, _ = waitForPodNotReady(c, ss, pods.Items[1].Name)
 	newImage := NewWebserverImage
 	oldImage := ss.Spec.Template.Spec.Containers[0].Image

@@ -1172,7 +1172,7 @@ func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) {
 	e2estatefulset.SortStatefulPods(pods)
 	err = restorePodHTTPProbe(ss, &pods.Items[1])
 	framework.ExpectNoError(err)
-	ss, pods = e2estatefulset.WaitForPodReady(c, ss, pods.Items[1].Name)
+	ss, _ = e2estatefulset.WaitForPodReady(c, ss, pods.Items[1].Name)
 	ss, pods = waitForRollingUpdate(c, ss)
 	framework.ExpectEqual(ss.Status.CurrentRevision, updateRevision, fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal update revision %s on update completion",
 		ss.Namespace,
@@ -1195,9 +1195,8 @@ func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) {
 	ginkgo.By("Rolling back to a previous revision")
 	err = breakPodHTTPProbe(ss, &pods.Items[1])
 	framework.ExpectNoError(err)
-	ss, pods = waitForPodNotReady(c, ss, pods.Items[1].Name)
+	ss, _ = waitForPodNotReady(c, ss, pods.Items[1].Name)
 	priorRevision := currentRevision
-	currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
 	ss, err = updateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) {
 		update.Spec.Template.Spec.Containers[0].Image = oldImage
 	})
@@ -1211,7 +1210,7 @@ func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) {
 	pods = e2estatefulset.GetPodList(c, ss)
 	e2estatefulset.SortStatefulPods(pods)
 	restorePodHTTPProbe(ss, &pods.Items[1])
-	ss, pods = e2estatefulset.WaitForPodReady(c, ss, pods.Items[1].Name)
+	ss, _ = e2estatefulset.WaitForPodReady(c, ss, pods.Items[1].Name)
 	ss, pods = waitForRollingUpdate(c, ss)
 	framework.ExpectEqual(ss.Status.CurrentRevision, priorRevision, fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal prior revision %s on rollback completion",
 		ss.Namespace,
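
Most of the statefulset.go changes silence the same dead-store diagnostic: results such as `pods` were assigned and then overwritten before ever being read, so they are now discarded with the blank identifier (or the assignment is dropped entirely, as with `ss = waitForStatus(c, ss)`). A compact, self-contained illustration of the cleaned-up form, with invented helpers:

package main

import "fmt"

// rollOut is an invented helper returning a new state plus a pod list we may not need.
func rollOut(state string) (string, []string) {
	return state + "-updated", []string{"pod-0", "pod-1"}
}

func main() {
	state := "rev1"

	// The pod list is not read before the next call would overwrite it, so it is
	// discarded with the blank identifier instead of bound to a named variable.
	state, _ = rollOut(state)

	// Here both results are actually used, so both are bound.
	state, pods := rollOut(state)
	fmt.Println(state, len(pods))
}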
