
Commit 33a0afa

Merge pull request kubernetes#74508 from danielqsj/uapps

Fix golint failures for e2e/upgrades/...

2 parents: 3c92a6d + 6322025

21 files changed: +278 −254 lines
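The lint fixes below follow two golint rules: dot imports of Ginkgo and Gomega become qualified imports, and exported identifiers get doc comments that start with the identifier's name. A minimal before/after sketch of both rules; the package and type names here are illustrative, not taken from this diff:

package example

import (
	"github.com/onsi/ginkgo" // qualified; golint flags `. "github.com/onsi/ginkgo"` as a dot import
)

// ExampleUpgradeTest is a hypothetical harness used only to show the lint fixes.
type ExampleUpgradeTest struct{}

// Name returns the tracking name of the test.
// (golint wants every exported method commented, starting with its name.)
func (ExampleUpgradeTest) Name() string { return "example-upgrade" }

// Setup uses the qualified call style: ginkgo.By instead of a dot-imported By.
func (ExampleUpgradeTest) Setup() {
	ginkgo.By("Setting up the example")
}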

hack/.golint_failures

Lines changed: 0 additions & 3 deletions

@@ -673,9 +673,6 @@ test/e2e/storage/testsuites
 test/e2e/storage/utils
 test/e2e/storage/vsphere
 test/e2e/ui
-test/e2e/upgrades
-test/e2e/upgrades/apps
-test/e2e/upgrades/storage
 test/e2e/windows
 test/e2e_kubeadm
 test/e2e_node
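hack/.golint_failures is the repository's golint exception list: packages named in it are skipped by hack/verify-golint.sh. Deleting the three test/e2e/upgrades entries means those packages must now pass golint cleanly, which the file changes below make true.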

test/e2e/lifecycle/cluster_upgrade.go

Lines changed: 1 addition & 1 deletion

@@ -66,7 +66,7 @@ var gpuUpgradeTests = []upgrades.Test{
 }
 
 var statefulsetUpgradeTests = []upgrades.Test{
-	&upgrades.MySqlUpgradeTest{},
+	&upgrades.MySQLUpgradeTest{},
 	&upgrades.EtcdUpgradeTest{},
 	&upgrades.CassandraUpgradeTest{},
 }
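This one-line change tracks a type rename elsewhere in the PR and reflects golint's initialism rule: acronyms keep uniform case inside identifiers, so MySql becomes MySQL. A tiny sketch with made-up names:

package example

// Both variables compile, but golint flags the first one, suggesting the
// SQL initialism keep uniform case.
var mySqlDumpPath string // flagged by golint
var mySQLDumpPath string // idiomatic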

test/e2e/upgrades/apparmor.go

Lines changed: 15 additions & 13 deletions

@@ -22,8 +22,8 @@ import (
 	"k8s.io/kubernetes/test/e2e/common"
 	"k8s.io/kubernetes/test/e2e/framework"
 
-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
+	"github.com/onsi/ginkgo"
+	"github.com/onsi/gomega"
 	"github.com/onsi/gomega/gstruct"
 )
 
@@ -32,8 +32,10 @@ type AppArmorUpgradeTest struct {
 	pod *api.Pod
 }
 
+// Name returns the tracking name of the test.
 func (AppArmorUpgradeTest) Name() string { return "apparmor-upgrade" }
 
+// Skip returns true when this test can be skipped.
 func (AppArmorUpgradeTest) Skip(upgCtx UpgradeContext) bool {
 	supportedImages := make(map[string]bool)
 	for _, d := range common.AppArmorDistros {
@@ -50,11 +52,11 @@ func (AppArmorUpgradeTest) Skip(upgCtx UpgradeContext) bool {
 
 // Setup creates a secret and then verifies that a pod can consume it.
 func (t *AppArmorUpgradeTest) Setup(f *framework.Framework) {
-	By("Loading AppArmor profiles to nodes")
+	ginkgo.By("Loading AppArmor profiles to nodes")
 	common.LoadAppArmorProfiles(f)
 
 	// Create the initial test pod.
-	By("Creating a long-running AppArmor enabled pod.")
+	ginkgo.By("Creating a long-running AppArmor enabled pod.")
 	t.pod = common.CreateAppArmorTestPod(f, false, false)
 
 	// Verify initial state.
@@ -76,32 +78,32 @@ func (t *AppArmorUpgradeTest) Test(f *framework.Framework, done <-chan struct{},
 // Teardown cleans up any remaining resources.
 func (t *AppArmorUpgradeTest) Teardown(f *framework.Framework) {
 	// rely on the namespace deletion to clean up everything
-	By("Logging container failures")
+	ginkgo.By("Logging container failures")
 	framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf)
 }
 
 func (t *AppArmorUpgradeTest) verifyPodStillUp(f *framework.Framework) {
-	By("Verifying an AppArmor profile is continuously enforced for a pod")
+	ginkgo.By("Verifying an AppArmor profile is continuously enforced for a pod")
 	pod, err := f.PodClient().Get(t.pod.Name, metav1.GetOptions{})
 	framework.ExpectNoError(err, "Should be able to get pod")
-	Expect(pod.Status.Phase).To(Equal(api.PodRunning), "Pod should stay running")
-	Expect(pod.Status.ContainerStatuses[0].State.Running).NotTo(BeNil(), "Container should be running")
-	Expect(pod.Status.ContainerStatuses[0].RestartCount).To(BeZero(), "Container should not need to be restarted")
+	gomega.Expect(pod.Status.Phase).To(gomega.Equal(api.PodRunning), "Pod should stay running")
+	gomega.Expect(pod.Status.ContainerStatuses[0].State.Running).NotTo(gomega.BeNil(), "Container should be running")
+	gomega.Expect(pod.Status.ContainerStatuses[0].RestartCount).To(gomega.BeZero(), "Container should not need to be restarted")
 }
 
 func (t *AppArmorUpgradeTest) verifyNewPodSucceeds(f *framework.Framework) {
-	By("Verifying an AppArmor profile is enforced for a new pod")
+	ginkgo.By("Verifying an AppArmor profile is enforced for a new pod")
 	common.CreateAppArmorTestPod(f, false, true)
 }
 
 func (t *AppArmorUpgradeTest) verifyNodesAppArmorEnabled(f *framework.Framework) {
-	By("Verifying nodes are AppArmor enabled")
+	ginkgo.By("Verifying nodes are AppArmor enabled")
 	nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
 	framework.ExpectNoError(err, "Failed to list nodes")
 	for _, node := range nodes.Items {
-		Expect(node.Status.Conditions).To(gstruct.MatchElements(conditionType, gstruct.IgnoreExtras, gstruct.Elements{
+		gomega.Expect(node.Status.Conditions).To(gstruct.MatchElements(conditionType, gstruct.IgnoreExtras, gstruct.Elements{
 			"Ready": gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{
-				"Message": ContainSubstring("AppArmor enabled"),
+				"Message": gomega.ContainSubstring("AppArmor enabled"),
 			}),
 		}))
 	}
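The methods touched in this file trace the upgrade-test lifecycle: Name, an optional Skip, Setup before the upgrade, Test across it, and Teardown afterwards. A stripped-down sketch of that shape; ExampleTest is hypothetical, while the signatures match the ones visible above:

package upgrades

import (
	"github.com/onsi/ginkgo"

	"k8s.io/kubernetes/test/e2e/framework"
)

// ExampleTest exercises nothing real; it only shows the lifecycle shape.
type ExampleTest struct{}

// Name returns the tracking name of the test.
func (ExampleTest) Name() string { return "example-upgrade" }

// Setup creates whatever state must survive the upgrade.
func (t *ExampleTest) Setup(f *framework.Framework) {
	ginkgo.By("Creating the resources to be verified across the upgrade")
}

// Test blocks until the upgrade finishes, then re-validates that state.
func (t *ExampleTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
	ginkgo.By("Waiting for the upgrade to complete")
	<-done
	ginkgo.By("Re-validating state after the upgrade")
}

// Teardown relies on namespace deletion for cleanup, as the tests above do.
func (t *ExampleTest) Teardown(f *framework.Framework) {}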

test/e2e/upgrades/apps/daemonsets.go

Lines changed: 9 additions & 8 deletions

@@ -17,7 +17,7 @@ limitations under the License.
 package upgrades
 
 import (
-	. "github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo"
 
 	apps "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
@@ -35,6 +35,7 @@ type DaemonSetUpgradeTest struct {
 	daemonSet *apps.DaemonSet
 }
 
+// Name returns the tracking name of the test.
 func (DaemonSetUpgradeTest) Name() string { return "[sig-apps] daemonset-upgrade" }
 
 // Setup creates a DaemonSet and verifies that it's running
@@ -74,29 +75,29 @@ func (t *DaemonSetUpgradeTest) Setup(f *framework.Framework) {
 		},
 	}
 
-	By("Creating a DaemonSet")
+	ginkgo.By("Creating a DaemonSet")
 	var err error
 	if t.daemonSet, err = f.ClientSet.AppsV1().DaemonSets(ns.Name).Create(t.daemonSet); err != nil {
 		framework.Failf("unable to create test DaemonSet %s: %v", t.daemonSet.Name, err)
 	}
 
-	By("Waiting for DaemonSet pods to become ready")
+	ginkgo.By("Waiting for DaemonSet pods to become ready")
 	err = wait.Poll(framework.Poll, framework.PodStartTimeout, func() (bool, error) {
 		return checkRunningOnAllNodes(f, t.daemonSet.Namespace, t.daemonSet.Labels)
 	})
 	framework.ExpectNoError(err)
 
-	By("Validating the DaemonSet after creation")
+	ginkgo.By("Validating the DaemonSet after creation")
 	t.validateRunningDaemonSet(f)
 }
 
 // Test waits until the upgrade has completed and then verifies that the DaemonSet
 // is still running
 func (t *DaemonSetUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
-	By("Waiting for upgradet to complete before re-validating DaemonSet")
+	ginkgo.By("Waiting for upgradet to complete before re-validating DaemonSet")
 	<-done
 
-	By("validating the DaemonSet is still running after upgrade")
+	ginkgo.By("validating the DaemonSet is still running after upgrade")
 	t.validateRunningDaemonSet(f)
 }
 
@@ -106,15 +107,15 @@ func (t *DaemonSetUpgradeTest) Teardown(f *framework.Framework) {
 }
 
 func (t *DaemonSetUpgradeTest) validateRunningDaemonSet(f *framework.Framework) {
-	By("confirming the DaemonSet pods are running on all expected nodes")
+	ginkgo.By("confirming the DaemonSet pods are running on all expected nodes")
 	res, err := checkRunningOnAllNodes(f, t.daemonSet.Namespace, t.daemonSet.Labels)
 	framework.ExpectNoError(err)
 	if !res {
 		framework.Failf("expected DaemonSet pod to be running on all nodes, it was not")
 	}
 
 	// DaemonSet resource itself should be good
-	By("confirming the DaemonSet resource is in a good state")
+	ginkgo.By("confirming the DaemonSet resource is in a good state")
 	res, err = checkDaemonStatus(f, t.daemonSet.Namespace, t.daemonSet.Name)
 	framework.ExpectNoError(err)
 	if !res {
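The readiness check in Setup leans on the apimachinery polling helper: wait.Poll invokes a condition function at a fixed interval until it returns true, returns a non-nil error, or the timeout elapses. A standalone sketch with arbitrary durations:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	start := time.Now()
	// Poll every second, for at most ten seconds, until the condition reports done.
	err := wait.Poll(1*time.Second, 10*time.Second, func() (bool, error) {
		// (false, nil) means "not ready yet, keep polling"; a non-nil error
		// aborts the poll immediately.
		return time.Since(start) > 3*time.Second, nil
	})
	fmt.Println("poll finished:", err)
}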

test/e2e/upgrades/apps/deployments.go

Lines changed: 23 additions & 22 deletions

@@ -26,8 +26,8 @@ import (
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/upgrades"
 
-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
+	"github.com/onsi/ginkgo"
+	"github.com/onsi/gomega"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 )
 
@@ -46,6 +46,7 @@ type DeploymentUpgradeTest struct {
 	newRSUID types.UID
 }
 
+// Name returns the tracking name of the test.
 func (DeploymentUpgradeTest) Name() string { return "[sig-apps] deployment-upgrade" }
 
 // Setup creates a deployment and makes sure it has a new and an old replicaset running.
@@ -57,15 +58,15 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
 	deploymentClient := c.AppsV1().Deployments(ns)
 	rsClient := c.AppsV1().ReplicaSets(ns)
 
-	By(fmt.Sprintf("Creating a deployment %q with 1 replica in namespace %q", deploymentName, ns))
+	ginkgo.By(fmt.Sprintf("Creating a deployment %q with 1 replica in namespace %q", deploymentName, ns))
 	d := framework.NewDeployment(deploymentName, int32(1), map[string]string{"test": "upgrade"}, "nginx", nginxImage, apps.RollingUpdateDeploymentStrategyType)
 	deployment, err := deploymentClient.Create(d)
 	framework.ExpectNoError(err)
 
-	By(fmt.Sprintf("Waiting deployment %q to complete", deploymentName))
+	ginkgo.By(fmt.Sprintf("Waiting deployment %q to complete", deploymentName))
 	framework.ExpectNoError(framework.WaitForDeploymentComplete(c, deployment))
 
-	By(fmt.Sprintf("Getting replicaset revision 1 of deployment %q", deploymentName))
+	ginkgo.By(fmt.Sprintf("Getting replicaset revision 1 of deployment %q", deploymentName))
 	rsSelector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector)
 	framework.ExpectNoError(err)
 	rsList, err := rsClient.List(metav1.ListOptions{LabelSelector: rsSelector.String()})
@@ -76,28 +77,28 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
 	}
 	t.oldRSUID = rss[0].UID
 
-	By(fmt.Sprintf("Waiting for revision of the deployment %q to become 1", deploymentName))
+	ginkgo.By(fmt.Sprintf("Waiting for revision of the deployment %q to become 1", deploymentName))
 	framework.ExpectNoError(framework.WaitForDeploymentRevision(c, deployment, "1"))
 
 	// Trigger a new rollout so that we have some history.
-	By(fmt.Sprintf("Triggering a new rollout for deployment %q", deploymentName))
+	ginkgo.By(fmt.Sprintf("Triggering a new rollout for deployment %q", deploymentName))
 	deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *apps.Deployment) {
 		update.Spec.Template.Spec.Containers[0].Name = "updated-name"
 	})
 	framework.ExpectNoError(err)
 
-	By(fmt.Sprintf("Waiting deployment %q to complete", deploymentName))
+	ginkgo.By(fmt.Sprintf("Waiting deployment %q to complete", deploymentName))
 	framework.ExpectNoError(framework.WaitForDeploymentComplete(c, deployment))
 
-	By(fmt.Sprintf("Getting replicasets revision 1 and 2 of deployment %q", deploymentName))
+	ginkgo.By(fmt.Sprintf("Getting replicasets revision 1 and 2 of deployment %q", deploymentName))
 	rsList, err = rsClient.List(metav1.ListOptions{LabelSelector: rsSelector.String()})
 	framework.ExpectNoError(err)
 	rss = rsList.Items
 	if len(rss) != 2 {
 		framework.ExpectNoError(fmt.Errorf("expected 2 replicaset, got %d", len(rss)))
 	}
 
-	By(fmt.Sprintf("Checking replicaset of deployment %q that is created before rollout survives the rollout", deploymentName))
+	ginkgo.By(fmt.Sprintf("Checking replicaset of deployment %q that is created before rollout survives the rollout", deploymentName))
 	switch t.oldRSUID {
 	case rss[0].UID:
 		t.newRSUID = rss[1].UID
@@ -107,7 +108,7 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
 		framework.ExpectNoError(fmt.Errorf("old replicaset with UID %q does not survive rollout", t.oldRSUID))
 	}
 
-	By(fmt.Sprintf("Waiting for revision of the deployment %q to become 2", deploymentName))
+	ginkgo.By(fmt.Sprintf("Waiting for revision of the deployment %q to become 2", deploymentName))
 	framework.ExpectNoError(framework.WaitForDeploymentRevision(c, deployment, "2"))
 
 	t.oldDeploymentUID = deployment.UID
@@ -116,7 +117,7 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
 // Test checks whether the replicasets for a deployment are the same after an upgrade.
 func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
 	// Block until upgrade is done
-	By(fmt.Sprintf("Waiting for upgrade to finish before checking replicasets for deployment %q", deploymentName))
+	ginkgo.By(fmt.Sprintf("Waiting for upgrade to finish before checking replicasets for deployment %q", deploymentName))
 	<-done
 
 	c := f.ClientSet
@@ -127,10 +128,10 @@ func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{
 	deployment, err := deploymentClient.Get(deploymentName, metav1.GetOptions{})
 	framework.ExpectNoError(err)
 
-	By(fmt.Sprintf("Checking UID to verify deployment %q survives upgrade", deploymentName))
-	Expect(deployment.UID).To(Equal(t.oldDeploymentUID))
+	ginkgo.By(fmt.Sprintf("Checking UID to verify deployment %q survives upgrade", deploymentName))
+	gomega.Expect(deployment.UID).To(gomega.Equal(t.oldDeploymentUID))
 
-	By(fmt.Sprintf("Verifying deployment %q does not create new replicasets", deploymentName))
+	ginkgo.By(fmt.Sprintf("Verifying deployment %q does not create new replicasets", deploymentName))
 	rsSelector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
 	framework.ExpectNoError(err)
 	rsList, err := rsClient.List(metav1.ListOptions{LabelSelector: rsSelector.String()})
@@ -142,27 +143,27 @@ func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{
 
 	switch t.oldRSUID {
 	case rss[0].UID:
-		Expect(rss[1].UID).To(Equal(t.newRSUID))
+		gomega.Expect(rss[1].UID).To(gomega.Equal(t.newRSUID))
 	case rss[1].UID:
-		Expect(rss[0].UID).To(Equal(t.newRSUID))
+		gomega.Expect(rss[0].UID).To(gomega.Equal(t.newRSUID))
 	default:
 		framework.ExpectNoError(fmt.Errorf("new replicasets are created during upgrade of deployment %q", deploymentName))
 	}
 
-	By(fmt.Sprintf("Verifying revision of the deployment %q is still 2", deploymentName))
-	Expect(deployment.Annotations[deploymentutil.RevisionAnnotation]).To(Equal("2"))
+	ginkgo.By(fmt.Sprintf("Verifying revision of the deployment %q is still 2", deploymentName))
+	gomega.Expect(deployment.Annotations[deploymentutil.RevisionAnnotation]).To(gomega.Equal("2"))
 
-	By(fmt.Sprintf("Waiting for deployment %q to complete adoption", deploymentName))
+	ginkgo.By(fmt.Sprintf("Waiting for deployment %q to complete adoption", deploymentName))
 	framework.ExpectNoError(framework.WaitForDeploymentComplete(c, deployment))
 
 	// Verify the upgraded deployment is active by scaling up the deployment by 1
-	By(fmt.Sprintf("Scaling up replicaset of deployment %q by 1", deploymentName))
+	ginkgo.By(fmt.Sprintf("Scaling up replicaset of deployment %q by 1", deploymentName))
 	_, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(deployment *apps.Deployment) {
 		*deployment.Spec.Replicas = *deployment.Spec.Replicas + 1
 	})
 	framework.ExpectNoError(err)
 
-	By(fmt.Sprintf("Waiting for deployment %q to complete after scaling", deploymentName))
+	ginkgo.By(fmt.Sprintf("Waiting for deployment %q to complete after scaling", deploymentName))
 	framework.ExpectNoError(framework.WaitForDeploymentComplete(c, deployment))
 }
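Both Setup and Test above convert the deployment's typed selector with metav1.LabelSelectorAsSelector before listing replicasets: the helper turns the API LabelSelector into a labels.Selector whose String() form fits a ListOptions.LabelSelector field. A self-contained sketch using the same "test": "upgrade" label as the deployment above:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// The typed selector as it would appear in a deployment spec.
	ls := &metav1.LabelSelector{MatchLabels: map[string]string{"test": "upgrade"}}

	// Convert it to a labels.Selector for use in list calls.
	sel, err := metav1.LabelSelectorAsSelector(ls)
	if err != nil {
		panic(err)
	}
	fmt.Println(sel.String()) // test=upgrade
}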

test/e2e/upgrades/apps/job.go

Lines changed: 10 additions & 9 deletions

@@ -22,8 +22,8 @@ import (
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/upgrades"
 
-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
+	"github.com/onsi/ginkgo"
+	"github.com/onsi/gomega"
 )
 
 // JobUpgradeTest is a test harness for batch Jobs.
@@ -32,30 +32,31 @@ type JobUpgradeTest struct {
 	namespace string
 }
 
+// Name returns the tracking name of the test.
 func (JobUpgradeTest) Name() string { return "[sig-apps] job-upgrade" }
 
 // Setup starts a Job with a parallelism of 2 and 2 completions running.
 func (t *JobUpgradeTest) Setup(f *framework.Framework) {
 	t.namespace = f.Namespace.Name
 
-	By("Creating a job")
+	ginkgo.By("Creating a job")
 	t.job = framework.NewTestJob("notTerminate", "foo", v1.RestartPolicyOnFailure, 2, 2, nil, 6)
 	job, err := framework.CreateJob(f.ClientSet, t.namespace, t.job)
 	t.job = job
-	Expect(err).NotTo(HaveOccurred())
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
 
-	By("Ensuring active pods == parallelism")
+	ginkgo.By("Ensuring active pods == parallelism")
 	err = framework.WaitForAllJobPodsRunning(f.ClientSet, t.namespace, job.Name, 2)
-	Expect(err).NotTo(HaveOccurred())
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
 
 // Test verifies that the Jobs Pods are running after the an upgrade
 func (t *JobUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
 	<-done
-	By("Ensuring active pods == parallelism")
+	ginkgo.By("Ensuring active pods == parallelism")
 	running, err := framework.CheckForAllJobPodsRunning(f.ClientSet, t.namespace, t.job.Name, 2)
-	Expect(err).NotTo(HaveOccurred())
-	Expect(running).To(BeTrue())
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	gomega.Expect(running).To(gomega.BeTrue())
 }
 
 // Teardown cleans up any remaining resources.
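With the dot imports gone, the origin of each matcher is explicit at every call site. The same qualified style also works outside a Ginkgo suite; a sketch binding Gomega to a plain *testing.T via gomega.NewGomegaWithT (an illustration of the style, not something this PR does):

package upgrades_test

import (
	"testing"

	"github.com/onsi/gomega"
)

func TestQualifiedMatchers(t *testing.T) {
	g := gomega.NewGomegaWithT(t)

	running := true
	var err error

	// The qualified matcher forms used throughout this PR, minus the harness.
	g.Expect(err).NotTo(gomega.HaveOccurred())
	g.Expect(running).To(gomega.BeTrue())
}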
