Skip to content

Commit ccecc67

Browse files
committed
fix golint error in test/e2e/scheduling
1 parent 4cb4864 commit ccecc67

File tree

10 files changed

+87
-78
lines changed

10 files changed

+87
-78
lines changed

hack/.golint_failures

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -605,7 +605,6 @@ test/e2e/common
605605
test/e2e/framework
606606
test/e2e/lifecycle/bootstrap
607607
test/e2e/scalability
608-
test/e2e/scheduling
609608
test/e2e/storage/drivers
610609
test/e2e/storage/testsuites
611610
test/e2e/storage/utils

test/e2e/scheduling/equivalence_cache_predicates.go

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,7 @@ import (
3333

3434
"github.com/onsi/ginkgo"
3535
"github.com/onsi/gomega"
36+
// ensure libs have a chance to initialize
3637
_ "github.com/stretchr/testify/assert"
3738
)
3839

@@ -92,7 +93,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
9293
nodeSelector := map[string]string{"kubernetes.io/hostname": nodeName}
9394

9495
ginkgo.By("One pod should be scheduled, the other should be rejected")
95-
// CreateNodeSelectorPods creates RC with host port 4312
96+
// CreateNodeSelectorPods creates RC with host port 4321
9697
WaitForSchedulerAfterAction(f, func() error {
9798
err := CreateNodeSelectorPods(f, rcName, 2, nodeSelector, false)
9899
return err
@@ -269,6 +270,7 @@ func getRCWithInterPodAffinityNodeSelector(name string, labelsMap map[string]str
269270
}
270271
}
271272

273+
// CreateNodeSelectorPods creates RC with host port 4321 and defines node selector
272274
func CreateNodeSelectorPods(f *framework.Framework, id string, replicas int, nodeSelector map[string]string, expectRunning bool) error {
273275
ginkgo.By(fmt.Sprintf("Running RC which reserves host port and defines node selector"))
274276

test/e2e/scheduling/framework.go

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,7 @@ package scheduling
1818

1919
import "github.com/onsi/ginkgo"
2020

21+
// SIGDescribe annotates the test with the SIG label.
2122
func SIGDescribe(text string, body func()) bool {
2223
return ginkgo.Describe("[sig-scheduling] "+text, body)
2324
}

test/e2e/scheduling/nvidia-gpus.go

Lines changed: 8 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,7 @@ const (
4242

4343
var (
4444
gpuResourceName v1.ResourceName
45-
dsYamlUrl string
45+
dsYamlURL string
4646
)
4747

4848
func makeCudaAdditionDevicePluginTestPod() *v1.Pod {
@@ -116,20 +116,21 @@ func getGPUsAvailable(f *framework.Framework) int64 {
116116
return gpusAvailable
117117
}
118118

119+
// SetupNVIDIAGPUNode installs Nvidia drivers and waits for Nvidia GPUs to be available on nodes
119120
func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *framework.ContainerResourceGatherer {
120121
logOSImages(f)
121122

122-
dsYamlUrlFromEnv := os.Getenv("NVIDIA_DRIVER_INSTALLER_DAEMONSET")
123-
if dsYamlUrlFromEnv != "" {
124-
dsYamlUrl = dsYamlUrlFromEnv
123+
dsYamlURLFromEnv := os.Getenv("NVIDIA_DRIVER_INSTALLER_DAEMONSET")
124+
if dsYamlURLFromEnv != "" {
125+
dsYamlURL = dsYamlURLFromEnv
125126
} else {
126-
dsYamlUrl = "https://raw.githubusercontent.com/GoogleCloudPlatform/container-engine-accelerators/master/daemonset.yaml"
127+
dsYamlURL = "https://raw.githubusercontent.com/GoogleCloudPlatform/container-engine-accelerators/master/daemonset.yaml"
127128
}
128129
gpuResourceName = gpu.NVIDIAGPUResourceName
129130

130-
e2elog.Logf("Using %v", dsYamlUrl)
131+
e2elog.Logf("Using %v", dsYamlURL)
131132
// Creates the DaemonSet that installs Nvidia Drivers.
132-
ds, err := framework.DsFromManifest(dsYamlUrl)
133+
ds, err := framework.DsFromManifest(dsYamlURL)
133134
gomega.Expect(err).NotTo(gomega.HaveOccurred())
134135
ds.Namespace = f.Namespace.Name
135136
_, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(ds)

test/e2e/scheduling/predicates.go

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,7 @@ import (
3636

3737
"github.com/onsi/ginkgo"
3838
"github.com/onsi/gomega"
39+
// ensure libs have a chance to initialize
3940
_ "github.com/stretchr/testify/assert"
4041
)
4142

@@ -727,9 +728,8 @@ func verifyResult(c clientset.Interface, expectedScheduled int, expectedNotSched
727728
if !printed {
728729
printed = true
729730
return msg
730-
} else {
731-
return ""
732731
}
732+
return ""
733733
}
734734

735735
gomega.Expect(len(notScheduledPods)).To(gomega.Equal(expectedNotScheduled), printOnce(fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods)))
@@ -746,9 +746,8 @@ func verifyReplicasResult(c clientset.Interface, expectedScheduled int, expected
746746
if !printed {
747747
printed = true
748748
return msg
749-
} else {
750-
return ""
751749
}
750+
return ""
752751
}
753752

754753
gomega.Expect(len(notScheduledPods)).To(gomega.Equal(expectedNotScheduled), printOnce(fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods)))
@@ -775,6 +774,7 @@ func runAndKeepPodWithLabelAndGetNodeName(f *framework.Framework) (string, strin
775774
return pod.Spec.NodeName, pod.Name
776775
}
777776

777+
// GetNodeThatCanRunPod tries to launch a pod without a label to get a node which can launch it
778778
func GetNodeThatCanRunPod(f *framework.Framework) string {
779779
ginkgo.By("Trying to launch a pod without a label to get a node which can launch it.")
780780
return runPodAndGetNodeName(f, pausePodConfig{Name: "without-label"})
@@ -785,6 +785,7 @@ func getNodeThatCanRunPodWithoutToleration(f *framework.Framework) string {
785785
return runPodAndGetNodeName(f, pausePodConfig{Name: "without-toleration"})
786786
}
787787

788+
// CreateHostPortPods creates RC with host port 4321
788789
func CreateHostPortPods(f *framework.Framework, id string, replicas int, expectRunning bool) {
789790
ginkgo.By(fmt.Sprintf("Running RC which reserves host port"))
790791
config := &testutils.RCConfig{

test/e2e/scheduling/preemption.go

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -39,6 +39,7 @@ import (
3939

4040
"github.com/onsi/ginkgo"
4141
"github.com/onsi/gomega"
42+
// ensure libs have a chance to initialize
4243
_ "github.com/stretchr/testify/assert"
4344
)
4445

test/e2e/scheduling/priorities.go

Lines changed: 12 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,7 @@ import (
2424

2525
"github.com/onsi/ginkgo"
2626
"github.com/onsi/gomega"
27+
// ensure libs have a chance to initialize
2728
_ "github.com/stretchr/testify/assert"
2829

2930
"k8s.io/api/core/v1"
@@ -40,14 +41,15 @@ import (
4041
imageutils "k8s.io/kubernetes/test/utils/image"
4142
)
4243

44+
// Resource is a collection of compute resources.
4345
type Resource struct {
4446
MilliCPU int64
4547
Memory int64
4648
}
4749

48-
var balancePodLabel map[string]string = map[string]string{"name": "priority-balanced-memory"}
50+
var balancePodLabel = map[string]string{"name": "priority-balanced-memory"}
4951

50-
var podRequestedResource *v1.ResourceRequirements = &v1.ResourceRequirements{
52+
var podRequestedResource = &v1.ResourceRequirements{
5153
Limits: v1.ResourceList{
5254
v1.ResourceMemory: resource.MustParse("100Mi"),
5355
v1.ResourceCPU: resource.MustParse("100m"),
@@ -265,7 +267,7 @@ func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, n
265267
var cpuFractionMap = make(map[string]float64)
266268
var memFractionMap = make(map[string]float64)
267269
for _, node := range nodes {
268-
cpuFraction, memFraction := computeCpuMemFraction(cs, node, requestedResource)
270+
cpuFraction, memFraction := computeCPUMemFraction(cs, node, requestedResource)
269271
cpuFractionMap[node.Name] = cpuFraction
270272
memFractionMap[node.Name] = memFraction
271273
if cpuFraction > maxCPUFraction {
@@ -311,15 +313,15 @@ func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, n
311313

312314
for _, node := range nodes {
313315
ginkgo.By("Compute Cpu, Mem Fraction after create balanced pods.")
314-
computeCpuMemFraction(cs, node, requestedResource)
316+
computeCPUMemFraction(cs, node, requestedResource)
315317
}
316318

317319
return nil
318320
}
319321

320-
func computeCpuMemFraction(cs clientset.Interface, node v1.Node, resource *v1.ResourceRequirements) (float64, float64) {
321-
e2elog.Logf("ComputeCpuMemFraction for node: %v", node.Name)
322-
totalRequestedCpuResource := resource.Requests.Cpu().MilliValue()
322+
func computeCPUMemFraction(cs clientset.Interface, node v1.Node, resource *v1.ResourceRequirements) (float64, float64) {
323+
e2elog.Logf("ComputeCPUMemFraction for node: %v", node.Name)
324+
totalRequestedCPUResource := resource.Requests.Cpu().MilliValue()
323325
totalRequestedMemResource := resource.Requests.Memory().Value()
324326
allpods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
325327
if err != nil {
@@ -332,7 +334,7 @@ func computeCpuMemFraction(cs clientset.Interface, node v1.Node, resource *v1.Re
332334
if v1qos.GetPodQOS(&pod) == v1.PodQOSBestEffort {
333335
continue
334336
}
335-
totalRequestedCpuResource += getNonZeroRequests(&pod).MilliCPU
337+
totalRequestedCPUResource += getNonZeroRequests(&pod).MilliCPU
336338
totalRequestedMemResource += getNonZeroRequests(&pod).Memory
337339
}
338340
}
@@ -341,7 +343,7 @@ func computeCpuMemFraction(cs clientset.Interface, node v1.Node, resource *v1.Re
341343
cpuAllocatableMil := cpuAllocatable.MilliValue()
342344

343345
floatOne := float64(1)
344-
cpuFraction := float64(totalRequestedCpuResource) / float64(cpuAllocatableMil)
346+
cpuFraction := float64(totalRequestedCPUResource) / float64(cpuAllocatableMil)
345347
if cpuFraction > floatOne {
346348
cpuFraction = floatOne
347349
}
@@ -353,7 +355,7 @@ func computeCpuMemFraction(cs clientset.Interface, node v1.Node, resource *v1.Re
353355
memFraction = floatOne
354356
}
355357

356-
e2elog.Logf("Node: %v, totalRequestedCpuResource: %v, cpuAllocatableMil: %v, cpuFraction: %v", node.Name, totalRequestedCpuResource, cpuAllocatableMil, cpuFraction)
358+
e2elog.Logf("Node: %v, totalRequestedCPUResource: %v, cpuAllocatableMil: %v, cpuFraction: %v", node.Name, totalRequestedCPUResource, cpuAllocatableMil, cpuFraction)
357359
e2elog.Logf("Node: %v, totalRequestedMemResource: %v, memAllocatableVal: %v, memFraction: %v", node.Name, totalRequestedMemResource, memAllocatableVal, memFraction)
358360

359361
return cpuFraction, memFraction

0 commit comments

Comments
 (0)