
Commit ae6c831

Merge pull request kubernetes#3016 from jbartosik/rollback-oom-tests
Rollback OOM tests
2 parents 9fd1073 + 644aa7a · commit ae6c831

File tree

7 files changed (+27, −189 lines)

vertical-pod-autoscaler/e2e/utils/ooming_resource_consumer/Dockerfile

Lines changed: 0 additions & 2 deletions
This file was deleted.

vertical-pod-autoscaler/e2e/v1/autoscaling_utils.go

Lines changed: 1 addition & 27 deletions
@@ -60,8 +60,6 @@ const (
 	customMetricName = "QPS"
 	serviceInitializationTimeout = 2 * time.Minute
 	serviceInitializationInterval = 15 * time.Second
-	// TODO(jbartosik): put the image in a VPA project
-	stressImage = "gcr.io/jbartosik-gke-dev/stress:0.10"
 )
 
 var (
@@ -365,7 +363,7 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name st
 		Timeout: timeoutRC,
 		Replicas: replicas,
 		CpuRequest: cpuRequestMillis,
-		MemRequest: memRequestMb * 1024 * 1024, // Mem Request is in bytes
+		MemRequest: memRequestMb * 1024 * 1024, // MemLimit is in bytes
 		Annotations: podAnnotations,
 	}
 
@@ -429,27 +427,3 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name st
 	framework.ExpectNoError(framework.WaitForServiceEndpointsNum(
 		c, ns, controllerName, 1, startServiceInterval, startServiceTimeout))
 }
-
-func runOomingReplicationController(c clientset.Interface, ns, name string, replicas int) {
-	ginkgo.By(fmt.Sprintf("Running OOMing RC %s with %v replicas", name, replicas))
-
-	rcConfig := testutils.RCConfig{
-		Client: c,
-		Image: stressImage,
-		Name: name,
-		Namespace: ns,
-		Timeout: timeoutRC,
-		Replicas: replicas,
-		Annotations: make(map[string]string),
-		MemRequest: 1024 * 1024 * 1024,
-		MemLimit: 1024 * 1024 * 1024,
-	}
-
-	dpConfig := testutils.DeploymentConfig{
-		RCConfig: rcConfig,
-	}
-	ginkgo.By(fmt.Sprintf("Creating deployment %s in namespace %s", dpConfig.Name, dpConfig.Namespace))
-	dpConfig.NodeDumpFunc = framework.DumpNodeDebugInfo
-	dpConfig.ContainerDumpFunc = framework.LogFailedContainers
-	framework.ExpectNoError(testutils.RunDeployment(dpConfig))
-}
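The memRequestMb * 1024 * 1024 conversion kept in the hunk above exists because the RCConfig memory fields are plain byte counts while the helper's parameter is in megabytes (the removed OOMing RC likewise set MemRequest and MemLimit to 1024 * 1024 * 1024, i.e. 1 GiB). A minimal standalone sketch of that arithmetic, with an illustrative value rather than the suite's actual parameter:

```go
package main

import "fmt"

func main() {
	// RCConfig.MemRequest and MemLimit are byte counts, so a value expressed
	// in megabytes is scaled by 1024*1024 before it goes into the config.
	memRequestMb := int64(1024) // illustrative value, not taken from the suite
	memRequestBytes := memRequestMb * 1024 * 1024

	fmt.Printf("%d MB -> %d bytes (1 GiB)\n", memRequestMb, memRequestBytes)
}
```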

vertical-pod-autoscaler/e2e/v1/common.go

Lines changed: 3 additions & 8 deletions
@@ -192,17 +192,12 @@ func NewHamsterDeploymentWithResourcesAndLimits(f *framework.Framework, cpuQuant
 	return d
 }
 
-func getPodSelectorExcludingDonePodsOrDie() string {
-	stringSelector := "status.phase!=" + string(apiv1.PodSucceeded) +
-		",status.phase!=" + string(apiv1.PodFailed)
-	selector := fields.ParseSelectorOrDie(stringSelector)
-	return selector.String()
-}
-
 // GetHamsterPods returns running hamster pods (matched by hamsterLabels)
 func GetHamsterPods(f *framework.Framework) (*apiv1.PodList, error) {
 	label := labels.SelectorFromSet(labels.Set(hamsterLabels))
-	options := metav1.ListOptions{LabelSelector: label.String(), FieldSelector: getPodSelectorExcludingDonePodsOrDie()}
+	selector := fields.ParseSelectorOrDie("status.phase!=" + string(apiv1.PodSucceeded) +
+		",status.phase!=" + string(apiv1.PodFailed))
+	options := metav1.ListOptions{LabelSelector: label.String(), FieldSelector: selector.String()}
 	return f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(options)
 }
 
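The rollback drops getPodSelectorExcludingDonePodsOrDie and has GetHamsterPods build the field selector inline. For reference, a self-contained sketch of what that selector evaluates to, using the same apimachinery fields package the diff relies on (assuming only the standard k8s.io/api and k8s.io/apimachinery modules):

```go
package main

import (
	"fmt"

	apiv1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/fields"
)

func main() {
	// Same selector GetHamsterPods now builds inline: skip pods whose phase
	// is Succeeded or Failed, i.e. keep only pods that are still running.
	selector := fields.ParseSelectorOrDie(
		"status.phase!=" + string(apiv1.PodSucceeded) +
			",status.phase!=" + string(apiv1.PodFailed))

	// Prints: status.phase!=Succeeded,status.phase!=Failed
	fmt.Println(selector.String())
}
```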

vertical-pod-autoscaler/e2e/v1/full_vpa.go

Lines changed: 10 additions & 59 deletions
@@ -18,7 +18,6 @@ package autoscaling
 
 import (
 	"fmt"
-	"time"
 
 	autoscaling "k8s.io/api/autoscaling/v1"
 	apiv1 "k8s.io/api/core/v1"
@@ -92,80 +91,37 @@ var _ = FullVpaE2eDescribe("Pods under VPA", func() {
 	ginkgo.It("have cpu requests growing with usage", func() {
 		// initial CPU usage is low so a minimal recommendation is expected
 		err := waitForResourceRequestInRangeInPods(
-			f, pollTimeout, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceCPU,
+			f, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceCPU,
 			ParseQuantityOrDie(minimalCPULowerBound), ParseQuantityOrDie(minimalCPUUpperBound))
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 
 		// consume more CPU to get a higher recommendation
 		rc.ConsumeCPU(600 * replicas)
 		err = waitForResourceRequestInRangeInPods(
-			f, pollTimeout, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceCPU,
+			f, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceCPU,
 			ParseQuantityOrDie("500m"), ParseQuantityOrDie("900m"))
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	})
 
 	ginkgo.It("have memory requests growing with usage", func() {
 		// initial memory usage is low so a minimal recommendation is expected
 		err := waitForResourceRequestInRangeInPods(
-			f, pollTimeout, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceMemory,
+			f, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceMemory,
 			ParseQuantityOrDie(minimalMemoryLowerBound), ParseQuantityOrDie(minimalMemoryUpperBound))
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 
 		// consume more memory to get a higher recommendation
 		// NOTE: large range given due to unpredictability of actual memory usage
 		rc.ConsumeMem(1024 * replicas)
 		err = waitForResourceRequestInRangeInPods(
-			f, pollTimeout, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceMemory,
+			f, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceMemory,
 			ParseQuantityOrDie("900Mi"), ParseQuantityOrDie("4000Mi"))
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	})
 })
 
-var _ = FullVpaE2eDescribe("OOMing pods under VPA", func() {
-	var (
-		vpaClientSet *vpa_clientset.Clientset
-		vpaCRD *vpa_types.VerticalPodAutoscaler
-	)
-	const replicas = 3
-
-	f := framework.NewDefaultFramework("vertical-pod-autoscaling")
-
-	ginkgo.BeforeEach(func() {
-		ns := f.Namespace.Name
-		ginkgo.By("Setting up a hamster deployment")
-
-		runOomingReplicationController(
-			f.ClientSet,
-			ns,
-			"hamster",
-			replicas)
-		ginkgo.By("Setting up a VPA CRD")
-		config, err := framework.LoadConfig()
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-
-		vpaCRD = NewVPA(f, "hamster-vpa", &autoscaling.CrossVersionObjectReference{
-			APIVersion: "v1",
-			Kind: "Deployment",
-			Name: "hamster",
-		})
-
-		vpaClientSet = vpa_clientset.NewForConfigOrDie(config)
-		vpaClient := vpaClientSet.AutoscalingV1()
-		_, err = vpaClient.VerticalPodAutoscalers(ns).Create(vpaCRD)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-	})
-
-	ginkgo.It("have memory requests growing with OOMs", func() {
-		listOptions := metav1.ListOptions{LabelSelector: "name=hamster", FieldSelector: getPodSelectorExcludingDonePodsOrDie()}
-		err := waitForResourceRequestInRangeInPods(
-			f, 7*time.Minute, listOptions, apiv1.ResourceMemory,
-			ParseQuantityOrDie("1400Mi"), ParseQuantityOrDie("10000Mi"))
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-	})
-})
-
-func waitForPodsMatch(f *framework.Framework, timeout time.Duration, listOptions metav1.ListOptions, matcher func(pod apiv1.Pod) bool) error {
-	return wait.PollImmediate(pollInterval, timeout, func() (bool, error) {
+func waitForPodsMatch(f *framework.Framework, listOptions metav1.ListOptions, matcher func(pod apiv1.Pod) bool) error {
+	return wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
 
 		ns := f.Namespace.Name
 		c := f.ClientSet
@@ -179,23 +135,18 @@ func waitForPodsMatch(f *framework.Framework, timeout time.Duration, listOptions
 			return false, nil
 		}
 
-		// Run matcher on all pods, even if we find pod that doesn't match early.
-		// This allows the matcher to write logs for all pods. This in turns makes
-		// it easier to spot some problems (for example unexpected pods in the list
-		// results).
-		result := true
 		for _, pod := range podList.Items {
 			if !matcher(pod) {
-				result = false
+				return false, nil
 			}
 		}
-		return result, nil
+		return true, nil
 
 	})
 }
 
-func waitForResourceRequestInRangeInPods(f *framework.Framework, timeout time.Duration, listOptions metav1.ListOptions, resourceName apiv1.ResourceName, lowerBound, upperBound resource.Quantity) error {
-	err := waitForPodsMatch(f, timeout, listOptions,
+func waitForResourceRequestInRangeInPods(f *framework.Framework, listOptions metav1.ListOptions, resourceName apiv1.ResourceName, lowerBound, upperBound resource.Quantity) error {
+	err := waitForPodsMatch(f, listOptions,
 		func(pod apiv1.Pod) bool {
 			resourceRequest, found := pod.Spec.Containers[0].Resources.Requests[resourceName]
 			framework.Logf("Comparing %v request %v against range of (%v, %v)", resourceName, resourceRequest, lowerBound, upperBound)
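After the rollback, waitForPodsMatch always polls with the package-level pollInterval and pollTimeout and returns early on the first pod that fails the matcher, instead of taking a per-call timeout and logging every pod. The underlying polling primitive is wait.PollImmediate from apimachinery; a minimal, self-contained illustration of that pattern follows (the interval, timeout, and condition are stand-ins, not the suite's values):

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	// Stand-in values; the e2e suite uses its own pollInterval and pollTimeout.
	pollInterval := 1 * time.Second
	pollTimeout := 5 * time.Second
	start := time.Now()

	// PollImmediate runs the condition right away, then once per pollInterval,
	// and returns a timeout error if the condition never holds within pollTimeout.
	err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
		// Toy condition standing in for "every listed pod satisfies the matcher".
		return time.Since(start) > 2*time.Second, nil
	})
	if err != nil {
		fmt.Println("condition never held within the timeout:", err)
		return
	}
	fmt.Println("condition held, polling stopped early")
}
```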

vertical-pod-autoscaler/e2e/v1beta2/autoscaling_utils.go

Lines changed: 0 additions & 26 deletions
@@ -60,8 +60,6 @@ const (
 	customMetricName = "QPS"
 	serviceInitializationTimeout = 2 * time.Minute
 	serviceInitializationInterval = 15 * time.Second
-	// TODO(jbartosik): put the image in a VPA project
-	stressImage = "gcr.io/jbartosik-gke-dev/stress:0.10"
 )
 
 var (
@@ -429,27 +427,3 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name st
 	framework.ExpectNoError(framework.WaitForServiceEndpointsNum(
 		c, ns, controllerName, 1, startServiceInterval, startServiceTimeout))
 }
-
-func runOomingReplicationController(c clientset.Interface, ns, name string, replicas int) {
-	ginkgo.By(fmt.Sprintf("Running OOMing RC %s with %v replicas", name, replicas))
-
-	rcConfig := testutils.RCConfig{
-		Client: c,
-		Image: stressImage,
-		Name: name,
-		Namespace: ns,
-		Timeout: timeoutRC,
-		Replicas: replicas,
-		Annotations: make(map[string]string),
-		MemRequest: 1024 * 1024 * 1024,
-		MemLimit: 1024 * 1024 * 1024,
-	}
-
-	dpConfig := testutils.DeploymentConfig{
-		RCConfig: rcConfig,
-	}
-	ginkgo.By(fmt.Sprintf("Creating deployment %s in namespace %s", dpConfig.Name, dpConfig.Namespace))
-	dpConfig.NodeDumpFunc = framework.DumpNodeDebugInfo
-	dpConfig.ContainerDumpFunc = framework.LogFailedContainers
-	framework.ExpectNoError(testutils.RunDeployment(dpConfig))
-}

vertical-pod-autoscaler/e2e/v1beta2/common.go

Lines changed: 3 additions & 8 deletions
@@ -192,17 +192,12 @@ func NewHamsterDeploymentWithResourcesAndLimits(f *framework.Framework, cpuQuant
 	return d
 }
 
-func getPodSelectorExcludingDonePodsOrDie() string {
-	stringSelector := "status.phase!=" + string(apiv1.PodSucceeded) +
-		",status.phase!=" + string(apiv1.PodFailed)
-	selector := fields.ParseSelectorOrDie(stringSelector)
-	return selector.String()
-}
-
 // GetHamsterPods returns running hamster pods (matched by hamsterLabels)
 func GetHamsterPods(f *framework.Framework) (*apiv1.PodList, error) {
 	label := labels.SelectorFromSet(labels.Set(hamsterLabels))
-	options := metav1.ListOptions{LabelSelector: label.String(), FieldSelector: getPodSelectorExcludingDonePodsOrDie()}
+	selector := fields.ParseSelectorOrDie("status.phase!=" + string(apiv1.PodSucceeded) +
+		",status.phase!=" + string(apiv1.PodFailed))
+	options := metav1.ListOptions{LabelSelector: label.String(), FieldSelector: selector.String()}
 	return f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(options)
 }
 

vertical-pod-autoscaler/e2e/v1beta2/full_vpa.go

Lines changed: 10 additions & 59 deletions
@@ -18,7 +18,6 @@ package autoscaling
 
 import (
 	"fmt"
-	"time"
 
 	autoscaling "k8s.io/api/autoscaling/v1"
 	apiv1 "k8s.io/api/core/v1"
@@ -92,80 +91,37 @@ var _ = FullVpaE2eDescribe("Pods under VPA", func() {
 	ginkgo.It("have cpu requests growing with usage", func() {
 		// initial CPU usage is low so a minimal recommendation is expected
 		err := waitForResourceRequestInRangeInPods(
-			f, pollTimeout, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceCPU,
+			f, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceCPU,
 			ParseQuantityOrDie(minimalCPULowerBound), ParseQuantityOrDie(minimalCPUUpperBound))
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 
 		// consume more CPU to get a higher recommendation
 		rc.ConsumeCPU(600 * replicas)
 		err = waitForResourceRequestInRangeInPods(
-			f, pollTimeout, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceCPU,
+			f, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceCPU,
 			ParseQuantityOrDie("500m"), ParseQuantityOrDie("900m"))
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	})
 
 	ginkgo.It("have memory requests growing with usage", func() {
 		// initial memory usage is low so a minimal recommendation is expected
 		err := waitForResourceRequestInRangeInPods(
-			f, pollTimeout, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceMemory,
+			f, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceMemory,
 			ParseQuantityOrDie(minimalMemoryLowerBound), ParseQuantityOrDie(minimalMemoryUpperBound))
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 
 		// consume more memory to get a higher recommendation
 		// NOTE: large range given due to unpredictability of actual memory usage
 		rc.ConsumeMem(1024 * replicas)
 		err = waitForResourceRequestInRangeInPods(
-			f, pollTimeout, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceMemory,
+			f, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceMemory,
 			ParseQuantityOrDie("900Mi"), ParseQuantityOrDie("4000Mi"))
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	})
 })
 
-var _ = FullVpaE2eDescribe("OOMing pods under VPA", func() {
-	var (
-		vpaClientSet *vpa_clientset.Clientset
-		vpaCRD *vpa_types.VerticalPodAutoscaler
-	)
-	const replicas = 3
-
-	f := framework.NewDefaultFramework("vertical-pod-autoscaling")
-
-	ginkgo.BeforeEach(func() {
-		ns := f.Namespace.Name
-		ginkgo.By("Setting up a hamster deployment")
-
-		runOomingReplicationController(
-			f.ClientSet,
-			ns,
-			"hamster",
-			replicas)
-		ginkgo.By("Setting up a VPA CRD")
-		config, err := framework.LoadConfig()
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-
-		vpaCRD = NewVPA(f, "hamster-vpa", &autoscaling.CrossVersionObjectReference{
-			APIVersion: "apps/v1",
-			Kind: "Deployment",
-			Name: "hamster",
-		})
-
-		vpaClientSet = vpa_clientset.NewForConfigOrDie(config)
-		vpaClient := vpaClientSet.AutoscalingV1beta2()
-		_, err = vpaClient.VerticalPodAutoscalers(ns).Create(vpaCRD)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-	})
-
-	ginkgo.It("have memory requests growing with OOMs", func() {
-		listOptions := metav1.ListOptions{LabelSelector: "name=hamster", FieldSelector: getPodSelectorExcludingDonePodsOrDie()}
-		err := waitForResourceRequestInRangeInPods(
-			f, 7*time.Minute, listOptions, apiv1.ResourceMemory,
-			ParseQuantityOrDie("1400Mi"), ParseQuantityOrDie("10000Mi"))
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-	})
-})
-
-func waitForPodsMatch(f *framework.Framework, timeout time.Duration, listOptions metav1.ListOptions, matcher func(pod apiv1.Pod) bool) error {
-	return wait.PollImmediate(pollInterval, timeout, func() (bool, error) {
+func waitForPodsMatch(f *framework.Framework, listOptions metav1.ListOptions, matcher func(pod apiv1.Pod) bool) error {
+	return wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
 
 		ns := f.Namespace.Name
 		c := f.ClientSet
@@ -179,23 +135,18 @@ func waitForPodsMatch(f *framework.Framework, timeout time.Duration, listOptions
 			return false, nil
 		}
 
-		// Run matcher on all pods, even if we find pod that doesn't match early.
-		// This allows the matcher to write logs for all pods. This in turns makes
-		// it easier to spot some problems (for example unexpected pods in the list
-		// results).
-		result := true
 		for _, pod := range podList.Items {
 			if !matcher(pod) {
-				result = false
+				return false, nil
 			}
 		}
-		return result, nil
+		return true, nil
 
 	})
 }
 
-func waitForResourceRequestInRangeInPods(f *framework.Framework, timeout time.Duration, listOptions metav1.ListOptions, resourceName apiv1.ResourceName, lowerBound, upperBound resource.Quantity) error {
-	err := waitForPodsMatch(f, timeout, listOptions,
+func waitForResourceRequestInRangeInPods(f *framework.Framework, listOptions metav1.ListOptions, resourceName apiv1.ResourceName, lowerBound, upperBound resource.Quantity) error {
+	err := waitForPodsMatch(f, listOptions,
 		func(pod apiv1.Pod) bool {
 			resourceRequest, found := pod.Spec.Containers[0].Resources.Requests[resourceName]
 			framework.Logf("Comparing %v request %v against range of (%v, %v)", resourceName, resourceRequest, lowerBound, upperBound)
