
Commit d650b57

Added Preemption benchmark
1 parent d680e05 commit d650b57

6 files changed: +68 -10 lines changed

test/integration/scheduler_perf/BUILD

Lines changed: 1 addition & 0 deletions

@@ -18,6 +18,7 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//staging/src/k8s.io/client-go/rest:go_default_library",

test/integration/scheduler_perf/config/performance-config.yaml

Lines changed: 13 additions & 0 deletions

@@ -227,3 +227,16 @@
     - numNodes: 5000
       numInitPods: [2000, 2000, 2000, 2000, 2000]
       numPodsToSchedule: 1000
+- template:
+    desc: Preemption
+    initPods:
+      - podTemplatePath: config/pod-low-priority.yaml
+    podsToSchedule:
+      podTemplatePath: config/pod-high-priority.yaml
+  params:
+    - numNodes: 500
+      numInitPods: [2000]
+      numPodsToSchedule: 500
+    - numNodes: 5000
+      numInitPods: [20000]
+      numPodsToSchedule: 5000
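
Each entry under params is combined with the template to produce one benchmark case: 500 nodes with 2000 low-priority init pods and 500 high-priority pods to schedule, plus a 10x larger variant. A rough sketch of how a harness can expand such entries into named sub-benchmarks is shown below; the struct and the naming scheme are illustrative assumptions, the real expansion logic lives in scheduler_perf_test.go.

package schedperfsketch

import (
	"fmt"
	"testing"
)

// caseParams is a simplified stand-in for one entry under "params".
type caseParams struct {
	numNodes          int
	numInitPods       []int
	numPodsToSchedule int
}

// runCases expands each params entry of a template into its own sub-benchmark.
func runCases(b *testing.B, desc string, params []caseParams) {
	for _, p := range params {
		name := fmt.Sprintf("%s/%dNodes/%dInitPods/%dPodsToSchedule",
			desc, p.numNodes, sum(p.numInitPods), p.numPodsToSchedule)
		b.Run(name, func(b *testing.B) {
			// The real harness creates p.numNodes nodes, pre-creates the
			// low-priority init pods, then times scheduling of the
			// high-priority pods.
			_ = p
		})
	}
}

func sum(xs []int) int {
	total := 0
	for _, x := range xs {
		total += x
	}
	return total
}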
test/integration/scheduler_perf/config/pod-high-priority.yaml (new file; filename inferred from the podTemplatePath referenced above)

Lines changed: 18 additions & 0 deletions

@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  generateName: pod-
+spec:
+  priority: 10
+  containers:
+  - image: k8s.gcr.io/pause:3.2
+    name: pause
+    ports:
+    - containerPort: 80
+    resources:
+      limits:
+        cpu: 3000m
+        memory: 500Mi
+      requests:
+        cpu: 3000m
+        memory: 500Mi
test/integration/scheduler_perf/config/pod-low-priority.yaml (new file; filename inferred from the podTemplatePath referenced above)

Lines changed: 18 additions & 0 deletions

@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  generateName: pod-
+spec:
+  terminationGracePeriodSeconds: 0
+  containers:
+  - image: k8s.gcr.io/pause:3.2
+    name: pause
+    ports:
+    - containerPort: 80
+    resources:
+      limits:
+        cpu: 900m
+        memory: 500Mi
+      requests:
+        cpu: 900m
+        memory: 500Mi
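
The two templates differ in priority and in CPU request: each low-priority victim asks for 900m and sets terminationGracePeriodSeconds: 0 so it can be evicted immediately, while each high-priority pod asks for 3000m at priority 10, so on nodes already filled with victims the scheduler must preempt. A minimal sketch of the priority comparison that preemption hinges on, using hypothetical helper names (the scheduler's real preemption logic also checks resource fit, PodDisruptionBudgets, and more):

package schedperfsketch

import v1 "k8s.io/api/core/v1"

// podPriority returns a pod's effective priority; an unset field counts as
// the default priority, 0.
func podPriority(p *v1.Pod) int32 {
	if p.Spec.Priority != nil {
		return *p.Spec.Priority
	}
	return 0
}

// canPreempt reports whether a pending pod outranks a running victim by
// priority alone.
func canPreempt(pending, victim *v1.Pod) bool {
	return podPriority(victim) < podPriority(pending)
}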

test/integration/scheduler_perf/scheduler_perf_test.go

Lines changed: 7 additions & 5 deletions

@@ -45,6 +45,8 @@ var (
 		"scheduler_scheduling_algorithm_priority_evaluation_seconds",
 		"scheduler_binding_duration_seconds",
 		"scheduler_e2e_scheduling_duration_seconds",
+		"scheduler_scheduling_algorithm_preemption_evaluation_seconds",
+		"scheduler_pod_scheduling_duration_seconds",
 		},
 	}
 )
@@ -154,7 +156,7 @@ func perfScheduling(test testCase, b *testing.B) []DataItem {
 		}
 		total += p.Num
 	}
-	if err := waitNumPodsScheduled(b, total, podInformer); err != nil {
+	if err := waitNumPodsScheduled(b, total, podInformer, setupNamespace); err != nil {
 		b.Fatal(err)
 	}

@@ -172,7 +174,7 @@ func perfScheduling(test testCase, b *testing.B) []DataItem {
 	if err := createPods(testNamespace, test.PodsToSchedule, clientset); err != nil {
 		b.Fatal(err)
 	}
-	if err := waitNumPodsScheduled(b, total+test.PodsToSchedule.Num, podInformer); err != nil {
+	if err := waitNumPodsScheduled(b, test.PodsToSchedule.Num, podInformer, testNamespace); err != nil {
 		b.Fatal(err)
 	}

@@ -187,9 +189,9 @@ func perfScheduling(test testCase, b *testing.B) []DataItem {
 	return dataItems
 }

-func waitNumPodsScheduled(b *testing.B, num int, podInformer coreinformers.PodInformer) error {
+func waitNumPodsScheduled(b *testing.B, num int, podInformer coreinformers.PodInformer, namespace string) error {
 	for {
-		scheduled, err := getScheduledPods(podInformer)
+		scheduled, err := getScheduledPods(podInformer, namespace)
 		if err != nil {
 			return err
 		}
@@ -203,7 +205,7 @@ func waitNumPodsScheduled(b *testing.B, num int, podInformer coreinformers.PodIn
 }

 func getTestDataCollectors(tc testCase, podInformer coreinformers.PodInformer, b *testing.B) []testDataCollector {
-	collectors := []testDataCollector{newThroughputCollector(podInformer, map[string]string{"Name": b.Name()})}
+	collectors := []testDataCollector{newThroughputCollector(podInformer, map[string]string{"Name": b.Name()}, []string{testNamespace})}
 	metricsCollectorConfig := defaultMetricsCollectorConfig
 	if tc.MetricsCollectorConfig != nil {
 		metricsCollectorConfig = *tc.MetricsCollectorConfig
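
waitNumPodsScheduled now counts scheduled pods per namespace, which matters once preemption is in play: a global running total could never be reached while low-priority init pods are being evicted, which is presumably why the second wait checks only test.PodsToSchedule.Num pods in testNamespace instead of total+test.PodsToSchedule.Num everywhere. A minimal sketch of what the full wait loop might look like after this change; only the signature and the getScheduledPods call appear in the diff, so the poll interval, the >= comparison, and the logging are assumptions, and the sketch relies on the file's existing getScheduledPods helper and imports:

func waitNumPodsScheduled(b *testing.B, num int, podInformer coreinformers.PodInformer, namespace string) error {
	for {
		scheduled, err := getScheduledPods(podInformer, namespace)
		if err != nil {
			return err
		}
		if len(scheduled) >= num {
			return nil
		}
		// Keep polling until enough pods in the namespace report a bound node.
		b.Logf("%s: %d/%d pods scheduled", b.Name(), len(scheduled), num)
		time.Sleep(1 * time.Second)
	}
}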

test/integration/scheduler_perf/util.go

Lines changed: 11 additions & 5 deletions

@@ -30,6 +30,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/util/sets"
 	coreinformers "k8s.io/client-go/informers/core/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	restclient "k8s.io/client-go/rest"
@@ -76,16 +77,19 @@ func mustSetupScheduler() (util.ShutdownFunc, coreinformers.PodInformer, clients
 	return shutdownFunc, podInformer, clientSet
 }

-func getScheduledPods(podInformer coreinformers.PodInformer) ([]*v1.Pod, error) {
+// Returns the list of scheduled pods in the specified namespaces.
+// Note that no namespaces specified matches all namespaces.
+func getScheduledPods(podInformer coreinformers.PodInformer, namespaces ...string) ([]*v1.Pod, error) {
 	pods, err := podInformer.Lister().List(labels.Everything())
 	if err != nil {
 		return nil, err
 	}

+	s := sets.NewString(namespaces...)
 	scheduled := make([]*v1.Pod, 0, len(pods))
 	for i := range pods {
 		pod := pods[i]
-		if len(pod.Spec.NodeName) > 0 {
+		if len(pod.Spec.NodeName) > 0 && (len(s) == 0 || s.Has(pod.Namespace)) {
 			scheduled = append(scheduled, pod)
 		}
 	}
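
The namespaces parameter is variadic, so existing call sites keep their old behaviour (no arguments matches every namespace) while the benchmark paths can narrow the count. Illustrative usage, with placeholder namespace names:

// All scheduled pods in the cluster, as before.
all, err := getScheduledPods(podInformer)

// Only pods scheduled in the benchmark's test namespace.
testPods, err := getScheduledPods(podInformer, "sched-test")

// Pods scheduled in either of two namespaces.
setupOrTest, err := getScheduledPods(podInformer, "sched-setup", "sched-test")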
@@ -213,17 +217,19 @@ type throughputCollector struct {
 	podInformer           coreinformers.PodInformer
 	schedulingThroughputs []float64
 	labels                map[string]string
+	namespaces            []string
 }

-func newThroughputCollector(podInformer coreinformers.PodInformer, labels map[string]string) *throughputCollector {
+func newThroughputCollector(podInformer coreinformers.PodInformer, labels map[string]string, namespaces []string) *throughputCollector {
 	return &throughputCollector{
 		podInformer: podInformer,
 		labels:      labels,
+		namespaces:  namespaces,
 	}
 }

 func (tc *throughputCollector) run(stopCh chan struct{}) {
-	podsScheduled, err := getScheduledPods(tc.podInformer)
+	podsScheduled, err := getScheduledPods(tc.podInformer, tc.namespaces...)
 	if err != nil {
 		klog.Fatalf("%v", err)
 	}
@@ -233,7 +239,7 @@ func (tc *throughputCollector) run(stopCh chan struct{}) {
 		case <-stopCh:
 			return
 		case <-time.After(throughputSampleFrequency):
-			podsScheduled, err := getScheduledPods(tc.podInformer)
+			podsScheduled, err := getScheduledPods(tc.podInformer, tc.namespaces...)
 			if err != nil {
 				klog.Fatalf("%v", err)
 			}
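
With the namespaces field threaded through, the collector now samples only pods created by the benchmark itself. The sampling arithmetic is not part of this diff, but it amounts to counting newly scheduled pods per sampling window; a simplified sketch, assuming the collector records pods per second:

// recordSample appends one throughput sample: pods newly scheduled since the
// previous sample, normalised to pods per second. A simplification of what
// the collector's run() loop does between samples.
func recordSample(tc *throughputCollector, lastCount, currentCount int, interval time.Duration) {
	newlyScheduled := currentCount - lastCount
	tc.schedulingThroughputs = append(tc.schedulingThroughputs,
		float64(newlyScheduled)/interval.Seconds())
}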
