Commit 837d917

Make sleepOp duration parametrizable in scheduler_perf
1 parent 4b33029 commit 837d917

2 files changed (+21, -20 lines)


test/integration/scheduler_perf/config/performance-config.yaml

Lines changed: 6 additions & 2 deletions
@@ -1450,7 +1450,7 @@
     skipWaitToCompletion: true
   # Wait to make sure gated pods are enqueued in scheduler.
   - opcode: sleep
-    duration: 5s
+    durationParam: $sleepDuration
   # Create pods that will be gradually deleted after being scheduled.
   - opcode: createPods
     countParam: $deletingPods
@@ -1471,13 +1471,15 @@
       gatedPods: 10
       deletingPods: 10
       measurePods: 10
+      sleepDuration: 1s
   - name: 1Node_10000GatedPods
     labels: [performance, short]
     threshold: 130
     params:
       gatedPods: 10000
       deletingPods: 20000
       measurePods: 20000
+      sleepDuration: 5s
 
 - name: SchedulingGatedPodsWithPodAffinityImpactForThroughput
   defaultPodTemplatePath: config/templates/pod-with-label.yaml
@@ -1491,7 +1493,7 @@
     skipWaitToCompletion: true
   - opcode: sleep
     # To produce a stable scheduler_perf result, here we make sure all gated Pods are stored in the scheduling queue.
-    duration: 5s
+    durationParam: $sleepDuration
   - opcode: createPods
     # The scheduling of those Pods will result in many cluster events (AssignedPodAdded)
     # and each of them will be processed by the scheduling queue.
@@ -1504,12 +1506,14 @@
     params:
       gatedPods: 10
       measurePods: 10
+      sleepDuration: 1s
   - name: 1Node_10000GatedPods
     labels: [performance, short]
     threshold: 110
     params:
       gatedPods: 10000
       measurePods: 20000
+      sleepDuration: 5s
 
 # This test case simulates the scheduling when pods selected to schedule have deletionTimestamp set.
 # There was a memory leak related to this path of code fixed in:
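With this change the sleep length is no longer hard-coded in the workload template: durationParam: $sleepDuration is resolved against each workload's params block, so the small 10-pod workloads sleep for 1s while the 10000-pod ones keep the original 5s. A minimal sketch of that resolution rule in Go (resolveDurationParam and the plain string map are illustrative assumptions, not scheduler_perf's actual types):

package main

import (
	"fmt"
	"strings"
	"time"
)

// resolveDurationParam is a hypothetical stand-in for what the commit does in
// sleepOp.patchParams: strip the leading "$" from the reference, look the name
// up in the workload's params, and parse the value as a Go duration string.
func resolveDurationParam(params map[string]string, ref string) (time.Duration, error) {
	name := strings.TrimPrefix(ref, "$")
	raw, ok := params[name]
	if !ok {
		return 0, fmt.Errorf("parameter %q not found", name)
	}
	return time.ParseDuration(raw)
}

func main() {
	// Params as defined by the 1Node_10GatedPods workload above.
	params := map[string]string{"sleepDuration": "1s"}
	d, err := resolveDurationParam(params, "$sleepDuration")
	if err != nil {
		panic(err)
	}
	fmt.Println(d) // 1s
}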

test/integration/scheduler_perf/scheduler_perf.go

Lines changed: 15 additions & 18 deletions
@@ -835,22 +835,10 @@ func (bo barrierOp) patchParams(w *workload) (realOp, error) {
 type sleepOp struct {
 	// Must be "sleep".
 	Opcode operationCode
-	// duration of sleep.
-	Duration time.Duration
-}
-
-func (so *sleepOp) UnmarshalJSON(data []byte) (err error) {
-	var tmp struct {
-		Opcode   operationCode
-		Duration string
-	}
-	if err = json.Unmarshal(data, &tmp); err != nil {
-		return err
-	}
-
-	so.Opcode = tmp.Opcode
-	so.Duration, err = time.ParseDuration(tmp.Duration)
-	return err
+	// Duration of sleep.
+	Duration metav1.Duration
+	// Template parameter for Duration.
+	DurationParam string
 }
 
 func (so *sleepOp) isValid(_ bool) error {
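The struct change works because metav1.Duration (from k8s.io/apimachinery) already implements json.Unmarshaler and parses Go duration strings with time.ParseDuration, which is what made the hand-written UnmarshalJSON above removable. A minimal demonstration of the decoding the field now relies on:

package main

import (
	"encoding/json"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	var op struct {
		Opcode   string
		Duration metav1.Duration
	}
	// metav1.Duration unmarshals duration strings via time.ParseDuration,
	// so a literal "duration: 5s" in the config decodes without custom code.
	if err := json.Unmarshal([]byte(`{"Opcode":"sleep","Duration":"5s"}`), &op); err != nil {
		panic(err)
	}
	fmt.Println(op.Duration.Duration) // 5s
}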
@@ -861,7 +849,16 @@ func (so *sleepOp) collectsMetrics() bool {
 	return false
 }
 
-func (so sleepOp) patchParams(_ *workload) (realOp, error) {
+func (so sleepOp) patchParams(w *workload) (realOp, error) {
+	if so.DurationParam != "" {
+		durationStr, err := getParam[string](w.Params, so.DurationParam[1:])
+		if err != nil {
+			return nil, err
+		}
+		if so.Duration.Duration, err = time.ParseDuration(durationStr); err != nil {
+			return nil, fmt.Errorf("invalid duration parameter %s: %w", so.DurationParam, err)
+		}
+	}
 	return &so, nil
 }
 
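patchParams now resolves the parameter when a workload is instantiated: so.DurationParam[1:] strips the leading "$" before the lookup, and the resulting string is parsed into the Duration field. The generic getParam helper is called here but not shown in this diff; the sketch below is an assumed approximation of such a typed lookup, with the map type and error messages invented for illustration:

package main

import "fmt"

// getParamSketch is an illustrative stand-in for scheduler_perf's getParam,
// which this diff calls but does not show: fetch a workload parameter by name
// and assert it has the requested type.
func getParamSketch[T any](params map[string]any, name string) (T, error) {
	var zero T
	raw, ok := params[name]
	if !ok {
		return zero, fmt.Errorf("parameter %q is not defined", name)
	}
	val, ok := raw.(T)
	if !ok {
		return zero, fmt.Errorf("parameter %q has type %T, want %T", name, raw, zero)
	}
	return val, nil
}

func main() {
	params := map[string]any{"sleepDuration": "5s"}
	s, err := getParamSketch[string](params, "sleepDuration")
	fmt.Println(s, err) // 5s <nil>
}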
@@ -1580,7 +1577,7 @@ func runWorkload(tCtx ktesting.TContext, tc *testCase, w *workload, informerFact
 	case *sleepOp:
 		select {
 		case <-tCtx.Done():
-		case <-time.After(concreteOp.Duration):
+		case <-time.After(concreteOp.Duration.Duration):
 		}
 
 	case *startCollectingMetricsOp:
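The runWorkload change only adds the extra .Duration dereference; the surrounding select is Go's standard cancellable-sleep pattern, shown standalone below (sleepCtx is an illustrative name, not part of the diff):

package main

import (
	"context"
	"fmt"
	"time"
)

// sleepCtx mirrors the pattern runWorkload uses for sleepOp: block for the
// given duration, but return early if the context is cancelled first.
func sleepCtx(ctx context.Context, d time.Duration) {
	select {
	case <-ctx.Done():
	case <-time.After(d):
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()
	start := time.Now()
	sleepCtx(ctx, 5*time.Second) // returns after ~100ms, not 5s
	fmt.Println("slept for", time.Since(start).Round(time.Millisecond))
}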
