Skip to content

Commit 5e65529

Browse files
authored
Merge pull request kubernetes#127759 from macsko/allow_to_filter_pods_using_labels_while_collecting_metrics_scheduler_perf
Allow to filter pods using labels while collecting metrics in scheduler_perf
2 parents 1b71b94 + fdbf21e commit 5e65529

File tree

2 files changed

+20
-14
lines changed

2 files changed

+20
-14
lines changed

test/integration/scheduler_perf/scheduler_perf.go

Lines changed: 10 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -871,6 +871,10 @@ type startCollectingMetricsOp struct {
871871
Name string
872872
// Namespaces for which the scheduling throughput metric is calculated.
873873
Namespaces []string
874+
// Labels used to filter the pods for which the scheduling throughput metric is collected.
875+
// If empty, it will collect the metric for all pods in the selected namespaces.
876+
// Optional.
877+
LabelSelector map[string]string
874878
}
875879

876880
func (scm *startCollectingMetricsOp) isValid(_ bool) error {
@@ -1227,12 +1231,12 @@ func checkEmptyInFlightEvents() error {
12271231
return nil
12281232
}
12291233

1230-
func startCollectingMetrics(tCtx ktesting.TContext, collectorWG *sync.WaitGroup, podInformer coreinformers.PodInformer, mcc *metricsCollectorConfig, throughputErrorMargin float64, opIndex int, name string, namespaces []string) (ktesting.TContext, []testDataCollector) {
1234+
func startCollectingMetrics(tCtx ktesting.TContext, collectorWG *sync.WaitGroup, podInformer coreinformers.PodInformer, mcc *metricsCollectorConfig, throughputErrorMargin float64, opIndex int, name string, namespaces []string, labelSelector map[string]string) (ktesting.TContext, []testDataCollector) {
12311235
collectorCtx := ktesting.WithCancel(tCtx)
12321236
workloadName := tCtx.Name()
12331237
// The first part is the same for each workload, therefore we can strip it.
12341238
workloadName = workloadName[strings.Index(name, "/")+1:]
1235-
collectors := getTestDataCollectors(podInformer, fmt.Sprintf("%s/%s", workloadName, name), namespaces, mcc, throughputErrorMargin)
1239+
collectors := getTestDataCollectors(podInformer, fmt.Sprintf("%s/%s", workloadName, name), namespaces, labelSelector, mcc, throughputErrorMargin)
12361240
for _, collector := range collectors {
12371241
// Need loop-local variable for function below.
12381242
collector := collector
@@ -1373,7 +1377,7 @@ func runWorkload(tCtx ktesting.TContext, tc *testCase, w *workload, informerFact
13731377
if collectorCtx != nil {
13741378
tCtx.Fatalf("op %d: Metrics collection is overlapping. Probably second collector was started before stopping a previous one", opIndex)
13751379
}
1376-
collectorCtx, collectors = startCollectingMetrics(tCtx, &collectorWG, podInformer, tc.MetricsCollectorConfig, throughputErrorMargin, opIndex, namespace, []string{namespace})
1380+
collectorCtx, collectors = startCollectingMetrics(tCtx, &collectorWG, podInformer, tc.MetricsCollectorConfig, throughputErrorMargin, opIndex, namespace, []string{namespace}, nil)
13771381
defer collectorCtx.Cancel("cleaning up")
13781382
}
13791383
if err := createPodsRapidly(tCtx, namespace, concreteOp); err != nil {
@@ -1584,7 +1588,7 @@ func runWorkload(tCtx ktesting.TContext, tc *testCase, w *workload, informerFact
15841588
if collectorCtx != nil {
15851589
tCtx.Fatalf("op %d: Metrics collection is overlapping. Probably second collector was started before stopping a previous one", opIndex)
15861590
}
1587-
collectorCtx, collectors = startCollectingMetrics(tCtx, &collectorWG, podInformer, tc.MetricsCollectorConfig, throughputErrorMargin, opIndex, concreteOp.Name, concreteOp.Namespaces)
1591+
collectorCtx, collectors = startCollectingMetrics(tCtx, &collectorWG, podInformer, tc.MetricsCollectorConfig, throughputErrorMargin, opIndex, concreteOp.Name, concreteOp.Namespaces, concreteOp.LabelSelector)
15881592
defer collectorCtx.Cancel("cleaning up")
15891593

15901594
case *stopCollectingMetricsOp:
@@ -1633,12 +1637,12 @@ type testDataCollector interface {
16331637
collect() []DataItem
16341638
}
16351639

1636-
func getTestDataCollectors(podInformer coreinformers.PodInformer, name string, namespaces []string, mcc *metricsCollectorConfig, throughputErrorMargin float64) []testDataCollector {
1640+
func getTestDataCollectors(podInformer coreinformers.PodInformer, name string, namespaces []string, labelSelector map[string]string, mcc *metricsCollectorConfig, throughputErrorMargin float64) []testDataCollector {
16371641
if mcc == nil {
16381642
mcc = &defaultMetricsCollectorConfig
16391643
}
16401644
return []testDataCollector{
1641-
newThroughputCollector(podInformer, map[string]string{"Name": name}, namespaces, throughputErrorMargin),
1645+
newThroughputCollector(podInformer, map[string]string{"Name": name}, labelSelector, namespaces, throughputErrorMargin),
16421646
newMetricsCollector(mcc, map[string]string{"Name": name}),
16431647
}
16441648
}

test/integration/scheduler_perf/util.go

Lines changed: 10 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -405,20 +405,22 @@ func collectHistogramVec(metric string, labels map[string]string, lvMap map[stri
405405
type throughputCollector struct {
406406
podInformer coreinformers.PodInformer
407407
schedulingThroughputs []float64
408-
labels map[string]string
408+
labelSelector map[string]string
409+
resultLabels map[string]string
409410
namespaces sets.Set[string]
410411
errorMargin float64
411412

412413
progress []podScheduling
413414
start time.Time
414415
}
415416

416-
func newThroughputCollector(podInformer coreinformers.PodInformer, labels map[string]string, namespaces []string, errorMargin float64) *throughputCollector {
417+
func newThroughputCollector(podInformer coreinformers.PodInformer, resultLabels map[string]string, labelSelector map[string]string, namespaces []string, errorMargin float64) *throughputCollector {
417418
return &throughputCollector{
418-
podInformer: podInformer,
419-
labels: labels,
420-
namespaces: sets.New(namespaces...),
421-
errorMargin: errorMargin,
419+
podInformer: podInformer,
420+
labelSelector: labelSelector,
421+
resultLabels: resultLabels,
422+
namespaces: sets.New(namespaces...),
423+
errorMargin: errorMargin,
422424
}
423425
}
424426

@@ -451,7 +453,7 @@ func (tc *throughputCollector) run(tCtx ktesting.TContext) {
451453
return
452454
}
453455

454-
if !tc.namespaces.Has(newPod.Namespace) {
456+
if !tc.namespaces.Has(newPod.Namespace) || !labelsMatch(newPod.Labels, tc.labelSelector) {
455457
return
456458
}
457459

@@ -577,7 +579,7 @@ func (tc *throughputCollector) run(tCtx ktesting.TContext) {
577579

578580
func (tc *throughputCollector) collect() []DataItem {
579581
throughputSummary := DataItem{
580-
Labels: tc.labels,
582+
Labels: tc.resultLabels,
581583
progress: tc.progress,
582584
start: tc.start,
583585
}

0 commit comments

Comments (0)