Commit fcce8aa

workloadExecutor's members use values, not pointers
1 parent ca8a0f5 commit fcce8aa

File tree: 1 file changed (+70, -64)

test/integration/scheduler_perf/scheduler_perf.go

Lines changed: 70 additions & 64 deletions
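
The core of this change: WorkloadExecutor previously held pointers to its context and wait-group members, which forced (*e.tCtx)-style dereferences at every call site. Because ktesting.TContext is an interface, storing it by value preserves the same sharing semantics while making every call read naturally. Below is a minimal, self-contained sketch of the before/after call shapes; TCtx, fakeCtx, and the executor structs are hypothetical stand-ins, not code from this repository:

```go
// Hypothetical sketch of the pattern this commit removes.
// TCtx stands in for ktesting.TContext; all names are illustrative only.
package main

import "fmt"

// TCtx mimics an interface-typed test context.
type TCtx interface {
	Fatalf(format string, args ...any)
}

type fakeCtx struct{}

func (fakeCtx) Fatalf(format string, args ...any) { fmt.Printf(format+"\n", args...) }

// Before: a pointer to an interface forces a dereference at every call site.
type executorBefore struct{ tCtx *TCtx }

// After: storing the interface value directly reads naturally.
type executorAfter struct{ tCtx TCtx }

func main() {
	var tCtx TCtx = fakeCtx{}

	b := executorBefore{tCtx: &tCtx}
	(*b.tCtx).Fatalf("op %d: %v", 1, "explicit deref needed") // old call shape

	a := executorAfter{tCtx: tCtx}
	a.tCtx.Fatalf("op %d: %v", 1, "plain method call") // new call shape
}
```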
@@ -1451,10 +1451,10 @@ func stopCollectingMetrics(tCtx ktesting.TContext, collectorCtx ktesting.TContex
 }
 
 type WorkloadExecutor struct {
-	tCtx                         *ktesting.TContext
-	wg                           *sync.WaitGroup
-	collectorCtx                 *ktesting.TContext
-	collectorWG                  *sync.WaitGroup
+	tCtx                         ktesting.TContext
+	wg                           sync.WaitGroup
+	collectorCtx                 ktesting.TContext
+	collectorWG                  sync.WaitGroup
 	collectors                   []testDataCollector
 	dataItems                    []DataItem
 	numPodsScheduledPerNamespace map[string]int
@@ -1511,10 +1511,10 @@ func runWorkload(tCtx ktesting.TContext, tc *testCase, w *workload, informerFact
 	defer collectorWG.Wait()
 
 	executor := WorkloadExecutor{
-		tCtx:                         &tCtx,
-		wg:                           &wg,
-		collectorCtx:                 &collectorCtx,
-		collectorWG:                  &collectorWG,
+		tCtx:                         tCtx,
+		wg:                           wg,
+		collectorCtx:                 collectorCtx,
+		collectorWG:                  collectorWG,
 		collectors:                   collectors,
 		numPodsScheduledPerNamespace: make(map[string]int),
 		podInformer:                  podInformer,
@@ -1543,8 +1543,8 @@ func runWorkload(tCtx ktesting.TContext, tc *testCase, w *workload, informerFact
 			executor.runCreateNamespaceOp(opIndex, concreteOp)
 		case *createPodsOp:
 			executor.runCreatePodsOp(opIndex, concreteOp)
-			if *executor.collectorCtx != nil {
-				defer (*executor.collectorCtx).Cancel("cleaning up")
+			if executor.collectorCtx != nil {
+				executor.collectorCtx.Cancel("cleaning up")
 			}
 		case *deletePodsOp:
 			executor.runDeletePodsOp(opIndex, concreteOp)
@@ -1556,7 +1556,7 @@ func runWorkload(tCtx ktesting.TContext, tc *testCase, w *workload, informerFact
 			executor.runSleepOp(concreteOp)
 		case *startCollectingMetricsOp:
 			executor.runStartCollectingMetricsOp(opIndex, concreteOp)
-			defer (*executor.collectorCtx).Cancel("cleaning up")
+			defer executor.collectorCtx.Cancel("cleaning up")
 		case *stopCollectingMetricsOp:
 			executor.runStopCollectingMetrics(opIndex)
 		default:
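
One subtlety in the deferred call above: Go evaluates the receiver of a deferred method call when the defer statement executes, not when the surrounding function returns, so defer executor.collectorCtx.Cancel("cleaning up") cancels whatever context the field held at that moment. A minimal sketch of that semantic, with hypothetical names (canceler, namedCtx):

```go
// Hypothetical sketch: defer evaluates the method receiver immediately,
// so the deferred Cancel acts on whatever the field held at defer time.
package main

import "fmt"

type canceler interface{ Cancel(reason string) }

type namedCtx struct{ name string }

func (c *namedCtx) Cancel(reason string) { fmt.Println(c.name, "canceled:", reason) }

func main() {
	holder := struct{ ctx canceler }{ctx: &namedCtx{name: "first"}}
	defer holder.ctx.Cancel("cleaning up") // receiver "first" is captured here
	holder.ctx = &namedCtx{name: "second"} // later reassignment does not affect the defer
	// On return this prints: first canceled: cleaning up
}
```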
@@ -1576,27 +1576,27 @@ func runWorkload(tCtx ktesting.TContext, tc *testCase, w *workload, informerFact
 }
 
 func (e *WorkloadExecutor) runCreateNodesOp(opIndex int, op *createNodesOp) {
-	nodePreparer, err := getNodePreparer(fmt.Sprintf("node-%d-", opIndex), op, (*e.tCtx).Client())
+	nodePreparer, err := getNodePreparer(fmt.Sprintf("node-%d-", opIndex), op, e.tCtx.Client())
 	if err != nil {
-		(*e.tCtx).Fatalf("op %d: %v", opIndex, err)
+		e.tCtx.Fatalf("op %d: %v", opIndex, err)
 	}
-	if err := nodePreparer.PrepareNodes(*e.tCtx, e.nextNodeIndex); err != nil {
-		(*e.tCtx).Fatalf("op %d: %v", opIndex, err)
+	if err := nodePreparer.PrepareNodes(e.tCtx, e.nextNodeIndex); err != nil {
+		e.tCtx.Fatalf("op %d: %v", opIndex, err)
 	}
 	e.nextNodeIndex += op.Count
 }
 
 func (e *WorkloadExecutor) runCreateNamespaceOp(opIndex int, op *createNamespacesOp) {
-	nsPreparer, err := newNamespacePreparer(*e.tCtx, op)
+	nsPreparer, err := newNamespacePreparer(e.tCtx, op)
 	if err != nil {
-		(*e.tCtx).Fatalf("op %d: %v", opIndex, err)
+		e.tCtx.Fatalf("op %d: %v", opIndex, err)
 	}
-	if err := nsPreparer.prepare(*e.tCtx); err != nil {
-		err2 := nsPreparer.cleanup(*e.tCtx)
+	if err := nsPreparer.prepare(e.tCtx); err != nil {
+		err2 := nsPreparer.cleanup(e.tCtx)
 		if err2 != nil {
 			err = fmt.Errorf("prepare: %w; cleanup: %w", err, err2)
 		}
-		(*e.tCtx).Fatalf("op %d: %v", opIndex, err)
+		e.tCtx.Fatalf("op %d: %v", opIndex, err)
 	}
 	for _, n := range nsPreparer.namespaces() {
 		if _, ok := e.numPodsScheduledPerNamespace[n]; ok {
@@ -1610,20 +1610,20 @@ func (e *WorkloadExecutor) runCreateNamespaceOp(opIndex int, op *createNamespace
 func (e *WorkloadExecutor) runBarrierOp(opIndex int, op *barrierOp) {
 	for _, namespace := range op.Namespaces {
 		if _, ok := e.numPodsScheduledPerNamespace[namespace]; !ok {
-			(*e.tCtx).Fatalf("op %d: unknown namespace %s", opIndex, namespace)
+			e.tCtx.Fatalf("op %d: unknown namespace %s", opIndex, namespace)
 		}
 	}
 	switch op.StageRequirement {
 	case Attempted:
-		if err := waitUntilPodsAttempted(*e.tCtx, e.podInformer, op.LabelSelector, op.Namespaces, e.numPodsScheduledPerNamespace); err != nil {
-			(*e.tCtx).Fatalf("op %d: %v", opIndex, err)
+		if err := waitUntilPodsAttempted(e.tCtx, e.podInformer, op.LabelSelector, op.Namespaces, e.numPodsScheduledPerNamespace); err != nil {
+			e.tCtx.Fatalf("op %d: %v", opIndex, err)
 		}
 	case Scheduled:
 		// Default should be treated like "Scheduled", so handling both in the same way.
 		fallthrough
 	default:
-		if err := waitUntilPodsScheduled(*e.tCtx, e.podInformer, op.LabelSelector, op.Namespaces, e.numPodsScheduledPerNamespace); err != nil {
-			(*e.tCtx).Fatalf("op %d: %v", opIndex, err)
+		if err := waitUntilPodsScheduled(e.tCtx, e.podInformer, op.LabelSelector, op.Namespaces, e.numPodsScheduledPerNamespace); err != nil {
+			e.tCtx.Fatalf("op %d: %v", opIndex, err)
 		}
 		// At the end of the barrier, we can be sure that there are no pods
 		// pending scheduling in the namespaces that we just blocked on.
@@ -1639,15 +1639,15 @@ func (e *WorkloadExecutor) runBarrierOp(opIndex int, op *barrierOp) {
 
 func (e *WorkloadExecutor) runSleepOp(op *sleepOp) {
 	select {
-	case <-(*e.tCtx).Done():
+	case <-(e.tCtx).Done():
 	case <-time.After(op.Duration.Duration):
 	}
 }
 
 func (e *WorkloadExecutor) runStopCollectingMetrics(opIndex int) {
-	items := stopCollectingMetrics(*e.tCtx, *e.collectorCtx, e.collectorWG, e.workload.Threshold, *e.workload.ThresholdMetricSelector, opIndex, e.collectors)
+	items := stopCollectingMetrics(e.tCtx, e.collectorCtx, &e.collectorWG, e.workload.Threshold, *e.workload.ThresholdMetricSelector, opIndex, e.collectors)
 	e.dataItems = append(e.dataItems, items...)
-	*e.collectorCtx = nil
+	e.collectorCtx = nil
 }
 
 func (e *WorkloadExecutor) runCreatePodsOp(opIndex int, op *createPodsOp) {
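
Note the new &e.collectorWG argument above: once collectorWG is stored as a plain sync.WaitGroup, helpers such as stopCollectingMetrics must receive its address, because a WaitGroup must never be copied after first use (go vet's copylocks check flags such copies). A minimal sketch of the pattern, with hypothetical names (executor, waitForCollectors):

```go
// Hypothetical sketch: once the WaitGroup is stored by value, callees take
// *sync.WaitGroup; passing the field by value would copy its internal lock.
package main

import "sync"

type executor struct {
	collectorWG sync.WaitGroup // value field, as in the new struct layout
}

// waitForCollectors mimics a helper that blocks on the collector goroutines.
func waitForCollectors(wg *sync.WaitGroup) { wg.Wait() }

func main() {
	e := &executor{}
	for i := 0; i < 3; i++ {
		e.collectorWG.Add(1)
		go func() { defer e.collectorWG.Done() }()
	}
	waitForCollectors(&e.collectorWG) // pass the address, never a copy
}
```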
@@ -1656,41 +1656,44 @@ func (e *WorkloadExecutor) runCreatePodsOp(opIndex int, op *createPodsOp) {
 	if op.Namespace != nil {
 		namespace = *op.Namespace
 	}
-	createNamespaceIfNotPresent(*e.tCtx, namespace, &e.numPodsScheduledPerNamespace)
+	createNamespaceIfNotPresent(e.tCtx, namespace, &e.numPodsScheduledPerNamespace)
 	if op.PodTemplatePath == nil {
 		op.PodTemplatePath = e.testCase.DefaultPodTemplatePath
 	}
 
 	if op.CollectMetrics {
-		if *e.collectorCtx != nil {
-			(*e.tCtx).Fatalf("op %d: Metrics collection is overlapping. Probably second collector was started before stopping a previous one", opIndex)
+		if e.collectorCtx != nil {
+			e.tCtx.Fatalf("op %d: Metrics collection is overlapping. Probably second collector was started before stopping a previous one", opIndex)
 		}
-		*e.collectorCtx, e.collectors = startCollectingMetrics(*e.tCtx, e.collectorWG, e.podInformer, e.testCase.MetricsCollectorConfig, e.throughputErrorMargin, opIndex, namespace, []string{namespace}, nil)
+		e.collectorCtx, e.collectors = startCollectingMetrics(e.tCtx, &e.collectorWG, e.podInformer, e.testCase.MetricsCollectorConfig, e.throughputErrorMargin, opIndex, namespace, []string{namespace}, nil)
+		// e.collectorCtx.Cleanup(func() {
+		// 	e.collectorCtx.Cancel("cleaning up")
+		// })
 	}
-	if err := createPodsRapidly(*e.tCtx, namespace, op); err != nil {
-		(*e.tCtx).Fatalf("op %d: %v", opIndex, err)
+	if err := createPodsRapidly(e.tCtx, namespace, op); err != nil {
+		e.tCtx.Fatalf("op %d: %v", opIndex, err)
 	}
 	switch {
 	case op.SkipWaitToCompletion:
 		// Only record those namespaces that may potentially require barriers
 		// in the future.
 		e.numPodsScheduledPerNamespace[namespace] += op.Count
 	case op.SteadyState:
-		if err := createPodsSteadily(*e.tCtx, namespace, e.podInformer, op); err != nil {
-			(*e.tCtx).Fatalf("op %d: %v", opIndex, err)
+		if err := createPodsSteadily(e.tCtx, namespace, e.podInformer, op); err != nil {
+			e.tCtx.Fatalf("op %d: %v", opIndex, err)
 		}
 	default:
-		if err := waitUntilPodsScheduledInNamespace(*e.tCtx, e.podInformer, nil, namespace, op.Count); err != nil {
-			(*e.tCtx).Fatalf("op %d: error in waiting for pods to get scheduled: %v", opIndex, err)
+		if err := waitUntilPodsScheduledInNamespace(e.tCtx, e.podInformer, nil, namespace, op.Count); err != nil {
+			e.tCtx.Fatalf("op %d: error in waiting for pods to get scheduled: %v", opIndex, err)
 		}
 	}
 	if op.CollectMetrics {
 		// CollectMetrics and SkipWaitToCompletion can never be true at the
 		// same time, so if we're here, it means that all pods have been
 		// scheduled.
-		items := stopCollectingMetrics((*e.tCtx), (*e.collectorCtx), e.collectorWG, e.workload.Threshold, *e.workload.ThresholdMetricSelector, opIndex, e.collectors)
+		items := stopCollectingMetrics(e.tCtx, e.collectorCtx, &e.collectorWG, e.workload.Threshold, *e.workload.ThresholdMetricSelector, opIndex, e.collectors)
 		e.dataItems = append(e.dataItems, items...)
-		*e.collectorCtx = nil
+		e.collectorCtx = nil
 	}
 }
 
@@ -1699,7 +1702,7 @@ func (e *WorkloadExecutor) runDeletePodsOp(opIndex int, op *deletePodsOp) {
 
 	podsToDelete, err := e.podInformer.Lister().Pods(op.Namespace).List(labelSelector)
 	if err != nil {
-		(*e.tCtx).Fatalf("op %d: error in listing pods in the namespace %s: %v", opIndex, op.Namespace, err)
+		e.tCtx.Fatalf("op %d: error in listing pods in the namespace %s: %v", opIndex, op.Namespace, err)
 	}
 
 	deletePods := func(opIndex int) {
@@ -1710,13 +1713,13 @@ func (e *WorkloadExecutor) runDeletePodsOp(opIndex int, op *deletePodsOp) {
 		for i := 0; i < len(podsToDelete); i++ {
 			select {
 			case <-ticker.C:
-				if err := (*e.tCtx).Client().CoreV1().Pods(op.Namespace).Delete(*e.tCtx, podsToDelete[i].Name, metav1.DeleteOptions{}); err != nil {
+				if err := e.tCtx.Client().CoreV1().Pods(op.Namespace).Delete(e.tCtx, podsToDelete[i].Name, metav1.DeleteOptions{}); err != nil {
 					if errors.Is(err, context.Canceled) {
 						return
 					}
-					(*e.tCtx).Errorf("op %d: unable to delete pod %v: %v", opIndex, podsToDelete[i].Name, err)
+					e.tCtx.Errorf("op %d: unable to delete pod %v: %v", opIndex, podsToDelete[i].Name, err)
 				}
-			case <-(*e.tCtx).Done():
+			case <-(e.tCtx).Done():
 				return
 			}
 		}
@@ -1725,11 +1728,11 @@ func (e *WorkloadExecutor) runDeletePodsOp(opIndex int, op *deletePodsOp) {
 		listOpts := metav1.ListOptions{
 			LabelSelector: labelSelector.String(),
 		}
-		if err := (*e.tCtx).Client().CoreV1().Pods(op.Namespace).DeleteCollection(*e.tCtx, metav1.DeleteOptions{}, listOpts); err != nil {
+		if err := e.tCtx.Client().CoreV1().Pods(op.Namespace).DeleteCollection(e.tCtx, metav1.DeleteOptions{}, listOpts); err != nil {
 			if errors.Is(err, context.Canceled) {
 				return
 			}
-			(*e.tCtx).Errorf("op %d: unable to delete pods in namespace %v: %v", opIndex, op.Namespace, err)
+			e.tCtx.Errorf("op %d: unable to delete pods in namespace %v: %v", opIndex, op.Namespace, err)
 		}
 	}
 
@@ -1751,43 +1754,43 @@ func (e *WorkloadExecutor) runChurnOp(opIndex int, op *churnOp) {
 	} else {
 		namespace = fmt.Sprintf("namespace-%d", opIndex)
 	}
-	restMapper := restmapper.NewDeferredDiscoveryRESTMapper(cacheddiscovery.NewMemCacheClient((*e.tCtx).Client().Discovery()))
+	restMapper := restmapper.NewDeferredDiscoveryRESTMapper(cacheddiscovery.NewMemCacheClient(e.tCtx.Client().Discovery()))
 	// Ensure the namespace exists.
 	nsObj := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
-	if _, err := (*e.tCtx).Client().CoreV1().Namespaces().Create(*e.tCtx, nsObj, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) {
-		(*e.tCtx).Fatalf("op %d: unable to create namespace %v: %v", opIndex, namespace, err)
+	if _, err := e.tCtx.Client().CoreV1().Namespaces().Create(e.tCtx, nsObj, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) {
+		e.tCtx.Fatalf("op %d: unable to create namespace %v: %v", opIndex, namespace, err)
 	}
 
 	var churnFns []func(name string) string
 
 	for i, path := range op.TemplatePaths {
 		unstructuredObj, gvk, err := getUnstructuredFromFile(path)
 		if err != nil {
-			(*e.tCtx).Fatalf("op %d: unable to parse the %v-th template path: %v", opIndex, i, err)
+			e.tCtx.Fatalf("op %d: unable to parse the %v-th template path: %v", opIndex, i, err)
 		}
 		// Obtain GVR.
 		mapping, err := restMapper.RESTMapping(gvk.GroupKind(), gvk.Version)
 		if err != nil {
-			(*e.tCtx).Fatalf("op %d: unable to find GVR for %v: %v", opIndex, gvk, err)
+			e.tCtx.Fatalf("op %d: unable to find GVR for %v: %v", opIndex, gvk, err)
 		}
 		gvr := mapping.Resource
 		// Distinguish cluster-scoped with namespaced API objects.
 		var dynRes dynamic.ResourceInterface
 		if mapping.Scope.Name() == meta.RESTScopeNameNamespace {
-			dynRes = (*e.tCtx).Dynamic().Resource(gvr).Namespace(namespace)
+			dynRes = e.tCtx.Dynamic().Resource(gvr).Namespace(namespace)
 		} else {
-			dynRes = (*e.tCtx).Dynamic().Resource(gvr)
+			dynRes = e.tCtx.Dynamic().Resource(gvr)
 		}
 
 		churnFns = append(churnFns, func(name string) string {
 			if name != "" {
-				if err := dynRes.Delete(*e.tCtx, name, metav1.DeleteOptions{}); err != nil && !errors.Is(err, context.Canceled) {
-					(*e.tCtx).Errorf("op %d: unable to delete %v: %v", opIndex, name, err)
+				if err := dynRes.Delete(e.tCtx, name, metav1.DeleteOptions{}); err != nil && !errors.Is(err, context.Canceled) {
+					e.tCtx.Errorf("op %d: unable to delete %v: %v", opIndex, name, err)
 				}
 				return ""
 			}
 
-			live, err := dynRes.Create(*e.tCtx, unstructuredObj, metav1.CreateOptions{})
+			live, err := dynRes.Create(e.tCtx, unstructuredObj, metav1.CreateOptions{})
 			if err != nil {
 				return ""
 			}
@@ -1818,7 +1821,7 @@ func (e *WorkloadExecutor) runChurnOp(opIndex int, op *churnOp) {
 				churnFns[i]("")
 			}
 			count++
-		case <-(*e.tCtx).Done():
+		case <-(e.tCtx).Done():
 			return
 		}
 	}
@@ -1842,7 +1845,7 @@ func (e *WorkloadExecutor) runChurnOp(opIndex int, op *churnOp) {
 				retVals[i][count%op.Number] = churnFns[i](retVals[i][count%op.Number])
 			}
 			count++
-		case <-(*e.tCtx).Done():
+		case <-(e.tCtx).Done():
 			return
 		}
 	}
@@ -1853,19 +1856,22 @@ func (e *WorkloadExecutor) runChurnOp(opIndex int, op *churnOp) {
 func (e *WorkloadExecutor) runDefaultOp(opIndex int, op realOp) {
 	runable, ok := op.(runnableOp)
 	if !ok {
-		(*e.tCtx).Fatalf("op %d: invalid op %v", opIndex, op)
+		e.tCtx.Fatalf("op %d: invalid op %v", opIndex, op)
 	}
 	for _, namespace := range runable.requiredNamespaces() {
-		createNamespaceIfNotPresent(*e.tCtx, namespace, &e.numPodsScheduledPerNamespace)
+		createNamespaceIfNotPresent(e.tCtx, namespace, &e.numPodsScheduledPerNamespace)
 	}
-	runable.run(*e.tCtx)
+	runable.run(e.tCtx)
 }
 
 func (e *WorkloadExecutor) runStartCollectingMetricsOp(opIndex int, op *startCollectingMetricsOp) {
-	if *e.collectorCtx != nil {
-		(*e.tCtx).Fatalf("op %d: Metrics collection is overlapping. Probably second collector was started before stopping a previous one", opIndex)
+	if e.collectorCtx != nil {
+		e.tCtx.Fatalf("op %d: Metrics collection is overlapping. Probably second collector was started before stopping a previous one", opIndex)
 	}
-	*e.collectorCtx, e.collectors = startCollectingMetrics((*e.tCtx), e.collectorWG, e.podInformer, e.testCase.MetricsCollectorConfig, e.throughputErrorMargin, opIndex, op.Name, op.Namespaces, op.LabelSelector)
+	e.collectorCtx, e.collectors = startCollectingMetrics(e.tCtx, &e.collectorWG, e.podInformer, e.testCase.MetricsCollectorConfig, e.throughputErrorMargin, opIndex, op.Name, op.Namespaces, op.LabelSelector)
+	// e.collectorCtx.Cleanup(func() {
+	// 	collectorCtx.Cancel("cleaning up")
+	// })
 }
 
 func createNamespaceIfNotPresent(tCtx ktesting.TContext, namespace string, podsPerNamespace *map[string]int) {
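
With collectorCtx stored as an interface value, the overlap check (e.collectorCtx != nil) and the reset in runStopCollectingMetrics (e.collectorCtx = nil) rely on ordinary nil-interface semantics. A small sketch of those semantics, including the classic typed-nil caveat; tContext and collector here are hypothetical stand-ins:

```go
// Hypothetical sketch of the nil-interface semantics the new checks rely on.
package main

import "fmt"

type tContext interface{ Name() string }

type collector struct{}

func (*collector) Name() string { return "collector" }

func main() {
	var ctx tContext        // zero value: a nil interface
	fmt.Println(ctx != nil) // false: no collector active

	ctx = &collector{}      // a collector was started
	fmt.Println(ctx != nil) // true

	ctx = nil               // reset, as runStopCollectingMetrics does
	fmt.Println(ctx != nil) // false again

	// Caveat: a typed nil pointer stored in the interface is not nil.
	var p *collector
	ctx = p
	fmt.Println(ctx != nil) // true, even though p itself is nil
}
```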
