@@ -1497,27 +1497,13 @@ func runWorkload(tCtx ktesting.TContext, tc *testCase, w *workload, informerFact
 	// Everything else started by this function gets stopped before it returns.
 	tCtx = ktesting.WithCancel(tCtx)
 
-	var dataItems []DataItem
-
-	var collectors []testDataCollector
-	// This needs a separate context and wait group because
-	// the metrics collecting needs to be sure that the goroutines
-	// are stopped.
-	var collectorCtx ktesting.TContext
-
 	executor := WorkloadExecutor{
 		tCtx:                         tCtx,
-		wg:                           sync.WaitGroup{},
-		collectorCtx:                 collectorCtx,
-		collectorWG:                  sync.WaitGroup{},
-		collectors:                   collectors,
 		numPodsScheduledPerNamespace: make(map[string]int),
 		podInformer:                  podInformer,
 		throughputErrorMargin:        throughputErrorMargin,
 		testCase:                     tc,
 		workload:                     w,
-		nextNodeIndex:                0,
-		dataItems:                    dataItems,
 	}
 
 	defer executor.wg.Wait()
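Note (not part of the diff): the removed initializations are redundant because Go gives omitted struct fields their zero values. A minimal, self-contained sketch of that behavior, using a hypothetical stand-in struct rather than the real WorkloadExecutor:

package main

import (
	"fmt"
	"sync"
)

// executor is a placeholder struct, only to show zero values of omitted fields.
type executor struct {
	wg            sync.WaitGroup // zero value is ready to use
	collectors    []string       // zero value is a nil slice; append works on it
	nextNodeIndex int            // zero value is 0
}

func main() {
	e := executor{} // no fields set explicitly

	// The zero-value WaitGroup behaves like one written as sync.WaitGroup{}.
	e.wg.Add(1)
	go func() {
		defer e.wg.Done()
		// Appending to the nil slice allocates as needed.
		e.collectors = append(e.collectors, "metrics")
	}()
	e.wg.Wait()

	fmt.Println(e.collectors, e.nextNodeIndex) // [metrics] 0
}

For the same reason, dropping wg, collectorCtx, collectorWG, collectors, nextNodeIndex, and the empty dataItems slice from the composite literal does not change behavior.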
@@ -1566,7 +1552,7 @@ func runWorkload(tCtx ktesting.TContext, tc *testCase, w *workload, informerFact
 
 	// Some tests have unschedulable pods. Do not add an implicit barrier at the
 	// end as we do not want to wait for them.
-	return dataItems
+	return executor.dataItems
 }
 
 func (e *WorkloadExecutor) runCreateNodesOp(opIndex int, op *createNodesOp) {
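Note (not part of the diff): returning executor.dataItems reflects that the collected results now live on the WorkloadExecutor rather than in a local variable of runWorkload. A rough sketch under that assumption, with placeholder types and a hypothetical collect helper that are not taken from the real code:

package main

import "fmt"

// Placeholder types; the real DataItem and WorkloadExecutor carry more fields.
type DataItem struct{ Label string }

type WorkloadExecutor struct {
	dataItems []DataItem
}

// collect is a hypothetical helper: op runners append their results to the executor.
func (e *WorkloadExecutor) collect(items ...DataItem) {
	e.dataItems = append(e.dataItems, items...)
}

// runWorkload builds the executor, runs the ops, and returns whatever they collected.
func runWorkload() []DataItem {
	executor := WorkloadExecutor{}
	executor.collect(DataItem{Label: "scheduling-throughput"})
	return executor.dataItems
}

func main() {
	fmt.Println(runWorkload()) // [{scheduling-throughput}]
}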
@@ -1715,7 +1701,7 @@ func (e *WorkloadExecutor) runDeletePodsOp(opIndex int, op *deletePodsOp) {
 				}
 				e.tCtx.Errorf("op %d: unable to delete pod %v: %v", opIndex, podsToDelete[i].Name, err)
 			}
-		case <-(e.tCtx).Done():
+		case <-e.tCtx.Done():
 			return
 		}
 	}