@@ -1274,9 +1274,10 @@ func runStartDeletionTest(t *testing.T, tc startDeletionTestCase, force bool) {
 	ndb := NewNodeDeletionBatcher(&ctx, scaleStateNotifier, ndt, 0*time.Second)
 	legacyFlagDrainConfig := SingleRuleDrainConfig(ctx.MaxGracefulTerminationSec)
 	evictor := Evictor{EvictionRetryTime: 0, PodEvictionHeadroom: DefaultPodEvictionHeadroom, shutdownGracePeriodByPodPriority: legacyFlagDrainConfig, fullDsEviction: force}
+	scheduler := NewGroupDeletionScheduler(&ctx, ndt, ndb, evictor)
 	actuator := Actuator{
 		ctx: &ctx, nodeDeletionTracker: ndt,
-		nodeDeletionScheduler: NewGroupDeletionScheduler(&ctx, ndt, ndb, evictor),
+		nodeDeletionScheduler: scheduler,
 		budgetProcessor:       budgets.NewScaleDownBudgetProcessor(&ctx),
 		configGetter:          nodegroupconfig.NewDefaultNodeGroupConfigProcessor(ctx.NodeGroupDefaults),
 	}
@@ -1305,9 +1306,12 @@ func runStartDeletionTest(t *testing.T, tc startDeletionTestCase, force bool) {
 	// Deletion taint may be lifted by goroutine, ignore taints to avoid race condition
 	ignoreTaints := cmpopts.IgnoreFields(apiv1.NodeSpec{}, "Taints")
 	statusCmpOpts := cmp.Options{ignoreSdNodeOrder, cmpNg, cmpopts.EquateEmpty(), ignoreTaints}
+	// Lock the deletion scheduler so the race detector does not complain.
+	scheduler.Lock()
 	if diff := cmp.Diff(wantScaleDownNodes, gotScaleDownNodes, statusCmpOpts); diff != "" {
 		t.Errorf("StartDeletion scaled down nodes diff (-want +got):\n%s", diff)
 	}
+	scheduler.Unlock()
 
 	// Verify that all expected nodes were deleted using the cloud provider hook.
 	var gotDeletedNodes []string
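For context on why the new Lock()/Unlock() calls silence the race detector, here is a minimal sketch of the pattern, assuming GroupDeletionScheduler embeds sync.Mutex (which is what makes scheduler.Lock() callable) and that the deletion goroutines take the same mutex before mutating shared state. The names fakeScheduler, markDeleted, and deleted are illustrative, not actual autoscaler identifiers.

package main

import (
	"fmt"
	"sync"
	"time"
)

// fakeScheduler is a stand-in for a type such as GroupDeletionScheduler that
// embeds sync.Mutex, so callers can invoke Lock()/Unlock() on it directly.
type fakeScheduler struct {
	sync.Mutex
	deleted map[string]bool // mutated by worker goroutines
}

// markDeleted simulates a deletion goroutine updating shared state under the lock.
func (s *fakeScheduler) markDeleted(node string) {
	s.Lock()
	defer s.Unlock()
	s.deleted[node] = true
}

func main() {
	s := &fakeScheduler{deleted: map[string]bool{}}

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		s.markDeleted("node-1") // concurrent write, guarded by the mutex
	}()

	time.Sleep(10 * time.Millisecond)

	// Hold the same lock while reading, mirroring scheduler.Lock() around the
	// cmp.Diff assertion in the test above; `go test -race` stays quiet because
	// the read and the concurrent writes are serialized by the mutex.
	s.Lock()
	fmt.Println("deleted so far:", s.deleted)
	s.Unlock()

	wg.Wait()
}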