@@ -147,9 +147,7 @@ func (cs *clusterState) rebalanceStores(
 	rangeMoveCount := 0
 	leaseTransferCount := 0
 	for idx /*logging only*/, store := range sheddingStores {
-		shouldReturnEarly := false
-		shouldContinue := false
-		{
+		shouldReturnEarly, shouldContinue := func() (bool, bool) {
 			log.KvDistribution.Infof(ctx, "start processing shedding store s%d: cpu node load %s, store load %s, worst dim %s",
 				store.StoreID, store.nls, store.sls, store.worstDim)
 			ss := cs.stores[store.StoreID]
@@ -343,8 +341,7 @@ func (cs *clusterState) rebalanceStores(
 				targetSS.maxFractionPendingIncrease, targetSS.maxFractionPendingDecrease)
 			if leaseTransferCount >= maxLeaseTransferCount {
 				log.KvDistribution.VInfof(ctx, 2, "reached max lease transfer count %d, returning", maxLeaseTransferCount)
-				shouldReturnEarly = true
-				break
+				return true, false
 			}
 			doneShedding = ss.maxFractionPendingDecrease >= maxFractionPendingThreshold
 			if doneShedding {
@@ -363,8 +360,7 @@ func (cs *clusterState) rebalanceStores(
 				// lease transfers -- so be it.
 				log.KvDistribution.VInfof(ctx, 2, "skipping replica transfers for s%d: done shedding=%v, lease_transfers=%d",
 					store.StoreID, doneShedding, leaseTransferCount)
-				shouldContinue = true
-				break
+				return false, true
 			}
 		} else {
 			log.KvDistribution.VInfof(ctx, 2, "skipping lease shedding: s%v != local store s%s or cpu is not overloaded: %v",
@@ -376,8 +372,7 @@ func (cs *clusterState) rebalanceStores(
 		if store.StoreID != localStoreID && store.dimSummary[CPURate] >= overloadSlow &&
 			now.Sub(ss.overloadStartTime) < remoteStoreLeaseSheddingGraceDuration {
 			log.KvDistribution.VInfof(ctx, 2, "skipping remote store s%d: in lease shedding grace period", store.StoreID)
-			shouldContinue = true
-			break
+			return false, true
 		}
 		// If the node is cpu overloaded, or the store/node is not fdOK, exclude
 		// the other stores on this node from receiving replicas shed by this
@@ -555,8 +550,7 @@ func (cs *clusterState) rebalanceStores(
 				rangeID, removeTarget.StoreID, addTarget.StoreID, changes[len(changes)-1], ss.adjusted.load, targetSS.adjusted.load)
 			if rangeMoveCount >= maxRangeMoveCount {
 				log.KvDistribution.VInfof(ctx, 2, "s%d has reached max range move count %d: mma returning with %d stores left in shedding stores", store.StoreID, maxRangeMoveCount, len(sheddingStores)-(idx+1))
-				shouldReturnEarly = true
-				break
+				return true, false
 			}
 			doneShedding = ss.maxFractionPendingDecrease >= maxFractionPendingThreshold
 			if doneShedding {
@@ -572,10 +566,10 @@ func (cs *clusterState) rebalanceStores(
 			// rebalancing to work well is not in scope.
 			if doneShedding {
 				log.KvDistribution.VInfof(ctx, 2, "store s%d is done shedding, moving to next store", store.StoreID)
-				shouldContinue = true
-				break
+				return false, true
 			}
-		}
+			return false, false
+		}()
 		if shouldReturnEarly {
 			return changes
 		}
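For readers skimming the diff: the change replaces the mutable `shouldReturnEarly`/`shouldContinue` flags and the `break` statements that set them with an immediately invoked function literal whose two boolean results drive the loop's control flow. Below is a minimal, self-contained sketch of that pattern; the names `process`, `stores`, and `maxMoves` are hypothetical stand-ins, not the allocator's actual types or limits.

```go
// Sketch of the refactor: the per-item loop body becomes an immediately
// invoked func literal returning (shouldReturnEarly, shouldContinue), so
// every exit path is an explicit return instead of a flag set before break.
package main

import "fmt"

const maxMoves = 2 // hypothetical cap, standing in for maxRangeMoveCount

func process(stores []string) []string {
	var applied []string
	for _, store := range stores {
		shouldReturnEarly, shouldContinue := func() (bool, bool) {
			if store == "" {
				return false, true // skip this store, like `continue`
			}
			applied = append(applied, store)
			if len(applied) >= maxMoves {
				return true, false // stop entirely, like an early `return`
			}
			return false, false // fall through to the rest of the loop body
		}()
		if shouldReturnEarly {
			return applied
		}
		if shouldContinue {
			continue
		}
		// ... further per-store work would go here ...
	}
	return applied
}

func main() {
	fmt.Println(process([]string{"s1", "", "s2", "s3"})) // prints [s1 s2]
}
```

The payoff of this shape is that a new early-exit condition must return explicitly; it cannot forget to set a flag before breaking out of the enclosing block.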