@@ -68,7 +68,7 @@ func (cs *clusterState) rebalanceStores(
 ) []PendingRangeChange {
 	now := cs.ts.Now()
 	ctx = logtags.AddTag(ctx, "mmaid", mmaid.Add(1))
-	log.KvDistribution.VInfof(ctx, 2, "rebalanceStores begins")
+	log.KvDistribution.VEventf(ctx, 2, "rebalanceStores begins")
 	// To select which stores are overloaded, we use a notion of overload that
 	// is based on cluster means (and of course individual store/node
 	// capacities). We do not want to loop through all ranges in the cluster,
@@ -99,7 +99,7 @@ func (cs *clusterState) rebalanceStores(
 	// via replicate_queue.go.
 	for storeID, ss := range cs.stores {
 		sls := cs.meansMemo.getStoreLoadSummary(ctx, clusterMeans, storeID, ss.loadSeqNum)
-		log.KvDistribution.VInfof(ctx, 2, "evaluating s%d: node load %s, store load %s, worst dim %s",
+		log.KvDistribution.VEventf(ctx, 2, "evaluating s%d: node load %s, store load %s, worst dim %s",
 			storeID, sls.nls, sls.sls, sls.worstDim)
 
 		if sls.sls >= overloadSlow {
@@ -117,10 +117,10 @@ func (cs *clusterState) rebalanceStores(
 		if ss.maxFractionPendingDecrease < maxFractionPendingThreshold &&
 			// There should be no pending increase, since that can be an overestimate.
 			ss.maxFractionPendingIncrease < epsilon {
-			log.KvDistribution.VInfof(ctx, 2, "store s%v was added to shedding store list", storeID)
+			log.KvDistribution.VEventf(ctx, 2, "store s%v was added to shedding store list", storeID)
 			sheddingStores = append(sheddingStores, sheddingStore{StoreID: storeID, storeLoadSummary: sls})
 		} else {
-			log.KvDistribution.VInfof(ctx, 2,
+			log.KvDistribution.VEventf(ctx, 2,
 				"skipping overloaded store s%d (worst dim: %s): pending decrease %.2f >= threshold %.2f or pending increase %.2f >= epsilon",
 				storeID, sls.worstDim, ss.maxFractionPendingDecrease, maxFractionPendingThreshold, ss.maxFractionPendingIncrease)
 		}
@@ -234,11 +234,11 @@ func (re *rebalanceEnv) rebalanceStore(
 			return
 		}
 	} else {
-		log.KvDistribution.VInfof(ctx, 2, "skipping lease shedding: s%v != local store s%s or cpu is not overloaded: %v",
+		log.KvDistribution.VEventf(ctx, 2, "skipping lease shedding: s%v != local store s%s or cpu is not overloaded: %v",
 			ss.StoreID, localStoreID, store.dimSummary[CPURate])
 	}
 
-	log.KvDistribution.VInfof(ctx, 2, "attempting to shed replicas next")
+	log.KvDistribution.VEventf(ctx, 2, "attempting to shed replicas next")
 	re.rebalanceReplicas(ctx, store, ss, localStoreID)
 }
 
@@ -248,7 +248,7 @@ func (re *rebalanceEnv) rebalanceReplicas(
 	doneShedding := false
 	if store.StoreID != localStoreID && store.dimSummary[CPURate] >= overloadSlow &&
 		re.now.Sub(ss.overloadStartTime) < remoteStoreLeaseSheddingGraceDuration {
-		log.KvDistribution.VInfof(ctx, 2, "skipping remote store s%d: in lease shedding grace period", store.StoreID)
+		log.KvDistribution.VEventf(ctx, 2, "skipping remote store s%d: in lease shedding grace period", store.StoreID)
 		return
 	}
 	// If the node is cpu overloaded, or the store/node is not fdOK, exclude
@@ -261,7 +261,7 @@ func (re *rebalanceEnv) rebalanceReplicas(
 		for _, storeID := range re.nodes[nodeID].stores {
 			re.scratch.storesToExclude.insert(storeID)
 		}
-		log.KvDistribution.VInfof(ctx, 2, "excluding all stores on n%d due to overload/fd status", nodeID)
+		log.KvDistribution.VEventf(ctx, 2, "excluding all stores on n%d due to overload/fd status", nodeID)
 	} else {
 		// This store is excluded of course.
 		re.scratch.storesToExclude.insert(store.StoreID)
@@ -278,11 +278,11 @@ func (re *rebalanceEnv) rebalanceReplicas(
 		rstate := re.ranges[rangeID]
 		if len(rstate.pendingChanges) > 0 {
 			// If the range has pending changes, don't make more changes.
-			log.KvDistribution.VInfof(ctx, 2, "skipping r%d: has pending changes", rangeID)
+			log.KvDistribution.VEventf(ctx, 2, "skipping r%d: has pending changes", rangeID)
 			continue
 		}
 		if re.now.Sub(rstate.lastFailedChange) < re.lastFailedChangeDelayDuration {
-			log.KvDistribution.VInfof(ctx, 2, "skipping r%d: too soon after failed change", rangeID)
+			log.KvDistribution.VEventf(ctx, 2, "skipping r%d: too soon after failed change", rangeID)
 			continue
 		}
 		re.ensureAnalyzedConstraints(rstate)
@@ -305,7 +305,7 @@ func (re *rebalanceEnv) rebalanceReplicas(
 		if err != nil {
 			// This range has some constraints that are violated. Let those be
 			// fixed first.
-			log.KvDistribution.VInfof(ctx, 2, "skipping r%d: constraint violation needs fixing first: %v", rangeID, err)
+			log.KvDistribution.VEventf(ctx, 2, "skipping r%d: constraint violation needs fixing first: %v", rangeID, err)
 			continue
 		}
 		re.scratch.disj[0] = conj
@@ -326,7 +326,7 @@ func (re *rebalanceEnv) rebalanceReplicas(
 		}
 		// TODO(sumeer): eliminate cands allocations by passing a scratch slice.
 		cands, ssSLS := re.computeCandidatesForRange(ctx, re.scratch.disj[:], re.scratch.storesToExcludeForRange, store.StoreID)
-		log.KvDistribution.VInfof(ctx, 2, "considering replica-transfer r%v from s%v: store load %v",
+		log.KvDistribution.VEventf(ctx, 2, "considering replica-transfer r%v from s%v: store load %v",
 			rangeID, store.StoreID, ss.adjusted.load)
 		if log.V(2) {
 			log.KvDistribution.Infof(ctx, "candidates are:")
@@ -336,7 +336,7 @@ func (re *rebalanceEnv) rebalanceReplicas(
 		}
 
 		if len(cands.candidates) == 0 {
-			log.KvDistribution.VInfof(ctx, 2, "result(failed): no candidates found for r%d after exclusions", rangeID)
+			log.KvDistribution.VEventf(ctx, 2, "result(failed): no candidates found for r%d after exclusions", rangeID)
 			continue
 		}
 		var rlocalities replicasLocalityTiers
@@ -370,17 +370,17 @@ func (re *rebalanceEnv) rebalanceReplicas(
 		overloadDur := re.now.Sub(ss.overloadStartTime)
 		if overloadDur > ignoreHigherThanLoadThresholdGraceDuration {
 			ignoreLevel = ignoreHigherThanLoadThreshold
-			log.KvDistribution.VInfof(ctx, 3, "using level %v (threshold:%v) for r%d based on overload duration %v",
+			log.KvDistribution.VEventf(ctx, 3, "using level %v (threshold:%v) for r%d based on overload duration %v",
 				ignoreLevel, ssSLS.sls, rangeID, overloadDur)
 		} else if overloadDur > ignoreLoadThresholdAndHigherGraceDuration {
 			ignoreLevel = ignoreLoadThresholdAndHigher
-			log.KvDistribution.VInfof(ctx, 3, "using level %v (threshold:%v) for r%d based on overload duration %v",
+			log.KvDistribution.VEventf(ctx, 3, "using level %v (threshold:%v) for r%d based on overload duration %v",
 				ignoreLevel, ssSLS.sls, rangeID, overloadDur)
 		}
 		targetStoreID := sortTargetCandidateSetAndPick(
 			ctx, cands, ssSLS.sls, ignoreLevel, loadDim, re.rng)
 		if targetStoreID == 0 {
-			log.KvDistribution.VInfof(ctx, 2, "result(failed): no suitable target found among candidates for r%d " +
+			log.KvDistribution.VEventf(ctx, 2, "result(failed): no suitable target found among candidates for r%d " +
 				"(threshold %s; %s)", rangeID, ssSLS.sls, ignoreLevel)
 			continue
 		}
@@ -390,7 +390,7 @@ func (re *rebalanceEnv) rebalanceReplicas(
 			addedLoad[CPURate] = rstate.load.RaftCPU
 		}
 		if !re.canShedAndAddLoad(ctx, ss, targetSS, addedLoad, cands.means, false, loadDim) {
-			log.KvDistribution.VInfof(ctx, 2, "result(failed): cannot shed from s%d to s%d for r%d: delta load %v",
+			log.KvDistribution.VEventf(ctx, 2, "result(failed): cannot shed from s%d to s%d for r%d: delta load %v",
 				store.StoreID, targetStoreID, rangeID, addedLoad)
 			continue
 		}
@@ -417,16 +417,16 @@ func (re *rebalanceEnv) rebalanceReplicas(
 		re.addPendingRangeChange(rangeChange)
 		re.changes = append(re.changes, rangeChange)
 		re.rangeMoveCount++
-		log.KvDistribution.VInfof(ctx, 2,
+		log.KvDistribution.VEventf(ctx, 2,
 			"result(success): rebalancing r%v from s%v to s%v [change: %v] with resulting loads source: %v target: %v",
 			rangeID, removeTarget.StoreID, addTarget.StoreID, re.changes[len(re.changes)-1], ss.adjusted.load, targetSS.adjusted.load)
 		if re.rangeMoveCount >= re.maxRangeMoveCount {
-			log.KvDistribution.VInfof(ctx, 2, "s%d has reached max range move count %d: mma returning", store.StoreID, re.maxRangeMoveCount)
+			log.KvDistribution.VEventf(ctx, 2, "s%d has reached max range move count %d: mma returning", store.StoreID, re.maxRangeMoveCount)
 			return
 		}
 		doneShedding = ss.maxFractionPendingDecrease >= maxFractionPendingThreshold
 		if doneShedding {
-			log.KvDistribution.VInfof(ctx, 2, "s%d has reached pending decrease threshold(%.2f>=%.2f) after rebalancing: done shedding with %d left in topk",
+			log.KvDistribution.VEventf(ctx, 2, "s%d has reached pending decrease threshold(%.2f>=%.2f) after rebalancing: done shedding with %d left in topk",
 				store.StoreID, ss.maxFractionPendingDecrease, maxFractionPendingThreshold, n-(i+1))
 			break
 		}
@@ -437,15 +437,15 @@ func (re *rebalanceEnv) rebalanceReplicas(
 	// moved. Running with underprovisioned clusters and expecting load-based
 	// rebalancing to work well is not in scope.
 	if doneShedding {
-		log.KvDistribution.VInfof(ctx, 2, "store s%d is done shedding, moving to next store", store.StoreID)
+		log.KvDistribution.VEventf(ctx, 2, "store s%d is done shedding, moving to next store", store.StoreID)
 		return
 	}
 }
 
 func (re *rebalanceEnv) rebalanceLeases(
 	ctx context.Context, ss *storeState, store sheddingStore, localStoreID roachpb.StoreID,
 ) bool {
-	log.KvDistribution.VInfof(ctx, 2, "local store s%d is CPU overloaded (%v >= %v), attempting lease transfers first",
+	log.KvDistribution.VEventf(ctx, 2, "local store s%d is CPU overloaded (%v >= %v), attempting lease transfers first",
 		store.StoreID, store.dimSummary[CPURate], overloadSlow)
 	// This store is local, and cpu overloaded. Shed leases first.
 	//
@@ -460,7 +460,7 @@ func (re *rebalanceEnv) rebalanceLeases(
 		rstate := re.ranges[rangeID]
 		if len(rstate.pendingChanges) > 0 {
 			// If the range has pending changes, don't make more changes.
-			log.KvDistribution.VInfof(ctx, 2, "skipping r%d: has pending changes", rangeID)
+			log.KvDistribution.VEventf(ctx, 2, "skipping r%d: has pending changes", rangeID)
 			continue
 		}
 		for _, repl := range rstate.replicas {
@@ -480,7 +480,7 @@ func (re *rebalanceEnv) rebalanceLeases(
 			}
 		}
 		if re.now.Sub(rstate.lastFailedChange) < re.lastFailedChangeDelayDuration {
-			log.KvDistribution.VInfof(ctx, 2, "skipping r%d: too soon after failed change", rangeID)
+			log.KvDistribution.VEventf(ctx, 2, "skipping r%d: too soon after failed change", rangeID)
 			continue
 		}
 		re.ensureAnalyzedConstraints(rstate)
@@ -515,11 +515,11 @@ func (re *rebalanceEnv) rebalanceLeases(
 		clear(re.scratch.nodes)
 		means := computeMeansForStoreSet(re, candsPL, re.scratch.nodes, re.scratch.stores)
 		sls := re.computeLoadSummary(ctx, store.StoreID, &means.storeLoad, &means.nodeLoad)
-		log.KvDistribution.VInfof(ctx, 2, "considering lease-transfer r%v from s%v: candidates are %v", rangeID, store.StoreID, candsPL)
+		log.KvDistribution.VEventf(ctx, 2, "considering lease-transfer r%v from s%v: candidates are %v", rangeID, store.StoreID, candsPL)
 		if sls.dimSummary[CPURate] < overloadSlow {
 			// This store is not cpu overloaded relative to these candidates for
 			// this range.
-			log.KvDistribution.VInfof(ctx, 2, "result(failed): skipping r%d since store not overloaded relative to candidates", rangeID)
+			log.KvDistribution.VEventf(ctx, 2, "result(failed): skipping r%d since store not overloaded relative to candidates", rangeID)
 			continue
 		}
 		var candsSet candidateSet
@@ -570,7 +570,7 @@ func (re *rebalanceEnv) rebalanceLeases(
 			panic("raft cpu higher than total cpu")
 		}
 		if !re.canShedAndAddLoad(ctx, ss, targetSS, addedLoad, &means, true, CPURate) {
-			log.KvDistribution.VInfof(ctx, 2, "result(failed): cannot shed from s%d to s%d for r%d: delta load %v",
+			log.KvDistribution.VEventf(ctx, 2, "result(failed): cannot shed from s%d to s%d for r%d: delta load %v",
 				store.StoreID, targetStoreID, rangeID, addedLoad)
 			continue
 		}
@@ -603,12 +603,12 @@ func (re *rebalanceEnv) rebalanceLeases(
 			ss.maxFractionPendingIncrease, ss.maxFractionPendingDecrease,
 			targetSS.maxFractionPendingIncrease, targetSS.maxFractionPendingDecrease)
 		if re.leaseTransferCount >= re.maxLeaseTransferCount {
-			log.KvDistribution.VInfof(ctx, 2, "reached max lease transfer count %d, returning", re.maxLeaseTransferCount)
+			log.KvDistribution.VEventf(ctx, 2, "reached max lease transfer count %d, returning", re.maxLeaseTransferCount)
 			break
 		}
 		doneShedding = ss.maxFractionPendingDecrease >= maxFractionPendingThreshold
 		if doneShedding {
-			log.KvDistribution.VInfof(ctx, 2, "s%d has reached pending decrease threshold(%.2f>=%.2f) after lease transfers: done shedding with %d left in topK",
+			log.KvDistribution.VEventf(ctx, 2, "s%d has reached pending decrease threshold(%.2f>=%.2f) after lease transfers: done shedding with %d left in topK",
 				store.StoreID, ss.maxFractionPendingDecrease, maxFractionPendingThreshold, n-(i+1))
 			break
 		}
@@ -621,7 +621,7 @@ func (re *rebalanceEnv) rebalanceLeases(
 		// transfer is done and we may still be considering those transfers as
 		// pending from a load perspective, so we *may* not be able to do more
 		// lease transfers -- so be it.
-		log.KvDistribution.VInfof(ctx, 2, "skipping replica transfers for s%d: done shedding=%v, lease_transfers=%d",
+		log.KvDistribution.VEventf(ctx, 2, "skipping replica transfers for s%d: done shedding=%v, lease_transfers=%d",
 			store.StoreID, doneShedding, localLeaseTransferCount)
 		return true
 	}