
Commit 5fa041d

craig[bot] and tbg committed
Merge #158188
158188: mmaprototype: replace VInfof with VEventf r=tbg a=tbg

They are the same, except that VEventf always logs to the trace when one is present. This will be important as we will rely more on trace-based logging soon.

Extracted from #157820.

Epic: CRDB-55052

Co-authored-by: Tobias Grieger <[email protected]>
2 parents: fd29a93 + 423ad32 · commit 5fa041d
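The practical difference the commit message describes can be sketched as follows. This is a minimal, hand-written illustration, not code from the commit: the helper logVEventExample and its rangeID argument are hypothetical, only the two logging calls are taken verbatim from the diff, and the sketch assumes CockroachDB's util/log package with its KvDistribution channel, as used throughout these files.

    package mmaprototype

    import (
    	"context"

    	"github.com/cockroachdb/cockroach/pkg/util/log"
    )

    // logVEventExample is a hypothetical helper illustrating the swap made in
    // this commit. Both calls are gated on verbosity level 2 for log output;
    // per the commit message, VEventf additionally records the message in the
    // tracing span carried by ctx when one is present, which is what the
    // upcoming trace-based logging relies on.
    func logVEventExample(ctx context.Context, rangeID int64) {
    	// Old call: verbosity-gated logging to the KvDistribution channel only.
    	log.KvDistribution.VInfof(ctx, 2, "skipping r%d: has pending changes", rangeID)
    	// New call: same log output, plus a trace event when ctx carries a span.
    	log.KvDistribution.VEventf(ctx, 2, "skipping r%d: has pending changes", rangeID)
    }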

File tree

5 files changed (+57 lines, -46 lines)


pkg/kv/kvserver/allocator/mmaprototype/allocator_state.go

Lines changed: 8 additions & 8 deletions
@@ -538,7 +538,7 @@ func sortTargetCandidateSetAndPick(
 }
 }
 if j == 0 {
-log.KvDistribution.VInfof(ctx, 2, "sortTargetCandidateSetAndPick: no candidates due to disk space util")
+log.KvDistribution.VEventf(ctx, 2, "sortTargetCandidateSetAndPick: no candidates due to disk space util")
 return 0
 }
 // Every candidate in [0:j] has same diversity and is sorted by increasing
@@ -594,15 +594,15 @@ func sortTargetCandidateSetAndPick(
 if cand.maxFractionPendingIncrease > epsilon && discardedCandsHadNoPendingChanges {
 discardedCandsHadNoPendingChanges = false
 }
-log.KvDistribution.VInfof(ctx, 2,
+log.KvDistribution.VEventf(ctx, 2,
 "candiate store %v was discarded: sls=%v", cand.StoreID, cand.storeLoadSummary)
 continue
 }
 cands.candidates[j] = cand
 j++
 }
 if j == 0 {
-log.KvDistribution.VInfof(ctx, 2, "sortTargetCandidateSetAndPick: no candidates due to load")
+log.KvDistribution.VEventf(ctx, 2, "sortTargetCandidateSetAndPick: no candidates due to load")
 return 0
 }
 lowestLoadSet = cands.candidates[0].sls
@@ -633,7 +633,7 @@ func sortTargetCandidateSetAndPick(
 }
 // INVARIANT: lowestLoad <= loadThreshold.
 if lowestLoadSet == loadThreshold && ignoreLevel < ignoreHigherThanLoadThreshold {
-log.KvDistribution.VInfof(ctx, 2, "sortTargetCandidateSetAndPick: no candidates due to equal to loadThreshold")
+log.KvDistribution.VEventf(ctx, 2, "sortTargetCandidateSetAndPick: no candidates due to equal to loadThreshold")
 return 0
 }
 // INVARIANT: lowestLoad < loadThreshold ||
@@ -643,7 +643,7 @@ func sortTargetCandidateSetAndPick(
 // [loadNoChange, loadThreshold), or loadThreshold && ignoreHigherThanLoadThreshold.
 if lowestLoadSet >= loadNoChange &&
 (!discardedCandsHadNoPendingChanges || ignoreLevel == ignoreLoadNoChangeAndHigher) {
-log.KvDistribution.VInfof(ctx, 2, "sortTargetCandidateSetAndPick: no candidates due to loadNoChange")
+log.KvDistribution.VEventf(ctx, 2, "sortTargetCandidateSetAndPick: no candidates due to loadNoChange")
 return 0
 }
 if lowestLoadSet != highestLoadSet {
@@ -663,7 +663,7 @@ func sortTargetCandidateSetAndPick(
 j++
 }
 if j == 0 {
-log.KvDistribution.VInfof(ctx, 2, "sortTargetCandidateSetAndPick: no candidates due to lease preference")
+log.KvDistribution.VEventf(ctx, 2, "sortTargetCandidateSetAndPick: no candidates due to lease preference")
 return 0
 }
 cands.candidates = cands.candidates[:j]
@@ -676,7 +676,7 @@ func sortTargetCandidateSetAndPick(
 })
 lowestOverloadedLoad := cands.candidates[0].dimSummary[overloadedDim]
 if lowestOverloadedLoad >= loadNoChange {
-log.KvDistribution.VInfof(ctx, 2, "sortTargetCandidateSetAndPick: no candidates due to overloadedDim")
+log.KvDistribution.VEventf(ctx, 2, "sortTargetCandidateSetAndPick: no candidates due to overloadedDim")
 return 0
 }
 j = 1
@@ -693,7 +693,7 @@ func sortTargetCandidateSetAndPick(
 fmt.Fprintf(&b, " s%v(%v)", cands.candidates[i].StoreID, cands.candidates[i].sls)
 }
 j = rng.Intn(j)
-log.KvDistribution.VInfof(ctx, 2, "sortTargetCandidateSetAndPick: candidates:%s, picked s%v", b.String(), cands.candidates[j].StoreID)
+log.KvDistribution.VEventf(ctx, 2, "sortTargetCandidateSetAndPick: candidates:%s, picked s%v", b.String(), cands.candidates[j].StoreID)
 if ignoreLevel == ignoreLoadNoChangeAndHigher && cands.candidates[j].sls >= loadNoChange ||
 ignoreLevel == ignoreLoadThresholdAndHigher && cands.candidates[j].sls >= loadThreshold ||
 ignoreLevel == ignoreHigherThanLoadThreshold && cands.candidates[j].sls > loadThreshold {

pkg/kv/kvserver/allocator/mmaprototype/cluster_state.go

Lines changed: 7 additions & 7 deletions
@@ -1441,7 +1441,7 @@ func (cs *clusterState) processStoreLoadMsg(ctx context.Context, storeMsg *Store
 // corresponding delta adjustment as the reported load already contains the
 // effect.
 for _, change := range ss.computePendingChangesReflectedInLatestLoad(storeMsg.LoadTime) {
-log.KvDistribution.VInfof(ctx, 2, "s%d not-pending %v", storeMsg.StoreID, change)
+log.KvDistribution.VEventf(ctx, 2, "s%d not-pending %v", storeMsg.StoreID, change)
 delete(ss.adjusted.loadPendingChanges, change.changeID)
 }

@@ -1451,7 +1451,7 @@ func (cs *clusterState) processStoreLoadMsg(ctx context.Context, storeMsg *Store
 // replicas.
 cs.applyChangeLoadDelta(change.ReplicaChange)
 }
-log.KvDistribution.VInfof(ctx, 2, "processStoreLoadMsg for store s%v: %v",
+log.KvDistribution.VEventf(ctx, 2, "processStoreLoadMsg for store s%v: %v",
 storeMsg.StoreID, ss.adjusted.load)
 }

@@ -2055,7 +2055,7 @@ func (cs *clusterState) addPendingRangeChange(change PendingRangeChange) {
 storeState.adjusted.loadPendingChanges[cid] = pendingChange
 rangeState.pendingChanges = append(rangeState.pendingChanges, pendingChange)
 rangeState.pendingChangeNoRollback = false
-log.KvDistribution.VInfof(context.Background(), 3,
+log.KvDistribution.VEventf(context.Background(), 3,
 "addPendingRangeChange: change_id=%v, range_id=%v, change=%v",
 cid, rangeID, pendingChange.ReplicaChange)
 pendingChanges = append(pendingChanges, pendingChange)
@@ -2183,7 +2183,7 @@ func (cs *clusterState) applyReplicaChange(change ReplicaChange, applyLoadChange
 panic(fmt.Sprintf("range %v not found in cluster state", change.rangeID))
 }

-log.KvDistribution.VInfof(context.Background(), 2, "applying replica change %v to range %d on store %d",
+log.KvDistribution.VEventf(context.Background(), 2, "applying replica change %v to range %d on store %d",
 change, change.rangeID, change.target.StoreID)
 if change.isRemoval() {
 delete(storeState.adjusted.replicas, change.rangeID)
@@ -2367,11 +2367,11 @@ func (cs *clusterState) canShedAndAddLoad(
 var reason strings.Builder
 defer func() {
 if canAddLoad {
-log.KvDistribution.VInfof(ctx, 3, "can add load to n%vs%v: %v targetSLS[%v] srcSLS[%v]",
+log.KvDistribution.VEventf(ctx, 3, "can add load to n%vs%v: %v targetSLS[%v] srcSLS[%v]",
 targetNS.NodeID, targetSS.StoreID, canAddLoad, targetSLS, srcSLS)
 } else {
-log.KvDistribution.VInfof(ctx, 2, "cannot add load to n%vs%v: due to %s", targetNS.NodeID, targetSS.StoreID, reason.String())
-log.KvDistribution.VInfof(ctx, 2, "[target_sls:%v,src_sls:%v]", targetSLS, srcSLS)
+log.KvDistribution.VEventf(ctx, 2, "cannot add load to n%vs%v: due to %s", targetNS.NodeID, targetSS.StoreID, reason.String())
+log.KvDistribution.VEventf(ctx, 2, "[target_sls:%v,src_sls:%v]", targetSLS, srcSLS)
 }
 }()
 if targetSLS.highDiskSpaceUtilization {

pkg/kv/kvserver/allocator/mmaprototype/cluster_state_rebalance_stores.go

Lines changed: 30 additions & 30 deletions
@@ -68,7 +68,7 @@ func (cs *clusterState) rebalanceStores(
 ) []PendingRangeChange {
 now := cs.ts.Now()
 ctx = logtags.AddTag(ctx, "mmaid", mmaid.Add(1))
-log.KvDistribution.VInfof(ctx, 2, "rebalanceStores begins")
+log.KvDistribution.VEventf(ctx, 2, "rebalanceStores begins")
 // To select which stores are overloaded, we use a notion of overload that
 // is based on cluster means (and of course individual store/node
 // capacities). We do not want to loop through all ranges in the cluster,
@@ -99,7 +99,7 @@ func (cs *clusterState) rebalanceStores(
 // via replicate_queue.go.
 for storeID, ss := range cs.stores {
 sls := cs.meansMemo.getStoreLoadSummary(ctx, clusterMeans, storeID, ss.loadSeqNum)
-log.KvDistribution.VInfof(ctx, 2, "evaluating s%d: node load %s, store load %s, worst dim %s",
+log.KvDistribution.VEventf(ctx, 2, "evaluating s%d: node load %s, store load %s, worst dim %s",
 storeID, sls.nls, sls.sls, sls.worstDim)

 if sls.sls >= overloadSlow {
@@ -117,10 +117,10 @@ func (cs *clusterState) rebalanceStores(
 if ss.maxFractionPendingDecrease < maxFractionPendingThreshold &&
 // There should be no pending increase, since that can be an overestimate.
 ss.maxFractionPendingIncrease < epsilon {
-log.KvDistribution.VInfof(ctx, 2, "store s%v was added to shedding store list", storeID)
+log.KvDistribution.VEventf(ctx, 2, "store s%v was added to shedding store list", storeID)
 sheddingStores = append(sheddingStores, sheddingStore{StoreID: storeID, storeLoadSummary: sls})
 } else {
-log.KvDistribution.VInfof(ctx, 2,
+log.KvDistribution.VEventf(ctx, 2,
 "skipping overloaded store s%d (worst dim: %s): pending decrease %.2f >= threshold %.2f or pending increase %.2f >= epsilon",
 storeID, sls.worstDim, ss.maxFractionPendingDecrease, maxFractionPendingThreshold, ss.maxFractionPendingIncrease)
 }
@@ -234,11 +234,11 @@ func (re *rebalanceEnv) rebalanceStore(
 return
 }
 } else {
-log.KvDistribution.VInfof(ctx, 2, "skipping lease shedding: s%v != local store s%s or cpu is not overloaded: %v",
+log.KvDistribution.VEventf(ctx, 2, "skipping lease shedding: s%v != local store s%s or cpu is not overloaded: %v",
 ss.StoreID, localStoreID, store.dimSummary[CPURate])
 }

-log.KvDistribution.VInfof(ctx, 2, "attempting to shed replicas next")
+log.KvDistribution.VEventf(ctx, 2, "attempting to shed replicas next")
 re.rebalanceReplicas(ctx, store, ss, localStoreID)
 }

@@ -248,7 +248,7 @@ func (re *rebalanceEnv) rebalanceReplicas(
 doneShedding := false
 if store.StoreID != localStoreID && store.dimSummary[CPURate] >= overloadSlow &&
 re.now.Sub(ss.overloadStartTime) < remoteStoreLeaseSheddingGraceDuration {
-log.KvDistribution.VInfof(ctx, 2, "skipping remote store s%d: in lease shedding grace period", store.StoreID)
+log.KvDistribution.VEventf(ctx, 2, "skipping remote store s%d: in lease shedding grace period", store.StoreID)
 return
 }
 // If the node is cpu overloaded, or the store/node is not fdOK, exclude
@@ -261,7 +261,7 @@ func (re *rebalanceEnv) rebalanceReplicas(
 for _, storeID := range re.nodes[nodeID].stores {
 re.scratch.storesToExclude.insert(storeID)
 }
-log.KvDistribution.VInfof(ctx, 2, "excluding all stores on n%d due to overload/fd status", nodeID)
+log.KvDistribution.VEventf(ctx, 2, "excluding all stores on n%d due to overload/fd status", nodeID)
 } else {
 // This store is excluded of course.
 re.scratch.storesToExclude.insert(store.StoreID)
@@ -278,11 +278,11 @@ func (re *rebalanceEnv) rebalanceReplicas(
 rstate := re.ranges[rangeID]
 if len(rstate.pendingChanges) > 0 {
 // If the range has pending changes, don't make more changes.
-log.KvDistribution.VInfof(ctx, 2, "skipping r%d: has pending changes", rangeID)
+log.KvDistribution.VEventf(ctx, 2, "skipping r%d: has pending changes", rangeID)
 continue
 }
 if re.now.Sub(rstate.lastFailedChange) < re.lastFailedChangeDelayDuration {
-log.KvDistribution.VInfof(ctx, 2, "skipping r%d: too soon after failed change", rangeID)
+log.KvDistribution.VEventf(ctx, 2, "skipping r%d: too soon after failed change", rangeID)
 continue
 }
 re.ensureAnalyzedConstraints(rstate)
@@ -305,7 +305,7 @@ func (re *rebalanceEnv) rebalanceReplicas(
 if err != nil {
 // This range has some constraints that are violated. Let those be
 // fixed first.
-log.KvDistribution.VInfof(ctx, 2, "skipping r%d: constraint violation needs fixing first: %v", rangeID, err)
+log.KvDistribution.VEventf(ctx, 2, "skipping r%d: constraint violation needs fixing first: %v", rangeID, err)
 continue
 }
 re.scratch.disj[0] = conj
@@ -326,7 +326,7 @@ func (re *rebalanceEnv) rebalanceReplicas(
 }
 // TODO(sumeer): eliminate cands allocations by passing a scratch slice.
 cands, ssSLS := re.computeCandidatesForRange(ctx, re.scratch.disj[:], re.scratch.storesToExcludeForRange, store.StoreID)
-log.KvDistribution.VInfof(ctx, 2, "considering replica-transfer r%v from s%v: store load %v",
+log.KvDistribution.VEventf(ctx, 2, "considering replica-transfer r%v from s%v: store load %v",
 rangeID, store.StoreID, ss.adjusted.load)
 if log.V(2) {
 log.KvDistribution.Infof(ctx, "candidates are:")
@@ -336,7 +336,7 @@ func (re *rebalanceEnv) rebalanceReplicas(
 }

 if len(cands.candidates) == 0 {
-log.KvDistribution.VInfof(ctx, 2, "result(failed): no candidates found for r%d after exclusions", rangeID)
+log.KvDistribution.VEventf(ctx, 2, "result(failed): no candidates found for r%d after exclusions", rangeID)
 continue
 }
 var rlocalities replicasLocalityTiers
@@ -370,17 +370,17 @@ func (re *rebalanceEnv) rebalanceReplicas(
 overloadDur := re.now.Sub(ss.overloadStartTime)
 if overloadDur > ignoreHigherThanLoadThresholdGraceDuration {
 ignoreLevel = ignoreHigherThanLoadThreshold
-log.KvDistribution.VInfof(ctx, 3, "using level %v (threshold:%v) for r%d based on overload duration %v",
+log.KvDistribution.VEventf(ctx, 3, "using level %v (threshold:%v) for r%d based on overload duration %v",
 ignoreLevel, ssSLS.sls, rangeID, overloadDur)
 } else if overloadDur > ignoreLoadThresholdAndHigherGraceDuration {
 ignoreLevel = ignoreLoadThresholdAndHigher
-log.KvDistribution.VInfof(ctx, 3, "using level %v (threshold:%v) for r%d based on overload duration %v",
+log.KvDistribution.VEventf(ctx, 3, "using level %v (threshold:%v) for r%d based on overload duration %v",
 ignoreLevel, ssSLS.sls, rangeID, overloadDur)
 }
 targetStoreID := sortTargetCandidateSetAndPick(
 ctx, cands, ssSLS.sls, ignoreLevel, loadDim, re.rng)
 if targetStoreID == 0 {
-log.KvDistribution.VInfof(ctx, 2, "result(failed): no suitable target found among candidates for r%d "+
+log.KvDistribution.VEventf(ctx, 2, "result(failed): no suitable target found among candidates for r%d "+
 "(threshold %s; %s)", rangeID, ssSLS.sls, ignoreLevel)
 continue
 }
@@ -390,7 +390,7 @@ func (re *rebalanceEnv) rebalanceReplicas(
 addedLoad[CPURate] = rstate.load.RaftCPU
 }
 if !re.canShedAndAddLoad(ctx, ss, targetSS, addedLoad, cands.means, false, loadDim) {
-log.KvDistribution.VInfof(ctx, 2, "result(failed): cannot shed from s%d to s%d for r%d: delta load %v",
+log.KvDistribution.VEventf(ctx, 2, "result(failed): cannot shed from s%d to s%d for r%d: delta load %v",
 store.StoreID, targetStoreID, rangeID, addedLoad)
 continue
 }
@@ -417,16 +417,16 @@ func (re *rebalanceEnv) rebalanceReplicas(
 re.addPendingRangeChange(rangeChange)
 re.changes = append(re.changes, rangeChange)
 re.rangeMoveCount++
-log.KvDistribution.VInfof(ctx, 2,
+log.KvDistribution.VEventf(ctx, 2,
 "result(success): rebalancing r%v from s%v to s%v [change: %v] with resulting loads source: %v target: %v",
 rangeID, removeTarget.StoreID, addTarget.StoreID, re.changes[len(re.changes)-1], ss.adjusted.load, targetSS.adjusted.load)
 if re.rangeMoveCount >= re.maxRangeMoveCount {
-log.KvDistribution.VInfof(ctx, 2, "s%d has reached max range move count %d: mma returning", store.StoreID, re.maxRangeMoveCount)
+log.KvDistribution.VEventf(ctx, 2, "s%d has reached max range move count %d: mma returning", store.StoreID, re.maxRangeMoveCount)
 return
 }
 doneShedding = ss.maxFractionPendingDecrease >= maxFractionPendingThreshold
 if doneShedding {
-log.KvDistribution.VInfof(ctx, 2, "s%d has reached pending decrease threshold(%.2f>=%.2f) after rebalancing: done shedding with %d left in topk",
+log.KvDistribution.VEventf(ctx, 2, "s%d has reached pending decrease threshold(%.2f>=%.2f) after rebalancing: done shedding with %d left in topk",
 store.StoreID, ss.maxFractionPendingDecrease, maxFractionPendingThreshold, n-(i+1))
 break
 }
@@ -437,15 +437,15 @@ func (re *rebalanceEnv) rebalanceReplicas(
 // moved. Running with underprovisioned clusters and expecting load-based
 // rebalancing to work well is not in scope.
 if doneShedding {
-log.KvDistribution.VInfof(ctx, 2, "store s%d is done shedding, moving to next store", store.StoreID)
+log.KvDistribution.VEventf(ctx, 2, "store s%d is done shedding, moving to next store", store.StoreID)
 return
 }
 }

 func (re *rebalanceEnv) rebalanceLeases(
 ctx context.Context, ss *storeState, store sheddingStore, localStoreID roachpb.StoreID,
 ) bool {
-log.KvDistribution.VInfof(ctx, 2, "local store s%d is CPU overloaded (%v >= %v), attempting lease transfers first",
+log.KvDistribution.VEventf(ctx, 2, "local store s%d is CPU overloaded (%v >= %v), attempting lease transfers first",
 store.StoreID, store.dimSummary[CPURate], overloadSlow)
 // This store is local, and cpu overloaded. Shed leases first.
 //
@@ -460,7 +460,7 @@ func (re *rebalanceEnv) rebalanceLeases(
 rstate := re.ranges[rangeID]
 if len(rstate.pendingChanges) > 0 {
 // If the range has pending changes, don't make more changes.
-log.KvDistribution.VInfof(ctx, 2, "skipping r%d: has pending changes", rangeID)
+log.KvDistribution.VEventf(ctx, 2, "skipping r%d: has pending changes", rangeID)
 continue
 }
 for _, repl := range rstate.replicas {
@@ -480,7 +480,7 @@ func (re *rebalanceEnv) rebalanceLeases(
 }
 }
 if re.now.Sub(rstate.lastFailedChange) < re.lastFailedChangeDelayDuration {
-log.KvDistribution.VInfof(ctx, 2, "skipping r%d: too soon after failed change", rangeID)
+log.KvDistribution.VEventf(ctx, 2, "skipping r%d: too soon after failed change", rangeID)
 continue
 }
 re.ensureAnalyzedConstraints(rstate)
@@ -515,11 +515,11 @@ func (re *rebalanceEnv) rebalanceLeases(
 clear(re.scratch.nodes)
 means := computeMeansForStoreSet(re, candsPL, re.scratch.nodes, re.scratch.stores)
 sls := re.computeLoadSummary(ctx, store.StoreID, &means.storeLoad, &means.nodeLoad)
-log.KvDistribution.VInfof(ctx, 2, "considering lease-transfer r%v from s%v: candidates are %v", rangeID, store.StoreID, candsPL)
+log.KvDistribution.VEventf(ctx, 2, "considering lease-transfer r%v from s%v: candidates are %v", rangeID, store.StoreID, candsPL)
 if sls.dimSummary[CPURate] < overloadSlow {
 // This store is not cpu overloaded relative to these candidates for
 // this range.
-log.KvDistribution.VInfof(ctx, 2, "result(failed): skipping r%d since store not overloaded relative to candidates", rangeID)
+log.KvDistribution.VEventf(ctx, 2, "result(failed): skipping r%d since store not overloaded relative to candidates", rangeID)
 continue
 }
 var candsSet candidateSet
@@ -570,7 +570,7 @@ func (re *rebalanceEnv) rebalanceLeases(
 panic("raft cpu higher than total cpu")
 }
 if !re.canShedAndAddLoad(ctx, ss, targetSS, addedLoad, &means, true, CPURate) {
-log.KvDistribution.VInfof(ctx, 2, "result(failed): cannot shed from s%d to s%d for r%d: delta load %v",
+log.KvDistribution.VEventf(ctx, 2, "result(failed): cannot shed from s%d to s%d for r%d: delta load %v",
 store.StoreID, targetStoreID, rangeID, addedLoad)
 continue
 }
@@ -603,12 +603,12 @@ func (re *rebalanceEnv) rebalanceLeases(
 ss.maxFractionPendingIncrease, ss.maxFractionPendingDecrease,
 targetSS.maxFractionPendingIncrease, targetSS.maxFractionPendingDecrease)
 if re.leaseTransferCount >= re.maxLeaseTransferCount {
-log.KvDistribution.VInfof(ctx, 2, "reached max lease transfer count %d, returning", re.maxLeaseTransferCount)
+log.KvDistribution.VEventf(ctx, 2, "reached max lease transfer count %d, returning", re.maxLeaseTransferCount)
 break
 }
 doneShedding = ss.maxFractionPendingDecrease >= maxFractionPendingThreshold
 if doneShedding {
-log.KvDistribution.VInfof(ctx, 2, "s%d has reached pending decrease threshold(%.2f>=%.2f) after lease transfers: done shedding with %d left in topK",
+log.KvDistribution.VEventf(ctx, 2, "s%d has reached pending decrease threshold(%.2f>=%.2f) after lease transfers: done shedding with %d left in topK",
 store.StoreID, ss.maxFractionPendingDecrease, maxFractionPendingThreshold, n-(i+1))
 break
 }
@@ -621,7 +621,7 @@ func (re *rebalanceEnv) rebalanceLeases(
 // transfer is done and we may still be considering those transfers as
 // pending from a load perspective, so we *may* not be able to do more
 // lease transfers -- so be it.
-log.KvDistribution.VInfof(ctx, 2, "skipping replica transfers for s%d: done shedding=%v, lease_transfers=%d",
+log.KvDistribution.VEventf(ctx, 2, "skipping replica transfers for s%d: done shedding=%v, lease_transfers=%d",
 store.StoreID, doneShedding, localLeaseTransferCount)
 return true
 }
