
Commit f54cc67

kvserver: rename processCallback processCallback to cb processCallback
Previously, the variable name processCallback shadowed its type name, which was not ideal. This commit renames the variable to cb.
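
For context, a minimal standalone sketch (not taken from this patch; the type below is simplified to a plain function type and the real onEnqueueResult method is omitted) of why the old parameter name was awkward: once a parameter is declared as processCallback processCallback, that identifier refers to the value inside the function body, so the type name can no longer be used there, for example to declare a local []processCallback.

package main

import "fmt"

// Simplified stand-in for the kvserver callback type; the real interface and
// its onEnqueueResult method are not reproduced here.
type processCallback func(err error)

// Before: the parameter shadows the type name. Calling the value still works,
// but the type processCallback cannot be referred to inside this body.
func addBefore(prio float64, processCallback processCallback) {
	processCallback(nil)
}

// After: renaming the parameter to cb keeps the type name usable.
func addAfter(prio float64, cb processCallback) {
	var pending []processCallback // the type name is still in scope here
	pending = append(pending, cb)
	for _, c := range pending {
		c(fmt.Errorf("priority %.1f", prio))
	}
}

func main() {
	addBefore(1.0, func(err error) { fmt.Println("before:", err) })
	addAfter(2.0, func(err error) { fmt.Println("after:", err) })
}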

2 files changed: +22 -22 lines changed

pkg/kv/kvserver/queue.go

Lines changed: 20 additions & 20 deletions
@@ -635,17 +635,17 @@ func (h baseQueueHelper) MaybeAdd(
 }
 
 func (h baseQueueHelper) Add(
-	ctx context.Context, repl replicaInQueue, prio float64, processCallback processCallback,
+	ctx context.Context, repl replicaInQueue, prio float64, cb processCallback,
 ) {
-	_, err := h.bq.addInternal(ctx, repl.Desc(), repl.ReplicaID(), prio, processCallback)
+	_, err := h.bq.addInternal(ctx, repl.Desc(), repl.ReplicaID(), prio, cb)
 	if err != nil && log.V(1) {
 		log.Dev.Infof(ctx, "during Add: %s", err)
 	}
 }
 
 type queueHelper interface {
 	MaybeAdd(ctx context.Context, repl replicaInQueue, now hlc.ClockTimestamp)
-	Add(ctx context.Context, repl replicaInQueue, prio float64, processCallback processCallback)
+	Add(ctx context.Context, repl replicaInQueue, prio float64, cb processCallback)
 }
 
 // baseQueueAsyncRateLimited indicates that the base queue async task was rate
@@ -703,12 +703,12 @@ func (bq *baseQueue) MaybeAddAsync(
 // register a process callback that will be invoked when the replica is enqueued
 // or processed.
 func (bq *baseQueue) AddAsyncWithCallback(
-	ctx context.Context, repl replicaInQueue, prio float64, processCallback processCallback,
+	ctx context.Context, repl replicaInQueue, prio float64, cb processCallback,
 ) {
 	if err := bq.Async(ctx, "Add", true /* wait */, func(ctx context.Context, h queueHelper) {
-		h.Add(ctx, repl, prio, processCallback)
+		h.Add(ctx, repl, prio, cb)
 	}); err != nil {
-		processCallback.onEnqueueResult(-1 /*indexOnHeap*/, err)
+		cb.onEnqueueResult(-1 /*indexOnHeap*/, err)
 	}
 }
 
@@ -814,22 +814,22 @@ func (bq *baseQueue) addInternal(
 	desc *roachpb.RangeDescriptor,
 	replicaID roachpb.ReplicaID,
 	priority float64,
-	processCallback processCallback,
+	cb processCallback,
 ) (bool, error) {
 	// NB: this is intentionally outside of bq.mu to avoid having to consider
 	// lock ordering constraints.
 	if !desc.IsInitialized() {
 		// We checked this above in MaybeAdd(), but we need to check it
 		// again for Add().
-		processCallback.onEnqueueResult(-1 /*indexOnHeap*/, errReplicaNotInitialized)
+		cb.onEnqueueResult(-1 /*indexOnHeap*/, errReplicaNotInitialized)
 		return false, errReplicaNotInitialized
 	}
 
 	bq.mu.Lock()
 	defer bq.mu.Unlock()
 
 	if bq.mu.stopped {
-		processCallback.onEnqueueResult(-1 /*indexOnHeap*/, errQueueStopped)
+		cb.onEnqueueResult(-1 /*indexOnHeap*/, errQueueStopped)
 		return false, errQueueStopped
 	}
 
@@ -842,14 +842,14 @@ func (bq *baseQueue) addInternal(
 			if log.V(3) {
 				log.Dev.Infof(ctx, "queue disabled")
 			}
-			processCallback.onEnqueueResult(-1 /*indexOnHeap*/, errQueueDisabled)
+			cb.onEnqueueResult(-1 /*indexOnHeap*/, errQueueDisabled)
 			return false, errQueueDisabled
 		}
 	}
 
 	// If the replica is currently in purgatory, don't re-add it.
 	if _, ok := bq.mu.purgatory[desc.RangeID]; ok {
-		processCallback.onEnqueueResult(-1 /*indexOnHeap*/, errReplicaAlreadyInPurgatory)
+		cb.onEnqueueResult(-1 /*indexOnHeap*/, errReplicaAlreadyInPurgatory)
 		return false, nil
 	}
 
@@ -859,7 +859,7 @@ func (bq *baseQueue) addInternal(
 		if item.processing {
 			wasRequeued := item.requeue
 			item.requeue = true
-			processCallback.onEnqueueResult(-1 /*indexOnHeap*/, errReplicaAlreadyProcessing)
+			cb.onEnqueueResult(-1 /*indexOnHeap*/, errReplicaAlreadyProcessing)
 			return !wasRequeued, nil
 		}
 
@@ -871,8 +871,8 @@ func (bq *baseQueue) addInternal(
 				log.Dev.Infof(ctx, "updating priority: %0.3f -> %0.3f", item.priority, priority)
 			}
 			bq.mu.priorityQ.update(item, priority)
-			// item.index should be updated now based on heap property now.
-			processCallback.onEnqueueResult(item.index /*indexOnHeap*/, nil)
+			// item.index should be updated now based on heap property now.
+			cb.onEnqueueResult(item.index /*indexOnHeap*/, nil)
 		}
 		return false, nil
 	}
@@ -881,7 +881,7 @@ func (bq *baseQueue) addInternal(
 		log.Dev.Infof(ctx, "adding: priority=%0.3f", priority)
 	}
 	item = &replicaItem{rangeID: desc.RangeID, replicaID: replicaID, priority: priority}
-	item.registerCallback(processCallback)
+	item.registerCallback(cb)
 	bq.addLocked(item)
 
 	// If adding this replica has pushed the queue past its maximum size, remove
@@ -895,8 +895,8 @@ func (bq *baseQueue) addInternal(
 			priority, replicaItemToDrop.replicaID)
 		// TODO(wenyihu6): when we introduce base queue max size cluster setting,
 		// remember to invoke this callback when shrinking the size
-		for _, cb := range replicaItemToDrop.callbacks {
-			cb.onEnqueueResult(-1 /*indexOnHeap*/, errDroppedDueToFullQueueSize)
+		for _, callback := range replicaItemToDrop.callbacks {
+			callback.onEnqueueResult(-1 /*indexOnHeap*/, errDroppedDueToFullQueueSize)
 		}
 		bq.removeLocked(replicaItemToDrop)
 	}
@@ -907,7 +907,7 @@ func (bq *baseQueue) addInternal(
 		// No need to signal again.
 	}
 	// Note: it may already be dropped or dropped afterwards.
-	processCallback.onEnqueueResult(item.index /*indexOnHeap*/, nil)
+	cb.onEnqueueResult(item.index /*indexOnHeap*/, nil)
 	return true, nil
 }
 
@@ -1357,7 +1357,7 @@ func (bq *baseQueue) addToPurgatoryLocked(
 	repl replicaInQueue,
 	purgErr PurgatoryError,
 	priorityAtEnqueue float64,
-	processCallback []processCallback,
+	cbs []processCallback,
 ) {
 	bq.mu.AssertHeld()
 
@@ -1386,7 +1386,7 @@ func (bq *baseQueue) addToPurgatoryLocked(
 		replicaID: repl.ReplicaID(),
 		index:     -1,
 		priority:  priorityAtEnqueue,
-		callbacks: processCallback,
+		callbacks: cbs,
 	}
 
 	bq.mu.replicas[repl.GetRangeID()] = item

pkg/kv/kvserver/queue_helpers_testutil.go

Lines changed: 2 additions & 2 deletions
@@ -26,9 +26,9 @@ func (bq *baseQueue) testingAdd(
 // register a process callback that will be invoked when the replica is enqueued
 // or processed.
 func (bq *baseQueue) testingAddWithCallback(
-	ctx context.Context, repl replicaInQueue, priority float64, callback processCallback,
+	ctx context.Context, repl replicaInQueue, priority float64, cb processCallback,
 ) (bool, error) {
-	return bq.addInternal(ctx, repl.Desc(), repl.ReplicaID(), priority, callback)
+	return bq.addInternal(ctx, repl.Desc(), repl.ReplicaID(), priority, cb)
 }
 
 func forceScanAndProcess(ctx context.Context, s *Store, q *baseQueue) error {
