
Commit e4700ff

craig[bot] and sumeerbhola committed
162285: kvserver: remove kv.Sender implementation from Store, Stores, Replica r=tbg a=sumeerbhola

Previously, Store, Stores, and Replica all implemented kv.Sender via their Send methods. This allowed callers to use them interchangeably with other Senders, but obscured an important detail: these Send methods were called from code that did not integrate with admission control.

This commit removes the Send methods from these types. They now implement only SenderWithWriteBytes, which returns the StoreWriteBytes that admission-control-aware callers must handle appropriately. For test code that needs a kv.Sender, ToSenderForTesting provides an explicit wrapper that discards the StoreWriteBytes. The "ForTesting" suffix makes it clear that this wrapper bypasses admission control accounting and should only be used in tests.

This change makes the admission control bypass explicit: any code path that converts these types to kv.Sender must now visibly call ToSenderForTesting, making it easy to audit which paths skip admission control.

Release note: None

Epic: None

Co-authored-by: sumeerbhola <sumeer@cockroachlabs.com>
2 parents b9e2c00 + de0f302 commit e4700ff
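
To make the described API shape concrete, here is a minimal, self-contained Go sketch of the adapter pattern the commit message describes: a SenderWithWriteBytes-style interface whose conversion to a plain Sender discards the write-bytes accounting. All type names below (Sender, SenderWithWriteBytes, StoreWriteBytes, BatchRequest, fakeStore) are simplified stand-ins for illustration, not the real cockroachdb packages; only the ToSenderForTesting name and the discard-the-accounting behavior come from the commit message.

// Minimal sketch of the ToSenderForTesting adapter pattern.
// Stand-in types only; not the real cockroachdb packages.
package main

import (
	"context"
	"fmt"
)

// BatchRequest and BatchResponse stand in for kvpb.BatchRequest / kvpb.BatchResponse.
type BatchRequest struct{ Key string }
type BatchResponse struct{ Note string }

// StoreWriteBytes stands in for the per-batch write accounting that
// admission-control-aware callers must hand back after the request completes.
type StoreWriteBytes struct{ Bytes int64 }

// Sender mirrors the shape of kv.Sender: Send returns no accounting value.
type Sender interface {
	Send(ctx context.Context, ba *BatchRequest) (*BatchResponse, error)
}

// SenderWithWriteBytes mirrors the interface Store/Stores/Replica keep after
// this change: Send additionally returns the StoreWriteBytes.
type SenderWithWriteBytes interface {
	SendWithWriteBytes(ctx context.Context, ba *BatchRequest) (*BatchResponse, *StoreWriteBytes, error)
}

// senderForTesting adapts a SenderWithWriteBytes to the plain Sender shape by
// dropping the StoreWriteBytes, i.e. it visibly bypasses admission control
// accounting, which is why the constructor carries the ForTesting suffix.
type senderForTesting struct {
	wrapped SenderWithWriteBytes
}

func (s senderForTesting) Send(ctx context.Context, ba *BatchRequest) (*BatchResponse, error) {
	br, _, err := s.wrapped.SendWithWriteBytes(ctx, ba) // accounting value discarded
	return br, err
}

// ToSenderForTesting is the single, auditable conversion point that test code
// calls to treat a store or replica as a plain Sender.
func ToSenderForTesting(s SenderWithWriteBytes) Sender {
	return senderForTesting{wrapped: s}
}

// fakeStore is a toy SenderWithWriteBytes used to exercise the adapter.
type fakeStore struct{}

func (fakeStore) SendWithWriteBytes(
	ctx context.Context, ba *BatchRequest,
) (*BatchResponse, *StoreWriteBytes, error) {
	return &BatchResponse{Note: "handled " + ba.Key}, &StoreWriteBytes{Bytes: 42}, nil
}

func main() {
	sender := ToSenderForTesting(fakeStore{})
	br, err := sender.Send(context.Background(), &BatchRequest{Key: "a"})
	fmt.Println(br.Note, err) // handled a <nil>
}

In the actual test diffs below, this conversion appears as kvserver.ToSenderForTesting(store) or kvserver.ToSenderForTesting(repl) wherever a test previously called Send directly on a Store, Stores, or Replica.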

27 files changed (+136, −142 lines)

pkg/kv/kvclient/kvcoord/txn_coord_sender_test.go

Lines changed: 2 additions & 2 deletions
@@ -183,7 +183,7 @@ func TestTxnCoordSenderHeartbeat(t *testing.T) {
 kvcoord.NewDistSenderForLocalTestCluster(
 ctx,
 s.Cfg.Settings, &roachpb.NodeDescriptor{NodeID: 1},
-ambient.Tracer, s.Clock, s.Latency, s.Stores, s.Stopper(), s.Gossip,
+ambient.Tracer, s.Clock, s.Latency, kvserver.ToSenderForTesting(s.Stores), s.Stopper(), s.Gossip,
 ),
 )
 quickHeartbeatDB := kv.NewDB(ambient, tsf, s.Clock, s.Stopper())
@@ -287,7 +287,7 @@ func TestDB_PrepareForRetryAfterHeartbeatFailure(t *testing.T) {
 kvcoord.NewDistSenderForLocalTestCluster(
 ctx,
 s.Cfg.Settings, &roachpb.NodeDescriptor{NodeID: 1},
-ambient.Tracer, s.Clock, s.Latency, s.Stores, s.Stopper(), s.Gossip,
+ambient.Tracer, s.Clock, s.Latency, kvserver.ToSenderForTesting(s.Stores), s.Stopper(), s.Gossip,
 ),
 )
 db := kv.NewDB(ambient, tsf, s.Clock, s.Stopper())

pkg/kv/kvserver/client_merge_test.go

Lines changed: 11 additions & 11 deletions
@@ -393,7 +393,7 @@ func mergeWithData(t *testing.T, retries int64) {
 verify(rhsDesc.StartKey.Next().AsRawKey(), rhsRepl.RangeID, newContent)
 
 gArgs := getArgs(lhsDesc.StartKey.Next().AsRawKey())
-if _, pErr := kv.SendWrappedWith(ctx, store, kvpb.Header{
+if _, pErr := kv.SendWrappedWith(ctx, kvserver.ToSenderForTesting(store), kvpb.Header{
 RangeID: rhsDesc.RangeID,
 }, gArgs); !testutils.IsPError(
 pErr, `was not found on s`,
@@ -561,7 +561,7 @@ func mergeCheckingTimestampCaches(
 
 // Write a key to the RHS.
 rhsKey := scratchKey("c")
-if _, pErr := kv.SendWrappedWith(ctx, rhsStore, kvpb.Header{
+if _, pErr := kv.SendWrappedWith(ctx, kvserver.ToSenderForTesting(rhsStore), kvpb.Header{
 RangeID: rhsDesc.RangeID,
 }, incrementArgs(rhsKey, 1)); pErr != nil {
 t.Fatal(pErr)
@@ -577,7 +577,7 @@ func mergeCheckingTimestampCaches(
 ba.Timestamp = readTS
 ba.RangeID = rhsDesc.RangeID
 ba.Add(getArgs(rhsKey))
-if br, pErr := rhsStore.Send(ctx, ba); pErr != nil {
+if br, pErr := kvserver.ToSenderForTesting(rhsStore).Send(ctx, ba); pErr != nil {
 t.Fatal(pErr)
 } else if v, err := br.Responses[0].GetGet().Value.GetInt(); err != nil {
 t.Fatal(err)
@@ -596,7 +596,7 @@ func mergeCheckingTimestampCaches(
 ba.Timestamp = readTS.Next()
 ba.RangeID = rhsDesc.RangeID
 ba.Add(pushTxnArgs(&pusher, &pushee, kvpb.PUSH_ABORT))
-if br, pErr := rhsStore.Send(ctx, ba); pErr != nil {
+if br, pErr := kvserver.ToSenderForTesting(rhsStore).Send(ctx, ba); pErr != nil {
 t.Fatal(pErr)
 } else if txn := br.Responses[0].GetPushTxn().PusheeTxn; txn.Status != roachpb.ABORTED {
 t.Fatalf("expected aborted pushee, but got %v", txn)
@@ -719,7 +719,7 @@ func mergeCheckingTimestampCaches(
 // be the only replica that does not apply the proposal.
 go func() {
 incArgs := incrementArgs(lhsKey, 4)
-_, pErr := kv.SendWrappedWith(ctx, lhsStore, kvpb.Header{RangeID: lhsDesc.RangeID}, incArgs)
+_, pErr := kv.SendWrappedWith(ctx, kvserver.ToSenderForTesting(lhsStore), kvpb.Header{RangeID: lhsDesc.RangeID}, incArgs)
 incChan <- pErr
 }()
 // NB: the operation won't complete, so peek below Raft and wait for
@@ -749,7 +749,7 @@ func mergeCheckingTimestampCaches(
 go func() {
 truncArgs := truncateLogArgs(truncIndex, lhsDesc.RangeID)
 truncArgs.Key = lhsKey
-_, pErr := kv.SendWrappedWith(ctx, lhsStore, kvpb.Header{RangeID: lhsDesc.RangeID}, truncArgs)
+_, pErr := kv.SendWrappedWith(ctx, kvserver.ToSenderForTesting(lhsStore), kvpb.Header{RangeID: lhsDesc.RangeID}, truncArgs)
 truncChan <- pErr
 }()
 // NB: the operation won't complete, so peek below Raft and wait for
@@ -858,7 +858,7 @@ func mergeCheckingTimestampCaches(
 ba.Timestamp = readTS
 ba.RangeID = lhsDesc.RangeID
 ba.Add(incrementArgs(rhsKey, 1))
-if br, pErr := lhsStore.Send(ctx, ba); pErr != nil {
+if br, pErr := kvserver.ToSenderForTesting(lhsStore).Send(ctx, ba); pErr != nil {
 t.Fatal(pErr)
 } else if br.Timestamp.LessEq(readTS) {
 t.Fatalf("expected write to execute after %v, but executed at %v", readTS, br.Timestamp)
@@ -883,7 +883,7 @@ func mergeCheckingTimestampCaches(
 } else {
 expReason = kvpb.ABORT_REASON_ABORTED_RECORD_FOUND
 }
-if _, pErr := lhsStore.Send(ctx, ba); pErr == nil {
+if _, pErr := kvserver.ToSenderForTesting(lhsStore).Send(ctx, ba); pErr == nil {
 t.Fatalf("expected TransactionAbortedError(%s) but got %v", expReason, pErr)
 } else if abortErr, ok := pErr.GetDetail().(*kvpb.TransactionAbortedError); !ok {
 t.Fatalf("expected TransactionAbortedError(%s) but got %v", expReason, pErr)
@@ -945,7 +945,7 @@ func TestStoreRangeMergeTimestampCacheCausality(t *testing.T) {
 gba.Timestamp = ba.Timestamp.Add(42 /* wallTime */, 0 /* logical */)
 gba.Add(getArgs(rhsKey))
 store := tc.GetFirstStoreFromServer(t, int(ba.Header.Replica.NodeID-1))
-gbr, pErr := store.Send(ctx, gba)
+gbr, pErr := kvserver.ToSenderForTesting(store).Send(ctx, gba)
 if pErr != nil {
 t.Error(pErr) // different goroutine, so can't use t.Fatal
 return pErr
@@ -1011,7 +1011,7 @@ func TestStoreRangeMergeTimestampCacheCausality(t *testing.T) {
 // Merge [a, b) and [b, Max). Our request filter above will intercept the
 // merge and execute a read with a large timestamp immediately before the
 // Subsume request executes.
-if _, pErr := kv.SendWrappedWith(ctx, tc.GetFirstStoreFromServer(t, 2), kvpb.Header{
+if _, pErr := kv.SendWrappedWith(ctx, kvserver.ToSenderForTesting(tc.GetFirstStoreFromServer(t, 2)), kvpb.Header{
 RangeID: lhsRangeDesc.RangeID,
 }, adminMergeArgs(scratchKey("a"))); pErr != nil {
 t.Fatal(pErr)
@@ -1044,7 +1044,7 @@ func TestStoreRangeMergeTimestampCacheCausality(t *testing.T) {
 ba.Timestamp = readTS
 ba.RangeID = lhsRangeDesc.RangeID
 ba.Add(incrementArgs(rhsKey, 1))
-if br, pErr := tc.GetFirstStoreFromServer(t, 1).Send(ctx, ba); pErr != nil {
+if br, pErr := kvserver.ToSenderForTesting(tc.GetFirstStoreFromServer(t, 1)).Send(ctx, ba); pErr != nil {
 t.Fatal(pErr)
 } else if br.Timestamp.LessEq(readTS) {
 t.Fatalf("expected write to execute after %v, but executed at %v", readTS, br.Timestamp)

pkg/kv/kvserver/client_raft_test.go

Lines changed: 9 additions & 9 deletions
@@ -2623,7 +2623,7 @@ func TestQuotaPool(t *testing.T) {
 if err := ba.SetActiveTimestamp(tc.Servers[0].Clock()); err != nil {
 t.Fatal(err)
 }
-if _, pErr := leaderRepl.Send(ctx, ba); pErr != nil {
+if _, pErr := kvserver.ToSenderForTesting(leaderRepl).Send(ctx, ba); pErr != nil {
 t.Fatal(pErr)
 }
 
@@ -2645,7 +2645,7 @@ func TestQuotaPool(t *testing.T) {
 ch <- kvpb.NewError(err)
 return
 }
-_, pErr := leaderRepl.Send(ctx, ba)
+_, pErr := kvserver.ToSenderForTesting(leaderRepl).Send(ctx, ba)
 ch <- pErr
 }()
 }()
@@ -2780,7 +2780,7 @@ func TestWedgedReplicaDetection(t *testing.T) {
 if err := ba.SetActiveTimestamp(leaderClock); err != nil {
 t.Fatal(err)
 }
-if _, pErr := leaderRepl.Send(ctx, ba); pErr != nil {
+if _, pErr := kvserver.ToSenderForTesting(leaderRepl).Send(ctx, ba); pErr != nil {
 t.Fatal(pErr)
 }
 
@@ -2803,7 +2803,7 @@ func TestWedgedReplicaDetection(t *testing.T) {
 
 // Send another request to the leader replica. followerRepl is locked
 // so it will not respond.
-if _, pErr := leaderRepl.Send(ctx, ba); pErr != nil {
+if _, pErr := kvserver.ToSenderForTesting(leaderRepl).Send(ctx, ba); pErr != nil {
 t.Fatal(pErr)
 }
 
@@ -3105,7 +3105,7 @@ func TestReplicaRemovalCampaign(t *testing.T) {
 replica2 := store0.LookupReplica(roachpb.RKey(key2))
 
 rg2 := func(s *kvserver.Store) kv.Sender {
-return kv.Wrap(s, func(ba *kvpb.BatchRequest) *kvpb.BatchRequest {
+return kv.Wrap(kvserver.ToSenderForTesting(s), func(ba *kvpb.BatchRequest) *kvpb.BatchRequest {
 if ba.RangeID == 0 {
 ba.RangeID = replica2.RangeID
 }
@@ -4227,7 +4227,7 @@ func TestRemovedReplicaError(t *testing.T) {
 // start seeing the RangeNotFoundError after a little bit of time has passed.
 getArgs := getArgs(key)
 testutils.SucceedsSoon(t, func() error {
-_, pErr := kv.SendWrappedWith(ctx, store, kvpb.Header{}, getArgs)
+_, pErr := kv.SendWrappedWith(ctx, kvserver.ToSenderForTesting(store), kvpb.Header{}, getArgs)
 switch pErr.GetDetail().(type) {
 case *kvpb.AmbiguousResultError:
 return pErr.GoError()
@@ -4283,7 +4283,7 @@ func TestTransferRaftLeadership(t *testing.T) {
 require.NoError(t, err)
 require.Equal(t, roachpb.VOTER_FULL, rd1.Type)
 
-_, pErr := kv.SendWrappedWith(ctx, store0, kvpb.Header{RangeID: repl0.RangeID}, getArgs(key))
+_, pErr := kv.SendWrappedWith(ctx, kvserver.ToSenderForTesting(store0), kvpb.Header{RangeID: repl0.RangeID}, getArgs(key))
 require.NoError(t, pErr.GoError())
 
 status := repl0.RaftStatus()
@@ -4294,7 +4294,7 @@ func TestTransferRaftLeadership(t *testing.T) {
 // Transfer the lease. We'll then check that the leadership follows
 // automatically.
 transferLeaseArgs := adminTransferLeaseArgs(key, store1.StoreID())
-_, pErr = kv.SendWrappedWith(ctx, store0, kvpb.Header{RangeID: repl0.RangeID}, transferLeaseArgs)
+_, pErr = kv.SendWrappedWith(ctx, kvserver.ToSenderForTesting(store0), kvpb.Header{RangeID: repl0.RangeID}, transferLeaseArgs)
 require.NoError(t, pErr.GoError())
 
 // Verify leadership is transferred.
@@ -6398,7 +6398,7 @@ func TestInvalidConfChangeRejection(t *testing.T) {
 },
 })
 
-_, pErr := repl.Send(ctx, &ba)
+_, pErr := kvserver.ToSenderForTesting(repl).Send(ctx, &ba)
 // Verify that we see the configuration change below raft, where we rejected it
 // (since it would've otherwise blow up the Replica: after all, we intentionally
 // proposed an invalid configuration change.

pkg/kv/kvserver/client_replica_circuit_breaker_test.go

Lines changed: 1 addition & 1 deletion
@@ -1315,7 +1315,7 @@ func (cbt *circuitBreakerTest) SendCtxTS(
 // going to leak memory.
 ctx = context.WithValue(ctx, req, struct{}{})
 
-_, pErr := repl.Send(ctx, ba)
+_, pErr := kvserver.ToSenderForTesting(repl.Replica).Send(ctx, ba)
 // If our context got canceled, return an opaque error regardless of presence or
 // absence of actual error. This makes sure we don't accidentally pass tests as
 // a result of our context cancellation.

pkg/kv/kvserver/client_replica_test.go

Lines changed: 7 additions & 7 deletions
@@ -1251,7 +1251,7 @@ func TestNonTxnReadWithinUncertaintyIntervalAfterLeaseTransfer(t *testing.T) {
 ba := &kvpb.BatchRequest{}
 ba.RangeID = desc.RangeID
 ba.Add(getArgs(key))
-br, pErr := tc.GetFirstStoreFromServer(t, 1).Send(ctx, ba)
+br, pErr := kvserver.ToSenderForTesting(tc.GetFirstStoreFromServer(t, 1)).Send(ctx, ba)
 nonTxnRespC <- resp{br, pErr}
 })
 
@@ -1290,7 +1290,7 @@ func TestNonTxnReadWithinUncertaintyIntervalAfterLeaseTransfer(t *testing.T) {
 ba := &kvpb.BatchRequest{}
 ba.RangeID = desc.RangeID
 ba.Add(putArgs(key, []byte("val")))
-br, pErr := tc.GetFirstStoreFromServer(t, 0).Send(ctx, ba)
+br, pErr := kvserver.ToSenderForTesting(tc.GetFirstStoreFromServer(t, 0)).Send(ctx, ba)
 require.Nil(t, pErr)
 writeTs := br.Timestamp
 require.True(t, nonTxnOrigTs.Less(writeTs), "nonTxnOrigTs: %v, writeTs: %v", nonTxnOrigTs, writeTs)
@@ -2589,7 +2589,7 @@ func TestConsistencyQueueDelaysProcessingNewRanges(t *testing.T) {
 rngID := store.LookupReplica(roachpb.RKey(key)).RangeID
 h := kvpb.Header{RangeID: rngID}
 args := adminSplitArgs(key)
-if _, pErr := kv.SendWrappedWith(ctx, store, h, args); pErr != nil {
+if _, pErr := kv.SendWrappedWith(ctx, kvserver.ToSenderForTesting(store), h, args); pErr != nil {
 return pErr.GoError()
 }
 return nil
@@ -2600,7 +2600,7 @@ func TestConsistencyQueueDelaysProcessingNewRanges(t *testing.T) {
 rngID := store.LookupReplica(roachpb.RKey(key)).RangeID
 h := kvpb.Header{RangeID: rngID}
 args := adminMergeArgs(key)
-if _, pErr := kv.SendWrappedWith(ctx, store, h, args); pErr != nil {
+if _, pErr := kv.SendWrappedWith(ctx, kvserver.ToSenderForTesting(store), h, args); pErr != nil {
 return pErr.GoError()
 }
 return nil
@@ -2744,7 +2744,7 @@ func TestLeaseInfoRequest(t *testing.T) {
 },
 }
 reply, pErr := kv.SendWrappedWith(
-context.Background(), s, kvpb.Header{
+context.Background(), kvserver.ToSenderForTesting(s), kvpb.Header{
 RangeID: rangeDesc.RangeID,
 ReadConsistency: kvpb.INCONSISTENT,
 }, leaseInfoReq)
@@ -2870,7 +2870,7 @@ func TestRangeInfoAfterSplit(t *testing.T) {
 },
 }
 ba.Add(gArgs)
-br, pErr := store.Send(ctx, ba)
+br, pErr := kvserver.ToSenderForTesting(store).Send(ctx, ba)
 require.NoError(t, pErr.GoError())
 descs := make([]roachpb.RangeDescriptor, len(br.RangeInfos))
 for i, ri := range br.RangeInfos {
@@ -3051,7 +3051,7 @@ func TestLossQuorumCauseLeaderlessWatcherToSignalUnavailable(t *testing.T) {
 ba.RangeID = desc.RangeID
 ba.Timestamp = repl.Clock().Now()
 ba.Add(putArgs(key, []byte("foo")))
-_, pErr := repl.Send(ctx, ba)
+_, pErr := kvserver.ToSenderForTesting(repl).Send(ctx, ba)
 return pErr, ctx.Err()
 }
 
pkg/kv/kvserver/client_split_test.go

Lines changed: 9 additions & 9 deletions
@@ -570,14 +570,14 @@ func TestStoreRangeSplitAtRangeBounds(t *testing.T) {
 rngID := store.LookupReplica(roachpb.RKey(key)).RangeID
 h := kvpb.Header{RangeID: rngID}
 args := adminSplitArgs(key)
-if _, pErr := kv.SendWrappedWith(ctx, store, h, args); pErr != nil {
+if _, pErr := kv.SendWrappedWith(ctx, kvserver.ToSenderForTesting(store), h, args); pErr != nil {
 t.Fatal(pErr)
 }
 replCount := store.ReplicaCount()
 
 // An AdminSplit request sent to the end of the old range should be re-routed
 // to the start of the new range, succeeding but without creating any new ranges.
-if _, pErr := kv.SendWrappedWith(ctx, store, h, args); pErr != nil {
+if _, pErr := kv.SendWrappedWith(ctx, kvserver.ToSenderForTesting(store), h, args); pErr != nil {
 t.Fatal(pErr)
 }
 
@@ -591,7 +591,7 @@ func TestStoreRangeSplitAtRangeBounds(t *testing.T) {
 // should succeed but no new ranges should be created.
 newRng := store.LookupReplica(roachpb.RKey(key))
 h.RangeID = newRng.RangeID
-if _, pErr := kv.SendWrappedWith(ctx, store, h, args); pErr != nil {
+if _, pErr := kv.SendWrappedWith(ctx, kvserver.ToSenderForTesting(store), h, args); pErr != nil {
 t.Fatal(pErr)
 }
 
@@ -1401,7 +1401,7 @@ func fillRange(
 }
 val := randutil.RandBytes(src, 200000)
 pArgs := putArgs(key, val)
-_, pErr := kv.SendWrappedWith(context.Background(), store, kvpb.Header{
+_, pErr := kv.SendWrappedWith(context.Background(), kvserver.ToSenderForTesting(store), kvpb.Header{
 RangeID: rangeID,
 }, pArgs)
 // When the split occurs in the background, our writes may start failing.
@@ -2891,7 +2891,7 @@ func TestDistributedTxnCleanup(t *testing.T) {
 // Split at "b".
 rhsKey := roachpb.Key("b")
 args = adminSplitArgs(rhsKey)
-if _, pErr := kv.SendWrappedWith(context.Background(), store, kvpb.Header{
+if _, pErr := kv.SendWrappedWith(context.Background(), kvserver.ToSenderForTesting(store), kvpb.Header{
 RangeID: lhs.RangeID,
 }, args); pErr != nil {
 t.Fatalf("split at %q: %s", rhsKey, pErr)
@@ -2932,7 +2932,7 @@ func TestDistributedTxnCleanup(t *testing.T) {
 PushType: kvpb.PUSH_ABORT,
 Force: true,
 })
-_, pErr := store.Send(ctx, ba)
+_, pErr := kvserver.ToSenderForTesting(store).Send(ctx, ba)
 if pErr != nil {
 t.Fatalf("failed to abort the txn: %s", pErr)
 }
@@ -3186,7 +3186,7 @@ func TestTxnWaitQueueDependencyCycleWithRangeSplit(t *testing.T) {
 
 // Split at "b".
 args = adminSplitArgs(rhsKey)
-if _, pErr := kv.SendWrappedWith(ctx, store, kvpb.Header{
+if _, pErr := kv.SendWrappedWith(ctx, kvserver.ToSenderForTesting(store), kvpb.Header{
 RangeID: lhs.RangeID,
 }, args); pErr != nil {
 t.Fatalf("split at %q: %s", rhsKey, pErr)
@@ -3533,7 +3533,7 @@ func TestStoreSplitRangeLookupRace(t *testing.T) {
 
 // Don't use s.DistSenderI().(kv.Sender) so that we don't disturb the RangeDescriptorCache.
 rangeID := store.LookupReplica(roachpb.RKey(splitKey)).RangeID
-_, pErr := kv.SendWrappedWith(context.Background(), store, kvpb.Header{
+_, pErr := kv.SendWrappedWith(context.Background(), kvserver.ToSenderForTesting(store), kvpb.Header{
 RangeID: rangeID,
 }, args)
 if pErr != nil {
@@ -3978,7 +3978,7 @@ func TestSplitBlocksReadsToRHS(t *testing.T) {
 g.GoCtx(func(ctx context.Context) error {
 // Send directly to repl to avoid racing with the
 // split and routing requests to the post-split RHS.
-_, pErr := kv.SendWrappedWith(ctx, repl, h, args)
+_, pErr := kv.SendWrappedWith(ctx, kvserver.ToSenderForTesting(repl), h, args)
 errCh <- pErr.GoError()
 return nil
 })
