@@ -274,9 +274,9 @@ func TestRequestsOnLaggingReplicaEpochLeases(t *testing.T) {
// can increment it again later.
_, err := tc.Server(0).DB().Inc(ctx, key, 1)
require.NoError(t, err)
- log.Dev.Infof(ctx, "test: waiting for initial values...")
+ log.KvExec.Infof(ctx, "test: waiting for initial values...")
tc.WaitForValues(t, key, []int64{1, 1, 1})
- log.Dev.Infof(ctx, "test: waiting for initial values... done")
+ log.KvExec.Infof(ctx, "test: waiting for initial values... done")
}

// Partition the original leader from its followers. We do this by installing
@@ -291,7 +291,7 @@ func TestRequestsOnLaggingReplicaEpochLeases(t *testing.T) {
// x x
// [1]<---->[2]
//
- log.Dev.Infof(ctx, "test: partitioning node")
+ log.KvExec.Infof(ctx, "test: partitioning node")
const partitionNodeIdx = 0
partitionStore := tc.GetFirstStoreFromServer(t, partitionNodeIdx)
partRepl, err := partitionStore.GetReplica(rngDesc.RangeID)
@@ -325,12 +325,12 @@ func TestRequestsOnLaggingReplicaEpochLeases(t *testing.T) {
}

// Stop the heartbeats so that n1's lease can expire.
- log.Dev.Infof(ctx, "test: suspending heartbeats for n1")
+ log.KvExec.Infof(ctx, "test: suspending heartbeats for n1")
resumeN1Heartbeats := partitionStore.GetStoreConfig().NodeLiveness.PauseAllHeartbeatsForTest()

// Wait until another replica campaigns and becomes leader, replacing the
// partitioned one.
- log.Dev.Infof(ctx, "test: waiting for leadership transfer")
+ log.KvExec.Infof(ctx, "test: waiting for leadership transfer")
testutils.SucceedsSoon(t, func() error {
// Make sure this replica has not inadvertently quiesced. We need the
// replica ticking so that it campaigns.
@@ -348,7 +348,7 @@ func TestRequestsOnLaggingReplicaEpochLeases(t *testing.T) {
})

leaderReplicaID := roachpb.ReplicaID(otherRepl.RaftStatus().Lead)
- log.Dev.Infof(ctx, "test: the leader is replica ID %d", leaderReplicaID)
+ log.KvExec.Infof(ctx, "test: the leader is replica ID %d", leaderReplicaID)
if leaderReplicaID != 2 && leaderReplicaID != 3 {
t.Fatalf("expected leader to be 1 or 2, was: %d", leaderReplicaID)
}
@@ -358,7 +358,7 @@ func TestRequestsOnLaggingReplicaEpochLeases(t *testing.T) {
require.NoError(t, err)

// Wait until the lease expires.
- log.Dev.Infof(ctx, "test: waiting for lease expiration")
+ log.KvExec.Infof(ctx, "test: waiting for lease expiration")
partitionedReplica, err := partitionStore.GetReplica(rngDesc.RangeID)
require.NoError(t, err)
testutils.SucceedsSoon(t, func() error {
@@ -375,11 +375,11 @@ func TestRequestsOnLaggingReplicaEpochLeases(t *testing.T) {
}
return nil
})
- log.Dev.Infof(ctx, "test: lease expired")
+ log.KvExec.Infof(ctx, "test: lease expired")

{
// Write something to generate some Raft log entries and then truncate the log.
- log.Dev.Infof(ctx, "test: incrementing")
+ log.KvExec.Infof(ctx, "test: incrementing")
incArgs := incrementArgs(key, 1)
sender := leaderStore.TestSender()
_, pErr := kv.SendWrapped(ctx, sender, incArgs)
@@ -392,7 +392,7 @@ func TestRequestsOnLaggingReplicaEpochLeases(t *testing.T) {
// Truncate the log at index+1 (log entries < N are removed, so this includes
// the increment). This means that the partitioned replica will need a
// snapshot to catch up.
- log.Dev.Infof(ctx, "test: truncating log...")
+ log.KvExec.Infof(ctx, "test: truncating log...")
truncArgs := &kvpb.TruncateLogRequest{
RequestHeader: kvpb.RequestHeader{
Key: key,
@@ -408,7 +408,7 @@ func TestRequestsOnLaggingReplicaEpochLeases(t *testing.T) {
// Resume n1's heartbeats and wait for it to become live again. This is to
// ensure that the rest of the test does not somehow fool itself because n1 is
// not live.
- log.Dev.Infof(ctx, "test: resuming n1 heartbeats")
+ log.KvExec.Infof(ctx, "test: resuming n1 heartbeats")
resumeN1Heartbeats()

// Resolve the partition, but continue blocking snapshots destined for the
@@ -418,7 +418,7 @@ func TestRequestsOnLaggingReplicaEpochLeases(t *testing.T) {
// allow the replica in question to figure out that it's not the leader any
// more. As long as it is completely partitioned, the replica continues
// believing that it is the leader, and lease acquisition requests block.
- log.Dev.Infof(ctx, "test: removing partition")
+ log.KvExec.Infof(ctx, "test: removing partition")
slowSnapHandler := &slowSnapRaftHandler{
rangeID: rngDesc.RangeID,
waitCh: make(chan struct{}),
@@ -439,7 +439,7 @@ func TestRequestsOnLaggingReplicaEpochLeases(t *testing.T) {

// Now we're going to send a request to the behind replica, and we expect it
// to not block; we expect a redirection to the leader.
- log.Dev.Infof(ctx, "test: sending request")
+ log.KvExec.Infof(ctx, "test: sending request")
timeoutCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
defer cancel()
for {
@@ -549,7 +549,7 @@ func TestSnapshotAfterTruncationWithUncommittedTailEpochLeases(t *testing.T) {
// x x
// [1]<---->[2]
//
- log.Dev.Infof(ctx, "test: installing unreliable Raft transports")
+ log.KvExec.Infof(ctx, "test: installing unreliable Raft transports")
for _, s := range []int{0, 1, 2} {
h := &unreliableRaftHandler{
rangeID: partRepl.RangeID,
@@ -572,7 +572,7 @@ func TestSnapshotAfterTruncationWithUncommittedTailEpochLeases(t *testing.T) {
// not succeed before their context is canceled, but they will be appended
// to the partitioned replica's Raft log because it is currently the Raft
// leader.
- log.Dev.Infof(ctx, "test: sending writes to partitioned replica")
+ log.KvExec.Infof(ctx, "test: sending writes to partitioned replica")
g := ctxgroup.WithContext(ctx)
otherKeys := make([]roachpb.Key, 32)
otherKeys[0] = key.Next()
@@ -609,7 +609,7 @@ func TestSnapshotAfterTruncationWithUncommittedTailEpochLeases(t *testing.T) {
nonPartitionedSenders[0] = tc.GetFirstStoreFromServer(t, 1).TestSender()
nonPartitionedSenders[1] = tc.GetFirstStoreFromServer(t, 2).TestSender()

- log.Dev.Infof(ctx, "test: sending write to transfer lease")
+ log.KvExec.Infof(ctx, "test: sending write to transfer lease")
incArgs = incrementArgs(key, incB)
var i int
var newLeaderRepl *kvserver.Replica
@@ -632,15 +632,15 @@ func TestSnapshotAfterTruncationWithUncommittedTailEpochLeases(t *testing.T) {
newLeaderReplSender = tc.GetFirstStoreFromServer(t, newLeaderStoreIdx).TestSender()
return nil
})
- log.Dev.Infof(ctx, "test: waiting for values...")
+ log.KvExec.Infof(ctx, "test: waiting for values...")
tc.WaitForValues(t, key, []int64{incA, incAB, incAB})
- log.Dev.Infof(ctx, "test: waiting for values... done")
+ log.KvExec.Infof(ctx, "test: waiting for values... done")

index := newLeaderRepl.GetLastIndex()

// Truncate the log at index+1 (log entries < N are removed, so this
// includes the increment).
- log.Dev.Infof(ctx, "test: truncating log")
+ log.KvExec.Infof(ctx, "test: truncating log")
truncArgs := truncateLogArgs(index+1, partRepl.RangeID)
truncArgs.Key = partRepl.Desc().StartKey.AsRawKey()
testutils.SucceedsSoon(t, func() error {
@@ -659,7 +659,7 @@ func TestSnapshotAfterTruncationWithUncommittedTailEpochLeases(t *testing.T) {
snapsBefore := snapsMetric.Count()

// Remove the partition. Snapshot should follow.
- log.Dev.Infof(ctx, "test: removing the partition")
+ log.KvExec.Infof(ctx, "test: removing the partition")
for _, s := range []int{0, 1, 2} {
tc.Servers[s].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(tc.Target(s).StoreID, &unreliableRaftHandler{
rangeID: partRepl.RangeID,
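Note on the pattern applied throughout these hunks: the log package exposes one logger per logging channel, and each channel logger offers the same Infof-style call, so rerouting a message only means invoking the method on a different channel object. A minimal sketch, using the two channels that appear in this diff (the message text is illustrative):

	// Before: the test progress message goes to the Dev channel.
	log.Dev.Infof(ctx, "test: waiting for lease expiration")
	// After: the identical message is routed to the KvExec channel instead;
	// the format string and arguments stay unchanged.
	log.KvExec.Infof(ctx, "test: waiting for lease expiration")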