Commit 331812e
kvserver: remove TestProcessSplitAfterRightHandSideHasBeenRemoved
The test had a race condition between lease transfer verification and partition activation. After transferring the lease from n1 to n3, there was a window in which n1 could re-acquire the lease before the partition was activated, causing the subsequent increment to hang waiting for a slow proposal on the now-partitioned n1.

I spent a couple of hours trying to deflake this, but whenever you fix one thing, another springs up. The test is highly complex and not maintainable. The [split_pre_apply] tests cover this functionality; testing it from "far away" does give some extra coverage, but at a steep price in maintainability that we are not ready to pay, so this commit removes the test.

[split_pre_apply]: https://github.com/tbg/cockroach/blob/7270a51a0430999661ed15320527ccc04d796a14/pkg/kv/kvserver/testdata/replica_lifecycle/split_pre_apply.txt

Fixes (on 26.1) #159676
Fixes #158295.
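To make the ordering concrete, below is a minimal, self-contained Go sketch of the race described above. The partition type mirrors the mutex-guarded set/activate pattern visible in the diff further down; the lease type, the node names, and the sequencing in main are hypothetical stand-ins for the real test harness, not CockroachDB APIs.

package main

import (
	"fmt"
	"sync"
)

// partition mimics the mutex-guarded activation flag used by
// testClusterPartitionedRange (see the diff below); everything else in
// this sketch is a hypothetical stand-in, not the real test's API.
type partition struct {
	mu     sync.Mutex
	active bool
}

func (p *partition) activate() { p.set(true) }

func (p *partition) set(active bool) {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.active = active
}

// lease is a toy single-holder lease.
type lease struct {
	mu     sync.Mutex
	holder string
}

func (l *lease) acquire(n string) { l.mu.Lock(); defer l.mu.Unlock(); l.holder = n }

func (l *lease) holderIs(n string) bool {
	l.mu.Lock()
	defer l.mu.Unlock()
	return l.holder == n
}

func main() {
	p := &partition{}
	lse := &lease{}

	// The test's ordering: transfer the lease n1 -> n3 and verify it...
	lse.acquire("n3")
	fmt.Println("transfer verified:", lse.holderIs("n3"))

	// ...but the partition is only activated afterwards. In that window
	// nothing stops n1 from re-acquiring the lease.
	lse.acquire("n1") // the race window the commit message describes
	p.activate()      // too late: the next increment is proposed on the
	// now-partitioned n1 and hangs as a slow proposal.

	fmt.Printf("partition active=%v, leaseholder=%s\n", p.active, lse.holder)
}

Activating the partition before relying on the transfer would close this particular window, but as the commit message notes, each such fix tended to surface another issue, which is why the test was removed rather than patched again.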
Parent: 48d36a5

2 files changed (+1, -561 lines)

pkg/kv/kvserver/client_raft_helpers_test.go

Lines changed: 1 addition & 26 deletions
@@ -292,44 +292,19 @@ func setupPartitionedRangeWithHandlers(
 	return pr, nil
 }
 
-func (pr *testClusterPartitionedRange) deactivate() { pr.set(false) }
-func (pr *testClusterPartitionedRange) activate() { pr.set(true) }
+func (pr *testClusterPartitionedRange) activate() { pr.set(true) }
 
 func (pr *testClusterPartitionedRange) set(active bool) {
 	pr.mu.Lock()
 	defer pr.mu.Unlock()
 	pr.mu.partitioned = active
 }
 
-func (pr *testClusterPartitionedRange) addReplica(replicaID roachpb.ReplicaID) {
-	pr.mu.Lock()
-	defer pr.mu.Unlock()
-	pr.mu.partitionedReplicas[replicaID] = true
-}
-
 func (pr *testClusterPartitionedRange) addStore(storeID roachpb.StoreID) {
 	pr.mu.Lock()
 	defer pr.mu.Unlock()
 	pr.mu.partitionedStores[storeID] = true
 }
 
-func (pr *testClusterPartitionedRange) removeStore(storeID roachpb.StoreID) {
-	pr.mu.Lock()
-	defer pr.mu.Unlock()
-
-	pr.mu.partitionedStores[storeID] = false
-}
-
-func (pr *testClusterPartitionedRange) extend(
-	tc *testcluster.TestCluster,
-	rangeID roachpb.RangeID,
-	replicaID roachpb.ReplicaID,
-	partitionedNode int,
-	activated bool,
-	funcs kvtestutils.UnreliableRaftHandlerFuncs,
-) (*testClusterPartitionedRange, error) {
-	return setupPartitionedRangeWithHandlers(tc, rangeID, replicaID, partitionedNode, activated, pr.handlers, funcs)
-}
-
 // dropRaftMessagesFrom sets up a Raft message handler on the given server that
 // drops inbound Raft messages from the given range and replica IDs. In addition
 // to raft messages, StoreLiveness messages from the replica IDs' store are also
