@@ -18,11 +18,13 @@ import (
1818 "github.com/cockroachdb/cockroach/pkg/kv"
1919 "github.com/cockroachdb/cockroach/pkg/kv/kvpb"
2020 "github.com/cockroachdb/cockroach/pkg/kv/kvserver"
21+ "github.com/cockroachdb/cockroach/pkg/kv/kvserver/closedts"
2122 "github.com/cockroachdb/cockroach/pkg/kv/kvserver/closedts/ctpb"
2223 "github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency/lock"
2324 "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase"
2425 "github.com/cockroachdb/cockroach/pkg/roachpb"
2526 "github.com/cockroachdb/cockroach/pkg/server"
27+ "github.com/cockroachdb/cockroach/pkg/settings/cluster"
2628 "github.com/cockroachdb/cockroach/pkg/testutils"
2729 "github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
2830 "github.com/cockroachdb/cockroach/pkg/testutils/skip"
@@ -937,3 +939,171 @@ func testNonBlockingReadsWithReaderFn(
 	atomic.StoreInt32(&done, 1)
 	require.NoError(t, g.Wait())
 }
+
+// TestClosedTimestampPolicyRefreshOnSetSpanConfig tests that SetSpanConfig
+// correctly triggers the closed timestamp policy refresh.
+func TestClosedTimestampPolicyRefreshOnSetSpanConfig(t *testing.T) {
+	defer leaktest.AfterTest(t)()
+	defer log.Scope(t).Close(t)
+
+	ctx := context.Background()
+	tc := testcluster.StartTestCluster(t, 3, base.TestClusterArgs{})
+	defer tc.Stopper().Stop(ctx)
+
+	scratchKey := tc.ScratchRange(t)
+
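+	// A freshly created scratch range is expected to start out with the
+	// default LAG_BY_CLUSTER_SETTING closed timestamp policy.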
+	repl := tc.GetFirstStoreFromServer(t, 0).LookupReplica(roachpb.RKey(scratchKey))
+	require.Equal(t, roachpb.LAG_BY_CLUSTER_SETTING, repl.GetRangeInfo(ctx).ClosedTimestampPolicy)
+
+	spanConfig, err := repl.LoadSpanConfig(ctx)
+	require.NoError(t, err)
+	require.NotNil(t, spanConfig)
+	spanConfig.GlobalReads = true
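+	// Applying the updated config via SetSpanConfig should cause the range's
+	// closed timestamp policy to be refreshed to LEAD_FOR_GLOBAL_READS.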
+	repl.SetSpanConfig(*spanConfig, roachpb.Span{Key: scratchKey})
+
+	// Wait for the policy refresh to take effect.
+	testutils.SucceedsSoon(t, func() error {
+		if repl.GetRangeInfo(ctx).ClosedTimestampPolicy != roachpb.LEAD_FOR_GLOBAL_READS {
+			return errors.New("expected LEAD_FOR_GLOBAL_READS")
+		}
+		return nil
+	})
+}
+
+// TestClosedTimestampPolicyRefreshIntervalOnLivenessRanges tests that the
+// closed timestamp policy is correctly applied to the node liveness range. That
+// is, even if we try to set the node liveness range to have global reads, the
+// closed timestamp policy should still be LAG_BY_CLUSTER_SETTING. Read more in
+// replica.closedTimestampPolicyRLocked.
+func TestClosedTimestampPolicyRefreshIntervalOnLivenessRanges(t *testing.T) {
+	defer leaktest.AfterTest(t)()
+	defer log.Scope(t).Close(t)
+
+	ctx := context.Background()
+	tc := testcluster.StartTestCluster(t, 3, base.TestClusterArgs{})
+	defer tc.Stopper().Stop(ctx)
+
+	// Get the node liveness range descriptor.
+	livenessRangeDesc, err := tc.LookupRange(keys.NodeLivenessPrefix)
+	require.NoError(t, err)
+
+	// Check liveness range policy.
+	livenessRepl := tc.GetFirstStoreFromServer(t, 0).LookupReplica(livenessRangeDesc.StartKey)
+	require.Equal(t, roachpb.LAG_BY_CLUSTER_SETTING, livenessRepl.GetRangeInfo(ctx).ClosedTimestampPolicy)
+
+	spanConfig, err := livenessRepl.LoadSpanConfig(ctx)
+	require.NoError(t, err)
+	require.NotNil(t, spanConfig)
+	spanConfig.GlobalReads = true
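+	// Even though the span config now requests global reads, the node liveness
+	// range is expected to keep the LAG_BY_CLUSTER_SETTING policy.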
+	livenessRepl.SetSpanConfig(*spanConfig, roachpb.Span{Key: keys.NodeLivenessPrefix})
+
+	require.Never(t, func() bool {
+		return livenessRepl.GetRangeInfo(ctx).ClosedTimestampPolicy != roachpb.LAG_BY_CLUSTER_SETTING
+	}, 3*time.Second, 500*time.Millisecond)
+}
+
+// TestSideTransportLeaseholder verifies that a range's leaseholder is properly
+// tracked by the closed timestamp side transport, even when the range is
+// receiving writes and the side transport interval is disabled.
+func TestSideTransportLeaseholder(t *testing.T) {
+	defer leaktest.AfterTest(t)()
+	defer log.Scope(t).Close(t)
+	ctx := context.Background()
+	st := cluster.MakeTestingClusterSettings()
+	// Disable side transport interval to verify tracking works even without
+	// active transport.
+	closedts.SideTransportCloseInterval.Override(ctx, &st.SV, 0)
+	tc := serverutils.StartCluster(t, 3, base.TestClusterArgs{
+		ReplicationMode: base.ReplicationManual,
+		ServerArgs: base.TestServerArgs{
+			Settings: st,
+		},
+	})
+	defer tc.Stopper().Stop(ctx)
+
+	// Get store and create test range.
+	store, err := tc.Server(0).GetStores().(*kvserver.Stores).GetStore(tc.Server(0).GetFirstStoreID())
+	require.NoError(t, err)
+	scratchKey := tc.ScratchRange(t)
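+	// Up-replicate the scratch range so it has both a voting and a non-voting
+	// follower.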
+	tc.AddVotersOrFatal(t, scratchKey, tc.Target(1))
+	tc.AddNonVotersOrFatal(t, scratchKey, tc.Target(2))
+	repl := store.LookupReplica(roachpb.RKey(scratchKey))
+	require.NotNil(t, repl)
+
+	// Start goroutine that continuously writes to the range to create write load.
+	go func() {
+		for {
+			select {
+			case <-time.After(10 * time.Millisecond):
+				pArgs := putArgs(scratchKey, []byte("value"))
+				if _, pErr := kv.SendWrapped(ctx, store.DB().NonTransactionalSender(), pArgs); pErr != nil {
+					log.Errorf(ctx, "failed to put value: %s", pErr)
+				}
+			case <-tc.Stopper().ShouldQuiesce():
+				return
+			}
+		}
+	}()
+
+	// Verify that the range appears in the closed timestamp sender's leaseholders
+	// list despite write load and disabled side transport.
+	testutils.SucceedsSoon(t, func() error {
+		closedTsSender := store.GetStoreConfig().ClosedTimestampSender
+		leaseholders := closedTsSender.GetLeaseholders()
+		for _, lh := range leaseholders {
+			if lh.(*kvserver.Replica).RangeID == repl.RangeID {
+				return nil
+			}
+		}
+		return errors.Errorf("range %d not found in leaseholders slice", repl.RangeID)
+	})
+}
+
+// TestClosedTimestampPolicyRefreshIntervalOnLeaseTransfers tests that the
+// closed timestamp policy is correctly refreshed on a range after a lease
+// transfer.
+func TestClosedTimestampPolicyRefreshIntervalOnLeaseTransfers(t *testing.T) {
+	defer leaktest.AfterTest(t)()
+	defer log.Scope(t).Close(t)
+
+	ctx := context.Background()
+	tc := testcluster.StartTestCluster(t, 3, base.TestClusterArgs{
+		ReplicationMode: base.ReplicationManual,
+	})
+	defer tc.Stopper().Stop(ctx)
+
+	scratchKey := tc.ScratchRange(t)
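+	// Replicate the scratch range to all three servers so that the lease can be
+	// transferred below.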
+	desc := tc.AddVotersOrFatal(t, scratchKey, tc.Target(1), tc.Target(2))
+
+	repl1 := tc.GetFirstStoreFromServer(t, 0).LookupReplica(roachpb.RKey(scratchKey))
+	require.Equal(t, roachpb.LAG_BY_CLUSTER_SETTING, repl1.GetRangeInfo(ctx).ClosedTimestampPolicy)
+
+	repl2 := tc.GetFirstStoreFromServer(t, 1).LookupReplica(roachpb.RKey(scratchKey))
+	require.Equal(t, roachpb.LAG_BY_CLUSTER_SETTING, repl2.GetRangeInfo(ctx).ClosedTimestampPolicy)
+
+	spanConfig, err := repl2.LoadSpanConfig(ctx)
+	require.NoError(t, err)
+	require.NotNil(t, spanConfig)
+	spanConfig.GlobalReads = true
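+	// Apply the global-reads span config to repl2 and wait until its closed
+	// timestamp policy reflects the change.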
+	repl2.SetSpanConfig(*spanConfig, roachpb.Span{Key: scratchKey})
+	testutils.SucceedsSoon(t, func() error {
+		if repl2.GetRangeInfo(ctx).ClosedTimestampPolicy != roachpb.LEAD_FOR_GLOBAL_READS {
+			return errors.New("expected LEAD_FOR_GLOBAL_READS")
+		}
+		return nil
+	})
+
+	// Force repl2's cached policy back to LAG_BY_CLUSTER_SETTING.
+	repl2.TestingSetCachedClosedTimestampPolicy(ctpb.LAG_BY_CLUSTER_SETTING)
+	require.Equal(t, roachpb.LAG_BY_CLUSTER_SETTING, repl2.GetRangeInfo(ctx).ClosedTimestampPolicy)
+
+	// Ensure that transferring the lease to repl2 triggers a policy refresh.
+	require.NoError(t, tc.TransferRangeLease(desc, tc.Target(1)))
+	testutils.SucceedsSoon(t, func() error {
+		if actual := repl2.GetRangeInfo(ctx).ClosedTimestampPolicy; actual != roachpb.LEAD_FOR_GLOBAL_READS {
+			return errors.Newf("expected LEAD_FOR_GLOBAL_READS but got %v", actual)
+		}
+		return nil
+	})
+}