@@ -27,6 +27,7 @@ import (
2727 "github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency/lock"
2828 "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase"
2929 "github.com/cockroachdb/cockroach/pkg/kv/kvtestutils"
30+ "github.com/cockroachdb/cockroach/pkg/multitenant/tenantcapabilitiespb"
3031 "github.com/cockroachdb/cockroach/pkg/roachpb"
3132 "github.com/cockroachdb/cockroach/pkg/rpc"
3233 "github.com/cockroachdb/cockroach/pkg/rpc/rpcbase"
@@ -804,9 +805,8 @@ func TestFollowerReadsWithStaleDescriptor(t *testing.T) {
 		base.TestClusterArgs{
 			ReplicationMode: base.ReplicationManual,
 			ServerArgs: base.TestServerArgs{
-				Settings:          settings,
-				DefaultTestTenant: base.TODOTestTenantDisabled,
-				UseDatabase:       "t",
+				Settings:    settings,
+				UseDatabase: "t",
 			},
 			// n4 pretends to have low latency to n2 and n3, so that it tries to use
 			// them for follower reads.
@@ -847,15 +847,24 @@ func TestFollowerReadsWithStaleDescriptor(t *testing.T) {
 	// load based rebalancing to make sure it doesn't move.
 	kvserverbase.LoadBasedRebalancingMode.Override(ctx, &settings.SV, kvserverbase.LBRebalancingOff)
 
+	// NB: Tenants need capabilities to be able to relocate ranges.
+	if !tc.Server(0).DeploymentMode().IsSingleTenant() {
+		require.NoError(t, tc.Server(0).GrantTenantCapabilities(
+			context.Background(), serverutils.TestTenantID(),
+			map[tenantcapabilitiespb.ID]string{
+				tenantcapabilitiespb.CanAdminRelocateRange: "true",
+			}))
+	}
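The same capability can also be granted over SQL from the system tenant. A minimal sketch, not part of this change; the runner, the placeholder tenant name, and the exact statement syntax are assumptions:

    // Hypothetical alternative to the GrantTenantCapabilities helper above:
    // issue the grant from the system tenant's SQL interface. "test-tenant"
    // is a placeholder name and the syntax may differ between versions.
    sysRunner := sqlutils.MakeSQLRunner(tc.Server(0).SystemLayer().SQLConn(t))
    sysRunner.Exec(t, `ALTER TENANT 'test-tenant' GRANT CAPABILITY can_admin_relocate_range = true`)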
+
 	n1 := sqlutils.MakeSQLRunner(tc.Conns[0])
 	n1.Exec(t, `CREATE DATABASE t`)
 	n1.Exec(t, `CREATE TABLE test (k INT PRIMARY KEY)`)
 	n1.Exec(t, `ALTER TABLE test EXPERIMENTAL_RELOCATE VOTERS VALUES (ARRAY[1,2], 1)`)
 	// Speed up closing of timestamps, in order to sleep less below before we can
 	// use follower_read_timestamp(). follower_read_timestamp() uses the sum of
 	// the following settings.
-	n1.Exec(t, `SET CLUSTER SETTING kv.closed_timestamp.target_duration = '0.1s'`)
-	n1.Exec(t, `SET CLUSTER SETTING kv.closed_timestamp.side_transport_interval = '0.1s'`)
+	closedts.TargetDuration.Override(ctx, &settings.SV, 100*time.Millisecond)
+	closedts.SideTransportCloseInterval.Override(ctx, &settings.SV, 100*time.Millisecond)
 
 	// Sleep so that we can perform follower reads. The read timestamp needs to be
 	// above the timestamp when the table was created.
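The sleep itself falls outside this hunk, but it only needs to cover roughly the sum of the two settings overridden above. A minimal sketch of that idea, assuming the same settings object; the multiplier is an arbitrary safety margin, not the value used by the test:

    // Sketch: follower_read_timestamp() uses the sum of the closed-timestamp
    // target duration and the side-transport close interval, so sleeping a
    // small multiple of that sum lets the reads below be served at a closed
    // timestamp that postdates the table's creation.
    pause := closedts.TargetDuration.Get(&settings.SV) +
        closedts.SideTransportCloseInterval.Get(&settings.SV)
    time.Sleep(3 * pause)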
@@ -865,11 +874,16 @@ func TestFollowerReadsWithStaleDescriptor(t *testing.T) {
 
 	// Run a query on n4 to populate its cache.
 	n4 := sqlutils.MakeSQLRunner(tc.Conns[3])
-	n4.Exec(t, "SELECT * from test WHERE k=1")
 	// Check that the cache was indeed populated.
 	var tableID uint32
 	n1.QueryRow(t, `SELECT id from system.namespace WHERE name='test'`).Scan(&tableID)
-	tablePrefix := keys.MustAddr(keys.SystemSQLCodec.TablePrefix(tableID))
+	tablePrefix := keys.MustAddr(tc.Server(0).Codec().TablePrefix(tableID))
+	// NB: Splitting the range helps prevent tenants from receiving an
+	// out-of-band RangeDescriptor update to the RangeCache. The exact
+	// mechanics at play are unclear, but they don't matter for this test.
+	_, _, err := tc.SplitRange(roachpb.Key(tablePrefix))
+	require.NoError(t, err)
+	n4.Exec(t, "SELECT * from test WHERE k=1")
 	n4Cache := tc.Server(3).DistSenderI().(*kvcoord.DistSender).RangeDescriptorCache()
 	entry, err := n4Cache.TestingGetCached(ctx, tablePrefix, false, roachpb.LAG_BY_CLUSTER_SETTING)
 	require.NoError(t, err)
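The assertions on the cached entry continue past this hunk. A hedged sketch of the kind of check made at this point, assuming the entry exposes the descriptor installed by the EXPERIMENTAL_RELOCATE above:

    // Sketch: n4's cache should hold the current descriptor with its two
    // voters; a later relocation (outside this hunk) is what makes this
    // cached entry stale.
    require.Len(t, entry.Desc().Replicas().Descriptors(), 2)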
@@ -964,8 +978,18 @@ func TestFollowerReadsWithStaleDescriptor(t *testing.T) {
 
 	// Sanity check that the plan was distributed.
 	require.True(t, strings.Contains(rec.String(), "creating DistSQL plan with isLocal=false"))
-	// Look at the trace and check that we've served a follower read.
-	require.True(t, kvtestutils.OnlyFollowerReads(rec), "query was not served through follower reads: %s", rec)
+	// NB: We're distributing the plan here, and it (the DistSender) is running
+	// on n3. Note that we've only injected latencies on n4, so any replica is
+	// fair game for n3. When run in normal or shared-process multi-tenancy
+	// deployments, n3 will route to its local replica (which is a follower),
+	// since the local replica always sorts first. For external-process
+	// multi-tenancy, however, there is no notion of a local replica, so
+	// requests may be routed to either n1 or n3 and we can't assert anything
+	// about whether the read will be served by a follower.
+	if !tc.Server(0).DeploymentMode().IsExternal() {
+		// Look at the trace and check that we've served a follower read.
+		require.True(t, kvtestutils.OnlyFollowerReads(rec), "query was not served through follower reads: %s", rec)
+	}
 	// Verify that we didn't produce the "misplanned ranges" metadata that would
 	// purge the non-stale entries from the range cache on n4.
 	require.False(t, strings.Contains(rec.String(), "clearing entries overlapping"))