@@ -43,7 +43,7 @@ type mockReplica struct {
     canBump        bool
     cantBumpReason CantCloseReason
     lai            kvpb.LeaseAppliedIndex
-    policy         roachpb.RangeClosedTimestampPolicy
+    policy         ctpb.RangeClosedTimestampPolicy
 }

 var _ Replica = &mockReplica{}
@@ -120,7 +120,7 @@ func newMockReplica(id roachpb.RangeID, nodes ...roachpb.NodeID) *mockReplica {
         rangeID: id,
         canBump: true,
         lai:     5,
-        policy:  roachpb.LAG_BY_CLUSTER_SETTING,
+        policy:  ctpb.LAG_BY_CLUSTER_SETTING,
     }
     r.mu.desc = desc
     return r
@@ -137,14 +137,14 @@ func newMockReplicaEx(id roachpb.RangeID, replicas ...roachpb.ReplicationTarget)
         rangeID: id,
         canBump: true,
         lai:     5,
-        policy:  roachpb.LAG_BY_CLUSTER_SETTING,
+        policy:  ctpb.LAG_BY_CLUSTER_SETTING,
     }
     r.mu.desc = desc
     return r
 }

 func expGroupUpdates(s *Sender, now hlc.ClockTimestamp) []ctpb.Update_GroupUpdate {
-    targetForPolicy := func(pol roachpb.RangeClosedTimestampPolicy) hlc.Timestamp {
+    targetForPolicy := func(pol ctpb.RangeClosedTimestampPolicy) hlc.Timestamp {
         return closedts.TargetForPolicy(
             now,
             s.clock.MaxOffset(),
@@ -155,8 +155,8 @@ func expGroupUpdates(s *Sender, now hlc.ClockTimestamp) []ctpb.Update_GroupUpdat
         )
     }
     return []ctpb.Update_GroupUpdate{
-        {Policy: roachpb.LAG_BY_CLUSTER_SETTING, ClosedTimestamp: targetForPolicy(roachpb.LAG_BY_CLUSTER_SETTING)},
-        {Policy: roachpb.LEAD_FOR_GLOBAL_READS, ClosedTimestamp: targetForPolicy(roachpb.LEAD_FOR_GLOBAL_READS)},
+        {Policy: ctpb.LAG_BY_CLUSTER_SETTING, ClosedTimestamp: targetForPolicy(ctpb.LAG_BY_CLUSTER_SETTING)},
+        {Policy: ctpb.LEAD_FOR_GLOBAL_READS_WITH_NO_LATENCY_INFO, ClosedTimestamp: targetForPolicy(ctpb.LEAD_FOR_GLOBAL_READS_WITH_NO_LATENCY_INFO)},
     }
 }

@@ -190,7 +190,7 @@ func TestSenderBasic(t *testing.T) {
     now = s.publish(ctx)
     require.Len(t, s.trackedMu.tracked, 1)
     require.Equal(t, map[roachpb.RangeID]trackedRange{
-        15: {lai: 5, policy: roachpb.LAG_BY_CLUSTER_SETTING},
+        15: {lai: 5, policy: ctpb.LAG_BY_CLUSTER_SETTING},
     }, s.trackedMu.tracked)
     require.Len(t, s.leaseholdersMu.leaseholders, 1)
     require.Len(t, s.connsMu.conns, 2)
@@ -204,7 +204,7 @@ func TestSenderBasic(t *testing.T) {
     require.Equal(t, expGroupUpdates(s, now), up.ClosedTimestamps)
     require.Nil(t, up.Removed)
     require.Equal(t, []ctpb.Update_RangeUpdate{
-        {RangeID: 15, LAI: 5, Policy: roachpb.LAG_BY_CLUSTER_SETTING},
+        {RangeID: 15, LAI: 5, Policy: ctpb.LAG_BY_CLUSTER_SETTING},
     }, up.AddedOrUpdated)

     c2, ok := s.connsMu.conns[2]
@@ -287,7 +287,7 @@ func TestSenderColocateReplicasOnSameNode(t *testing.T) {
     now := s.publish(ctx)
     require.Len(t, s.trackedMu.tracked, 1)
     require.Equal(t, map[roachpb.RangeID]trackedRange{
-        15: {lai: 5, policy: roachpb.LAG_BY_CLUSTER_SETTING},
+        15: {lai: 5, policy: ctpb.LAG_BY_CLUSTER_SETTING},
     }, s.trackedMu.tracked)
     require.Len(t, s.leaseholdersMu.leaseholders, 1)
     // Ensure that we have two connections, one for remote node and one for local.
@@ -304,7 +304,7 @@ func TestSenderColocateReplicasOnSameNode(t *testing.T) {
     require.Equal(t, expGroupUpdates(s, now), up.ClosedTimestamps)
     require.Nil(t, up.Removed)
     require.Equal(t, []ctpb.Update_RangeUpdate{
-        {RangeID: 15, LAI: 5, Policy: roachpb.LAG_BY_CLUSTER_SETTING},
+        {RangeID: 15, LAI: 5, Policy: ctpb.LAG_BY_CLUSTER_SETTING},
     }, up.AddedOrUpdated)
 }