@@ -170,6 +170,104 @@ def generate_pagination_where_clause(
170170 return " AND " .join (where_clause )
171171
172172
def generate_pagination_bounds(
    direction: str,
    from_token: Optional[RoomStreamToken],
    to_token: Optional[RoomStreamToken],
) -> Tuple[
    str, Optional[Tuple[Optional[int], int]], Optional[Tuple[Optional[int], int]]
]:
    """
    Generate a start and end point for this page of events.

    Args:
        direction: Whether pagination is going forwards or backwards. One of "f" or "b".
        from_token: The token to start pagination at, or None to start at the first value.
        to_token: The token to end pagination at, or None to not limit the end point.

    Returns:
        A three tuple of:

        ASC or DESC for sorting of the query.

        The starting position as a tuple of ints representing
        (topological position, stream position) or None if no from_token was
        provided. The topological position may be None for live tokens.

        The end position in the same format as the starting position, or None
        if no to_token was provided.
    """

    # Tokens really represent positions between elements, but we use
    # the convention of pointing to the event before the gap. Hence
    # we have a bit of asymmetry when it comes to equalities.
    backwards = direction == "b"
    order = "DESC" if backwards else "ASC"

    # The bounds for the stream tokens are complicated by the fact
    # that we need to handle the instance_map part of the tokens. We do this
    # by fetching all events between the min stream token and the maximum
    # stream token (as returned by `RoomStreamToken.get_max_stream_pos`) and
    # then filtering the results.
    from_bound: Optional[Tuple[Optional[int], int]] = None
    if from_token:
        if from_token.topological is not None:
            # Historical token: use its (topological, stream) pair directly.
            from_bound = from_token.as_historical_tuple()
        else:
            # Live token: going backwards we must start at the maximum
            # position covered by the instance map; going forwards the
            # minimum stream position suffices.
            from_bound = (
                None,
                from_token.get_max_stream_pos() if backwards else from_token.stream,
            )

    to_bound: Optional[Tuple[Optional[int], int]] = None
    if to_token:
        if to_token.topological is not None:
            to_bound = to_token.as_historical_tuple()
        else:
            # Mirror of the from_bound logic: the "far" end of the page
            # swaps which stream position is the conservative choice.
            to_bound = (
                None,
                to_token.stream if backwards else to_token.get_max_stream_pos(),
            )

    return order, from_bound, to_bound
245+
246+
def generate_next_token(
    direction: str, last_topo_ordering: int, last_stream_ordering: int
) -> RoomStreamToken:
    """
    Generate the next room stream token based on the currently returned data.

    Args:
        direction: Whether pagination is going forwards or backwards. One of "f" or "b".
        last_topo_ordering: The last topological ordering being returned.
        last_stream_ordering: The last stream ordering being returned.

    Returns:
        A new RoomStreamToken to return to the client.
    """
    # Tokens are positions between events. The raw token points *after*
    # the last event in the chunk; when paginating backwards it must
    # instead point to the event before it, so the stream part is
    # shifted back by one.
    stream = (
        last_stream_ordering - 1 if direction == "b" else last_stream_ordering
    )
    return RoomStreamToken(last_topo_ordering, stream)
269+
270+
173271def _make_generic_sql_bound (
174272 bound : str ,
175273 column_names : Tuple [str , str ],
@@ -1300,47 +1398,11 @@ def _paginate_room_events_txn(
13001398 `to_token`), or `limit` is zero.
13011399 """
13021400
1303- # Tokens really represent positions between elements, but we use
1304- # the convention of pointing to the event before the gap. Hence
1305- # we have a bit of asymmetry when it comes to equalities.
13061401 args = [False , room_id ]
1307- if direction == "b" :
1308- order = "DESC"
1309- else :
1310- order = "ASC"
1311-
1312- # The bounds for the stream tokens are complicated by the fact
1313- # that we need to handle the instance_map part of the tokens. We do this
1314- # by fetching all events between the min stream token and the maximum
1315- # stream token (as returned by `RoomStreamToken.get_max_stream_pos`) and
1316- # then filtering the results.
1317- if from_token .topological is not None :
1318- from_bound : Tuple [Optional [int ], int ] = from_token .as_historical_tuple ()
1319- elif direction == "b" :
1320- from_bound = (
1321- None ,
1322- from_token .get_max_stream_pos (),
1323- )
1324- else :
1325- from_bound = (
1326- None ,
1327- from_token .stream ,
1328- )
13291402
1330- to_bound : Optional [Tuple [Optional [int ], int ]] = None
1331- if to_token :
1332- if to_token .topological is not None :
1333- to_bound = to_token .as_historical_tuple ()
1334- elif direction == "b" :
1335- to_bound = (
1336- None ,
1337- to_token .stream ,
1338- )
1339- else :
1340- to_bound = (
1341- None ,
1342- to_token .get_max_stream_pos (),
1343- )
1403+ order , from_bound , to_bound = generate_pagination_bounds (
1404+ direction , from_token , to_token
1405+ )
13441406
13451407 bounds = generate_pagination_where_clause (
13461408 direction = direction ,
@@ -1436,16 +1498,10 @@ def _paginate_room_events_txn(
14361498 ][:limit ]
14371499
14381500 if rows :
1439- topo = rows [- 1 ].topological_ordering
1440- token = rows [- 1 ].stream_ordering
1441- if direction == "b" :
1442- # Tokens are positions between events.
1443- # This token points *after* the last event in the chunk.
1444- # We need it to point to the event before it in the chunk
1445- # when we are going backwards so we subtract one from the
1446- # stream part.
1447- token -= 1
1448- next_token = RoomStreamToken (topo , token )
1501+ assert rows [- 1 ].topological_ordering is not None
1502+ next_token = generate_next_token (
1503+ direction , rows [- 1 ].topological_ordering , rows [- 1 ].stream_ordering
1504+ )
14491505 else :
14501506 # TODO (erikj): We should work out what to do here instead.
14511507 next_token = to_token if to_token else from_token
0 commit comments