@@ -673,11 +673,11 @@ async def get_oldest_events_with_depth_in_room(self, room_id):
         )

     def get_oldest_events_with_depth_in_room_txn(self, txn, room_id):
-        sqlAsdf = "SELECT * FROM insertion_event_extremeties as i"
+        sqlAsdf = "SELECT * FROM insertion_event_edges as i"
         txn.execute(sqlAsdf)
         logger.info("wfeafewawafeawg %s", dict(txn))

-        sqlAsdf = "SELECT * FROM insertion_event_extremeties as i WHERE i.room_id = ?"
+        sqlAsdf = "SELECT * FROM insertion_event_edges as i WHERE i.room_id = ?"
         txn.execute(sqlAsdf, (room_id,))
         logger.info("awfeawefw %s", dict(txn))

@@ -688,7 +688,7 @@ def get_oldest_events_with_depth_in_room_txn(self, txn, room_id):
             # " INNER JOIN event_backward_extremities as b"
             # " ON g.prev_event_id = b.event_id"
             # TODO
-            " INNER JOIN insertion_event_extremeties as i"
+            " INNER JOIN insertion_event_edges as i"
             " ON e.event_id = i.insertion_prev_event_id"
             " WHERE i.room_id = ?"
             " GROUP BY i.insertion_event_id"
@@ -703,7 +703,7 @@ def get_oldest_events_with_depth_in_room_txn(self, txn, room_id):
             " INNER JOIN event_backward_extremities as b"
             " ON g.prev_event_id = b.event_id"
             # TODO
-            # " INNER JOIN insertion_event_extremeties as i"
+            # " INNER JOIN insertion_event_edges as i"
             # " ON g.event_id = i.insertion_prev_event_id"
             " WHERE b.room_id = ? AND g.is_state is ?"
             " GROUP BY b.event_id"
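
The hunks above rename the insertion_event_extremeties table to insertion_event_edges and experiment with joining it into the oldest-events query, so that the events an insertion event points at (via insertion_prev_event_id) can be picked up as backfill starting points. As a rough, self-contained illustration of what that join produces, here is a sketch against an in-memory SQLite database; the table schemas and the SELECT list are assumptions inferred from the column names in the diff, not the real Synapse schema:

import sqlite3

# Minimal stand-in tables; the real Synapse schemas differ.
conn = sqlite3.connect(":memory:")
txn = conn.cursor()
txn.execute("CREATE TABLE events (event_id TEXT, room_id TEXT, depth INTEGER)")
txn.execute(
    "CREATE TABLE insertion_event_edges"
    " (room_id TEXT, insertion_event_id TEXT, insertion_prev_event_id TEXT)"
)

# One normal event at depth 5, and one insertion event pointing at it.
txn.execute("INSERT INTO events VALUES ('$event_a', '!room:test', 5)")
txn.execute(
    "INSERT INTO insertion_event_edges"
    " VALUES ('!room:test', '$insertion_1', '$event_a')"
)

# The join from the hunk above; the SELECT list here is an assumed completion,
# since the start of the query string sits above the lines shown in the diff.
sql = (
    "SELECT i.insertion_event_id, MAX(e.depth) FROM events as e"
    " INNER JOIN insertion_event_edges as i"
    " ON e.event_id = i.insertion_prev_event_id"
    " WHERE i.room_id = ?"
    " GROUP BY i.insertion_event_id"
)
txn.execute(sql, ("!room:test",))
print(dict(txn))  # {'$insertion_1': 5}
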
@@ -961,16 +961,50 @@ def _get_backfill_events(self, txn, room_id, event_list, limit):
         # We want to make sure that we do a breadth-first, "depth" ordered
         # search.

-        # TODO
+        # Look for the prev_event_id connected to the given event_id
         query = (
             "SELECT depth, prev_event_id FROM event_edges"
+            # Get the depth of the prev_event_id from the events table
             " INNER JOIN events"
             " ON prev_event_id = events.event_id"
+            # Find an event which matches the given event_id
             " WHERE event_edges.event_id = ?"
             " AND event_edges.is_state = ?"
             " LIMIT ?"
         )

+        # Look for the "insertion" events connected to the given event_id
+        # TODO: Do we need to worry about selecting only from the given room_id? The other query above doesn't
+        connected_insertion_event_query = (
+            "SELECT e.depth, i.insertion_event_id FROM insertion_event_edges AS i"
+            # Get the depth of the insertion event from the events table
+            " INNER JOIN events AS e"
+            " ON e.event_id = i.insertion_event_id"
+            # Find an insertion event which points via prev_events to the given event_id
+            " WHERE i.insertion_prev_event_id = ?"
+            " LIMIT ?"
+        )
+
+        # Find any chunk connections of a given insertion event
+        # TODO: Do we need to worry about selecting only from the given room_id? The other query above doesn't
+        chunk_connection_query = (
+            "SELECT e.depth, c.event_id FROM insertion_events AS i"
+            # Find the chunk that connects to the given insertion event
+            " INNER JOIN chunk_edges AS c"
+            " ON i.next_chunk_id = c.chunk_id"
+            # Get the depth of the chunk start event from the events table
+            " INNER JOIN events AS e"
+            " ON e.event_id = c.event_id"
+            # Find an insertion event which matches the given event_id
+            " WHERE i.insertion_event_id = ?"
+            " LIMIT ?"
+        )
+
+        # In a PriorityQueue, the lowest valued entries are retrieved first.
+        # We're using depth as the priority in the queue.
+        # Depth is lowest at the oldest-in-time message and highest at the
+        # newest-in-time message. We add events to the queue with a negative depth so that
+        # we process the newest-in-time messages first going backwards in time.
         queue = PriorityQueue()

         for event_id in event_list:
@@ -996,9 +1030,36 @@ def _get_backfill_events(self, txn, room_id, event_list, limit):

             event_results.add(event_id)

+            txn.execute(
+                connected_insertion_event_query, (event_id, limit - len(event_results))
+            )
+            connected_insertion_event_id_results = list(txn)
+            logger.info(
+                "connected_insertion_event_query %s",
+                connected_insertion_event_id_results,
+            )
+            for row in connected_insertion_event_id_results:
+                if row[1] not in event_results:
+                    queue.put((-row[0], row[1]))
+
+                    # Find any chunk connections for the given insertion event
+                    txn.execute(
+                        chunk_connection_query, (row[1], limit - len(event_results))
+                    )
+                    chunk_start_event_id_results = list(txn)
+                    logger.info(
+                        "chunk_start_event_id_results %s",
+                        chunk_start_event_id_results,
+                    )
+                    for row in chunk_start_event_id_results:
+                        if row[1] not in event_results:
+                            queue.put((-row[0], row[1]))
+
             txn.execute(query, (event_id, False, limit - len(event_results)))
+            prev_event_id_results = list(txn)
+            logger.info("prev_event_ids %s", prev_event_id_results)

-            for row in txn:
+            for row in prev_event_id_results:
                 if row[1] not in event_results:
                     queue.put((-row[0], row[1]))

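
A note on the PriorityQueue comment introduced above: because Python's PriorityQueue pops the lowest value first, pushing (-depth, event_id) means the deepest (newest-in-time) event is always processed next while walking backwards through the room DAG. Below is a minimal sketch of that ordering trick on a toy graph; the dict stands in for the real event_edges / insertion_event_edges queries and none of this is Synapse code:

from queue import PriorityQueue

# Toy DAG: event_id -> (depth, prev_event_ids). In Synapse these edges come
# from the event_edges / insertion_event_edges tables instead.
events = {
    "$e1": (1, []),
    "$e2": (2, ["$e1"]),
    "$e3": (3, ["$e2"]),
}

def backfill(start_ids, limit):
    queue = PriorityQueue()
    results = set()

    for event_id in start_ids:
        depth, _ = events[event_id]
        # Negative depth: the highest-depth (newest) event sorts first.
        queue.put((-depth, event_id))

    while not queue.empty() and len(results) < limit:
        _, event_id = queue.get()
        if event_id in results:
            continue
        results.add(event_id)
        # Walk backwards to the prev_events, again keyed on negative depth.
        for prev_id in events[event_id][1]:
            if prev_id not in results:
                queue.put((-events[prev_id][0], prev_id))

    return results

print(backfill(["$e3"], limit=10))  # visits $e3, then $e2, then $e1
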