@@ -19,16 +19,56 @@ use postgres_openssl::MakeTlsConnector;
 use rand::{distributions::Alphanumeric, thread_rng, Rng};
 use std::{borrow::Cow, collections::BTreeMap, fmt};
 
-use crate::{generate_sql, Config};
+use crate::{compressor::Level, generate_sql};
 
 use super::StateGroupEntry;
 
 /// Fetch the entries in state_groups_state (and their prev groups) for a
 /// specific room.
 ///
-/// - Connects to the database
-/// - Fetches the first [group] rows with group id after [min]
-/// - Recursively searches for missing predecessors and adds those
+/// Returns with the state_group map and the id of the last group that was used
+///
+/// # Arguments
+///
+/// * `room_id` - The ID of the room in the database
+/// * `db_url` - The URL of a Postgres database. This should be of the
+///              form: "postgresql://user:pass@domain:port/database"
+/// * `min_state_group` - If specified, then only fetch the entries for state
+///                       groups greater than (but not equal to) this number. It
+///                       also requires groups_to_compress to be specified
+/// * `groups_to_compress` - The number of groups to get from the database before stopping
+
+pub fn get_data_from_db(
+    db_url: &str,
+    room_id: &str,
+    min_state_group: Option<i64>,
+    groups_to_compress: Option<i64>,
+    max_state_group: Option<i64>,
+) -> (BTreeMap<i64, StateGroupEntry>, i64) {
+    // connect to the database
+    let mut builder = SslConnector::builder(SslMethod::tls()).unwrap();
+    builder.set_verify(SslVerifyMode::NONE);
+    let connector = MakeTlsConnector::new(builder.build());
+
+    let mut client = Client::connect(db_url, connector)
+        .unwrap_or_else(|e| panic!("Error connecting to the database: {}", e));
+
+    let state_group_map: BTreeMap<i64, StateGroupEntry> = BTreeMap::new();
+
+    load_map_from_db(
+        &mut client,
+        room_id,
+        min_state_group,
+        groups_to_compress,
+        max_state_group,
+        state_group_map,
+    )
+}
+
+/// Fetch the entries in state_groups_state (and their prev groups) for a
+/// specific room. This method should only be called if resuming the compressor from
+/// where it last finished - and as such also loads in the state groups from the heads
+/// of each of the levels (as they were at the end of the last run of the compressor)
 ///
 /// Returns with the state_group map and the id of the last group that was used
 ///
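For orientation, here is a minimal caller sketch for the new `get_data_from_db` signature introduced above. Only the signature comes from this diff; the import path, connection string, room id, and numeric limits are illustrative assumptions.

```rust
// Hypothetical caller of get_data_from_db - module path and concrete values
// are placeholders; only the function signature matches this diff.
use synapse_compress_state::database::get_data_from_db; // assumed path

fn main() {
    let db_url = "postgresql://user:pass@localhost:5432/synapse"; // placeholder
    let room_id = "!example:localhost"; // placeholder

    // Fetch up to 500 state groups with ids strictly greater than 1000,
    // with no upper bound on the group id.
    let (state_group_map, last_group) =
        get_data_from_db(db_url, room_id, Some(1000), Some(500), None);

    println!(
        "loaded {} state groups, last group id used: {}",
        state_group_map.len(),
        last_group
    );
}
```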
@@ -43,12 +83,15 @@ use super::StateGroupEntry;
 /// * `groups_to_compress` - The number of groups to get from the database before stopping
 /// * `max_state_group` - If specified, then only fetch the entries for state
 ///                       groups lower than or equal to this number.
-pub fn get_data_from_db(
+/// * `level_info` - The maximum size, current length and current head for each
+///                  level (as it was when the compressor last finished for this
+///                  room)
+pub fn reload_data_from_db(
     db_url: &str,
     room_id: &str,
     min_state_group: Option<i64>,
     groups_to_compress: Option<i64>,
-    max_state_group: Option<i64>,
+    level_info: &[Level],
 ) -> (BTreeMap<i64, StateGroupEntry>, i64) {
     // connect to the database
     let mut builder = SslConnector::builder(SslMethod::tls()).unwrap();
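A hedged sketch of the resume path this hunk enables: `reload_data_from_db` takes the saved level state instead of an upper group bound. The import paths are assumptions, and how the `Level` values are persisted and restored between runs is outside this diff.

```rust
// Hypothetical resume helper - level_info would be restored from wherever the
// previous run saved it; constructing Level values is not shown in this diff.
use synapse_compress_state::compressor::Level; // assumed path
use synapse_compress_state::database::reload_data_from_db; // assumed path

fn resume_compression(db_url: &str, room_id: &str, level_info: &[Level]) {
    // Continue from group id 2000, pulling at most 500 further groups.
    let (state_group_map, last_group) =
        reload_data_from_db(db_url, room_id, Some(2000), Some(500), level_info);

    println!(
        "resumed with {} state groups, stopping at group {}",
        state_group_map.len(),
        last_group
    );
}
```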
@@ -58,19 +101,117 @@ pub fn get_data_from_db(
     let mut client = Client::connect(db_url, connector)
         .unwrap_or_else(|e| panic!("Error connecting to the database: {}", e));
 
+    // load just the state_groups at the head of each level
+    // this doesn't load their predecessors as that will be done at the end of
+    // load_map_from_db()
+    let state_group_map: BTreeMap<i64, StateGroupEntry> = load_level_heads(&mut client, level_info);
+
+    load_map_from_db(
+        &mut client,
+        room_id,
+        min_state_group,
+        groups_to_compress,
+        // max state group not used when saving and loading
+        None,
+        state_group_map,
+    )
+}
+
+/// Finds the state_groups that are at the head of each compressor level
+/// NOTE this does not also retrieve their predecessors
+///
+/// # Arguments
+///
+/// * `client` - A Postgres client to make requests with
+/// * `level_info` - The levels whose heads are being requested
+fn load_level_heads(client: &mut Client, level_info: &[Level]) -> BTreeMap<i64, StateGroupEntry> {
+    // obtain all of the heads that aren't None from level_info
+    let level_heads: Vec<i64> = level_info
+        .iter()
+        .filter_map(|l| (*l).get_current())
+        .collect();
+
+    // Query to get id, predecessor and deltas for each state group
+    let sql = r#"
+        SELECT m.id, prev_state_group, type, state_key, s.event_id
+        FROM state_groups AS m
+        LEFT JOIN state_groups_state AS s ON (m.id = s.state_group)
+        LEFT JOIN state_group_edges AS e ON (m.id = e.state_group)
+        WHERE m.id = ANY($1)
+    "#;
+
+    // Actually do the query
+    let mut rows = client.query_raw(sql, &[&level_heads]).unwrap();
+
+    // Copy the data from the database into a map
+    let mut state_group_map: BTreeMap<i64, StateGroupEntry> = BTreeMap::new();
+
+    while let Some(row) = rows.next().unwrap() {
+        // The row in the map to copy the data to
+        // NOTE: default StateGroupEntry has in_range as false
+        // This is what we want since as a level head, it has already been compressed by the
+        // previous run!
+        let entry = state_group_map.entry(row.get(0)).or_default();
+
+        // Save the predecessor (this may already be there)
+        entry.prev_state_group = row.get(1);
+
+        // Copy the single delta from the predecessor stored in this row
+        if let Some(etype) = row.get::<_, Option<String>>(2) {
+            entry.state_map.insert(
+                &etype,
+                &row.get::<_, String>(3),
+                row.get::<_, String>(4).into(),
+            );
+        }
+    }
+    state_group_map
+}
+
+/// Fetch the entries in state_groups_state (and their prev groups) for a
+/// specific room within a certain range. These are appended onto the provided
+/// map.
+///
+/// - Fetches the first [group] rows with group id after [min]
+/// - Recursively searches for missing predecessors and adds those
+///
+/// Returns with the state_group map and the id of the last group that was used
+///
+/// # Arguments
+///
+/// * `client` - A Postgres client to make requests with
+/// * `room_id` - The ID of the room in the database
+/// * `min_state_group` - If specified, then only fetch the entries for state
+///                       groups greater than (but not equal to) this number. It
+///                       also requires groups_to_compress to be specified
+/// * `groups_to_compress` - The number of groups to get from the database before stopping
+/// * `state_group_map` - The map to populate with the entries from the database
+
+fn load_map_from_db(
+    client: &mut Client,
+    room_id: &str,
+    min_state_group: Option<i64>,
+    groups_to_compress: Option<i64>,
+    max_state_group: Option<i64>,
+    mut state_group_map: BTreeMap<i64, StateGroupEntry>,
+) -> (BTreeMap<i64, StateGroupEntry>, i64) {
     // Search for the group id of the groups_to_compress'th group after min_state_group
     // If this is saved, then the compressor can continue by having min_state_group being
     // set to this maximum
     let max_group_found = find_max_group(
-        &mut client,
+        client,
         room_id,
         min_state_group,
         groups_to_compress,
         max_state_group,
     );
 
-    let mut state_group_map =
-        get_initial_data_from_db(&mut client, room_id, min_state_group, max_group_found);
+    state_group_map.append(&mut get_initial_data_from_db(
+        client,
+        room_id,
+        min_state_group,
+        max_group_found,
+    ));
 
     println!("Got initial state from database. Checking for any missing state groups...");
 
@@ -111,7 +252,7 @@ pub fn get_data_from_db(
         // println!("Missing {} state groups", missing_sgs.len());
 
         // find state groups not picked up already and add them to the map
-        let map = get_missing_from_db(&mut client, &missing_sgs, min_state_group, max_group_found);
+        let map = get_missing_from_db(client, &missing_sgs, min_state_group, max_group_found);
         for (k, v) in map {
             state_group_map.entry(k).or_insert(v);
         }
@@ -354,7 +495,8 @@ fn test_pg_escape() {
 /// * `new_map` - The state group data generated by the compressor to
 ///               replace the old contents
 pub fn send_changes_to_db(
-    config: &Config,
+    db_url: &str,
+    room_id: &str,
     old_map: &BTreeMap<i64, StateGroupEntry>,
     new_map: &BTreeMap<i64, StateGroupEntry>,
 ) {
@@ -363,7 +505,7 @@ pub fn send_changes_to_db(
     builder.set_verify(SslVerifyMode::NONE);
     let connector = MakeTlsConnector::new(builder.build());
 
-    let mut client = Client::connect(&config.db_url, connector).unwrap();
+    let mut client = Client::connect(db_url, connector).unwrap();
 
     println!("Writing changes...");
 
@@ -375,7 +517,7 @@ pub fn send_changes_to_db(
     pb.set_message("state groups");
     pb.enable_steady_tick(100);
 
-    for sql_transaction in generate_sql(old_map, new_map, &config.room_id) {
+    for sql_transaction in generate_sql(old_map, new_map, room_id) {
         // commit this change to the database
         // N.B. this is a synchronous library so will wait until finished before continuing...
        // if you want to speed up the compressor then this might be a good place to start!
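The last two hunks swap the `&Config` parameter of `send_changes_to_db` for explicit `db_url` and `room_id` arguments. A hedged sketch of the new call shape, with assumed import paths (only the signatures are taken from this diff):

```rust
use std::collections::BTreeMap;

// Assumed import paths - only the function signature matches this diff.
use synapse_compress_state::database::send_changes_to_db;
use synapse_compress_state::StateGroupEntry;

/// Hypothetical wrapper showing the new call shape: the caller now passes the
/// connection string and room id directly rather than a whole Config struct.
fn write_back(
    db_url: &str,
    room_id: &str,
    old_map: &BTreeMap<i64, StateGroupEntry>,
    new_map: &BTreeMap<i64, StateGroupEntry>,
) {
    // Previously: send_changes_to_db(&config, old_map, new_map);
    send_changes_to_db(db_url, room_id, old_map, new_map);
}
```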