@@ -13,14 +13,17 @@ use crate::{
     selection_criteria::ReadPreference,
     test::{
         run_spec_test,
-        EventClient,
+        Event,
+        EventHandler,
         FailCommandOptions,
         FailPoint,
         FailPointMode,
+        SdamEvent,
         TestClient,
         CLIENT_OPTIONS,
         LOCK,
     },
+    ServerType,
     RUNTIME,
 };
 
@@ -114,14 +117,13 @@ async fn load_balancing_test() {
 
     let mut setup_client_options = CLIENT_OPTIONS.clone();
 
-    // TODO: RUST-1004 unskip on auth variants
-    if setup_client_options.credential.is_some() {
-        println!("skipping load_balancing_test test due to auth being enabled");
+    if setup_client_options.load_balanced.unwrap_or(false) {
+        println!("skipping load_balancing_test test due to load-balanced topology");
         return;
     }
 
-    if setup_client_options.load_balanced.unwrap_or(false) {
-        println!("skipping load_balancing_test test due to load-balanced topology");
+    if setup_client_options.credential.is_some() {
+        println!("skipping load_balancing_test test due to auth being enabled");
         return;
     }
 
@@ -158,8 +160,14 @@ async fn load_balancing_test() {
 
     /// min_share is the lower bound for the % of times the less selected server
     /// was selected. max_share is the upper bound.
-    async fn do_test(client: &mut EventClient, min_share: f64, max_share: f64, iterations: usize) {
-        client.clear_cached_events();
+    async fn do_test(
+        client: &TestClient,
+        handler: &mut EventHandler,
+        min_share: f64,
+        max_share: f64,
+        iterations: usize,
+    ) {
+        handler.clear_cached_events();
 
         let mut handles: Vec<AsyncJoinHandle<()>> = Vec::new();
         for _ in 0..10 {
@@ -180,7 +188,7 @@ async fn load_balancing_test() {
         futures::future::join_all(handles).await;
 
         let mut tallies: HashMap<ServerAddress, u32> = HashMap::new();
-        for event in client.get_command_started_events(&["find"]) {
+        for event in handler.get_command_started_events(&["find"]) {
             *tallies.entry(event.connection.address.clone()).or_insert(0) += 1;
         }
 
@@ -203,10 +211,33 @@ async fn load_balancing_test() {
         );
     }
 
-    let mut client = EventClient::new().await;
+    let mut handler = EventHandler::new();
+    let mut subscriber = handler.subscribe();
+    let mut options = CLIENT_OPTIONS.clone();
+    options.local_threshold = Duration::from_secs(30).into();
+    let client = TestClient::with_handler(Some(Arc::new(handler.clone())), options).await;
+
+    // wait for both servers to be discovered.
+    subscriber
+        .wait_for_event(Duration::from_secs(30), |event| {
+            if let Event::Sdam(SdamEvent::TopologyDescriptionChanged(event)) = event {
+                event
+                    .new_description
+                    .servers()
+                    .into_iter()
+                    .filter(|s| matches!(s.1.server_type(), ServerType::Mongos))
+                    .count()
+                    == 2
+            } else {
+                false
+            }
+        })
+        .await
+        .expect("timed out waiting for both mongoses to be discovered");
+    drop(subscriber);
 
     // saturate pools
-    do_test(&mut client, 0.0, 0.50, 100).await;
+    do_test(&client, &mut handler, 0.0, 0.50, 100).await;
 
     // enable a failpoint on one of the mongoses to slow it down
     let options = FailCommandOptions::builder()
@@ -220,9 +251,9 @@ async fn load_balancing_test() {
         .expect("enabling failpoint should succeed");
 
     // verify that the lesser picked server (slower one) was picked less than 25% of the time.
-    do_test(&mut client, 0.05, 0.25, 10).await;
+    do_test(&client, &mut handler, 0.05, 0.25, 10).await;
 
     // disable failpoint and rerun, should be back to even split
     drop(fp_guard);
-    do_test(&mut client, 0.40, 0.50, 100).await;
+    do_test(&client, &mut handler, 0.40, 0.50, 100).await;
 }
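
Note on the bounds used above: min_share/max_share are compared against the per-address tallies that do_test builds from the "find" command-started events, but the assertion itself sits in unchanged lines that this diff does not show. A minimal sketch of such a check, assuming it is a simple ratio of the less-selected server's count to the total; check_share and the generic key K are hypothetical stand-ins for illustration, not driver API:

use std::collections::HashMap;

// Hypothetical sketch, not driver code: `K` stands in for `ServerAddress`.
// Asserts that the less-selected server's share of all tallied selections
// falls within [min_share, max_share].
fn check_share<K>(tallies: &HashMap<K, u32>, min_share: f64, max_share: f64) {
    let total: u32 = tallies.values().sum();
    let least = *tallies.values().min().expect("at least one server tallied");
    let share = f64::from(least) / f64::from(total);
    assert!(
        share >= min_share && share <= max_share,
        "expected less-selected share in [{min_share}, {max_share}], got {share}"
    );
}

fn main() {
    let mut tallies = HashMap::new();
    tallies.insert("mongos-a", 30u32); // the colder (less selected) mongos
    tallies.insert("mongos-b", 70u32); // the hotter mongos
    check_share(&tallies, 0.25, 0.50); // passes: 30 / 100 = 0.30
}

Under this reading, the final do_test(&client, &mut handler, 0.40, 0.50, 100) call asserts that once the failpoint guard is dropped, selections return to a roughly even split between the two mongoses.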