@@ -1,7 +1,7 @@
 use std::{collections::HashMap, sync::Arc, time::Duration};

 use approx::abs_diff_eq;
-use bson::Document;
+use bson::{doc, Document};
 use semver::VersionReq;
 use serde::Deserialize;
 use tokio::sync::RwLockWriteGuard;
@@ -114,6 +114,13 @@ async fn load_balancing_test() {
     let _guard: RwLockWriteGuard<_> = LOCK.run_exclusively().await;

     let mut setup_client_options = CLIENT_OPTIONS.clone();
+
+    // TODO: RUST-1004 unskip on auth variants
+    if setup_client_options.credential.is_some() {
+        println!("skipping load_balancing_test test due to auth being enabled");
+        return;
+    }
+
     setup_client_options.hosts.drain(1..);
     setup_client_options.direct_connection = Some(true);
     let setup_client = TestClient::with_options(Some(setup_client_options)).await;
@@ -137,21 +144,17 @@ async fn load_balancing_test() {
         return;
     }

-    let options = FailCommandOptions::builder()
-        .block_connection(Duration::from_millis(500))
-        .build();
-    let failpoint = FailPoint::fail_command(&["find"], FailPointMode::AlwaysOn, options);
-
-    let fp_guard = setup_client
-        .enable_failpoint(failpoint, None)
+    // seed the collection with a document so the find commands do some work
+    setup_client
+        .database("load_balancing_test")
+        .collection("load_balancing_test")
+        .insert_one(doc! {}, None)
         .await
-        .expect("enabling failpoint should succeed");
-
-    let mut client = EventClient::new().await;
+        .unwrap();

     /// min_share is the lower bound for the % of times the less selected server
     /// was selected. max_share is the upper bound.
-    async fn do_test(client: &mut EventClient, min_share: f64, max_share: f64) {
+    async fn do_test(client: &mut EventClient, min_share: f64, max_share: f64, iterations: usize) {
         client.clear_cached_events();

         let mut handles: Vec<AsyncJoinHandle<()>> = Vec::new();
@@ -162,7 +165,7 @@ async fn load_balancing_test() {
             handles.push(
                 RUNTIME
                     .spawn(async move {
-                        for _ in 0..10 {
+                        for _ in 0..iterations {
                             let _ = collection.find_one(None, None).await;
                         }
                     })
@@ -181,15 +184,41 @@ async fn load_balancing_test() {
         let mut counts: Vec<_> = tallies.values().collect();
         counts.sort();

-        // verify that the lesser picked server (slower one) was picked less than 25% of the time.
         let share_of_selections = (*counts[0] as f64) / ((*counts[0] + *counts[1]) as f64);
-        assert!(share_of_selections <= max_share);
-        assert!(share_of_selections >= min_share);
+        assert!(
+            share_of_selections <= max_share,
+            "expected no more than {}% of selections, instead got {}%",
+            (max_share * 100.0) as u32,
+            (share_of_selections * 100.0) as u32
+        );
+        assert!(
+            share_of_selections >= min_share,
+            "expected at least {}% of selections, instead got {}%",
+            (min_share * 100.0) as u32,
+            (share_of_selections * 100.0) as u32
+        );
     }

-    do_test(&mut client, 0.05, 0.25).await;
+    let mut client = EventClient::new().await;
+
+    // saturate pools
+    do_test(&mut client, 0.0, 0.50, 100).await;
+
+    // enable a failpoint on one of the mongoses to slow it down
+    let options = FailCommandOptions::builder()
+        .block_connection(Duration::from_millis(500))
+        .build();
+    let failpoint = FailPoint::fail_command(&["find"], FailPointMode::AlwaysOn, options);
+
+    let fp_guard = setup_client
+        .enable_failpoint(failpoint, None)
+        .await
+        .expect("enabling failpoint should succeed");
+
+    // verify that the lesser picked server (slower one) was picked less than 25% of the time.
+    do_test(&mut client, 0.05, 0.25, 10).await;

-    // disable failpoint and rerun, should be close to even split
+    // disable failpoint and rerun, should be back to even split
     drop(fp_guard);
-    do_test(&mut client, 0.40, 0.50).await;
+    do_test(&mut client, 0.40, 0.50, 100).await;
 }
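
Note: the bound check in do_test reduces to a small pure function. A minimal standalone sketch of that arithmetic (hypothetical helper name, not code from this commit):

// Given per-server selection tallies, the lesser-picked server's share
// is min / (min + max); the test asserts min_share <= share <= max_share.
fn share_of_lesser_picked(mut counts: Vec<u64>) -> f64 {
    counts.sort();
    counts[0] as f64 / ((counts[0] + counts[1]) as f64)
}

// e.g. with one mongos blocked for 500ms, tallies like [90, 910] give a
// share of 0.09, inside the (0.05, 0.25) window asserted above.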
0 commit comments
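For context, FailPoint::fail_command configures the server-side failCommand failpoint. Assuming the standard failpoint interface documented by MongoDB, the blocking behavior set up above corresponds roughly to this command (a sketch of what the helper issues, not the driver's actual code):

// Approximate configureFailPoint command for the options used above;
// blockConnection/blockTimeMS make the mongos stall each matched command.
doc! {
    "configureFailPoint": "failCommand",
    "mode": "alwaysOn",
    "data": {
        "failCommands": ["find"],
        "blockConnection": true,
        "blockTimeMS": 500,
    },
}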