@@ -5,6 +5,7 @@
 
 import argparse
 import inspect
+import json
 import logging
 import math
 import os
@@ -114,6 +115,14 @@ def _parse_args(self) -> str:
         per_test_arg_parser.add_argument(
             "--insecure", action="store_true", help="Disable SSL validation. Default is False.", default=False
         )
+        per_test_arg_parser.add_argument(
+            "-l", "--latency", action="store_true", help="Track per-operation latency statistics.", default=False
+        )
+        per_test_arg_parser.add_argument(
+            "--results-file",
+            type=str,
+            help="File path location to store the results for the test run.",
+        )
 
         # Per-test args
         self._test_class_to_run.add_arguments(per_test_arg_parser)
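
As a quick standalone check of the two new flags (a minimal sketch, not part of the diff; the sample argv values are hypothetical):

```python
import argparse

# Minimal sketch: the two new per-test flags in isolation.
parser = argparse.ArgumentParser()
parser.add_argument("-l", "--latency", action="store_true", default=False)
parser.add_argument("--results-file", type=str)

args = parser.parse_args(["--latency", "--results-file", "perf-results.json"])
assert args.latency is True
assert args.results_file == "perf-results.json"  # argparse maps --results-file to results_file
```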
@@ -264,13 +273,16 @@ async def start(self):
 
     def _report_results(self):
         """Calculate and log the test run results across all child processes"""
-        operations = []
+        total_operations = 0
+        operations_per_second = 0.0
+        latencies = []
         while not self.results.empty():
-            operations.append(self.results.get())
+            result: Tuple[int, int, float, List[float]] = self.results.get()
+            total_operations += result[1]
+            operations_per_second += result[1] / result[2] if result[2] else 0
+            latencies.extend(result[3])
 
-        total_operations = self._get_completed_operations(operations)
         self.logger.info("")
-        operations_per_second = self._get_operations_per_second(operations)
         if operations_per_second:
             seconds_per_operation = 1 / operations_per_second
             weighted_average_seconds = total_operations / operations_per_second
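
Reading the new loop: each queue entry is a `Tuple[int, int, float, List[float]]`, consumed as (first field, completed operations, elapsed seconds, per-operation latencies), and per-process throughputs are summed because the processes run in parallel. The conditional also guards against division by zero when a process reports no elapsed time. A worked example of the aggregation, with fabricated numbers and the assumption that the first tuple field is a process index:

```python
from typing import List, Tuple

# Two fabricated child-process results: (process_index?, completed_ops, elapsed_s, latencies_ms)
results: List[Tuple[int, int, float, List[float]]] = [
    (0, 1000, 10.0, [1.1, 2.2]),
    (1, 800, 10.0, [1.5, 3.0]),
]

total_operations = sum(r[1] for r in results)                      # 1800
operations_per_second = sum(r[1] / r[2] for r in results if r[2])  # 100.0 + 80.0 = 180.0
latencies = [l for r in results for l in r[3]]                     # [1.1, 2.2, 1.5, 3.0]
assert (total_operations, operations_per_second) == (1800, 180.0)
```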
@@ -282,6 +294,14 @@ def _report_results(self):
                     self._format_number(seconds_per_operation, 4),
                 )
             )
+
+            if self.per_test_args.latency and len(latencies) > 0:
+                self.logger.info("")
+                self._print_latencies(latencies)
+            if self.per_test_args.results_file:
+                # Not all tests will have a size argument
+                size = getattr(self.per_test_args, "size", None)
+                self._write_results_file(self.per_test_args.results_file, latencies, size)
         else:
             self.logger.info("Completed without generating operation statistics.")
         self.logger.info("")
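
The `getattr` fallback keeps result writing working for tests that never registered a `--size` argument; a tiny illustration with a stand-in namespace:

```python
from types import SimpleNamespace

# Stand-in for parsed per-test args from a test with no --size option.
args = SimpleNamespace(latency=True, results_file="out.json")
assert getattr(args, "size", None) is None  # falls back instead of raising AttributeError
```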
@@ -335,3 +355,18 @@ def _format_number(self, value, min_significant_digits):
         decimals = max(0, significant_digits - math.floor(log) - 1)
 
         return ("{:,." + str(decimals) + "f}").format(rounded)
+
+    def _print_latencies(self, latencies: List[float]):
+        self.logger.info("=== Latency Distribution ===")
+        latencies.sort()
+
+        percentiles = [50.0, 75.0, 90.0, 95.0, 99.0, 99.9, 100.0]
+        for p in percentiles:
+            index = math.ceil(p / 100 * len(latencies)) - 1
+            self.logger.info(f"{p:5.1f}% {latencies[index]:10.2f}ms")
+
+    def _write_results_file(self, path: str, latencies: List[float], size):
+        data = [{"Time": l, "Size": size} for l in latencies]
+        output = json.dumps(data, indent=2)
+        with open(path, "w", encoding="utf-8") as f:
+            f.write(output)
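
`_print_latencies` uses a nearest-rank percentile: for percentile p over n sorted samples it reads index ceil(p/100 * n) - 1. A worked example of that index math, plus the JSON shape `_write_results_file` emits (sample values fabricated):

```python
import json
import math

latencies = sorted([12.0, 15.0, 11.0, 30.0])     # [11.0, 12.0, 15.0, 30.0]
p = 75.0
index = math.ceil(p / 100 * len(latencies)) - 1  # ceil(3.0) - 1 == 2
assert latencies[index] == 15.0                  # 75th percentile, nearest-rank
assert latencies[math.ceil(100.0 / 100 * 4) - 1] == 30.0  # 100th percentile == max

# Shape of the results file, one record per operation:
print(json.dumps([{"Time": t, "Size": 1024} for t in latencies[:2]], indent=2))
```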