11import requests
2- import sys
32import random
43from collections import defaultdict
54import os
87import time
98import argparse
109
10+
1111class BenchmarkRunner :
12- def __init__ (self ):
12+ def __init__ (self , gps_traces_file_path ):
1313 self .coordinates = []
1414 self .tracks = defaultdict (list )
1515
16- gps_traces_file_path = os .path .expanduser ('~/gps_traces.csv' )
16+ gps_traces_file_path = os .path .expanduser (gps_traces_file_path )
1717 with open (gps_traces_file_path , 'r' ) as file :
1818 reader = csv .DictReader (file )
1919 for row in reader :
@@ -36,10 +36,9 @@ def run(self, benchmark_name, host, num_requests, warmup_requests=50):
3636 response = requests .get (url )
3737 end_time = time .time ()
3838 if response .status_code != 200 :
39- if benchmark_name == 'match' :
40- code = response .json ()['code' ]
41- if code == 'NoSegment' or code == 'NoMatch' :
42- continue
39+ code = response .json ()['code' ]
40+ if code in ['NoSegment' , 'NoMatch' , 'NoRoute' , 'NoTrips' ]:
41+ continue
4342 raise Exception (f"Error: { response .status_code } { response .text } " )
4443 times .append ((end_time - start_time ) * 1000 ) # convert to ms
4544
@@ -54,7 +53,7 @@ def make_url(self, host, benchmark_name):
5453 end_coord = f"{ end [1 ]:.6f} ,{ end [0 ]:.6f} "
5554 return f"{ host } /route/v1/driving/{ start_coord } ;{ end_coord } ?overview=full&steps=true"
5655 elif benchmark_name == 'table' :
57- num_coords = random .randint (3 , 100 )
56+ num_coords = random .randint (3 , 12 )
5857 selected_coords = random .sample (self .coordinates , num_coords )
5958 coords_str = ";" .join ([f"{ coord [1 ]:.6f} ,{ coord [0 ]:.6f} " for coord in selected_coords ])
6059 return f"{ host } /table/v1/driving/{ coords_str } "
@@ -77,26 +76,63 @@ def make_url(self, host, benchmark_name):
7776 else :
7877 raise Exception (f"Unknown benchmark: { benchmark_name } " )
7978
def bootstrap_confidence_interval(data, num_samples=1000, confidence_level=0.95):
    """Estimate the mean of *data* with a percentile-bootstrap interval.

    Draws ``num_samples`` resamples with replacement, each the same size as
    ``data``, and records the mean of every resample. The interval bounds are
    the symmetric percentiles of those resample means at ``confidence_level``.

    Returns:
        Tuple ``(mean, lower_bound, upper_bound)`` where ``mean`` is the
        average of the resample means.
    """
    sample_size = len(data)
    means = [
        np.mean(np.random.choice(data, size=sample_size, replace=True))
        for _ in range(num_samples)
    ]
    # Split the leftover probability mass evenly between the two tails.
    tail = (1 - confidence_level) / 2
    lower_bound = np.percentile(means, tail * 100)
    upper_bound = np.percentile(means, (1 - tail) * 100)
    return np.mean(means), lower_bound, upper_bound
88+
def calculate_confidence_interval(data):
    """Summarize *data* as ``(mean, ci_half_width, min_value)``.

    The half-width is derived from the bootstrap percentile interval
    produced by ``bootstrap_confidence_interval``; the minimum is taken
    over the raw values, not over the resamples.
    """
    mean, lower, upper = bootstrap_confidence_interval(data)
    half_width = (upper - lower) / 2
    return mean, half_width, np.min(data)
93+
94+
def main():
    """Parse CLI arguments, run the chosen benchmark for several iterations,
    and print bootstrap-based latency/throughput statistics."""
    parser = argparse.ArgumentParser(description='Run GPS benchmark tests.')
    parser.add_argument('--host', type=str, required=True, help='Host URL')
    parser.add_argument('--method', type=str, required=True, choices=['route', 'table', 'match', 'nearest', 'trip'], help='Benchmark method')
    parser.add_argument('--num_requests', type=int, required=True, help='Number of requests to perform')
    parser.add_argument('--iterations', type=int, required=True, help='Number of iterations to run the benchmark')
    parser.add_argument('--gps_traces_file_path', type=str, required=True, help='Path to the GPS traces file')

    args = parser.parse_args()

    # Fixed seed so the bootstrap resampling is reproducible between runs.
    np.random.seed(42)

    runner = BenchmarkRunner(args.gps_traces_file_path)

    all_times = []
    for _ in range(args.iterations):
        # Re-seed per iteration so every iteration issues the identical
        # request sequence, making the per-iteration timings comparable.
        random.seed(42)
        times = runner.run(args.method, args.host, args.num_requests)
        all_times.append(times)

    # Each iteration may record fewer samples than requested (the runner skips
    # NoRoute/NoMatch-style responses), so verify all rows have equal length
    # before stacking into a 2-D array. This replaces the previous tautological
    # `assert all_times.shape == (iterations, all_times.shape[1])`, which
    # compared the second dimension with itself, failed with an opaque
    # IndexError on ragged input, and was stripped entirely under `python -O`.
    row_lengths = {len(times) for times in all_times}
    if len(row_lengths) != 1:
        raise ValueError(f"Iterations produced differing sample counts: {sorted(row_lengths)}")
    all_times = np.asarray(all_times)

    total_time, total_ci, total_best = calculate_confidence_interval(np.sum(all_times, axis=1))
    # NOTE(review): the third value is the per-iteration *minimum*; for ops/s
    # the minimum is the worst throughput, yet it is printed as "Best" below —
    # confirm whether the maximum was intended here.
    ops_per_sec, ops_per_sec_ci, ops_per_sec_best = calculate_confidence_interval(float(all_times.shape[1]) / np.sum(all_times / 1000, axis=1))
    min_time, min_ci, _ = calculate_confidence_interval(np.min(all_times, axis=1))
    mean_time, mean_ci, _ = calculate_confidence_interval(np.mean(all_times, axis=1))
    median_time, median_ci, _ = calculate_confidence_interval(np.median(all_times, axis=1))
    perc_95_time, perc_95_ci, _ = calculate_confidence_interval(np.percentile(all_times, 95, axis=1))
    perc_99_time, perc_99_ci, _ = calculate_confidence_interval(np.percentile(all_times, 99, axis=1))
    max_time, max_ci, _ = calculate_confidence_interval(np.max(all_times, axis=1))

    print(f'Ops: {ops_per_sec:.2f} ± {ops_per_sec_ci:.2f} ops/s. Best: {ops_per_sec_best:.2f} ops/s')
    print(f'Total: {total_time:.2f} ms ± {total_ci:.2f} ms. Best: {total_best:.2f} ms')
    print(f"Min time: {min_time:.2f} ms ± {min_ci:.2f} ms")
    print(f"Mean time: {mean_time:.2f} ms ± {mean_ci:.2f} ms")
    print(f"Median time: {median_time:.2f} ms ± {median_ci:.2f} ms")
    print(f"95th percentile: {perc_95_time:.2f} ms ± {perc_95_ci:.2f} ms")
    print(f"99th percentile: {perc_99_time:.2f} ms ± {perc_99_ci:.2f} ms")
    print(f"Max time: {max_time:.2f} ms ± {max_ci:.2f} ms")
100136
# Script entry point: run the benchmark only when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    main()
0 commit comments