@@ -12,6 +12,9 @@
 
 
 from redisbench_admin.export.common.common import split_tags_string
+from redisbench_admin.export.google_benchmark.google_benchmark_json_format import (
+    generate_summary_json_google_benchmark,
+)
 from redisbench_admin.export.pyperf.pyperf_json_format import (
     generate_summary_json_pyperf,
 )
@@ -50,10 +53,13 @@ def export_command_logic(args, project_name, project_version):
5053 "You need to specify at least one (or more) of --deployment-version --github_branch arguments"
5154 )
5255 exit (1 )
53- if results_format != "csv" and results_format != "pyperf-json" :
56+ non_required_spec = ["csv" , "pyperf-json" , "google.benchmark" ]
57+ if results_format not in non_required_spec :
5458 if exporter_spec_file is None :
5559 logging .error (
56- "--exporter-spec-file is required for all formats with exception of csv and pyperf-json"
60+ "--exporter-spec-file is required for all formats with exception of {}" .format (
61+ "," .join (non_required_spec )
62+ )
5763 )
5864 exit (1 )
5965 else :
@@ -76,6 +82,22 @@ def export_command_logic(args, project_name, project_version):
         with open(benchmark_file, "r") as json_file:
             start_dict = json.load(json_file)
             results_dict = generate_summary_json_pyperf(start_dict)
+    if results_format == "google.benchmark":
+        with open(benchmark_file, "r") as json_file:
+            # override test names
+            print_warning = False
+            old_test_name = test_name
+            if test_name is not None:
+                print_warning = True
+            start_dict = json.load(json_file)
+            results_dict, test_name = generate_summary_json_google_benchmark(start_dict)
+            if print_warning:
+                logging.warning(
+                    "You've specified a test name {} but for google.benchmark we override it based on the test names retrieved from the input file: {}".format(
+                        old_test_name, test_name
+                    )
+                )
+
     if args.override_test_time:
         datapoints_timestamp = int(args.override_test_time.timestamp() * 1000.0)
         logging.info(
@@ -120,9 +142,9 @@ def export_command_logic(args, project_name, project_version):
         triggering_env,
     )
     logging.info("Parsed a total of {} metrics".format(len(timeseries_dict.keys())))
-    if results_format == "pyperf-json":
-        logging.info("Parsing pyperf format into timeseries format")
-        timeseries_dict = export_pyperf_json_to_timeseries_dict(
+    if results_format == "pyperf-json" or results_format == "google.benchmark":
+        logging.info("Parsing {} format into timeseries format".format(results_format))
+        timeseries_dict = export_json_to_timeseries_dict(
             results_dict,
             break_by_dict,
             datapoints_timestamp,
@@ -181,7 +203,7 @@ def export_command_logic(args, project_name, project_version):
     )
 
 
-def export_pyperf_json_to_timeseries_dict(
+def export_json_to_timeseries_dict(
     benchmark_file,
     break_by_dict,
     datapoints_timestamp,
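
For context on the new format: Google Benchmark's `--benchmark_format=json` output carries a top-level `benchmarks` array whose entries hold `name`, `iterations`, `real_time`, `cpu_time`, and `time_unit` fields. Below is a minimal sketch of a summary generator over that layout, mirroring the `results_dict, test_name` return shape used in the diff; the body is illustrative only, not the repo's actual `generate_summary_json_google_benchmark`:

```python
# Sketch only: assumes Google Benchmark's standard JSON output shape
# ({"benchmarks": [{"name": ..., "real_time": ..., "cpu_time": ...}, ...]}).
def generate_summary_sketch(start_dict):
    results_dict = {}
    test_names = []
    for bench in start_dict.get("benchmarks", []):
        test_names.append(bench["name"])
        # Keep the numeric fields so each one can become a timeseries metric.
        results_dict[bench["name"]] = {
            metric: value
            for metric, value in bench.items()
            if isinstance(value, (int, float))
        }
    # The test name is derived from the file itself, which is why any
    # test name passed on the command line gets overridden (and warned about).
    return results_dict, ",".join(test_names)


sample = {
    "benchmarks": [
        {"name": "BM_Set", "iterations": 1000, "real_time": 12.5, "cpu_time": 12.1},
        {"name": "BM_Get", "iterations": 2000, "real_time": 6.3, "cpu_time": 6.0},
    ]
}
summary, name = generate_summary_sketch(sample)
print(name)  # BM_Set,BM_Get
```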
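The rename from `export_pyperf_json_to_timeseries_dict` to `export_json_to_timeseries_dict` works because both summary formats reduce to the same shape: numeric metrics keyed by test name. A hedged sketch of such a converter (parameter names from the diff's call site; the body and the key/label layout are hypothetical, not the repo's implementation):

```python
# Hypothetical body: assumes results_dict maps {test_name: {metric: value}}
# and break_by_dict maps break-by labels to values, e.g. {"branch": "master"}.
def export_json_to_timeseries_sketch(results_dict, break_by_dict, datapoints_timestamp):
    timeseries_dict = {}
    for test_name, metrics in results_dict.items():
        for metric_name, value in metrics.items():
            for break_by_key, break_by_value in break_by_dict.items():
                # One timeseries per (break-by value, test, metric) combination.
                ts_key = "{}={}/{}/{}".format(
                    break_by_key, break_by_value, test_name, metric_name
                )
                timeseries_dict[ts_key] = {
                    "labels": {
                        "test_name": test_name,
                        "metric": metric_name,
                        break_by_key: break_by_value,
                    },
                    "data": {datapoints_timestamp: value},
                }
    return timeseries_dict
```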