 
 
 from redisbench_admin.export.common.common import split_tags_string
+from redisbench_admin.export.pyperf.pyperf_json_format import (
+    generate_summary_json_pyperf,
+)
 from redisbench_admin.run.git import git_vars_crosscheck
 
 from redisbench_admin.run.redistimeseries import timeseries_test_sucess_flow
@@ -47,10 +50,10 @@ def export_command_logic(args, project_name, project_version):
             "You need to specify at least one (or more) of --deployment-version --github_branch arguments"
         )
         exit(1)
-    if results_format != "csv":
+    if results_format != "csv" and results_format != "pyperf-json":
         if exporter_spec_file is None:
             logging.error(
-                "--exporter-spec-file is required for all formats with exception of csv"
+                "--exporter-spec-file is required for all formats with exception of csv and pyperf-json"
             )
             exit(1)
         else:
@@ -69,6 +72,10 @@ def export_command_logic(args, project_name, project_version):
     if results_format == "json":
         with open(benchmark_file, "r") as json_file:
             results_dict = json.load(json_file)
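+    # pyperf output is first reduced to a {test_name: {metric_name: value}} summary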
+    if results_format == "pyperf-json":
+        with open(benchmark_file, "r") as json_file:
+            start_dict = json.load(json_file)
+            results_dict = generate_summary_json_pyperf(start_dict)
     if args.override_test_time:
         datapoints_timestamp = int(args.override_test_time.timestamp() * 1000.0)
         logging.info(
@@ -90,7 +97,7 @@ def export_command_logic(args, project_name, project_version):
             datetime.datetime.now(datetime.timezone.utc).timestamp() * 1000.0
         )
         logging.warning(
-            "Error while trying to parse datapoints timestamp. Using current system timestamp Error : {}".format(
+            "Error while trying to parse datapoints timestamp. Using current system timestamp: {}".format(
                 datapoints_timestamp
             )
         )
@@ -113,6 +120,20 @@ def export_command_logic(args, project_name, project_version):
             triggering_env,
         )
         logging.info("Parsed a total of {} metrics".format(len(timeseries_dict.keys())))
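+    # pyperf-json needs no exporter spec file; metrics are mapped to timeseries directly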
+    if results_format == "pyperf-json":
+        logging.info("Parsing pyperf format into timeseries format")
+        timeseries_dict = export_pyperf_json_to_timeseries_dict(
+            results_dict,
+            break_by_dict,
+            datapoints_timestamp,
+            deployment_name,
+            deployment_type,
+            extra_tags_dict,
+            github_org,
+            github_repo,
+            triggering_env,
+        )
+        logging.info("Parsed a total of {} metrics".format(len(timeseries_dict.keys())))
     logging.info(
         "Checking connection to RedisTimeSeries to host: {}:{}".format(
             args.redistimeseries_host, args.redistimeseries_port
@@ -160,6 +181,48 @@ def export_command_logic(args, project_name, project_version):
     )
 
 
+def export_pyperf_json_to_timeseries_dict(
+    benchmark_file,
+    break_by_dict,
+    datapoints_timestamp,
+    deployment_name,
+    deployment_type,
+    extra_tags_dict,
+    tf_github_org,
+    tf_github_repo,
+    triggering_env,
+):
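+    # benchmark_file is the summarized pyperf dict ({test_name: {metric_name: value}})
+    # produced by generate_summary_json_pyperf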
+    results_dict = {}
+    for test_name, d in benchmark_file.items():
+        for metric_name, metric_value in d.items():
+            for break_by_key, break_by_value in break_by_dict.items():
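+                # emit one timeseries per (test, metric, break-by) combination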
+                break_by_str = "by.{}".format(break_by_key)
+                timeserie_tags, ts_name = get_ts_tags_and_name(
+                    break_by_key,
+                    break_by_str,
+                    break_by_value,
+                    None,
+                    deployment_name,
+                    deployment_type,
+                    extra_tags_dict,
+                    metric_name,
+                    metric_name,
+                    metric_name,
+                    triggering_env,
+                    test_name,
+                    metric_name,
+                    tf_github_org,
+                    tf_github_repo,
+                    triggering_env,
+                    False,
+                )
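+                # a single datapoint per series, keyed by the export timestamp (ms)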
+                results_dict[ts_name] = {
+                    "labels": timeserie_tags.copy(),
+                    "data": {datapoints_timestamp: metric_value},
+                }
+    return results_dict
+
+
 def export_opereto_csv_to_timeseries_dict(
     benchmark_file,
     break_by_dict,
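
A minimal usage sketch (not part of the diff), assuming the function lands in
redisbench_admin/export/export.py and is fed the {test_name: {metric_name: value}}
summary that generate_summary_json_pyperf returns; all argument values below are
hypothetical:

    from redisbench_admin.export.export import export_pyperf_json_to_timeseries_dict

    summary = {"bench_json_loads": {"mean": 0.0021}}
    timeseries = export_pyperf_json_to_timeseries_dict(
        summary,
        {"branch": "master"},  # break_by_dict
        1650000000000,  # datapoints_timestamp, in milliseconds
        "oss-standalone",  # deployment_name
        "oss-standalone",  # deployment_type
        {},  # extra_tags_dict
        "redis",  # tf_github_org
        "redis",  # tf_github_repo
        "ci",  # triggering_env
    )
    # one entry per (test, metric, break-by) combination, each holding
    # {"labels": {...}, "data": {1650000000000: 0.0021}}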