1
1
import os
2
2
import pandas as pd
3
+ import redis
4
+
3
5
from redisbench_admin .compare .compare import get_key_results_and_values
4
6
from redisbench_admin .utils .utils import retrieve_local_or_remote_input_json
5
7
import matplotlib .pyplot as plt
6
8
import seaborn as sns
9
+
7
10
sns .set (style = "darkgrid" )
8
11
from redistimeseries .client import Client
9
12
13
+
10
14
def get_timeserie_name (labels_kv_array ):
11
15
name = ""
12
16
for label_kv in labels_kv_array :
13
17
k = list (label_kv .keys ())[0 ]
14
18
v = list (label_kv .values ())[0 ]
15
- k = prepare_tags (k )
19
+ k = prepare_tags (k )
16
20
v = prepare_tags (v )
17
21
if name != "" :
18
22
name += ":"
@@ -47,7 +51,7 @@ def extract_benchmark_config_details(benchmark_result):
47
51
specifications_version = benchmark_config ["specifications-version" ]
48
52
testcase_description = benchmark_config ["description" ]
49
53
key_metrics_specs = benchmark_config ["key-metrics" ]
50
- return testcase_name , specifications_version , testcase_description ,key_metrics_specs
54
+ return testcase_name , specifications_version , testcase_description , key_metrics_specs
51
55
52
56
53
57
def extract_key_configs (benchmark_result ):
@@ -66,14 +70,14 @@ def extract_key_configs(benchmark_result):
66
70
return deployment_type , deployment_shards , project , project_version , git_sha
67
71
68
72
69
def add_datapoint(time_series_dict, broader_ts_name, start_time_ms, metric_value, tags_array):
    """Append one (timestamp, value) sample to a named time series.

    Lazily creates the series entry in ``time_series_dict`` on first use.
    ``tags_array`` is a list of single-key dicts (e.g. ``[{"project": "x"}]``);
    the first key/value of each element is flattened into a ``tags`` dict,
    while the original list is kept under ``tags-array``.
    """
    if broader_ts_name not in time_series_dict:
        flat_tags = {}
        for tag_kv in tags_array:
            # each element is expected to hold a single key/value pair;
            # mirror the original behavior of taking only the first one
            tag_key = next(iter(tag_kv.keys()))
            tag_value = next(iter(tag_kv.values()))
            flat_tags[tag_key] = tag_value
        time_series_dict[broader_ts_name] = {
            "index": [],
            "data": [],
            "tags-array": tags_array,
            "tags": flat_tags,
        }
    series = time_series_dict[broader_ts_name]
    series["index"].append(start_time_ms)
    series["data"].append(metric_value)
79
83
@@ -85,9 +89,9 @@ def split_tags_string(extra_tags):
85
89
for extra_tag in extra_tags :
86
90
kv = extra_tag .split ("=" )
87
91
if len (kv ) == 2 :
88
- k = prepare_tags (kv [0 ])
92
+ k = prepare_tags (kv [0 ])
89
93
v = prepare_tags (kv [1 ])
90
- result .append ({k :v })
94
+ result .append ({k : v })
91
95
return result
92
96
93
97
@@ -99,33 +103,36 @@ def split_key_metrics_by_step(key_metrics_specs):
99
103
step = key_metric_spec ["step" ]
100
104
metric_name = key_metric_spec ["metric-name" ]
101
105
if step not in key_metrics_by_step :
102
- key_metrics_by_step [step ]= {}
103
- key_metrics_by_step [step ][metric_name ]= key_metric_spec
106
+ key_metrics_by_step [step ] = {}
107
+ key_metrics_by_step [step ][metric_name ] = key_metric_spec
104
108
return key_metrics_by_step
105
109
106
def get_or_None(dict, property):
    """Return ``dict[property]`` if the key is present, else ``None``.

    This is exactly ``dict.get``; the manual membership-test-then-index
    form is replaced with the idiomatic single lookup.

    NOTE(review): the parameter names shadow the builtins ``dict`` and
    ``property``; they are kept unchanged for interface compatibility.
    """
    return dict.get(property)
111
116
117
+
112
118
def get_metric_detail (key_metric_spec ):
113
- metric_step = get_or_None ( key_metric_spec , "step" )
114
- metric_family = get_or_None ( key_metric_spec , "metric-family" )
119
+ metric_step = get_or_None (key_metric_spec , "step" )
120
+ metric_family = get_or_None (key_metric_spec , "metric-family" )
115
121
metric_json_path = get_or_None (key_metric_spec , "metric-json-path" )
116
- metric_name = get_or_None ( key_metric_spec , "metric-name" )
117
- metric_unit = get_or_None ( key_metric_spec , "unit" )
118
- metric_type = get_or_None ( key_metric_spec , "metric-type" )
119
- metric_comparison = get_or_None ( key_metric_spec , "comparison" )
120
- metric_per_step_comparison_priority = get_or_None ( key_metric_spec , "per-step-comparison-metric-priority" )
121
- return metric_step ,metric_family ,metric_json_path ,metric_name ,metric_unit ,metric_type ,metric_comparison ,metric_per_step_comparison_priority
122
+ metric_name = get_or_None (key_metric_spec , "metric-name" )
123
+ metric_unit = get_or_None (key_metric_spec , "unit" )
124
+ metric_type = get_or_None (key_metric_spec , "metric-type" )
125
+ metric_comparison = get_or_None (key_metric_spec , "comparison" )
126
+ metric_per_step_comparison_priority = get_or_None (key_metric_spec , "per-step-comparison-metric-priority" )
127
+ return metric_step , metric_family , metric_json_path , metric_name , metric_unit , metric_type , metric_comparison , metric_per_step_comparison_priority
128
+
122
129
123
130
def export_command_logic (args ):
124
131
benchmark_files = args .benchmark_result_files
125
132
local_path = os .path .abspath (args .local_dir )
126
133
use_result = args .use_result
127
134
included_steps = args .steps .split ("," )
128
-
135
+
129
136
extra_tags_array = split_tags_string (args .extra_tags )
130
137
print (extra_tags_array )
131
138
results_type = "key-results"
@@ -134,10 +141,12 @@ def export_command_logic(args):
134
141
for filename , benchmark_result in benchmark_results .items ():
135
142
print (filename )
136
143
key_result_steps = benchmark_result [results_type ].keys ()
137
- testcase_name , specifications_version , testcase_description ,key_metrics_specs = extract_benchmark_config_details (benchmark_result )
144
+ testcase_name , specifications_version , testcase_description , key_metrics_specs = extract_benchmark_config_details (
145
+ benchmark_result )
138
146
key_metrics_specs_per_step = split_key_metrics_by_step (key_metrics_specs )
139
147
deployment_type , deployment_shards , project , project_version , git_sha = extract_key_configs (benchmark_result )
140
- start_time_ms , start_time_humanized , end_time_ms , end_time_humanized , duration_ms , duration_humanized = extract_benchmark_run_info_details (benchmark_result )
148
+ start_time_ms , start_time_humanized , end_time_ms , end_time_humanized , duration_ms , duration_humanized = extract_benchmark_run_info_details (
149
+ benchmark_result )
141
150
142
151
for step in key_result_steps :
143
152
common_broader_kv_tags = [
@@ -168,8 +177,7 @@ def export_command_logic(args):
168
177
git_sha_ts_name = get_timeserie_name (git_sha_kv )
169
178
170
179
key_metric_spec = key_metrics_specs [metric_name ]
171
- metric_step , metric_family , _ , _ , metric_unit , _ , _ ,_ = get_metric_detail (key_metric_spec )
172
-
180
+ metric_step , metric_family , _ , _ , metric_unit , _ , _ , _ = get_metric_detail (key_metric_spec )
173
181
174
182
# add_datapoint(time_series_dict,broader_ts_name,start_time_ms,metric_value,tags_kv)
175
183
# add_datapoint(time_series_dict, version_ts_name, start_time_ms, metric_value, tags_kv)
@@ -178,30 +186,18 @@ def export_command_logic(args):
178
186
git_sha_tags_kv .extend (
179
187
[{"metric-step" : metric_step }, {"metric-family" : metric_family }, {"metric-unit" : metric_unit }])
180
188
add_datapoint (time_series_dict , git_sha_ts_name , start_time_ms , metric_value , git_sha_tags_kv )
181
- # print(broader_ts_name)
182
- # print(version_ts_name)
183
- # print(git_sha_ts_name)
184
- # print(time_series_dict)
185
- rts = Client ()
189
+ rts = Client (host = args .host ,port = args .port ,password = args .password )
186
190
for timeseries_name , time_series in time_series_dict .items ():
187
- rts .create (timeseries_name ,labels = time_series ['tags' ])
188
- for pos ,timestamp in enumerate (time_series ['index' ]):
191
+ try :
192
+ rts .create (timeseries_name , labels = time_series ['tags' ])
193
+ except redis .exceptions .ResponseError :
194
+ # if ts already exists continue
195
+ pass
196
+ for pos , timestamp in enumerate (time_series ['index' ]):
189
197
value = time_series ['data' ][pos ]
190
- rts .add (timeseries_name , timestamp , value )
191
-
192
- #
193
- # index = pd.to_datetime(time_series['index'], unit='ms')
194
- # df = pd.Series(time_series['data'], index=index)
195
- # print(df)
196
-
197
- # import matplotlib.pyplot as plt
198
- # fig, ax = plt.subplots()
199
- # # Plot the responses for different events and regions
200
- # sns.lineplot( x="timepoint", y="signal",
201
- # data=df, ax=ax)
202
- # bottom, top = plt.ylim()
203
- # plt.ylim(0,top)
204
- # plt.savefig('{}.png'.format(timeseries_name))
205
-
206
-
198
+ try :
199
+ rts .add (timeseries_name , timestamp , value )
200
+ except redis .exceptions .ResponseError :
201
+ # if ts already exists continue
202
+ pass
207
203
0 commit comments