2
2
import json
3
3
import logging
4
4
import os
5
+ import shutil
5
6
import sys
6
7
import tempfile
7
8
import traceback
8
9
from pathlib import Path
9
- import shutil
10
10
11
+ import docker
12
+ import redis
13
+ from docker .models .containers import Container
14
+ from pytablewriter import CsvTableWriter , MarkdownTableWriter
11
15
from redisbench_admin .profilers .profilers_local import (
12
16
check_compatible_system_and_kernel_and_prepare_profile ,
13
- profilers_start_if_required ,
14
17
local_profilers_platform_checks ,
18
+ profilers_start_if_required ,
15
19
profilers_stop_if_required ,
16
20
)
17
- import docker
18
- import redis
19
- from docker .models .containers import Container
20
- from pytablewriter import MarkdownTableWriter
21
- from pytablewriter import CsvTableWriter
22
-
23
21
from redisbench_admin .run .common import (
24
- get_start_time_vars ,
25
- prepare_benchmark_parameters ,
26
22
execute_init_commands ,
23
+ get_start_time_vars ,
27
24
merge_default_and_config_metrics ,
25
+ prepare_benchmark_parameters ,
28
26
)
29
27
from redisbench_admin .run .metrics import extract_results_table
30
28
from redisbench_admin .run .redistimeseries import timeseries_test_sucess_flow
31
29
from redisbench_admin .run .run import calculate_client_tool_duration_and_check
32
- from redisbench_admin .utils .benchmark_config import (
33
- get_final_benchmark_config ,
34
- )
30
+ from redisbench_admin .utils .benchmark_config import get_final_benchmark_config
35
31
from redisbench_admin .utils .local import get_local_run_full_filename
36
32
from redisbench_admin .utils .results import post_process_benchmark_results
37
33
38
34
from redis_benchmarks_specification .__common__ .env import (
39
- LOG_FORMAT ,
40
35
LOG_DATEFMT ,
36
+ LOG_FORMAT ,
41
37
LOG_LEVEL ,
42
38
REDIS_HEALTH_CHECK_INTERVAL ,
43
39
REDIS_SOCKET_TIMEOUT ,
49
45
)
50
46
from redis_benchmarks_specification .__common__ .runner import extract_testsuites
51
47
from redis_benchmarks_specification .__common__ .spec import (
52
- extract_client_cpu_limit ,
53
48
extract_client_container_image ,
49
+ extract_client_cpu_limit ,
54
50
extract_client_tool ,
55
51
)
56
52
from redis_benchmarks_specification .__runner__ .args import create_client_runner_args
59
55
def main ():
60
56
_ , _ , project_version = populate_with_poetry_data ()
61
57
project_name_suffix = "redis-benchmarks-spec-client-runner"
62
- project_name = "{ } (solely client)". format ( project_name_suffix )
58
+ project_name = f" { project_name_suffix } (solely client)"
63
59
parser = create_client_runner_args (
64
60
get_version_string (project_name , project_version )
65
61
)
@@ -70,7 +66,7 @@ def main():
70
66
71
67
def run_client_runner_logic (args , project_name , project_name_suffix , project_version ):
72
68
if args .logname is not None :
73
- print ("Writting log to {}" . format ( args .logname ) )
69
+ print (f "Writting log to { args .logname } " )
74
70
logging .basicConfig (
75
71
filename = args .logname ,
76
72
filemode = "a" ,
@@ -114,14 +110,15 @@ def run_client_runner_logic(args, project_name, project_name_suffix, project_ver
114
110
args .datasink_redistimeseries_port ,
115
111
)
116
112
)
117
- logging .error ("Error message {}" . format ( e .__str__ ()) )
113
+ logging .error (f "Error message { e .__str__ ()} " )
118
114
exit (1 )
119
115
running_platform = args .platform_name
120
116
tls_enabled = args .tls
121
117
tls_skip_verify = args .tls_skip_verify
122
118
tls_cert = args .cert
123
119
tls_key = args .key
124
120
tls_cacert = args .cacert
121
+ resp_version = args .resp
125
122
client_aggregated_results_folder = args .client_aggregated_results_folder
126
123
preserve_temporary_client_dirs = args .preserve_temporary_client_dirs
127
124
docker_client = docker .from_env ()
@@ -158,6 +155,7 @@ def run_client_runner_logic(args, project_name, project_name_suffix, project_ver
158
155
tls_cacert ,
159
156
client_aggregated_results_folder ,
160
157
preserve_temporary_client_dirs ,
158
+ resp_version ,
161
159
)
162
160
163
161
@@ -173,13 +171,14 @@ def prepare_memtier_benchmark_parameters(
173
171
tls_cert = None ,
174
172
tls_key = None ,
175
173
tls_cacert = None ,
174
+ resp_version = None ,
176
175
):
177
176
benchmark_command = [
178
177
full_benchmark_path ,
179
178
"--port" ,
180
- "{}" . format ( port ) ,
179
+ f" { port } " ,
181
180
"--server" ,
182
- "{}" . format ( server ) ,
181
+ f" { server } " ,
183
182
"--json-out-file" ,
184
183
local_benchmark_output_filename ,
185
184
]
@@ -194,6 +193,14 @@ def prepare_memtier_benchmark_parameters(
194
193
if tls_skip_verify :
195
194
benchmark_command .append ("--tls-skip-verify" )
196
195
196
+ if resp_version :
197
+ tool = clientconfig ["tool" ]
198
+ if tool == "memtier_benchmark" :
199
+ benchmark_command .extend (["--resp" , resp_version ])
200
+ elif tool == "redis-benchmark" :
201
+ if resp_version == "3" :
202
+ benchmark_command .append ("-3" )
203
+
197
204
if oss_cluster_api_enabled is True :
198
205
benchmark_command .append ("--cluster-mode" )
199
206
benchmark_command_str = " " .join (benchmark_command )
@@ -222,6 +229,7 @@ def process_self_contained_coordinator_stream(
222
229
tls_cacert = None ,
223
230
client_aggregated_results_folder = "" ,
224
231
preserve_temporary_client_dirs = False ,
232
+ resp_version = None ,
225
233
):
226
234
overall_result = True
227
235
results_matrix = []
@@ -245,6 +253,7 @@ def process_self_contained_coordinator_stream(
245
253
246
254
for topology_spec_name in benchmark_config ["redis-topologies" ]:
247
255
test_result = False
256
+ benchmark_tool_global = ""
248
257
try :
249
258
current_cpu_pos = args .cpuset_start_pos
250
259
temporary_dir_client = tempfile .mkdtemp (dir = home )
@@ -280,7 +289,7 @@ def process_self_contained_coordinator_stream(
280
289
redis_pids .append (first_redis_pid )
281
290
282
291
setup_name = "oss-standalone"
283
- github_actor = "{ }-{}" . format ( tf_triggering_env , running_platform )
292
+ github_actor = f" { tf_triggering_env } -{ running_platform } "
284
293
dso = "redis-server"
285
294
profilers_artifacts_matrix = []
286
295
@@ -344,17 +353,19 @@ def process_self_contained_coordinator_stream(
344
353
test_tls_cert ,
345
354
test_tls_key ,
346
355
test_tls_cacert ,
356
+ resp_version ,
347
357
)
348
358
349
359
execute_init_commands (
350
360
benchmark_config , r , dbconfig_keyname = "dbconfig"
351
361
)
352
362
353
363
benchmark_tool = extract_client_tool (benchmark_config )
364
+ benchmark_tool_global = benchmark_tool
354
365
# backwards compatible
355
366
if benchmark_tool is None :
356
367
benchmark_tool = "redis-benchmark"
357
- full_benchmark_path = "/usr/local/bin/{}" . format ( benchmark_tool )
368
+ full_benchmark_path = f "/usr/local/bin/{ benchmark_tool } "
358
369
359
370
# setup the benchmark
360
371
(
@@ -404,6 +415,7 @@ def process_self_contained_coordinator_stream(
404
415
test_tls_cert ,
405
416
test_tls_key ,
406
417
test_tls_cacert ,
418
+ resp_version ,
407
419
)
408
420
409
421
client_container_image = extract_client_container_image (
@@ -491,9 +503,7 @@ def process_self_contained_coordinator_stream(
491
503
full_result_path = "{}/{}" .format (
492
504
temporary_dir_client , local_benchmark_output_filename
493
505
)
494
- logging .info (
495
- "Reading results json from {}" .format (full_result_path )
496
- )
506
+ logging .info (f"Reading results json from { full_result_path } " )
497
507
498
508
with open (
499
509
full_result_path ,
@@ -518,9 +528,7 @@ def process_self_contained_coordinator_stream(
518
528
519
529
dataset_load_duration_seconds = 0
520
530
521
- logging .info (
522
- "Using datapoint_time_ms: {}" .format (datapoint_time_ms )
523
- )
531
+ logging .info (f"Using datapoint_time_ms: { datapoint_time_ms } " )
524
532
525
533
timeseries_test_sucess_flow (
526
534
datasink_push_results_redistimeseries ,
@@ -587,17 +595,15 @@ def process_self_contained_coordinator_stream(
587
595
588
596
if preserve_temporary_client_dirs is True :
589
597
logging .info (
590
- "Preserving temporary client dir {}" .format (
591
- temporary_dir_client
592
- )
598
+ f"Preserving temporary client dir { temporary_dir_client } "
593
599
)
594
600
else :
595
- if "redis-benchmark" in benchmark_tool :
601
+ if "redis-benchmark" in benchmark_tool_global :
596
602
os .remove (full_result_path )
597
603
logging .info ("Removing temporary JSON file" )
598
604
shutil .rmtree (temporary_dir_client , ignore_errors = True )
599
605
logging .info (
600
- "Removing temporary client dir {}" . format ( temporary_dir_client )
606
+ f "Removing temporary client dir { temporary_dir_client } "
601
607
)
602
608
603
609
table_name = "Results for entire test-suite"
@@ -615,13 +621,8 @@ def process_self_contained_coordinator_stream(
615
621
616
622
if client_aggregated_results_folder != "" :
617
623
os .makedirs (client_aggregated_results_folder , exist_ok = True )
618
- dest_fpath = "{}/{}" .format (
619
- client_aggregated_results_folder ,
620
- "aggregate-results.csv" ,
621
- )
622
- logging .info (
623
- "Storing an aggregated results CSV into {}" .format (full_result_path )
624
- )
624
+ dest_fpath = f"{ client_aggregated_results_folder } /aggregate-results.csv"
625
+ logging .info (f"Storing an aggregated results CSV into { full_result_path } " )
625
626
626
627
csv_writer = CsvTableWriter (
627
628
table_name = table_name ,
@@ -633,12 +634,10 @@ def process_self_contained_coordinator_stream(
633
634
634
635
def cp_to_workdir(benchmark_tool_workdir, srcfile):
    """Copy *srcfile* into the benchmark tool working directory.

    Args:
        benchmark_tool_workdir: destination directory path (string).
        srcfile: path of the file to copy.

    Returns:
        Tuple of (dstfile, filename) where ``dstfile`` is the full
        destination path and ``filename`` is the file's base name.
    """
    # os.path.basename avoids the unused `head` from os.path.split
    filename = os.path.basename(srcfile)
    dstfile = f"{benchmark_tool_workdir}/{filename}"
    shutil.copyfile(srcfile, dstfile)
    logging.info(
        f"Copying to workdir the following file {srcfile}. Final workdir file {dstfile}"
    )
    return dstfile, filename
644
643
@@ -657,14 +656,14 @@ def print_results_table_stdout(
657
656
default_metrics ,
658
657
None ,
659
658
)
660
- table_name = "Results for {} test-case on {} topology" . format ( test_name , setup_name )
659
+ table_name = f "Results for { test_name } test-case on { setup_name } topology"
661
660
results_matrix_headers = [
662
661
"Metric JSON Path" ,
663
662
"Metric Value" ,
664
663
]
665
664
results_matrix = extract_results_table (metrics , results_dict )
666
665
667
- results_matrix = [[x [0 ], "{ :.3f}". format ( x [ 3 ]) ] for x in results_matrix ]
666
+ results_matrix = [[x [0 ], f" { x [ 3 ] :.3f} " ] for x in results_matrix ]
668
667
writer = MarkdownTableWriter (
669
668
table_name = table_name ,
670
669
headers = results_matrix_headers ,
@@ -684,7 +683,7 @@ def prepare_overall_total_test_results(
684
683
)
685
684
current_test_results_matrix = extract_results_table (metrics , results_dict )
686
685
current_test_results_matrix = [
687
- [test_name , x [0 ], "{ :.3f}". format ( x [ 3 ]) ] for x in current_test_results_matrix
686
+ [test_name , x [0 ], f" { x [ 3 ] :.3f} " ] for x in current_test_results_matrix
688
687
]
689
688
overall_results_matrix .extend (current_test_results_matrix )
690
689
@@ -704,6 +703,7 @@ def data_prepopulation_step(
704
703
tls_cert = None ,
705
704
tls_key = None ,
706
705
tls_cacert = None ,
706
+ resp_version = None ,
707
707
):
708
708
# setup the benchmark
709
709
(
@@ -721,7 +721,7 @@ def data_prepopulation_step(
721
721
benchmark_config ["dbconfig" ], "preload_tool"
722
722
)
723
723
preload_tool = extract_client_tool (benchmark_config ["dbconfig" ], "preload_tool" )
724
- full_benchmark_path = "/usr/local/bin/{}" . format ( preload_tool )
724
+ full_benchmark_path = f "/usr/local/bin/{ preload_tool } "
725
725
client_mnt_point = "/mnt/client/"
726
726
if "memtier_benchmark" in preload_tool :
727
727
(_ , preload_command_str ,) = prepare_memtier_benchmark_parameters (
@@ -736,6 +736,7 @@ def data_prepopulation_step(
736
736
tls_cert ,
737
737
tls_key ,
738
738
tls_cacert ,
739
+ resp_version ,
739
740
)
740
741
741
742
logging .info (
0 commit comments