diff --git a/redis_benchmarks_specification/__cli__/stats.py b/redis_benchmarks_specification/__cli__/stats.py index 6aae3276..2716ba9f 100644 --- a/redis_benchmarks_specification/__cli__/stats.py +++ b/redis_benchmarks_specification/__cli__/stats.py @@ -123,6 +123,25 @@ def generate_stats_cli_command_logic(args, project_name, project_version): ) test_result = False + # Validate client configuration format + has_clientconfig = "clientconfig" in benchmark_config + has_clientconfigs = "clientconfigs" in benchmark_config + + if has_clientconfig and has_clientconfigs: + logging.error( + "Test {} has both 'clientconfig' and 'clientconfigs'. Only one format is allowed.".format( + test_name + ) + ) + test_result = False + elif not has_clientconfig and not has_clientconfigs: + logging.error( + "Test {} is missing client configuration. Must have either 'clientconfig' or 'clientconfigs'.".format( + test_name + ) + ) + test_result = False + test_names.append(test_name) group = "" is_memtier = False @@ -150,12 +169,24 @@ def generate_stats_cli_command_logic(args, project_name, project_version): for tested_command in origin_tested_commands: tested_commands.append(tested_command.lower()) if is_memtier: - arguments = benchmark_config["clientconfig"]["arguments"] - arg_list = ( - benchmark_config["clientconfig"]["arguments"] - .replace('"', "") - .split() - ) + # Handle both clientconfig and clientconfigs formats + if "clientconfigs" in benchmark_config: + # For multiple configs, use the first one for stats analysis + # TODO: Consider aggregating stats from all configs + arguments = benchmark_config["clientconfigs"][0]["arguments"] + arg_list = ( + benchmark_config["clientconfigs"][0]["arguments"] + .replace('"', "") + .split() + ) + else: + # Legacy single clientconfig format + arguments = benchmark_config["clientconfig"]["arguments"] + arg_list = ( + benchmark_config["clientconfig"]["arguments"] + .replace('"', "") + .split() + ) data_size = get_arg_value(arg_list, "--data-size", data_size) data_size = get_arg_value(arg_list, "-d", data_size) diff --git a/redis_benchmarks_specification/__common__/spec.py b/redis_benchmarks_specification/__common__/spec.py index 67495a70..db38a36e 100644 --- a/redis_benchmarks_specification/__common__/spec.py +++ b/redis_benchmarks_specification/__common__/spec.py @@ -54,9 +54,19 @@ def extract_redis_configuration_from_topology(topologies_map, topology_spec_name def extract_client_cpu_limit(benchmark_config): - db_cpu_limit = benchmark_config["clientconfig"]["resources"]["requests"]["cpus"] - ceil_db_cpu_limit = math.ceil(float(db_cpu_limit)) - return ceil_db_cpu_limit + # Handle both clientconfig (single) and clientconfigs (multiple) formats + if "clientconfigs" in benchmark_config: + # For multiple configs, return the sum of all CPU limits + total_cpu_limit = 0 + for client_config in benchmark_config["clientconfigs"]: + cpu_limit = client_config["resources"]["requests"]["cpus"] + total_cpu_limit += float(cpu_limit) + return math.ceil(total_cpu_limit) + else: + # Legacy single clientconfig format + db_cpu_limit = benchmark_config["clientconfig"]["resources"]["requests"]["cpus"] + ceil_db_cpu_limit = math.ceil(float(db_cpu_limit)) + return ceil_db_cpu_limit def extract_build_variant_variations(benchmark_config, keyname="build-variants"): @@ -74,9 +84,60 @@ def extract_client_container_image(benchmark_config, keyname="clientconfig"): return client_container_image +def extract_client_container_images(benchmark_config): + """Extract container images for both single and 
multiple client configs""" + if "clientconfigs" in benchmark_config: + # Multiple client configs - return list of images + images = [] + for client_config in benchmark_config["clientconfigs"]: + if "run_image" in client_config: + images.append(client_config["run_image"]) + else: + images.append(None) + return images + elif "clientconfig" in benchmark_config: + # Single client config - return list with one image for consistency + if "run_image" in benchmark_config["clientconfig"]: + return [benchmark_config["clientconfig"]["run_image"]] + else: + return [None] + return [] + + def extract_client_tool(benchmark_config, keyname="clientconfig"): client_tool = None if keyname in benchmark_config: if "tool" in benchmark_config[keyname]: client_tool = benchmark_config[keyname]["tool"] return client_tool + + +def extract_client_tools(benchmark_config): + """Extract tools for both single and multiple client configs""" + if "clientconfigs" in benchmark_config: + # Multiple client configs - return list of tools + tools = [] + for client_config in benchmark_config["clientconfigs"]: + if "tool" in client_config: + tools.append(client_config["tool"]) + else: + tools.append(None) + return tools + elif "clientconfig" in benchmark_config: + # Single client config - return list with one tool for consistency + if "tool" in benchmark_config["clientconfig"]: + return [benchmark_config["clientconfig"]["tool"]] + else: + return [None] + return [] + + +def extract_client_configs(benchmark_config): + """Extract client configurations as a list for both single and multiple formats""" + if "clientconfigs" in benchmark_config: + # Multiple client configs + return benchmark_config["clientconfigs"] + elif "clientconfig" in benchmark_config: + # Single client config - return as list for consistency + return [benchmark_config["clientconfig"]] + return [] diff --git a/redis_benchmarks_specification/__runner__/args.py b/redis_benchmarks_specification/__runner__/args.py index a52d41c1..a6950af4 100644 --- a/redis_benchmarks_specification/__runner__/args.py +++ b/redis_benchmarks_specification/__runner__/args.py @@ -208,6 +208,12 @@ def create_client_runner_args(project_name): type=int, help="override memtier number of runs for each benchmark. By default will run once each test", ) + parser.add_argument( + "--container-timeout-buffer", + default=60, + type=int, + help="Buffer time in seconds to add to test-time for container timeout. 
Default is 60 seconds.", + ) parser.add_argument( "--cluster-mode", default=False, diff --git a/redis_benchmarks_specification/__runner__/runner.py b/redis_benchmarks_specification/__runner__/runner.py index 01631534..967d7f79 100644 --- a/redis_benchmarks_specification/__runner__/runner.py +++ b/redis_benchmarks_specification/__runner__/runner.py @@ -57,6 +57,9 @@ extract_client_container_image, extract_client_cpu_limit, extract_client_tool, + extract_client_configs, + extract_client_container_images, + extract_client_tools, ) from redis_benchmarks_specification.__runner__.args import create_client_runner_args @@ -88,6 +91,321 @@ def parse_size(size): return int(number * units[unit]) +def run_multiple_clients( + benchmark_config, + docker_client, + temporary_dir_client, + client_mnt_point, + benchmark_tool_workdir, + client_cpuset_cpus, + port, + host, + password, + oss_cluster_api_enabled, + tls_enabled, + tls_skip_verify, + test_tls_cert, + test_tls_key, + test_tls_cacert, + resp_version, + override_memtier_test_time, + override_test_runs, + unix_socket, + args, +): + """ + Run multiple client configurations simultaneously and aggregate results. + Returns aggregated stdout and list of individual results. + """ + client_configs = extract_client_configs(benchmark_config) + client_images = extract_client_container_images(benchmark_config) + client_tools = extract_client_tools(benchmark_config) + + if not client_configs: + raise ValueError("No client configurations found") + + containers = [] + results = [] + + # Start all containers simultaneously (detached) + for client_index, (client_config, client_tool, client_image) in enumerate( + zip(client_configs, client_tools, client_images) + ): + try: + local_benchmark_output_filename = f"benchmark_output_{client_index}.json" + + # Prepare benchmark command for this client + if "memtier_benchmark" in client_tool: + ( + _, + benchmark_command_str, + arbitrary_command, + ) = prepare_memtier_benchmark_parameters( + client_config, + client_tool, + port, + host, + password, + local_benchmark_output_filename, + oss_cluster_api_enabled, + tls_enabled, + tls_skip_verify, + test_tls_cert, + test_tls_key, + test_tls_cacert, + resp_version, + override_memtier_test_time, + override_test_runs, + unix_socket, + ) + elif "pubsub-sub-bench" in client_tool: + ( + _, + benchmark_command_str, + arbitrary_command, + ) = prepare_pubsub_sub_bench_parameters( + client_config, + client_tool, + port, + host, + password, + local_benchmark_output_filename, + oss_cluster_api_enabled, + tls_enabled, + tls_skip_verify, + test_tls_cert, + test_tls_key, + test_tls_cacert, + resp_version, + override_memtier_test_time, + unix_socket, + None, # username + ) + else: + # Handle other benchmark tools + ( + benchmark_command, + benchmark_command_str, + ) = prepare_benchmark_parameters( + {**benchmark_config, "clientconfig": client_config}, + client_tool, + port, + host, + local_benchmark_output_filename, + False, + benchmark_tool_workdir, + False, + ) + + # Calculate container timeout + container_timeout = 300 # 5 minutes default + buffer_timeout = ( + args.container_timeout_buffer + ) # Configurable buffer from command line + if "test-time" in benchmark_command_str: + # Try to extract test time and add buffer + import re + + # Handle both --test-time (memtier) and -test-time (pubsub-sub-bench) + test_time_match = re.search( + r"--?test-time[=\s]+(\d+)", benchmark_command_str + ) + if test_time_match: + test_time = int(test_time_match.group(1)) + container_timeout = test_time + 
buffer_timeout + logging.info( + f"Client {client_index}: Set container timeout to {container_timeout}s (test-time: {test_time}s + {buffer_timeout}s buffer)" + ) + + logging.info( + f"Starting client {client_index} with docker image {client_image} (cpuset={client_cpuset_cpus}) with args: {benchmark_command_str}" + ) + + # Start container (detached) + import os + + container = docker_client.containers.run( + image=client_image, + volumes={ + temporary_dir_client: { + "bind": client_mnt_point, + "mode": "rw", + }, + }, + auto_remove=False, + privileged=True, + working_dir=benchmark_tool_workdir, + command=benchmark_command_str, + network_mode="host", + detach=True, + cpuset_cpus=client_cpuset_cpus, + user=f"{os.getuid()}:{os.getgid()}", # Run as current user to fix permissions + ) + + containers.append( + { + "container": container, + "client_index": client_index, + "client_tool": client_tool, + "client_image": client_image, + "benchmark_command_str": benchmark_command_str, + "timeout": container_timeout, + } + ) + + except Exception as e: + error_msg = f"Error starting client {client_index}: {e}" + logging.error(error_msg) + logging.error(f"Image: {client_image}, Tool: {client_tool}") + logging.error(f"Command: {benchmark_command_str}") + # Fail fast on container startup errors + raise RuntimeError(f"Failed to start client {client_index}: {e}") + + # Wait for all containers to complete + logging.info(f"Waiting for {len(containers)} containers to complete...") + + for container_info in containers: + container = container_info["container"] + client_index = container_info["client_index"] + client_tool = container_info["client_tool"] + client_image = container_info["client_image"] + benchmark_command_str = container_info["benchmark_command_str"] + + try: + # Wait for container to complete + exit_code = container.wait(timeout=container_info["timeout"]) + client_stdout = container.logs().decode("utf-8") + + # Check if container succeeded + if exit_code.get("StatusCode", 1) != 0: + logging.error( + f"Client {client_index} failed with exit code: {exit_code}" + ) + logging.error(f"Client {client_index} stdout/stderr:") + logging.error(client_stdout) + # Fail fast on container execution errors + raise RuntimeError( + f"Client {client_index} ({client_tool}) failed with exit code {exit_code}" + ) + + logging.info( + f"Client {client_index} completed successfully with exit code: {exit_code}" + ) + + results.append( + { + "client_index": client_index, + "stdout": client_stdout, + "config": client_configs[client_index], + "tool": client_tool, + "image": client_image, + } + ) + + except Exception as e: + # Get logs even if wait failed + try: + client_stdout = container.logs().decode("utf-8") + logging.error(f"Client {client_index} logs:") + logging.error(client_stdout) + except: + logging.error(f"Could not retrieve logs for client {client_index}") + + raise RuntimeError(f"Client {client_index} ({client_tool}) failed: {e}") + + finally: + # Clean up container + try: + container.remove(force=True) + except Exception as cleanup_error: + logging.warning(f"Client {client_index} cleanup error: {cleanup_error}") + + logging.info(f"Successfully completed {len(containers)} client configurations") + + # Aggregate results by reading JSON output files + aggregated_stdout = "" + successful_results = [r for r in results if "error" not in r] + + if successful_results: + # Try to read and aggregate JSON output files + import json + import os + + aggregated_json = {} + memtier_json = None + pubsub_json = None + + for 
result in successful_results: + client_index = result["client_index"] + tool = result["tool"] + + # Look for JSON output file + json_filename = f"benchmark_output_{client_index}.json" + json_filepath = os.path.join(temporary_dir_client, json_filename) + + if os.path.exists(json_filepath): + try: + with open(json_filepath, "r") as f: + client_json = json.load(f) + + if "memtier_benchmark" in tool: + # Store memtier JSON + memtier_json = client_json + logging.info( + f"Successfully read memtier JSON output from client {client_index}" + ) + elif "pubsub-sub-bench" in tool: + # Store pubsub JSON + pubsub_json = client_json + logging.info( + f"Successfully read pubsub-sub-bench JSON output from client {client_index}" + ) + + logging.info( + f"Successfully read JSON output from client {client_index} ({tool})" + ) + + except Exception as e: + logging.warning( + f"Failed to read JSON from client {client_index}: {e}" + ) + # Fall back to stdout + pass + else: + logging.warning( + f"JSON output file not found for client {client_index}: {json_filepath}" + ) + + # Merge JSON outputs from both tools + if memtier_json and pubsub_json: + # Use memtier as base and add pubsub metrics + aggregated_json = memtier_json.copy() + # Add pubsub metrics to the aggregated result + aggregated_json.update(pubsub_json) + aggregated_stdout = json.dumps(aggregated_json, indent=2) + logging.info( + "Using merged JSON results from memtier and pubsub-sub-bench clients" + ) + elif memtier_json: + # Only memtier available + aggregated_json = memtier_json + aggregated_stdout = json.dumps(aggregated_json, indent=2) + logging.info("Using JSON results from memtier client only") + elif pubsub_json: + # Only pubsub available + aggregated_json = pubsub_json + aggregated_stdout = json.dumps(aggregated_json, indent=2) + logging.info("Using JSON results from pubsub-sub-bench client only") + else: + # Fall back to concatenated stdout + aggregated_stdout = "\n".join([r["stdout"] for r in successful_results]) + logging.warning( + "No JSON results found, falling back to concatenated stdout" + ) + + return aggregated_stdout, results + + def main(): _, _, project_version = populate_with_poetry_data() project_name_suffix = "redis-benchmarks-spec-client-runner" @@ -347,6 +665,96 @@ def prepare_memtier_benchmark_parameters( return None, benchmark_command_str, arbitrary_command +def prepare_pubsub_sub_bench_parameters( + clientconfig, + full_benchmark_path, + port, + server, + password, + local_benchmark_output_filename, + oss_cluster_api_enabled=False, + tls_enabled=False, + tls_skip_verify=False, + tls_cert=None, + tls_key=None, + tls_cacert=None, + resp_version=None, + override_test_time=0, + unix_socket="", + username=None, +): + """ + Prepare pubsub-sub-bench command parameters + """ + arbitrary_command = False + + benchmark_command = [ + # full_benchmark_path, + "-json-out-file", + local_benchmark_output_filename, + ] + + # Connection parameters + if unix_socket != "": + # pubsub-sub-bench doesn't support unix sockets directly + # Fall back to host/port + logging.warning( + "pubsub-sub-bench doesn't support unix sockets, using host/port" + ) + benchmark_command.extend(["-host", server, "-port", str(port)]) + else: + benchmark_command.extend(["-host", server, "-port", str(port)]) + + # Authentication + if username and password: + # ACL style authentication + benchmark_command.extend(["-user", username, "-a", password]) + elif password: + # Password-only authentication + benchmark_command.extend(["-a", password]) + + # TLS support (if the 
tool supports it in future versions) + if tls_enabled: + logging.warning("pubsub-sub-bench TLS support not implemented yet") + + # RESP version + if resp_version: + if resp_version == "3": + benchmark_command.extend(["-resp", "3"]) + elif resp_version == "2": + benchmark_command.extend(["-resp", "2"]) + + # Cluster mode + if oss_cluster_api_enabled: + benchmark_command.append("-oss-cluster-api-distribute-subscribers") + + logging.info(f"Preparing pubsub-sub-bench parameters: {benchmark_command}") + benchmark_command_str = " ".join(benchmark_command) + + # Append user-defined arguments from YAML + user_arguments = "" + if "arguments" in clientconfig: + user_arguments = clientconfig["arguments"] + + # Test time override - handle after user arguments to avoid conflicts + if override_test_time and override_test_time > 0: + # Remove any existing -test-time from user arguments + import re + + user_arguments = re.sub(r"-test-time\s+\d+", "", user_arguments) + # Add our override test time + benchmark_command_str = ( + benchmark_command_str + " -test-time " + str(override_test_time) + ) + logging.info(f"Applied test-time override: {override_test_time}s") + + # Add cleaned user arguments + if user_arguments.strip(): + benchmark_command_str = benchmark_command_str + " " + user_arguments.strip() + + return benchmark_command, benchmark_command_str, arbitrary_command + + def process_self_contained_coordinator_stream( args, datasink_push_results_redistimeseries, @@ -376,7 +784,7 @@ def delete_temporary_files( if preserve_temporary_client_dirs is True: logging.info(f"Preserving temporary client dir {temporary_dir_client}") else: - if "redis-benchmark" in benchmark_tool_global: + if benchmark_tool_global and "redis-benchmark" in benchmark_tool_global: if full_result_path is not None: os.remove(full_result_path) logging.info("Removing temporary JSON file") @@ -723,45 +1131,6 @@ def delete_temporary_files( ) arbitrary_command = False - if "memtier_benchmark" not in benchmark_tool: - # prepare the benchmark command - ( - benchmark_command, - benchmark_command_str, - ) = prepare_benchmark_parameters( - benchmark_config, - full_benchmark_path, - port, - host, - local_benchmark_output_filename, - False, - benchmark_tool_workdir, - False, - ) - else: - ( - _, - benchmark_command_str, - arbitrary_command, - ) = prepare_memtier_benchmark_parameters( - benchmark_config["clientconfig"], - full_benchmark_path, - port, - host, - password, - local_benchmark_output_filename, - oss_cluster_api_enabled, - tls_enabled, - tls_skip_verify, - test_tls_cert, - test_tls_key, - test_tls_cacert, - resp_version, - override_memtier_test_time, - override_test_runs, - unix_socket, - ) - if ( arbitrary_command and oss_cluster_api_enabled @@ -777,9 +1146,83 @@ def delete_temporary_files( ) continue - client_container_image = extract_client_container_image( - benchmark_config - ) + # Check if we have multiple client configurations + client_configs = extract_client_configs(benchmark_config) + is_multiple_clients = len(client_configs) > 1 + + if is_multiple_clients: + logging.info( + f"Running test with {len(client_configs)} client configurations" + ) + else: + # Legacy single client mode - prepare benchmark parameters + client_container_image = extract_client_container_image( + benchmark_config + ) + benchmark_tool = extract_client_tool(benchmark_config) + + # Prepare benchmark command for single client + if "memtier_benchmark" in benchmark_tool: + ( + _, + benchmark_command_str, + arbitrary_command, + ) = 
prepare_memtier_benchmark_parameters( + benchmark_config["clientconfig"], + full_benchmark_path, + port, + host, + password, + local_benchmark_output_filename, + oss_cluster_api_enabled, + tls_enabled, + tls_skip_verify, + test_tls_cert, + test_tls_key, + test_tls_cacert, + resp_version, + override_memtier_test_time, + override_test_runs, + unix_socket, + ) + elif "pubsub-sub-bench" in benchmark_tool: + ( + _, + benchmark_command_str, + arbitrary_command, + ) = prepare_pubsub_sub_bench_parameters( + benchmark_config["clientconfig"], + full_benchmark_path, + port, + host, + password, + local_benchmark_output_filename, + oss_cluster_api_enabled, + tls_enabled, + tls_skip_verify, + test_tls_cert, + test_tls_key, + test_tls_cacert, + resp_version, + override_memtier_test_time, + unix_socket, + None, # username + ) + else: + # prepare the benchmark command for other tools + ( + benchmark_command, + benchmark_command_str, + ) = prepare_benchmark_parameters( + benchmark_config, + full_benchmark_path, + port, + host, + local_benchmark_output_filename, + False, + benchmark_tool_workdir, + False, + ) profiler_call_graph_mode = "dwarf" profiler_frequency = 99 @@ -801,50 +1244,106 @@ def delete_temporary_files( # run the benchmark benchmark_start_time = datetime.datetime.now() - if args.benchmark_local_install: - logging.info("Running memtier benchmark outside of docker") - benchmark_command_str = ( - "taskset -c " - + client_cpuset_cpus - + " " - + benchmark_command_str - ) + if is_multiple_clients: + # Run multiple client configurations logging.info( - "Running memtier benchmark command {}".format( - benchmark_command_str - ) + "Running multiple client configurations simultaneously" + ) + client_container_stdout, client_results = run_multiple_clients( + benchmark_config, + docker_client, + temporary_dir_client, + client_mnt_point, + benchmark_tool_workdir, + client_cpuset_cpus, + port, + host, + password, + oss_cluster_api_enabled, + tls_enabled, + tls_skip_verify, + test_tls_cert, + test_tls_key, + test_tls_cacert, + resp_version, + override_memtier_test_time, + override_test_runs, + unix_socket, + args, ) - stream = os.popen(benchmark_command_str) - client_container_stdout = stream.read() - move_command = "mv {} {}".format( - local_benchmark_output_filename, temporary_dir_client + logging.info( + f"Completed {len(client_results)} client configurations" ) - os.system(move_command) else: - logging.info( - "Using docker image {} as benchmark client image (cpuset={}) with the following args: {}".format( - client_container_image, - client_cpuset_cpus, - benchmark_command_str, + # Legacy single client execution + if args.benchmark_local_install: + logging.info("Running memtier benchmark outside of docker") + benchmark_command_str = ( + "taskset -c " + + client_cpuset_cpus + + " " + + benchmark_command_str + ) + logging.info( + "Running memtier benchmark command {}".format( + benchmark_command_str + ) + ) + stream = os.popen(benchmark_command_str) + client_container_stdout = stream.read() + move_command = "mv {} {}".format( + local_benchmark_output_filename, temporary_dir_client + ) + os.system(move_command) + else: + logging.info( + "Using docker image {} as benchmark client image (cpuset={}) with the following args: {}".format( + client_container_image, + client_cpuset_cpus, + benchmark_command_str, + ) ) - ) - client_container_stdout = docker_client.containers.run( - image=client_container_image, - volumes={ - temporary_dir_client: { - "bind": client_mnt_point, - "mode": "rw", + # Use explicit 
container management for single client + container = docker_client.containers.run( + image=client_container_image, + volumes={ + temporary_dir_client: { + "bind": client_mnt_point, + "mode": "rw", + }, }, - }, - auto_remove=True, - privileged=True, - working_dir=benchmark_tool_workdir, - command=benchmark_command_str, - network_mode="host", - detach=False, - cpuset_cpus=client_cpuset_cpus, - ) + auto_remove=False, + privileged=True, + working_dir=benchmark_tool_workdir, + command=benchmark_command_str, + network_mode="host", + detach=True, + cpuset_cpus=client_cpuset_cpus, + ) + + # Wait for container and get output + try: + exit_code = container.wait() + client_container_stdout = container.logs().decode( + "utf-8" + ) + logging.info( + f"Single client completed with exit code: {exit_code}" + ) + except Exception as wait_error: + logging.error(f"Single client wait error: {wait_error}") + client_container_stdout = container.logs().decode( + "utf-8" + ) + finally: + # Clean up container + try: + container.remove(force=True) + except Exception as cleanup_error: + logging.warning( + f"Single client cleanup error: {cleanup_error}" + ) benchmark_end_time = datetime.datetime.now() benchmark_duration_seconds = ( @@ -895,18 +1394,47 @@ def delete_temporary_files( client_container_stdout, None, ) - full_result_path = local_benchmark_output_filename - if "memtier_benchmark" in benchmark_tool: - full_result_path = "{}/{}".format( - temporary_dir_client, local_benchmark_output_filename + # Check if we have multi-client results with aggregated JSON + if ( + is_multiple_clients + and client_container_stdout.strip().startswith("{") + ): + # Use aggregated JSON from multi-client runner + logging.info( + "Using aggregated JSON results from multi-client execution" ) - logging.info(f"Reading results json from {full_result_path}") + results_dict = json.loads(client_container_stdout) + # Print results table for multi-client + print_results_table_stdout( + benchmark_config, + default_metrics, + results_dict, + setup_type, + test_name, + ) + # Add results to overall summary table + prepare_overall_total_test_results( + benchmark_config, + default_metrics, + results_dict, + test_name, + results_matrix, + redis_conns, + ) + else: + # Single client - read from file as usual + full_result_path = local_benchmark_output_filename + if "memtier_benchmark" in benchmark_tool: + full_result_path = "{}/{}".format( + temporary_dir_client, local_benchmark_output_filename + ) + logging.info(f"Reading results json from {full_result_path}") - with open( - full_result_path, - "r", - ) as json_file: - results_dict = json.load(json_file) + with open( + full_result_path, + "r", + ) as json_file: + results_dict = json.load(json_file) print_results_table_stdout( benchmark_config, default_metrics, @@ -921,6 +1449,7 @@ def delete_temporary_files( results_dict, test_name, results_matrix, + redis_conns, ) dataset_load_duration_seconds = 0 @@ -995,6 +1524,22 @@ def delete_temporary_files( benchmark_tool_global=benchmark_tool_global, ) + # Print Redis server information section before results + if len(results_matrix) > 0: + # Get redis_conns from the first test context (we need to pass it somehow) + # For now, try to get it from the current context if available + try: + # Try to get redis connection to display server info + import redis as redis_module + + r = redis_module.StrictRedis( + host="localhost", port=6379, decode_responses=True + ) + r.ping() # Test connection + print_redis_info_section([r]) + except Exception as e: + 
logging.info(f"Could not connect to Redis for server info: {e}") + table_name = "Results for entire test-suite" results_matrix_headers = [ "Test Name", @@ -1125,8 +1670,53 @@ def print_results_table_stdout( writer.write_table() +def print_redis_info_section(redis_conns): + """Print Redis server information as a separate section""" + if redis_conns is not None and len(redis_conns) > 0: + try: + redis_info = redis_conns[0].info() + + print("\n# Redis Server Information") + redis_info_data = [ + ["Redis Version", redis_info.get("redis_version", "unknown")], + ["Redis Git SHA1", redis_info.get("redis_git_sha1", "unknown")], + ["Redis Git Dirty", str(redis_info.get("redis_git_dirty", "unknown"))], + ["Redis Build ID", redis_info.get("redis_build_id", "unknown")], + ["Redis Mode", redis_info.get("redis_mode", "unknown")], + ["OS", redis_info.get("os", "unknown")], + ["Arch Bits", str(redis_info.get("arch_bits", "unknown"))], + ["GCC Version", redis_info.get("gcc_version", "unknown")], + ["Process ID", str(redis_info.get("process_id", "unknown"))], + ["TCP Port", str(redis_info.get("tcp_port", "unknown"))], + [ + "Uptime (seconds)", + str(redis_info.get("uptime_in_seconds", "unknown")), + ], + ] + + from pytablewriter import MarkdownTableWriter + + writer = MarkdownTableWriter( + table_name="", + headers=["Property", "Value"], + value_matrix=redis_info_data, + ) + writer.write_table() + + logging.info( + f"Displayed Redis server information: Redis {redis_info.get('redis_version', 'unknown')}" + ) + except Exception as e: + logging.warning(f"Failed to collect Redis server information: {e}") + + def prepare_overall_total_test_results( - benchmark_config, default_metrics, results_dict, test_name, overall_results_matrix + benchmark_config, + default_metrics, + results_dict, + test_name, + overall_results_matrix, + redis_conns=None, ): # check which metrics to extract ( @@ -1246,7 +1836,8 @@ def data_prepopulation_step( preload_command_str, ) ) - client_container_stdout = docker_client.containers.run( + # Use explicit container management for preload tool + container = docker_client.containers.run( image=preload_image, volumes={ temporary_dir: { @@ -1254,15 +1845,30 @@ def data_prepopulation_step( "mode": "rw", }, }, - auto_remove=True, + auto_remove=False, privileged=True, working_dir=benchmark_tool_workdir, command=preload_command_str, network_mode="host", - detach=False, + detach=True, cpuset_cpus=client_cpuset_cpus, ) + # Wait for preload container and get output + try: + exit_code = container.wait() + client_container_stdout = container.logs().decode("utf-8") + logging.info(f"Preload tool completed with exit code: {exit_code}") + except Exception as wait_error: + logging.error(f"Preload tool wait error: {wait_error}") + client_container_stdout = container.logs().decode("utf-8") + finally: + # Clean up container + try: + container.remove(force=True) + except Exception as cleanup_error: + logging.warning(f"Preload tool cleanup error: {cleanup_error}") + preload_end_time = datetime.datetime.now() preload_duration_seconds = calculate_client_tool_duration_and_check( preload_end_time, preload_start_time, "Preload", False diff --git a/redis_benchmarks_specification/test-suites/defaults.yml b/redis_benchmarks_specification/test-suites/defaults.yml index f7a5ba14..26e21dd2 100644 --- a/redis_benchmarks_specification/test-suites/defaults.yml +++ b/redis_benchmarks_specification/test-suites/defaults.yml @@ -26,3 +26,4 @@ exporter: - $."ALL STATS".Totals."Misses/sec" - $."ALL STATS".Totals."Percentile 
Latencies"."p50.00" - $."ALL STATS".Totals."Percentile Latencies"."p99.00" + - $."MessageRate" diff --git a/redis_benchmarks_specification/test-suites/generate.py b/redis_benchmarks_specification/test-suites/generate.py index 50e02138..0513ca2b 100644 --- a/redis_benchmarks_specification/test-suites/generate.py +++ b/redis_benchmarks_specification/test-suites/generate.py @@ -75,7 +75,13 @@ keyspace_value = dbconfig["check"]["keyspacelen"] if "preload_tool" in dbconfig: precommand = dbconfig["preload_tool"]["arguments"] - command = benchmark_config["clientconfig"]["arguments"] + # Handle both clientconfig and clientconfigs formats + if "clientconfigs" in benchmark_config: + # For multiple configs, use the first one for generation + command = benchmark_config["clientconfigs"][0]["arguments"] + else: + # Legacy single clientconfig format + command = benchmark_config["clientconfig"]["arguments"] check_dict = {"keyspacelen": keyspace_value} test_definition = { diff --git a/redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-pubsub-mixed-100-channels-128B-100-publishers-100-subscribers.yml b/redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-pubsub-mixed-100-channels-128B-100-publishers-100-subscribers.yml new file mode 100644 index 00000000..478de09f --- /dev/null +++ b/redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-pubsub-mixed-100-channels-128B-100-publishers-100-subscribers.yml @@ -0,0 +1,35 @@ +version: 0.4 +name: memtier_benchmark-nokeys-pubsub-mixed-100-channels-128B-100-publishers-100-subscribers +description: Mixed workload with memtier publishing messages and pubsub-sub-bench subscribing to channels simultaneously. +dbconfig: + configuration-parameters: + save: '""' + resources: + requests: + memory: 2g +tested-groups: +- pubsub +tested-commands: +- publish +- subscribe +redis-topologies: +- oss-standalone +build-variants: +- gcc:8.5.0-amd64-debian-buster-default +- dockerhub +clientconfigs: + - run_image: redislabs/memtier_benchmark:edge + tool: memtier_benchmark + arguments: --test-time 120 --key-prefix "channel-" --pipeline 1 -d 128 --key-maximum 100 --command "PUBLISH __key__ __data__" --command-key-pattern="R" -c 50 -t 4 --hide-histogram + resources: + requests: + cpus: '4' + memory: 1g + - run_image: filipe958/pubsub-sub-bench:latest + tool: pubsub-sub-bench + arguments: -clients 100 -channel-minimum 1 -channel-maximum 100 -subscriber-prefix "channel-" -mode subscribe -test-time 120 -subscribers-per-channel 1 + resources: + requests: + cpus: '4' + memory: 1g +priority: 23 diff --git a/redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-pubsub-mixed-100-channels-128B-100-publishers-1000-subscribers.yml b/redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-pubsub-mixed-100-channels-128B-100-publishers-1000-subscribers.yml new file mode 100644 index 00000000..f61722c4 --- /dev/null +++ b/redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-pubsub-mixed-100-channels-128B-100-publishers-1000-subscribers.yml @@ -0,0 +1,35 @@ +version: 0.4 +name: memtier_benchmark-nokeys-pubsub-mixed-100-channels-128B-100-publishers-1000-subscribers +description: Mixed workload with memtier publishing messages and pubsub-sub-bench subscribing to channels simultaneously. 
+dbconfig: + configuration-parameters: + save: '""' + resources: + requests: + memory: 2g +tested-groups: +- pubsub +tested-commands: +- publish +- subscribe +redis-topologies: +- oss-standalone +build-variants: +- gcc:8.5.0-amd64-debian-buster-default +- dockerhub +clientconfigs: + - run_image: redislabs/memtier_benchmark:edge + tool: memtier_benchmark + arguments: --test-time 120 --key-prefix "channel-" --pipeline 1 -d 128 --key-maximum 100 --command "PUBLISH __key__ __data__" --command-key-pattern="R" -c 50 -t 4 --hide-histogram + resources: + requests: + cpus: '4' + memory: 1g + - run_image: filipe958/pubsub-sub-bench:latest + tool: pubsub-sub-bench + arguments: -clients 1000 -channel-minimum 1 -channel-maximum 100 -subscriber-prefix "channel-" -mode subscribe -test-time 120 -subscribers-per-channel 1 + resources: + requests: + cpus: '4' + memory: 1g +priority: 23 diff --git a/redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-pubsub-mixed-100-channels-128B-100-publishers-5000-subscribers.yml b/redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-pubsub-mixed-100-channels-128B-100-publishers-5000-subscribers.yml new file mode 100644 index 00000000..4ddf7816 --- /dev/null +++ b/redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-pubsub-mixed-100-channels-128B-100-publishers-5000-subscribers.yml @@ -0,0 +1,35 @@ +version: 0.4 +name: memtier_benchmark-nokeys-pubsub-mixed-100-channels-128B-100-publishers-5000-subscribers +description: Mixed workload with memtier publishing messages and pubsub-sub-bench subscribing to channels simultaneously. +dbconfig: + configuration-parameters: + save: '""' + resources: + requests: + memory: 2g +tested-groups: +- pubsub +tested-commands: +- publish +- subscribe +redis-topologies: +- oss-standalone +build-variants: +- gcc:8.5.0-amd64-debian-buster-default +- dockerhub +clientconfigs: + - run_image: redislabs/memtier_benchmark:edge + tool: memtier_benchmark + arguments: --test-time 120 --key-prefix "channel-" --pipeline 1 -d 128 --key-maximum 100 --command "PUBLISH __key__ __data__" --command-key-pattern="R" -c 50 -t 4 --hide-histogram + resources: + requests: + cpus: '4' + memory: 1g + - run_image: filipe958/pubsub-sub-bench:latest + tool: pubsub-sub-bench + arguments: -clients 5000 -channel-minimum 1 -channel-maximum 100 -subscriber-prefix "channel-" -mode subscribe -test-time 120 -subscribers-per-channel 1 + resources: + requests: + cpus: '4' + memory: 1g +priority: 23 diff --git a/redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-pubsub-mixed-100-channels-128B-100-publishers-50K-subscribers-5k-conns.yml b/redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-pubsub-mixed-100-channels-128B-100-publishers-50K-subscribers-5k-conns.yml new file mode 100644 index 00000000..a17373ef --- /dev/null +++ b/redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-pubsub-mixed-100-channels-128B-100-publishers-50K-subscribers-5k-conns.yml @@ -0,0 +1,35 @@ +version: 0.4 +name: memtier_benchmark-nokeys-pubsub-mixed-100-channels-128B-100-publishers-50K-subscribers-5k-conns +description: Mixed workload with memtier publishing messages and pubsub-sub-bench subscribing to channels simultaneously. 
+dbconfig: + configuration-parameters: + save: '""' + resources: + requests: + memory: 2g +tested-groups: +- pubsub +tested-commands: +- publish +- subscribe +redis-topologies: +- oss-standalone +build-variants: +- gcc:8.5.0-amd64-debian-buster-default +- dockerhub +clientconfigs: + - run_image: redislabs/memtier_benchmark:edge + tool: memtier_benchmark + arguments: --test-time 120 --key-prefix "channel-" --pipeline 1 -d 128 --key-maximum 100 --command "PUBLISH __key__ __data__" --command-key-pattern="R" -c 50 -t 4 --hide-histogram + resources: + requests: + cpus: '4' + memory: 1g + - run_image: filipe958/pubsub-sub-bench:latest + tool: pubsub-sub-bench + arguments: -clients 5000 -channel-minimum 1 -channel-maximum 100 -subscriber-prefix "channel-" -mode subscribe -test-time 120 -subscribers-per-channel 10 + resources: + requests: + cpus: '4' + memory: 1g +priority: 23 diff --git a/utils/generate_memory_requirements.py b/utils/generate_memory_requirements.py index 38fc40b0..11cfa248 100644 --- a/utils/generate_memory_requirements.py +++ b/utils/generate_memory_requirements.py @@ -123,11 +123,19 @@ def process_yaml_file(yaml_file_path, removed_dir): print(f"Error reading {yaml_file_path}: {e}", file=sys.stderr) return - # Check for necessary fields + # Check for necessary fields - handle both clientconfig and clientconfigs try: - arguments = config["clientconfig"]["arguments"] - except KeyError: - print(f"Skipping {yaml_file_path}: Missing 'clientconfig.arguments' field.") + if "clientconfigs" in config: + # Multiple client configs - use first one for memory calculation + arguments = config["clientconfigs"][0]["arguments"] + elif "clientconfig" in config: + # Single client config + arguments = config["clientconfig"]["arguments"] + else: + print(f"Skipping {yaml_file_path}: Missing client configuration.") + return + except (KeyError, IndexError): + print(f"Skipping {yaml_file_path}: Invalid client configuration format.") return # Convert arguments to string @@ -203,10 +211,20 @@ def main(): continue try: - arguments = config["clientconfig"]["arguments"] - except KeyError: + if "clientconfigs" in config: + # Multiple client configs - use first one for memory calculation + arguments = config["clientconfigs"][0]["arguments"] + elif "clientconfig" in config: + # Single client config + arguments = config["clientconfig"]["arguments"] + else: + print( + f"Skipping {yaml_file_path}: Missing client configuration." + ) + continue + except (KeyError, IndexError): print( - f"Skipping {yaml_file_path}: Missing 'clientconfig.arguments' field." + f"Skipping {yaml_file_path}: Invalid client configuration format." 
) continue diff --git a/utils/summary.py b/utils/summary.py index cb29be49..1c395ee7 100644 --- a/utils/summary.py +++ b/utils/summary.py @@ -131,8 +131,16 @@ def summarize_yaml_file(yaml_file_path, command_summary, command_group_summary): command_group = categorize_command(command) command_group_summary[command_group] += 1 - # Extract command from 'clientconfig.arguments' - arguments = config.get("clientconfig", {}).get("arguments", "") + # Extract command from client configuration - handle both formats + arguments = "" + if "clientconfigs" in config: + # Multiple client configs - use first one for summary + if config["clientconfigs"] and "arguments" in config["clientconfigs"][0]: + arguments = config["clientconfigs"][0]["arguments"] + elif "clientconfig" in config: + # Single client config + arguments = config.get("clientconfig", {}).get("arguments", "") + if arguments: command = parse_arguments(arguments) if command: diff --git a/utils/tests/test_runner.py b/utils/tests/test_runner.py index a465c986..7f66d7cc 100644 --- a/utils/tests/test_runner.py +++ b/utils/tests/test_runner.py @@ -6,11 +6,21 @@ from redis_benchmarks_specification.__common__.package import get_version_string from redis_benchmarks_specification.__common__.runner import extract_testsuites -from redis_benchmarks_specification.__common__.spec import extract_client_tool +from redis_benchmarks_specification.__common__.spec import ( + extract_client_tool, + extract_client_configs, + extract_client_container_images, + extract_client_tools, + extract_client_cpu_limit, + extract_client_container_image, +) from redis_benchmarks_specification.__runner__.args import create_client_runner_args from redis_benchmarks_specification.__runner__.runner import ( prepare_memtier_benchmark_parameters, run_client_runner_logic, + parse_size, + run_multiple_clients, + prepare_pubsub_sub_bench_parameters, ) @@ -177,6 +187,820 @@ def test_prepare_memtier_benchmark_parameters(): ) +def test_extract_client_configs(): + # Test single clientconfig format + with open( + "./redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-100B-expire-use-case.yml", + "r", + ) as yml_file: + benchmark_config = yaml.safe_load(yml_file) + client_configs = extract_client_configs(benchmark_config) + assert len(client_configs) == 1 + assert "tool" in client_configs[0] + assert client_configs[0]["tool"] == "memtier_benchmark" + + # Test multiple clientconfigs format (create a test config) + test_config = { + "clientconfigs": [ + { + "run_image": "redislabs/memtier_benchmark:edge", + "tool": "memtier_benchmark", + "arguments": '--command="ZSCAN zset:100 0" --hide-histogram --test-time 120', + "resources": {"requests": {"cpus": "4", "memory": "2g"}}, + }, + { + "run_image": "redislabs/memtier_benchmark:edge", + "tool": "memtier_benchmark", + "arguments": '--command="ZRANGE zset:100 0 -1" --hide-histogram --test-time 120', + "resources": {"requests": {"cpus": "2", "memory": "1g"}}, + }, + ] + } + client_configs = extract_client_configs(test_config) + assert len(client_configs) == 2 + assert client_configs[0]["tool"] == "memtier_benchmark" + assert client_configs[1]["tool"] == "memtier_benchmark" + + +def test_extract_client_container_images(): + # Test single clientconfig format + with open( + "./redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-100B-expire-use-case.yml", + "r", + ) as yml_file: + benchmark_config = yaml.safe_load(yml_file) + images = extract_client_container_images(benchmark_config) + assert len(images) == 1 + assert 
"redislabs/memtier_benchmark" in images[0] + + # Test multiple clientconfigs format + test_config = { + "clientconfigs": [ + { + "run_image": "redislabs/memtier_benchmark:edge", + "tool": "memtier_benchmark", + "arguments": '--command="ZSCAN zset:100 0"', + "resources": {"requests": {"cpus": "4", "memory": "2g"}}, + }, + { + "run_image": "redislabs/memtier_benchmark:latest", + "tool": "memtier_benchmark", + "arguments": '--command="ZRANGE zset:100 0 -1"', + "resources": {"requests": {"cpus": "2", "memory": "1g"}}, + }, + ] + } + images = extract_client_container_images(test_config) + assert len(images) == 2 + assert images[0] == "redislabs/memtier_benchmark:edge" + assert images[1] == "redislabs/memtier_benchmark:latest" + + +def test_extract_client_tools(): + # Test single clientconfig format + with open( + "./redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-100B-expire-use-case.yml", + "r", + ) as yml_file: + benchmark_config = yaml.safe_load(yml_file) + tools = extract_client_tools(benchmark_config) + assert len(tools) == 1 + assert tools[0] == "memtier_benchmark" + + # Test multiple clientconfigs format + test_config = { + "clientconfigs": [ + { + "run_image": "redislabs/memtier_benchmark:edge", + "tool": "memtier_benchmark", + "arguments": '--command="ZSCAN zset:100 0"', + "resources": {"requests": {"cpus": "4", "memory": "2g"}}, + }, + { + "run_image": "redislabs/redis-benchmark:latest", + "tool": "redis-benchmark", + "arguments": "-t set,get -n 1000", + "resources": {"requests": {"cpus": "2", "memory": "1g"}}, + }, + ] + } + tools = extract_client_tools(test_config) + assert len(tools) == 2 + assert tools[0] == "memtier_benchmark" + assert tools[1] == "redis-benchmark" + + +def test_extract_client_cpu_limit(): + # Test single clientconfig format + with open( + "./redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-100B-expire-use-case.yml", + "r", + ) as yml_file: + benchmark_config = yaml.safe_load(yml_file) + cpu_limit = extract_client_cpu_limit(benchmark_config) + assert cpu_limit >= 1 # Should be at least 1 CPU + + # Test multiple clientconfigs format - should sum CPU limits + test_config = { + "clientconfigs": [ + { + "run_image": "redislabs/memtier_benchmark:edge", + "tool": "memtier_benchmark", + "arguments": '--command="ZSCAN zset:100 0"', + "resources": {"requests": {"cpus": "2.5", "memory": "2g"}}, + }, + { + "run_image": "redislabs/memtier_benchmark:latest", + "tool": "memtier_benchmark", + "arguments": '--command="ZRANGE zset:100 0 -1"', + "resources": {"requests": {"cpus": "1.5", "memory": "1g"}}, + }, + ] + } + cpu_limit = extract_client_cpu_limit(test_config) + assert cpu_limit == 4 # ceil(2.5 + 1.5) = 4 + + +def test_extract_client_configs_edge_cases(): + # Test empty config + empty_config = {} + client_configs = extract_client_configs(empty_config) + assert len(client_configs) == 0 + + # Test config with missing fields + incomplete_config = { + "clientconfigs": [ + { + "run_image": "redislabs/memtier_benchmark:edge", + # Missing tool and arguments + "resources": {"requests": {"cpus": "2", "memory": "1g"}}, + } + ] + } + client_configs = extract_client_configs(incomplete_config) + assert len(client_configs) == 1 + assert "run_image" in client_configs[0] + + +def test_extract_client_container_images_edge_cases(): + # Test config without run_image + config_no_image = { + "clientconfig": { + "tool": "memtier_benchmark", + "arguments": '--command="SET key value"', + "resources": {"requests": {"cpus": "2", "memory": "1g"}}, + } + } + 
images = extract_client_container_images(config_no_image) + assert len(images) == 1 + assert images[0] is None + + # Test empty clientconfigs + empty_configs = {"clientconfigs": []} + images = extract_client_container_images(empty_configs) + assert len(images) == 0 + + +def test_extract_client_tools_edge_cases(): + # Test config without tool + config_no_tool = { + "clientconfig": { + "run_image": "redislabs/memtier_benchmark:edge", + "arguments": '--command="SET key value"', + "resources": {"requests": {"cpus": "2", "memory": "1g"}}, + } + } + tools = extract_client_tools(config_no_tool) + assert len(tools) == 1 + assert tools[0] is None + + +def test_prepare_memtier_benchmark_parameters_variations(): + """Test memtier benchmark parameter preparation with different configurations""" + with open( + "./redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-100B-expire-use-case.yml", + "r", + ) as yml_file: + benchmark_config = yaml.safe_load(yml_file) + client_tool = extract_client_tool(benchmark_config) + + # Test with TLS enabled + (_, benchmark_command_str, _) = prepare_memtier_benchmark_parameters( + benchmark_config["clientconfig"], + client_tool, + 12000, + "localhost", + "password123", + "test_output.json", + False, # oss_cluster_api_enabled + True, # tls_enabled + False, # tls_skip_verify + "cert.pem", + "key.pem", + "ca.pem", + "3", # resp_version (should be string) + 0, # override_memtier_test_time (use default) + 1, # override_test_runs (use default) + "", # unix_socket (should be empty string, not None) + ) + assert "--tls" in benchmark_command_str + assert "--cert cert.pem" in benchmark_command_str + assert "--key key.pem" in benchmark_command_str + assert "--cacert ca.pem" in benchmark_command_str + assert "--protocol resp3" in benchmark_command_str + + # Test with Unix socket + (_, benchmark_command_str, _) = prepare_memtier_benchmark_parameters( + benchmark_config["clientconfig"], + client_tool, + 12000, + "localhost", + None, + "test_output.json", + False, # oss_cluster_api_enabled + False, # tls_enabled + False, # tls_skip_verify + None, + None, + None, + "2", # resp_version + 120, # override_memtier_test_time + 5, # override_test_runs + "/tmp/redis.sock", # unix_socket + ) + assert "--unix-socket /tmp/redis.sock" in benchmark_command_str + assert "--test-time=120" in benchmark_command_str + assert "--run-count=5" in benchmark_command_str + + # Test with password + (_, benchmark_command_str, _) = prepare_memtier_benchmark_parameters( + benchmark_config["clientconfig"], + client_tool, + 12000, + "localhost", + "mypassword", + "test_output.json", + False, # oss_cluster_api_enabled + False, # tls_enabled + False, # tls_skip_verify + None, + None, + None, + "2", # resp_version + 0, # override_memtier_test_time (use default) + 1, # override_test_runs (use default) + "", # unix_socket + ) + assert "--authenticate mypassword" in benchmark_command_str + + +def test_parse_size(): + """Test the parse_size utility function""" + # Test basic units + assert parse_size("100") == 100 + assert parse_size("100B") == 100 + assert parse_size("1KB") == 1024 + assert parse_size("1MB") == 1024 * 1024 + assert parse_size("1GB") == 1024 * 1024 * 1024 + assert parse_size("2TB") == 2 * 1024 * 1024 * 1024 * 1024 + + # Test decimal values + assert parse_size("1.5KB") == int(1.5 * 1024) + assert parse_size("2.5MB") == int(2.5 * 1024 * 1024) + + # Test short forms + assert parse_size("1K") == 1024 + assert parse_size("1M") == 1024 * 1024 + assert parse_size("1G") == 1024 * 1024 * 1024 + + # 
+    # Test with spaces
+    assert parse_size("1 KB") == 1024
+    assert parse_size("2 MB") == 2 * 1024 * 1024
+
+    # Test case insensitive
+    assert parse_size("1kb") == 1024
+    assert parse_size("1mb") == 1024 * 1024
+
+
+def test_parse_size_edge_cases():
+    """Test parse_size with edge cases and different formats"""
+    # Test decimal numbers
+    assert parse_size("0.5KB") == int(0.5 * 1024)
+    assert parse_size("1.25MB") == int(1.25 * 1024 * 1024)
+
+    # Test different unit formats
+    assert parse_size("1KIB") == 1000  # Decimal units
+    assert parse_size("1MIB") == 1000000
+    assert parse_size("1GIB") == 1000000000
+
+    # Test short forms
+    assert parse_size("1KI") == 1000
+    assert parse_size("1MI") == 1000000
+    assert parse_size("1GI") == 1000000000
+    assert parse_size("1TI") == 1000000000000
+
+    # Test with extra whitespace
+    assert parse_size(" 1 KB ") == 1024
+    assert parse_size("\t2\tMB\t") == 2 * 1024 * 1024
+
+    # Test string input
+    assert parse_size(str(1024)) == 1024
+
+
+def test_create_client_runner_args():
+    """Test the argument parser creation"""
+    version_string = "test-version-1.0"
+    parser = create_client_runner_args(version_string)
+
+    # Test that parser is created successfully
+    assert parser is not None
+
+    # Test parsing basic arguments
+    args = parser.parse_args(
+        [
+            "--test",
+            "test.yml",
+            "--db_server_host",
+            "localhost",
+            "--db_server_port",
+            "6379",
+        ]
+    )
+
+    assert args.test == "test.yml"
+    assert args.db_server_host == "localhost"
+    assert args.db_server_port == 6379  # Port is parsed as integer
+
+    # Test parsing with optional arguments
+    args = parser.parse_args(
+        [
+            "--test",
+            "test.yml",
+            "--db_server_host",
+            "localhost",
+            "--db_server_port",
+            "6379",
+            "--flushall_on_every_test_start",
+            "--benchmark_local_install",
+        ]
+    )
+
+    assert args.flushall_on_every_test_start is True
+    assert args.benchmark_local_install is True
+
+
+def test_extract_client_container_image_legacy():
+    """Test the legacy extract_client_container_image function"""
+    # Test with run_image present
+    config = {"clientconfig": {"run_image": "redis:latest", "tool": "redis-benchmark"}}
+    image = extract_client_container_image(config)
+    assert image == "redis:latest"
+
+    # Test with missing run_image
+    config_no_image = {"clientconfig": {"tool": "memtier_benchmark"}}
+    image = extract_client_container_image(config_no_image)
+    assert image is None
+
+    # Test with missing clientconfig entirely
+    empty_config = {}
+    image = extract_client_container_image(empty_config)
+    assert image is None
+
+    # Test with custom keyname
+    config_custom = {"myclient": {"run_image": "custom:image"}}
+    image = extract_client_container_image(config_custom, keyname="myclient")
+    assert image == "custom:image"
+
+
+def test_extract_client_tool_legacy():
+    """Test the legacy extract_client_tool function"""
+    # Test with tool present
+    config = {
+        "clientconfig": {"tool": "memtier_benchmark", "run_image": "redis:latest"}
+    }
+    tool = extract_client_tool(config)
+    assert tool == "memtier_benchmark"
+
+    # Test with missing tool
+    config_no_tool = {"clientconfig": {"run_image": "redis:latest"}}
+    tool = extract_client_tool(config_no_tool)
+    assert tool is None
+
+    # Test with missing clientconfig entirely
+    empty_config = {}
+    tool = extract_client_tool(empty_config)
+    assert tool is None
+
+    # Test with custom keyname
+    config_custom = {"myclient": {"tool": "redis-benchmark"}}
+    tool = extract_client_tool(config_custom, keyname="myclient")
+    assert tool == "redis-benchmark"
+
+
+def test_run_multiple_clients_error_handling():
+    """Test error handling in run_multiple_clients"""
+    import pytest
+
+    # Test with empty config (no client configurations)
+    empty_config = {}
+
+    # Mock the required parameters
+    mock_args = type("MockArgs", (), {"benchmark_local_install": False})()
+
+    with pytest.raises(ValueError, match="No client configurations found"):
+        run_multiple_clients(
+            empty_config,
+            None,  # docker_client
+            "/tmp",  # temporary_dir_client
+            "/mnt",  # client_mnt_point
+            "/workdir",  # benchmark_tool_workdir
+            "0-3",  # client_cpuset_cpus
+            6379,  # port
+            "localhost",  # host
+            None,  # password
+            False,  # oss_cluster_api_enabled
+            False,  # tls_enabled
+            False,  # tls_skip_verify
+            None,  # test_tls_cert
+            None,  # test_tls_key
+            None,  # test_tls_cacert
+            "2",  # resp_version
+            0,  # override_memtier_test_time
+            1,  # override_test_runs
+            "",  # unix_socket
+            mock_args,  # args
+        )
+
+
+def test_create_client_runner_args_all_options():
+    """Test argument parser with all possible options"""
+    version_string = "test-version-1.0"
+    parser = create_client_runner_args(version_string)
+
+    # Test parsing with all optional arguments
+    args = parser.parse_args(
+        [
+            "--test",
+            "test.yml",
+            "--db_server_host",
+            "redis.example.com",
+            "--db_server_port",
+            "6380",
+            "--flushall_on_every_test_start",
+            "--benchmark_local_install",
+            "--cluster-mode",
+            "--unix-socket",
+            "/tmp/redis.sock",
+            "--override-memtier-test-time",
+            "60",
+            "--override-test-runs",
+            "3",
+        ]
+    )
+
+    assert args.test == "test.yml"
+    assert args.db_server_host == "redis.example.com"
+    assert args.db_server_port == 6380
+    assert args.flushall_on_every_test_start is True
+    assert args.benchmark_local_install is True
+    assert args.cluster_mode is True
+    assert args.unix_socket == "/tmp/redis.sock"
+    assert args.override_memtier_test_time == 60
+    assert args.override_test_runs == 3
+
+
+def test_create_client_runner_args_defaults():
+    """Test argument parser default values"""
+    version_string = "test-version-1.0"
+    parser = create_client_runner_args(version_string)
+
+    # Test parsing with minimal required arguments
+    args = parser.parse_args(
+        [
+            "--test",
+            "test.yml",
+            "--db_server_host",
+            "localhost",
+            "--db_server_port",
+            "6379",
+        ]
+    )
+
+    # Check default values
+    assert args.flushall_on_every_test_start is False
+    assert args.benchmark_local_install is False
+    assert args.cluster_mode is False
+    assert args.unix_socket == ""
+    assert args.override_memtier_test_time == 0
+    assert args.override_test_runs == 1
+
+
+# Removed test_prepare_benchmark_parameters_redis_benchmark as it tests external functionality
+
+
+def test_prepare_memtier_benchmark_parameters_resp_versions():
+    """Test memtier benchmark with different RESP versions"""
+    with open(
+        "./redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-100B-expire-use-case.yml",
+        "r",
+    ) as yml_file:
+        benchmark_config = yaml.safe_load(yml_file)
+        client_tool = extract_client_tool(benchmark_config)
+
+        # Test RESP2 (default)
+        (_, benchmark_command_str, _) = prepare_memtier_benchmark_parameters(
+            benchmark_config["clientconfig"],
+            client_tool,
+            6379,
+            "localhost",
+            None,
+            "test_output.json",
+            False,  # oss_cluster_api_enabled
+            False,  # tls_enabled
+            False,  # tls_skip_verify
+            None,
+            None,
+            None,
+            "2",  # resp_version
+            0,
+            1,
+            "",
+        )
+        # RESP2 should not add any protocol flags
+        assert "--protocol resp2" not in benchmark_command_str
+
+        # Test RESP3
+        (_, benchmark_command_str, _) = prepare_memtier_benchmark_parameters(
+            benchmark_config["clientconfig"],
+            client_tool,
+            6379,
+            "localhost",
+            None,
+            "test_output.json",
+            False,  # oss_cluster_api_enabled
+            False,  # tls_enabled
+            False,  # tls_skip_verify
+            None,
+            None,
+            None,
+            "3",  # resp_version
+            0,
+            1,
+            "",
+        )
+        assert "--protocol resp3" in benchmark_command_str
+
+
+def test_prepare_pubsub_sub_bench_parameters():
+    """Test pubsub-sub-bench parameter preparation"""
+    # Create a test client config for pubsub-sub-bench
+    client_config = {
+        "tool": "pubsub-sub-bench",
+        "arguments": "-clients 10 -messages 1000 -subscribers-per-channel 5",
+        "run_image": "filipe958/pubsub-sub-bench:latest",
+        "resources": {"requests": {"cpus": "2", "memory": "1g"}},
+    }
+
+    # Test basic parameter preparation
+    (_, benchmark_command_str, _) = prepare_pubsub_sub_bench_parameters(
+        client_config,
+        "pubsub-sub-bench",
+        6379,
+        "localhost",
+        None,  # password
+        "test_output.json",
+        False,  # oss_cluster_api_enabled
+        False,  # tls_enabled
+        False,  # tls_skip_verify
+        None,
+        None,
+        None,  # TLS certs
+        "2",  # resp_version
+        0,  # override_test_time
+        "",  # unix_socket
+        None,  # username
+    )
+
+    # Verify basic parameters
+    assert "-json-out-file test_output.json" in benchmark_command_str
+    assert "-host localhost" in benchmark_command_str
+    assert "-port 6379" in benchmark_command_str
+    assert "-resp 2" in benchmark_command_str
+
+    # Verify user arguments are appended
+    assert "-clients 10" in benchmark_command_str
+    assert "-messages 1000" in benchmark_command_str
+    assert "-subscribers-per-channel 5" in benchmark_command_str
+
+
+def test_prepare_pubsub_sub_bench_parameters_with_auth():
+    """Test pubsub-sub-bench with authentication"""
+    client_config = {
+        "tool": "pubsub-sub-bench",
+        "arguments": "-test-time 60",
+        "run_image": "filipe958/pubsub-sub-bench:latest",
+    }
+
+    # Test with password only
+    (_, benchmark_command_str, _) = prepare_pubsub_sub_bench_parameters(
+        client_config,
+        "pubsub-sub-bench",
+        6379,
+        "redis.example.com",
+        "mypassword",
+        "output.json",
+        False,
+        False,
+        False,
+        None,
+        None,
+        None,
+        "3",  # RESP3
+        120,  # test_time override
+        "",
+        None,
+    )
+
+    assert "-host redis.example.com" in benchmark_command_str
+    assert "-port 6379" in benchmark_command_str
+    assert "-a mypassword" in benchmark_command_str
+    assert "-resp 3" in benchmark_command_str
+    assert "-test-time 120" in benchmark_command_str
+
+    # Test with username and password (ACL style)
+    (_, benchmark_command_str, _) = prepare_pubsub_sub_bench_parameters(
+        client_config,
+        "pubsub-sub-bench",
+        6379,
+        "redis.example.com",
+        "mypassword",
+        "output.json",
+        False,
+        False,
+        False,
+        None,
+        None,
+        None,
+        "2",
+        0,
+        "",
+        "myuser",  # username
+    )
+
+    assert "-user myuser" in benchmark_command_str
+    assert "-a mypassword" in benchmark_command_str
+
+
+def test_prepare_pubsub_sub_bench_parameters_cluster_mode():
+    """Test pubsub-sub-bench with cluster mode"""
+    client_config = {
+        "tool": "pubsub-sub-bench",
+        "arguments": "-channel-minimum 1 -channel-maximum 100",
+        "run_image": "filipe958/pubsub-sub-bench:latest",
+    }
+
+    (_, benchmark_command_str, _) = prepare_pubsub_sub_bench_parameters(
+        client_config,
+        "pubsub-sub-bench",
+        6379,
+        "cluster.redis.com",
+        None,
+        "cluster_output.json",
+        True,  # oss_cluster_api_enabled
+        False,
+        False,
+        None,
+        None,
+        None,
+        None,
+        0,
+        "",
+        None,
+    )
+
+    assert "-oss-cluster-api-distribute-subscribers" in benchmark_command_str
+    assert "-channel-minimum 1" in benchmark_command_str
+    assert "-channel-maximum 100" in benchmark_command_str
+
+
+def test_prepare_pubsub_sub_bench_parameters_unix_socket():
+    """Test pubsub-sub-bench with unix socket (should fall back to host/port)"""
+    client_config = {
+        "tool": "pubsub-sub-bench",
+        "arguments": "-verbose",
+        "run_image": "filipe958/pubsub-sub-bench:latest",
+    }
+
+    (_, benchmark_command_str, _) = prepare_pubsub_sub_bench_parameters(
+        client_config,
+        "pubsub-sub-bench",
+        6379,
+        "localhost",
+        None,
+        "unix_output.json",
+        False,
+        False,
+        False,
+        None,
+        None,
+        None,
+        None,
+        0,
+        "/tmp/redis.sock",  # unix_socket
+        None,
+    )
+
+    # Should still use host/port since pubsub-sub-bench doesn't support unix sockets
+    assert "-host localhost" in benchmark_command_str
+    assert "-port 6379" in benchmark_command_str
+    assert "-verbose" in benchmark_command_str
+
+
+def test_extract_client_configs_pubsub_sub_bench():
+    """Test client config extraction with pubsub-sub-bench tool"""
+    # Test multiple pubsub-sub-bench configs
+    test_config = {
+        "clientconfigs": [
+            {
+                "run_image": "filipe958/pubsub-sub-bench:latest",
+                "tool": "pubsub-sub-bench",
+                "arguments": "-clients 5 -mode subscribe",
+                "resources": {"requests": {"cpus": "1", "memory": "512m"}},
+            },
+            {
+                "run_image": "filipe958/pubsub-sub-bench:edge",
+                "tool": "pubsub-sub-bench",
+                "arguments": "-clients 10 -mode ssubscribe",
+                "resources": {"requests": {"cpus": "2", "memory": "1g"}},
+            },
+        ]
+    }
+
+    client_configs = extract_client_configs(test_config)
+    client_tools = extract_client_tools(test_config)
+    client_images = extract_client_container_images(test_config)
+
+    assert len(client_configs) == 2
+    assert len(client_tools) == 2
+    assert len(client_images) == 2
+
+    assert client_tools[0] == "pubsub-sub-bench"
+    assert client_tools[1] == "pubsub-sub-bench"
+    assert "subscribe" in client_configs[0]["arguments"]
+    assert "ssubscribe" in client_configs[1]["arguments"]
+    assert client_images[0] == "filipe958/pubsub-sub-bench:latest"
+    assert client_images[1] == "filipe958/pubsub-sub-bench:edge"
+
+
+def test_prepare_pubsub_sub_bench_parameters_override_test_time():
+    """Test pubsub-sub-bench with test-time override"""
+    client_config = {
+        "tool": "pubsub-sub-bench",
+        "arguments": "-clients 10 -test-time 60 -verbose",  # User specifies 60s
+        "run_image": "filipe958/pubsub-sub-bench:latest",
+    }
+
+    # Test with override_test_time=30 (should override the user's 60s)
+    (_, benchmark_command_str, _) = prepare_pubsub_sub_bench_parameters(
+        client_config,
+        "pubsub-sub-bench",
+        6379,
+        "localhost",
+        None,
+        "output.json",
+        False,
+        False,
+        False,
+        None,
+        None,
+        None,
+        "2",
+        30,  # override_test_time=30
+        "",
+        None,
+    )
+
+    # Should have our override time, not the user's time
+    assert "-test-time 30" in benchmark_command_str
+    assert "-test-time 60" not in benchmark_command_str  # User's time should be removed
+    assert "-clients 10" in benchmark_command_str  # Other args should remain
+    assert "-verbose" in benchmark_command_str
+
+
+def test_create_client_runner_args_container_timeout_buffer():
+    """Test that container timeout buffer argument is properly configured"""
+    from redis_benchmarks_specification.__runner__.args import create_client_runner_args
+
+    # Test default value
+    parser = create_client_runner_args("test")
+    args = parser.parse_args([])
+    assert args.container_timeout_buffer == 60  # Default should be 60 seconds
+
+    # Test custom value
+    args = parser.parse_args(["--container-timeout-buffer", "120"])
+    assert args.container_timeout_buffer == 120
+
+
 def test_run_client_runner_logic():
     project_name = "tool"
     project_version = "v0"