
Commit d1b985a

Author: chibu
Commit message: add file error handling
1 parent: 3703e62

File tree: 2 files changed, +17 -28 lines


src/automation/tasks/scripts/guidellm_script.py

Lines changed: 17 additions & 22 deletions
@@ -55,25 +55,6 @@ def clean_hocon_value(v):
 
 gpu_count = int(guidellm_args.get("gpu_count", 1))
 
-print(vllm_args)
-print(model_id)
-print(guidellm_args["target"])
-print(args["Args"]["server_wait_time"])
-print(gpu_count)
-print(os.getcwd())
-
-from pathlib import Path
-from guidellm.benchmark.scenario import GenerativeTextScenario, get_builtin_scenarios
-user_scenario = guidellm_args.get("scenario", "")
-if user_scenario:
-    filepath = Path(os.path.join(".", "src", "automation", "standards", "benchmarking", f"{user_scenario}.json"))
-    current_scenario = GenerativeTextScenario.from_file(filepath, dict(guidellm_args))
-#elif len(get_builtin_scenarios()) > 0:
-#    current_scenario = GenerativeTextScenario.from_builtin(get_builtin_scenarios()[0], dict(guidellm_args))
-else:
-    filepath = Path(os.path.join(".", "src", "automation", "standards", "benchmarking", f"{DEFAULT_GUIDELLM_SCENARIO}.json"))
-    current_scenario = GenerativeTextScenario.from_file(filepath, dict(guidellm_args))
-print(current_scenario.model_fields)
 # Start vLLM server
 server_process, server_initialized, server_log = start_vllm_server(
     vllm_args,
@@ -97,15 +78,29 @@ def clean_hocon_value(v):
 import json
 import asyncio
 from pathlib import Path
-from guidellm.benchmark.output import GenerativeBenchmarksReport
-from guidellm.benchmark.entrypoints import benchmark_generative_text, benchmark_with_scenario
+from guidellm.benchmark.entrypoints import benchmark_with_scenario
 from guidellm.benchmark.scenario import GenerativeTextScenario, get_builtin_scenarios
 
+user_scenario = guidellm_args.get("scenario", "")
+if user_scenario:
+    filepath = Path(os.path.join(".", "src", "automation", "standards", "benchmarking", f"{user_scenario}.json"))
+    if os.path.exists(filepath):
+        current_scenario = GenerativeTextScenario.from_file(filepath, dict(guidellm_args))
+    else:
+        raise ValueError(f"Scenario path {filepath} does not exist")
+#elif len(get_builtin_scenarios()) > 0:
+#    # to be used when get_builtin_scenarios() bug is fixed
+#    current_scenario = GenerativeTextScenario.from_builtin(get_builtin_scenarios()[0], dict(guidellm_args))
+else:
+    filepath = Path(os.path.join(".", "src", "automation", "standards", "benchmarking", f"{DEFAULT_GUIDELLM_SCENARIO}.json"))
+    current_scenario = GenerativeTextScenario.from_file(filepath, dict(guidellm_args))
+print(current_scenario.model_fields)
+
 # Ensure output_path is set and consistent
 output_path = Path(guidellm_args.get("output_path", "guidellm-output.json"))
 guidellm_args["output_path"] = str(output_path)
 
-print("[DEBUG] Calling benchmark_generative_text with:")
+print("[DEBUG] Calling benchmark_with_scenario with:")
 print(json.dumps(guidellm_args, indent=2))
 
 executable_path = os.path.dirname(sys.executable)
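For reference, the scenario-resolution logic this commit moves and hardens can be exercised in isolation. Below is a minimal sketch: the resolve_scenario helper name and the DEFAULT_GUIDELLM_SCENARIO value are assumptions for illustration, not part of the repo. It mirrors the committed behavior: a user-named scenario must exist on disk, while the default scenario is loaded directly.

# Minimal sketch of the scenario resolution added in this commit.
# The helper name and the default constant value are hypothetical.
import os
from pathlib import Path

from guidellm.benchmark.scenario import GenerativeTextScenario

DEFAULT_GUIDELLM_SCENARIO = "default"  # assumption; the real constant lives in the script

def resolve_scenario(guidellm_args: dict) -> GenerativeTextScenario:
    base = os.path.join(".", "src", "automation", "standards", "benchmarking")
    user_scenario = guidellm_args.get("scenario", "")
    if user_scenario:
        filepath = Path(os.path.join(base, f"{user_scenario}.json"))
        # The commit's error handling: fail fast with a clear message
        # instead of an opaque failure inside from_file.
        if not os.path.exists(filepath):
            raise ValueError(f"Scenario path {filepath} does not exist")
    else:
        filepath = Path(os.path.join(base, f"{DEFAULT_GUIDELLM_SCENARIO}.json"))
    return GenerativeTextScenario.from_file(filepath, dict(guidellm_args))

With this check, a call like resolve_scenario({"scenario": "missing"}) raises ValueError immediately; before the commit, the nonexistent path would have been passed straight to GenerativeTextScenario.from_file and failed there with a less obvious error.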

src/automation/vllm/server.py

Lines changed: 0 additions & 6 deletions
@@ -32,12 +32,6 @@ def start_vllm_server(
 
 parsed_target = urlparse(target)
 print(f"vllm path is: {vllm_path}")
-"""
-server_command = [
-    f"{vllm_path}", "serve",
-    "Qwen/Qwen2.5-1.5B-Instruct",
-]
-"""
 
 server_command = [
     f"{vllm_path}", "serve",
