@@ -88,7 +88,6 @@ def clean_hocon_value(v):
import json
import asyncio
from pathlib import Path
-#from guidellm.benchmark import benchmark_generative_text
from guidellm.benchmark.output import GenerativeBenchmarksReport
from guidellm.benchmark.entrypoints import benchmark_generative_text, benchmark_with_scenario
from guidellm.benchmark.scenario import GenerativeTextScenario, get_builtin_scenarios
@@ -100,41 +99,10 @@ def clean_hocon_value(v):
print("[DEBUG] Calling benchmark_generative_text with:")
print(json.dumps(guidellm_args, indent=2))

-#GenerativeBenchmarksReport()
executable_path = os.path.dirname(sys.executable)
vllm_path = os.path.join(executable_path, "vllm")
print(f"The vllm path is: {vllm_path}")

-
-#default_scenario = get_builtin_scenarios()[0]
-#current_scenario = GenerativeTextScenario.from_builtin(default_scenario, dict(guidellm_args))
-
-#from pathlib import Path
-#filepath = Path(os.path.join(".", "src", "automation", "standards", "benchmarking", "chat.json"))
-#current_scenario = GenerativeTextScenario.from_file(filepath, dict(guidellm_args))
-
-#import time
-#time.sleep(300)
-"""
-current_scenario = GenerativeTextScenario
-print(current_scenario.model_fields["target"])
-print(current_scenario.model_fields["model"])
-overlap_keys = current_scenario.model_fields.keys() & dict(guidellm_args)
-#overlap_keys = ["model"]
-for element in overlap_keys:
-    #print(element)
-    element_field_info = current_scenario.model_fields[element]
-    element_field_info.default = guidellm_args[element]
-    current_scenario.model_fields[element] = element_field_info
-    #print(element_field_info.annotation)
-print(overlap_keys)
-
-print(current_scenario.model_fields["target"])
-print(current_scenario.model_fields["model"])
-
-current_scenario = GenerativeTextScenario
-"""
-
try:
    asyncio.run(
        benchmark_with_scenario(
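
For reference, here is a minimal sketch of the scenario-based invocation that the deleted comments were experimenting with, assuming guidellm_args is a plain dict of overrides. The target and model values below are hypothetical, and passing the scenario positionally to benchmark_with_scenario is an assumption about its signature; the exact arguments used in this file are not shown in the hunk.

# Sketch only: mirrors the removed from_builtin experiment, not the final call in this script.
import asyncio

from guidellm.benchmark.entrypoints import benchmark_with_scenario
from guidellm.benchmark.scenario import GenerativeTextScenario, get_builtin_scenarios

# Hypothetical overrides; in the real script these come from the parsed benchmark config.
guidellm_args = {"target": "http://localhost:8000/v1", "model": "my-model"}

# Build a scenario from the first builtin preset, letting guidellm_args override its defaults,
# as the removed commented-out code attempted.
default_scenario = get_builtin_scenarios()[0]
scenario = GenerativeTextScenario.from_builtin(default_scenario, dict(guidellm_args))

# benchmark_with_scenario is a coroutine, so it is driven with asyncio.run(...).
asyncio.run(benchmark_with_scenario(scenario))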