Skip to content

Commit e9117ea

Browse files
author
chibu
committed
clean up
1 parent 432031e commit e9117ea

File tree

3 files changed

+2
-39
lines changed

3 files changed

+2
-39
lines changed

src/automation/tasks/base_task.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88
class BaseTask():
99

1010
#base_packages = ["git+https://github.com/neuralmagic/research.git"]
11-
base_packages = ["git+https://github.com/neuralmagic/research.git@update_guidellm"]
11+
#base_packages = ["git+https://github.com/neuralmagic/research.git@update_guidellm"]
1212

1313
def __init__(
1414
self,
@@ -19,6 +19,7 @@ def __init__(
1919
packages: Optional[Sequence[str]]=None,
2020
task_type: str="training",
2121
):
22+
base_packages = [f"git+https://github.com/neuralmagic/research.git@{branch}"]
2223

2324
if packages is not None:
2425
packages = list(set(packages + self.base_packages))

src/automation/tasks/guidellm.py

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -5,16 +5,10 @@
55

66
DEFAULT_SERVER_WAIT_TIME = 600 # 600 seconds = 10 minutes
77
GUIDELLM_PACKAGE = "git+https://github.com/neuralmagic/guidellm.git"
8-
#GUIDELLM_PACKAGE = "git+https://github.com/neuralmagic/guidellm.git@clearml-guidellm"
9-
#GUIDELLM_PACKAGE = "git+https://github.com/neuralmagic/guidellm.git@clearml-guidellm#egg=guidellm"
10-
#GUIDELLM_PACKAGE = "git+https://github.com/neuralmagic/guidellm.git@main#egg=guidellm[dev]"
118

129
class GuideLLMTask(BaseTask):
1310

1411
guidellm_packages = [
15-
#"build>=1.0.0",
16-
#"setuptools>=61.0",
17-
#"setuptools-git-versioning>=2.0,<3",
1812
"vllm",
1913
GUIDELLM_PACKAGE,
2014
"hf_xet",

src/automation/tasks/scripts/guidellm_script.py

Lines changed: 0 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -88,7 +88,6 @@ def clean_hocon_value(v):
8888
import json
8989
import asyncio
9090
from pathlib import Path
91-
#from guidellm.benchmark import benchmark_generative_text
9291
from guidellm.benchmark.output import GenerativeBenchmarksReport
9392
from guidellm.benchmark.entrypoints import benchmark_generative_text, benchmark_with_scenario
9493
from guidellm.benchmark.scenario import GenerativeTextScenario, get_builtin_scenarios
@@ -100,41 +99,10 @@ def clean_hocon_value(v):
10099
print("[DEBUG] Calling benchmark_generative_text with:")
101100
print(json.dumps(guidellm_args, indent=2))
102101

103-
#GenerativeBenchmarksReport()
104102
executable_path = os.path.dirname(sys.executable)
105103
vllm_path = os.path.join(executable_path, "vllm")
106104
print(f"The vllm path is: {vllm_path}")
107105

108-
109-
#default_scenario = get_builtin_scenarios()[0]
110-
#current_scenario = GenerativeTextScenario.from_builtin(default_scenario, dict(guidellm_args))
111-
112-
#from pathlib import Path
113-
#filepath = Path(os.path.join(".", "src", "automation", "standards", "benchmarking", "chat.json"))
114-
#current_scenario = GenerativeTextScenario.from_file(filepath, dict(guidellm_args))
115-
116-
#import time
117-
#time.sleep(300)
118-
"""
119-
current_scenario = GenerativeTextScenario
120-
print(current_scenario.model_fields["target"])
121-
print(current_scenario.model_fields["model"])
122-
overlap_keys = current_scenario.model_fields.keys() & dict(guidellm_args)
123-
#overlap_keys = ["model"]
124-
for element in overlap_keys:
125-
#print(element)
126-
element_field_info = current_scenario.model_fields[element]
127-
element_field_info.default = guidellm_args[element]
128-
current_scenario.model_fields[element] = element_field_info
129-
#print(element_field_info.annotation)
130-
print(overlap_keys)
131-
132-
print(current_scenario.model_fields["target"])
133-
print(current_scenario.model_fields["model"])
134-
135-
current_scenario = GenerativeTextScenario
136-
"""
137-
138106
try:
139107
asyncio.run(
140108
benchmark_with_scenario(

0 commit comments

Comments (0)