Skip to content

Commit 85ec2fc

Browse files
committed
fix tests A10-PyTorch-1.test_e2e.test_openai_chat_multimodal_example and A10-PyTorch-1.test_e2e.test_trtllm_serve_multimodal_example
Signed-off-by: John Calderon <jcalderon@nvidia.com>
1 parent 175cc1c commit 85ec2fc

File tree

2 files changed: +5 additions, -2 deletions

tests/unittest/llmapi/apps/_test_openai_chat_multimodal.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,7 @@ def server(model_name: str, temp_extra_llm_api_options_file: str):
     model_path = get_model_path(model_name)
     args = [
         "--extra_llm_api_options", temp_extra_llm_api_options_file,
-        "--max_batch_size", "64"
+        "--max_batch_size", "64", "--enable_chunked_prefill"
     ]
     with RemoteOpenAIServer(model_path, args) as remote_server:
         yield remote_server

tests/unittest/llmapi/apps/_test_trtllm_serve_multimodal_example.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,10 @@ def temp_extra_llm_api_options_file(request):
 @pytest.fixture(scope="module")
 def server(model_name: str, temp_extra_llm_api_options_file: str):
     model_path = get_model_path(model_name)
-    args = ["--extra_llm_api_options", temp_extra_llm_api_options_file]
+    args = [
+        "--enable_chunked_prefill", "--extra_llm_api_options",
+        temp_extra_llm_api_options_file
+    ]
     with RemoteOpenAIServer(model_path, port=8000,
                             cli_args=args) as remote_server:
         yield remote_server

0 commit comments

Comments
 (0)