Skip to content

Commit 4f87455

Browse files
Skip CLI/bench tool tests in CI - require external binaries
These tests require the llama-cli and llama-bench binaries, which may not be available in CI environments. Mark them as slow tests so they are skipped by default. They can still be run locally with SLOW_TESTS=1.

Co-Authored-By: Alex Peng <[email protected]>
1 parent c7d781a commit 4f87455

File tree

1 file changed

+9
-0
lines changed

1 file changed

+9
-0
lines changed

tools/server/tests/e2e/test_tool_integration.py

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@
1313
from utils import *
1414

1515

16+
@pytest.mark.skipif(not is_slow_test_allowed(), reason="skipping slow test - requires llama-cli binary")
1617
def test_cli_basic_execution(pipeline_process, e2e_small_model_config):
1718
"""
1819
Test basic llama-cli execution with a model.
@@ -44,6 +45,7 @@ def test_cli_basic_execution(pipeline_process, e2e_small_model_config):
4445
assert len(output) > 0, "CLI should produce output"
4546

4647

48+
@pytest.mark.skipif(not is_slow_test_allowed(), reason="skipping slow test - requires llama-cli binary")
4749
def test_cli_with_seed(pipeline_process, e2e_small_model_config):
4850
"""
4951
Test llama-cli with deterministic seed for reproducible outputs.
@@ -82,6 +84,7 @@ def test_cli_with_seed(pipeline_process, e2e_small_model_config):
8284
assert len(output2) > 0
8385

8486

87+
@pytest.mark.skipif(not is_slow_test_allowed(), reason="skipping slow test - requires llama-bench binary")
8588
def test_bench_basic_execution(pipeline_process, e2e_small_model_config):
8689
"""
8790
Test basic llama-bench execution.
@@ -116,6 +119,7 @@ def test_bench_basic_execution(pipeline_process, e2e_small_model_config):
116119
"Bench output should contain performance metrics"
117120

118121

122+
@pytest.mark.skipif(not is_slow_test_allowed(), reason="skipping slow test - requires llama-bench binary")
119123
def test_bench_with_different_batch_sizes(pipeline_process, e2e_small_model_config):
120124
"""
121125
Test llama-bench with different batch size configurations.
@@ -147,6 +151,7 @@ def test_bench_with_different_batch_sizes(pipeline_process, e2e_small_model_conf
147151
assert len(result["output"]) > 0
148152

149153

154+
@pytest.mark.skipif(not is_slow_test_allowed(), reason="skipping slow test - requires llama-cli binary")
150155
def test_cli_embedding_generation(pipeline_process, e2e_embedding_model_config):
151156
"""
152157
Test embedding generation using llama-cli.
@@ -175,6 +180,7 @@ def test_cli_embedding_generation(pipeline_process, e2e_embedding_model_config):
175180
assert result.returncode == 0, f"CLI embedding should succeed: {result.stderr.decode()}"
176181

177182

183+
@pytest.mark.skipif(not is_slow_test_allowed(), reason="skipping slow test - requires llama-cli binary")
178184
def test_tool_parameter_validation(pipeline_process, e2e_small_model_config):
179185
"""
180186
Test tool parameter validation and error handling.
@@ -193,6 +199,7 @@ def test_tool_parameter_validation(pipeline_process, e2e_small_model_config):
193199
assert len(stderr) > 0, "Should provide error message"
194200

195201

202+
@pytest.mark.skipif(not is_slow_test_allowed(), reason="skipping slow test - requires llama-cli binary")
196203
def test_cli_context_size_parameter(pipeline_process, e2e_small_model_config):
197204
"""
198205
Test llama-cli with custom context size parameter.
@@ -219,6 +226,7 @@ def test_cli_context_size_parameter(pipeline_process, e2e_small_model_config):
219226
assert result.returncode == 0, "CLI with custom context size should succeed"
220227

221228

229+
@pytest.mark.skipif(not is_slow_test_allowed(), reason="skipping slow test - requires llama-cli binary")
222230
def test_server_and_cli_coordination(pipeline_process, e2e_small_model_config):
223231
"""
224232
Test coordination between server and CLI tool workflows.
@@ -253,6 +261,7 @@ def test_server_and_cli_coordination(pipeline_process, e2e_small_model_config):
253261
assert result.returncode == 0, "CLI should work after server stops"
254262

255263

264+
@pytest.mark.skipif(not is_slow_test_allowed(), reason="skipping slow test - requires llama-cli binary")
256265
def test_cli_json_output_format(pipeline_process, e2e_small_model_config):
257266
"""
258267
Test llama-cli JSON output format.

0 commit comments

Comments (0)