Skip to content

Commit 80a79ef

Browse files
committed
add fuzz testing and update workflows
1 parent 0b245ea commit 80a79ef

File tree

4 files changed

+381
-1
lines changed

4 files changed

+381
-1
lines changed

.github/workflows/test.yml

Lines changed: 51 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -53,3 +53,54 @@ jobs:
5353
python -m pip install pip==26.0.1
5454
pip install -e ".[dev,test,performance]"
5555
pip-audit
56+
57+
schema-updated:
58+
runs-on: ubuntu-latest
59+
if: github.event_name == 'pull_request'
60+
steps:
61+
- uses: actions/checkout@v4
62+
with:
63+
fetch-depth: 0
64+
65+
- name: Check for schema changes
66+
id: schema
67+
run: |
68+
CHANGED=$(git diff --name-only origin/${{ github.base_ref }}...HEAD -- \
69+
'src/inference_endpoint/config/schema.py' \
70+
'src/inference_endpoint/endpoint_client/config.py' \
71+
'src/inference_endpoint/commands/benchmark/cli.py')
72+
echo "changed=$([[ -n "$CHANGED" ]] && echo true || echo false)" >> "$GITHUB_OUTPUT"
73+
74+
- name: Set up Python 3.12
75+
if: steps.schema.outputs.changed == 'true'
76+
uses: actions/setup-python@v4
77+
with:
78+
python-version: "3.12"
79+
80+
- name: Install dependencies
81+
if: steps.schema.outputs.changed == 'true'
82+
run: |
83+
python -m pip install --upgrade pip
84+
pip install -e .[test]
85+
86+
- name: Run schema fuzz tests
87+
if: steps.schema.outputs.changed == 'true'
88+
run: |
89+
pytest -xv -m schema_fuzz
90+
91+
- name: Validate YAML templates against schema
92+
if: steps.schema.outputs.changed == 'true'
93+
run: |
94+
python -c "
95+
from pathlib import Path
96+
from inference_endpoint.config.schema import BenchmarkConfig
97+
templates = sorted(Path('src/inference_endpoint/config/templates').glob('*.yaml'))
98+
for t in templates:
99+
try:
100+
BenchmarkConfig.from_yaml_file(t)
101+
print(f' OK: {t.name}')
102+
except Exception as e:
103+
print(f' FAIL: {t.name}: {e}')
104+
raise SystemExit(1)
105+
print(f'All {len(templates)} templates valid.')
106+
"

pyproject.toml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -97,6 +97,8 @@ test = [
9797
"aiohttp==3.13.4",
9898
# Plotting for benchmark sweep mode
9999
"matplotlib==3.10.8",
100+
# Property-based testing (CLI fuzz)
101+
"hypothesis==6.151.10",
100102
]
101103
performance = [
102104
"pytest-benchmark==5.2.3",
@@ -184,6 +186,7 @@ markers = [
184186
"integration: marks tests as integration tests",
185187
"unit: marks tests as unit tests",
186188
"run_explicitly: mark test to only run explicitly",
189+
"schema_fuzz: hypothesis CLI fuzz tests (run in CI on schema changes)",
187190
]
188191
filterwarnings = [
189192
"ignore:Session timeout reached:RuntimeWarning",

tests/integration/commands/test_benchmark_command.py

Lines changed: 27 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -87,7 +87,7 @@ def test_offline_benchmark(
8787

8888
@pytest.mark.integration
8989
@pytest.mark.parametrize("streaming", [StreamingMode.OFF, StreamingMode.ON])
90-
def test_online_benchmark(
90+
def test_poisson_benchmark(
9191
self, mock_http_echo_server, ds_dataset_path, caplog, streaming
9292
):
9393
config = _config(
@@ -105,6 +105,32 @@ def test_online_benchmark(
105105
assert "PoissonDistributionScheduler" in caplog.text
106106
assert "50" in caplog.text
107107

108+
@pytest.mark.integration
109+
@pytest.mark.parametrize("streaming", [StreamingMode.OFF, StreamingMode.ON])
110+
def test_concurrency_benchmark(
111+
self, mock_http_echo_server, ds_dataset_path, caplog, streaming
112+
):
113+
config = _config(
114+
mock_http_echo_server.url,
115+
ds_dataset_path,
116+
type=TestType.ONLINE,
117+
model_params=ModelParams(name="echo-server", streaming=streaming),
118+
settings=Settings(
119+
runtime=RuntimeConfig(min_duration_ms=2000),
120+
load_pattern=LoadPattern(
121+
type=LoadPatternType.CONCURRENCY, target_concurrency=4
122+
),
123+
client=HTTPClientConfig(
124+
num_workers=1, warmup_connections=0, max_connections=10
125+
),
126+
),
127+
)
128+
with caplog.at_level("INFO"):
129+
run_benchmark(config, TestMode.PERF)
130+
131+
assert "Completed in" in caplog.text
132+
assert "successful" in caplog.text
133+
108134
@pytest.mark.integration
109135
def test_results_json_output(
110136
self, mock_http_echo_server, ds_dataset_path, tmp_path

0 commit comments

Comments (0)