@@ -107,13 +107,17 @@ steps:
   mirror_hardwares: [amd]
   source_file_dependencies:
   - vllm/
+  - tests/entrypoints/llm
+  - tests/entrypoints/openai
+  - tests/entrypoints/test_chat_utils
+  - tests/entrypoints/offline_mode
   commands:
   - pytest -v -s entrypoints/llm --ignore=entrypoints/llm/test_lazy_outlines.py --ignore=entrypoints/llm/test_generate.py --ignore=entrypoints/llm/test_generate_multiple_loras.py --ignore=entrypoints/llm/test_guided_generate.py --ignore=entrypoints/llm/test_collective_rpc.py
   - pytest -v -s entrypoints/llm/test_lazy_outlines.py # it needs a clean process
   - pytest -v -s entrypoints/llm/test_generate.py # it needs a clean process
   - pytest -v -s entrypoints/llm/test_generate_multiple_loras.py # it needs a clean process
   - pytest -v -s entrypoints/llm/test_guided_generate.py # it needs a clean process
-  - pytest -v -s entrypoints/openai --ignore=entrypoints/openai/test_oot_registration.py
+  - pytest -v -s entrypoints/openai --ignore=entrypoints/openai/test_oot_registration.py --ignore=entrypoints/openai/correctness/
   - pytest -v -s entrypoints/test_chat_utils.py
   - pytest -v -s entrypoints/offline_mode # Needs to avoid interference with other tests
 
@@ -124,9 +128,10 @@ steps:
   source_file_dependencies:
   - vllm/distributed/
   - vllm/core/
-  - tests/distributed
+  - tests/distributed/test_utils
+  - tests/distributed/test_pynccl
   - tests/spec_decode/e2e/test_integration_dist_tp4
-  - tests/compile
+  - tests/compile/test_basic_correctness
   - examples/offline_inference/rlhf.py
   - examples/offline_inference/rlhf_colocate.py
   commands:
@@ -174,6 +179,9 @@ steps:
   - vllm/
   - tests/engine
   - tests/tokenization
+  - tests/test_sequence
+  - tests/test_config
+  - tests/test_logger
   commands:
   - pytest -v -s engine test_sequence.py test_config.py test_logger.py
   # OOM in the CI unless we run this separately
@@ -197,7 +205,7 @@ steps:
   - VLLM_USE_V1=1 pytest -v -s v1/e2e
   # Integration test for streaming correctness (requires special branch).
   - pip install -U git+https://github.com/robertgshaw2-neuralmagic/lm-evaluation-harness.git@streaming-api
-  - pytest -v -s entrypoints/openai/test_accuracy.py::test_lm_eval_accuracy_v1_engine
+  - pytest -v -s entrypoints/openai/correctness/test_lmeval.py::test_lm_eval_accuracy_v1_engine
 
 - label: Examples Test # 25min
   working_dir: "/vllm-workspace/examples"
@@ -331,6 +339,14 @@ steps:
   - export VLLM_WORKER_MULTIPROC_METHOD=spawn
   - bash ./run-tests.sh -c configs/models-small.txt -t 1
 
+- label: OpenAI API correctness
+  source_file_dependencies:
+  - csrc/
+  - vllm/entrypoints/openai/
+  - vllm/model_executor/models/whisper.py
+  commands: # LMEval+Transcription WER check
+  - pytest -s entrypoints/openai/correctness/
+
 - label: Encoder Decoder tests # 5min
   source_file_dependencies:
   - vllm/
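
For reference, the net result of the hunk above is one standalone pipeline step. A sketch of the applied YAML, assuming the two-space indentation used by the neighboring steps and assuming source_file_dependencies gates the step on changed paths, as it does elsewhere in this file:

- label: OpenAI API correctness
  source_file_dependencies:                  # run only when these paths change
  - csrc/
  - vllm/entrypoints/openai/
  - vllm/model_executor/models/whisper.py    # the transcription WER check exercises Whisper
  commands: # LMEval+Transcription WER check
  - pytest -s entrypoints/openai/correctness/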