@@ -323,62 +323,60 @@ steps:
   - pytest -v -s models/test_registry.py
   - pytest -v -s models/test_initialization.py

-- label: Decoder-only Language Models Test (Standard) # 18min
+- label: Language Models Test (Standard) # 42min
   # mirror_hardwares: [amd]
   source_file_dependencies:
   - vllm/
   - tests/models/decoder_only/language
+  - tests/models/embedding/language
+  - tests/models/encoder_decoder/language
   commands:
-  - pytest -v -s models/decoder_only/language -m core_model
-  - pytest -v -s models/decoder_only/language -m quant_model
+  - pytest -v -s models/decoder_only/language -m 'core_model or quant_model'
+  - pytest -v -s models/embedding/language -m core_model
+  - pytest -v -s models/embedding/vision_language -m core_model

-- label: Decoder-only Language Models Test (Extended) # 46min
+- label: Language Models Test (Extended) # 50min
   nightly: true
   source_file_dependencies:
   - vllm/
   - tests/models/decoder_only/language
+  - tests/models/embedding/language
+  - tests/models/encoder_decoder/language
   commands:
   - pytest -v -s models/decoder_only/language -m 'not core_model and not quant_model'
+  - pytest -v -s models/embedding/language -m 'not core_model'
+  - pytest -v -s models/embedding/vision_language -m 'not core_model'

-- label: Decoder-only Multi-Modal Models Test (Standard) # 22min
+- label: Multi-Modal Models Test (Standard) # 26min
   # mirror_hardwares: [amd]
   source_file_dependencies:
   - vllm/
   - tests/models/decoder_only/audio_language
   - tests/models/decoder_only/vision_language
+  - tests/models/embedding/vision_language
+  - tests/models/encoder_decoder/vision_language
   commands:
-  - pytest -v -s models/decoder_only/audio_language -m core_model
-  - pytest -v -s --ignore models/decoder_only/vision_language/test_phi3v.py models/decoder_only/vision_language -m core_model
-  # No tests under this group for now
-  # - pytest -v -s models/decoder_only/audio_language -m quant_model
-  - pytest -v -s --ignore models/decoder_only/vision_language/test_phi3v.py models/decoder_only/vision_language -m quant_model
+  - pytest -v -s models/decoder_only/audio_language -m 'core_model or quant_model'
+  - pytest -v -s --ignore models/decoder_only/vision_language/test_phi3v.py models/decoder_only/vision_language -m 'core_model or quant_model'
+  - pytest -v -s models/encoder_decoder/language -m core_model
+  - pytest -v -s models/encoder_decoder/vision_language -m core_model

-- label: Decoder-only Multi-Modal Models Test (Extended) # 1h10m
+- label: Multi-Modal Models Test (Extended) # 1h15m
   nightly: true
   source_file_dependencies:
   - vllm/
   - tests/models/decoder_only/audio_language
   - tests/models/decoder_only/vision_language
+  - tests/models/embedding/vision_language
+  - tests/models/encoder_decoder/vision_language
   commands:
   - pytest -v -s models/decoder_only/audio_language -m 'not core_model and not quant_model'
   # HACK - run phi3v tests separately to sidestep this transformers bug
   # https://github.com/huggingface/transformers/issues/34307
   - pytest -v -s models/decoder_only/vision_language/test_phi3v.py
   - pytest -v -s --ignore models/decoder_only/vision_language/test_phi3v.py models/decoder_only/vision_language -m 'not core_model and not quant_model'
-
-- label: Other Models Test # 20min
-  # mirror_hardwares: [amd]
-  source_file_dependencies:
-  - vllm/
-  - tests/models/embedding/language
-  - tests/models/embedding/vision_language
-  - tests/models/encoder_decoder/language
-  - tests/models/encoder_decoder/vision_language
-  commands:
-  - pytest -v -s models/embedding/language
-  - pytest -v -s models/embedding/vision_language
-  - pytest -v -s models/encoder_decoder/language
-  - pytest -v -s models/encoder_decoder/vision_language
+  - pytest -v -s models/encoder_decoder/language -m 'not core_model'
+  - pytest -v -s models/encoder_decoder/vision_language -m 'not core_model'

 # This test is used only in PR development phase to test individual models and should never run on main
 - label: Custom Models Test
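
For context on the -m expressions above: the standard jobs select only tests tagged core_model or quant_model, while the nightly extended jobs run the remainder via 'not core_model and not quant_model'. Below is a minimal pytest sketch of how such markers can be registered and applied; the marker names match the pipeline, but the conftest hook and the test module/function names are illustrative assumptions, not vLLM's actual test code.

# conftest.py -- register the markers so `pytest -m` can filter on them
# (illustrative sketch; the real project registers its markers in its own config)
def pytest_configure(config):
    config.addinivalue_line("markers", "core_model: representative model run on every PR")
    config.addinivalue_line("markers", "quant_model: quantized-model variant run on every PR")


# test_example_model.py -- hypothetical test module
import pytest


@pytest.mark.core_model
def test_example_model_greedy():
    # Selected by: pytest -m 'core_model or quant_model'
    assert True


def test_example_model_long_context():
    # Unmarked: only selected by the nightly run with
    # pytest -m 'not core_model and not quant_model'
    assert True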