|
@@ -25,6 +25,7 @@
 
 PHI3V_MODEL_ID = "microsoft/Phi-3.5-vision-instruct"
 ULTRAVOX_MODEL_ID = "fixie-ai/ultravox-v0_5-llama-3_2-1b"
+QWEN2AUDIO_MODEL_ID = "Qwen/Qwen2-Audio-7B-Instruct"
 QWEN2VL_MODEL_ID = "Qwen/Qwen2-VL-2B-Instruct"
 QWEN25VL_MODEL_ID = "Qwen/Qwen2.5-VL-3B-Instruct"
 MLLAMA_MODEL_ID = "meta-llama/Llama-3.2-11B-Vision-Instruct"
@@ -841,13 +842,16 @@ def test_resolve_hf_chat_template(sample_json_schema, model, use_tools):
     assert isinstance(chat_template, str)
 
 
+# NOTE: Qwen2-Audio default chat template is specially defined inside
+# processor class instead of using `tokenizer_config.json`
 # yapf: disable
 @pytest.mark.parametrize(
     ("model", "expected_format"),
     [(PHI3V_MODEL_ID, "string"),
      (QWEN2VL_MODEL_ID, "openai"),
      (QWEN25VL_MODEL_ID, "openai"),
      (ULTRAVOX_MODEL_ID, "string"),
+     (QWEN2AUDIO_MODEL_ID, "openai"),
      (MLLAMA_MODEL_ID, "openai"),
      (LLAMA_GUARD_MODEL_ID, "openai")],
 )
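For context, the `expected_format` values in this parametrization describe how the resolved chat template consumes message content: "string" templates expect plain-text content, while "openai" templates expect a list of typed content parts. A minimal, hypothetical illustration of the two message shapes (the payloads below are examples for orientation, not taken from the test file):

# "string" content format: the message body is a plain string.
string_message = {"role": "user", "content": "Describe this image."}

# "openai" content format: the message body is a list of typed parts.
openai_message = {
    "role": "user",
    "content": [
        {"type": "text", "text": "Describe this image."},
        {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}},
    ],
}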
|
@@ -900,10 +904,13 @@ def test_resolve_content_format_hf_defined(model, expected_format):
      ("template_chatglm2.jinja", "string"),
      ("template_chatml.jinja", "string"),
      ("template_deepseek_vl2.jinja", "string"),
+     ("template_dse_qwen2_vl.jinja", "openai"),
      ("template_falcon_180b.jinja", "string"),
      ("template_falcon.jinja", "string"),
+     ("template_florence2.jinja", "string"),
      ("template_inkbot.jinja", "string"),
      ("template_llava.jinja", "string"),
+     ("template_teleflm.jinja", "string"),
      ("template_vlm2vec.jinja", "openai"),
      ("tool_chat_template_granite_20b_fc.jinja", "string"),
      ("tool_chat_template_hermes.jinja", "string"),
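The NOTE added above points out that Qwen2-Audio's default chat template is defined in its processor class rather than shipped in `tokenizer_config.json`. A minimal sketch of how one might confirm this with transformers (assumes Hugging Face Hub access and a recent transformers release; attribute availability can vary across versions):

from transformers import AutoProcessor, AutoTokenizer

MODEL_ID = "Qwen/Qwen2-Audio-7B-Instruct"

# The tokenizer alone may not expose a template, since tokenizer_config.json
# does not define one for this model (per the NOTE in the diff).
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
print("tokenizer has template:", tokenizer.chat_template is not None)

# The default template is carried by the processor instead.
processor = AutoProcessor.from_pretrained(MODEL_ID)
print("processor has template:", getattr(processor, "chat_template", None) is not None)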
|
|