import pytest
from transformers import AutoModelForCausalLM

from llmcompressor.transformers.tracing import (
    TraceableIdefics3ForConditionalGeneration,
    TraceableLlavaForConditionalGeneration,
    TraceableMllamaForConditionalGeneration,
    TraceableQwen2_5_VLForConditionalGeneration,
    TraceableQwen2VLForConditionalGeneration,
    TraceableWhisperForConditionalGeneration,
)
from llmcompressor.transformers.tracing.debug import trace
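# Note: the Traceable* classes imported above appear to be patched variants of
# the upstream transformers model definitions, intended to make their forward
# passes traceable; trace() attempts the trace and raises if it fails, so each
# test below is a pass/fail smoke test with no explicit assertions.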


@pytest.mark.parametrize(
    "model_id,model_class,targets",
    [
        ("meta-llama/Meta-Llama-3-8B-Instruct", AutoModelForCausalLM, None),
    ],
)
def test_text_trace(model_id, model_class, targets):
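    """Smoke test: tracing a text-only model should complete without raising."""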
    trace(
        model_id,
        model_class,
        targets,
        ignore=[],
        modality="text",
        trust_remote_code=True,
    )


@pytest.mark.parametrize(
    "model_id,model_class,targets,ignore",
    [
        (
            "Qwen/Qwen2-VL-2B-Instruct",
            TraceableQwen2VLForConditionalGeneration,
            None,
            ["lm_head", "re:visual.*"],
        ),
        (
            "Qwen/Qwen2.5-VL-7B-Instruct",
            TraceableQwen2_5_VLForConditionalGeneration,
            None,
            ["lm_head", "re:visual.*"],
        ),
        (
            "mgoin/pixtral-12b",
            TraceableLlavaForConditionalGeneration,
            ["MistralDecoderLayer"],
            ["re:.*lm_head", "re:vision_tower.*", "re:multi_modal_projector.*"],
        ),
        (
            "meta-llama/Llama-3.2-11B-Vision-Instruct",
            TraceableMllamaForConditionalGeneration,
            None,
            ["re:.*lm_head", "re:multi_modal_projector.*", "re:vision_model.*"],
        ),
        (
            "llava-hf/llava-1.5-7b-hf",
            TraceableLlavaForConditionalGeneration,
            ["LlamaDecoderLayer"],
            ["re:.*lm_head", "re:vision_tower.*", "re:multi_modal_projector.*"],
        ),
        (
            "HuggingFaceM4/Idefics3-8B-Llama3",
            TraceableIdefics3ForConditionalGeneration,
            ["Idefics3EncoderLayer", "LlamaDecoderLayer"],
            ["re:.*lm_head", "re:model.vision_model.*", "re:model.connector.*"],
        ),
    ],
)
def test_vision_trace(model_id, model_class, targets, ignore):
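    """Smoke test: trace vision-language models with their vision submodules
    (vision towers, projectors/connectors) excluded via the ``ignore`` patterns."""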
    trace(
        model_id,
        model_class,
        targets,
        ignore=ignore,
        modality="vision",
        trust_remote_code=True,
    )


@pytest.mark.parametrize(
    "model_id,model_class,targets,ignore",
    [
        ("openai/whisper-large-v3", TraceableWhisperForConditionalGeneration, None, []),
    ],
)
def test_audio_trace(model_id, model_class, targets, ignore):
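    """Smoke test: trace Whisper end-to-end with an empty ``ignore`` list."""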
    trace(
        model_id,
        model_class,
        targets,
        ignore=ignore,
        modality="audio",
        trust_remote_code=True,
    )