import torch
from datasets import load_dataset
from transformers import AutoProcessor, Qwen2AudioForConditionalGeneration

from llmcompressor import oneshot
from llmcompressor.modifiers.quantization import GPTQModifier
from llmcompressor.utils import dispatch_for_generation

# Select model and load it.
MODEL_ID = "Qwen/Qwen2-Audio-7B"
model = Qwen2AudioForConditionalGeneration.from_pretrained(MODEL_ID, torch_dtype="auto")
processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)

# Select calibration dataset.
DATASET_ID = "MLCommons/peoples_speech"
DATASET_SUBSET = "test"
DATASET_SPLIT = "test"

NUM_CALIBRATION_SAMPLES = 512
MAX_SEQUENCE_LENGTH = 2048

# Load dataset and preprocess.
ds = load_dataset(
    DATASET_ID,
    DATASET_SUBSET,
    split=f"{DATASET_SPLIT}[:{NUM_CALIBRATION_SAMPLES}]",
)

def preprocess(example):
    # Peoples Speech: example["audio"] = {"array": ..., "sampling_rate": ...}
    # example["text"] is the transcript.
    return {
        "array": example["audio"]["array"],
        "sampling_rate": example["audio"]["sampling_rate"],
        "text": example["text"].strip(),
    }

ds = ds.map(preprocess, remove_columns=ds.column_names)

# Process inputs.
PROMPT_PREFIX = "<|audio_bos|><|AUDIO|><|audio_eos|>Transcribe the audio in English:"
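# The <|AUDIO|> token is a placeholder the model expands to the encoded audio
# features; the transcript is appended below so calibration sees realistic text.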

def process(sample):
    text = f"{PROMPT_PREFIX} {sample['text']}"

    # 1) Audio -> padded mel features (30 s of audio -> exactly 3000 frames).
    audio_feats = processor.feature_extractor(
        sample["array"],
        sampling_rate=sample["sampling_rate"],
        padding="max_length",
        max_length=processor.feature_extractor.n_samples,
        return_tensors="pt",
    )

    # 2) Text -> token ids, padded/truncated to MAX_SEQUENCE_LENGTH.
    text_toks = processor.tokenizer(
        text,
        padding="max_length",
        max_length=MAX_SEQUENCE_LENGTH,
        truncation=True,
        return_tensors="pt",
    )

    # Merge into what Qwen2AudioForConditionalGeneration expects.
    inputs = {
        "input_features": audio_feats["input_features"][0],  # strip batch dim
        "input_ids": text_toks["input_ids"][0],
        "attention_mask": text_toks["attention_mask"][0],
    }
    # Some versions also provide/expect a feature_attention_mask; include it if present.
    if "attention_mask" in audio_feats and audio_feats["attention_mask"] is not None:
        inputs["feature_attention_mask"] = audio_feats["attention_mask"][0]

    return inputs

ds = ds.map(process, remove_columns=ds.column_names)
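# Note: datasets.map() stores the returned tensors as plain Python lists, which
# is why the generation step below rebuilds batched tensors from each sample.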

# Recipe
recipe = GPTQModifier(
    targets="Linear",
    scheme="W4A16",
    ignore=["lm_head"],  # safe default for generation heads
)
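# W4A16 = 4-bit integer weights with 16-bit activations; GPTQ uses the
# calibration set above to minimize per-layer quantization error.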

# Apply algorithms.
oneshot(
    model=model,
    dataset=ds,
    recipe=recipe,
    max_seq_length=MAX_SEQUENCE_LENGTH,
    num_calibration_samples=NUM_CALIBRATION_SAMPLES,
)

# Confirm generations of the quantized model look sane.
print("\n========== SAMPLE GENERATION ==============")
dispatch_for_generation(model)
sample = next(iter(ds))
sample = {key: torch.tensor([value]).to(model.device) for key, value in sample.items()}
output = model.generate(**sample, max_new_tokens=100)
text = processor.batch_decode(output, skip_special_tokens=True)[0]
print(text)
print("==========================================\n")

# Save to disk compressed.
SAVE_DIR = MODEL_ID.rstrip("/").split("/")[-1] + "-W4A16-GPTQ"
model.save_pretrained(SAVE_DIR, save_compressed=True)
processor.save_pretrained(SAVE_DIR)
print(f"Saved to: {SAVE_DIR}")