import torch
from datasets import load_dataset
from transformers import Llama4ForConditionalGeneration, Llama4Processor

from llmcompressor import oneshot
from llmcompressor.modeling import prepare_for_calibration
from llmcompressor.modifiers.quantization import QuantizationModifier

# Select model and load it.
model_id = "meta-llama/Llama-4-Scout-17B-16E-Instruct"
model = Llama4ForConditionalGeneration.from_pretrained(model_id, torch_dtype="auto")
processor = Llama4Processor.from_pretrained(model_id)
# We update `Llama4TextMoe` modules with the custom `SequentialLlama4TextMoe`.
# This change allows compatibility with vLLM.
# To apply your own custom module for experimentation, consider updating
# `SequentialLlama4TextMoe` under llmcompressor/modeling/llama4.py.
model = prepare_for_calibration(model)

# Select calibration dataset and calibration parameters.
DATASET_ID = "neuralmagic/calibration"
NUM_CALIBRATION_SAMPLES = 20
MAX_SEQUENCE_LENGTH = 8192

ds = load_dataset(DATASET_ID, name="LLM", split=f"train[:{NUM_CALIBRATION_SAMPLES}]")


def preprocess_function(example):
    # Wrap each message's text in the structured content format expected by
    # the Llama-4 chat template, then tokenize.
    messages = []
    for message in example["messages"]:
        messages.append(
            {
                "role": message["role"],
                "content": [{"type": "text", "text": message["content"]}],
            }
        )

    return processor.apply_chat_template(
        messages,
        return_tensors="pt",
        padding=False,
        truncation=True,
        max_length=MAX_SEQUENCE_LENGTH,
        tokenize=True,
        add_special_tokens=False,
        return_dict=True,
        add_generation_prompt=False,
    )


ds = ds.map(preprocess_function, batched=False, remove_columns=ds.column_names)


def data_collator(batch):
    # Calibration runs one sample at a time; convert each field to a tensor
    # and cast pixel_values (if present) to bfloat16 to match the model dtype.
    assert len(batch) == 1
    return {
        key: torch.tensor(value)
        if key != "pixel_values"
        else torch.tensor(value, dtype=torch.bfloat16).squeeze(0)
        for key, value in batch[0].items()
    }


# Configure the quantization algorithm to run.
recipe = QuantizationModifier(
    targets="Linear",
    scheme="NVFP4",
    ignore=[
        "re:.*lm_head",
        "re:.*self_attn",
        "re:.*router",
        "re:vision_model.*",
        "re:multi_modal_projector.*",
        "Llama4TextAttention",
    ],
)

# Apply algorithms.
# Due to the large size of Llama4, we specify sequential targets such that
# only one MLP is loaded into GPU memory at a time.
oneshot(
    model=model,
    dataset=ds,
    recipe=recipe,
    max_seq_length=MAX_SEQUENCE_LENGTH,
    num_calibration_samples=NUM_CALIBRATION_SAMPLES,
    sequential_targets=["Llama4TextMLP"],
    data_collator=data_collator,
)


# Save to disk compressed.
SAVE_DIR = model_id.rstrip("/").split("/")[-1] + "-NVFP4"
model.save_pretrained(SAVE_DIR)
processor.save_pretrained(SAVE_DIR)
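
# Optional sanity check (a minimal sketch, not part of the original recipe):
# run a short text-only generation with the calibrated model to confirm it
# still produces coherent output before serving the saved checkpoint. The
# prompt text and max_new_tokens below are illustrative assumptions, and the
# model may need to be placed on appropriate devices for your hardware first.
sample_messages = [
    {
        "role": "user",
        "content": [{"type": "text", "text": "Describe NVFP4 quantization in one sentence."}],
    }
]
sample_inputs = processor.apply_chat_template(
    sample_messages,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
).to(model.device)
sample_output = model.generate(**sample_inputs, max_new_tokens=64)
print(processor.decode(sample_output[0]))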