Skip to content

Commit 80d22a1

Browse files
authored
Merge branch 'main' into kylesayrs/shared-pipelines
2 parents 7d1b5d8 + 4d630df commit 80d22a1

File tree

4 files changed

+567
-1
lines changed

4 files changed

+567
-1
lines changed

README.md

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,14 @@
1212
<img alt="LLM Compressor Flow" src="https://github.com/user-attachments/assets/adf07594-6487-48ae-af62-d9555046d51b" width="80%" />
1313
</p>
1414

15+
## 🚀 What's New!
16+
17+
Big updates have landed in LLM Compressor! Check out these exciting new features:
18+
19+
* **Axolotl Sparse Finetuning Integration:** Easily finetune sparse LLMs through our seamless integration with Axolotl. [Learn more here](https://docs.axolotl.ai/docs/custom_integrations.html#llmcompressor).
20+
* **AutoAWQ Integration:** Perform low-bit weight-only quantization efficiently using AutoAWQ, now part of LLM Compressor. *Note: This integration should be considered experimental for now. Enhanced support, including for MoE models and improved handling of larger models via layer sequential pipelining, is planned for upcoming releases.* [See the details](https://github.com/vllm-project/llm-compressor/pull/1177).
21+
* **Day 0 Llama 4 Support:** Meta utilized LLM Compressor to create the [FP8-quantized Llama-4-Maverick-17B-128E](https://huggingface.co/meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8), optimized for vLLM inference using [compressed-tensors](https://github.com/neuralmagic/compressed-tensors) format.
22+
1523
### Supported Formats
1624
* Activation Quantization: W8A8 (int8 and fp8)
1725
* Mixed Precision: W4A16, W8A16
Lines changed: 75 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,75 @@
1+
import requests
2+
import torch
3+
from PIL import Image
4+
from transformers import AutoProcessor
5+
6+
from llmcompressor import oneshot
7+
from llmcompressor.modifiers.quantization import GPTQModifier
8+
from llmcompressor.transformers.tracing import TraceableGemma3ForConditionalGeneration
9+
# Load the traceable model definition (required so the multimodal graph can
# be traced for sequential calibration) and its paired processor.
model_id = "google/gemma-3-4b-it"
model = TraceableGemma3ForConditionalGeneration.from_pretrained(
    model_id, device_map="auto", torch_dtype="auto"
)
processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)

# Calibration configuration for the oneshot run: 512 samples drawn from the
# flickr30k test split, truncated to 2048 tokens each.
DATASET_ID = "flickr30k"
DATASET_SPLIT = {"calibration": "test[:512]"}
NUM_CALIBRATION_SAMPLES = 512
MAX_SEQUENCE_LENGTH = 2048
# Define a oneshot data collator for multimodal inputs.
def data_collator(batch):
    """Collate a single processed sample into tensors for calibration.

    The oneshot pipeline calibrates one sample at a time, so the incoming
    batch must contain exactly one processed example.

    :param batch: list containing exactly one dict mapping feature names
        to array-like values (as produced by the processor)
    :return: dict mapping each feature name to a ``torch.Tensor``
    :raises ValueError: if the batch does not contain exactly one sample
    """
    # Raise explicitly instead of using ``assert``: assertions are stripped
    # when Python runs with -O, which would silently skip this validation.
    if len(batch) != 1:
        raise ValueError(f"Expected batch of size 1, got {len(batch)}")
    return {key: torch.tensor(value) for key, value in batch[0].items()}
# Recipe: quantize every Linear layer to W4A16 with GPTQ, keeping the
# language-model head and the vision components at full precision.
recipe = [
    GPTQModifier(
        targets="Linear",
        scheme="W4A16",
        # Fixed pattern: "re:*.lm_head" is not a valid regular expression
        # (a leading "*" quantifier has nothing to repeat and re.compile
        # raises "nothing to repeat"); ".*lm_head" matches the intended
        # module paths.
        ignore=["re:.*lm_head", "re:vision_tower.*", "re:multi_modal_projector.*"],
    ),
]
# Perform oneshot calibration-based quantization: the recipe above is
# applied to the model while streaming the calibration split through it.
oneshot(
    model=model,
    tokenizer=model_id,
    dataset=DATASET_ID,
    splits=DATASET_SPLIT,
    recipe=recipe,
    max_seq_length=MAX_SEQUENCE_LENGTH,
    num_calibration_samples=NUM_CALIBRATION_SAMPLES,
    trust_remote_code_model=True,
    # Multimodal batches need the custom single-sample collator above.
    data_collator=data_collator,
)
# Confirm generations of the quantized model look sane: describe one image
# fetched from the COCO training set.
print("========== SAMPLE GENERATION ==============")
messages = [
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "Please describe the animal in this image\n"},
            {"type": "image"},
        ],
    },
]
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
image_url = "http://images.cocodataset.org/train2017/000000231895.jpg"
raw_image = Image.open(requests.get(image_url, stream=True).raw)

# Move inputs to the model's own device instead of hard-coding "cuda":
# device_map="auto" may have placed the model on CPU (or a specific GPU),
# and a literal "cuda" would crash on CUDA-less hosts.
inputs = processor(images=raw_image, text=prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=100)
print(processor.decode(output[0], skip_special_tokens=True))
print("==========================================")
# Persist the quantized model in compressed-tensors format, together with
# its processor, so the directory can be served directly by vLLM.
SAVE_DIR = f"{model_id.split('/')[1]}-W4A16-G128"
model.save_pretrained(SAVE_DIR, save_compressed=True)
processor.save_pretrained(SAVE_DIR)

src/llmcompressor/transformers/tracing/__init__.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,6 @@
1+
from .gemma3 import (
2+
Gemma3ForConditionalGeneration as TraceableGemma3ForConditionalGeneration,
3+
)
14
from .llava import (
25
LlavaForConditionalGeneration as TraceableLlavaForConditionalGeneration,
36
)
@@ -11,12 +14,13 @@
1114
Idefics3ForConditionalGeneration as TraceableIdefics3ForConditionalGeneration,
1215
)
1316
from .qwen2_5_vl import (
14-
Qwen2_5_VLForConditionalGeneration as TraceableQwen2_5_VLForConditionalGeneration
17+
Qwen2_5_VLForConditionalGeneration as TraceableQwen2_5_VLForConditionalGeneration,
1518
)
1619
from .debug import get_model_class
1720

1821
__all__ = [
1922
"get_model_class",
23+
"TraceableGemma3ForConditionalGeneration",
2024
"TraceableLlavaForConditionalGeneration",
2125
"TraceableMllamaForConditionalGeneration",
2226
"TraceableQwen2VLForConditionalGeneration",

0 commit comments

Comments
 (0)