forked from vllm-project/llm-compressor
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path qwen3.5_example.py
More file actions
47 lines (41 loc) · 1.64 KB
/
qwen3.5_example.py
File metadata and controls
47 lines (41 loc) · 1.64 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
"""One-shot MXFP4A16 weight-only quantization of Qwen3.5-27B with llm-compressor.

Loads the model, applies a PTQ recipe, runs a sample generation as a smoke
test, and saves the result in compressed-tensors format.
"""
from compressed_tensors.offload import dispatch_model
from transformers import AutoProcessor, Qwen3_5ForConditionalGeneration

from llmcompressor import oneshot
from llmcompressor.modifiers.quantization import QuantizationModifier

MODEL_ID = "Qwen/Qwen3.5-27B"

# Load the model and its processor.
model = Qwen3_5ForConditionalGeneration.from_pretrained(
    MODEL_ID, dtype="auto", trust_remote_code=True
)
processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)

# Quantization recipe: weights to fp4 with per-group-32 scales (MXFP4),
# activations untouched, applied to every Linear layer. Skip the visual
# encoder, lm_head, linear attention (Gated DeltaNet fused projections are
# incompatible with microscale formats), and MTP modules.
recipe = QuantizationModifier(
    targets="Linear",
    scheme="MXFP4A16",
    ignore=[
        "lm_head",
        "re:.*visual.*",
        "re:.*linear_attn.*",
        "re:.*mtp.*",
    ],
)

# Run one-shot post-training quantization.
oneshot(model=model, recipe=recipe)

# Smoke-test the quantized model with a short generation.
print("\n\n========== SAMPLE GENERATION ==============")
dispatch_model(model)
chat = [{"role": "user", "content": "Hello my name is"}]
rendered = processor.apply_chat_template(
    chat, tokenize=False, add_generation_prompt=True
)
batch = processor(text=rendered, return_tensors="pt").to(model.device)
generated = model.generate(**batch, max_new_tokens=100)
print(processor.decode(generated[0], skip_special_tokens=True))
print("==========================================\n\n")

# Save to disk in compressed-tensors format.
SAVE_DIR = f"{MODEL_ID.rstrip('/').split('/')[-1]}-MXFP4A16"
model.save_pretrained(SAVE_DIR, save_compressed=True)
processor.save_pretrained(SAVE_DIR)