95 changes: 95 additions & 0 deletions examples/awq/awq_without_smooth.py
@@ -0,0 +1,95 @@
#!/usr/bin/env python3
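"""Standalone Modal app: AWQ W4A16 baseline (no smoothing) for Llama-3-8B-Instruct."""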

import os
import time
from pathlib import Path

import modal

_resolved = Path(__file__).resolve()
REPO_ROOT = _resolved.parents[2] if len(_resolved.parents) >= 3 else Path("/repo")
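# This file lives at examples/awq/awq_without_smooth.py, so parents[2] is the
# repository root; fall back to /repo, the path the image mounts the tree at below.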

MODEL_ID = "meta-llama/Meta-Llama-3-8B-Instruct"
DATASET_ID = "HuggingFaceH4/ultrachat_200k"
DATASET_SPLIT = "train_sft"
NUM_CALIBRATION_SAMPLES = 256
MAX_SEQUENCE_LENGTH = 2048
RECIPE_PATH = "tests/e2e/vLLM/recipes/WNA16/recipe_w4a16_awq_sym.yaml"
SAVE_DIR = "llama3-8b-w4a16-awq-baseline"

LMEVAL_TASK = "gsm8k"
LMEVAL_LIMIT = 100
LMEVAL_NUM_FEWSHOT = 5
LMEVAL_BATCH_SIZE = 4
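# NOTE: the LMEVAL_* settings are not consumed in this script; evaluation is
# handled by a separate script (see the comment at the end of run_baseline).
# A hypothetical invocation using these settings with the lm_eval CLI:
#   lm_eval --model hf --model_args pretrained=llama3-8b-w4a16-awq-baseline \
#       --tasks gsm8k --num_fewshot 5 --limit 100 --batch_size 4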


def _make_image() -> modal.Image:
return (
modal.Image.debian_slim(python_version="3.11")
.add_local_dir(
REPO_ROOT,
remote_path="/repo",
copy=True,
ignore=[".git", ".venv", "*.pyc", "__pycache__", "*.egg-info"],
)
.env({"SETUPTOOLS_SCM_PRETEND_VERSION": "0.1.0"})
.run_commands(
"cd /repo && pip install -e .",
"pip install lm_eval==0.4.9.2",
"pip install pytest",
)
)


image = _make_image()
app = modal.App("awq-baseline-standalone")


@app.function(
image=image,
gpu="H100",
timeout=14400,
secrets=[modal.Secret.from_name("huggingface-secret")],
env={
"PYTHONPATH": "/repo/src:/repo",
"PYTORCH_ALLOC_CONF": "expandable_segments:True",
},
)
def run_baseline(skip_lm_eval: bool = False) -> dict:
    """Run the AWQ baseline (no smooth). Evaluation is handled by a separate
    script, so ``skip_lm_eval`` is currently unused."""
os.chdir("/repo")
from tests.e2e.e2e_utils import run_oneshot_for_e2e_testing

recipe_abs = f"/repo/{RECIPE_PATH}"
if not os.path.isfile(recipe_abs):
return {"error": f"Recipe not found: {recipe_abs}", "awq_time_s": None, "lm_eval": None}

t0 = time.perf_counter()
model, processor = run_oneshot_for_e2e_testing(
model=MODEL_ID,
model_class="AutoModelForCausalLM",
num_calibration_samples=NUM_CALIBRATION_SAMPLES,
max_seq_length=MAX_SEQUENCE_LENGTH,
scheme="W4A16_awq_sym",
dataset_id=DATASET_ID,
dataset_config=None,
dataset_split=DATASET_SPLIT,
recipe=recipe_abs,
quant_type=None,
)
os.makedirs(SAVE_DIR, exist_ok=True)
model.save_pretrained(SAVE_DIR)
processor.save_pretrained(SAVE_DIR)
from llmcompressor.core import active_session
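    # Reset the global compression session so a reused container starts clean.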
active_session().reset()
awq_time_s = time.perf_counter() - t0

# Compression-only: return AWQ runtime; evaluation is handled by a separate script.
return {"awq_time_s": awq_time_s}


@app.local_entrypoint()
def main(skip_lm_eval: bool = False):
"""Entrypoint: run baseline and print result."""
result = run_baseline.remote(skip_lm_eval=skip_lm_eval)
print("Baseline result:", result)
2 changes: 1 addition & 1 deletion examples/awq/fp8_block_llama_example.py
@@ -76,6 +76,6 @@ def tokenize(sample):
print("==========================================\n\n")

# Save to disk compressed.
-SAVE_DIR = MODEL_ID.rstrip("/").split("/")[-1] + "-awq-asym"
+SAVE_DIR = MODEL_ID.rstrip("/").split("/")[-1] + "-awq-fp8-block"
model.save_pretrained(SAVE_DIR, save_compressed=True)
tokenizer.save_pretrained(SAVE_DIR)
81 changes: 81 additions & 0 deletions examples/awq/fp8_block_llama_example_smooth.py
@@ -0,0 +1,81 @@
from compressed_tensors.offload import dispatch_model
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer

from llmcompressor import oneshot
from llmcompressor.modifiers.awq import AWQModifier

# Select model and load it.
MODEL_ID = "meta-llama/Meta-Llama-3-8B-Instruct"

model = AutoModelForCausalLM.from_pretrained(MODEL_ID, dtype="auto")
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)

# Select calibration dataset.
DATASET_ID = "HuggingFaceH4/ultrachat_200k"
DATASET_SPLIT = "train_sft"

# Select number of samples. 256 samples is a good place to start.
# Increasing the number of samples can improve accuracy.
NUM_CALIBRATION_SAMPLES = 256
MAX_SEQUENCE_LENGTH = 512

# Load dataset and preprocess.
ds = load_dataset(DATASET_ID, split=f"{DATASET_SPLIT}[:{NUM_CALIBRATION_SAMPLES}]")
ds = ds.shuffle(seed=42)


def preprocess(example):
return {
"text": tokenizer.apply_chat_template(
example["messages"],
tokenize=False,
)
}


ds = ds.map(preprocess)


# Tokenize inputs.
def tokenize(sample):
    return tokenizer(
        sample["text"],
        padding=False,
        max_length=MAX_SEQUENCE_LENGTH,
        truncation=True,
        add_special_tokens=False,
    )


ds = ds.map(tokenize, remove_columns=ds.column_names)


# Configure the quantization algorithm to run.
recipe = [
    AWQModifier(
        ignore=["lm_head"],
        scheme="FP8_BLOCK",
        targets=["Linear"],
        duo_scaling="both",
        smooth_layer_quantization=True,
    ),
]
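# smooth_layer_quantization=True is what distinguishes this "smooth" variant
# from the plain fp8_block_llama_example.py in this directory.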

# Apply algorithms.
oneshot(
model=model,
dataset=ds,
recipe=recipe,
max_seq_length=MAX_SEQUENCE_LENGTH,
num_calibration_samples=NUM_CALIBRATION_SAMPLES,
)

# Confirm generations of the quantized model look sane.
print("\n\n")
print("========== SAMPLE GENERATION ==============")
dispatch_model(model)
input_ids = tokenizer("Hello my name is", return_tensors="pt").input_ids.to(
model.device
)
output = model.generate(input_ids, max_new_tokens=100)
print(tokenizer.decode(output[0]))
print("==========================================\n\n")

# Save to disk compressed.
SAVE_DIR = MODEL_ID.rstrip("/").split("/")[-1] + "-awq-fp8-block-smooth"
model.save_pretrained(SAVE_DIR, save_compressed=True)
tokenizer.save_pretrained(SAVE_DIR)
2 changes: 1 addition & 1 deletion examples/awq/fp8_dynamic_llama_example.py
@@ -76,6 +76,6 @@ def tokenize(sample):
print("==========================================\n\n")

# Save to disk compressed.
-SAVE_DIR = MODEL_ID.rstrip("/").split("/")[-1] + "-awq-asym"
+SAVE_DIR = MODEL_ID.rstrip("/").split("/")[-1] + "-awq-fp8-dynamic"
model.save_pretrained(SAVE_DIR, save_compressed=True)
tokenizer.save_pretrained(SAVE_DIR)
81 changes: 81 additions & 0 deletions examples/awq/fp8_dynamic_llama_example_smooth.py
@@ -0,0 +1,81 @@
from compressed_tensors.offload import dispatch_model
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer

from llmcompressor import oneshot
from llmcompressor.modifiers.awq import AWQModifier

# Select model and load it.
MODEL_ID = "meta-llama/Meta-Llama-3-8B-Instruct"

model = AutoModelForCausalLM.from_pretrained(MODEL_ID, dtype="auto")
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)

# Select calibration dataset.
DATASET_ID = "HuggingFaceH4/ultrachat_200k"
DATASET_SPLIT = "train_sft"

# Select number of samples. 256 samples is a good place to start.
# Increasing the number of samples can improve accuracy.
NUM_CALIBRATION_SAMPLES = 256
MAX_SEQUENCE_LENGTH = 512

# Load dataset and preprocess.
ds = load_dataset(DATASET_ID, split=f"{DATASET_SPLIT}[:{NUM_CALIBRATION_SAMPLES}]")
ds = ds.shuffle(seed=42)


def preprocess(example):
return {
"text": tokenizer.apply_chat_template(
example["messages"],
tokenize=False,
)
}


ds = ds.map(preprocess)


# Tokenize inputs.
def tokenize(sample):
    return tokenizer(
        sample["text"],
        padding=False,
        max_length=MAX_SEQUENCE_LENGTH,
        truncation=True,
        add_special_tokens=False,
    )


ds = ds.map(tokenize, remove_columns=ds.column_names)


# Configure the quantization algorithm to run.
recipe = [
    AWQModifier(
        ignore=["lm_head"],
        scheme="FP8_DYNAMIC",
        targets=["Linear"],
        duo_scaling="both",
        smooth_layer_quantization=True,
    ),
]

# Apply algorithms.
oneshot(
model=model,
dataset=ds,
recipe=recipe,
max_seq_length=MAX_SEQUENCE_LENGTH,
num_calibration_samples=NUM_CALIBRATION_SAMPLES,
)

# Confirm generations of the quantized model look sane.
print("\n\n")
print("========== SAMPLE GENERATION ==============")
dispatch_model(model)
input_ids = tokenizer("Hello my name is", return_tensors="pt").input_ids.to(
model.device
)
output = model.generate(input_ids, max_new_tokens=100)
print(tokenizer.decode(output[0]))
print("==========================================\n\n")

# Save to disk compressed.
SAVE_DIR = MODEL_ID.rstrip("/").split("/")[-1] + "-awq-fp8-dynamic-smooth"
model.save_pretrained(SAVE_DIR, save_compressed=True)
tokenizer.save_pretrained(SAVE_DIR)
2 changes: 1 addition & 1 deletion examples/awq/llama_example.py
@@ -76,6 +76,6 @@ def tokenize(sample):
print("==========================================\n\n")

# Save to disk compressed.
-SAVE_DIR = MODEL_ID.rstrip("/").split("/")[-1] + "-awq-asym"
+SAVE_DIR = MODEL_ID.rstrip("/").split("/")[-1] + "-awq-w4a16-asym"
model.save_pretrained(SAVE_DIR, save_compressed=True)
tokenizer.save_pretrained(SAVE_DIR)
81 changes: 81 additions & 0 deletions examples/awq/llama_example_smooth.py
@@ -0,0 +1,81 @@
from compressed_tensors.offload import dispatch_model
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer

from llmcompressor import oneshot
from llmcompressor.modifiers.awq import AWQModifier

# Select model and load it.
MODEL_ID = "meta-llama/Meta-Llama-3-8B-Instruct"

model = AutoModelForCausalLM.from_pretrained(MODEL_ID, dtype="auto")
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)

# Select calibration dataset.
DATASET_ID = "HuggingFaceH4/ultrachat_200k"
DATASET_SPLIT = "train_sft"

# Select number of samples. 256 samples is a good place to start.
# Increasing the number of samples can improve accuracy.
NUM_CALIBRATION_SAMPLES = 256
MAX_SEQUENCE_LENGTH = 512

# Load dataset and preprocess.
ds = load_dataset(DATASET_ID, split=f"{DATASET_SPLIT}[:{NUM_CALIBRATION_SAMPLES}]")
ds = ds.shuffle(seed=42)


def preprocess(example):
return {
"text": tokenizer.apply_chat_template(
example["messages"],
tokenize=False,
)
}


ds = ds.map(preprocess)


# Tokenize inputs.
def tokenize(sample):
    return tokenizer(
        sample["text"],
        padding=False,
        max_length=MAX_SEQUENCE_LENGTH,
        truncation=True,
        add_special_tokens=False,
    )


ds = ds.map(tokenize, remove_columns=ds.column_names)


# Configure the quantization algorithm to run.
recipe = [
    AWQModifier(
        ignore=["lm_head"],
        scheme="W4A16_ASYM",
        targets=["Linear"],
        duo_scaling="both",
        smooth_layer_quantization=True,
    ),
]
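# The non-smooth counterpart of this recipe appears to be
# examples/awq/llama_example.py, which saves to the "-awq-w4a16-asym"
# directory (see the rename earlier in this diff).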

# Apply algorithms.
oneshot(
model=model,
dataset=ds,
recipe=recipe,
max_seq_length=MAX_SEQUENCE_LENGTH,
num_calibration_samples=NUM_CALIBRATION_SAMPLES,
)

# Confirm generations of the quantized model look sane.
print("\n\n")
print("========== SAMPLE GENERATION ==============")
dispatch_model(model)
input_ids = tokenizer("Hello my name is", return_tensors="pt").input_ids.to(
model.device
)
output = model.generate(input_ids, max_new_tokens=100)
print(tokenizer.decode(output[0]))
print("==========================================\n\n")

# Save to disk compressed.
SAVE_DIR = MODEL_ID.rstrip("/").split("/")[-1] + "-awq-w4a16-asym-smooth"
model.save_pretrained(SAVE_DIR, save_compressed=True)
tokenizer.save_pretrained(SAVE_DIR)