Commit 7b3f434

revert examples, rename arg
Signed-off-by: Kyle Sayers <[email protected]>
1 parent f87a78f · commit 7b3f434

File tree: 4 files changed, +5 -4 lines changed

examples/multimodal_vision/phi3_vision_example.py

Lines changed: 3 additions & 0 deletions
@@ -3,6 +3,7 @@
 from transformers import AutoModelForCausalLM, AutoProcessor
 
 from llmcompressor.modifiers.quantization import GPTQModifier
+from llmcompressor.modifiers.smoothquant import SmoothQuantModifier
 from llmcompressor.transformers import oneshot
 
 # Load model.
@@ -15,6 +16,7 @@
     _attn_implementation="eager",
 )
 processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
+processor.chat_template = processor.tokenizer.chat_template
 
 # Oneshot arguments
 DATASET_ID = "lmms-lab/flickr30k"
@@ -66,6 +68,7 @@ def data_collator(batch):
 
 # Recipe
 recipe = [
+    SmoothQuantModifier(smoothing_strength=0.8),
     GPTQModifier(
         targets="Linear",
         scheme="W4A16",
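
This restores SmoothQuant ahead of GPTQ in the example recipe. For orientation, a minimal sketch of the resulting recipe section, assuming the surrounding example code (the ignore list is an assumption, not shown in this diff):

from llmcompressor.modifiers.quantization import GPTQModifier
from llmcompressor.modifiers.smoothquant import SmoothQuantModifier

# SmoothQuant first migrates activation outliers into the weights, then GPTQ
# applies W4A16 weight-only quantization to the Linear layers.
recipe = [
    SmoothQuantModifier(smoothing_strength=0.8),
    GPTQModifier(
        targets="Linear",
        scheme="W4A16",
        ignore=["lm_head"],  # assumption: typical ignore list, not part of this diff
    ),
]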

examples/quantization_w4a16/llama3_example.py

Lines changed: 0 additions & 2 deletions
@@ -22,7 +22,6 @@
 # Increasing the number of samples can improve accuracy.
 NUM_CALIBRATION_SAMPLES = 512
 MAX_SEQUENCE_LENGTH = 2048
-BATCH_SIZE = 8
 
 # Load dataset and preprocess.
 ds = load_dataset(DATASET_ID, split=DATASET_SPLIT)
@@ -65,7 +64,6 @@ def tokenize(sample):
     recipe=recipe,
     max_seq_length=MAX_SEQUENCE_LENGTH,
     num_calibration_samples=NUM_CALIBRATION_SAMPLES,
-    per_device_oneshot_batch_size=BATCH_SIZE,
 )
 
 # Confirm generations of the quantized model look sane.
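
With this revert the example falls back to the default calibration batch size of 1. If batched calibration is still wanted, the renamed training argument from this commit can presumably be passed straight through oneshot, just as per_device_oneshot_batch_size was before; a sketch under that assumption (model, ds, and recipe come from the surrounding example):

# Sketch: same oneshot call, opting back into batched calibration via the
# renamed argument; oneshot_batch_size is assumed to be forwarded to
# TrainingArguments the same way the old name was.
oneshot(
    model=model,
    dataset=ds,
    recipe=recipe,
    max_seq_length=MAX_SEQUENCE_LENGTH,
    num_calibration_samples=NUM_CALIBRATION_SAMPLES,
    oneshot_batch_size=8,
)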

src/llmcompressor/transformers/finetune/runner.py

Lines changed: 1 addition & 1 deletion
@@ -144,7 +144,7 @@ def one_shot(self, stage: Optional[str] = None):
         calib_data = format_calibration_data(
             tokenized_dataset=self.get_dataset_split("calibration"),
             num_calibration_samples=self._data_args.num_calibration_samples,
-            batch_size=self._training_args.per_device_oneshot_batch_size,
+            batch_size=self._training_args.oneshot_batch_size,
             do_shuffle=self._data_args.shuffle_calibration_samples,
             collate_fn=self._data_args.data_collator,
             processor=self.processor,
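
Only the call site changes here. For orientation, a hypothetical sketch of a formatter matching this signature; the body is an assumption, since the real format_calibration_data implementation is not shown in this diff:

from torch.utils.data import DataLoader

def format_calibration_data(
    tokenized_dataset,
    num_calibration_samples,
    batch_size,
    do_shuffle,
    collate_fn,
    processor,
):
    # Optionally shuffle, then cap the dataset at the requested sample count.
    dataset = tokenized_dataset.shuffle() if do_shuffle else tokenized_dataset
    dataset = dataset.select(range(min(num_calibration_samples, len(dataset))))
    # processor is unused in this sketch; the real helper may use it to build
    # a default collate_fn.
    return DataLoader(dataset, batch_size=batch_size, collate_fn=collate_fn)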

src/llmcompressor/transformers/finetune/training_args.py

Lines changed: 1 addition & 1 deletion
@@ -32,7 +32,7 @@ class TrainingArguments(HFTrainingArgs):
             )
         },
     )
-    per_device_oneshot_batch_size: int = field(
+    oneshot_batch_size: int = field(
         default=1,
         metadata={
             "help": "The batch size per GPU/XPU/TPU/MPS/NPU core/CPU for oneshot"
