
Commit 3b16e08: Cleanup
Parent: b6c1005

7 files changed: 37 additions, 27 deletions

.github/workflows/test.yaml

Lines changed: 1 addition & 1 deletion
@@ -11,7 +11,7 @@ on:
       - main

 jobs:
-  ruff:
+  test:
     runs-on: ubuntu-latest
     strategy:
       matrix:

auto_fp8/quantize.py

Lines changed: 7 additions & 2 deletions
@@ -179,7 +179,9 @@ def quantize_activations(model, calibration_tokens):
         if not isinstance(dynamic_quant_linear, FP8DynamicLinear):
             continue
         quantizer = FP8StaticLinearQuantizer(
-            dynamic_quant_linear.weight, dynamic_quant_linear.weight_scale, dynamic_quant_linear.bias
+            dynamic_quant_linear.weight,
+            dynamic_quant_linear.weight_scale,
+            dynamic_quant_linear.bias,
         )
         replace_module(model, name, quantizer)
         del dynamic_quant_linear
@@ -197,7 +199,10 @@ def quantize_activations(model, calibration_tokens):
         if not isinstance(quantizer, FP8StaticLinearQuantizer):
             continue
         static_proj = FP8StaticLinear(
-            quantizer.weight, quantizer.weight_scale, quantizer.bias, quantizer.act_scale
+            quantizer.weight,
+            quantizer.weight_scale,
+            quantizer.bias,
+            quantizer.act_scale,
         )
         replace_module(model, name, static_proj)
         del quantizer
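
For context, both hunks sit inside the two passes of quantize_activations: the first swaps every FP8DynamicLinear for an FP8StaticLinearQuantizer that observes activation scales during calibration, and the second freezes those observed scales into FP8StaticLinear modules. A minimal sketch of that flow follows; the named_modules() loops and the calibration forward pass are assumed scaffolding, while the constructor arguments and replace_module calls come from the diff above.

# Sketch only: the iteration loops and the calibration forward pass are
# assumptions; constructor arguments mirror the hunks above.
def quantize_activations(model, calibration_tokens):
    # Pass 1: replace dynamic linears with quantizers that record act_scale.
    for name, dynamic_quant_linear in list(model.model.named_modules()):
        if not isinstance(dynamic_quant_linear, FP8DynamicLinear):
            continue
        quantizer = FP8StaticLinearQuantizer(
            dynamic_quant_linear.weight,
            dynamic_quant_linear.weight_scale,
            dynamic_quant_linear.bias,
        )
        replace_module(model, name, quantizer)
        del dynamic_quant_linear

    # Calibration: run the tokens through the model so each quantizer
    # observes the activation ranges it needs (assumed step).
    model.model(calibration_tokens)

    # Pass 2: freeze the observed scales into static FP8 linears.
    for name, quantizer in list(model.model.named_modules()):
        if not isinstance(quantizer, FP8StaticLinearQuantizer):
            continue
        static_proj = FP8StaticLinear(
            quantizer.weight,
            quantizer.weight_scale,
            quantizer.bias,
            quantizer.act_scale,
        )
        replace_module(model, name, static_proj)
        del quantizer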

example.py

Lines changed: 5 additions & 8 deletions
@@ -5,16 +5,13 @@
 quantized_model_dir = "opt-125m-fp8"

 tokenizer = AutoTokenizer.from_pretrained(pretrained_model_dir, use_fast=True)
-examples = [
-    "auto-fp8 is an easy-to-use model quantization library"
-]
+examples = ["auto-fp8 is an easy-to-use model quantization library"]
 examples = tokenizer(examples, return_tensors="pt").to("cuda")

-quantize_config = BaseQuantizeConfig(
-    quant_method="fp8",
-    activation_scheme="static"
-)
+quantize_config = BaseQuantizeConfig(quant_method="fp8", activation_scheme="static")

-model = AutoFP8ForCausalLM.from_pretrained(pretrained_model_dir, quantize_config=quantize_config)
+model = AutoFP8ForCausalLM.from_pretrained(
+    pretrained_model_dir, quantize_config=quantize_config
+)
 model.quantize(examples)
 model.save_quantized(quantized_model_dir)
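
Put together, example.py reads roughly as below after this commit; the import block and the pretrained_model_dir value sit above the hunk and are assumptions here, while everything from quantized_model_dir onward matches the diff.

# Reconstructed view of example.py after this change. The import lines and
# the pretrained_model_dir value are assumptions; the rest follows the diff.
from transformers import AutoTokenizer

from auto_fp8 import AutoFP8ForCausalLM, BaseQuantizeConfig

pretrained_model_dir = "facebook/opt-125m"  # assumed, not shown in the hunk
quantized_model_dir = "opt-125m-fp8"

tokenizer = AutoTokenizer.from_pretrained(pretrained_model_dir, use_fast=True)
examples = ["auto-fp8 is an easy-to-use model quantization library"]
examples = tokenizer(examples, return_tensors="pt").to("cuda")

quantize_config = BaseQuantizeConfig(quant_method="fp8", activation_scheme="static")

model = AutoFP8ForCausalLM.from_pretrained(
    pretrained_model_dir, quantize_config=quantize_config
)
model.quantize(examples)
model.save_quantized(quantized_model_dir)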

examples/original_quantize.py

Lines changed: 1 addition & 1 deletion
@@ -163,7 +163,7 @@ def forward(self, x):
 def replace_module(model, name, new_module):
     if "." in name:
         parent_name = name.rsplit(".", 1)[0]
-        child_name = name[len(parent_name) + 1:]
+        child_name = name[len(parent_name) + 1 :]
         parent = model.model.get_submodule(parent_name)
     else:
         parent_name = ""
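
The hunk's context cuts off inside the else branch. For readers, a plausible completion of this helper following the usual PyTorch module-swap pattern; the last three lines are assumptions, not part of the diff.

# The else-branch tail and the final setattr are assumed; everything down to
# parent_name = "" comes from the hunk above.
def replace_module(model, name, new_module):
    if "." in name:
        parent_name = name.rsplit(".", 1)[0]
        child_name = name[len(parent_name) + 1 :]
        parent = model.model.get_submodule(parent_name)
    else:
        parent_name = ""
        parent = model.model  # assumed
        child_name = name  # assumed
    setattr(parent, child_name, new_module)  # assumed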

quantize.py

Lines changed: 8 additions & 3 deletions
@@ -165,7 +165,7 @@ def forward(self, x):
 def replace_module(model, name, new_module):
     if "." in name:
         parent_name = name.rsplit(".", 1)[0]
-        child_name = name[len(parent_name) + 1:]
+        child_name = name[len(parent_name) + 1 :]
         parent = model.model.get_submodule(parent_name)
     else:
         parent_name = ""
@@ -193,7 +193,9 @@ def quantize_activations(model, calibration_tokens):
         if not isinstance(dynamic_quant_linear, FP8DynamicLinear):
             continue
         quantizer = FP8StaticLinearQuantizer(
-            dynamic_quant_linear.weight, dynamic_quant_linear.weight_scale, dynamic_quant_linear.bias
+            dynamic_quant_linear.weight,
+            dynamic_quant_linear.weight_scale,
+            dynamic_quant_linear.bias,
         )
         replace_module(model, name, quantizer)
         del dynamic_quant_linear
@@ -212,7 +214,10 @@ def quantize_activations(model, calibration_tokens):
         if not isinstance(quantizer, FP8StaticLinearQuantizer):
             continue
         static_proj = FP8StaticLinear(
-            quantizer.weight, quantizer.weight_scale, quantizer.bias, quantizer.act_scale
+            quantizer.weight,
+            quantizer.weight_scale,
+            quantizer.bias,
+            quantizer.act_scale,
         )
         replace_module(model, name, static_proj)
         del quantizer

setup.py

Lines changed: 1 addition & 1 deletion
@@ -8,7 +8,7 @@
     description="FP8 quantization for Transformers.",
     long_description=open("README.md").read(),
     long_description_content_type="text/markdown",
-    url="https://github.com/neuralmagic/auto_fp8",
+    url="https://github.com/neuralmagic/AutoFP8",
     packages=find_packages(),
     install_requires=[
         "torch>=2.2",

tests/test_auto_fp8.py

Lines changed: 14 additions & 11 deletions
@@ -1,28 +1,31 @@
 import os
 from transformers import AutoTokenizer
 from auto_fp8 import AutoFP8ForCausalLM, BaseQuantizeConfig
+import shutil
+

 def test_quantization():
     model_id = "facebook/opt-125m"
     quantized_model_dir = "opt-125m-fp8"

     tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True)
-    examples = [
-        "auto-fp8 is an easy-to-use model quantization library"
-    ]
-    examples = tokenizer(examples, return_tensors="pt").to("cuda")
+    examples = ["auto-fp8 is an easy-to-use model quantization library"]
+    examples = tokenizer(examples, return_tensors="pt")

-    quantize_config = BaseQuantizeConfig(
-        quant_method="fp8", activation_scheme="static"
-    )
+    quantize_config = BaseQuantizeConfig(quant_method="fp8", activation_scheme="static")

-    model = AutoFP8ForCausalLM.from_pretrained(model_id, quantize_config=quantize_config, device_map="auto")
+    model = AutoFP8ForCausalLM.from_pretrained(
+        model_id, quantize_config=quantize_config
+    )
+    model.model.to("cpu")

     model.quantize(examples)
     model.save_quantized(quantized_model_dir)

-    # We expect the model to be < 160MB
+    # Measure checkpoint size and cleanup
     model_size = os.path.getsize(f"{quantized_model_dir}/model.safetensors")
-    target_size = 160 * (1024*1024)
-    assert model_size < target_size
+    shutil.rmtree(quantized_model_dir)

+    # We expect the model to be < 160MB
+    target_size = 160 * (1024 * 1024)
+    assert model_size < target_size
