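"""
Tests that compressed, decompressed, and uncompressed variants of the same
optimized model produce identical generations, and that compressed checkpoints
load with CompressedLinear modules.
"""
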
import shutil
import tempfile
import unittest

import torch
from compressed_tensors.linear.compressed_linear import CompressedLinear
from compressed_tensors.quantization.utils import iter_named_leaf_modules
from parameterized import parameterized_class
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.utils.quantization_config import CompressedTensorsConfig

from tests.testing_utils import parse_params, requires_gpu

COMPRESSED_LINEAR_CONFIG_DIR = (
    "tests/llmcompressor/transformers/compression/run_compressed_configs"
)
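
# Note: parse_params is a repo-local helper (tests.testing_utils); as used here it
# is assumed to yield one dict of model stubs per config file in the directory
# above, and parameterized_class then generates one copy of each test class per
# config, filling in the *_model_stub class attributes.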
@requires_gpu
@parameterized_class(parse_params(COMPRESSED_LINEAR_CONFIG_DIR))
class Test_Decompressed_Linear_Uncompressed_Linear(unittest.TestCase):
    """
    Check that an uncompressed-Linear forward matches a decompressed-Linear forward.

    Uncompressed: optimized model saved with run_compressed=False; no decompression
    needed.
    Decompressed: optimized model saved with run_compressed=True, then decompressed
    via AutoModelForCausalLM loading.

    AutoModelForCausalLM decompression flow diagram: https://tinyurl.com/2ynb6wbu
    """
    compressed_model_stub = None
    uncompressed_model_stub = None

    @classmethod
    def setUpClass(cls):
        cls.test_dir = tempfile.mkdtemp()

        quantization_config = CompressedTensorsConfig(run_compressed=False)

        # Decompressed using HFQuantizer
        # Linear forward
        cls.decompressed_model = AutoModelForCausalLM.from_pretrained(
            cls.compressed_model_stub,
            torch_dtype="auto",
            device_map="auto",
            quantization_config=quantization_config,
        )

        # Load the model as-is, in its uncompressed state
        # Linear forward
        cls.uncompressed_model = AutoModelForCausalLM.from_pretrained(
            cls.uncompressed_model_stub,
            torch_dtype=cls.decompressed_model.dtype,
            device_map=cls.decompressed_model.device,
        )

        cls.tokenizer = AutoTokenizer.from_pretrained(cls.compressed_model_stub)
    def test_compressed_matches_decompressed(self):
        SAMPLE_INPUT = [
            "I love 4-bit quantization because",
            "What is the capital of France?",
            "def fibonacci(n):",
        ]

        decompressed_device = self.decompressed_model.device
        uncompressed_device = self.uncompressed_model.device

        # make sure each model is fully on its reported device (e.g. CPU -> CUDA)
        self.decompressed_model = self.decompressed_model.to(decompressed_device)
        self.uncompressed_model = self.uncompressed_model.to(uncompressed_device)

        inputs = self.tokenizer(SAMPLE_INPUT, return_tensors="pt", padding=True).to(
            decompressed_device
        )
        decompressed_output = self.decompressed_model.generate(**inputs, max_length=50)

        inputs = inputs.to(uncompressed_device)
        uncompressed_output = self.uncompressed_model.generate(**inputs, max_length=50)

        for idx in range(len(SAMPLE_INPUT)):
            assert torch.equal(decompressed_output[idx], uncompressed_output[idx])
    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.test_dir)
        del cls.decompressed_model
        del cls.uncompressed_model
        torch.cuda.empty_cache()
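
# Summary of the three load modes exercised in this file (terminology from the
# class docstrings):
#   Compressed:   checkpoint saved with run_compressed=True, loaded as-is
#                 -> CompressedLinear modules with a custom forward
#   Decompressed: checkpoint saved with run_compressed=True, loaded with
#                 CompressedTensorsConfig(run_compressed=False)
#                 -> HFQuantizer decompresses to plain Linear at load time
#   Uncompressed: checkpoint saved with run_compressed=False, loaded as-is
#                 -> plain Linear, nothing to decompress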
@requires_gpu
@parameterized_class(parse_params(COMPRESSED_LINEAR_CONFIG_DIR))
class Test_Compressed_CompressedLinear_Decompressed_Linear(unittest.TestCase):
    """
    Check that a compressed-CompressedLinear forward matches a decompressed-Linear
    forward.

    Compressed: optimized model saved with run_compressed=True; no decompression.
    Decompressed: optimized model saved with run_compressed=True, then decompressed
    via AutoModelForCausalLM loading.

    Every compressed model should contain CompressedLinear modules, which carry
    their own custom forward call.
    """
    compressed_model_stub = None

    @classmethod
    def setUpClass(cls):
        cls.test_dir = tempfile.mkdtemp()

        # Should contain CompressedLinear modules
        # CompressedLinear forward
        cls.compressed_model = AutoModelForCausalLM.from_pretrained(
            cls.compressed_model_stub,
            torch_dtype="auto",
            device_map="auto",
        )

        # Should contain only plain Linear modules
        # Linear forward
        quantization_config = CompressedTensorsConfig(run_compressed=False)
        cls.decompressed_model = AutoModelForCausalLM.from_pretrained(
            cls.compressed_model_stub,
            torch_dtype=cls.compressed_model.dtype,
            device_map=cls.compressed_model.device,
            quantization_config=quantization_config,
        )

        cls.tokenizer = AutoTokenizer.from_pretrained(cls.compressed_model_stub)
    def test_compressed_linear_modules_exist(self):
        compressed_linear_counts = 0
        for _, submodule in iter_named_leaf_modules(
            self.compressed_model,
        ):
            if isinstance(submodule, CompressedLinear):
                compressed_linear_counts += 1

        # some Linear modules are intentionally left uncompressed (e.g. lm_head),
        # so only require that at least one CompressedLinear exists
        assert compressed_linear_counts > 0
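
    # Note: CompressedLinear (from compressed_tensors) is assumed to decompress its
    # quantized weights inside its forward call, so generation below runs directly
    # on the compressed checkpoint.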
    def test_compressed_matches_decompressed__hf_quantizer(self):
        SAMPLE_INPUT = [
            "I love 4-bit quantization because",
            "What is the capital of France?",
            "def fibonacci(n):",
        ]

        decompressed_device = self.decompressed_model.device
        compressed_device = self.compressed_model.device

        # make sure each model is fully on its reported device (e.g. CPU -> CUDA)
        self.decompressed_model = self.decompressed_model.to(decompressed_device)
        self.compressed_model = self.compressed_model.to(compressed_device)

        inputs = self.tokenizer(SAMPLE_INPUT, return_tensors="pt", padding=True).to(
            decompressed_device
        )
        decompressed_model_out = self.decompressed_model.generate(
            **inputs, max_length=50
        )

        inputs = inputs.to(compressed_device)
        compressed_model_out = self.compressed_model.generate(**inputs, max_length=50)

        # Compare outputs for each input
        for idx in range(len(SAMPLE_INPUT)):
            assert torch.equal(compressed_model_out[idx], decompressed_model_out[idx])
    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.test_dir)
        del cls.decompressed_model
        del cls.compressed_model
        torch.cuda.empty_cache()
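
# A minimal usage sketch (assumed invocation and file location; requires a GPU and
# config stubs under COMPRESSED_LINEAR_CONFIG_DIR pointing at real checkpoints):
#   pytest tests/llmcompressor/transformers/compression/test_run_compressed.py -v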