
Commit 95da832

fix awq lite param
Signed-off-by: Jennifer Chen <[email protected]>
1 parent: fc0bb88

1 file changed

tests/_test_utils/torch_quantization/quantize_common.py (4 additions, 4 deletions)
@@ -213,13 +213,13 @@ def forward_loop(model):
     if config in [mtq.INT4_AWQ_CFG, mtq.W4A8_AWQ_BETA_CFG]:
         # Check act scale
         _reduce_quantizer_attr(
-            model.fc1.weight_quantizer.awq_lite,
+            model.fc1.awq_lite,
             "act_scale",
             dist.ReduceOp.AVG,
             group=group,
         )
         _reduce_quantizer_attr(
-            model.fc2.weight_quantizer.awq_lite,
+            model.fc2.awq_lite,
             "act_scale",
             dist.ReduceOp.AVG,
             group=group,
@@ -274,13 +274,13 @@ def _reduce_quantizer_attr(quantizer, attr=str, op=dist.ReduceOp.MAX):
     # Check act scale
     if config in [mtq.INT4_AWQ_CFG, mtq.W4A8_AWQ_BETA_CFG]:
         _reduce_quantizer_attr(
-            model.fc1.weight_quantizer.awq_lite,
+            model.fc1.awq_lite,
             "act_scale",
             dist.ReduceOp.AVG,
             group=tp_group,
         )
         _reduce_quantizer_attr(
-            model.fc2.weight_quantizer.awq_lite,
+            model.fc2.awq_lite,
             "act_scale",
             dist.ReduceOp.AVG,
             group=tp_group,
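Context for the change: the diff suggests the awq_lite calibration state (which holds "act_scale") lives on the quantized linear module itself (model.fc1.awq_lite) rather than on its weight_quantizer, so the test now reads it from the module. Below is a minimal, hypothetical sketch of what a helper like _reduce_quantizer_attr could do for this kind of distributed-consistency check. Only its signature and call sites appear in the diff; the body, the group keyword handling, and the assertion are assumptions, not the actual implementation in quantize_common.py.

# Hypothetical sketch only -- the real helper in quantize_common.py may differ.
import torch
import torch.distributed as dist

def _reduce_quantizer_attr(quantizer, attr, op=dist.ReduceOp.MAX, group=None):
    """Check that `attr` (e.g. "act_scale" on an awq_lite object) is already
    synchronized across ranks: all-reduce a copy and compare it to the local value."""
    local_val = getattr(quantizer, attr).detach().clone()
    reduced_val = local_val.clone()
    dist.all_reduce(reduced_val, op=op, group=group)
    assert torch.allclose(local_val, reduced_val), f"{attr} differs across ranks"

With a helper along these lines, passing model.fc1.awq_lite (instead of model.fc1.weight_quantizer.awq_lite) simply points getattr at the object that actually carries act_scale after calibration.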
