Skip to content

Commit 2e233ab

Browse files
Renamed the Quantizer class to GPTQQuant
1 parent 66e61c5 commit 2e233ab

File tree

2 files changed

+3
-3
lines changed

2 files changed

+3
-3
lines changed

keras/src/quantizers/gptqutils.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313
from keras.src.layers import Embedding
1414

1515
from .gptq import GPTQ
16-
from .quant import Quantizer
16+
from .quant import GPTQQuant
1717

1818

1919
def get_dataloader(tokenizer, seqlen, dataset, nsamples=128, seed=0):
@@ -363,7 +363,7 @@ def hook(*args, **kwargs):
363363
inp_reshaped = ops.reshape(layer_inputs, (-1, num_features))
364364
gptq_object.update_hessian_with_batch(inp_reshaped)
365365

366-
quantizer = Quantizer()
366+
quantizer = GPTQQuant()
367367
quantizer.configure(
368368
wbits, perchannel=True, sym=symmetric, groupsize=groupsize
369369
)

keras/src/quantizers/quant.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@ def quantize(x, scale, zero, maxq):
1717
return scale * (q - zero)
1818

1919

20-
class Quantizer:
20+
class GPTQQuant:
2121
"""
2222
This version contains the definitive fix for the per-tensor shape mismatch,
2323
as identified by the unit test. It now correctly tiles the per-tensor

0 commit comments

Comments
 (0)