Skip to content

Commit c37e519

Browse files
author
lingzhi98
authored
rename woq to _gptq (#3562)
* rename woq to _gptq
* modify comment
1 parent f3dd1b4 commit c37e519

File tree

4 files changed

+4
-5
lines changed

4 files changed

+4
-5
lines changed

intel_extension_for_pytorch/__init__.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -97,7 +97,6 @@
9797
if has_cpu():
9898
from . import cpu
9999
from . import quantization
100-
from .quantization import woq
101100
from . import _meta_registrations
102101

103102
try:
intel_extension_for_pytorch/quantization/GPTQ/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
from .api import woq
1+
from .api import _gptq

intel_extension_for_pytorch/quantization/GPTQ/api.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@
1010

1111

1212
@torch.no_grad()
13-
def woq(
13+
def _gptq(
1414
model,
1515
dataset,
1616
quantized_ckpt,
@@ -47,7 +47,7 @@ def woq(
4747
>>> dataset = ...
4848
>>> model = GPTJForCausalLM.from_pretrained(model_path)
4949
>>> model.eval()
50-
>>> ipex.woq(model, dataset, 'quantized_weight.pt', wbits=4)
50+
>>> ipex.quantization._gptq(model, dataset, 'quantized_weight.pt', wbits=4)
5151
"""
5252
print("Starting ...")
5353

intel_extension_for_pytorch/quantization/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,4 +9,4 @@
99
WoqLowpMode,
1010
)
1111
from ._autotune import autotune
12-
from .GPTQ import woq
12+
from .GPTQ import _gptq

0 commit comments

Comments (0)