1 parent e1b1564 commit 001927f
examples/models/llama/source_transformation/quantize.py
@@ -77,7 +77,9 @@ def quantize( # noqa C901
     matches = re.findall(pattern, qmode)
     assert len(matches) == 1, f"Expected 1 match for pattern but got {len(matches)}"
     bitwidth = int(matches[0][0])
-    _load_torchao_aten_lib(libname="libtorchao_ops_mps_linear_fp_act_xbit_weight_aten")
+    _load_torchao_aten_lib(
+        libname="libtorchao_ops_mps_linear_fp_act_xbit_weight_aten"
+    )
     from torchao.experimental.quant_api import UIntxWeightOnlyLinearQuantizer
 
     with torch.no_grad():
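
Note: the change above is a pure line-wrapping reformat; behavior is unchanged. The call it touches, _load_torchao_aten_lib, registers the compiled torchao MPS linear kernels with PyTorch before UIntxWeightOnlyLinearQuantizer is imported and used. As a rough, hypothetical sketch of what a loader like this typically does (not the actual implementation from this repository; the package search path and error handling are assumptions):

import glob
import os

import torch


def _load_torchao_aten_lib(libname: str) -> None:
    # Sketch: locate the compiled shared library shipped inside the installed
    # torchao package and register its custom ATen ops with PyTorch.
    # The glob pattern below is an assumption for illustration only.
    import torchao

    candidates = glob.glob(
        os.path.join(os.path.dirname(torchao.__file__), "**", f"{libname}.*"),
        recursive=True,
    )
    assert len(candidates) == 1, (
        f"Expected exactly one library matching {libname}, found {len(candidates)}"
    )
    # torch.ops.load_library makes the ops available (e.g. to the quantizer
    # imported right after this call in quantize()).
    torch.ops.load_library(candidates[0])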