We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent c8ae1ed · commit 63dfaf7 (Copy full SHA for 63dfaf7)
src/llmcompressor/observers/min_max.py
@@ -3,7 +3,7 @@
 import torch
 from compressed_tensors.quantization.quant_args import QuantizationArgs
 from compressed_tensors.quantization.utils import calculate_qparams, generate_gparam
-from compressed_tensors.utils import deprecated, patch_attr
+from compressed_tensors.utils import deprecated

 from llmcompressor.observers.base import Observer
0 commit comments