
Commit 492f1e8

style
Signed-off-by: Kyle Sayers <[email protected]>
1 parent 65e75fd

File tree

2 files changed (+3, -7 lines)

src/llmcompressor/modifiers/quantization/calibration.py

Lines changed: 1 addition & 1 deletion
@@ -198,7 +198,7 @@ def calibrate_activations(module: Module, value: torch.Tensor, base_name: str):
     calculate_qparams = False
     if quantization_args.strategy == QuantizationStrategy.TENSOR_GROUP:
         calculate_gparam = True
-
+
     # (..., 1, hidden_dim)
     # this reshaping is mostly for the benefit of group quantization
     value = value.unsqueeze(-2)
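
The diff's own comment says the unsqueeze exists "mostly for the benefit of group quantization". As a minimal, standalone sketch (not the library's actual code path; hidden_dim, group_size, and the reshape below are illustrative assumptions), the extra singleton axis keeps the tensor's rank consistent once the hidden dimension is later split into groups:

import torch

# Illustrative sizes; not taken from llmcompressor
hidden_dim, group_size = 8, 4

value = torch.randn(2, hidden_dim)   # (batch, hidden_dim)
value = value.unsqueeze(-2)          # (batch, 1, hidden_dim)

# Hypothetical grouped view: split the last dim into (num_groups, group_size)
grouped = value.reshape(*value.shape[:-1], hidden_dim // group_size, group_size)
print(grouped.shape)                 # torch.Size([2, 1, 2, 4])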

src/llmcompressor/observers/base.py

Lines changed: 2 additions & 6 deletions
@@ -8,9 +8,8 @@
     QuantizationArgs,
     QuantizationStrategy,
 )
-from compressed_tensors.quantization.utils import is_fp4
+from compressed_tensors.quantization.utils import is_fp4, strict_divide
 from compressed_tensors.registry.registry import RegistryMixin
-from compressed_tensors.quantization.utils import strict_divide
 from loguru import logger
 from torch import FloatTensor, IntTensor, Tensor

@@ -255,10 +254,7 @@ def get_qparams_along_dim(
         dim = set(dim)

         # convert negative dims
-        dim = [
-            d if d >= 0 else observed.ndim + d
-            for d in dim
-        ]
+        dim = [d if d >= 0 else observed.ndim + d for d in dim]

         # reduce all dimensions except the the one pass as argument to this function
         reduce_dims = tuple(idx for idx in range(observed.ndim) if idx not in dim)
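
To see what the collapsed comprehension does, here is a small standalone sketch (observed and dim are invented for the example): negative axes are first mapped to their positive equivalents, which is what lets reduce_dims exclude them correctly.

import torch

observed = torch.randn(4, 8, 16)  # example tensor
dim = {-1}                        # example: keep the last axis

# convert negative dims to positive indices (mirrors the diff's one-liner)
dim = [d if d >= 0 else observed.ndim + d for d in dim]

# reduce over every axis except the requested one(s)
reduce_dims = tuple(idx for idx in range(observed.ndim) if idx not in dim)
print(dim, reduce_dims)  # [2] (0, 1)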
