
Commit baa97ab

Remove unused imports
Signed-off-by: Fynn Schmitt-Ulms <[email protected]>
1 parent c49380a commit baa97ab

12 files changed: +6 additions, -16 deletions
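For context (not part of the commit itself): unused imports like the ones removed below are what an F401-style lint pass reports, e.g. flake8 or ruff with rule F401. The following is a minimal, hypothetical sketch of such a check built on Python's ast module; the unused_imports helper and its command-line wrapper are illustrative names, not code from this repository.

# Minimal sketch (assumption: not from this repository) of an F401-style check
# that reports names an import statement binds but the module never loads.
import ast
import sys


def unused_imports(path: str) -> list:
    with open(path, encoding="utf-8") as handle:
        tree = ast.parse(handle.read(), filename=path)

    # Names bound by import statements, mapped to the line that binds them.
    bound = {}
    for node in ast.walk(tree):
        if isinstance(node, (ast.Import, ast.ImportFrom)):
            for alias in node.names:
                local_name = (alias.asname or alias.name).split(".")[0]
                bound[local_name] = node.lineno

    # Every name the module loads; attribute access such as torch.nn
    # still loads the base name torch, so dotted usage is covered.
    used = {
        node.id
        for node in ast.walk(tree)
        if isinstance(node, ast.Name) and isinstance(node.ctx, ast.Load)
    }

    return [
        "%s:%d: '%s' imported but unused" % (path, lineno, name)
        for name, lineno in sorted(bound.items(), key=lambda item: item[1])
        if name not in used
    ]


if __name__ == "__main__":
    for source_path in sys.argv[1:]:
        for message in unused_imports(source_path):
            print(message)

This sketch ignores names referenced only inside strings (for example entries in __all__), so a real linter's F401 rule remains the safer tool; the snippet only illustrates the kind of analysis behind changes like the ones below.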

src/compressed_tensors/compressors/model_compressors/model_compressor.py

Lines changed: 0 additions & 2 deletions
@@ -42,7 +42,6 @@
     load_pretrained_quantization_parameters,
 )
 from compressed_tensors.quantization.lifecycle import expand_target_names
-from compressed_tensors.quantization.utils import is_module_quantized
 from compressed_tensors.utils import (
     align_module_device,
     delete_offload_parameter,
@@ -390,7 +389,6 @@ def compress_model(self, model: Module):
         )
 
         for prefix, module in tqdm(model.named_modules(), desc="Compressing model"):
-
             if prefix in module_to_scheme or prefix in sparse_compression_targets:
                 module_device = get_execution_device(module)
                 is_meta = module_device.type == "meta"

src/compressed_tensors/compressors/quantized_compressors/base.py

Lines changed: 0 additions & 1 deletion
@@ -24,7 +24,6 @@
     get_nested_weight_mappings,
     merge_names,
 )
-from compressed_tensors.utils.safetensors_load import match_param_name
 from safetensors import safe_open
 from torch import Tensor
 from tqdm import tqdm

src/compressed_tensors/compressors/quantized_compressors/nvfp4_quantized.py

Lines changed: 0 additions & 3 deletions
@@ -15,7 +15,6 @@
 
 from typing import Dict, Optional, Tuple
 
-import numpy
 import torch
 from compressed_tensors.compressors.base import BaseCompressor
 from compressed_tensors.compressors.quantized_compressors.base import (
@@ -71,7 +70,6 @@ def compress_weight(
         zero_point: Optional[torch.Tensor] = None,
         g_idx: Optional[torch.Tensor] = None,
     ) -> Dict[str, torch.Tensor]:
-
         quantized_weight = quantize(
             x=weight,
             scale=scale,
@@ -91,7 +89,6 @@ def decompress_weight(
         compressed_data: Dict[str, Tensor],
         quantization_args: Optional[QuantizationArgs] = None,
     ) -> torch.Tensor:
-
         weight = compressed_data["weight_packed"]
         scale = compressed_data["weight_scale"]
         global_scale = compressed_data["weight_global_scale"]

src/compressed_tensors/compressors/quantized_compressors/pack_quantized.py

Lines changed: 0 additions & 1 deletion
@@ -14,7 +14,6 @@
 import math
 from typing import Dict, Literal, Optional, Tuple, Union
 
-import numpy as np
 import torch
 from compressed_tensors.compressors.base import BaseCompressor
 from compressed_tensors.compressors.quantized_compressors.base import (

src/compressed_tensors/compressors/sparse_compressors/sparse_24_bitmask.py

Lines changed: 1 addition & 1 deletion
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 from dataclasses import dataclass
-from typing import Dict, Generator, List, Tuple, Union
+from typing import Dict, List, Tuple, Union
 
 import torch
 from compressed_tensors.compressors.base import BaseCompressor

src/compressed_tensors/quantization/quant_scheme.py

Lines changed: 1 addition & 1 deletion
@@ -14,7 +14,7 @@
 
 import warnings
 from copy import deepcopy
-from typing import Any, Dict, List, Optional
+from typing import List, Optional
 
 from compressed_tensors.quantization.quant_args import (
     DynamicType,

src/compressed_tensors/quantization/utils/helpers.py

Lines changed: 0 additions & 1 deletion
@@ -29,7 +29,6 @@
 from compressed_tensors.utils import deprecated
 from torch import FloatTensor, IntTensor, Tensor
 from torch.nn import Module
-from tqdm import tqdm
 
 
 __all__ = [

src/compressed_tensors/transform/factory/hadamard.py

Lines changed: 2 additions & 2 deletions
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 import math
-from typing import Optional, Union
+from typing import Optional
 
 import torch
 from compressed_tensors.transform import TransformArgs, TransformScheme
@@ -26,7 +26,7 @@
 from compressed_tensors.utils import get_execution_device, get_offloaded_device
 from compressed_tensors.utils.helpers import ParameterizedDefaultDict
 from torch import Tensor, device, dtype
-from torch.nn import Linear, Module, Parameter
+from torch.nn import Module, Parameter
 
 
 @TransformFactory.register("hadamard")

src/compressed_tensors/transform/factory/matrix_multiply.py

Lines changed: 1 addition & 1 deletion
@@ -24,7 +24,7 @@
 from compressed_tensors.utils import get_offloaded_device
 from compressed_tensors.utils.helpers import ParameterizedDefaultDict
 from torch import Tensor, device, dtype
-from torch.nn import Linear, Module, Parameter
+from torch.nn import Module, Parameter
 
 
 @TransformFactory.register("random-matrix")

src/compressed_tensors/transform/utils/matrix.py

Lines changed: 1 addition & 1 deletion
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Callable, Optional, Tuple
+from typing import Optional
 
 import torch
 from compressed_tensors.transform import TransformLocation
