Skip to content

Commit 38f8161

Browse files
committed
Run make style
Signed-off-by: Fynn Schmitt-Ulms <[email protected]>
1 parent c8b26be commit 38f8161

File tree

4 files changed

+5
-4
lines changed

4 files changed

+5
-4
lines changed

src/compressed_tensors/compressors/model_compressors/model_compressor.py

Lines changed: 2 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -195,7 +195,7 @@ def from_pretrained_model(
195195

196196
@staticmethod
197197
def parse_sparsity_config(
198-
compression_config: Union[Dict[str, Any], "CompressedTensorsConfig"]
198+
compression_config: Union[Dict[str, Any], "CompressedTensorsConfig"],
199199
) -> Union[Dict[str, Any], None]:
200200
"""
201201
Parse sparsity config from quantization/compression config. Sparsity
@@ -215,7 +215,7 @@ def parse_sparsity_config(
215215

216216
@staticmethod
217217
def parse_quantization_config(
218-
compression_config: Union[Dict[str, Any], "CompressedTensorsConfig"]
218+
compression_config: Union[Dict[str, Any], "CompressedTensorsConfig"],
219219
) -> Union[Dict[str, Any], None]:
220220
"""
221221
Parse quantization config from quantization/compression config. The

src/compressed_tensors/compressors/quantized_compressors/nvfp4_quantized.py

Lines changed: 1 addition & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -154,6 +154,7 @@ def pack_fp4_to_uint8(x: torch.Tensor) -> torch.Tensor:
154154
[0.0, 0.5, 1.0, 1.5, 2.0, 3.0, 4.0, 6.0], dtype=torch.float32
155155
)
156156

157+
157158
# reference: : https://github.com/vllm-project/vllm/pull/16362
158159
def unpack_fp4_from_uint8(
159160
a: torch.Tensor, m: int, n: int, dtype: Optional[torch.dtype] = torch.bfloat16

src/compressed_tensors/compressors/sparse_quantized_compressors/marlin_24.py

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -48,7 +48,7 @@ class Marlin24Compressor(BaseCompressor):
4848

4949
@staticmethod
5050
def validate_quant_compatability(
51-
names_to_scheme: Dict[str, QuantizationScheme]
51+
names_to_scheme: Dict[str, QuantizationScheme],
5252
) -> bool:
5353
"""
5454
Checks if every quantized module in the model is compatible with Marlin24

src/compressed_tensors/registry/registry.py

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -55,7 +55,7 @@ def standardize_lookup_name(name: str) -> str:
5555

5656

5757
def standardize_alias_name(
58-
name: Union[None, str, List[str]]
58+
name: Union[None, str, List[str]],
5959
) -> Union[None, str, List[str]]:
6060
if name is None:
6161
return None

0 commit comments

Comments (0)