Commit 33b2be7

fix style
1 parent e97e2ef commit 33b2be7

File tree

src/diffusers/quantizers/auto.py
src/diffusers/quantizers/finegrained_fp8/__init__.py
src/diffusers/quantizers/finegrained_fp8/finegrained_fp8_quantizer.py
src/diffusers/quantizers/quantization_config.py

4 files changed: 6 additions, 7 deletions

src/diffusers/quantizers/auto.py

Lines changed: 2 additions & 2 deletions
@@ -20,19 +20,19 @@
 from typing import Dict, Optional, Union
 
 from .bitsandbytes import BnB4BitDiffusersQuantizer, BnB8BitDiffusersQuantizer
+from .finegrained_fp8 import FinegrainedFP8Quantizer
 from .gguf import GGUFQuantizer
 from .quantization_config import (
     BitsAndBytesConfig,
+    FinegrainedFP8Config,
     GGUFQuantizationConfig,
     QuantizationConfigMixin,
     QuantizationMethod,
     QuantoConfig,
     TorchAoConfig,
-    FinegrainedFP8Config,
 )
 from .quanto import QuantoQuantizer
 from .torchao import TorchAoHfQuantizer
-from .finegrained_fp8 import FinegrainedFP8Quantizer
 
 
 AUTO_QUANTIZER_MAPPING = {
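
For orientation, not part of the diff: the imports above feed the two registries defined directly below this hunk, which is why the style fix groups them alphabetically with the other backends. A plausible sketch of those registry entries, using the names imported in the hunk above; the "finegrained_fp8" key is an assumption based on the other backends' naming convention:

# Sketch only: the real mappings live below the imports in auto.py.
# The "finegrained_fp8" key is an assumption, not shown in this diff.
AUTO_QUANTIZER_MAPPING = {
    "bitsandbytes_4bit": BnB4BitDiffusersQuantizer,
    "bitsandbytes_8bit": BnB8BitDiffusersQuantizer,
    "gguf": GGUFQuantizer,
    "quanto": QuantoQuantizer,
    "torchao": TorchAoHfQuantizer,
    "finegrained_fp8": FinegrainedFP8Quantizer,
}

AUTO_QUANTIZATION_CONFIG_MAPPING = {
    "bitsandbytes_4bit": BitsAndBytesConfig,
    "bitsandbytes_8bit": BitsAndBytesConfig,
    "gguf": GGUFQuantizationConfig,
    "quanto": QuantoConfig,
    "torchao": TorchAoConfig,
    "finegrained_fp8": FinegrainedFP8Config,
}

Keying both dicts by the same string is what lets the auto machinery resolve a quantization config to its quantizer class.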
src/diffusers/quantizers/finegrained_fp8/__init__.py

Lines changed: 1 addition & 1 deletion

@@ -1 +1 @@
-from .finegrained_fp8_quantizer import FinegrainedFP8Quantizer
+from .finegrained_fp8_quantizer import FinegrainedFP8Quantizer

(A whitespace-only change: the rendered text of the line is identical.)
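For context, this one-line module is the package's public re-export, so both of the following imports resolve to the same class (paths taken from this commit's file tree):

# Both imports resolve to the same class via the package __init__ re-export.
from diffusers.quantizers.finegrained_fp8 import FinegrainedFP8Quantizer
from diffusers.quantizers.finegrained_fp8.finegrained_fp8_quantizer import FinegrainedFP8Quantizer
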
src/diffusers/quantizers/finegrained_fp8/finegrained_fp8_quantizer.py

Lines changed: 2 additions & 2 deletions
@@ -1,8 +1,7 @@
 from typing import TYPE_CHECKING, Any, Dict, List, Optional
 
-from ...utils import is_accelerate_available, is_torch_available, logging
+from ...utils import get_module_from_name, is_accelerate_available, is_torch_available, logging
 from ..base import DiffusersQuantizer
-from ...utils import get_module_from_name
 
 
 if is_torch_available():

@@ -13,6 +12,7 @@
 if TYPE_CHECKING:
     from ...models.modeling_utils import ModelMixin
 
+
 class FinegrainedFP8Quantizer(DiffusersQuantizer):
     """
     FP8 quantization implementation supporting both standard and MoE models.
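
The docstring above is the only glimpse of the quantizer's purpose in this commit. As a hedged usage sketch, assuming `FinegrainedFP8Config` plugs into `from_pretrained()` the way the other diffusers quantization configs do; the model ID, subfolder, and kwargs below are placeholders, not asserted by this commit:

# Hypothetical usage sketch, not from this commit. Assumes FinegrainedFP8Config
# is accepted by from_pretrained() like BitsAndBytesConfig/QuantoConfig are;
# the model ID, subfolder, and dtype are placeholders.
import torch

from diffusers import FluxTransformer2DModel
from diffusers.quantizers.quantization_config import FinegrainedFP8Config

quant_config = FinegrainedFP8Config()  # default kwargs; the final API may differ
transformer = FluxTransformer2DModel.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    subfolder="transformer",
    quantization_config=quant_config,
    torch_dtype=torch.bfloat16,
)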

src/diffusers/quantizers/quantization_config.py

Lines changed: 1 addition & 2 deletions
@@ -28,7 +28,7 @@
 from dataclasses import dataclass
 from enum import Enum
 from functools import partial
-from typing import Any, Dict, List, Optional, Union, Tuple
+from typing import Any, Dict, List, Optional, Tuple, Union
 
 from packaging import version

@@ -725,7 +725,6 @@ def post_init(self):
             raise ValueError(f"Only support weights in {accepted_weights} but found {self.weights_dtype}")
 
 
-
 @dataclass
 class FinegrainedFP8Config(QuantizationConfigMixin):
     """
