Skip to content

Commit 23b2d1d

Browse files
committed
fix
Signed-off-by: Pawel Gadzinski <pgadzinski@nvidia.com>
1 parent 9907081 commit 23b2d1d

File tree

4 files changed

+3
-10
lines changed

4 files changed

+3
-10
lines changed

transformer_engine/debug/features/disable_fp8_gemm.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -45,5 +45,3 @@ class DisableFP8GEMM(DisableQuantizationGEMM):
4545
enabled: True
4646
gemms: [dgrad, wgrad]
4747
"""
48-
49-
pass # Inherits all functionality from DisableQuantizationGEMM

transformer_engine/debug/features/disable_fp8_layer.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -34,5 +34,3 @@ class DisableFP8Layer(DisableQuantizationLayer):
3434
DisableFP8Layer: # Deprecated: use DisableQuantizationLayer
3535
enabled: True
3636
"""
37-
38-
pass # Inherits all functionality from DisableQuantizationLayer

transformer_engine/debug/features/log_fp8_tensor_stats.py

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -9,12 +9,13 @@
99

1010
import torch
1111
import nvdlfw_inspect.api as debug_api
12-
12+
import transformer_engine_torch as tex
1313

1414
from nvdlfw_inspect.debug_features.log_tensor_stats import LogTensorStats as BaseLogTensorStats
1515
from nvdlfw_inspect.registry import Registry, api_method
1616

1717
from transformer_engine.debug.features.utils.stats_buffer import STATS_BUFFERS
18+
from transformer_engine.debug.features.utils import get_reduction_params, next_enabled_iter
1819
from transformer_engine.pytorch.tensor import Quantizer, QuantizedTensor
1920
from transformer_engine.pytorch.tensor.float8_tensor import (
2021
Float8Quantizer,
@@ -23,8 +24,6 @@
2324
from transformer_engine.pytorch.tensor.mxfp8_tensor import MXFP8Quantizer
2425
from transformer_engine.pytorch.tensor.float8_blockwise_tensor import Float8BlockQuantizer
2526

26-
import transformer_engine_torch as tex
27-
2827
try:
2928
from transformer_engine.pytorch.tensor.nvfp4_tensor import NVFP4Quantizer
3029

@@ -33,8 +32,6 @@
3332
_nvfp4_available = False
3433
NVFP4Quantizer = None
3534

36-
from transformer_engine.debug.features.utils import get_reduction_params, next_enabled_iter
37-
3835

3936
ALL_RECIPE_NAMES = ["fp8_delayed_scaling", "fp8_current_scaling", "mxfp8", "fp8_block_scaling"]
4037

transformer_engine/debug/features/log_nvfp4_tensor_stats.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -103,7 +103,7 @@ def update_aux_dict(
103103
self,
104104
aux_dict: Dict,
105105
quantized_tensor: QuantizedTensor,
106-
quantizer: Quantizer,
106+
quantizer: Quantizer, # pylint: disable=unused-argument
107107
original_tensor: torch.Tensor,
108108
):
109109
"""

0 commit comments

Comments (0)