
Commit c215b71

pre-commit fixes

1 parent 5b1ea90 commit c215b71

File tree

  • vllm/model_executor/layers/fused_moe

1 file changed: +5 -2 lines changed

vllm/model_executor/layers/fused_moe/layer.py

Lines changed: 5 additions & 2 deletions
@@ -792,9 +792,12 @@ def __init__(
         if quant_config and quant_config.get_name() == "mxfp4":
             from vllm.model_executor.layers.quantization.mxfp4 import (  # noqa: E501
                 should_use_flashinfer_mxfp4, should_use_flashinfer_mxfp4_bf16)
-            if current_platform.is_rocm() or (should_use_flashinfer_mxfp4() and current_platform.is_device_capability(100)):
+            if current_platform.is_rocm() or (
+                    should_use_flashinfer_mxfp4()
+                    and current_platform.is_device_capability(100)):
                 hidden_size = round_up(hidden_size, 256)
-            elif should_use_flashinfer_mxfp4_bf16() and current_platform.is_device_capability(90):
+            elif should_use_flashinfer_mxfp4_bf16(
+            ) and current_platform.is_device_capability(90):
                 hidden_size = round_up(hidden_size, 128)
 
             # For smuggling this layer into the fused moe custom op
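The change is a pre-commit line-length fix; the padding logic itself is unchanged. As a minimal sketch of what that logic does (not vLLM's implementation: round_up below is a local stand-in for the helper used in the diff, and 2880 is only an illustrative hidden size), assuming round_up(x, m) rounds x up to the nearest multiple of m:

# Local stand-in for the round_up helper used in the diff above
# (assumption: it rounds an integer up to the nearest multiple).
def round_up(x: int, multiple: int) -> int:
    return ((x + multiple - 1) // multiple) * multiple

# The diff pads hidden_size to a multiple of 256 on ROCm or on
# device capability 100 with FlashInfer MXFP4, and to a multiple of
# 128 on device capability 90 with the FlashInfer MXFP4 BF16 path.
print(round_up(2880, 256))  # 3072
print(round_up(2880, 128))  # 2944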
