Skip to content

Commit a1b21d3

Browse files
committed
fix lint
Signed-off-by: wangxiyuan <[email protected]>
1 parent d0db29f commit a1b21d3

File tree

2 files changed

+4
-2
lines changed

2 files changed

+4
-2
lines changed

vllm_ascend/torchair/torchair_sfa.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -981,7 +981,6 @@ def forward(
981981
# Profiling run.
982982
return output
983983

984-
985984
if attn_metadata.prefill is not None:
986985
assert attn_metadata.num_decodes is not None and \
987986
attn_metadata.num_prefills is not None and \

vllm_ascend/worker/worker_v1.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -91,7 +91,10 @@ def __init__(
9191
if get_ascend_config().use_sfa:
9292
# Direct import instead of using try_register_lib to ensure proper error handling when
9393
# custom_ops is necessary but not available (e.g., in DeepSeek v3.2 deployments)
94-
import custom_ops # type: ignore[import-untyped] # noqa
94+
# yapf: disable
95+
import custom_ops # type: ignore # noqa
96+
97+
# yapf: enable
9598
logger.info(
9699
"custom_ops module loaded successfully. Custom operators like "
97100
"torch.ops.custom.npu_sparse_flash_attention are now available."

0 commit comments

Comments (0)