From 083d8aa330e1bbe24f50c4d5bdad33727b1a995a Mon Sep 17 00:00:00 2001 From: muxuezzz <1103535658@qq.com> Date: Sat, 27 Sep 2025 15:23:09 +0800 Subject: [PATCH] fix: improve first-inference speed on low-level NPUs. --- comfy/model_management.py | 1 + 1 file changed, 1 insertion(+) diff --git a/comfy/model_management.py b/comfy/model_management.py index c5b817b62643..8557612793c7 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -121,6 +121,7 @@ def get_supported_float8_types(): try: import torch_npu # noqa: F401 _ = torch.npu.device_count() + torch_npu.npu.set_compile_mode(jit_compile=False) npu_available = torch.npu.is_available() except: npu_available = False