We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 86a8ace commit 079e659 (Copy full SHA for 079e659)
vllm_gaudi/platform.py
@@ -143,7 +143,7 @@ def set_torch_compile(cls) -> None:
143
# Eager backend (PT_HPU_LAZY_MODE = 0) must be selected for
144
# torch.compile support
145
os.environ['PT_HPU_WEIGHT_SHARING'] = '0'
146
- is_lazy = os.environ.get('PT_HPU_LAZY_MODE', '1') == '1'
+ is_lazy = os.environ.get('PT_HPU_LAZY_MODE', '0') == '1'
147
if is_lazy:
148
torch._dynamo.config.disable = True
149
# NOTE multi-HPU inference with HPUGraphs (lazy-only)
0 commit comments