We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent ac028fa · commit b69a0f9 — Copy full SHA for b69a0f9
vllm/platforms/xpu.py
@@ -2,7 +2,7 @@
2
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
4
import os
5
-from typing import TYPE_CHECKING, Any, Optional
+from typing import TYPE_CHECKING, Optional
6
7
import torch
8
@@ -176,12 +176,6 @@ def check_if_supports_dtype(cls, torch_dtype: torch.dtype):
176
"You can use float16 instead by explicitly setting the "
177
"`dtype` flag in CLI, for example: --dtype=half.")
178
179
- def get_global_graph_pool(self) -> Any:
180
- """
181
- Currently xpu does NOT support Graph model.
182
183
- raise NotImplementedError("XPU does not support Graph model.")
184
-
185
@classmethod
186
def opaque_attention_op(cls) -> bool:
187
return True
0 commit comments