Commit f30581c

[misc][perf] remove old code (#11425)

Removes the hint_on_error decorator, the module-level loop that wrapped every torch.Tensor-annotated function in vllm/_custom_ops.py with it, and the functools import that only the decorator used.

Authored by youkaichao
Signed-off-by: youkaichao <[email protected]>

1 parent 048fc57 commit f30581c


vllm/_custom_ops.py

Lines changed: 0 additions & 51 deletions
@@ -1,5 +1,4 @@
 import contextlib
-import functools
 import importlib
 from typing import TYPE_CHECKING, List, Optional, Tuple, Union
 

@@ -36,34 +35,6 @@ def register_fake(fn):
         from torch.library import impl_abstract as register_fake
 
 
-def hint_on_error(fn):
-
-    @functools.wraps(fn)
-    def wrapper(*args, **kwargs):
-        try:
-            return fn(*args, **kwargs)
-
-        except NotImplementedError as e:
-            msg = (
-                "Error in calling custom op %s: %s\n"
-                "Not implemented or built, mostly likely because the current current device "
-                "does not support this kernel (less likely TORCH_CUDA_ARCH_LIST was set "
-                "incorrectly while building)")
-            logger.error(msg, fn.__name__, e)
-            raise NotImplementedError(msg % (fn.__name__, e)) from e
-        except AttributeError as e:
-            msg = (
-                "Error in calling custom op %s: %s\n"
-                "Possibly you have built or installed an obsolete version of vllm.\n"
-                "Please try a clean build and install of vllm,"
-                "or remove old built files such as vllm/*cpython*.so and build/ ."
-            )
-            logger.error(msg, fn.__name__, e)
-            raise e
-
-    return wrapper
-
-
 # activation ops
 def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
     torch.ops._C.silu_and_mul(out, x)
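
For context, the removed hint_on_error helper follows a standard decorator pattern: call through to the wrapped op, catch the low-level failure, and re-raise it with an actionable hint. A minimal standalone sketch of that pattern (fake_kernel and the message text here are illustrative stand-ins, not vLLM code):

import functools

def hint_on_error(fn):
    """Re-raise NotImplementedError from fn with a friendlier hint."""

    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except NotImplementedError as e:
            # surface the most likely cause alongside the original error
            msg = (f"Error in calling custom op {fn.__name__}: {e}\n"
                   "The kernel was likely not built for the current device.")
            raise NotImplementedError(msg) from e

    return wrapper

@hint_on_error
def fake_kernel(x):
    # stand-in for a custom op with no kernel available on this device
    raise NotImplementedError("no kernel for this arch")

Calling fake_kernel(1) then raises a NotImplementedError whose message carries both the original error and the hint, with the original exception chained via "from e".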
@@ -1101,25 +1072,3 @@ def get_graph_buffer_ipc_meta(fa: int) -> Tuple[List[int], List[int]]:
 def register_graph_buffers(fa: int, handles: List[List[int]],
                            offsets: List[List[int]]) -> None:
     torch.ops._C_custom_ar.register_graph_buffers(fa, handles, offsets)
-
-
-# temporary fix for https://github.com/vllm-project/vllm/issues/5456
-# TODO: remove this in v0.6.0
-names_and_values = globals()
-names_and_values_to_update = {}
-# prepare variables to avoid dict size change during iteration
-k, v, arg = None, None, None
-fn_type = type(lambda x: x)
-for k, v in names_and_values.items():
-    # find functions that are defined in this file and have torch.Tensor
-    # in their annotations. `arg == "torch.Tensor"` is used to handle
-    # the case when users use `from __future__ import annotations` to turn
-    # type hints into strings.
-    if isinstance(v, fn_type) \
-            and v.__code__.co_filename == __file__ \
-            and any(arg is torch.Tensor or arg == "torch.Tensor"
-                    for arg in v.__annotations__.values()):
-        names_and_values_to_update[k] = hint_on_error(v)
-
-names_and_values.update(names_and_values_to_update)
-del names_and_values_to_update, names_and_values, v, k, fn_type
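
The second removed block applied that decorator automatically: it walked the module's globals(), selected plain functions defined in this file whose annotations mention torch.Tensor (either the type object itself or its string form), and swapped each one for its wrapped version. Below is a torch-free sketch of the same technique; float and scale are illustrative stand-ins for torch.Tensor and a custom op, and the pass-through hint_on_error keeps the example self-contained:

import types

def hint_on_error(fn):
    # error-hinting decorator as sketched above; pass-through for brevity
    return fn

def scale(x: "float") -> "float":
    # stand-in for a custom op annotated with the target type
    return 2.0 * x

# collect replacements first, then install them in one update, so the
# globals() dict is never resized while it is being iterated
_updates = {}
for name, value in list(globals().items()):
    if (isinstance(value, types.FunctionType)
            and value.__code__.co_filename == __file__
            and any(ann is float or ann == "float"
                    for ann in value.__annotations__.values())):
        _updates[name] = hint_on_error(value)

globals().update(_updates)
del _updates

The string comparison (ann == "float" here, mirroring arg == "torch.Tensor" in the original) handles modules that enable from __future__ import annotations, where every annotation is stored as a string rather than a type object.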
