@@ -4,11 +4,12 @@
 import torch
 
 from vllm.config import CacheConfig, ModelConfig, SchedulerConfig
-from vllm.executor.executor_base import ExecutorBase
+from vllm.executor.executor_base import ExecutorAsyncBase, ExecutorBase
 from vllm.logger import init_logger
 from vllm.lora.request import LoRARequest
 from vllm.sequence import SamplerOutput, SequenceGroupMetadata
-from vllm.utils import get_distributed_init_method, get_ip, get_open_port
+from vllm.utils import (get_distributed_init_method, get_ip, get_open_port,
+                        make_async)
 
 logger = init_logger(__name__)
 
@@ -100,6 +101,28 @@ def check_health(self) -> None:
         return
 
 
+class CPUExecutorAsync(CPUExecutor, ExecutorAsyncBase):
+
+    async def execute_model_async(
+        self,
+        seq_group_metadata_list: List[SequenceGroupMetadata],
+        blocks_to_swap_in: Dict[int, int],
+        blocks_to_swap_out: Dict[int, int],
+        blocks_to_copy: Dict[int, List[int]],
+    ) -> SamplerOutput:
+        output = await make_async(self.driver_worker.execute_model)(
+            seq_group_metadata_list=seq_group_metadata_list,
+            blocks_to_swap_in=blocks_to_swap_in,
+            blocks_to_swap_out=blocks_to_swap_out,
+            blocks_to_copy=blocks_to_copy)
+        return output
+
+    async def check_health_async(self) -> None:
+        # CPUExecutor will always be healthy as long as
+        # it's running.
+        return
+
+
 def _verify_and_get_model_config(config: ModelConfig) -> ModelConfig:
     if config.dtype == torch.float16:
         logger.warning("float16 is not supported on CPU, casting to bfloat16.")
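Note: `make_async` is what lets the blocking `driver_worker.execute_model` call be awaited without stalling the async engine's event loop. A minimal sketch of such a helper, assuming it simply offloads the call to the event loop's default thread pool (the actual `vllm.utils.make_async` may differ in details):

import asyncio
from functools import partial
from typing import Awaitable, Callable, TypeVar

T = TypeVar("T")


def make_async(func: Callable[..., T]) -> Callable[..., Awaitable[T]]:
    """Wrap a blocking callable so it can be awaited without blocking the loop."""

    async def _async_wrapper(*args, **kwargs) -> T:
        loop = asyncio.get_running_loop()
        # Bind the arguments up front; run_in_executor only forwards positionals.
        bound = partial(func, *args, **kwargs)
        # Passing None uses the loop's default ThreadPoolExecutor.
        return await loop.run_in_executor(None, bound)

    return _async_wrapper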