3 files changed: +10 -8 lines changed

File 1 of 3:

@@ -8,7 +8,7 @@
 import numpy as np
 import torch
 
-from vllm.inputs import PromptType
+from vllm.inputs import ProcessorInputs, PromptType
 from vllm.logger import init_logger
 
 if TYPE_CHECKING:
@@ -400,6 +400,7 @@ def validate_request(
         cls,
         prompt: PromptType,
         params: Union[SamplingParams, PoolingParams],
+        processed_inputs: ProcessorInputs,
     ) -> None:
         """Raises if this request is unsupported on this platform"""
 
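The base hook now receives the processed inputs alongside the raw prompt and params. A hedged sketch of a downstream platform overriding the extended hook, assuming a vLLM tree with this change applied; MyPlatform and the params.n check are illustrative, not part of this PR:

from typing import Union

from vllm.inputs import ProcessorInputs, PromptType
from vllm.platforms.interface import Platform
from vllm.pooling_params import PoolingParams
from vllm.sampling_params import SamplingParams


class MyPlatform(Platform):

    @classmethod
    def validate_request(
        cls,
        prompt: PromptType,
        params: Union[SamplingParams, PoolingParams],
        processed_inputs: ProcessorInputs,
    ) -> None:
        """Raises if this request is unsupported on this platform."""
        # With processed_inputs in scope, a platform can inspect the
        # tokenized/processed form of the request, not just the raw prompt.
        if isinstance(params, SamplingParams) and params.n > 1:
            # Illustrative restriction only, not a real limitation.
            raise ValueError("parallel sampling is not supported here")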
File 2 of 3:

@@ -5,7 +5,7 @@
 import torch
 
 import vllm.envs as envs
-from vllm.inputs import PromptType
+from vllm.inputs import ProcessorInputs, PromptType
 from vllm.logger import init_logger
 from vllm.sampling_params import SamplingParams, SamplingType
 
@@ -150,6 +150,7 @@ def validate_request(
         cls,
         prompt: PromptType,
         params: Union[SamplingParams, PoolingParams],
+        processed_inputs: ProcessorInputs,
     ) -> None:
         """Raises if this request is unsupported on this platform"""
         if isinstance(params, SamplingParams):
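The platform-specific override gains the same parameter so it stays call-compatible with the base hook. Since the call site in the third file passes processed_inputs by keyword, any out-of-tree override that keeps the old two-argument signature now fails at call time; a self-contained illustration with stand-in classes (nothing here is vLLM's API):

class OldStylePlatform:
    """Stand-in for an override that was not updated."""

    @classmethod
    def validate_request(cls, prompt, params) -> None:
        pass


try:
    # Mirrors how the engine now invokes the hook (keyword arguments).
    OldStylePlatform.validate_request(
        prompt="hello", params=object(), processed_inputs=object())
except TypeError as exc:
    print(exc)  # ... unexpected keyword argument 'processed_inputs'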
File 3 of 3:

@@ -202,12 +202,6 @@ def process_inputs(
 
         # TODO(woosuk): Support pooling models.
         # TODO(woosuk): Support encoder-decoder models.
-
-        from vllm.platforms import current_platform
-        current_platform.validate_request(
-            prompt=prompt,
-            params=params,
-        )
         self._validate_lora(lora_request)
         self._validate_params(params)
         if priority != 0:
@@ -231,6 +225,12 @@ def process_inputs(
             prompt_adapter_request=prompt_adapter_request,
             return_mm_hashes=self.use_hash,
         )
+        from vllm.platforms import current_platform
+        current_platform.validate_request(
+            prompt=prompt,
+            params=params,
+            processed_inputs=processed_inputs,
+        )
         eos_token_id = self.input_preprocessor.get_eos_token_id(lora_request)
 
         self._validate_model_inputs(processed_inputs, lora_request)
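The call to current_platform.validate_request moves from before input preprocessing to after it, so the hook can reject a request based on its processed form rather than only the raw prompt. A minimal, self-contained mock of the new ordering; every name below is a stand-in, not vLLM code:

from dataclasses import dataclass


@dataclass
class FakeProcessorInputs:
    prompt_token_ids: list[int]


def preprocess(prompt: str) -> FakeProcessorInputs:
    # Stand-in for self.input_preprocessor.preprocess(...).
    return FakeProcessorInputs(
        prompt_token_ids=[len(word) for word in prompt.split()])


def validate_request(prompt: str,
                     processed_inputs: FakeProcessorInputs) -> None:
    # A platform can now bound the *tokenized* length, which the old
    # call site (running before preprocessing) could not see.
    if len(processed_inputs.prompt_token_ids) > 8:
        raise ValueError("request too long for this platform")


def process_inputs(prompt: str) -> FakeProcessorInputs:
    processed = preprocess(prompt)       # 1. tokenize/process the prompt
    validate_request(prompt, processed)  # 2. platform validation (moved here)
    return processed                     # 3. continue with validated inputs


print(process_inputs("a short prompt").prompt_token_ids)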