Skip to content

Commit a150791

Browse files
committed
Return reason in error message as to why LoRA adapter could not be loaded
The reason why a LoRA adapter could not be loaded may include information from model validation, such as that model signature verification did not succeed because unsigned files were found. Signed-off-by: Stefan Berger <[email protected]>
1 parent 1629e87 commit a150791

File tree

1 file changed

+4
-1
lines changed

1 file changed

+4
-1
lines changed

vllm/entrypoints/openai/serving_models.py

Lines changed: 4 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -249,6 +249,7 @@ async def resolve_lora(self, lora_name: str) -> Union[LoRARequest, ErrorResponse
249249
base_model_name = self.model_config.model
250250
unique_id = self.lora_id_counter.inc(1)
251251
found_adapter = False
252+
reason = ""
252253

253254
# Try to resolve using available resolvers
254255
for resolver in self.lora_resolvers:
@@ -275,13 +276,15 @@ async def resolve_lora(self, lora_name: str) -> Union[LoRARequest, ErrorResponse
275276
resolver.__class__.__name__,
276277
e,
277278
)
279+
reason = str(e)
278280
continue
279281

280282
if found_adapter:
281283
# An adapter was found, but all attempts to load it failed.
282284
return create_error_response(
283285
message=(
284-
f"LoRA adapter '{lora_name}' was found but could not be loaded."
286+
f"LoRA adapter '{lora_name}' was found "
287+
f"but could not be loaded: {reason}"
285288
),
286289
err_type="BadRequestError",
287290
status_code=HTTPStatus.BAD_REQUEST,

0 commit comments

Comments (0)