We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent b396cb4 · commit be54f8e · Copy full SHA for be54f8e
vllm/entrypoints/api_server.py
@@ -3,7 +3,7 @@
3
from typing import AsyncGenerator
4
5
from fastapi import BackgroundTasks, FastAPI, Request
6
-from fastapi.responses import Response, StreamingResponse
+from fastapi.responses import JSONResponse, Response, StreamingResponse
7
import uvicorn
8
9
from vllm.engine.arg_utils import AsyncEngineArgs
@@ -64,7 +64,7 @@ async def abort_request() -> None:
64
prompt = final_output.prompt
65
text_outputs = [prompt + output.text for output in final_output.outputs]
66
ret = {"text": text_outputs}
67
- return Response(content=json.dumps(ret))
+ return JSONResponse(ret)
68
69
70
if __name__ == "__main__":
0 commit comments