|
-from typing import Any
-
 try:
     from fastapi import APIRouter, HTTPException
     from fastapi.responses import StreamingResponse
|
 from pydantic_ai.fastapi.registry import AgentRegistry


-class AgentAPIRouter(APIRouter):
-    """FastAPI Router for Pydantic Agent."""
-
-    def __init__(
-        self,
-        agent_registry: AgentRegistry,
-        disable_response_api: bool = False,
-        disable_completions_api: bool = False,
-        *args: Any,
-        **kwargs: Any,
-    ):
-        super().__init__(*args, **kwargs)
-        self.registry = agent_registry
-        self.responses_api = AgentResponsesAPI(self.registry)
-        self.completions_api = AgentChatCompletionsAPI(self.registry)
-        self.models_api = AgentModelsAPI(self.registry)
-        self.enable_responses_api = not disable_response_api
-        self.enable_completions_api = not disable_completions_api
+def create_agent_router(
+    agent_registry: AgentRegistry,
+    disable_responses_api: bool = False,
+    disable_completions_api: bool = False,
+    api_router: APIRouter | None = None,
+) -> APIRouter:
+    """Create a FastAPI router that exposes registered Pydantic AI agents as OpenAI-compatible endpoints."""
+    if api_router is None:
+        api_router = APIRouter()
+    responses_api = AgentResponsesAPI(agent_registry)
+    completions_api = AgentChatCompletionsAPI(agent_registry)
+    models_api = AgentModelsAPI(agent_registry)
+    enable_responses_api = not disable_responses_api
+    enable_completions_api = not disable_completions_api

-        # Registers OpenAI/v1 API routes
-        self._register_routes()
+    if enable_completions_api:

-    def _register_routes(self) -> None:
-        if self.enable_completions_api:
+        @api_router.post('/v1/chat/completions', response_model=ChatCompletion)
+        async def chat_completions(  # type: ignore[reportUnusedFunction]
+            request: ChatCompletionRequest,
+        ) -> ChatCompletion | StreamingResponse:
+            if getattr(request, 'stream', False):
+                return StreamingResponse(
+                    completions_api.create_streaming_completion(request),
+                    media_type='text/event-stream',
+                    headers={
+                        'Cache-Control': 'no-cache',
+                        'Connection': 'keep-alive',
+                        'Content-Type': 'text/plain; charset=utf-8',
+                    },
+                )
+            else:
+                return await completions_api.create_completion(request)

-            @self.post('/v1/chat/completions', response_model=ChatCompletion)
-            async def chat_completions(
-                request: ChatCompletionRequest,
-            ) -> ChatCompletion | StreamingResponse:
-                if getattr(request, 'stream', False):
-                    return StreamingResponse(
-                        self.completions_api.create_streaming_completion(request),
-                        media_type='text/event-stream',
-                        headers={
-                            'Cache-Control': 'no-cache',
-                            'Connection': 'keep-alive',
-                            'Content-Type': 'text/plain; charset=utf-8',
-                        },
-                    )
-                else:
-                    return await self.completions_api.create_completion(request)
+    if enable_responses_api:

-        if self.enable_responses_api:
+        @api_router.post('/v1/responses', response_model=OpenAIResponse)
+        async def responses(  # type: ignore[reportUnusedFunction]
+            request: ResponsesRequest,
+        ) -> OpenAIResponse:
+            if getattr(request, 'stream', False):
+                # TODO: add streaming support for responses api
+                raise HTTPException(status_code=501)
+            else:
+                return await responses_api.create_response(request)

-            @self.post('/v1/responses', response_model=OpenAIResponse)
-            async def responses(
-                request: ResponsesRequest,
-            ) -> OpenAIResponse:
-                if getattr(request, 'stream', False):
-                    # TODO: add streaming support for responses api
-                    raise HTTPException(status_code=501)
-                else:
-                    return await self.responses_api.create_response(request)
+    @api_router.get('/v1/models', response_model=ModelsResponse)
+    async def get_models() -> ModelsResponse:  # type: ignore[reportUnusedFunction]
+        return await models_api.list_models()

-        @self.get('/v1/models', response_model=ModelsResponse)
-        async def get_models() -> ModelsResponse:
-            return await self.models_api.list_models()
+    @api_router.get('/v1/models/{model_id}', response_model=Model)
+    async def get_model(model_id: str) -> Model:  # type: ignore[reportUnusedFunction]
+        return await models_api.get_model(model_id)

-        @self.get('/v1/models' + '/{model_id}', response_model=Model)
-        async def get_model(model_id: str) -> Model:
-            return await self.models_api.get_model(model_id)
+    return api_router
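
For anyone trying the change out, the factory might be wired into an application roughly as follows. This is a minimal sketch, not part of the diff: the import path for create_agent_router and the AgentRegistry construction and registration calls are assumptions, since neither is shown here.

from fastapi import FastAPI

from pydantic_ai.fastapi.registry import AgentRegistry
from pydantic_ai.fastapi.router import create_agent_router  # assumed module path

app = FastAPI()

# Assumed: a no-arg constructor and some registration call, e.g.
# registry.register('assistant', my_agent); the real AgentRegistry
# API is defined outside this diff.
registry = AgentRegistry()

# Mount the OpenAI-compatible routes built by the new factory; pass
# disable_responses_api=True to skip the not-yet-streaming /v1/responses route.
app.include_router(create_agent_router(registry))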
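Once mounted, the routes should be reachable with the stock OpenAI Python client pointed at the local server. The snippet below sketches the model listing plus the non-streaming and streaming chat-completions paths; the model id, port, and API key are placeholders.

from openai import OpenAI

client = OpenAI(base_url='http://localhost:8000/v1', api_key='unused')

# GET /v1/models -> served by get_models()
for model in client.models.list():
    print(model.id)

# POST /v1/chat/completions, non-streaming -> create_completion()
resp = client.chat.completions.create(
    model='assistant',  # placeholder: an id the registry actually serves
    messages=[{'role': 'user', 'content': 'Hello!'}],
)
print(resp.choices[0].message.content)

# stream=True exercises the StreamingResponse/SSE branch
for chunk in client.chat.completions.create(
    model='assistant',
    messages=[{'role': 'user', 'content': 'Hello!'}],
    stream=True,
):
    if chunk.choices and chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end='')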