Skip to content

Commit 15e0f8c

Browse files
committed
chore: switch to agentic llm
1 parent c2d279c commit 15e0f8c

File tree

5 files changed

+15
-10
lines changed

5 files changed

+15
-10
lines changed

backend/config.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@ modules:
1010
chat_openai: chat_openai
1111
session: "session"
1212
chat_openai:
13-
class: modai.modules.chat.openai_raw_chat.OpenAILLMChatModule
13+
class: modai.modules.chat.openai_agent_chat.StrandsAgentChatModule
1414
module_dependencies:
1515
llm_provider_module: openai_model_provider
1616
model_provider_store:

backend/src/modai/default_config.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ modules:
99
module_dependencies:
1010
chat_openai: chat_openai
1111
chat_openai:
12-
class: modai.modules.chat.openai_raw_chat.OpenAILLMChatModule
12+
class: modai.modules.chat.openai_agent_chat.StrandsAgentChatModule
1313
module_dependencies:
1414
llm_provider_module: openai_model_provider
1515
model_provider_store:

backend/src/modai/modules/chat/__tests__/test_chat.py renamed to backend/src/modai/modules/chat/__tests__/test_openai_raw_chat.py

File renamed without changes.

backend/src/modai/modules/chat/module.py

Lines changed: 11 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,13 @@
88
from fastapi.responses import StreamingResponse
99
from typing import Any, AsyncGenerator
1010
from modai.module import ModaiModule, ModuleDependencies
11-
import openai
11+
from openai.types.responses import (
12+
Response as OpenAIResponse,
13+
ResponseStreamEvent as OpenAIResponseStreamEvent,
14+
)
15+
from openai.types.responses.response_create_params import (
16+
ResponseCreateParams as OpenAICreateResponse,
17+
)
1218

1319

1420
class ChatWebModule(ModaiModule, ABC):
@@ -33,8 +39,8 @@ def __init__(self, dependencies: ModuleDependencies, config: dict[str, Any]):
3339
async def responses_endpoint(
3440
self,
3541
request: Request,
36-
body_json: openai.types.responses.ResponseCreateParams = Body(...),
37-
) -> openai.types.responses.Response | StreamingResponse:
42+
body_json: OpenAICreateResponse = Body(...),
43+
) -> OpenAIResponse | StreamingResponse:
3844
"""
3945
Handles responses requests. Must be implemented by concrete implementations.
4046
Fully OpenAI /responses API compatible.
@@ -61,11 +67,8 @@ def __init__(self, dependencies: ModuleDependencies, config: dict[str, Any]):
6167

6268
@abstractmethod
6369
async def generate_response(
64-
self, request: Request, body_json: openai.types.responses.ResponseCreateParams
65-
) -> (
66-
openai.types.responses.Response
67-
| AsyncGenerator[openai.types.responses.ResponseStreamEvent, None]
68-
):
70+
self, request: Request, body_json: OpenAICreateResponse
71+
) -> OpenAIResponse | AsyncGenerator[OpenAIResponseStreamEvent, None]:
6972
"""
7073
Generate a streaming or non-streaming chat response.
7174

backend/src/modai/modules/chat/openai_raw_chat.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,8 @@
1414
)
1515

1616

17+
# The module is currently unused, as I'm unsure whether we should support raw LLM access, since the agentic
18+
# chat is closer to what a chat backend needs
1719
class OpenAILLMChatModule(ChatLLMModule):
1820
"""
1921
OpenAI LLM Provider for Chat Responses.

0 commit comments

Comments
 (0)