
Commit dc813b3

refactor(api): consolidate API base URL configuration
- Move VITE_API_URL configuration from main.tsx to core/OpenAPI.ts
- Ensure consistent API base URL across SDK and streaming clients
- Fix 404 errors in streaming client by setting base URL earlier

Note: A known issue remains where streamed text appears briefly in the chat UI before being overwritten by an error message. This is likely due to unclosed SSE connections, as evidenced by stream termination errors such as:

curl: (18) transfer closed with outstanding read data remaining
1 parent 2f5fa23 commit dc813b3
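
The core/OpenAPI.ts change itself does not appear in the diffs below, but per the commit message the consolidation amounts to setting the SDK's base URL in the client config module rather than in main.tsx at startup. A minimal sketch of that idea, assuming a Vite app and the OpenAPI config object the generated SDK already exposes (the exact assignment is an assumption, not the committed code):

// Hypothetical sketch, not the committed diff: assigning BASE at module
// scope means it is configured the moment the client module is imported,
// before any SDK call or streaming fetch can fire. The old main.tsx
// assignment could run after early imports had already issued requests
// against the default (empty) base URL, producing the 404s noted above.
import { OpenAPI } from './index';

OpenAPI.BASE = import.meta.env.VITE_API_URL;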

File tree: 4 files changed, +191 −83 lines
backend/app/api/routes/learn.py

Lines changed: 102 additions & 24 deletions
@@ -6,12 +6,13 @@
 from app.core.ai_client import ChatManager, AnthropicClient, OpenAIClient
 from app.core.config import settings
 
-router = APIRouter(tags=["learn"])
+router = APIRouter(prefix="/learn", tags=["learn"])
 
 # Store chat managers in memory for now
 active_chats: dict[str, ChatManager] = {}
 
 class ChatRequest(BaseModel):
+    """Request model for chat endpoints."""
     message: str
     system_prompt: str | None = None
     model: Literal["anthropic", "openai"] = "anthropic"
@@ -26,7 +27,30 @@ class TestResponse(BaseModel):
     openai_model: str
     test_message: str | None = None
 
-@router.get("/learn/test", response_model=TestResponse)
+class ChatStreamResponse(BaseModel):
+    """Response model for individual stream messages."""
+    type: Literal["content"]
+    content: str
+
+class ChatMessageResponse(BaseModel):
+    """Response model for non-streaming chat messages."""
+    message: str
+
+class ChatStreamRequest(BaseModel):
+    """Request model for the chat streaming endpoint."""
+    message: str
+    system_prompt: str | None = None
+    model: Literal["anthropic", "openai"] = "anthropic"
+
+    class Config:
+        schema_extra = {
+            'example': {
+                'message': 'Write a haiku about coding',
+                'model': 'anthropic'
+            }
+        }
+
+@router.get("/test", response_model=TestResponse)
 async def test_configuration():
     """Test the LLM configuration and basic functionality."""
     response = TestResponse(
@@ -50,12 +74,14 @@ async def test_configuration():
 
     return response
 
-@router.post("/learn/chat", response_model=ChatResponse)
+@router.post("/chat", response_model=ChatMessageResponse)
 async def chat_general(
     request: ChatRequest,
     current_user = Depends(deps.get_current_user),
 ):
-    """General purpose chat endpoint without path context."""
+    """
+    Send a message to the AI and get a response.
+    """
     chat_key = f"{current_user.id}_general"
     if chat_key not in active_chats:
         active_chats[chat_key] = ChatManager(client=request.model)
@@ -65,14 +91,48 @@ async def chat_general(
         system=request.system_prompt
     )
 
-    return ChatResponse(message=response)
+    return ChatMessageResponse(message=response)
 
-@router.post("/learn/chat/stream")
+@router.post("/chat/stream",
+    response_class=StreamingResponse,
+    openapi_extra={
+        'responses': {
+            '200': {
+                'description': 'Streaming response',
+                'headers': {
+                    'Transfer-Encoding': {
+                        'schema': {
+                            'type': 'string',
+                            'enum': ['chunked']
+                        }
+                    }
+                },
+                'content': {
+                    'text/event-stream': {
+                        'schema': {
+                            'type': 'object',
+                            'properties': {
+                                'type': {'type': 'string', 'enum': ['content']},
+                                'content': {'type': 'string'}
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+)
 async def chat_stream(
-    request: ChatRequest,
+    request: ChatStreamRequest,
     current_user = Depends(deps.get_current_user),
-):
-    """Streaming chat endpoint."""
+) -> StreamingResponse:
+    """
+    Send a message to the AI and get a streaming response.
+    Returns a StreamingResponse with Server-Sent Events containing partial messages.
+
+    The stream will emit events in the format:
+    data: {"type": "content", "content": "partial message..."}
+    """
     chat_key = f"{current_user.id}_general"
     if chat_key not in active_chats:
         active_chats[chat_key] = ChatManager(client=request.model)
@@ -82,28 +142,46 @@ async def chat_stream(
             request.message,
             system=request.system_prompt
         ),
-        media_type='text/event-stream',
-        headers={
-            'Cache-Control': 'no-cache',
-            'Connection': 'keep-alive',
-            'X-Accel-Buffering': 'no'  # Disable buffering in nginx
-        }
+        media_type='text/event-stream'
     )
 
-@router.post("/learn/{path_id}", response_model=ChatResponse)
-async def chat(
+@router.post("/{path_id}/chat/stream",
+    openapi_extra={
+        'responses': {
+            '200': {
+                'description': 'Streaming response',
+                'content': {
+                    'text/event-stream': {
+                        'schema': {
+                            'type': 'object',
+                            'properties': {
+                                'type': {'type': 'string', 'enum': ['content']},
+                                'content': {'type': 'string'}
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+)
+async def path_chat_stream(
     path_id: str,
     request: ChatRequest,
     current_user = Depends(deps.get_current_user),
-):
-    """Path-specific chat endpoint that maintains conversation context for each path."""
+) -> StreamingResponse:
+    """
+    Path-specific chat endpoint that maintains conversation context for each path.
+    Returns a StreamingResponse with Server-Sent Events containing partial messages.
+    """
     chat_key = f"{current_user.id}_{path_id}"
     if chat_key not in active_chats:
         active_chats[chat_key] = ChatManager(client=request.model)
 
-    response = await active_chats[chat_key].send_message(
-        request.message,
-        system=request.system_prompt
+    return StreamingResponse(
+        active_chats[chat_key].stream_message(
+            request.message,
+            system=request.system_prompt
+        ),
+        media_type='text/event-stream'
     )
-
-    return ChatResponse(message=response)
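
The openapi_extra blocks above document the SSE payload schema ({"type": "content", "content": "..."}) in the OpenAPI spec, but the generated SDK does not emit a runtime type for it. If the frontend wants a typed mirror of ChatStreamResponse, something like the following would work (hypothetical helper, not part of this commit):

// Hypothetical TypeScript mirror of the ChatStreamResponse model documented
// in openapi_extra above; not generated by the SDK and not in this commit.
interface ChatStreamEvent {
  type: 'content';
  content: string;
}

// Parse one SSE "data: ..." line into a typed event, returning null for
// anything that does not match the documented {type, content} shape.
function parseStreamEvent(line: string): ChatStreamEvent | null {
  if (!line.startsWith('data: ')) return null;
  try {
    const data = JSON.parse(line.slice(6));
    return data.type === 'content' && typeof data.content === 'string'
      ? (data as ChatStreamEvent)
      : null;
  } catch {
    return null;
  }
}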

backend/app/core/ai_client.py

Lines changed: 0 additions & 6 deletions
@@ -106,9 +106,3 @@ async def stream_message(self, content: str, system: str | None = None) -> Async
         self.add_message("user", content)
         async for chunk in self.client.chat_stream(self.history, system):
             yield chunk
-        # Add the complete message to history after streaming
-        if self.history[-1]["role"] == "user":
-            last_chunk = None
-            async for chunk in self.client.chat_stream(self.history, system):
-                last_chunk = chunk
-            self.add_message("assistant", last_chunk)  # Add the last chunk as the complete response
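
The deleted block above re-invoked client.chat_stream after the first stream finished, issuing a second full model call just to capture a "complete" message. The usual fix is to accumulate chunks during the single pass and record the concatenation at the end. A sketch of that pattern (in TypeScript for consistency with the client code in this commit; hypothetical, not part of the commit):

// Hypothetical sketch: record the complete assistant message from the same
// pass that streams chunks to the consumer, instead of re-running the stream.
async function* streamAndRecord(
  source: AsyncIterable<string>,
  record: (fullMessage: string) => void,
): AsyncGenerator<string> {
  let full = '';
  for await (const chunk of source) {
    full += chunk; // accumulate while forwarding each chunk unchanged
    yield chunk;
  }
  record(full); // one history write, after the single streaming pass ends
}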

frontend/src/client/streamingClient.ts (file name inferred from the import in chat.tsx below)

Lines changed: 70 additions & 0 deletions
@@ -0,0 +1,70 @@
+import { OpenAPI } from './index';
+
+interface ChatStreamRequest {
+  message: string;
+  system_prompt?: string;
+  model?: 'anthropic' | 'openai';
+}
+
+export class StreamingError extends Error {
+  constructor(public status: number, message: string) {
+    super(message);
+    this.name = 'StreamingError';
+  }
+}
+
+export async function* createChatStream(request: ChatStreamRequest) {
+  // Debug logging
+  console.log('OpenAPI config:', {
+    BASE: OpenAPI.BASE,
+    TOKEN: OpenAPI.TOKEN
+  });
+
+  const url = `${OpenAPI.BASE}/api/v1/learn/chat/stream`;
+  console.log('Request URL:', url);
+
+  const response = await fetch(url, {
+    method: 'POST',
+    headers: {
+      'Content-Type': 'application/json',
+      'Accept': 'text/event-stream',
+      'Authorization': `Bearer ${localStorage.getItem('access_token')}`,
+    },
+    body: JSON.stringify(request)
+  });
+
+  if (!response.ok) {
+    throw new StreamingError(
+      response.status,
+      `HTTP error! status: ${response.status}`
+    );
+  }
+
+  const reader = response.body!.getReader();
+  const decoder = new TextDecoder();
+
+  try {
+    while (true) {
+      const { done, value } = await reader.read();
+      if (done) break;
+
+      const chunk = decoder.decode(value);
+      const lines = chunk.split('\n');
+
+      for (const line of lines) {
+        if (line.startsWith('data: ')) {
+          try {
+            const data = JSON.parse(line.slice(6));
+            if (data.type === 'content' && data.content) {
+              yield data.content;
+            }
+          } catch (e) {
+            console.error('Error parsing SSE data:', e);
+          }
+        }
+      }
+    }
+  } finally {
+    reader.releaseLock();
+  }
+}
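
The commit note attributes the flash-then-error behavior to unclosed SSE connections, and curl: (18) does indicate a transfer ending with data outstanding. One candidate in the generator above: the finally block only calls reader.releaseLock(), which detaches the reader but leaves the underlying HTTP body open if the consumer exits the for await loop early. A hedged sketch of stricter cleanup (an assumption, not the committed code):

// Sketch of stricter stream cleanup (an assumption, not part of this commit).
// reader.cancel() aborts the underlying HTTP body, so the server observes a
// clean close instead of a half-open SSE connection; releaseLock() alone
// only detaches the reader and leaves the body open.
async function* readChunks(response: Response): AsyncGenerator<string> {
  const reader = response.body!.getReader();
  const decoder = new TextDecoder();
  try {
    while (true) {
      const { done, value } = await reader.read();
      if (done) break;
      yield decoder.decode(value, { stream: true });
    }
  } finally {
    // Runs on completion, on error, and when the caller breaks out of a
    // for-await loop (which triggers the generator's return path).
    await reader.cancel().catch(() => {}); // no-op if the stream is done
    reader.releaseLock();
  }
}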

frontend/src/routes/_authenticated/learn/chat.tsx

Lines changed: 19 additions & 53 deletions
@@ -10,6 +10,7 @@ import {
 import { createFileRoute } from "@tanstack/react-router"
 import { useState, useRef, useEffect } from "react"
 import { FiSend } from "react-icons/fi"
+import { createChatStream, StreamingError } from '@/client/streamingClient'
 
 interface ChatMessage {
   id: string
@@ -81,68 +82,33 @@ function ChatRoute() {
     setMessages(prev => [...prev, assistantMessage])
 
     try {
-      const response = await fetch('/api/v1/learn/chat/stream', {
-        method: 'POST',
-        headers: {
-          'Content-Type': 'application/json',
-          'Accept': 'text/event-stream',
-          'Authorization': `Bearer ${localStorage.getItem('access_token')}`,
-        },
-        body: JSON.stringify({
-          message: currentMessage,
-          model: "anthropic"
-        })
-      })
-
-      if (!response.ok) {
-        const errorData = await response.json()
-        throw new Error(`HTTP error! status: ${response.status}, message: ${errorData.detail || 'Unknown error'}`)
-      }
-
-      const reader = response.body?.getReader()
-      if (!reader) throw new Error('No reader available')
-
-      let streamedContent = ""
-      const decoder = new TextDecoder()
-
-      while (true) {
-        const {done, value} = await reader.read()
-        if (done) break
-
-        const chunk = decoder.decode(value)
-        const lines = chunk.split('\n')
-
-        for (const line of lines) {
-          if (line.startsWith('data: ')) {
-            try {
-              const data = JSON.parse(line.slice(6))
-              if (data.type === 'content' && data.content) {
-                streamedContent += data.content
-                setMessages(prev =>
-                  prev.map(msg =>
-                    msg.id === assistantMessage.id
-                      ? { ...msg, content: streamedContent }
-                      : msg
-                  )
-                )
-              }
-            } catch (e) {
-              console.error('Error parsing SSE data:', e)
-            }
-          }
-        }
+      let streamedContent = "";
+      const stream = createChatStream({
+        message: currentMessage,
+        model: "anthropic"
+      });
+
+      for await (const content of stream) {
+        streamedContent += content;
+        setMessages(prev =>
+          prev.map(msg =>
+            msg.id === assistantMessage.id
+              ? { ...msg, content: streamedContent }
+              : msg
+          )
+        );
       }
     } catch (error) {
-      console.error('Error:', error)
+      console.error('Error:', error);
       setMessages(prev =>
         prev.map(msg =>
          msg.id === assistantMessage.id
            ? { ...msg, content: "Sorry, there was an error processing your request." }
            : msg
        )
-      )
+      );
     } finally {
-      setIsLoading(false)
+      setIsLoading(false);
     }
   }
 
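
The known issue from the commit message is visible in the catch block above: it replaces whatever partial content has already streamed into the assistant message with a generic error string. A hedged alternative (hypothetical, not part of this commit) is to keep the partial text and append an interruption marker:

// Hypothetical helper (not part of this commit): fold a stream error into
// the assistant message without discarding partial streamed content.
function applyStreamError(messages: ChatMessage[], id: string): ChatMessage[] {
  return messages.map(msg =>
    msg.id !== id
      ? msg
      : {
          ...msg,
          content: msg.content
            ? `${msg.content}\n\n[Response interrupted by a connection error.]`
            : "Sorry, there was an error processing your request.",
        }
  );
}

// Usage in the catch block above:
//   setMessages(prev => applyStreamError(prev, assistantMessage.id));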
