Commit 51756e2

support streaming and question details and language and code editor info
1 parent 18457c7 commit 51756e2

8 files changed: +320 -63 lines

Lines changed: 89 additions & 10 deletions

@@ -1,27 +1,106 @@
 import type { Request, Response } from 'express';
 import { StatusCodes } from 'http-status-codes';
+import OpenAI from 'openai';

-import { getOpenAIResponse } from '@/service/post/openai-service';
+const openai = new OpenAI({
+  apiKey: process.env.OPENAI_API_KEY,
+});
+
+interface OpenAIMessage {
+  role: 'system' | 'user' | 'assistant';
+  content: string;
+}
+
+const createSystemMessage = (
+  editorCode?: string,
+  language?: string,
+  questionDetails?: any
+): OpenAIMessage => ({
+  role: 'system',
+  content: `You are a helpful coding assistant.
+You are helping a user with a coding problem.
+${questionDetails ? `\nQuestion Context:\n${JSON.stringify(questionDetails, null, 2)}` : ''}
+${editorCode ? `\nCurrent Code (${language || 'unknown'}):\n${editorCode}` : ''}
+Provide detailed help while referring to their specific code and question context when available.`,
+});

 export async function queryOpenAI(req: Request, res: Response) {
-  const { messages } = req.body;
+  const { messages, editorCode, language, questionDetails } = req.body;
+  const isStreaming = req.headers['accept'] === 'text/event-stream';

-  // Ensure 'messages' array is provided
   if (!messages || !Array.isArray(messages)) {
     return res.status(StatusCodes.BAD_REQUEST).json({
       error: 'Invalid request: messages array is required.',
     });
   }

   try {
-    const result = await getOpenAIResponse(messages);
+    const systemMessage = createSystemMessage(editorCode, language, questionDetails);
+    const allMessages = [systemMessage, ...messages];
+
+    if (isStreaming) {
+      // Set up streaming response headers
+      res.setHeader('Content-Type', 'text/event-stream');
+      res.setHeader('Cache-Control', 'no-cache');
+      res.setHeader('Connection', 'keep-alive');
+
+      // Create streaming completion
+      const stream = await openai.chat.completions.create({
+        model: 'gpt-3.5-turbo',
+        messages: allMessages,
+        stream: true,
+      });
+
+      // Handle streaming response
+      for await (const chunk of stream) {
+        const content = chunk.choices[0]?.delta?.content || '';
+
+        if (content) {
+          // Send the chunk in SSE format
+          res.write(`data: ${content}\n\n`);
+        }
+      }

-    return res.status(StatusCodes.OK).json(result);
+      // End the response
+      res.end();
+    } else {
+      // Non-streaming response
+      const completion = await openai.chat.completions.create({
+        model: 'gpt-3.5-turbo',
+        messages: allMessages,
+      });
+
+      const responseMessage = completion.choices[0]?.message?.content;
+
+      if (!responseMessage) {
+        throw new Error('No valid response from OpenAI');
+      }
+
+      return res.status(StatusCodes.OK).json({
+        success: true,
+        message: responseMessage,
+      });
+    }
   } catch (err) {
-    return res.status(StatusCodes.INTERNAL_SERVER_ERROR).json({
-      success: false,
-      message: 'An error occurred while querying OpenAI',
-      error: err,
-    });
+    console.error('OpenAI API Error:', err);
+
+    // If headers haven't been sent yet, send error response
+    if (!res.headersSent) {
+      return res.status(StatusCodes.INTERNAL_SERVER_ERROR).json({
+        success: false,
+        message: 'An error occurred while querying OpenAI',
+        error: err instanceof Error ? err.message : 'Unknown error',
+      });
+    } else {
+      // If we were streaming, end the response
+      res.end();
+    }
   }
+
+  // Handle client disconnection
+  req.on('close', () => {
+    if (isStreaming && !res.writableEnded) {
+      res.end();
+    }
+  });
 }
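
For orientation, here is a minimal sketch of how a client could consume this endpoint. It is hypothetical and not part of this commit: the fetchAIStream name is invented, the path assumes the /chat/stream route added below, and the parsing assumes each token arrives framed as `data: <text>\n\n`, matching the res.write call above. A production parser would also buffer partial lines, since a network chunk can split an SSE frame.

// Hypothetical client-side consumer (not in this commit). Assumes the
// /chat/stream route below and the `data: <token>\n\n` framing used by
// res.write() in the handler above.
async function fetchAIStream(
  payload: unknown,
  onChunk: (accumulated: string) => void
): Promise<string> {
  const response = await fetch('/chat/stream', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      // The controller branches on this header, not on the URL.
      Accept: 'text/event-stream',
    },
    body: JSON.stringify(payload),
  });
  if (!response.ok || !response.body) throw new Error('Stream request failed');

  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let accumulated = '';
  for (;;) {
    const { done, value } = await reader.read();
    if (done) break;
    // Naive SSE parsing: keep only the payload of `data: ` lines.
    for (const line of decoder.decode(value, { stream: true }).split('\n')) {
      if (line.startsWith('data: ')) {
        accumulated += line.slice('data: '.length);
        onChunk(accumulated);
      }
    }
  }
  return accumulated;
}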

backend/collaboration/src/routes/chat.ts

Lines changed: 1 addition & 0 deletions

@@ -4,6 +4,7 @@ import { queryOpenAI } from '@/controller/openai-controller';

 const router = express.Router();

+router.post('/chat/stream', queryOpenAI);
 router.post('/chat', queryOpenAI);

 export default router;
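
Note that both routes point at the same handler: queryOpenAI selects the streaming or JSON path from the request's Accept header (text/event-stream), not from the URL, so /chat/stream and /chat differ only in intent.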
Lines changed: 56 additions & 13 deletions

@@ -1,3 +1,5 @@
+import { EventEmitter } from 'events';
+
 import OpenAI from 'openai';

 const openai = new OpenAI({
@@ -9,21 +11,33 @@ interface OpenAIMessage {
   content: string;
 }

-export async function getOpenAIResponse(messages: OpenAIMessage[]) {
+interface OpenAIRequest {
+  messages: OpenAIMessage[];
+  editorCode?: string;
+  language?: string;
+  questionDetails?: string;
+}
+
+// Helper to create system message with context
+const createSystemMessage = (editorCode?: string, language?: string, questionDetails?: string) => {
+  return {
+    role: 'system' as const,
+    content: `You are a helpful coding assistant.
+You are helping a user with a coding problem.
+${questionDetails ? `\nQuestion Context:\n${JSON.stringify(questionDetails, null, 2)}` : ''}
+${editorCode ? `\nCurrent Code (${language || 'unknown'}):\n${editorCode}` : ''}
+Provide detailed help while referring to their specific code and question context when available.`,
+  };
+};
+
+// Regular response function
+export async function getOpenAIResponse(request: OpenAIRequest) {
+  const { messages, editorCode, language, questionDetails } = request;
+
   try {
     const response = await openai.chat.completions.create({
-      model: 'gpt-3.5-turbo', // or the desired model
-      messages: [
-        {
-          role: 'system',
-          content:
-            `You are a helpful coding assistant. ` +
-            `You are helping a user with a coding problem. ` +
-            `Provide tips to the user on solving the problem ` +
-            `but do NOT provide the solution directly.`,
-        },
-        ...messages,
-      ],
+      model: 'gpt-3.5-turbo',
+      messages: [createSystemMessage(editorCode, language, questionDetails), ...messages],
     });

     if (response.choices && response.choices[0].message) {
@@ -38,3 +52,32 @@ export async function getOpenAIResponse(messages: OpenAIMessage[]) {
     throw new Error((error as Error)?.message || 'Failed to query OpenAI');
   }
 }
+
+// Streaming response function
+export async function getOpenAIStreamResponse(request: OpenAIRequest): Promise<EventEmitter> {
+  const { messages, editorCode, language, questionDetails } = request;
+  const stream = new EventEmitter();
+
+  try {
+    const response = await openai.chat.completions.create({
+      model: 'gpt-3.5-turbo',
+      messages: [createSystemMessage(editorCode, language, questionDetails), ...messages],
+      stream: true,
+    });
+
+    // Process the streaming response
+    for await (const chunk of response) {
+      const content = chunk.choices[0]?.delta?.content || '';
+
+      if (content) {
+        stream.emit('data', content);
+      }
+    }
+
+    stream.emit('end');
+  } catch (error) {
+    stream.emit('error', error);
+  }
+
+  return stream;
+}
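
A caveat for callers of getOpenAIStreamResponse: the function awaits the entire completion loop before resolving, so every 'data' event has already fired by the time `await getOpenAIStreamResponse(...)` returns, and listeners attached to the resolved emitter see nothing. A listener-safe variant (a sketch only, not code from this commit; the name is hypothetical) returns the emitter synchronously and lets the loop run in the background:

// Sketch only: same behaviour as getOpenAIStreamResponse above, but the
// emitter is returned before any event fires, so callers can attach
// 'data'/'end'/'error' handlers first.
export function getOpenAIStreamEmitter(request: OpenAIRequest): EventEmitter {
  const { messages, editorCode, language, questionDetails } = request;
  const stream = new EventEmitter();

  void (async () => {
    try {
      const response = await openai.chat.completions.create({
        model: 'gpt-3.5-turbo',
        messages: [createSystemMessage(editorCode, language, questionDetails), ...messages],
        stream: true,
      });
      for await (const chunk of response) {
        const content = chunk.choices[0]?.delta?.content || '';
        if (content) stream.emit('data', content);
      }
      stream.emit('end');
    } catch (error) {
      stream.emit('error', error);
    }
  })();

  return stream;
}

A caller would then write `getOpenAIStreamEmitter(request).on('data', handler)` and receive every token.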

frontend/src/components/blocks/interview/ai-chat.tsx

Lines changed: 78 additions & 16 deletions

@@ -1,50 +1,112 @@
-import React, { useState } from 'react';
+import { type LanguageName } from '@uiw/codemirror-extensions-langs';
+import React, { useRef,useState } from 'react';

 import { sendChatMessage } from '@/services/collab-service';

 import { ChatLayout } from './chat/chat-layout';
 import { ChatMessageType } from './chat/chat-message';

-// Types for OpenAI API
-// interface OpenAIMessage {
-//   role: 'user' | 'assistant';
-//   content: string;
-// }
-
 interface AIChatProps {
   isOpen: boolean;
   onClose: () => void;
+  editorCode?: string;
+  language?: LanguageName;
+  questionDetails?: string;
 }

-export const AIChat: React.FC<AIChatProps> = ({ isOpen, onClose }) => {
+export const AIChat: React.FC<AIChatProps> = ({
+  isOpen,
+  onClose,
+  editorCode = '',
+  language = 'typescript',
+  questionDetails = '',
+}) => {
   const [messages, setMessages] = useState<ChatMessageType[]>([]);
   const [isLoading, setIsLoading] = useState<boolean>(false);
   const [error, setError] = useState<string | null>(null);
+  const streamingTextRef = useRef<string>('');

   const handleSend = async (userMessage: string): Promise<void> => {
     if (!userMessage.trim() || isLoading) return;

-    setMessages((prev) => [...prev, { text: userMessage, isUser: true, timestamp: new Date() }]);
+    // Reset streaming text reference
+    streamingTextRef.current = '';
+
+    // Add user message
+    const newMessage: ChatMessageType = {
+      text: userMessage,
+      isUser: true,
+      timestamp: new Date(),
+    };
+
+    setMessages((prev) => [...prev, newMessage]);
     setIsLoading(true);
     setError(null);

     try {
-      const response = await sendChatMessage(
-        messages.map((v) => ({ role: v.isUser ? 'user' : 'system', content: v.text }))
-      );
+      const payload = {
+        messages: [...messages, newMessage].map((v) => ({
+          role: v.isUser ? 'user' : 'assistant',
+          content: v.text,
+        })),
+        editorCode,
+        language,
+        questionDetails,
+      };
+
+      // Add AI response placeholder
+      setMessages((prev) => [
+        ...prev,
+        {
+          text: '',
+          isUser: false,
+          timestamp: new Date(),
+          isStreaming: true,
+        },
+      ]);
+
+      const response = await sendChatMessage(payload, (chunk) => {
+        // Update streaming text
+        streamingTextRef.current = chunk;
+
+        // Update the last message with the accumulated text
+        setMessages((prev) => {
+          const newMessages = [...prev];
+          newMessages[newMessages.length - 1] = {
+            text: streamingTextRef.current,
+            isUser: false,
+            timestamp: new Date(),
+            isStreaming: true,
+          };
+          return newMessages;
+        });
+      });

       if (response.success) {
-        setMessages((prev) => [
-          ...prev,
-          { text: response.message, isUser: false, timestamp: new Date() },
-        ]);
+        setMessages((prev) => {
+          const newMessages = [...prev];
+          newMessages[newMessages.length - 1] = {
+            text: newMessages[newMessages.length - 1].text,
+            isUser: false,
+            timestamp: new Date(),
+            isStreaming: false,
+          };
+          return newMessages;
+        });
+      } else {
+        setError('Failed to get response from AI');
+        // Remove the streaming message if there was an error
+        setMessages((prev) => prev.slice(0, -1));
       }
     } catch (err) {
       setError(
         err instanceof Error ? err.message : 'An error occurred while fetching the response'
       );
+      // Remove the streaming message if there was an error
+      setMessages((prev) => prev.slice(0, -1));
     } finally {
       setIsLoading(false);
+      streamingTextRef.current = '';
     }
   };

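The updated sendChatMessage in '@/services/collab-service' is one of the changed files not shown in this excerpt. From its call site above, its assumed shape is roughly the following (a sketch only; the accumulated-text callback is inferred from `chunk` being assigned directly to streamingTextRef.current):

// Assumed signature only; the real implementation is in the unshown
// collab-service diff. The callback receives the accumulated text so far.
export declare function sendChatMessage(
  payload: {
    messages: { role: 'user' | 'assistant'; content: string }[];
    editorCode?: string;
    language?: string;
    questionDetails?: string;
  },
  onChunk?: (accumulatedText: string) => void
): Promise<{ success: boolean; message: string }>;
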
frontend/src/components/blocks/interview/chat/chat-message.tsx

Lines changed: 1 addition & 0 deletions

@@ -3,6 +3,7 @@ export interface ChatMessageType {
   isUser: boolean;
   timestamp: Date;
   isCode?: boolean;
+  isStreaming?: boolean;
 }

 interface ChatMessageProps {

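The new isStreaming flag gives the renderer a way to mark a reply that is still arriving. The ChatMessage component body is outside this diff; a hypothetical use of the flag might be:

import React from 'react';
import { ChatMessageType } from './chat-message';

// Hypothetical rendering branch (not in this commit): append a blinking
// cursor while tokens are still streaming in. Tailwind-style class assumed.
const StreamingText: React.FC<{ message: ChatMessageType }> = ({ message }) => (
  <span>
    {message.text}
    {message.isStreaming ? <span className="animate-pulse">▍</span> : null}
  </span>
);
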
0 commit comments