Skip to content

Commit 8f73bb3

Browse files
authored
Merge branch 'main' into fix-bounce-rate-color
2 parents b36dc70 + a161001 commit 8f73bb3

File tree

32 files changed

+735
-426
lines changed

32 files changed

+735
-426
lines changed
Lines changed: 72 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,72 @@
1+
import { createOpenRouter } from '@openrouter/ai-sdk-provider';
2+
import { generateObject } from 'ai';
3+
import type { z } from 'zod';
4+
import {
5+
AIResponseJsonSchema,
6+
comprehensiveSystemPrompt,
7+
} from '../prompts/agent';
8+
import type { AssistantSession } from './assistant-session';
9+
10+
// Shared OpenRouter client used for every model call in this service.
// NOTE(review): assumes AI_API_KEY is present in the environment — if it is
// missing, requests fail at call time rather than at startup; verify
// deployment configuration.
const openrouter = createOpenRouter({
  apiKey: process.env.AI_API_KEY,
});

// Model identifier sent to OpenRouter for every generation request.
const AI_MODEL = 'google/gemini-2.5-flash-lite-preview-06-17';
15+
16+
/**
 * Result of a single structured AI generation call.
 */
export interface AIResponse {
  /** Structured model output, validated against AIResponseJsonSchema. */
  content: z.infer<typeof AIResponseJsonSchema>;
  /** Token accounting reported by the provider (0 when not reported). */
  usage: {
    promptTokens: number;
    completionTokens: number;
    totalTokens: number;
  };
}
24+
25+
/**
26+
* Service for AI interactions
27+
* Handles all communication with AI models
28+
*/
29+
export class AIService {
30+
async generateResponse(session: AssistantSession): Promise<AIResponse> {
31+
const context = session.getContext();
32+
const messages = session.getMessages();
33+
34+
session.log('Starting AI generation');
35+
const startTime = Date.now();
36+
37+
const systemPrompt = comprehensiveSystemPrompt(
38+
context.website.id,
39+
context.website.domain,
40+
'execute_chat',
41+
context.model as 'chat' | 'agent' | 'agent-max'
42+
);
43+
44+
try {
45+
const chat = await generateObject({
46+
model: openrouter.chat(AI_MODEL),
47+
messages: [{ role: 'system', content: systemPrompt }, ...messages],
48+
temperature: 0.1,
49+
schema: AIResponseJsonSchema,
50+
});
51+
52+
const responseTime = Date.now() - startTime;
53+
const usage = {
54+
promptTokens: chat.usage.inputTokens ?? 0,
55+
completionTokens: chat.usage.outputTokens ?? 0,
56+
totalTokens: chat.usage.totalTokens ?? 0,
57+
};
58+
59+
session.setAIMetrics(responseTime, usage);
60+
61+
return {
62+
content: chat.object,
63+
usage,
64+
};
65+
} catch (error) {
66+
session.log(
67+
`AI generation failed: ${error instanceof Error ? error.message : 'Unknown error'}`
68+
);
69+
throw error;
70+
}
71+
}
72+
}
Lines changed: 130 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,130 @@
1+
import type { User } from '@databuddy/auth';
2+
import type { Website } from '@databuddy/shared';
3+
import type { AssistantRequestType } from '../../schemas';
4+
import type { StreamingUpdate } from '../utils/stream-utils';
5+
import { AIService } from './ai-service';
6+
import { AssistantSession, type SessionMetrics } from './assistant-session';
7+
import { ConversationRepository } from './conversation-repository';
8+
import {
9+
type AIResponseContent,
10+
ResponseProcessor,
11+
} from './response-processor';
12+
13+
/**
14+
* Main orchestrator for assistant interactions
15+
* Coordinates all the different services and manages the workflow
16+
*/
17+
export class AssistantOrchestrator {
18+
private readonly aiService = new AIService();
19+
private readonly responseProcessor = new ResponseProcessor();
20+
private readonly conversationRepo = new ConversationRepository();
21+
22+
async processRequest(
23+
request: AssistantRequestType,
24+
user: User,
25+
website: Website
26+
): Promise<StreamingUpdate[]> {
27+
// Create session to track this interaction
28+
const session = new AssistantSession(request, user, website);
29+
30+
try {
31+
// Step 1: Generate AI response
32+
const aiResponse = await this.aiService.generateResponse(session);
33+
34+
if (!aiResponse.content) {
35+
session.log('AI response was empty');
36+
return [
37+
{
38+
type: 'error',
39+
content:
40+
"I'm having trouble understanding that request. Could you try asking in a different way?",
41+
},
42+
];
43+
}
44+
45+
// Step 2: Process the response into streaming updates
46+
const streamingUpdates = await this.responseProcessor.process(
47+
aiResponse.content,
48+
session
49+
);
50+
51+
// Step 3: Save to database (async, don't block response)
52+
const finalResult = streamingUpdates.at(-1);
53+
if (finalResult) {
54+
const metrics = session.finalize();
55+
56+
// Save asynchronously but handle errors
57+
this.saveConversationAsync(
58+
session,
59+
aiResponse.content,
60+
finalResult,
61+
metrics
62+
);
63+
}
64+
65+
return streamingUpdates;
66+
} catch (error) {
67+
session.log(
68+
`Processing failed: ${error instanceof Error ? error.message : 'Unknown error'}`
69+
);
70+
71+
// Return error response
72+
const errorResponse: StreamingUpdate = {
73+
type: 'error',
74+
content: 'Oops! Something unexpected happened. Mind trying that again?',
75+
};
76+
77+
// Try to save error conversation
78+
const metrics = session.finalize();
79+
this.saveErrorConversationAsync(session, error, errorResponse, metrics);
80+
81+
return [errorResponse];
82+
}
83+
}
84+
85+
private async saveConversationAsync(
86+
session: AssistantSession,
87+
aiResponse: AIResponseContent,
88+
finalResult: StreamingUpdate,
89+
metrics: SessionMetrics
90+
): Promise<void> {
91+
try {
92+
await this.conversationRepo.saveConversation(
93+
session,
94+
aiResponse,
95+
finalResult,
96+
metrics
97+
);
98+
console.log('✅ Conversation saved successfully');
99+
} catch (error) {
100+
console.error('❌ Failed to save conversation:', error);
101+
}
102+
}
103+
104+
private async saveErrorConversationAsync(
105+
session: AssistantSession,
106+
originalError: unknown,
107+
errorResponse: StreamingUpdate,
108+
metrics: SessionMetrics
109+
): Promise<void> {
110+
try {
111+
const errorAIResponse = {
112+
response_type: 'text' as const,
113+
text_response: errorResponse.content,
114+
thinking_steps: [
115+
`Error: ${originalError instanceof Error ? originalError.message : 'Unknown error'}`,
116+
],
117+
};
118+
119+
await this.conversationRepo.saveConversation(
120+
session,
121+
errorAIResponse,
122+
errorResponse,
123+
metrics
124+
);
125+
console.log('✅ Error conversation saved successfully');
126+
} catch (error) {
127+
console.error('❌ Failed to save error conversation:', error);
128+
}
129+
}
130+
}
Lines changed: 97 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,97 @@
1+
import type { User } from '@databuddy/auth';
2+
import { createId, type Website } from '@databuddy/shared';
3+
import type { AssistantRequestType } from '../../schemas';
4+
5+
/** One turn in the assistant conversation history. */
export interface AssistantMessage {
  role: 'user' | 'assistant';
  content: string;
}

/** Timing and token-usage figures collected over one session. */
export interface SessionMetrics {
  // Milliseconds spent waiting on the AI provider.
  aiResponseTime: number;
  // Milliseconds from session creation to finalize().
  totalProcessingTime: number;
  // Token counts as reported by the provider (0 when unreported).
  tokenUsage: {
    promptTokens: number;
    completionTokens: number;
    totalTokens: number;
  };
}

/** Identity and configuration attached to a session at creation time. */
export interface SessionContext {
  user: User;
  website: Website;
  // Caller-supplied id, or a freshly generated one when absent.
  conversationId: string;
  // Requested assistant mode; defaults to 'chat' when not specified.
  model: string;
}
26+
27+
/**
28+
* Represents a single assistant interaction session
29+
* Manages the lifecycle of one request/response cycle
30+
*/
31+
export class AssistantSession {
32+
private readonly context: SessionContext;
33+
private readonly messages: AssistantMessage[];
34+
private readonly startTime: number;
35+
private readonly debugLogs: string[] = [];
36+
private metrics: Partial<SessionMetrics> = {};
37+
38+
constructor(request: AssistantRequestType, user: User, website: Website) {
39+
this.context = {
40+
user,
41+
website,
42+
conversationId: request.conversationId || createId(),
43+
model: request.model || 'chat',
44+
};
45+
this.messages = request.messages;
46+
this.startTime = Date.now();
47+
this.log('Session created');
48+
}
49+
50+
getContext(): SessionContext {
51+
return this.context;
52+
}
53+
54+
getMessages(): AssistantMessage[] {
55+
return this.messages;
56+
}
57+
58+
log(message: string): void {
59+
this.debugLogs.push(`${Date.now() - this.startTime}ms: ${message}`);
60+
}
61+
62+
setAIMetrics(
63+
responseTime: number,
64+
tokenUsage: SessionMetrics['tokenUsage']
65+
): void {
66+
this.metrics.aiResponseTime = responseTime;
67+
this.metrics.tokenUsage = tokenUsage;
68+
this.log(
69+
`AI completed in ${responseTime}ms, tokens: ${tokenUsage.totalTokens}`
70+
);
71+
}
72+
73+
finalize(): SessionMetrics {
74+
const totalTime = Date.now() - this.startTime;
75+
this.metrics.totalProcessingTime = totalTime;
76+
this.log(`Session completed in ${totalTime}ms`);
77+
78+
return {
79+
aiResponseTime: this.metrics.aiResponseTime || 0,
80+
totalProcessingTime: totalTime,
81+
tokenUsage: this.metrics.tokenUsage || {
82+
promptTokens: 0,
83+
completionTokens: 0,
84+
totalTokens: 0,
85+
},
86+
};
87+
}
88+
89+
getDebugLogs(): string[] {
90+
return [...this.debugLogs];
91+
}
92+
93+
addMessage(message: AssistantMessage): void {
94+
this.messages.push(message);
95+
this.log(`Added ${message.role} message`);
96+
}
97+
}

0 commit comments

Comments
 (0)