- // app/api/chat/route.ts - Enhanced with better error handling and project analysis
 import type { Duration } from "@/lib/duration"
 import type { LLMModel, LLMModelConfig } from "@/lib/models"
 import { getModelClient } from "@/lib/models"
@@ -21,13 +20,13 @@ export async function POST(req: Request) {
   const requestId = generateRequestId()
 
   try {
-    console.log(`[Enhanced Chat API ${requestId}] Processing request`)
+    console.log(`[Chat API ${requestId}] Processing request`)
 
     // Parse request body with enhanced error handling
     let body: any
     try {
       body = await req.json()
-      console.log(`[Enhanced Chat API ${requestId}] Request body parsed, keys:`, Object.keys(body))
+      console.log(`[Chat API ${requestId}] Request body parsed successfully`)
     } catch (error) {
       logError("Request body parsing", error, { requestId })
       return new Response(
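A note on the two helpers this hunk relies on but the diff never shows: `generateRequestId` and `logError`. A minimal sketch of what they plausibly look like, assuming a crypto-derived ID and structured console output (both implementations are assumptions, not part of this commit):

```ts
// Hypothetical sketches of the helpers referenced in the route;
// the real implementations live elsewhere in the repo.
import { randomUUID } from "crypto"

// Short unique ID used to correlate every log line for one request.
function generateRequestId(): string {
  return randomUUID().slice(0, 8)
}

// Structured error logger: a context label, the thrown value, and
// free-form metadata such as { requestId, provider }.
function logError(context: string, error: unknown, meta: Record<string, unknown> = {}): void {
  const message = error instanceof Error ? error.message : String(error)
  console.error(`[Chat API] ${context}: ${message}`, meta)
}
```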
@@ -50,8 +49,8 @@ export async function POST(req: Request) {
       template,
       model,
       config,
-      uploadedFiles, // New: support for uploaded files
-      analysisInstructions, // New: specific instructions for analysis
+      uploadedFiles,
+      analysisInstructions,
     }: {
       messages: CoreMessage[]
       userID: string
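The destructuring in this hunk implies a request body roughly shaped like the interface below. It is reconstructed from the fields the route reads; the element type of `uploadedFiles` and the `Template` import path are assumptions:

```ts
import type { CoreMessage } from "ai"
import type { LLMModel, LLMModelConfig } from "@/lib/models"
import type { Template } from "@/lib/templates" // assumed path

// Shape implied by the destructuring in the route handler.
interface ChatRequestBody {
  messages: CoreMessage[]
  userID: string
  teamID: string
  template: Template
  model: LLMModel
  config: LLMModelConfig
  uploadedFiles?: { path: string; content: string }[] // assumed element shape
  analysisInstructions?: string
}
```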
@@ -81,33 +80,21 @@ export async function POST(req: Request) {
       )
     }
 
-    console.log(`[Enhanced Chat API ${requestId}] Validation passed:`, {
-      userID: userID.substring(0, 8) + "...",
-      teamID: teamID.substring(0, 8) + "...",
-      modelId: model.id,
-      provider: model.providerId,
-      messagesCount: messages.length,
-      hasUploadedFiles: !!(uploadedFiles && uploadedFiles.length > 0),
-    })
+    console.log(`[Chat API ${requestId}] Validation passed for model: ${model.id} (${model.providerId})`)
 
     // Analyze uploaded files if present
     let projectStructure: ProjectStructure | undefined
     if (uploadedFiles && uploadedFiles.length > 0) {
       try {
-        console.log(`[Enhanced Chat API ${requestId}] Analyzing ${uploadedFiles.length} uploaded files`)
+        console.log(`[Chat API ${requestId}] Analyzing ${uploadedFiles.length} uploaded files`)
         const analyzer = new ProjectAnalyzer()
         const analysis = await analyzer.analyzeProject(uploadedFiles)
         projectStructure = analysis.structure
 
-        console.log(`[Enhanced Chat API ${requestId}] Project analysis completed:`, {
-          filesAnalyzed: uploadedFiles.length,
-          dependenciesFound: projectStructure.dependencies.size,
-          componentsFound: projectStructure.components.size,
-          architectureType: projectStructure.architecture.type,
-        })
+        console.log(`[Chat API ${requestId}] Project analysis completed`)
       } catch (analysisError) {
         logError("Project analysis failed", analysisError, { requestId, filesCount: uploadedFiles.length })
-        console.warn(`[Enhanced Chat API ${requestId}] Project analysis failed, continuing without analysis:`, analysisError)
+        console.warn(`[Chat API ${requestId}] Project analysis failed, continuing without analysis`)
       }
     }
 
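The removed log object is still useful documentation: it shows that `ProjectStructure.dependencies` and `.components` expose `.size` (so they are Map- or Set-like) and that `architecture` carries a `type`. A hedged reconstruction of that shape, with assumed value types:

```ts
// Reconstructed from the fields the removed log statement touched;
// the concrete value types are assumptions.
interface ProjectStructure {
  dependencies: Map<string, string> // package name -> version (assumed)
  components: Map<string, { file: string }> // component name -> location (assumed)
  architecture: {
    type: string // e.g. "next-app" or "monorepo" (assumed values)
  }
}
```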
@@ -118,7 +105,7 @@ export async function POST(req: Request) {
         : false
 
       if (limit) {
-        console.log(`[Enhanced Chat API ${requestId}] Rate limit hit:`, limit)
+        console.log(`[Chat API ${requestId}] Rate limit hit`)
         return new Response(
           JSON.stringify({
             error: "You have reached your request limit for the day.",
@@ -137,7 +124,6 @@ export async function POST(req: Request) {
           },
         )
       }
-      console.log(`[Enhanced Chat API ${requestId}] Rate limit check passed`)
    } catch (error) {
      logError("Rate limiting check failed", error, { requestId })
      // Continue without rate limiting if it fails
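The start of the rate-limit ternary is elided above the `: false` line. The usual pattern, which this code appears to follow, is to skip the limiter when the caller supplies their own API key. A hypothetical sketch of that gate; `ratelimit`, its arguments, and the `LimitInfo` shape are all assumptions modelled on common limiter APIs:

```ts
import type { Duration } from "@/lib/duration"

// Assumed limiter contract: false when under the limit, details when over.
interface LimitInfo {
  amount: number // requests allowed per window
  remaining: number
  reset: number // epoch ms when the window resets
}

declare function ratelimit(key: string, maxRequests: number, window: Duration): Promise<LimitInfo | false>

// Hypothetical reconstruction of the elided gate: users with their own
// API key bypass the shared limit.
async function checkLimit(userID: string, hasOwnKey: boolean): Promise<LimitInfo | false> {
  return !hasOwnKey ? await ratelimit(userID, 10, "1d") : false
}
```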
@@ -146,9 +132,9 @@ export async function POST(req: Request) {
     // Create model client with enhanced error handling
     let modelClient: LanguageModel
     try {
-      console.log(`[Enhanced Chat API ${requestId}] Creating model client for:`, model.providerId, model.id)
+      console.log(`[Chat API ${requestId}] Creating model client for: ${model.providerId}/${model.id}`)
       modelClient = getModelClient(model, config) as LanguageModel
-      console.log(`[Enhanced Chat API ${requestId}] Model client created successfully`)
+      console.log(`[Chat API ${requestId}] Model client created successfully`)
     } catch (error: any) {
       logError("Model client creation failed", error, { requestId, provider: model.providerId, modelId: model.id })
 
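For reference, `getModelClient` maps a provider/model descriptor plus per-user config to an AI SDK `LanguageModel`, which is why the route widens its return value with a cast. A usage sketch; the literal field values are illustrative only:

```ts
import { getModelClient } from "@/lib/models"
import type { LLMModel, LLMModelConfig } from "@/lib/models"
import type { LanguageModel } from "ai"

// Illustrative inputs; LLMModel/LLMModelConfig may carry more fields.
const model = { id: "gpt-4o", providerId: "openai" } as LLMModel
const config: LLMModelConfig = { apiKey: process.env.OPENAI_API_KEY }

// Same widening cast the route uses before passing to streamObject.
const modelClient = getModelClient(model, config) as LanguageModel
```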
@@ -180,15 +166,14 @@ export async function POST(req: Request) {
     let systemPrompt: string
     try {
       if (projectStructure && userPrompt) {
-        console.log(`[Enhanced Chat API ${requestId}] Generating enhanced prompt with project context`)
+        console.log(`[Chat API ${requestId}] Generating enhanced prompt with project context`)
         systemPrompt = toEnhancedPrompt(template, userPrompt, projectStructure)
       } else {
-        console.log(`[Enhanced Chat API ${requestId}] Using standard prompt generation`)
-        // Fallback to existing prompt system
+        console.log(`[Chat API ${requestId}] Using standard prompt generation`)
         systemPrompt = generateFallbackPrompt(template)
       }
 
-      console.log(`[Enhanced Chat API ${requestId}] System prompt generated, length:`, systemPrompt.length)
+      console.log(`[Chat API ${requestId}] System prompt generated`)
     } catch (error: any) {
       logError("System prompt generation failed", error, { requestId })
       return new Response(
@@ -213,26 +198,20 @@ export async function POST(req: Request) {
       Object.entries(providerSpecificConfig).filter(([_, value]) => value !== undefined),
     )
 
-    console.log(`[Enhanced Chat API ${requestId}] Creating stream with params:`, {
-      providerSpecificConfigKeys: Object.keys(cleanProviderSpecificConfig),
-      systemPromptLength: systemPrompt.length,
-      hasProjectContext: !!projectStructure,
-    })
+    console.log(`[Chat API ${requestId}] Starting stream object creation`)
 
     try {
       const streamConfig = {
         model: modelClient,
         schema,
         system: systemPrompt,
         messages,
-        maxRetries: 2, // Increase retries for better reliability
+        maxRetries: 2,
         ...cleanProviderSpecificConfig,
       }
 
-      console.log(`[Enhanced Chat API ${requestId}] Starting stream object creation`)
       const stream = await streamObject(streamConfig)
-
-      console.log(`[Enhanced Chat API ${requestId}] Stream created successfully`)
+      console.log(`[Chat API ${requestId}] Stream created successfully`)
       return stream.toTextStreamResponse()
     } catch (error: any) {
       logError("Stream creation failed", error, {
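`streamObject` here is the Vercel AI SDK call: it streams a schema-constrained object from the model, and the result converts straight into a streaming HTTP response. A minimal self-contained sketch of the same pattern; the Zod schema below is illustrative, since the route's real `schema` comes from its own imports:

```ts
import { streamObject } from "ai"
import { openai } from "@ai-sdk/openai"
import { z } from "zod"

// Illustrative schema; the route imports its own.
const schema = z.object({
  commentary: z.string(),
  code: z.string(),
})

export async function POST(req: Request) {
  const { messages } = await req.json()

  const stream = await streamObject({
    model: openai("gpt-4o"),
    schema,
    system: "You are a code-generation assistant.",
    messages,
    maxRetries: 2, // same retry budget the route configures
  })

  // Streams partial JSON as plain text, as the route does.
  return stream.toTextStreamResponse()
}
```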
@@ -275,41 +254,6 @@ export async function POST(req: Request) {
         )
       }
 
-      if (
-        errorMessage.includes("timeout") ||
-        errorMessage.includes("ECONNRESET") ||
-        errorMessage.includes("ETIMEDOUT") ||
-        errorMessage.includes("ENOTFOUND")
-      ) {
-        return new Response(
-          JSON.stringify({
-            error: "Network timeout. Please check your connection and try again.",
-            code: "TIMEOUT_ERROR",
-            provider: model.providerId,
-            requestId,
-          }),
-          {
-            status: 408,
-            headers: { "Content-Type": "application/json" },
-          },
-        )
-      }
-
-      if (error.status === 503 || error.status === 502 || errorMessage.includes("overload")) {
-        return new Response(
-          JSON.stringify({
-            error: "Service temporarily unavailable. Please try again later.",
-            code: "SERVICE_UNAVAILABLE",
-            provider: model.providerId,
-            requestId,
-          }),
-          {
-            status: 503,
-            headers: { "Content-Type": "application/json" },
-          },
-        )
-      }
-
       if (errorMessage.includes("model") && errorMessage.includes("not found")) {
         return new Response(
           JSON.stringify({
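This hunk deletes the dedicated timeout and service-unavailable branches outright. If that categorization is still wanted, it could live in one small helper instead of inline branches; a hypothetical sketch of such a consolidation, not something this commit introduces:

```ts
// Hypothetical helper consolidating the deleted branches; the commit
// itself removes them without replacement.
function categorizeStreamError(error: any): { status: number; code: string; error: string } | null {
  const msg: string = error?.message ?? ""
  if (/timeout|ECONNRESET|ETIMEDOUT|ENOTFOUND/.test(msg)) {
    return { status: 408, code: "TIMEOUT_ERROR", error: "Network timeout. Please check your connection and try again." }
  }
  if (error?.status === 503 || error?.status === 502 || msg.includes("overload")) {
    return { status: 503, code: "SERVICE_UNAVAILABLE", error: "Service temporarily unavailable. Please try again later." }
  }
  return null
}
```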
@@ -326,22 +270,13 @@ export async function POST(req: Request) {
         )
       }
 
-      // Generic error with detailed information for debugging
       return new Response(
         JSON.stringify({
          error: "An unexpected error occurred while processing your request.",
-          code: "INTERNAL_ERROR",
+          code: "CHAT_PROCESSING_ERROR",
          provider: model.providerId,
          details: errorMessage,
          requestId,
-          // Include additional debug info in development
-          ...(process.env.NODE_ENV === "development" && {
-            debug: {
-              stack: error.stack,
-              errorType: error.constructor.name,
-              status: error.status,
-            },
-          }),
        }),
        {
          status: 500,