@@ -49,6 +49,15 @@ interface RequestBody {
   history?: ChatMessage[]
 }
 
+// Helper: safe stringify for error payloads that may include circular structures
+function safeStringify(value: unknown): string {
+  try {
+    return JSON.stringify(value)
+  } catch {
+    return '[unserializable]'
+  }
+}
+
 export async function POST(req: NextRequest) {
   const requestId = crypto.randomUUID().slice(0, 8)
   logger.info(`[${requestId}] Received wand generation request`)
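For context on the new helper: `JSON.stringify` throws a `TypeError` when given a circular structure, which is exactly the case `safeStringify` guards against when serializing error payloads. A minimal illustration (the `payload` object here is hypothetical):

```ts
const payload: Record<string, unknown> = { route: 'wand-generate' }
payload.self = payload // circular reference

// JSON.stringify(payload) would throw: TypeError: Converting circular structure to JSON
console.log(safeStringify(payload)) // -> '[unserializable]'
console.log(safeStringify({ ok: true })) // -> '{"ok":true}'
```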
@@ -110,124 +119,101 @@ export async function POST(req: NextRequest) {
         `[${requestId}] About to create stream with model: ${useWandAzure ? wandModelName : 'gpt-4o'}`
       )
 
-      // Add AbortController with timeout
-      const abortController = new AbortController()
-      const timeoutId = setTimeout(() => {
-        abortController.abort('Stream timeout after 30 seconds')
-      }, 30000)
-
-      // Forward request abort signal if available
-      req.signal?.addEventListener('abort', () => {
-        abortController.abort('Request cancelled by client')
+      // Create the stream without AbortController for Node.js runtime compatibility
+      const streamCompletion = await client.chat.completions.create({
+        model: useWandAzure ? wandModelName : 'gpt-4o',
+        messages: messages,
+        temperature: 0.3,
+        max_tokens: 10000,
+        stream: true,
+        stream_options: { include_usage: true },
       })
 
-      const streamCompletion = await client.chat.completions.create(
-        {
-          model: useWandAzure ? wandModelName : 'gpt-4o',
-          messages: messages,
-          temperature: 0.3,
-          max_tokens: 10000,
-          stream: true,
-          stream_options: { include_usage: true },
-        },
-        {
-          signal: abortController.signal, // Add AbortSignal
-        }
-      )
+      logger.info(`[${requestId}] Stream created successfully, starting response`)
+
+      // Create a ReadableStream for Node.js runtime compatibility
+      const encoder = new TextEncoder()
+      const readable = new ReadableStream({
+        async start(controller) {
+          try {
+            logger.info(`[${requestId}] Starting stream processing`)
+            let chunkCount = 0
+
+            for await (const chunk of streamCompletion) {
+              chunkCount++
+
+              if (chunkCount === 1) {
+                logger.info(`[${requestId}] Received first chunk`)
+              }
 
-      clearTimeout(timeoutId) // Clear timeout after successful creation
-      logger.info(`[${requestId}] Stream created successfully, starting reader pattern`)
-
-      logger.debug(`[${requestId}] Stream connection established successfully`)
-
-      return new Response(
-        new ReadableStream({
-          async start(controller) {
-            const encoder = new TextEncoder()
-
-            try {
-              logger.info(`[${requestId}] Starting streaming with timeout protection`)
-              let chunkCount = 0
-              let hasUsageData = false
-
-              // Use for await with AbortController timeout protection
-              for await (const chunk of streamCompletion) {
-                chunkCount++
-
-                if (chunkCount === 1) {
-                  logger.info(`[${requestId}] Received first chunk via for await`)
-                }
-
-                // Process the chunk
-                const content = chunk.choices?.[0]?.delta?.content || ''
-                if (content) {
-                  // Use SSE format identical to chat streaming
-                  controller.enqueue(
-                    encoder.encode(`data: ${JSON.stringify({ chunk: content })}\n\n`)
-                  )
-                }
-
-                // Check for usage data
-                if (chunk.usage) {
-                  hasUsageData = true
-                  logger.info(
-                    `[${requestId}] Received usage data: ${JSON.stringify(chunk.usage)}`
-                  )
-                }
-
-                // Log every 5th chunk to avoid spam
-                if (chunkCount % 5 === 0) {
-                  logger.debug(`[${requestId}] Processed ${chunkCount} chunks so far`)
-                }
+              // Process the chunk
+              const content = chunk.choices?.[0]?.delta?.content || ''
+              if (content) {
+                // Send data in SSE format
+                const data = `data: ${JSON.stringify({ chunk: content })}\n\n`
+                controller.enqueue(encoder.encode(data))
               }
 
-              logger.info(
-                `[${requestId}] Reader pattern completed. Total chunks: ${chunkCount}, Usage data received: ${hasUsageData}`
-              )
-
-              // Send completion signal in SSE format
-              logger.info(`[${requestId}] Sending completion signal`)
-              controller.enqueue(encoder.encode(`data: ${JSON.stringify({ done: true })}\n\n`))
-
-              logger.info(`[${requestId}] Closing controller`)
-              controller.close()
-
-              logger.info(`[${requestId}] Wand generation streaming completed successfully`)
-            } catch (streamError: any) {
-              if (streamError.name === 'AbortError') {
-                logger.info(
-                  `[${requestId}] Stream was aborted (timeout or cancel): ${streamError.message}`
-                )
-                controller.enqueue(
-                  encoder.encode(
-                    `data: ${JSON.stringify({ error: 'Stream cancelled', done: true })}\n\n`
-                  )
-                )
-              } else {
-                logger.error(`[${requestId}] Streaming error`, { error: streamError.message })
-                controller.enqueue(
-                  encoder.encode(
-                    `data: ${JSON.stringify({ error: 'Streaming failed', done: true })}\n\n`
-                  )
-                )
+              // Check for usage data
+              if (chunk.usage) {
+                logger.info(`[${requestId}] Received usage data: ${JSON.stringify(chunk.usage)}`)
+              }
+
+              // Log progress periodically
+              if (chunkCount % 10 === 0) {
+                logger.debug(`[${requestId}] Processed ${chunkCount} chunks`)
               }
-              controller.close()
             }
-          },
-        }),
-        {
-          headers: {
-            'Content-Type': 'text/event-stream',
-            'Cache-Control': 'no-cache',
-            Connection: 'keep-alive',
-            'X-Accel-Buffering': 'no',
-          },
-        }
-      )
+
+            logger.info(`[${requestId}] Stream completed. Total chunks: ${chunkCount}`)
+
+            // Send completion signal
+            controller.enqueue(encoder.encode(`data: ${JSON.stringify({ done: true })}\n\n`))
+            controller.close()
+
+            logger.info(`[${requestId}] Wand generation streaming completed successfully`)
+          } catch (streamError: any) {
+            logger.error(`[${requestId}] Streaming error`, {
+              name: streamError?.name,
+              message: streamError?.message || 'Unknown error',
+              code: streamError?.code,
+              status: streamError?.status,
+              stack: streamError?.stack,
+              useWandAzure,
+              model: useWandAzure ? wandModelName : 'gpt-4o',
+            })
+
+            // Send error to client
+            const errorData = `data: ${JSON.stringify({ error: 'Streaming failed', done: true })}\n\n`
+            controller.enqueue(encoder.encode(errorData))
+            controller.close()
+          }
+        },
+      })
+
+      // Return Response with proper headers for Node.js runtime
+      return new Response(readable, {
+        headers: {
+          'Content-Type': 'text/event-stream',
+          'Cache-Control': 'no-cache, no-transform',
+          Connection: 'keep-alive',
+          'X-Accel-Buffering': 'no', // Disable Nginx buffering
+          'Transfer-Encoding': 'chunked', // Important for Node.js runtime
+        },
+      })
     } catch (error: any) {
-      logger.error(`[${requestId}] Streaming error`, {
-        error: error.message || 'Unknown error',
-        stack: error.stack,
+      logger.error(`[${requestId}] Failed to create stream`, {
+        name: error?.name,
+        message: error?.message || 'Unknown error',
+        code: error?.code,
+        status: error?.status,
+        responseStatus: error?.response?.status,
+        responseData: error?.response?.data ? safeStringify(error.response.data) : undefined,
+        stack: error?.stack,
+        useWandAzure,
+        model: useWandAzure ? wandModelName : 'gpt-4o',
+        endpoint: useWandAzure ? azureEndpoint : 'api.openai.com',
+        apiVersion: useWandAzure ? azureApiVersion : 'N/A',
       })
 
       return NextResponse.json(
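For reference, a minimal sketch of how a client might consume the SSE framing this route now emits (`data: {"chunk": ...}` per delta, then `data: {"done": true}` or `data: {"error": ..., "done": true}`). The endpoint path and request body shape are assumptions for illustration, not part of this diff:

```ts
// Hypothetical consumer; assumes the route is mounted at /api/wand/generate.
async function consumeWandStream(body: unknown): Promise<string> {
  const res = await fetch('/api/wand/generate', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(body),
  })
  const reader = res.body!.getReader()
  const decoder = new TextDecoder()
  let buffer = ''
  let text = ''
  while (true) {
    const { done, value } = await reader.read()
    if (done) break
    buffer += decoder.decode(value, { stream: true })
    const events = buffer.split('\n\n') // SSE events are delimited by a blank line
    buffer = events.pop() ?? ''
    for (const event of events) {
      if (!event.startsWith('data: ')) continue
      const payload = JSON.parse(event.slice('data: '.length))
      if (payload.error) throw new Error(payload.error)
      if (payload.chunk) text += payload.chunk
      if (payload.done) return text
    }
  }
  return text
}
```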
@@ -261,8 +247,19 @@ export async function POST(req: NextRequest) {
     return NextResponse.json({ success: true, content: generatedContent })
   } catch (error: any) {
     logger.error(`[${requestId}] Wand generation failed`, {
-      error: error.message || 'Unknown error',
-      stack: error.stack,
+      name: error?.name,
+      message: error?.message || 'Unknown error',
+      code: error?.code,
+      status: error?.status,
+      responseStatus: error instanceof OpenAI.APIError ? error.status : error?.response?.status,
+      responseData: (error as any)?.response?.data
+        ? safeStringify((error as any).response.data)
+        : undefined,
+      stack: error?.stack,
+      useWandAzure,
+      model: useWandAzure ? wandModelName : 'gpt-4o',
+      endpoint: useWandAzure ? azureEndpoint : 'api.openai.com',
+      apiVersion: useWandAzure ? azureApiVersion : 'N/A',
     })
 
     let clientErrorMessage = 'Wand generation failed. Please try again later.'
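Since this catch block now distinguishes `OpenAI.APIError` (the openai SDK's error class, which carries an HTTP `status`), `clientErrorMessage` could be derived from the same fields that are logged. A hedged sketch, not part of this diff; the helper name and the status-to-message mapping are illustrative:

```ts
// Hypothetical helper; assumes openai-node's APIError exposes `status`.
function toClientErrorMessage(error: unknown): string {
  if (error instanceof OpenAI.APIError) {
    if (error.status === 401) return 'Authentication failed. Please check the API key configuration.'
    if (error.status === 429) return 'Rate limit exceeded. Please try again shortly.'
  }
  return 'Wand generation failed. Please try again later.'
}
```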