1+ import { handleAPIError , createRateLimitResponse } from '@/lib/api-errors'
2+ import { Duration } from '@/lib/duration'
3+ import { getModelClient , LLMModel , LLMModelConfig } from '@/lib/models'
4+ import { applyPatch } from '@/lib/morph'
5+ import ratelimit from '@/lib/ratelimit'
6+ import { FragmentSchema , morphEditSchema , MorphEditSchema } from '@/lib/schema'
7+ import { generateObject , LanguageModel , CoreMessage } from 'ai'
8+
9+ export const maxDuration = 300
10+
11+ const rateLimitMaxRequests = process . env . RATE_LIMIT_MAX_REQUESTS
12+ ? parseInt ( process . env . RATE_LIMIT_MAX_REQUESTS )
13+ : 10
14+ const ratelimitWindow = process . env . RATE_LIMIT_WINDOW
15+ ? ( process . env . RATE_LIMIT_WINDOW as Duration )
16+ : '1d'
17+
18+
19+ export async function POST ( req : Request ) {
20+ const {
21+ messages,
22+ model,
23+ config,
24+ currentFragment,
25+ } : {
26+ messages : CoreMessage [ ]
27+ model : LLMModel
28+ config : LLMModelConfig
29+ currentFragment : FragmentSchema
30+ } = await req . json ( )
31+
32+ // Rate limiting (same as chat route)
33+ const limit = ! config . apiKey
34+ ? await ratelimit (
35+ req . headers . get ( 'x-forwarded-for' ) ,
36+ rateLimitMaxRequests ,
37+ ratelimitWindow ,
38+ )
39+ : false
40+
41+ if ( limit ) {
42+ return createRateLimitResponse ( limit )
43+ }
44+
45+ const { model : modelNameString , apiKey : modelApiKey , ...modelParams } = config
46+ const modelClient = getModelClient ( model , config )
47+
48+ try {
49+ const contextualSystemPrompt = `You are a code editor. Generate a JSON response with exactly these fields:
50+
51+ {
52+ "commentary": "Explain what changes you are making",
53+ "instruction": "One line description of the change",
54+ "edit": "The code changes with // ... existing code ... for unchanged parts",
55+ "file_path": "${ currentFragment . file_path } "
56+ }
57+
58+ Current file: ${ currentFragment . file_path }
59+ Current code:
60+ \`\`\`
61+ ${ currentFragment . code }
62+ \`\`\`
63+
64+ `
65+
66+ const result = await generateObject ( {
67+ model : modelClient as LanguageModel ,
68+ system : contextualSystemPrompt ,
69+ messages,
70+ schema : morphEditSchema ,
71+ maxRetries : 0 ,
72+ ...modelParams ,
73+ } )
74+
75+ const editInstructions = result . object
76+
77+ // Apply edits using Morph
78+ const morphResult = await applyPatch ( {
79+ targetFile : currentFragment . file_path ,
80+ instructions : editInstructions . instruction ,
81+ initialCode : currentFragment . code ,
82+ codeEdit : editInstructions . edit ,
83+ } )
84+
85+ // Return updated fragment in standard format
86+ const updatedFragment : FragmentSchema = {
87+ ...currentFragment ,
88+ code : morphResult . code ,
89+ commentary : editInstructions . commentary ,
90+ }
91+
92+ // Create a streaming response that matches the AI SDK format
93+ const encoder = new TextEncoder ( )
94+ const stream = new ReadableStream ( {
95+ start ( controller ) {
96+ const json = JSON . stringify ( updatedFragment )
97+ controller . enqueue ( encoder . encode ( json ) )
98+ controller . close ( )
99+ } ,
100+ } )
101+
102+ return new Response ( stream , {
103+ headers : {
104+ 'Content-Type' : 'text/plain; charset=utf-8' ,
105+ } ,
106+ } )
107+ } catch ( error : any ) {
108+ return handleAPIError ( error , { hasOwnApiKey : ! ! config . apiKey } )
109+ }
110+ }