Skip to content

Commit 930a129

Browse files
authored
Merge pull request #4684 from udecode/migrate/ai-prompt
Migrate Template
2 parents 7d776d1 + 18a7b77 commit 930a129

File tree

25 files changed

+3774
-1218
lines changed

25 files changed

+3774
-1218
lines changed

apps/www/public/r/ai-api.json

Lines changed: 2 additions & 2 deletions
Large diffs are not rendered by default.

apps/www/public/r/settings-dialog.json

Lines changed: 1 addition & 1 deletion
Large diffs are not rendered by default.

apps/www/public/r/use-chat.json

Lines changed: 1 addition & 1 deletion
Large diffs are not rendered by default.

apps/www/src/registry/app/api/ai/command/route.ts

Lines changed: 25 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,9 @@ import type {
44
} from '@/registry/components/editor/use-chat';
55
import type { NextRequest } from 'next/server';
66

7+
import { createGateway } from '@ai-sdk/gateway';
78
import {
9+
type LanguageModel,
810
type UIMessageStreamWriter,
911
createUIMessageStream,
1012
createUIMessageStreamResponse,
@@ -28,7 +30,7 @@ import {
2830
} from './prompts';
2931

3032
export async function POST(req: NextRequest) {
31-
const { apiKey: key, ctx, messages: messagesRaw } = await req.json();
33+
const { apiKey: key, ctx, messages: messagesRaw, model } = await req.json();
3234

3335
const { children, selection, toolName: toolNameParam } = ctx;
3436

@@ -49,6 +51,10 @@ export async function POST(req: NextRequest) {
4951

5052
const isSelecting = editor.api.isExpanded();
5153

54+
const gatewayProvider = createGateway({
55+
apiKey,
56+
});
57+
5258
try {
5359
const stream = createUIMessageStream<ChatMessage>({
5460
execute: async ({ writer }) => {
@@ -59,7 +65,7 @@ export async function POST(req: NextRequest) {
5965
enum: isSelecting
6066
? ['generate', 'edit', 'comment']
6167
: ['generate', 'comment'],
62-
model: 'google/gemini-2.5-flash',
68+
model: gatewayProvider(model || 'google/gemini-2.5-flash'),
6369
output: 'enum',
6470
prompt: getChooseToolPrompt(messagesRaw),
6571
});
@@ -74,11 +80,15 @@ export async function POST(req: NextRequest) {
7480

7581
const stream = streamText({
7682
experimental_transform: markdownJoinerTransform(),
77-
model: 'google/gemini-2.5-flash',
83+
model: gatewayProvider(model || 'openai/gpt-4o-mini'),
7884
// Not used
7985
prompt: '',
8086
tools: {
81-
comment: getCommentTool(editor, { messagesRaw, writer }),
87+
comment: getCommentTool(editor, {
88+
messagesRaw,
89+
model: gatewayProvider(model || 'google/gemini-2.5-flash'),
90+
writer,
91+
}),
8292
},
8393
prepareStep: async (step) => {
8494
if (toolName === 'comment') {
@@ -120,7 +130,7 @@ export async function POST(req: NextRequest) {
120130
role: 'user',
121131
},
122132
],
123-
model: 'openai/gpt-4o-mini',
133+
model: gatewayProvider(model || 'openai/gpt-4o-mini'),
124134
};
125135
}
126136
},
@@ -143,15 +153,20 @@ const getCommentTool = (
143153
editor: SlateEditor,
144154
{
145155
messagesRaw,
156+
model,
146157
writer,
147-
}: { messagesRaw: ChatMessage[]; writer: UIMessageStreamWriter<ChatMessage> }
158+
}: {
159+
messagesRaw: ChatMessage[];
160+
model: LanguageModel;
161+
writer: UIMessageStreamWriter<ChatMessage>;
162+
}
148163
) => {
149164
return tool({
150165
description: 'Comment on the content',
151166
inputSchema: z.object({}),
152167
execute: async () => {
153168
const { elementStream } = streamObject({
154-
model: 'google/gemini-2.5-flash',
169+
model,
155170
output: 'array',
156171
prompt: getCommentPrompt(editor, {
157172
messages: messagesRaw,
@@ -182,7 +197,7 @@ const getCommentTool = (
182197
id: commentDataId,
183198
data: {
184199
comment: comment,
185-
status: 'streaming'
200+
status: 'streaming',
186201
},
187202
type: 'data-comment',
188203
});
@@ -192,10 +207,10 @@ const getCommentTool = (
192207
id: nanoid(),
193208
data: {
194209
comment: null,
195-
status: 'finished'
210+
status: 'finished',
196211
},
197212
type: 'data-comment',
198-
});
213+
});
199214
},
200215
});
201216
};

apps/www/src/registry/app/api/ai/command/utils.ts

Lines changed: 5 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -115,11 +115,11 @@ export const buildStructuredPrompt = ({
115115
Here is the background data you should reference when answering the user:
116116
${backgroundData}
117117
`,
118-
119-
dedent`
120-
Here are some important rules for the interaction:
121-
${rules}
122-
`,
118+
rules &&
119+
dedent`
120+
Here are some important rules for the interaction:
121+
${rules}
122+
`,
123123

124124
formattedExamples &&
125125
dedent`
@@ -246,4 +246,3 @@ export const isMultiBlocks = (editor: SlateEditor) => {
246246
export const getMarkdownWithSelection = (editor: SlateEditor) => {
247247
return removeEscapeSelection(editor, getMarkdown(editor, { type: 'block' }));
248248
};
249-

apps/www/src/registry/components/editor/settings-dialog.tsx

Lines changed: 173 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -40,28 +40,187 @@ import {
4040
PopoverTrigger,
4141
} from '@/components/ui/popover';
4242
import { cn } from '@/lib/utils';
43-
import { aiChatPlugin } from '@/registry/components/editor/plugins/ai-kit';
43+
44+
import { aiChatPlugin } from './plugins/ai-kit';
4445

4546
interface Model {
4647
label: string;
4748
value: string;
4849
}
4950

5051
export const models: Model[] = [
51-
{ label: 'gpt-4o-mini', value: 'gpt-4o-mini' },
52-
{ label: 'gpt-4o', value: 'gpt-4o' },
53-
{ label: 'gpt-4-turbo', value: 'gpt-4-turbo' },
54-
{ label: 'gpt-4', value: 'gpt-4' },
55-
{ label: 'gpt-3.5-turbo', value: 'gpt-3.5-turbo' },
56-
{ label: 'gpt-3.5-turbo-instruct', value: 'gpt-3.5-turbo-instruct' },
52+
// OpenAI Models
53+
{ label: 'GPT-3.5 Turbo', value: 'openai/gpt-3.5-turbo' },
54+
{ label: 'GPT-3.5 Turbo Instruct', value: 'openai/gpt-3.5-turbo-instruct' },
55+
{ label: 'GPT-4 Turbo', value: 'openai/gpt-4-turbo' },
56+
{ label: 'GPT-4.1', value: 'openai/gpt-4.1' },
57+
{ label: 'GPT-4.1 Mini', value: 'openai/gpt-4.1-mini' },
58+
{ label: 'GPT-4.1 Nano', value: 'openai/gpt-4.1-nano' },
59+
{ label: 'GPT-4o', value: 'openai/gpt-4o' },
60+
{ label: 'GPT-4o Mini', value: 'openai/gpt-4o-mini' },
61+
{ label: 'GPT-5', value: 'openai/gpt-5' },
62+
{ label: 'GPT-5 Codex', value: 'openai/gpt-5-codex' },
63+
{ label: 'GPT-5 Mini', value: 'openai/gpt-5-mini' },
64+
{ label: 'GPT-5 Nano', value: 'openai/gpt-5-nano' },
65+
{ label: 'GPT-OSS 120B', value: 'openai/gpt-oss-120b' },
66+
{ label: 'GPT-OSS 20B', value: 'openai/gpt-oss-20b' },
67+
{ label: 'O1', value: 'openai/o1' },
68+
{ label: 'O3', value: 'openai/o3' },
69+
{ label: 'O3 Mini', value: 'openai/o3-mini' },
70+
{ label: 'O4 Mini', value: 'openai/o4-mini' },
71+
72+
// Google Models
73+
{ label: 'Gemini 2.0 Flash', value: 'google/gemini-2.0-flash' },
74+
{ label: 'Gemini 2.0 Flash Lite', value: 'google/gemini-2.0-flash-lite' },
75+
{ label: 'Gemini 2.5 Flash', value: 'google/gemini-2.5-flash' },
76+
{
77+
label: 'Gemini 2.5 Flash Image Preview',
78+
value: 'google/gemini-2.5-flash-image-preview',
79+
},
80+
{ label: 'Gemini 2.5 Flash Lite', value: 'google/gemini-2.5-flash-lite' },
81+
{ label: 'Gemini 2.5 Pro', value: 'google/gemini-2.5-pro' },
82+
{ label: 'Gemma 2 9B', value: 'google/gemma-2-9b' },
83+
84+
// Alibaba Models
85+
{ label: 'Qwen 3 14B', value: 'alibaba/qwen-3-14b' },
86+
{ label: 'Qwen 3 235B', value: 'alibaba/qwen-3-235b' },
87+
{ label: 'Qwen 3 30B', value: 'alibaba/qwen-3-30b' },
88+
{ label: 'Qwen 3 32B', value: 'alibaba/qwen-3-32b' },
89+
{ label: 'Qwen3 Coder', value: 'alibaba/qwen3-coder' },
90+
{ label: 'Qwen3 Coder Plus', value: 'alibaba/qwen3-coder-plus' },
91+
{ label: 'Qwen3 Max', value: 'alibaba/qwen3-max' },
92+
{ label: 'Qwen3 Max Preview', value: 'alibaba/qwen3-max-preview' },
93+
{
94+
label: 'Qwen3 Next 80B A3B Instruct',
95+
value: 'alibaba/qwen3-next-80b-a3b-instruct',
96+
},
97+
{
98+
label: 'Qwen3 Next 80B A3B Thinking',
99+
value: 'alibaba/qwen3-next-80b-a3b-thinking',
100+
},
101+
{ label: 'Qwen3 VL Instruct', value: 'alibaba/qwen3-vl-instruct' },
102+
{ label: 'Qwen3 VL Thinking', value: 'alibaba/qwen3-vl-thinking' },
103+
104+
// Amazon Models
105+
{ label: 'Nova Lite', value: 'amazon/nova-lite' },
106+
{ label: 'Nova Micro', value: 'amazon/nova-micro' },
107+
{ label: 'Nova Pro', value: 'amazon/nova-pro' },
108+
109+
// Anthropic Models
110+
{ label: 'Claude 3 Haiku', value: 'anthropic/claude-3-haiku' },
111+
{ label: 'Claude 3 Opus', value: 'anthropic/claude-3-opus' },
112+
{ label: 'Claude 3.5 Haiku', value: 'anthropic/claude-3.5-haiku' },
113+
{ label: 'Claude 3.5 Sonnet', value: 'anthropic/claude-3.5-sonnet' },
114+
{ label: 'Claude 3.7 Sonnet', value: 'anthropic/claude-3.7-sonnet' },
115+
{ label: 'Claude Opus 4', value: 'anthropic/claude-opus-4' },
116+
{ label: 'Claude Opus 4.1', value: 'anthropic/claude-opus-4.1' },
117+
{ label: 'Claude Sonnet 4', value: 'anthropic/claude-sonnet-4' },
118+
119+
// Cohere Models
120+
{ label: 'Command A', value: 'cohere/command-a' },
121+
{ label: 'Command R', value: 'cohere/command-r' },
122+
{ label: 'Command R Plus', value: 'cohere/command-r-plus' },
123+
124+
// DeepSeek Models
125+
{ label: 'DeepSeek R1', value: 'deepseek/deepseek-r1' },
126+
{
127+
label: 'DeepSeek R1 Distill Llama 70B',
128+
value: 'deepseek/deepseek-r1-distill-llama-70b',
129+
},
130+
{ label: 'DeepSeek V3', value: 'deepseek/deepseek-v3' },
131+
{ label: 'DeepSeek V3.1', value: 'deepseek/deepseek-v3.1' },
132+
{ label: 'DeepSeek V3.1 Base', value: 'deepseek/deepseek-v3.1-base' },
133+
{ label: 'DeepSeek V3.1 Terminus', value: 'deepseek/deepseek-v3.1-terminus' },
134+
{ label: 'DeepSeek V3.2 Exp', value: 'deepseek/deepseek-v3.2-exp' },
135+
{
136+
label: 'DeepSeek V3.2 Exp Thinking',
137+
value: 'deepseek/deepseek-v3.2-exp-thinking',
138+
},
139+
140+
// Inception Models
141+
{ label: 'Mercury Coder Small', value: 'inception/mercury-coder-small' },
142+
143+
// Meituan Models
144+
{ label: 'LongCat Flash Chat', value: 'meituan/longcat-flash-chat' },
145+
{ label: 'LongCat Flash Thinking', value: 'meituan/longcat-flash-thinking' },
146+
147+
// Meta Models
148+
{ label: 'Llama 3 70B', value: 'meta/llama-3-70b' },
149+
{ label: 'Llama 3 8B', value: 'meta/llama-3-8b' },
150+
{ label: 'Llama 3.1 70B', value: 'meta/llama-3.1-70b' },
151+
{ label: 'Llama 3.1 8B', value: 'meta/llama-3.1-8b' },
152+
{ label: 'Llama 3.2 11B', value: 'meta/llama-3.2-11b' },
153+
{ label: 'Llama 3.2 1B', value: 'meta/llama-3.2-1b' },
154+
{ label: 'Llama 3.2 3B', value: 'meta/llama-3.2-3b' },
155+
{ label: 'Llama 3.2 90B', value: 'meta/llama-3.2-90b' },
156+
{ label: 'Llama 3.3 70B', value: 'meta/llama-3.3-70b' },
157+
{ label: 'Llama 4 Maverick', value: 'meta/llama-4-maverick' },
158+
{ label: 'Llama 4 Scout', value: 'meta/llama-4-scout' },
159+
160+
// Mistral Models
161+
{ label: 'Codestral', value: 'mistral/codestral' },
162+
{ label: 'Devstral Small', value: 'mistral/devstral-small' },
163+
{ label: 'Magistral Medium', value: 'mistral/magistral-medium' },
164+
{ label: 'Magistral Small', value: 'mistral/magistral-small' },
165+
{ label: 'Ministral 3B', value: 'mistral/ministral-3b' },
166+
{ label: 'Ministral 8B', value: 'mistral/ministral-8b' },
167+
{ label: 'Mistral Large', value: 'mistral/mistral-large' },
168+
{ label: 'Mistral Medium', value: 'mistral/mistral-medium' },
169+
{ label: 'Mistral Small', value: 'mistral/mistral-small' },
170+
{ label: 'Mixtral 8x22B Instruct', value: 'mistral/mixtral-8x22b-instruct' },
171+
{ label: 'Pixtral 12B', value: 'mistral/pixtral-12b' },
172+
{ label: 'Pixtral Large', value: 'mistral/pixtral-large' },
173+
174+
// MoonshotAI Models
175+
{ label: 'Kimi K2', value: 'moonshotai/kimi-k2' },
176+
{ label: 'Kimi K2 0905', value: 'moonshotai/kimi-k2-0905' },
177+
{ label: 'Kimi K2 Turbo', value: 'moonshotai/kimi-k2-turbo' },
178+
179+
// Morph Models
180+
{ label: 'Morph V3 Fast', value: 'morph/morph-v3-fast' },
181+
{ label: 'Morph V3 Large', value: 'morph/morph-v3-large' },
182+
183+
// Perplexity Models
184+
{ label: 'Sonar', value: 'perplexity/sonar' },
185+
{ label: 'Sonar Pro', value: 'perplexity/sonar-pro' },
186+
{ label: 'Sonar Reasoning', value: 'perplexity/sonar-reasoning' },
187+
{ label: 'Sonar Reasoning Pro', value: 'perplexity/sonar-reasoning-pro' },
188+
189+
// Stealth Models
190+
{ label: 'Sonoma Dusk Alpha', value: 'stealth/sonoma-dusk-alpha' },
191+
{ label: 'Sonoma Sky Alpha', value: 'stealth/sonoma-sky-alpha' },
192+
193+
// Vercel Models
194+
{ label: 'v0 1.0 MD', value: 'vercel/v0-1.0-md' },
195+
{ label: 'v0 1.5 MD', value: 'vercel/v0-1.5-md' },
196+
197+
// xAI Models
198+
{ label: 'Grok 2', value: 'xai/grok-2' },
199+
{ label: 'Grok 2 Vision', value: 'xai/grok-2-vision' },
200+
{ label: 'Grok 3', value: 'xai/grok-3' },
201+
{ label: 'Grok 3 Fast', value: 'xai/grok-3-fast' },
202+
{ label: 'Grok 3 Mini', value: 'xai/grok-3-mini' },
203+
{ label: 'Grok 3 Mini Fast', value: 'xai/grok-3-mini-fast' },
204+
{ label: 'Grok 4', value: 'xai/grok-4' },
205+
{ label: 'Grok Code Fast 1', value: 'xai/grok-code-fast-1' },
206+
{
207+
label: 'Grok 4 Fast Non-Reasoning',
208+
value: 'xai/grok-4-fast-non-reasoning',
209+
},
210+
{ label: 'Grok 4 Fast Reasoning', value: 'xai/grok-4-fast-reasoning' },
211+
212+
// ZAI Models
213+
{ label: 'GLM 4.5', value: 'zai/glm-4.5' },
214+
{ label: 'GLM 4.5 Air', value: 'zai/glm-4.5-air' },
215+
{ label: 'GLM 4.5V', value: 'zai/glm-4.5v' },
57216
];
58217

59218
export function SettingsDialog() {
60219
const editor = useEditorRef();
61220

62-
const [tempModel, setTempModel] = React.useState(models[0]);
221+
const [tempModel, setTempModel] = React.useState(models[7]);
63222
const [tempKeys, setTempKeys] = React.useState<Record<string, string>>({
64-
openai: '',
223+
aiGatewayApiKey: '',
65224
uploadthing: '',
66225
});
67226
const [showKey, setShowKey] = React.useState<Record<string, boolean>>({});
@@ -78,7 +237,7 @@ export function SettingsDialog() {
78237
...chatOptions,
79238
body: {
80239
...chatOptions.body,
81-
apiKey: tempKeys.openai,
240+
apiKey: tempKeys.aiGatewayApiKey,
82241
model: tempModel.value,
83242
},
84243
});
@@ -92,7 +251,7 @@ export function SettingsDialog() {
92251
...completeOptions,
93252
body: {
94253
...completeOptions.body,
95-
apiKey: tempKeys.openai,
254+
apiKey: tempKeys.aiGatewayApiKey,
96255
model: tempModel.value,
97256
},
98257
});
@@ -120,8 +279,8 @@ export function SettingsDialog() {
120279
<a
121280
className="flex items-center"
122281
href={
123-
service === 'openai'
124-
? 'https://platform.openai.com/api-keys'
282+
service === 'aiGatewayApiKey'
283+
? 'https://vercel.com/docs/ai-gateway'
125284
: 'https://uploadthing.com/dashboard'
126285
}
127286
rel="noopener noreferrer"
@@ -197,7 +356,7 @@ export function SettingsDialog() {
197356
</div>
198357

199358
<div className="space-y-4">
200-
{renderApiKeyInput('openai', 'OpenAI API key')}
359+
{renderApiKeyInput('aiGatewayApiKey', 'AI Gateway API Key')}
201360

202361
<div className="group relative">
203362
<label

apps/www/src/registry/components/editor/use-chat.ts

Lines changed: 13 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -55,7 +55,19 @@ export const useChat = () => {
5555
api: options.api || '/api/ai/command',
5656
// Mock the API response. Remove it when you implement the route /api/ai/command
5757
fetch: async (input, init) => {
58-
const res = await fetch(input, init);
58+
const bodyOptions = editor.getOptions(aiChatPlugin).chatOptions?.body;
59+
60+
const initBody = JSON.parse(init?.body as string);
61+
62+
const body = {
63+
...initBody,
64+
...bodyOptions,
65+
};
66+
67+
const res = await fetch(input, {
68+
...init,
69+
body: JSON.stringify(body),
70+
});
5971

6072
if (!res.ok) {
6173
let sample: 'comment' | 'markdown' | 'mdx' | null = null;
Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,2 @@
1-
OPENAI_API_KEY=
1+
AI_GATEWAY_API_KEY=
22
UPLOADTHING_TOKEN=

0 commit comments

Comments (0)