Skip to content

Commit e0f813b

Browse files
chore: migrate to ai v5 and update relevant code (#3652)
* build(deps): bump ai from 4.3.19 to 5.0.55 Bumps [ai](https://github.com/vercel/ai) from 4.3.19 to 5.0.55. - [Release notes](https://github.com/vercel/ai/releases) - [Changelog](https://github.com/vercel/ai/blob/main/CHANGELOG.md) - [Commits](https://github.com/vercel/ai/compare/ai@4.3.19...ai@5.0.55) --- updated-dependencies: - dependency-name: ai dependency-version: 5.0.55 dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] <[email protected]> * chore: migrate to ai and update relevant code Signed-off-by: Brian <[email protected]> * chore: added msw package that's needed Signed-off-by: Brian <[email protected]> * fix: addressed typecheck Signed-off-by: Brian <[email protected]> --------- Signed-off-by: dependabot[bot] <[email protected]> Signed-off-by: Brian <[email protected]> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
1 parent 8e7bb2b commit e0f813b

File tree

7 files changed

+911
-2149
lines changed

7 files changed

+911
-2149
lines changed

package.json

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -67,6 +67,7 @@
6767
"globals": "^16.4.0",
6868
"husky": "^9.1.7",
6969
"lint-staged": "^16.2.3",
70+
"msw": "^2.11.3",
7071
"prettier": "^3.5.3",
7172
"prettier-plugin-svelte": "^3.4.0",
7273
"svelte-check": "^4.3.2",
@@ -83,7 +84,8 @@
8384
]
8485
},
8586
"dependencies": {
86-
"js-yaml": "^4.1.0"
87+
"js-yaml": "^4.1.0",
88+
"zod": "^4.1.11"
8789
},
8890
"scarfSettings": {
8991
"enabled": false

packages/backend/package.json

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -110,10 +110,10 @@
110110
"typecheck": "pnpm run generate && tsc --noEmit"
111111
},
112112
"dependencies": {
113-
"@ai-sdk/openai-compatible": "^0.2.16",
113+
"@ai-sdk/openai-compatible": "^1.0.19",
114114
"@huggingface/gguf": "^0.3.2",
115115
"@huggingface/hub": "^2.6.10",
116-
"ai": "^4.3.19",
116+
"ai": "^5.0.55",
117117
"express": "^4.21.2",
118118
"express-openapi-validator": "^5.6.0",
119119
"isomorphic-git": "^1.34.0",
@@ -130,7 +130,8 @@
130130
},
131131
"devDependencies": {
132132
"@podman-desktop/api": "1.13.0-202409181313-78725a6565",
133-
"@ai-sdk/provider-utils": "^2.2.8",
133+
"@ai-sdk/provider": "^2.0.0",
134+
"@ai-sdk/provider-utils": "^3.0.0",
134135
"@rollup/plugin-replace": "^6.0.2",
135136
"@types/express": "^4.17.21",
136137
"@types/js-yaml": "^4.0.9",

packages/backend/src/managers/playground/aiSdk.spec.ts

Lines changed: 58 additions & 59 deletions
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ import type {
2828
PendingChat,
2929
UserChat,
3030
} from '@shared/models/IPlaygroundMessage';
31-
import type { LanguageModelV1, LanguageModelV1CallWarning, LanguageModelV1StreamPart } from '@ai-sdk/provider';
31+
import type { LanguageModelV2, LanguageModelV2CallWarning, LanguageModelV2StreamPart } from '@ai-sdk/provider';
3232
import { convertArrayToReadableStream } from '@ai-sdk/provider-utils/test';
3333
import { ConversationRegistry } from '../../registries/ConversationRegistry';
3434
import type { RpcExtension } from '@shared/messages/MessageProxy';
@@ -99,7 +99,7 @@ describe('aiSdk', () => {
9999
type: 'tool-call',
100100
toolCallId: 'call-001',
101101
toolName: 'tool-1',
102-
args: {},
102+
input: {},
103103
},
104104
],
105105
},
@@ -110,7 +110,7 @@ describe('aiSdk', () => {
110110
type: 'tool-result',
111111
toolCallId: 'call-001',
112112
toolName: 'tool-1',
113-
result: {
113+
output: {
114114
content: [{ type: 'text', text: 'Success!!!' }],
115115
},
116116
},
@@ -150,7 +150,7 @@ describe('aiSdk', () => {
150150
expect.objectContaining({
151151
model: expect.anything(),
152152
temperature: 42,
153-
maxTokens: 37,
153+
maxOutputTokens: 37,
154154
topP: 13,
155155
abortSignal: expect.any(AbortSignal),
156156
messages: expect.any(Array),
@@ -178,10 +178,10 @@ describe('aiSdk', () => {
178178
describe('with stream error', () => {
179179
beforeEach(async () => {
180180
// eslint-disable-next-line sonarjs/no-nested-functions
181-
const doStream: LanguageModelV1['doStream'] = async () => {
181+
const doStream: LanguageModelV2['doStream'] = async () => {
182182
throw new Error('The stream is kaput.');
183183
};
184-
const model = new MockLanguageModelV1({ doStream });
184+
const model = new MockLanguageModelV2({ doStream });
185185
await new AiStreamProcessor(conversationId, conversationRegistry).stream(model).consumeStream();
186186
});
187187
test('appends a single message', () => {
@@ -194,7 +194,7 @@ describe('aiSdk', () => {
194194
});
195195
});
196196
describe('with single message stream', () => {
197-
let model: LanguageModelV1;
197+
let model: LanguageModelV2;
198198
beforeEach(async () => {
199199
model = createTestModel({
200200
stream: convertArrayToReadableStream([
@@ -204,10 +204,10 @@ describe('aiSdk', () => {
204204
modelId: 'mock-model-id',
205205
timestamp: new Date(0),
206206
},
207-
{ type: 'text-delta', textDelta: 'Greetings' },
208-
{ type: 'text-delta', textDelta: ' professor ' },
209-
{ type: 'text-delta', textDelta: `Falken` },
210-
{ type: 'finish', finishReason: 'stop', usage: { completionTokens: 133, promptTokens: 7 } },
207+
{ type: 'text-delta', id: 'id-1', delta: 'Greetings' },
208+
{ type: 'text-delta', id: 'id-2', delta: ' professor ' },
209+
{ type: 'text-delta', id: 'id-3', delta: `Falken` },
210+
{ type: 'finish', finishReason: 'stop', usage: { outputTokens: 133, inputTokens: 7, totalTokens: 140 } },
211211
]),
212212
});
213213
await new AiStreamProcessor(conversationId, conversationRegistry).stream(model).consumeStream();
@@ -230,53 +230,54 @@ describe('aiSdk', () => {
230230
});
231231
});
232232
describe('with wrapped generated multiple messages as stream', () => {
233-
let model: LanguageModelV1;
233+
let model: LanguageModelV2;
234234
let tools: ToolSet;
235235
let generateStep: number;
236236

237237
beforeEach(async () => {
238238
generateStep = 0;
239239
model = wrapLanguageModel({
240-
model: new MockLanguageModelV1({
241-
// This is the output that the simulateStreamingMiddleware expects for tool-calling
240+
model: new MockLanguageModelV2({
242241
doGenerate: (async () => {
243-
// First call to the model returns a list of tool calls
244242
if (generateStep++ === 0) {
245243
return {
246-
rawCall: { rawPrompt: {}, rawSettings: {} },
247-
usage: { promptTokens: 1, completionTokens: 1 },
248-
text: '',
249-
finishReason: 'tool-calls',
250-
toolCalls: [
244+
content: [
251245
{
252-
toolCallType: 'function',
246+
type: 'tool-call',
253247
toolCallId: 'call-001',
254248
toolName: 'tool-1',
255-
args: '',
249+
input: {},
256250
},
257251
{
258-
toolCallType: 'function',
252+
type: 'tool-call',
259253
toolCallId: 'call-002',
260254
toolName: 'tool-1',
261-
args: '',
255+
input: {},
262256
},
263257
],
258+
finishReason: 'tool-calls',
259+
usage: { inputTokens: 1, outputTokens: 1, totalTokens: 2 },
260+
warnings: [],
264261
};
265262
}
266-
// Second call to the model returns the final human-readable result
267263
return {
268-
rawCall: { rawPrompt: {}, rawSettings: {} },
264+
content: [
265+
{
266+
type: 'text',
267+
text: 'These are the results of you functions: huge success!',
268+
},
269+
],
269270
finishReason: 'stop',
270-
usage: { promptTokens: 133, completionTokens: 7 },
271-
text: 'These are the results of you functions: huge success!',
271+
usage: { inputTokens: 133, outputTokens: 7, totalTokens: 140 },
272+
warnings: [],
272273
};
273-
}) as LanguageModelV1['doGenerate'],
274+
}) as LanguageModelV2['doGenerate'],
274275
}),
275276
middleware: simulateStreamingMiddleware(),
276277
});
277278
tools = {
278279
'tool-1': tool({
279-
parameters: jsonSchema({ type: 'object' }),
280+
inputSchema: jsonSchema({ type: 'object' }),
280281
execute: async () => 'successful result!',
281282
}),
282283
};
@@ -303,19 +304,19 @@ describe('aiSdk', () => {
303304
{ index: 2, id: '4', toolCallId: 'call-002' },
304305
])(`sets tool result message at $index for $toolCallId`, ({ index, id, toolCallId }) => {
305306
const message = conversationRegistry.get(conversationId).messages[index] as AssistantChat;
306-
expect(message).toEqual({
307-
id,
308-
timestamp: expect.any(Number),
309-
completed: expect.any(Number),
310-
role: 'assistant',
311-
content: {
312-
type: 'tool-call',
313-
toolCallId,
314-
toolName: 'tool-1',
315-
args: {},
316-
result: 'successful result!',
317-
},
307+
expect(message.id).toEqual(id);
308+
expect(message.timestamp).toBeDefined();
309+
expect(message.role).toEqual('assistant');
310+
expect(message.content).toMatchObject({
311+
type: 'tool-call',
312+
toolCallId,
313+
toolName: 'tool-1',
314+
args: {},
318315
});
316+
if (message.content && typeof message.content === 'object' && 'result' in message.content) {
317+
expect(message.content.result).toEqual('successful result!');
318+
expect(message.completed).toBeDefined();
319+
}
319320
});
320321
test('appends final assistant message', () => {
321322
const message = conversationRegistry.get(conversationId).messages[3] as AssistantChat;
@@ -324,30 +325,28 @@ describe('aiSdk', () => {
324325
});
325326
test('setsUsage', async () => {
326327
const conversation = conversationRegistry.get(conversationId) as Conversation;
327-
expect(conversation?.usage?.completion_tokens).toEqual(8);
328-
expect(conversation?.usage?.prompt_tokens).toEqual(134);
328+
expect(conversation?.usage?.completion_tokens).toEqual(7);
329+
expect(conversation?.usage?.prompt_tokens).toEqual(133);
329330
});
330331
});
331332
});
332333
});
333334

334-
export class MockLanguageModelV1 implements LanguageModelV1 {
335-
readonly specificationVersion = 'v1';
336-
readonly provider: LanguageModelV1['provider'];
337-
readonly modelId: LanguageModelV1['modelId'];
335+
export class MockLanguageModelV2 implements LanguageModelV2 {
336+
readonly specificationVersion = 'v2';
337+
readonly provider: LanguageModelV2['provider'];
338+
readonly modelId: LanguageModelV2['modelId'];
338339

339-
supportsUrl: LanguageModelV1['supportsUrl'];
340-
doGenerate: LanguageModelV1['doGenerate'];
341-
doStream: LanguageModelV1['doStream'];
340+
supportedUrls: LanguageModelV2['supportedUrls'] = {};
341+
doGenerate: LanguageModelV2['doGenerate'];
342+
doStream: LanguageModelV2['doStream'];
342343

343-
readonly defaultObjectGenerationMode: LanguageModelV1['defaultObjectGenerationMode'];
344-
readonly supportsStructuredOutputs: LanguageModelV1['supportsStructuredOutputs'];
345344
constructor({
346345
doStream = notImplemented,
347346
doGenerate = notImplemented,
348347
}: {
349-
doStream?: LanguageModelV1['doStream'];
350-
doGenerate?: LanguageModelV1['doGenerate'];
348+
doStream?: LanguageModelV2['doStream'];
349+
doGenerate?: LanguageModelV2['doGenerate'];
351350
}) {
352351
this.provider = 'mock-model-provider';
353352
this.modelId = 'mock-model-id';
@@ -367,13 +366,13 @@ export function createTestModel({
367366
request = undefined,
368367
warnings,
369368
}: {
370-
stream?: ReadableStream<LanguageModelV1StreamPart>;
369+
stream?: ReadableStream<LanguageModelV2StreamPart>;
371370
rawResponse?: { headers: Record<string, string> };
372371
rawCall?: { rawPrompt: string; rawSettings: Record<string, unknown> };
373372
request?: { body: string };
374-
warnings?: LanguageModelV1CallWarning[];
375-
} = {}): LanguageModelV1 {
376-
return new MockLanguageModelV1({
373+
warnings?: LanguageModelV2CallWarning[];
374+
} = {}): LanguageModelV2 {
375+
return new MockLanguageModelV2({
377376
doStream: async () => ({ stream, rawCall, rawResponse, request, warnings }),
378377
});
379378
}

0 commit comments

Comments
 (0)