Commit b704d3f

Add tests
1 parent 4d16870 commit b704d3f

File tree

3 files changed: +862 -0 lines changed

tests/ai_sdk_compatibility.test.ts

Lines changed: 264 additions & 0 deletions
@@ -0,0 +1,264 @@
/**
 * Tests for AI SDK compatibility with MCPAgent streamEvents()
 *
 * These tests verify that streamEvents() can be used with the AI SDK's
 * LangChainAdapter for creating data stream responses compatible with
 * Vercel AI SDK hooks like useCompletion and useChat.
 */

import type { StreamEvent } from '../index.js'
import { LangChainAdapter } from 'ai'
import { describe, expect, it } from 'vitest'

// Mock an async generator that simulates our streamEvents output
async function* mockStreamEvents(): AsyncGenerator<StreamEvent, void, void> {
  // Simulate typical events from streamEvents
  yield {
    event: 'on_chain_start',
    name: 'AgentExecutor',
    data: { input: { input: 'test query' } },
  } as StreamEvent

  yield {
    event: 'on_chat_model_stream',
    name: 'ChatAnthropic',
    data: { chunk: { content: 'Hello' } },
  } as StreamEvent

  yield {
    event: 'on_chat_model_stream',
    name: 'ChatAnthropic',
    data: { chunk: { content: ' world' } },
  } as StreamEvent

  yield {
    event: 'on_chat_model_stream',
    name: 'ChatAnthropic',
    data: { chunk: { content: '!' } },
  } as StreamEvent

  yield {
    event: 'on_tool_start',
    name: 'test_tool',
    data: { input: { query: 'test' } },
  } as StreamEvent

  yield {
    event: 'on_tool_end',
    name: 'test_tool',
    data: { output: 'Tool executed successfully' },
  } as StreamEvent

  yield {
    event: 'on_chain_end',
    name: 'AgentExecutor',
    data: { output: 'Hello world!' },
  } as StreamEvent
}

// Function to convert streamEvents to a format compatible with AI SDK
async function* streamEventsToAISDK(
  streamEvents: AsyncGenerator<StreamEvent, void, void>,
): AsyncGenerator<string, void, void> {
  for await (const event of streamEvents) {
    // Only yield the actual content tokens from chat model streams
    if (event.event === 'on_chat_model_stream' && event.data?.chunk?.content) {
      yield event.data.chunk.content
    }
  }
}

// Alternative adapter that yields complete content at the end
async function* streamEventsToCompleteContent(
  streamEvents: AsyncGenerator<StreamEvent, void, void>,
): AsyncGenerator<string, void, void> {
  let fullContent = ''

  for await (const event of streamEvents) {
    if (event.event === 'on_chat_model_stream' && event.data?.chunk?.content) {
      fullContent += event.data.chunk.content
    }
    // For tool events, we could add additional formatting
    else if (event.event === 'on_tool_start') {
      // Could add tool start indicators if needed
    }
    else if (event.event === 'on_tool_end') {
      // Could add tool completion indicators if needed
    }
  }

  // Yield the complete content at the end
  if (fullContent) {
    yield fullContent
  }
}

describe('AI SDK Compatibility', () => {
  it('should convert streamEvents to AI SDK compatible stream', async () => {
    const mockEvents = mockStreamEvents()
    const aiSDKStream = streamEventsToAISDK(mockEvents)

    const tokens: string[] = []
    for await (const token of aiSDKStream) {
      tokens.push(token)
    }

    expect(tokens).toEqual(['Hello', ' world', '!'])
  })

  it('should work with LangChainAdapter.toDataStreamResponse', async () => {
    const mockEvents = mockStreamEvents()
    const aiSDKStream = streamEventsToAISDK(mockEvents)

    // Convert async generator to ReadableStream for AI SDK compatibility
    const readableStream = new ReadableStream({
      async start(controller) {
        try {
          for await (const token of aiSDKStream) {
            controller.enqueue(token)
          }
          controller.close()
        }
        catch (error) {
          controller.error(error)
        }
      },
    })

    // Test that we can create a data stream response
    const response = LangChainAdapter.toDataStreamResponse(readableStream)

    expect(response).toBeInstanceOf(Response)
    expect(response.headers.get('Content-Type')).toBe('text/plain; charset=utf-8')
  })

  it('should convert streamEvents to complete content stream', async () => {
    const mockEvents = mockStreamEvents()
    const contentStream = streamEventsToCompleteContent(mockEvents)

    const content: string[] = []
    for await (const chunk of contentStream) {
      content.push(chunk)
    }

    expect(content).toEqual(['Hello world!'])
  })

  it('should handle empty streams gracefully', async () => {
    async function* emptyStreamEvents(): AsyncGenerator<StreamEvent, void, void> {
      // Empty generator
    }

    const emptyEvents = emptyStreamEvents()
    const aiSDKStream = streamEventsToAISDK(emptyEvents)

    const tokens: string[] = []
    for await (const token of aiSDKStream) {
      tokens.push(token)
    }

    expect(tokens).toEqual([])
  })

  it('should filter non-content events correctly', async () => {
    async function* mixedEvents(): AsyncGenerator<StreamEvent, void, void> {
      yield {
        event: 'on_chain_start',
        name: 'Test',
        data: { input: 'test' },
      } as StreamEvent

      yield {
        event: 'on_chat_model_stream',
        name: 'ChatModel',
        data: { chunk: { content: 'Content' } },
      } as StreamEvent

      yield {
        event: 'on_tool_start',
        name: 'Tool',
        data: { input: 'test' },
      } as StreamEvent

      yield {
        event: 'on_chat_model_stream',
        name: 'ChatModel',
        data: { chunk: { content: ' token' } },
      } as StreamEvent

      yield {
        event: 'on_chain_end',
        name: 'Test',
        data: { output: 'result' },
      } as StreamEvent
    }

    const events = mixedEvents()
    const aiSDKStream = streamEventsToAISDK(events)

    const tokens: string[] = []
    for await (const token of aiSDKStream) {
      tokens.push(token)
    }

    expect(tokens).toEqual(['Content', ' token'])
  })

  it('should create readable stream from streamEvents', async () => {
    const mockEvents = mockStreamEvents()

    // Create a ReadableStream from our async generator
    const readableStream = new ReadableStream({
      async start(controller) {
        try {
          for await (const event of streamEventsToAISDK(mockEvents)) {
            controller.enqueue(new TextEncoder().encode(event))
          }
          controller.close()
        }
        catch (error) {
          controller.error(error)
        }
      },
    })

    expect(readableStream).toBeInstanceOf(ReadableStream)

    // Test that we can read from the stream
    const reader = readableStream.getReader()
    const decoder = new TextDecoder()

    const chunks: string[] = []
    while (true) {
      const { done, value } = await reader.read()
      if (done)
        break
      chunks.push(decoder.decode(value))
    }

    expect(chunks).toEqual(['Hello', ' world', '!'])
  })
})

// Convert async generator to ReadableStream for AI SDK compatibility
function createReadableStreamFromGenerator(
  generator: AsyncGenerator<string, void, void>,
): ReadableStream<string> {
  return new ReadableStream({
    async start(controller) {
      try {
        for await (const chunk of generator) {
          controller.enqueue(chunk)
        }
        controller.close()
      }
      catch (error) {
        controller.error(error)
      }
    },
  })
}

// Export the adapter functions for use in examples
export { createReadableStreamFromGenerator, streamEventsToAISDK, streamEventsToCompleteContent }
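
For context, the sketch below shows one way the exported adapters could be wired into a server route that returns a data stream response for the Vercel AI SDK hooks mentioned in the file header. It is a minimal, hypothetical example: the `agent` value and its `streamEvents()` signature are assumed rather than taken from this commit, and the import paths and route shape are illustrative only.

import type { StreamEvent } from '../index.js'
import { LangChainAdapter } from 'ai'
import { createReadableStreamFromGenerator, streamEventsToAISDK } from '../tests/ai_sdk_compatibility.test.js'

// Assumption: an MCPAgent instance constructed elsewhere; only the shape of its
// streamEvents() output (the StreamEvent objects the tests simulate) is relied on here.
declare const agent: { streamEvents: (query: string) => AsyncGenerator<StreamEvent, void, void> }

// Hypothetical HTTP route handler; the request body shape is illustrative.
export async function POST(request: Request): Promise<Response> {
  const { prompt } = await request.json() as { prompt: string }

  // Reduce the raw LangChain events to plain text tokens,
  const tokenStream = streamEventsToAISDK(agent.streamEvents(prompt))

  // bridge the generator to a ReadableStream, and let the AI SDK adapter
  // build a Response that useCompletion / useChat can consume.
  return LangChainAdapter.toDataStreamResponse(
    createReadableStreamFromGenerator(tokenStream),
  )
}

On the client, the resulting Response is what useCompletion or useChat would read when pointed at such a route.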
