Skip to content

Commit 2dabe2c

Browse files
test: add regression tests for issues #287 and #341 (#380)
* test: add regression tests for issues #287 and #341 - Issue #287: Tool calls with missing arguments field Tests that tool calls work correctly when the arguments field is omitted by upstream providers for tools with no parameters - Issue #341: Cache control only applies to last text part Tests that message-level cache_control works correctly with multi-part messages on Anthropic models Co-Authored-By: Robert Yeakel <robert.yeakel@openrouter.ai> * fix: use Anthropic Haiku model for issue #287 test Updated to use anthropic/claude-3.5-haiku as mentioned in the original issue context (Anthropic Haiku was noted as potentially omitting the arguments field for tools with no parameters). Co-Authored-By: Robert Yeakel <robert.yeakel@openrouter.ai> --------- Co-authored-by: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com>
1 parent 3fb39fa commit 2dabe2c

File tree

3 files changed

+227
-0
lines changed

3 files changed

+227
-0
lines changed

.changeset/honest-pots-drum.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,2 @@
1+
---
2+
---
Lines changed: 87 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,87 @@
1+
/**
 * Regression test for GitHub issue #287
 * https://github.com/OpenRouterTeam/ai-sdk-provider/issues/287
 *
 * Reported error: AI_TypeValidationError when tool calls have missing arguments field
 *
 * Some upstream providers may omit the `arguments` field in tool calls when there
 * are no arguments to pass. This caused validation errors because the schema
 * required `arguments` to be a string.
 *
 * This test verifies that tool calls work correctly even when the tool has no
 * parameters (and the upstream provider might omit the arguments field).
 */
import { generateText, tool } from 'ai';
import { describe, expect, it, vi } from 'vitest';
import { z } from 'zod/v4';
import { createOpenRouter } from '@/src';

// Live-API tests: allow up to a minute per test for the round trip.
vi.setConfig({
  testTimeout: 60_000,
});

describe('Issue #287: Tool calls with missing arguments field', () => {
  const openrouter = createOpenRouter({
    apiKey: process.env.OPENROUTER_API_KEY,
    // Fall back to the public OpenRouter endpoint when OPENROUTER_API_BASE is
    // unset; otherwise the template literal would yield the literal string
    // "undefined/api/v1" and every request would fail with a confusing error.
    baseUrl: `${process.env.OPENROUTER_API_BASE ?? 'https://openrouter.ai'}/api/v1`,
  });

  // Use Anthropic Haiku model - mentioned in original issue context as potentially
  // omitting the arguments field for tools with no parameters
  const model = openrouter('anthropic/claude-3.5-haiku');

  it('should handle tool with no parameters', async () => {
    // Tool with no parameters - some providers may omit the arguments field entirely
    const getCurrentTime = tool({
      description: 'Gets the current time',
      inputSchema: z.object({}),
      execute: async () => {
        return { time: new Date().toISOString() };
      },
    });

    const response = await generateText({
      model,
      system:
        'You are a helpful assistant. Always use the getCurrentTime tool when asked about time.',
      prompt: 'What time is it right now? Use the getCurrentTime tool.',
      tools: { getCurrentTime },
      toolChoice: 'required',
    });

    // Should complete without AI_TypeValidationError
    expect(response.text).toBeDefined();
    expect(response.finishReason).toBeDefined();

    // Verify tool was called. Use `??` (not `||`) so only a missing/undefined
    // toolCalls array falls back to [] — truthiness is not the intent here.
    const toolCalls = response.steps.flatMap((step) => step.toolCalls ?? []);
    expect(toolCalls.length).toBeGreaterThan(0);
    expect(toolCalls[0]?.toolName).toBe('getCurrentTime');
  });

  it('should handle tool with optional parameters where none are provided', async () => {
    // Tool with optional parameters - model might not provide any arguments
    const greet = tool({
      description: 'Greets the user',
      inputSchema: z.object({
        name: z.string().optional().describe('Optional name to greet'),
      }),
      execute: async ({ name }) => {
        return { greeting: name ? `Hello, ${name}!` : 'Hello!' };
      },
    });

    const response = await generateText({
      model,
      system:
        'You are a helpful assistant. Use the greet tool when asked to say hello.',
      prompt: 'Just say hello using the greet tool. No name needed.',
      tools: { greet },
      toolChoice: 'required',
    });

    // Should complete without AI_TypeValidationError
    expect(response.text).toBeDefined();
    expect(response.finishReason).toBeDefined();
  });
});
Lines changed: 138 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,138 @@
1+
/**
 * Regression test for GitHub issue #341
 * https://github.com/OpenRouterTeam/ai-sdk-provider/issues/341
 *
 * Reported error: Cache control bug exceeding Anthropic's 4-segment limit
 *
 * When message-level cache_control was set, it was being applied to ALL parts
 * in a multi-part message, which could exceed provider cache segment limits.
 *
 * The fix ensures message-level cache_control only applies to the last text part,
 * while part-specific cache_control still takes precedence for all part types.
 *
 * This test verifies that cache control works correctly with multi-part messages
 * on Anthropic models.
 */
import { generateText } from 'ai';
import { describe, expect, it, vi } from 'vitest';
import { createOpenRouter } from '@/src';

// Live-API tests making multiple sequential requests; allow two minutes each.
vi.setConfig({
  testTimeout: 120_000,
});

describe('Issue #341: Cache control only applies to last text part', () => {
  const openrouter = createOpenRouter({
    apiKey: process.env.OPENROUTER_API_KEY,
    // Fall back to the public OpenRouter endpoint when OPENROUTER_API_BASE is
    // unset; otherwise the template literal would yield the literal string
    // "undefined/api/v1" and every request would fail with a confusing error.
    baseUrl: `${process.env.OPENROUTER_API_BASE ?? 'https://openrouter.ai'}/api/v1`,
  });

  // Use Anthropic model which supports cache control
  const model = openrouter('anthropic/claude-3.5-haiku');

  // Long system prompt to meet caching threshold
  const longSystemPrompt = `You are a helpful assistant. Here is some context that should be cached:

${Array(50)
  .fill(
    'This is padding text to ensure the prompt meets the minimum token threshold for automatic caching. ' +
      'Automatic prompt caching requires a minimum number of tokens in the prompt prefix. ' +
      'This text is repeated multiple times to reach that threshold. ',
  )
  .join('\n')}

Remember to be helpful and concise in your responses.`;

  it('should work with message-level cache control on multi-part user message', async () => {
    // Multi-part message with message-level cache control
    // The fix ensures only the last text part gets cache_control
    const response = await generateText({
      model,
      messages: [
        {
          role: 'system',
          content: longSystemPrompt,
        },
        {
          role: 'user',
          content: [
            { type: 'text', text: 'First part of my question.' },
            { type: 'text', text: 'Second part: What is 2+2?' },
          ],
          providerOptions: {
            anthropic: {
              cacheControl: { type: 'ephemeral' },
            },
          },
        },
      ],
    });

    // Should complete without errors about exceeding cache segment limits
    expect(response.text).toBeDefined();
    expect(response.text.length).toBeGreaterThan(0);
    expect(response.finishReason).toBeDefined();
  });

  it('should work with explicit cache control on system message', async () => {
    // Test cache control on system message
    const response = await generateText({
      model,
      messages: [
        {
          role: 'system',
          content: longSystemPrompt,
          providerOptions: {
            anthropic: {
              cacheControl: { type: 'ephemeral' },
            },
          },
        },
        {
          role: 'user',
          content: 'What is the capital of France? Answer briefly.',
        },
      ],
    });

    expect(response.text).toBeDefined();
    expect(response.text.length).toBeGreaterThan(0);
    expect(response.finishReason).toBeDefined();
  });

  it('should handle multiple requests with caching enabled', async () => {
    // Make multiple requests to verify caching works across requests
    const responses = [];

    // Sequential on purpose: the second request should hit the cache that the
    // first request created, so they must not run in parallel.
    for (let i = 0; i < 2; i++) {
      const response = await generateText({
        model,
        messages: [
          {
            role: 'system',
            content: longSystemPrompt,
            providerOptions: {
              anthropic: {
                cacheControl: { type: 'ephemeral' },
              },
            },
          },
          {
            role: 'user',
            content: `Request ${i + 1}: What is ${i + 1} + ${i + 1}? Answer with just the number.`,
          },
        ],
      });

      expect(response.text).toBeDefined();
      expect(response.finishReason).toBeDefined();
      responses.push(response);
    }

    // Both requests should complete successfully
    expect(responses.length).toBe(2);
    responses.forEach((r) => {
      expect(r.text.length).toBeGreaterThan(0);
    });
  });
});

0 commit comments

Comments (0)