Skip to content

Commit 8e53142

Browse files
committed
fix: swap E2E test model to anthropic/claude-haiku-4.5
meta-llama/llama-3.2-1b-instruct routes to Cloudflare, which doesn't support certain response_format types, causing test failures.
1 parent b659e63 commit 8e53142

File tree

3 files changed

+53
-53
lines changed

3 files changed

+53
-53
lines changed

tests/e2e/call-model.test.ts

Lines changed: 37 additions & 37 deletions
Original file line numberDiff line numberDiff line change
@@ -54,7 +54,7 @@ describe('callModel E2E Tests', () => {
5454
describe('Chat-style messages support', () => {
5555
it('should accept chat-style Message array as input', async () => {
5656
const response = client.callModel({
57-
model: 'meta-llama/llama-3.2-1b-instruct',
57+
model: 'anthropic/claude-haiku-4.5',
5858
input: fromChatMessages([
5959
{
6060
role: 'system',
@@ -76,7 +76,7 @@ describe('callModel E2E Tests', () => {
7676

7777
it('should handle multi-turn chat-style conversation', async () => {
7878
const response = client.callModel({
79-
model: 'meta-llama/llama-3.2-1b-instruct',
79+
model: 'anthropic/claude-haiku-4.5',
8080
input: fromChatMessages([
8181
{
8282
role: 'user',
@@ -101,7 +101,7 @@ describe('callModel E2E Tests', () => {
101101

102102
it('should handle system message in chat-style input', async () => {
103103
const response = client.callModel({
104-
model: 'meta-llama/llama-3.2-1b-instruct',
104+
model: 'anthropic/claude-haiku-4.5',
105105
input: fromChatMessages([
106106
{
107107
role: 'system',
@@ -213,7 +213,7 @@ describe('callModel E2E Tests', () => {
213213
];
214214

215215
const response = client.callModel({
216-
model: 'meta-llama/llama-3.2-1b-instruct',
216+
model: 'anthropic/claude-haiku-4.5',
217217
input: fromClaudeMessages(claudeMessages),
218218
});
219219

@@ -238,7 +238,7 @@ describe('callModel E2E Tests', () => {
238238
];
239239

240240
const response = client.callModel({
241-
model: 'meta-llama/llama-3.2-1b-instruct',
241+
model: 'anthropic/claude-haiku-4.5',
242242
input: fromClaudeMessages(claudeMessages),
243243
});
244244

@@ -266,7 +266,7 @@ describe('callModel E2E Tests', () => {
266266
];
267267

268268
const response = client.callModel({
269-
model: 'meta-llama/llama-3.2-1b-instruct',
269+
model: 'anthropic/claude-haiku-4.5',
270270
input: fromClaudeMessages(claudeMessages),
271271
});
272272

@@ -294,7 +294,7 @@ describe('callModel E2E Tests', () => {
294294
];
295295

296296
const response = client.callModel({
297-
model: 'meta-llama/llama-3.2-1b-instruct',
297+
model: 'anthropic/claude-haiku-4.5',
298298
input: fromClaudeMessages(claudeMessages),
299299
});
300300

@@ -308,7 +308,7 @@ describe('callModel E2E Tests', () => {
308308
describe('response.text - Text extraction', () => {
309309
it('should successfully get text from a response', async () => {
310310
const response = client.callModel({
311-
model: 'meta-llama/llama-3.2-1b-instruct',
311+
model: 'anthropic/claude-haiku-4.5',
312312
input: fromChatMessages([
313313
{
314314
role: 'user',
@@ -327,7 +327,7 @@ describe('callModel E2E Tests', () => {
327327

328328
it('should handle multi-turn conversations', async () => {
329329
const response = client.callModel({
330-
model: 'meta-llama/llama-3.2-1b-instruct',
330+
model: 'anthropic/claude-haiku-4.5',
331331
input: fromChatMessages([
332332
{
333333
role: 'user',
@@ -354,7 +354,7 @@ describe('callModel E2E Tests', () => {
354354
describe('response.message - Complete message extraction', () => {
355355
it('should successfully get a complete message from response', async () => {
356356
const response = client.callModel({
357-
model: 'meta-llama/llama-3.2-1b-instruct',
357+
model: 'anthropic/claude-haiku-4.5',
358358
input: fromChatMessages([
359359
{
360360
role: 'user',
@@ -388,7 +388,7 @@ describe('callModel E2E Tests', () => {
388388

389389
it('should have proper message structure', async () => {
390390
const response = client.callModel({
391-
model: 'meta-llama/llama-3.2-1b-instruct',
391+
model: 'anthropic/claude-haiku-4.5',
392392
input: fromChatMessages([
393393
{
394394
role: 'user',
@@ -449,7 +449,7 @@ describe('callModel E2E Tests', () => {
449449
describe('response.textStream - Streaming text deltas', () => {
450450
it('should successfully stream text deltas', async () => {
451451
const response = client.callModel({
452-
model: 'meta-llama/llama-3.2-1b-instruct',
452+
model: 'anthropic/claude-haiku-4.5',
453453
input: fromChatMessages([
454454
{
455455
role: 'user',
@@ -474,7 +474,7 @@ describe('callModel E2E Tests', () => {
474474

475475
it('should stream progressively without waiting for completion', async () => {
476476
const response = client.callModel({
477-
model: 'meta-llama/llama-3.2-1b-instruct',
477+
model: 'anthropic/claude-haiku-4.5',
478478
input: fromChatMessages([
479479
{
480480
role: 'user',
@@ -509,7 +509,7 @@ describe('callModel E2E Tests', () => {
509509
describe('response.newMessagesStream - Streaming message updates (Responses format)', () => {
510510
it('should successfully stream incremental message updates in ResponsesOutputMessage format', async () => {
511511
const response = client.callModel({
512-
model: 'meta-llama/llama-3.2-1b-instruct',
512+
model: 'anthropic/claude-haiku-4.5',
513513
input: fromChatMessages([
514514
{
515515
role: 'user',
@@ -550,7 +550,7 @@ describe('callModel E2E Tests', () => {
550550

551551
it('should return ResponsesOutputMessage with correct shape', async () => {
552552
const response = client.callModel({
553-
model: 'meta-llama/llama-3.2-1b-instruct',
553+
model: 'anthropic/claude-haiku-4.5',
554554
input: fromChatMessages([
555555
{
556556
role: 'user',
@@ -705,7 +705,7 @@ describe('callModel E2E Tests', () => {
705705

706706
it('should return messages with all required fields and correct types', async () => {
707707
const response = client.callModel({
708-
model: 'meta-llama/llama-3.2-1b-instruct',
708+
model: 'anthropic/claude-haiku-4.5',
709709
input: fromChatMessages([
710710
{
711711
role: 'user',
@@ -832,7 +832,7 @@ describe('callModel E2E Tests', () => {
832832
describe('response.fullResponsesStream - Streaming all events', () => {
833833
it('should successfully stream all response events', async () => {
834834
const response = client.callModel({
835-
model: 'meta-llama/llama-3.2-1b-instruct',
835+
model: 'anthropic/claude-haiku-4.5',
836836
input: fromChatMessages([
837837
{
838838
role: 'user',
@@ -864,7 +864,7 @@ describe('callModel E2E Tests', () => {
864864

865865
it('should include text delta events', async () => {
866866
const response = client.callModel({
867-
model: 'meta-llama/llama-3.2-1b-instruct',
867+
model: 'anthropic/claude-haiku-4.5',
868868
input: fromChatMessages([
869869
{
870870
role: 'user',
@@ -897,7 +897,7 @@ describe('callModel E2E Tests', () => {
897897
describe('response.fullChatStream - Chat-compatible streaming', () => {
898898
it('should successfully stream in chat-compatible format', async () => {
899899
const response = client.callModel({
900-
model: 'meta-llama/llama-3.2-1b-instruct',
900+
model: 'anthropic/claude-haiku-4.5',
901901
input: fromChatMessages([
902902
{
903903
role: 'user',
@@ -924,7 +924,7 @@ describe('callModel E2E Tests', () => {
924924

925925
it('should return events with correct shape for each event type', async () => {
926926
const response = client.callModel({
927-
model: 'meta-llama/llama-3.2-1b-instruct',
927+
model: 'anthropic/claude-haiku-4.5',
928928
input: fromChatMessages([
929929
{
930930
role: 'user',
@@ -986,7 +986,7 @@ describe('callModel E2E Tests', () => {
986986

987987
it('should validate content.delta events have proper structure', async () => {
988988
const response = client.callModel({
989-
model: 'meta-llama/llama-3.2-1b-instruct',
989+
model: 'anthropic/claude-haiku-4.5',
990990
input: fromChatMessages([
991991
{
992992
role: 'user',
@@ -1114,7 +1114,7 @@ describe('callModel E2E Tests', () => {
11141114
describe('Multiple concurrent consumption patterns', () => {
11151115
it('should allow reading text and streaming simultaneously', async () => {
11161116
const response = client.callModel({
1117-
model: 'meta-llama/llama-3.2-1b-instruct',
1117+
model: 'anthropic/claude-haiku-4.5',
11181118
input: fromChatMessages([
11191119
{
11201120
role: 'user',
@@ -1149,7 +1149,7 @@ describe('callModel E2E Tests', () => {
11491149

11501150
it('should allow multiple stream consumers', async () => {
11511151
const response = client.callModel({
1152-
model: 'meta-llama/llama-3.2-1b-instruct',
1152+
model: 'anthropic/claude-haiku-4.5',
11531153
input: fromChatMessages([
11541154
{
11551155
role: 'user',
@@ -1197,7 +1197,7 @@ describe('callModel E2E Tests', () => {
11971197

11981198
it('should allow sequential consumption - text then stream', async () => {
11991199
const response = client.callModel({
1200-
model: 'meta-llama/llama-3.2-1b-instruct',
1200+
model: 'anthropic/claude-haiku-4.5',
12011201
input: fromChatMessages([
12021202
{
12031203
role: 'user',
@@ -1227,7 +1227,7 @@ describe('callModel E2E Tests', () => {
12271227

12281228
it('should allow sequential consumption - stream then text', async () => {
12291229
const response = client.callModel({
1230-
model: 'meta-llama/llama-3.2-1b-instruct',
1230+
model: 'anthropic/claude-haiku-4.5',
12311231
input: fromChatMessages([
12321232
{
12331233
role: 'user',
@@ -1272,7 +1272,7 @@ describe('callModel E2E Tests', () => {
12721272

12731273
it('should handle empty input', async () => {
12741274
const response = client.callModel({
1275-
model: 'meta-llama/llama-3.2-1b-instruct',
1275+
model: 'anthropic/claude-haiku-4.5',
12761276
input: [],
12771277
});
12781278

@@ -1289,7 +1289,7 @@ describe('callModel E2E Tests', () => {
12891289
describe('response.getResponse - Full response with usage', () => {
12901290
it('should return full response with correct shape', async () => {
12911291
const response = client.callModel({
1292-
model: 'meta-llama/llama-3.2-1b-instruct',
1292+
model: 'anthropic/claude-haiku-4.5',
12931293
input: fromChatMessages([
12941294
{
12951295
role: 'user',
@@ -1335,7 +1335,7 @@ describe('callModel E2E Tests', () => {
13351335

13361336
it('should return usage with correct shape including all token details', async () => {
13371337
const response = client.callModel({
1338-
model: 'meta-llama/llama-3.2-1b-instruct',
1338+
model: 'anthropic/claude-haiku-4.5',
13391339
input: fromChatMessages([
13401340
{
13411341
role: 'user',
@@ -1391,7 +1391,7 @@ describe('callModel E2E Tests', () => {
13911391

13921392
it('should return error and incompleteDetails fields with correct shape', async () => {
13931393
const response = client.callModel({
1394-
model: 'meta-llama/llama-3.2-1b-instruct',
1394+
model: 'anthropic/claude-haiku-4.5',
13951395
input: fromChatMessages([
13961396
{
13971397
role: 'user',
@@ -1417,7 +1417,7 @@ describe('callModel E2E Tests', () => {
14171417

14181418
it('should allow concurrent access with other methods', async () => {
14191419
const response = client.callModel({
1420-
model: 'meta-llama/llama-3.2-1b-instruct',
1420+
model: 'anthropic/claude-haiku-4.5',
14211421
input: fromChatMessages([
14221422
{
14231423
role: 'user',
@@ -1445,7 +1445,7 @@ describe('callModel E2E Tests', () => {
14451445

14461446
it('should return consistent results on multiple calls', async () => {
14471447
const response = client.callModel({
1448-
model: 'meta-llama/llama-3.2-1b-instruct',
1448+
model: 'anthropic/claude-haiku-4.5',
14491449
input: fromChatMessages([
14501450
{
14511451
role: 'user',
@@ -1470,7 +1470,7 @@ describe('callModel E2E Tests', () => {
14701470
describe('Response parameters', () => {
14711471
it('should respect maxOutputTokens parameter', async () => {
14721472
const response = client.callModel({
1473-
model: 'meta-llama/llama-3.2-1b-instruct',
1473+
model: 'anthropic/claude-haiku-4.5',
14741474
input: fromChatMessages([
14751475
{
14761476
role: 'user',
@@ -1489,7 +1489,7 @@ describe('callModel E2E Tests', () => {
14891489

14901490
it('should work with instructions parameter', async () => {
14911491
const response = client.callModel({
1492-
model: 'meta-llama/llama-3.2-1b-instruct',
1492+
model: 'anthropic/claude-haiku-4.5',
14931493
input: fromChatMessages([
14941494
{
14951495
role: 'user',
@@ -1509,7 +1509,7 @@ describe('callModel E2E Tests', () => {
15091509

15101510
it('should support provider parameter with correct shape', async () => {
15111511
const response = client.callModel({
1512-
model: 'meta-llama/llama-3.2-1b-instruct',
1512+
model: 'anthropic/claude-haiku-4.5',
15131513
input: fromChatMessages([
15141514
{
15151515
role: 'user',
@@ -1531,7 +1531,7 @@ describe('callModel E2E Tests', () => {
15311531

15321532
it('should support provider with order preference', async () => {
15331533
const response = client.callModel({
1534-
model: 'meta-llama/llama-3.2-1b-instruct',
1534+
model: 'anthropic/claude-haiku-4.5',
15351535
input: fromChatMessages([
15361536
{
15371537
role: 'user',
@@ -1556,7 +1556,7 @@ describe('callModel E2E Tests', () => {
15561556

15571557
it('should support provider with ignore list', async () => {
15581558
const response = client.callModel({
1559-
model: 'meta-llama/llama-3.2-1b-instruct',
1559+
model: 'anthropic/claude-haiku-4.5',
15601560
input: fromChatMessages([
15611561
{
15621562
role: 'user',
@@ -1579,7 +1579,7 @@ describe('callModel E2E Tests', () => {
15791579

15801580
it('should support provider with quantizations filter', async () => {
15811581
const response = client.callModel({
1582-
model: 'meta-llama/llama-3.2-1b-instruct',
1582+
model: 'anthropic/claude-haiku-4.5',
15831583
input: fromChatMessages([
15841584
{
15851585
role: 'user',

tests/e2e/chat.test.ts

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ describe('Chat E2E Tests', () => {
2020
describe('chat.send() - Non-streaming', () => {
2121
it('should successfully send a chat request and get a response', async () => {
2222
const response = await client.chat.send({
23-
model: 'meta-llama/llama-3.2-1b-instruct',
23+
model: 'anthropic/claude-haiku-4.5',
2424
messages: [
2525
{
2626
role: 'user',
@@ -48,7 +48,7 @@ describe('Chat E2E Tests', () => {
4848

4949
it('should handle multi-turn conversations', async () => {
5050
const response = await client.chat.send({
51-
model: 'meta-llama/llama-3.2-1b-instruct',
51+
model: 'anthropic/claude-haiku-4.5',
5252
messages: [
5353
{
5454
role: 'user',
@@ -81,7 +81,7 @@ describe('Chat E2E Tests', () => {
8181

8282
it('should respect max_tokens parameter', async () => {
8383
const response = await client.chat.send({
84-
model: 'meta-llama/llama-3.2-1b-instruct',
84+
model: 'anthropic/claude-haiku-4.5',
8585
messages: [
8686
{
8787
role: 'user',
@@ -101,7 +101,7 @@ describe('Chat E2E Tests', () => {
101101
describe('chat.send() - Streaming', () => {
102102
it('should successfully stream chat responses', async () => {
103103
const response = await client.chat.send({
104-
model: 'meta-llama/llama-3.2-1b-instruct',
104+
model: 'anthropic/claude-haiku-4.5',
105105
messages: [
106106
{
107107
role: 'user',
@@ -130,7 +130,7 @@ describe('Chat E2E Tests', () => {
130130

131131
it('should stream complete content progressively', async () => {
132132
const response = await client.chat.send({
133-
model: 'meta-llama/llama-3.2-1b-instruct',
133+
model: 'anthropic/claude-haiku-4.5',
134134
messages: [
135135
{
136136
role: 'user',
@@ -159,7 +159,7 @@ describe('Chat E2E Tests', () => {
159159

160160
it('should include finish_reason in final chunk', async () => {
161161
const response = await client.chat.send({
162-
model: 'meta-llama/llama-3.2-1b-instruct',
162+
model: 'anthropic/claude-haiku-4.5',
163163
messages: [
164164
{
165165
role: 'user',

0 commit comments

Comments (0)