@@ -50,9 +50,9 @@


 def test_init():
-    m = GroqModel('llama-3.1-70b-versatile', api_key='foobar')
+    m = GroqModel('llama-3.3-70b-versatile', api_key='foobar')
     assert m.client.api_key == 'foobar'
-    assert m.name() == 'groq:llama-3.1-70b-versatile'
+    assert m.name() == 'groq:llama-3.3-70b-versatile'


 @dataclass
@@ -102,7 +102,7 @@ def completion_message(message: ChatCompletionMessage, *, usage: CompletionUsage
         id='123',
         choices=[Choice(finish_reason='stop', index=0, message=message)],
         created=1704067200,  # 2024-01-01
-        model='llama-3.1-70b-versatile',
+        model='llama-3.3-70b-versatile',
         object='chat.completion',
         usage=usage,
     )
@@ -111,7 +111,7 @@ def completion_message(message: ChatCompletionMessage, *, usage: CompletionUsage
 async def test_request_simple_success(allow_model_requests: None):
     c = completion_message(ChatCompletionMessage(content='world', role='assistant'))
     mock_client = MockGroq.create_mock(c)
-    m = GroqModel('llama-3.1-70b-versatile', groq_client=mock_client)
+    m = GroqModel('llama-3.3-70b-versatile', groq_client=mock_client)
     agent = Agent(m)

     result = await agent.run('hello')
@@ -129,13 +129,13 @@ async def test_request_simple_success(allow_model_requests: None):
             ModelRequest(parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))]),
             ModelResponse(
                 parts=[TextPart(content='world')],
-                model_name='llama-3.1-70b-versatile',
+                model_name='llama-3.3-70b-versatile',
                 timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
             ),
             ModelRequest(parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))]),
             ModelResponse(
                 parts=[TextPart(content='world')],
-                model_name='llama-3.1-70b-versatile',
+                model_name='llama-3.3-70b-versatile',
                 timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
             ),
         ]
@@ -148,7 +148,7 @@ async def test_request_simple_usage(allow_model_requests: None):
         usage=CompletionUsage(completion_tokens=1, prompt_tokens=2, total_tokens=3),
     )
     mock_client = MockGroq.create_mock(c)
-    m = GroqModel('llama-3.1-70b-versatile', groq_client=mock_client)
+    m = GroqModel('llama-3.3-70b-versatile', groq_client=mock_client)
     agent = Agent(m)

     result = await agent.run('Hello')
@@ -170,7 +170,7 @@ async def test_request_structured_response(allow_model_requests: None):
         )
     )
     mock_client = MockGroq.create_mock(c)
-    m = GroqModel('llama-3.1-70b-versatile', groq_client=mock_client)
+    m = GroqModel('llama-3.3-70b-versatile', groq_client=mock_client)
     agent = Agent(m, result_type=list[int])

     result = await agent.run('Hello')
@@ -186,7 +186,7 @@ async def test_request_structured_response(allow_model_requests: None):
                         tool_call_id='123',
                     )
                 ],
-                model_name='llama-3.1-70b-versatile',
+                model_name='llama-3.3-70b-versatile',
                 timestamp=datetime(2024, 1, 1, tzinfo=timezone.utc),
             ),
             ModelRequest(
@@ -244,7 +244,7 @@ async def test_request_tool_call(allow_model_requests: None):
         completion_message(ChatCompletionMessage(content='final response', role='assistant')),
     ]
     mock_client = MockGroq.create_mock(responses)
-    m = GroqModel('llama-3.1-70b-versatile', groq_client=mock_client)
+    m = GroqModel('llama-3.3-70b-versatile', groq_client=mock_client)
     agent = Agent(m, system_prompt='this is the system prompt')

     @agent.tool_plain
@@ -272,7 +272,7 @@ async def get_location(loc_name: str) -> str:
                         tool_call_id='1',
                     )
                 ],
-                model_name='llama-3.1-70b-versatile',
+                model_name='llama-3.3-70b-versatile',
                 timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
             ),
             ModelRequest(
@@ -293,7 +293,7 @@ async def get_location(loc_name: str) -> str:
                         tool_call_id='2',
                     )
                 ],
-                model_name='llama-3.1-70b-versatile',
+                model_name='llama-3.3-70b-versatile',
                 timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
             ),
             ModelRequest(
@@ -308,7 +308,7 @@ async def get_location(loc_name: str) -> str:
             ),
             ModelResponse(
                 parts=[TextPart(content='final response')],
-                model_name='llama-3.1-70b-versatile',
+                model_name='llama-3.3-70b-versatile',
                 timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
             ),
         ]
@@ -326,7 +326,7 @@ def chunk(delta: list[ChoiceDelta], finish_reason: FinishReason | None = None) -
         ],
         created=1704067200,  # 2024-01-01
         x_groq=None,
-        model='llama-3.1-70b-versatile',
+        model='llama-3.3-70b-versatile',
         object='chat.completion.chunk',
         usage=CompletionUsage(completion_tokens=1, prompt_tokens=2, total_tokens=3),
     )
@@ -339,7 +339,7 @@ def text_chunk(text: str, finish_reason: FinishReason | None = None) -> chat.Cha
 async def test_stream_text(allow_model_requests: None):
     stream = text_chunk('hello '), text_chunk('world'), chunk([])
     mock_client = MockGroq.create_mock_stream(stream)
-    m = GroqModel('llama-3.1-70b-versatile', groq_client=mock_client)
+    m = GroqModel('llama-3.3-70b-versatile', groq_client=mock_client)
     agent = Agent(m)

     async with agent.run_stream('') as result:
@@ -351,7 +351,7 @@ async def test_stream_text(allow_model_requests: None):
 async def test_stream_text_finish_reason(allow_model_requests: None):
     stream = text_chunk('hello '), text_chunk('world'), text_chunk('.', finish_reason='stop')
     mock_client = MockGroq.create_mock_stream(stream)
-    m = GroqModel('llama-3.1-70b-versatile', groq_client=mock_client)
+    m = GroqModel('llama-3.3-70b-versatile', groq_client=mock_client)
     agent = Agent(m)

     async with agent.run_stream('') as result:
@@ -398,7 +398,7 @@ async def test_stream_structured(allow_model_requests: None):
         chunk([]),
     )
     mock_client = MockGroq.create_mock_stream(stream)
-    m = GroqModel('llama-3.1-70b-versatile', groq_client=mock_client)
+    m = GroqModel('llama-3.3-70b-versatile', groq_client=mock_client)
     agent = Agent(m, result_type=MyTypedDict)

     async with agent.run_stream('') as result:
@@ -424,7 +424,7 @@ async def test_stream_structured(allow_model_requests: None):
                         args='{"first": "One", "second": "Two"}',
                     )
                 ],
-                model_name='llama-3.1-70b-versatile',
+                model_name='llama-3.3-70b-versatile',
                 timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
             ),
             ModelRequest(
@@ -449,7 +449,7 @@ async def test_stream_structured_finish_reason(allow_model_requests: None):
         struc_chunk(None, None, finish_reason='stop'),
     )
     mock_client = MockGroq.create_mock_stream(stream)
-    m = GroqModel('llama-3.1-70b-versatile', groq_client=mock_client)
+    m = GroqModel('llama-3.3-70b-versatile', groq_client=mock_client)
     agent = Agent(m, result_type=MyTypedDict)

     async with agent.run_stream('') as result:
@@ -469,7 +469,7 @@ async def test_stream_structured_finish_reason(allow_model_requests: None):
 async def test_no_content(allow_model_requests: None):
     stream = chunk([ChoiceDelta()]), chunk([ChoiceDelta()])
     mock_client = MockGroq.create_mock_stream(stream)
-    m = GroqModel('llama-3.1-70b-versatile', groq_client=mock_client)
+    m = GroqModel('llama-3.3-70b-versatile', groq_client=mock_client)
     agent = Agent(m, result_type=MyTypedDict)

     with pytest.raises(UnexpectedModelBehavior, match='Received empty model response'):
@@ -480,7 +480,7 @@ async def test_no_content(allow_model_requests: None):
 async def test_no_delta(allow_model_requests: None):
     stream = chunk([]), text_chunk('hello '), text_chunk('world')
     mock_client = MockGroq.create_mock_stream(stream)
-    m = GroqModel('llama-3.1-70b-versatile', groq_client=mock_client)
+    m = GroqModel('llama-3.3-70b-versatile', groq_client=mock_client)
     agent = Agent(m)

     async with agent.run_stream('') as result:
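Every test touched by this diff follows the same replay pattern: `MockGroq.create_mock(...)` (or `create_mock_stream(...)` for the streaming tests) builds a stand-in for the Groq client that returns canned `chat.ChatCompletion` objects instead of making network calls, and that stand-in is passed to `GroqModel` via `groq_client=`. The repository's actual `MockGroq` is not part of this diff; the sketch below is only an assumption of how such a replay mock can be wired, mirroring the `client.chat.completions.create(...)` call path — the name `ReplayGroqClient` and all internals are illustrative, not the test suite's real implementation.

```python
# Hypothetical sketch of a replay mock for the Groq client. The real
# MockGroq used by these tests lives elsewhere in the test suite; the
# names and internals here are illustrative assumptions.
from __future__ import annotations

from dataclasses import dataclass, field
from types import SimpleNamespace
from typing import Any


@dataclass
class ReplayGroqClient:
    """Returns canned completions in order instead of calling the API."""

    responses: list[Any] = field(default_factory=list)
    _index: int = 0

    @classmethod
    def create_mock(cls, responses: Any) -> ReplayGroqClient:
        # Accept a single canned completion or a sequence of them, matching
        # how the tests above pass either `c` or a `responses` list.
        if isinstance(responses, (list, tuple)):
            return cls(list(responses))
        return cls([responses])

    @property
    def chat(self) -> SimpleNamespace:
        # Expose the attribute path the model implementation calls:
        # client.chat.completions.create(...)
        return SimpleNamespace(completions=SimpleNamespace(create=self._create))

    async def _create(self, **_kwargs: Any) -> Any:
        # Each call hands out the next canned response, so multi-step
        # tool-call runs (like test_request_tool_call) see successive
        # completions in order.
        response = self.responses[self._index]
        self._index += 1
        return response
```

A streaming counterpart (`create_mock_stream` in the tests above) would presumably follow the same shape but have `create` return an async iterator of `ChatCompletionChunk`s; only the non-streaming path is sketched here.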