
Commit 0adb76a

Fix CI by removing references to shutdown groq models (#780)
1 parent a72c875 · commit 0adb76a

File tree

7 files changed: +29 -33 lines changed
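
In short: every reference to Groq's decommissioned `llama-3.1-70b-versatile` is swapped for `llama-3.3-70b-versatile` (plus the further removals in `groq.py` below). A minimal before/after sketch for downstream code, using only model names that appear in this diff:

```python
from pydantic_ai import Agent

# Before this commit (the model has been shut down by Groq):
# agent = Agent('groq:llama-3.1-70b-versatile')

# After this commit:
agent = Agent('groq:llama-3.3-70b-versatile')
```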

docs/models.md

Lines changed: 3 additions & 3 deletions

@@ -352,7 +352,7 @@ You can then use [`GroqModel`][pydantic_ai.models.groq.GroqModel] by name:
 ```python {title="groq_model_by_name.py"}
 from pydantic_ai import Agent
 
-agent = Agent('groq:llama-3.1-70b-versatile')
+agent = Agent('groq:llama-3.3-70b-versatile')
 ...
 ```
 
@@ -362,7 +362,7 @@ Or initialise the model directly with just the model name:
 from pydantic_ai import Agent
 from pydantic_ai.models.groq import GroqModel
 
-model = GroqModel('llama-3.1-70b-versatile')
+model = GroqModel('llama-3.3-70b-versatile')
 agent = Agent(model)
 ...
 ```
@@ -375,7 +375,7 @@ If you don't want to or can't set the environment variable, you can pass it at r
 from pydantic_ai import Agent
 from pydantic_ai.models.groq import GroqModel
 
-model = GroqModel('llama-3.1-70b-versatile', api_key='your-api-key')
+model = GroqModel('llama-3.3-70b-versatile', api_key='your-api-key')
 agent = Agent(model)
 ...
 ```
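
All three documented code paths now point at `llama-3.3-70b-versatile`. For completeness, a hedged sketch of the environment-variable route the surrounding docs describe (the `GROQ_API_KEY` name is taken from the `stream_markdown.py` diff below; the prompt is illustrative):

```python
import os

from pydantic_ai import Agent

# Assumes the key was exported beforehand, e.g.:
#   export GROQ_API_KEY='your-api-key'
assert os.environ.get('GROQ_API_KEY'), 'GROQ_API_KEY must be set'

agent = Agent('groq:llama-3.3-70b-versatile')
result = agent.run_sync('What is the capital of France?')
print(result.data)
```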

examples/pydantic_ai_examples/roulette_wheel.py

Lines changed: 1 addition & 1 deletion

@@ -21,7 +21,7 @@ class Deps:
 
 # Create the agent with proper typing
 roulette_agent = Agent(
-    'groq:llama-3.1-70b-versatile',
+    'groq:llama-3.3-70b-versatile',
     deps_type=Deps,
     retries=3,
     result_type=bool,

examples/pydantic_ai_examples/stream_markdown.py

Lines changed: 1 addition & 1 deletion

@@ -27,7 +27,7 @@
 models: list[tuple[KnownModelName, str]] = [
     ('google-gla:gemini-1.5-flash', 'GEMINI_API_KEY'),
     ('openai:gpt-4o-mini', 'OPENAI_API_KEY'),
-    ('groq:llama-3.1-70b-versatile', 'GROQ_API_KEY'),
+    ('groq:llama-3.3-70b-versatile', 'GROQ_API_KEY'),
 ]
 
 
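Since the table pairs each `KnownModelName` with the environment variable that unlocks it, the example can skip providers whose keys aren't set. A sketch of that pattern under the table's shape; the loop body here is illustrative, not the example's actual code:

```python
import os

# (model name, required environment variable), as in the diff above
models = [
    ('google-gla:gemini-1.5-flash', 'GEMINI_API_KEY'),
    ('openai:gpt-4o-mini', 'OPENAI_API_KEY'),
    ('groq:llama-3.3-70b-versatile', 'GROQ_API_KEY'),
]

for model_name, env_var in models:
    if os.getenv(env_var):
        print(f'{model_name}: usable')
    else:
        print(f'{model_name}: skipped ({env_var} not set)')
```
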
pydantic_ai_slim/pydantic_ai/models/__init__.py

Lines changed: 1 addition & 1 deletion

@@ -36,7 +36,7 @@
     'openai:o1',
     'openai:gpt-3.5-turbo',
     'groq:llama-3.3-70b-versatile',
-    'groq:llama-3.1-70b-versatile',
+    'groq:llama-3.3-70b-versatile',
     'groq:llama3-groq-70b-8192-tool-use-preview',
     'groq:llama3-groq-8b-8192-tool-use-preview',
     'groq:llama-3.1-70b-specdec',

pydantic_ai_slim/pydantic_ai/models/groq.py

Lines changed: 1 addition & 5 deletions

@@ -47,10 +47,7 @@
 
 GroqModelName = Literal[
     'llama-3.3-70b-versatile',
-    'llama-3.1-70b-versatile',
-    'llama3-groq-70b-8192-tool-use-preview',
-    'llama3-groq-8b-8192-tool-use-preview',
-    'llama-3.1-70b-specdec',
+    'llama-3.3-70b-specdec',
     'llama-3.1-8b-instant',
     'llama-3.2-1b-preview',
     'llama-3.2-3b-preview',
@@ -60,7 +57,6 @@
     'llama3-8b-8192',
     'mixtral-8x7b-32768',
     'gemma2-9b-it',
-    'gemma-7b-it',
 ]
 """Named Groq models.
 
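Because `GroqModelName` is a `Literal`, removing an entry turns stale references into a static type error rather than only a runtime failure against Groq's API. A sketch, assuming a type checker such as mypy or pyright is run over the code:

```python
from pydantic_ai.models.groq import GroqModel

# Still in the Literal after this commit: accepted by the type checker.
model = GroqModel('llama-3.3-70b-versatile')

# No longer in the Literal: a type checker now flags this call.
# model = GroqModel('llama-3.1-70b-versatile')  # error: invalid GroqModelName
```
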
tests/models/test_groq.py

Lines changed: 21 additions & 21 deletions

@@ -50,9 +50,9 @@
 
 
 def test_init():
-    m = GroqModel('llama-3.1-70b-versatile', api_key='foobar')
+    m = GroqModel('llama-3.3-70b-versatile', api_key='foobar')
     assert m.client.api_key == 'foobar'
-    assert m.name() == 'groq:llama-3.1-70b-versatile'
+    assert m.name() == 'groq:llama-3.3-70b-versatile'
 
 
 @dataclass
@@ -102,7 +102,7 @@ def completion_message(message: ChatCompletionMessage, *, usage: CompletionUsage
         id='123',
         choices=[Choice(finish_reason='stop', index=0, message=message)],
         created=1704067200,  # 2024-01-01
-        model='llama-3.1-70b-versatile',
+        model='llama-3.3-70b-versatile',
         object='chat.completion',
         usage=usage,
     )
@@ -111,7 +111,7 @@ def completion_message(message: ChatCompletionMessage, *, usage: CompletionUsage
 async def test_request_simple_success(allow_model_requests: None):
     c = completion_message(ChatCompletionMessage(content='world', role='assistant'))
     mock_client = MockGroq.create_mock(c)
-    m = GroqModel('llama-3.1-70b-versatile', groq_client=mock_client)
+    m = GroqModel('llama-3.3-70b-versatile', groq_client=mock_client)
     agent = Agent(m)
 
     result = await agent.run('hello')
@@ -129,13 +129,13 @@ async def test_request_simple_success(allow_model_requests: None):
             ModelRequest(parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))]),
             ModelResponse(
                 parts=[TextPart(content='world')],
-                model_name='llama-3.1-70b-versatile',
+                model_name='llama-3.3-70b-versatile',
                 timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
             ),
             ModelRequest(parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))]),
             ModelResponse(
                 parts=[TextPart(content='world')],
-                model_name='llama-3.1-70b-versatile',
+                model_name='llama-3.3-70b-versatile',
                 timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
             ),
         ]
@@ -148,7 +148,7 @@ async def test_request_simple_usage(allow_model_requests: None):
         usage=CompletionUsage(completion_tokens=1, prompt_tokens=2, total_tokens=3),
     )
     mock_client = MockGroq.create_mock(c)
-    m = GroqModel('llama-3.1-70b-versatile', groq_client=mock_client)
+    m = GroqModel('llama-3.3-70b-versatile', groq_client=mock_client)
     agent = Agent(m)
 
     result = await agent.run('Hello')
@@ -170,7 +170,7 @@ async def test_request_structured_response(allow_model_requests: None):
         )
     )
     mock_client = MockGroq.create_mock(c)
-    m = GroqModel('llama-3.1-70b-versatile', groq_client=mock_client)
+    m = GroqModel('llama-3.3-70b-versatile', groq_client=mock_client)
     agent = Agent(m, result_type=list[int])
 
     result = await agent.run('Hello')
@@ -186,7 +186,7 @@ async def test_request_structured_response(allow_model_requests: None):
                         tool_call_id='123',
                     )
                 ],
-                model_name='llama-3.1-70b-versatile',
+                model_name='llama-3.3-70b-versatile',
                 timestamp=datetime(2024, 1, 1, tzinfo=timezone.utc),
             ),
             ModelRequest(
@@ -244,7 +244,7 @@ async def test_request_tool_call(allow_model_requests: None):
         completion_message(ChatCompletionMessage(content='final response', role='assistant')),
     ]
     mock_client = MockGroq.create_mock(responses)
-    m = GroqModel('llama-3.1-70b-versatile', groq_client=mock_client)
+    m = GroqModel('llama-3.3-70b-versatile', groq_client=mock_client)
     agent = Agent(m, system_prompt='this is the system prompt')
 
     @agent.tool_plain
@@ -272,7 +272,7 @@ async def get_location(loc_name: str) -> str:
                         tool_call_id='1',
                     )
                 ],
-                model_name='llama-3.1-70b-versatile',
+                model_name='llama-3.3-70b-versatile',
                 timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
             ),
             ModelRequest(
@@ -293,7 +293,7 @@ async def get_location(loc_name: str) -> str:
                         tool_call_id='2',
                     )
                 ],
-                model_name='llama-3.1-70b-versatile',
+                model_name='llama-3.3-70b-versatile',
                 timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
             ),
             ModelRequest(
@@ -308,7 +308,7 @@ async def get_location(loc_name: str) -> str:
             ),
             ModelResponse(
                 parts=[TextPart(content='final response')],
-                model_name='llama-3.1-70b-versatile',
+                model_name='llama-3.3-70b-versatile',
                 timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
             ),
         ]
@@ -326,7 +326,7 @@ def chunk(delta: list[ChoiceDelta], finish_reason: FinishReason | None = None) -
         ],
         created=1704067200,  # 2024-01-01
         x_groq=None,
-        model='llama-3.1-70b-versatile',
+        model='llama-3.3-70b-versatile',
         object='chat.completion.chunk',
         usage=CompletionUsage(completion_tokens=1, prompt_tokens=2, total_tokens=3),
     )
@@ -339,7 +339,7 @@ def text_chunk(text: str, finish_reason: FinishReason | None = None) -> chat.Cha
 async def test_stream_text(allow_model_requests: None):
     stream = text_chunk('hello '), text_chunk('world'), chunk([])
     mock_client = MockGroq.create_mock_stream(stream)
-    m = GroqModel('llama-3.1-70b-versatile', groq_client=mock_client)
+    m = GroqModel('llama-3.3-70b-versatile', groq_client=mock_client)
     agent = Agent(m)
 
     async with agent.run_stream('') as result:
@@ -351,7 +351,7 @@ async def test_stream_text(allow_model_requests: None):
 async def test_stream_text_finish_reason(allow_model_requests: None):
     stream = text_chunk('hello '), text_chunk('world'), text_chunk('.', finish_reason='stop')
     mock_client = MockGroq.create_mock_stream(stream)
-    m = GroqModel('llama-3.1-70b-versatile', groq_client=mock_client)
+    m = GroqModel('llama-3.3-70b-versatile', groq_client=mock_client)
     agent = Agent(m)
 
     async with agent.run_stream('') as result:
@@ -398,7 +398,7 @@ async def test_stream_structured(allow_model_requests: None):
         chunk([]),
     )
     mock_client = MockGroq.create_mock_stream(stream)
-    m = GroqModel('llama-3.1-70b-versatile', groq_client=mock_client)
+    m = GroqModel('llama-3.3-70b-versatile', groq_client=mock_client)
     agent = Agent(m, result_type=MyTypedDict)
 
     async with agent.run_stream('') as result:
@@ -424,7 +424,7 @@ async def test_stream_structured(allow_model_requests: None):
                         args='{"first": "One", "second": "Two"}',
                     )
                 ],
-                model_name='llama-3.1-70b-versatile',
+                model_name='llama-3.3-70b-versatile',
                 timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
             ),
             ModelRequest(
@@ -449,7 +449,7 @@ async def test_stream_structured_finish_reason(allow_model_requests: None):
         struc_chunk(None, None, finish_reason='stop'),
     )
     mock_client = MockGroq.create_mock_stream(stream)
-    m = GroqModel('llama-3.1-70b-versatile', groq_client=mock_client)
+    m = GroqModel('llama-3.3-70b-versatile', groq_client=mock_client)
     agent = Agent(m, result_type=MyTypedDict)
 
     async with agent.run_stream('') as result:
@@ -469,7 +469,7 @@ async def test_stream_structured_finish_reason(allow_model_requests: None):
 async def test_no_content(allow_model_requests: None):
     stream = chunk([ChoiceDelta()]), chunk([ChoiceDelta()])
     mock_client = MockGroq.create_mock_stream(stream)
-    m = GroqModel('llama-3.1-70b-versatile', groq_client=mock_client)
+    m = GroqModel('llama-3.3-70b-versatile', groq_client=mock_client)
     agent = Agent(m, result_type=MyTypedDict)
 
     with pytest.raises(UnexpectedModelBehavior, match='Received empty model response'):
@@ -480,7 +480,7 @@ async def test_no_content(allow_model_requests: None):
 async def test_no_delta(allow_model_requests: None):
     stream = chunk([]), text_chunk('hello '), text_chunk('world')
     mock_client = MockGroq.create_mock_stream(stream)
-    m = GroqModel('llama-3.1-70b-versatile', groq_client=mock_client)
+    m = GroqModel('llama-3.3-70b-versatile', groq_client=mock_client)
     agent = Agent(m)
 
     async with agent.run_stream('') as result:
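
Every test above wraps `GroqModel` around a mocked client, so the suite never hits Groq's API; only the model-name strings needed updating. A condensed sketch of that pattern, assuming the `MockGroq` helper, the `completion_message` factory, and the `allow_model_requests` fixture defined in this test module:

```python
from groq.types.chat import ChatCompletionMessage

from pydantic_ai import Agent
from pydantic_ai.models.groq import GroqModel

# completion_message and MockGroq are module-local helpers in
# tests/models/test_groq.py; this sketch assumes it lives alongside them.


async def test_simple_success_sketch(allow_model_requests: None):
    # Canned response; no network traffic is involved.
    c = completion_message(ChatCompletionMessage(content='world', role='assistant'))
    mock_client = MockGroq.create_mock(c)

    m = GroqModel('llama-3.3-70b-versatile', groq_client=mock_client)
    agent = Agent(m)

    result = await agent.run('hello')
    assert result.data == 'world'
```
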
tests/test_live.py

Lines changed: 1 addition & 1 deletion

@@ -45,7 +45,7 @@ def vertexai(http_client: httpx.AsyncClient, tmp_path: Path) -> Model:
 def groq(http_client: httpx.AsyncClient, _tmp_path: Path) -> Model:
     from pydantic_ai.models.groq import GroqModel
 
-    return GroqModel('llama-3.1-70b-versatile', http_client=http_client)
+    return GroqModel('llama-3.3-70b-versatile', http_client=http_client)
 
 
 def anthropic(http_client: httpx.AsyncClient, _tmp_path: Path) -> Model:
