@@ -12,6 +12,7 @@
 from pydantic_ai.profiles.google import GoogleJsonSchemaTransformer, google_model_profile
 from pydantic_ai.profiles.meta import meta_model_profile
 from pydantic_ai.profiles.mistral import mistral_model_profile
+from pydantic_ai.profiles.moonshotai import moonshotai_model_profile
 from pydantic_ai.profiles.qwen import qwen_model_profile
 
 from ..conftest import TestEnv, try_import
@@ -74,6 +75,7 @@ def test_groq_provider_model_profile(mocker: MockerFixture):
     google_model_profile_mock = mocker.patch(f'{ns}.google_model_profile', wraps=google_model_profile)
     mistral_model_profile_mock = mocker.patch(f'{ns}.mistral_model_profile', wraps=mistral_model_profile)
     qwen_model_profile_mock = mocker.patch(f'{ns}.qwen_model_profile', wraps=qwen_model_profile)
+    moonshotai_model_profile_mock = mocker.patch(f'{ns}.moonshotai_model_profile', wraps=moonshotai_model_profile)
 
     meta_profile = provider.model_profile('meta-llama/Llama-Guard-4-12B')
     meta_model_profile_mock.assert_called_with('llama-guard-4-12b')
@@ -103,5 +105,10 @@ def test_groq_provider_model_profile(mocker: MockerFixture):
     assert qwen_profile is not None
     assert qwen_profile.json_schema_transformer == InlineDefsJsonSchemaTransformer
 
+    # MoonshotAI model names should have the "moonshotai/" prefix stripped before being passed to the profile function
+    moonshotai_profile = provider.model_profile('moonshotai/kimi-k2-instruct')
+    moonshotai_model_profile_mock.assert_called_with('kimi-k2-instruct')
+    assert moonshotai_profile is None
+
     unknown_profile = provider.model_profile('unknown-model')
     assert unknown_profile is None
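
These assertions rely on the provider splitting the vendor prefix off the model name (and lower-casing it, per the 'meta-llama/Llama-Guard-4-12B' → 'llama-guard-4-12b' check) before delegating to the vendor-specific profile function. Below is a minimal sketch of that dispatch, assuming a plain prefix-to-function map; it is not the actual GroqProvider implementation, and `moonshotai_model_profile` here is a stand-in for the real pydantic_ai function.

```python
# Hypothetical sketch of prefix-based profile dispatch (not pydantic_ai source).
from typing import Callable, Optional


def moonshotai_model_profile(model_name: str) -> Optional[dict]:
    # Stand-in for pydantic_ai.profiles.moonshotai.moonshotai_model_profile;
    # returning None mirrors the `assert moonshotai_profile is None` in the test.
    return None


# Assumed structure: map a vendor prefix to its profile function.
_PREFIX_TO_PROFILE: dict[str, Callable[[str], Optional[dict]]] = {
    'moonshotai/': moonshotai_model_profile,
}


def model_profile(model_name: str) -> Optional[dict]:
    """Strip the vendor prefix, lower-case the rest, and delegate to the vendor profile."""
    lowered = model_name.lower()
    for prefix, profile_fn in _PREFIX_TO_PROFILE.items():
        if lowered.startswith(prefix):
            return profile_fn(lowered[len(prefix):])
    return None  # unknown model, matching `assert unknown_profile is None`


if __name__ == '__main__':
    # 'moonshotai/kimi-k2-instruct' -> the profile function sees 'kimi-k2-instruct'
    print(model_profile('moonshotai/kimi-k2-instruct'))
```

The test's `mocker.patch(..., wraps=moonshotai_model_profile)` then only has to assert on the bare model name that reaches the wrapped function.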