
Commit dad9f08: coverage

1 parent f6d99f2

File tree: 4 files changed, +7 -7 lines changed


pydantic_ai_slim/pydantic_ai/models/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -446,7 +446,7 @@ def prepare_request(
         if params.output_object and params.output_mode not in ('native', 'prompted'):
             params = replace(params, output_object=None)
         if params.prompted_output_template and params.output_mode != 'prompted':
-            params = replace(params, prompted_output_template=None)
+            params = replace(params, prompted_output_template=None)  # pragma: no cover
 
         # Set default prompted output template
         if params.output_mode == 'prompted' and not params.prompted_output_template:
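Background on the pragma added in this hunk: by default, coverage.py excludes any line ending in `# pragma: no cover` from its report, so lines that are unreachable under the test suite stop counting as misses. A minimal sketch of the mechanism (the function below is hypothetical, not from this repo):

def read_setting(settings: dict[str, str], key: str) -> str:
    value = settings.get(key)
    if value is None:
        # Defensive path never hit in the tests; the pragma removes it
        # from the coverage report rather than leaving a permanent miss.
        raise KeyError(f'missing setting: {key}')  # pragma: no cover
    return value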

pydantic_ai_slim/pydantic_ai/models/anthropic.py

Lines changed: 3 additions & 1 deletion
@@ -252,7 +252,9 @@ def prepare_request(
         ):
             if model_request_parameters.output_mode == 'auto':
                 model_request_parameters = replace(model_request_parameters, output_mode='prompted')
-            elif model_request_parameters.output_mode == 'tool' and not model_request_parameters.allow_text_output:
+            elif (
+                model_request_parameters.output_mode == 'tool' and not model_request_parameters.allow_text_output
+            ):  # pragma: no branch
                 # This would result in `tool_choice=required`, which Anthropic does not support with thinking.
                 raise UserError(
                     'Anthropic does not support thinking and output tools at the same time. Use `output_type=PromptedOutput(...)` instead.'
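Note the different pragma here: `# pragma: no branch` applies under branch measurement (`coverage run --branch`, or `branch = true` in the config). It tells coverage.py not to report a conditional as a partial branch when one of its exits never executes, while the lines themselves stay measured. A hypothetical sketch:

def greet(name: str) -> str:
    message = 'hello'
    if name:  # pragma: no branch
        # Every caller passes a non-empty name, so the false exit of this
        # `if` never runs; the pragma stops branch coverage from flagging
        # a partial branch here.
        message = f'hello, {name}'
    return message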

pydantic_ai_slim/pydantic_ai/models/fallback.py

Lines changed: 1 addition & 1 deletion
@@ -128,7 +128,7 @@ def profile(self) -> ModelProfile:
         raise NotImplementedError('FallbackModel does not have its own model profile.')
 
     def customize_request_parameters(self, model_request_parameters: ModelRequestParameters) -> ModelRequestParameters:
-        return model_request_parameters
+        return model_request_parameters  # pragma: no cover
 
     def prepare_request(
         self, model_settings: ModelSettings | None, model_request_parameters: ModelRequestParameters
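To check what a pragma like the one above would otherwise leave flagged, coverage.py can also be driven programmatically (CI normally uses pytest-cov; this standalone sketch, with a hypothetical pass-through mirroring the change above, is just for illustration):

import coverage

cov = coverage.Coverage(branch=True)  # branch=True enables branch analysis
cov.start()

def passthrough(x: int) -> int:
    # Never called below, so this body would be a miss without the pragma.
    return x  # pragma: no cover

cov.stop()
cov.save()
cov.report(show_missing=True)  # prints missed lines and partial branches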

tests/models/test_fallback.py

Lines changed: 2 additions & 4 deletions
@@ -684,7 +684,7 @@ def native_output_func(_: list[ModelMessage], info: AgentInfo) -> ModelResponse:
     def prompted_output_func(_: list[ModelMessage], info: AgentInfo) -> ModelResponse:
         nonlocal enabled_model
         if enabled_model != 'prompted':
-            raise ModelHTTPError(status_code=500, model_name='prompted-model', body=None)
+            raise ModelHTTPError(status_code=500, model_name='prompted-model', body=None)  # pragma: no cover
 
         assert info.model_request_parameters == snapshot(
             ModelRequestParameters(
@@ -848,9 +848,7 @@ def prompted_output_func(_: list[ModelMessage], info: AgentInfo) -> ModelResponse:
                 'gen_ai.output.messages': [
                     {'role': 'assistant', 'parts': [{'type': 'text', 'content': '{"bar":"baz"}'}]}
                 ],
-                'gen_ai.system_instructions': [
-                    {'type': 'text', 'content': 'Be kind'}
-                ],  # TODO (DouweM): We'd expect the prompted output instructions here as well
+                'gen_ai.system_instructions': [{'type': 'text', 'content': 'Be kind'}],
                 'gen_ai.usage.input_tokens': 51,
                 'gen_ai.usage.output_tokens': 4,
                 'gen_ai.response.model': 'function:prompted_output_func:',
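For readers without the full test file open: the pattern these hunks come from uses FunctionModel stubs that raise ModelHTTPError until the currently enabled model is reached, so FallbackModel moves down its list of candidates. A condensed, hedged sketch of that pattern (module paths and the Agent wiring are assumed from pydantic-ai's public API, not copied from the test):

from pydantic_ai import Agent
from pydantic_ai.exceptions import ModelHTTPError
from pydantic_ai.messages import ModelMessage, ModelResponse, TextPart
from pydantic_ai.models.fallback import FallbackModel
from pydantic_ai.models.function import AgentInfo, FunctionModel


def failing_func(_: list[ModelMessage], info: AgentInfo) -> ModelResponse:
    # Stands in for an unavailable provider; FallbackModel catches the
    # error and moves on to the next model in order.
    raise ModelHTTPError(status_code=500, model_name='failing-model', body=None)


def working_func(_: list[ModelMessage], info: AgentInfo) -> ModelResponse:
    return ModelResponse(parts=[TextPart('{"bar":"baz"}')])


model = FallbackModel(FunctionModel(failing_func), FunctionModel(working_func))
agent = Agent(model)
result = agent.run_sync('hello')
print(result.output)  # '{"bar":"baz"}' from the second model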
