Skip to content

Commit d45c13b

Browse files
authored
fix: Plumb system_prompt through to structured_output (strands-agents#466)
Addresses strands-agents#362. Small fix that needed to be plumbed through. Co-authored-by: Mackenzie Zastrow <[email protected]>
1 parent c934153 commit d45c13b

File tree

13 files changed

+42
-25
lines changed

13 files changed

+42
-25
lines changed

src/strands/agent/agent.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -457,7 +457,7 @@ async def structured_output_async(
457457
content: list[ContentBlock] = [{"text": prompt}] if isinstance(prompt, str) else prompt
458458
self._append_message({"role": "user", "content": content})
459459

460-
events = self.model.structured_output(output_model, self.messages)
460+
events = self.model.structured_output(output_model, self.messages, system_prompt=self.system_prompt)
461461
async for event in events:
462462
if "callback" in event:
463463
self.callback_handler(**cast(dict, event["callback"]))

src/strands/models/anthropic.py

Lines changed: 3 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -392,21 +392,22 @@ async def stream(
392392

393393
@override
394394
async def structured_output(
395-
self, output_model: Type[T], prompt: Messages, **kwargs: Any
395+
self, output_model: Type[T], prompt: Messages, system_prompt: Optional[str] = None, **kwargs: Any
396396
) -> AsyncGenerator[dict[str, Union[T, Any]], None]:
397397
"""Get structured output from the model.
398398
399399
Args:
400400
output_model: The output model to use for the agent.
401401
prompt: The prompt messages to use for the agent.
402+
system_prompt: System prompt to provide context to the model.
402403
**kwargs: Additional keyword arguments for future extensibility.
403404
404405
Yields:
405406
Model events with the last being the structured output.
406407
"""
407408
tool_spec = convert_pydantic_to_tool_spec(output_model)
408409

409-
response = self.stream(messages=prompt, tool_specs=[tool_spec], **kwargs)
410+
response = self.stream(messages=prompt, tool_specs=[tool_spec], system_prompt=system_prompt, **kwargs)
410411
async for event in process_stream(response):
411412
yield event
412413

src/strands/models/bedrock.py

Lines changed: 3 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -562,21 +562,22 @@ def _find_detected_and_blocked_policy(self, input: Any) -> bool:
562562

563563
@override
564564
async def structured_output(
565-
self, output_model: Type[T], prompt: Messages, **kwargs: Any
565+
self, output_model: Type[T], prompt: Messages, system_prompt: Optional[str] = None, **kwargs: Any
566566
) -> AsyncGenerator[dict[str, Union[T, Any]], None]:
567567
"""Get structured output from the model.
568568
569569
Args:
570570
output_model: The output model to use for the agent.
571571
prompt: The prompt messages to use for the agent.
572+
system_prompt: System prompt to provide context to the model.
572573
**kwargs: Additional keyword arguments for future extensibility.
573574
574575
Yields:
575576
Model events with the last being the structured output.
576577
"""
577578
tool_spec = convert_pydantic_to_tool_spec(output_model)
578579

579-
response = self.stream(messages=prompt, tool_specs=[tool_spec], **kwargs)
580+
response = self.stream(messages=prompt, tool_specs=[tool_spec], system_prompt=system_prompt, **kwargs)
580581
async for event in streaming.process_stream(response):
581582
yield event
582583

src/strands/models/litellm.py

Lines changed: 3 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -184,13 +184,14 @@ async def stream(
184184

185185
@override
186186
async def structured_output(
187-
self, output_model: Type[T], prompt: Messages, **kwargs: Any
187+
self, output_model: Type[T], prompt: Messages, system_prompt: Optional[str] = None, **kwargs: Any
188188
) -> AsyncGenerator[dict[str, Union[T, Any]], None]:
189189
"""Get structured output from the model.
190190
191191
Args:
192192
output_model: The output model to use for the agent.
193193
prompt: The prompt messages to use for the agent.
194+
system_prompt: System prompt to provide context to the model.
194195
**kwargs: Additional keyword arguments for future extensibility.
195196
196197
Yields:
@@ -199,7 +200,7 @@ async def structured_output(
199200
response = await litellm.acompletion(
200201
**self.client_args,
201202
model=self.get_config()["model_id"],
202-
messages=self.format_request(prompt)["messages"],
203+
messages=self.format_request(prompt, system_prompt=system_prompt)["messages"],
203204
response_format=output_model,
204205
)
205206

src/strands/models/llamaapi.py

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -407,13 +407,14 @@ async def stream(
407407

408408
@override
409409
def structured_output(
410-
self, output_model: Type[T], prompt: Messages, **kwargs: Any
410+
self, output_model: Type[T], prompt: Messages, system_prompt: Optional[str] = None, **kwargs: Any
411411
) -> AsyncGenerator[dict[str, Union[T, Any]], None]:
412412
"""Get structured output from the model.
413413
414414
Args:
415415
output_model: The output model to use for the agent.
416416
prompt: The prompt messages to use for the agent.
417+
system_prompt: System prompt to provide context to the model.
417418
**kwargs: Additional keyword arguments for future extensibility.
418419
419420
Yields:

src/strands/models/mistral.py

Lines changed: 3 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -493,13 +493,14 @@ async def stream(
493493

494494
@override
495495
async def structured_output(
496-
self, output_model: Type[T], prompt: Messages, **kwargs: Any
496+
self, output_model: Type[T], prompt: Messages, system_prompt: Optional[str] = None, **kwargs: Any
497497
) -> AsyncGenerator[dict[str, Union[T, Any]], None]:
498498
"""Get structured output from the model.
499499
500500
Args:
501501
output_model: The output model to use for the agent.
502502
prompt: The prompt messages to use for the agent.
503+
system_prompt: System prompt to provide context to the model.
503504
**kwargs: Additional keyword arguments for future extensibility.
504505
505506
Returns:
@@ -514,7 +515,7 @@ async def structured_output(
514515
"inputSchema": {"json": output_model.model_json_schema()},
515516
}
516517

517-
formatted_request = self.format_request(messages=prompt, tool_specs=[tool_spec])
518+
formatted_request = self.format_request(messages=prompt, tool_specs=[tool_spec], system_prompt=system_prompt)
518519

519520
formatted_request["tool_choice"] = "any"
520521
formatted_request["parallel_tool_calls"] = False

src/strands/models/model.py

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -45,13 +45,14 @@ def get_config(self) -> Any:
4545
@abc.abstractmethod
4646
# pragma: no cover
4747
def structured_output(
48-
self, output_model: Type[T], prompt: Messages, **kwargs: Any
48+
self, output_model: Type[T], prompt: Messages, system_prompt: Optional[str] = None, **kwargs: Any
4949
) -> AsyncGenerator[dict[str, Union[T, Any]], None]:
5050
"""Get structured output from the model.
5151
5252
Args:
5353
output_model: The output model to use for the agent.
5454
prompt: The prompt messages to use for the agent.
55+
system_prompt: System prompt to provide context to the model.
5556
**kwargs: Additional keyword arguments for future extensibility.
5657
5758
Yields:

src/strands/models/ollama.py

Lines changed: 3 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -330,19 +330,20 @@ async def stream(
330330

331331
@override
332332
async def structured_output(
333-
self, output_model: Type[T], prompt: Messages, **kwargs: Any
333+
self, output_model: Type[T], prompt: Messages, system_prompt: Optional[str] = None, **kwargs: Any
334334
) -> AsyncGenerator[dict[str, Union[T, Any]], None]:
335335
"""Get structured output from the model.
336336
337337
Args:
338338
output_model: The output model to use for the agent.
339339
prompt: The prompt messages to use for the agent.
340+
system_prompt: System prompt to provide context to the model.
340341
**kwargs: Additional keyword arguments for future extensibility.
341342
342343
Yields:
343344
Model events with the last being the structured output.
344345
"""
345-
formatted_request = self.format_request(messages=prompt)
346+
formatted_request = self.format_request(messages=prompt, system_prompt=system_prompt)
346347
formatted_request["format"] = output_model.model_json_schema()
347348
formatted_request["stream"] = False
348349

src/strands/models/openai.py

Lines changed: 3 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -401,21 +401,22 @@ async def stream(
401401

402402
@override
403403
async def structured_output(
404-
self, output_model: Type[T], prompt: Messages, **kwargs: Any
404+
self, output_model: Type[T], prompt: Messages, system_prompt: Optional[str] = None, **kwargs: Any
405405
) -> AsyncGenerator[dict[str, Union[T, Any]], None]:
406406
"""Get structured output from the model.
407407
408408
Args:
409409
output_model: The output model to use for the agent.
410410
prompt: The prompt messages to use for the agent.
411+
system_prompt: System prompt to provide context to the model.
411412
**kwargs: Additional keyword arguments for future extensibility.
412413
413414
Yields:
414415
Model events with the last being the structured output.
415416
"""
416417
response: ParsedChatCompletion = await self.client.beta.chat.completions.parse( # type: ignore
417418
model=self.get_config()["model_id"],
418-
messages=self.format_request(prompt)["messages"],
419+
messages=self.format_request(prompt, system_prompt=system_prompt)["messages"],
419420
response_format=output_model,
420421
)
421422

src/strands/models/writer.py

Lines changed: 3 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -422,16 +422,17 @@ async def stream(
422422

423423
@override
424424
async def structured_output(
425-
self, output_model: Type[T], prompt: Messages, **kwargs: Any
425+
self, output_model: Type[T], prompt: Messages, system_prompt: Optional[str] = None, **kwargs: Any
426426
) -> AsyncGenerator[dict[str, Union[T, Any]], None]:
427427
"""Get structured output from the model.
428428
429429
Args:
430430
output_model(Type[BaseModel]): The output model to use for the agent.
431431
prompt(Messages): The prompt messages to use for the agent.
432+
system_prompt: System prompt to provide context to the model.
432433
**kwargs: Additional keyword arguments for future extensibility.
433434
"""
434-
formatted_request = self.format_request(messages=prompt, tool_specs=None, system_prompt=None)
435+
formatted_request = self.format_request(messages=prompt, tool_specs=None, system_prompt=system_prompt)
435436
formatted_request["response_format"] = {
436437
"type": "json_schema",
437438
"json_schema": {"schema": output_model.model_json_schema()},

0 commit comments

Comments (0)