12 changes: 6 additions & 6 deletions examples/basic/dynamic_system_prompt.py
@@ -1,13 +1,14 @@
import asyncio
import random
from dataclasses import dataclass
from typing import Literal

from agents import Agent, RunContextWrapper, Runner


@dataclass
class CustomContext:
def __init__(self, style: Literal["haiku", "pirate", "robot"]):
self.style = style
style: Literal["haiku", "pirate", "robot"]


def custom_instructions(
@@ -27,11 +28,9 @@ def custom_instructions(
instructions=custom_instructions,
)


async def main():
choice: Literal["haiku", "pirate", "robot"] = random.choice(["haiku", "pirate", "robot"])
context = CustomContext(style=choice)
print(f"Using style: {choice}\n")
context = CustomContext(style=random.choice(["haiku", "pirate", "robot"]))
print(f"Using style: {context.style}\n")

user_message = "Tell me a joke."
print(f"User: {user_message}")
@@ -43,6 +42,7 @@ async def main():
if __name__ == "__main__":
asyncio.run(main())


"""
$ python examples/basic/dynamic_system_prompt.py

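Side note on the example above: the diff collapses the body of custom_instructions, so here is a minimal sketch of how the dataclass context and the dynamic instructions callable fit together. The branch wording inside custom_instructions is assumed for illustration, not taken from the file.

```python
from dataclasses import dataclass
from typing import Literal

from agents import Agent, RunContextWrapper


@dataclass
class CustomContext:
    style: Literal["haiku", "pirate", "robot"]


def custom_instructions(
    run_context: RunContextWrapper[CustomContext], agent: Agent[CustomContext]
) -> str:
    # Branch on the style carried by the run context; the exact wording in the
    # real example is collapsed in this diff, so these strings are illustrative.
    if run_context.context.style == "haiku":
        return "Only respond in haikus."
    elif run_context.context.style == "pirate":
        return "Respond as a pirate."
    return "Respond as a robot and say 'beep boop' a lot."


agent = Agent(
    name="Chat agent",
    instructions=custom_instructions,
)
```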
12 changes: 8 additions & 4 deletions examples/basic/stream_function_call_args.py
@@ -1,20 +1,24 @@
import asyncio
from typing import Any
from typing import Annotated, Any, Optional

from openai.types.responses import ResponseFunctionCallArgumentsDeltaEvent

from agents import Agent, Runner, function_tool


@function_tool
def write_file(filename: str, content: str) -> str:
def write_file(filename: Annotated[str, "Name of the file"], content: str) -> str:
"""Write content to a file."""
return f"File {filename} written successfully"


@function_tool
def create_config(project_name: str, version: str, dependencies: list[str]) -> str:
"""Create a configuration file for a project."""
def create_config(
project_name: Annotated[str, "Project name"],
version: Annotated[str, "Project version"],
dependencies: Annotated[Optional[list[str]], "Dependencies (list of packages)"],
) -> str:
"""Generate a project configuration file."""
return f"Config for {project_name} v{version} created"


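The Annotated metadata added above is intended to document the tool parameters. If the installed SDK version maps Annotated string metadata into the generated parameter schema, a quick local check is sketched below; it assumes function_tool returns a FunctionTool that exposes a params_json_schema attribute, so treat it as a sketch rather than a guaranteed API.

```python
import json
from typing import Annotated, Optional

from agents import function_tool


@function_tool
def create_config(
    project_name: Annotated[str, "Project name"],
    version: Annotated[str, "Project version"],
    dependencies: Annotated[Optional[list[str]], "Dependencies (list of packages)"],
) -> str:
    """Generate a project configuration file."""
    return f"Config for {project_name} v{version} created"


if __name__ == "__main__":
    # Inspect the tool name and the JSON schema built for its parameters to see
    # where (or whether) the Annotated descriptions show up.
    print(create_config.name)
    print(json.dumps(create_config.params_json_schema, indent=2))
```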
24 changes: 11 additions & 13 deletions src/agents/models/chatcmpl_converter.py
@@ -107,7 +107,7 @@ def message_to_output_items(cls, message: ChatCompletionMessage) -> list[TRespon
if hasattr(message, "thinking_blocks") and message.thinking_blocks:
# Store thinking text in content and signature in encrypted_content
reasoning_item.content = []
signature = None
signatures: list[str] = []
for block in message.thinking_blocks:
if isinstance(block, dict):
thinking_text = block.get("thinking", "")
@@ -116,15 +116,12 @@ def message_to_output_items(cls, message: ChatCompletionMessage) -> list[TRespon
Content(text=thinking_text, type="reasoning_text")
)
# Store the signature if present
if block.get("signature"):
signature = block.get("signature")
if signature := block.get("signature"):
signatures.append(signature)

# Store only the last signature in encrypted_content
# If there are multiple thinking blocks, this should be a problem.
# In practice, there should only be one signature for the entire reasoning step.
# Tested with: claude-sonnet-4-20250514
if signature:
reasoning_item.encrypted_content = signature
# Store the signatures in encrypted_content with newline delimiter
if signatures:
reasoning_item.encrypted_content = "\n".join(signatures)

items.append(reasoning_item)

@@ -518,7 +515,8 @@ def ensure_assistant_message() -> ChatCompletionAssistantMessageParam:
elif reasoning_item := cls.maybe_reasoning_message(item):
# Reconstruct thinking blocks from content (text) and encrypted_content (signature)
content_items = reasoning_item.get("content", [])
signature = reasoning_item.get("encrypted_content")
encrypted_content = reasoning_item.get("encrypted_content")
signatures = encrypted_content.split("\n") if encrypted_content else []

if content_items and preserve_thinking_blocks:
# Reconstruct thinking blocks from content and signature
@@ -532,9 +530,9 @@ def ensure_assistant_message() -> ChatCompletionAssistantMessageParam:
"type": "thinking",
"thinking": content_item.get("text", ""),
}
# Add signature if available
if signature:
thinking_block["signature"] = signature
# Add signatures if available
if signatures:
thinking_block["signature"] = signatures.pop(0)
pending_thinking_blocks.append(thinking_block)

# 8) If we haven't recognized it => fail or ignore
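For reviewers, the round-trip this converter change implements can be summarized in isolation: pack every thinking-block signature into encrypted_content with a newline delimiter on the way out, then split and re-attach the signatures in order when rebuilding the blocks. The helper names below are illustrative, not part of the converter.

```python
from typing import Optional


def pack_signatures(blocks: list[dict]) -> Optional[str]:
    # Collect every non-empty signature, in block order, and join with "\n".
    signatures = [sig for block in blocks if (sig := block.get("signature"))]
    return "\n".join(signatures) if signatures else None


def unpack_signatures(encrypted_content: Optional[str], thinking_texts: list[str]) -> list[dict]:
    # Split the packed string and hand signatures back out positionally,
    # mirroring the converter's `signatures.pop(0)`.
    signatures = encrypted_content.split("\n") if encrypted_content else []
    blocks: list[dict] = []
    for text in thinking_texts:
        block: dict = {"type": "thinking", "thinking": text}
        if signatures:
            block["signature"] = signatures.pop(0)
        blocks.append(block)
    return blocks


packed = pack_signatures(
    [
        {"type": "thinking", "thinking": "first", "signature": "SigA"},
        {"type": "thinking", "thinking": "second", "signature": "SigB"},
    ]
)
assert packed == "SigA\nSigB"
assert unpack_signatures(packed, ["first", "second"]) == [
    {"type": "thinking", "thinking": "first", "signature": "SigA"},
    {"type": "thinking", "thinking": "second", "signature": "SigB"},
]
```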
27 changes: 24 additions & 3 deletions tests/test_anthropic_thinking_blocks.py
@@ -125,7 +125,12 @@ def test_anthropic_thinking_blocks_with_tool_calls():
"Let me use the weather tool to get this information."
),
"signature": "TestSignature123",
}
},
{
"type": "thinking",
"thinking": ("We should use the city Tokyo as the city."),
"signature": "TestSignature456",
},
],
tool_calls=[
ChatCompletionMessageToolCall(
@@ -143,7 +148,7 @@ def test_anthropic_thinking_blocks_with_tool_calls():
reasoning_items = [
item for item in output_items if hasattr(item, "type") and item.type == "reasoning"
]
assert len(reasoning_items) == 1, "Should have exactly one reasoning item"
assert len(reasoning_items) == 1, "Thinking blocks should be merged into one reasoning item"

reasoning_item = reasoning_items[0]

@@ -159,7 +164,9 @@ def test_anthropic_thinking_blocks_with_tool_calls():
assert hasattr(reasoning_item, "encrypted_content"), (
"Reasoning item should have encrypted_content"
)
assert reasoning_item.encrypted_content == "TestSignature123", "Signature should be preserved"
assert reasoning_item.encrypted_content == "TestSignature123\nTestSignature456", (
"Signature should be preserved"
)

# Verify tool calls are present
tool_call_items = [
@@ -210,6 +217,20 @@ def test_anthropic_thinking_blocks_with_tool_calls():
"Signature should be preserved in thinking block"
)

second_content = content[1]
assert second_content.get("type") == "thinking", (
f"Second content must be 'thinking' type for Anthropic compatibility, "
f"but got '{second_content.get('type')}'"
)
expected_thinking = "We should use the city Tokyo as the city."
assert second_content.get("thinking") == expected_thinking, (
"Thinking content should be preserved"
)
# Signature should also be preserved
assert second_content.get("signature") == "TestSignature456", (
"Signature should be preserved in thinking block"
)

# Verify tool calls are preserved
tool_calls = assistant_msg.get("tool_calls", [])
assert len(cast(list[Any], tool_calls)) == 1, "Tool calls should be preserved"