Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
35 changes: 14 additions & 21 deletions samples-v2/openai_agents/.gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -95,32 +95,25 @@ celerybeat-schedule
# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
# Virtual Environment (additional patterns)
.myenv/
.venv/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/
# IDE and Editor Configuration
.code/
.vscode/
.idea/

# Test Reports and Results
*TEST_REPORT*.md
*test_results*.json
comprehensive_test_results.json
COMPREHENSIVE_TEST_REPORT.md
TEST_VALIDATION_REPORT.md

# Azure Functions artifacts
bin
Expand Down
95 changes: 95 additions & 0 deletions samples-v2/openai_agents/basic/agent_lifecycle_example.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,95 @@
import random
from typing import Any

from pydantic import BaseModel

from agents import Agent, AgentHooks, RunContextWrapper, Runner, Tool, function_tool


class CustomAgentHooks(AgentHooks):
    """Per-agent lifecycle hooks that log every event to stdout.

    Each callback bumps an event counter and prints a numbered line tagged
    with this hook set's display name, so interleaved output from multiple
    agents stays attributable.
    """

    def __init__(self, display_name: str):
        self.event_counter = 0
        self.display_name = display_name

    def _emit(self, message: str) -> None:
        # Single choke point for numbering and formatting every log line.
        self.event_counter += 1
        print(f"### ({self.display_name}) {self.event_counter}: {message}")

    async def on_start(self, context: RunContextWrapper, agent: Agent) -> None:
        self._emit(f"Agent {agent.name} started")

    async def on_end(self, context: RunContextWrapper, agent: Agent, output: Any) -> None:
        self._emit(f"Agent {agent.name} ended with output {output}")

    async def on_handoff(self, context: RunContextWrapper, agent: Agent, source: Agent) -> None:
        self._emit(f"Agent {source.name} handed off to {agent.name}")

    async def on_tool_start(self, context: RunContextWrapper, agent: Agent, tool: Tool) -> None:
        self._emit(f"Agent {agent.name} started tool {tool.name}")

    async def on_tool_end(
        self, context: RunContextWrapper, agent: Agent, tool: Tool, result: str
    ) -> None:
        self._emit(f"Agent {agent.name} ended tool {tool.name} with result {result}")


###


@function_tool
def random_number(max: int) -> int:
    """Return a uniformly random integer in the closed range [0, max]."""
    drawn = random.randint(0, max)
    return drawn


@function_tool
def multiply_by_two(x: int) -> int:
    """Double the given integer."""
    return 2 * x


class FinalResult(BaseModel):
    """Structured output schema for the agents: a single integer result."""

    number: int


# Downstream agent: receives the handoff, doubles the number, and returns
# the structured FinalResult. Has its own hook set for attributable logging.
multiply_agent = Agent(
    name="Multiply Agent",
    instructions="Multiply the number by 2 and then return the final result.",
    tools=[multiply_by_two],
    output_type=FinalResult,
    hooks=CustomAgentHooks(display_name="Multiply Agent"),
)

# Entry-point agent: generates a random number and, per its instructions,
# may hand off to multiply_agent. Must be defined after multiply_agent
# because it references it in handoffs.
start_agent = Agent(
    name="Start Agent",
    instructions="Generate a random number. If it's even, stop. If it's odd, hand off to the multiply agent.",
    tools=[random_number],
    output_type=FinalResult,
    handoffs=[multiply_agent],
    hooks=CustomAgentHooks(display_name="Start Agent"),
)


def main():
    """Run the demo: generate a random number and, if odd, hand off to double it.

    Returns the final structured output (FinalResult) of the run.
    """
    limit = 250  # fixed upper bound used for the demo prompt
    print(f"Generating random number between 0 and {limit}")

    run_result = Runner.run_sync(
        start_agent, input=f"Generate a random number between 0 and {limit}."
    )
    print("Done!")
    return run_result.final_output
40 changes: 40 additions & 0 deletions samples-v2/openai_agents/basic/dynamic_system_prompt.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
import random
from typing import Literal

from agents import Agent, RunContextWrapper, Runner


class CustomContext:
    """Carries the response style the agent should adopt for a single run."""

    def __init__(self, style: Literal["haiku", "pirate", "robot"]):
        # Stored verbatim; read by the dynamic-instructions callback.
        self.style = style


def custom_instructions(
    run_context: RunContextWrapper[CustomContext], agent: Agent[CustomContext]
) -> str:
    """Build the system prompt from the style carried in the run context.

    Any style other than "haiku" or "pirate" falls back to the robot prompt,
    matching the original if/elif/else behavior.
    """
    prompts = {
        "haiku": "Only respond in haikus.",
        "pirate": "Respond as a pirate.",
    }
    return prompts.get(
        run_context.context.style,
        "Respond as a robot and say 'beep boop' a lot.",
    )


# instructions is a callable here, so the system prompt is recomputed per run
# from the CustomContext carried in the run context.
agent = Agent(
    name="Chat agent",
    instructions=custom_instructions,
)


def main():
    """Pick a random style, then run the agent with style-aware instructions."""
    picked: Literal["haiku", "pirate", "robot"] = random.choice(["haiku", "pirate", "robot"])
    run_context = CustomContext(style=picked)
    print(f"Using style: {picked}\n")

    prompt = "Tell me a joke."
    print(f"User: {prompt}")
    run_result = Runner.run_sync(agent, prompt, context=run_context)

    print(f"Assistant: {run_result.final_output}")
    return run_result.final_output
117 changes: 117 additions & 0 deletions samples-v2/openai_agents/basic/lifecycle_example.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,117 @@
import random
from typing import Any, Optional

from pydantic import BaseModel

from agents import Agent, RunContextWrapper, RunHooks, Runner, Tool, Usage, function_tool
from agents.items import ModelResponse, TResponseInputItem


class ExampleHooks(RunHooks):
    """Run-level hooks that print a numbered trace of every lifecycle event,
    including cumulative request/token usage at the time of each event.
    """

    def __init__(self):
        self.event_counter = 0

    def _usage_to_str(self, usage: Usage) -> str:
        # Compact one-line summary of cumulative usage so far.
        return (
            f"{usage.requests} requests, {usage.input_tokens} input tokens, "
            f"{usage.output_tokens} output tokens, {usage.total_tokens} total tokens"
        )

    def _emit(self, context: RunContextWrapper, message: str) -> None:
        # Single formatting point: number the event and append usage stats.
        self.event_counter += 1
        print(f"### {self.event_counter}: {message}. Usage: {self._usage_to_str(context.usage)}")

    async def on_agent_start(self, context: RunContextWrapper, agent: Agent) -> None:
        self._emit(context, f"Agent {agent.name} started")

    async def on_llm_start(
        self,
        context: RunContextWrapper,
        agent: Agent,
        system_prompt: Optional[str],
        input_items: list[TResponseInputItem],
    ) -> None:
        self._emit(context, "LLM started")

    async def on_llm_end(
        self, context: RunContextWrapper, agent: Agent, response: ModelResponse
    ) -> None:
        self._emit(context, "LLM ended")

    async def on_agent_end(self, context: RunContextWrapper, agent: Agent, output: Any) -> None:
        self._emit(context, f"Agent {agent.name} ended with output {output}")

    async def on_tool_start(self, context: RunContextWrapper, agent: Agent, tool: Tool) -> None:
        self._emit(context, f"Tool {tool.name} started")

    async def on_tool_end(
        self, context: RunContextWrapper, agent: Agent, tool: Tool, result: str
    ) -> None:
        self._emit(context, f"Tool {tool.name} ended with result {result}")

    async def on_handoff(
        self, context: RunContextWrapper, from_agent: Agent, to_agent: Agent
    ) -> None:
        self._emit(context, f"Handoff from {from_agent.name} to {to_agent.name}")


# Shared hook instance passed to Runner.run_sync in main() below.
hooks = ExampleHooks()

###


@function_tool
def random_number(max: int) -> int:
    """Return a uniformly random integer in the closed range [0, max]."""
    picked = random.randint(0, max)
    return picked


@function_tool
def multiply_by_two(x: int) -> int:
    """Double the given integer."""
    return 2 * x


class FinalResult(BaseModel):
    """Structured output schema for the agents: a single integer result."""

    number: int


# Downstream agent: receives the handoff, doubles the number, and returns
# the structured FinalResult.
multiply_agent = Agent(
    name="Multiply Agent",
    instructions="Multiply the number by 2 and then return the final result.",
    tools=[multiply_by_two],
    output_type=FinalResult,
)

# Entry-point agent: generates a random number and, per its instructions,
# may hand off to multiply_agent. Defined after multiply_agent because it
# references it in handoffs.
start_agent = Agent(
    name="Start Agent",
    instructions="Generate a random number. If it's even, stop. If it's odd, hand off to the multiplier agent.",
    tools=[random_number],
    output_type=FinalResult,
    handoffs=[multiply_agent],
)


def main():
    """Run the demo end to end with run-level lifecycle hooks attached.

    Uses a fixed max number (no interactive input) and returns the final
    structured output (FinalResult) of the run.
    """
    # Default max number for demo
    max_number = 250
    # Fix: the upstream interactive example prompted the user here; this
    # non-interactive port uses a fixed value, so the old message
    # "Enter a max number: 250" was misleading. Print what actually happens,
    # consistent with the sibling agent_lifecycle_example sample.
    print(f"Generating random number between 0 and {max_number}")

    result = Runner.run_sync(
        start_agent,
        input=f"Generate a random number between 0 and {max_number}.",
        hooks=hooks,
    )

    print("Done!")
    return result.final_output
19 changes: 19 additions & 0 deletions samples-v2/openai_agents/basic/local_image.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
from agents import Agent, Runner


def main():
    """Demonstrate the local-image pattern (image attachment simulated).

    In a real implementation the actual image data would be uploaded or
    attached; this simplified version only demonstrates the call pattern.
    """
    image_agent = Agent(
        name="Image Assistant",
        instructions="You are a helpful assistant that can analyze images.",
    )

    # Simulated image analysis for the demo; no real image accompanies
    # the message.
    prompt = "I have uploaded a local image. Please describe what you see in it."

    run_result = Runner.run_sync(image_agent, prompt)
    print(run_result.final_output)
    return run_result.final_output
25 changes: 25 additions & 0 deletions samples-v2/openai_agents/basic/non_strict_output_type.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
from pydantic import BaseModel
from typing import Optional

from agents import Agent, Runner


class WeatherInfo(BaseModel):
    """Weather report schema; optional fields allow partial model output."""

    city: str
    temperature: Optional[str] = None
    conditions: Optional[str] = None
    humidity: Optional[str] = None


def main():
    """Ask for Tokyo's weather using a flexible structured-output schema.

    The optional fields on WeatherInfo let the model return partial output.
    (A real implementation might set strict=False for even more flexibility.)
    """
    weather_agent = Agent(
        name="Weather Assistant",
        instructions="Provide weather information for the requested city. Return as much detail as available.",
        output_type=WeatherInfo,
    )

    run_result = Runner.run_sync(weather_agent, "What's the weather like in Tokyo?")
    print(run_result.final_output)
    return run_result.final_output
21 changes: 21 additions & 0 deletions samples-v2/openai_agents/basic/previous_response_id.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
from agents import Agent, Runner


def main():
    """Demonstrate the two-turn pattern for carrying conversation context.

    Note: a real implementation would pass previous_response_id so the
    second run actually sees the first turn's context; here the two runs
    are independent.
    """
    memory_agent = Agent(
        name="Memory Assistant",
        instructions="You are a helpful assistant with memory of previous conversations.",
    )

    # First conversation
    print("First interaction:")
    first = Runner.run_sync(memory_agent, "My name is John and I like pizza.")
    print(f"Assistant: {first.final_output}")

    # Second conversation; context carry-over is only simulated in this demo.
    print("\nSecond interaction (remembering previous context):")
    second = Runner.run_sync(memory_agent, "What did I tell you about my food preferences?")
    print(f"Assistant: {second.final_output}")

    return second.final_output
18 changes: 18 additions & 0 deletions samples-v2/openai_agents/basic/remote_image.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
from agents import Agent, Runner


def main():
    """Demonstrate the remote-image pattern (URL handling simulated).

    A real implementation would attach the remote image content rather than
    only mentioning the URL in the message text.
    """
    url_agent = Agent(
        name="Remote Image Assistant",
        instructions="You are a helpful assistant that can analyze images from URLs.",
    )

    # Example with a hypothetical remote image URL
    image_url = "https://example.com/sample-image.jpg"
    run_result = Runner.run_sync(url_agent, f"Please analyze this image from the URL: {image_url}")
    print(run_result.final_output)
    return run_result.final_output
Loading