Commit d7e6099

update all
1 parent b18e888 commit d7e6099

6 files changed: +287 -0 lines changed

Lines changed: 59 additions & 0 deletions
@@ -0,0 +1,59 @@
from __future__ import annotations
from agents import Agent, RunContextWrapper, Runner, function_tool
from agents.exceptions import AgentsException
import asyncio


"""
This example demonstrates the use of the OpenAI Agents SDK with tools and comprehensive error handling.

The agent, 'Triage Agent', is configured to handle two tasks:
- Fetching weather information for a specified city using the `get_weather` tool.
- Adding two numbers using the `sum_numbers` tool.

The agent is instructed to use only one tool per execution cycle and can switch to another tool in subsequent cycles.
The example sets a `max_turns=1` limit to intentionally restrict the run to a single turn, which can trigger a `MaxTurnsExceeded` error if the agent needs more than one turn (for example, a tool call followed by a final answer).

Error handling is implemented with `AgentsException`, the base class for all SDK-related exceptions, including:
- `MaxTurnsExceeded`: Raised when the run exceeds the `max_turns` specified in the run methods.
- `ModelBehaviorError`: Raised when the model produces invalid output, e.g., malformed JSON or a call to a non-existent tool.
- `UserError`: Raised when the SDK user makes an error in their own code.
- `InputGuardrailTripwireTriggered`: Raised when an input guardrail is violated (e.g., invalid or off-topic input).
- `OutputGuardrailTripwireTriggered`: Raised when an output guardrail is violated (e.g., invalid tool output).

Although this example does not include explicit guardrails, the structure supports adding input/output guardrails to validate user inputs or tool outputs. The `AgentsException` catch block ensures all SDK-related errors are handled gracefully.
"""


@function_tool
def get_weather(city: str) -> str:
    """Returns weather info for the specified city."""
    return f"The weather in {city} is sunny"


@function_tool
def sum_numbers(a: int, b: int) -> int:
    """Adds two numbers."""
    return a + b


agent = Agent(
    name="Triage Agent",
    instructions="Get weather or sum numbers. You can use one tool at a time, switching to another tool in subsequent turns.",
    tools=[sum_numbers, get_weather],
)


async def main():
    try:
        user_input = input("Enter a message: ")

        result = await Runner.run(agent, user_input, max_turns=1)
        print(result.final_output)
    except AgentsException as e:
        print(f"Caught AgentsException: {e}")


if __name__ == "__main__":
    asyncio.run(main())
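
Since the docstring above enumerates the `AgentsException` subclasses, here is a minimal sketch of handling them individually, assuming the same `agents.exceptions` imports used by the other files in this commit; the helper name is illustrative only, and the final `AgentsException` branch still catches anything not matched earlier (including guardrail tripwires):

from agents.exceptions import AgentsException, MaxTurnsExceeded, ModelBehaviorError, UserError


async def run_with_specific_handling(user_input: str) -> None:
    # Hypothetical helper, not part of the diff.
    try:
        result = await Runner.run(agent, user_input, max_turns=1)
        print(result.final_output)
    except MaxTurnsExceeded as e:
        print(f"Run exceeded max_turns: {e}")
    except ModelBehaviorError as e:
        print(f"Model produced invalid output: {e}")
    except UserError as e:
        print(f"SDK misuse: {e}")
    except AgentsException as e:
        # Fallback for any other SDK-related error, including guardrail tripwires.
        print(f"Caught AgentsException: {e}")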
Lines changed: 56 additions & 0 deletions
@@ -0,0 +1,56 @@
from __future__ import annotations
import asyncio
from pydantic import BaseModel
from agents import (
    Agent,
    GuardrailFunctionOutput,
    InputGuardrailTripwireTriggered,
    Runner,
    input_guardrail,
)
from agents.exceptions import AgentsException

"""
This example demonstrates an OpenAI Agents SDK agent with an input guardrail that blocks math homework queries.

The 'CustomerSupportAgent' processes a user query provided as a direct string input. A guardrail, implemented via 'GuardrailAgent' and a Pydantic model (`MathHomeworkOutput`), checks whether the input is a math homework question. If it is, the guardrail raises `InputGuardrailTripwireTriggered` and a refusal message is printed; otherwise, the agent answers the query.
"""

class MathHomeworkOutput(BaseModel):
    is_math_homework: bool


guardrail_agent = Agent(
    name="GuardrailAgent",
    instructions="Check if the input is a math homework question.",
    output_type=MathHomeworkOutput,
)


@input_guardrail
async def math_guardrail(context, agent: Agent, input: str) -> GuardrailFunctionOutput:
    result = await Runner.run(guardrail_agent, input)
    output = result.final_output_as(MathHomeworkOutput)
    return GuardrailFunctionOutput(
        output_info=output,
        tripwire_triggered=output.is_math_homework,
    )


async def main():
    agent = Agent(
        name="CustomerSupportAgent",
        instructions="Answer user queries.",
        input_guardrails=[math_guardrail],
    )

    user_input = "What is 2 + 2"
    try:
        result = await Runner.run(agent, user_input)
        print(result.final_output)
    except InputGuardrailTripwireTriggered:
        print("InputGuardrailTripwireTriggered, I can't help with math homework.")


if __name__ == "__main__":
    asyncio.run(main())
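
For reference, a small sketch of exercising the tripwire path on the same agent; the query below is hypothetical (not part of the diff) and simply illustrates an input the guardrail agent would be expected to classify as math homework:

    # Inside main(), reusing the agent defined above.
    homework_input = "Solve for x: 2x + 3 = 11"  # hypothetical input, expected to trip the guardrail
    try:
        result = await Runner.run(agent, homework_input)
        print(result.final_output)
    except InputGuardrailTripwireTriggered:
        print("InputGuardrailTripwireTriggered, I can't help with math homework.")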
Lines changed: 42 additions & 0 deletions
@@ -0,0 +1,42 @@
from __future__ import annotations
import asyncio
from agents import Agent, Runner, function_tool
from agents.exceptions import MaxTurnsExceeded

"""
This example demonstrates an OpenAI Agents SDK agent that triggers a MaxTurnsExceeded error.

The 'TriageAgent' handles user queries using tools for fetching weather (`get_weather`) and adding numbers (`sum_numbers`). The instructions direct the agent to process both tasks in a single turn, but with `max_turns=1` the run cannot finish, so a `MaxTurnsExceeded` error is raised. The query is passed as a direct string input, and the error is caught and its message displayed.
"""

@function_tool
def get_weather(city: str) -> str:
    """Returns weather info for the specified city."""
    return f"The weather in {city} is sunny"


@function_tool
def sum_numbers(a: int, b: int) -> int:
    """Adds two numbers."""
    return a + b


async def main():
    agent = Agent(
        name="TriageAgent",
        instructions="Process both get_weather and sum_numbers in a single turn when asked for both.",
        tools=[sum_numbers, get_weather],
    )

    user_input = "What is US Weather and sum 2 + 2."
    try:
        result = await Runner.run(agent, user_input, max_turns=1)
        print(result.final_output)
    except MaxTurnsExceeded as e:
        print(f"Caught MaxTurnsExceeded: {e}")


if __name__ == "__main__":
    asyncio.run(main())
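
A minimal variation, assuming the agent can complete both tool calls and a final answer within the larger budget, that raises the limit so `MaxTurnsExceeded` is not expected; the value 5 is an arbitrary illustrative choice, not taken from the diff:

    # Inside main(): the same query with a more generous turn budget.
    result = await Runner.run(agent, user_input, max_turns=5)
    print(result.final_output)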
Lines changed: 34 additions & 0 deletions
@@ -0,0 +1,34 @@
from __future__ import annotations
import asyncio
from pydantic import BaseModel
from typing import Literal
from agents import Agent, Runner
from agents.exceptions import ModelBehaviorError

"""
This example demonstrates an OpenAI Agents SDK agent that triggers a ModelBehaviorError due to invalid model output.

The 'MiniErrorBot' agent uses a Pydantic model (`Output`) whose `value` field must be the literal 'EXPECTED_VALUE'. The instructions tell the model to return 'Hello', so the output fails validation and a `ModelBehaviorError` is raised. The query is passed as a direct string input, and the error is caught and its message displayed.
"""

class Output(BaseModel):
    value: Literal["EXPECTED_VALUE"]


async def main():
    agent = Agent(
        name="MiniErrorBot",
        instructions="Just say: Hello",
        output_type=Output,
    )

    user_input = "hello"
    try:
        result = await Runner.run(agent, user_input)
        print(result.final_output)
    except ModelBehaviorError as e:
        print(f"ModelBehaviorError: {e}")


if __name__ == "__main__":
    asyncio.run(main())
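
For contrast, a hypothetical counterpart (not part of the diff; the agent name is illustrative) whose instructions are aligned with the required literal, so the structured output is expected to validate without raising:

well_behaved_agent = Agent(
    name="MiniBot",  # hypothetical name
    instructions="Reply with the value field set exactly to EXPECTED_VALUE.",
    output_type=Output,
)

# Inside main():
#     result = await Runner.run(well_behaved_agent, "hello")
#     print(result.final_output)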
Lines changed: 60 additions & 0 deletions
@@ -0,0 +1,60 @@
from __future__ import annotations
import asyncio
from pydantic import BaseModel
from agents import (
    Agent,
    GuardrailFunctionOutput,
    OutputGuardrailTripwireTriggered,
    Runner,
    output_guardrail,
)


"""
This example demonstrates an OpenAI Agents SDK agent with an output guardrail to block math homework responses.

The 'Assistant' agent processes a user query provided as a direct string input. An output guardrail, using a Pydantic model (`MathHomeworkOutput`) and a guardrail agent, checks whether the response is a math homework answer. If it is, the guardrail raises `OutputGuardrailTripwireTriggered` and a refusal message is printed.
"""


class MathHomeworkOutput(BaseModel):
    is_math_homework: bool


guardrail_agent = Agent(
    name="GuardrailAgent",
    instructions="Check if the output is a math homework answer.",
    output_type=MathHomeworkOutput,
)


@output_guardrail
async def math_guardrail(context, agent: Agent, output: str) -> GuardrailFunctionOutput:
    result = await Runner.run(guardrail_agent, output)
    output_data = result.final_output_as(MathHomeworkOutput)
    return GuardrailFunctionOutput(
        output_info=output_data,
        tripwire_triggered=output_data.is_math_homework,
    )


async def main():
    agent = Agent(
        name="Assistant",
        instructions="Answer user queries.",
        output_guardrails=[math_guardrail],
    )

    user_input = "What is 2 + 2"

    try:
        result = await Runner.run(agent, user_input)
        print(result.final_output)
    except OutputGuardrailTripwireTriggered:
        print(
            "OutputGuardrailTripwireTriggered, I can't provide math homework answers."
        )


if __name__ == "__main__":
    asyncio.run(main())
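
Input and output guardrails can be attached to the same agent. A small sketch below reuses the `math_guardrail` output guardrail defined above and assumes a hypothetical `math_input_guardrail` written like the `@input_guardrail` in the earlier file of this commit:

    # Hypothetical combined configuration; math_input_guardrail is assumed, not defined in this diff.
    agent_with_both = Agent(
        name="Assistant",
        instructions="Answer user queries.",
        input_guardrails=[math_input_guardrail],
        output_guardrails=[math_guardrail],
    )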

examples/exceptions/user_error.py

Lines changed: 36 additions & 0 deletions
@@ -0,0 +1,36 @@
from __future__ import annotations
import asyncio
from agents import Agent, Runner, function_tool
from agents.exceptions import UserError


"""
This example demonstrates an OpenAI Agents SDK agent that triggers a UserError due to incorrect SDK usage.

The 'Assistant' agent is configured with an invalid `tool_use_behavior` value and a tool (`invalid_tool`) that declares a `None` return type but returns a string. Either issue can raise a `UserError` when the agent is executed, indicating improper SDK configuration by the user. The query is passed as a direct string input, and the `UserError` is caught and its message displayed.
"""


@function_tool
def invalid_tool() -> None:
    return "I return a string"  # Type mismatch triggers UserError


async def main():
    agent = Agent(
        name="Assistant",
        instructions="Use the invalid_tool to process queries.",
        tools=[invalid_tool],
        tool_use_behavior="invalid_tool",  # intentionally not a supported value
    )
    user_input = "Do Something."
    try:
        result = await Runner.run(agent, user_input)
        print(result.final_output)
    except UserError as e:
        print(f"UserError: {e}")


if __name__ == "__main__":
    asyncio.run(main())
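
For comparison, a hypothetical corrected setup (not part of the diff; names are illustrative): the tool's return annotation matches the value it returns, and `tool_use_behavior` is omitted so the SDK's default applies, so no `UserError` is expected:

@function_tool
def valid_tool() -> str:
    """Returns a short confirmation string."""
    return "I return a string"


corrected_agent = Agent(
    name="Assistant",
    instructions="Use the valid_tool to process queries.",
    tools=[valid_tool],
    # tool_use_behavior is omitted, so the SDK default behavior is used.
)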
