Skip to content

Commit 03869a7

Browse files
authored
Merge branch 'main' into patch-1
2 parents 40dcad3 + 18b10f1 commit 03869a7

File tree

12 files changed

+80
-8
lines changed

12 files changed

+80
-8
lines changed

README.md

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -140,7 +140,14 @@ The Agents SDK is designed to be highly flexible, allowing you to model a wide r
140140

141141
## Tracing
142142

143-
The Agents SDK automatically traces your agent runs, making it easy to track and debug the behavior of your agents. Tracing is extensible by design, supporting custom spans and a wide variety of external destinations, including [Logfire](https://logfire.pydantic.dev/docs/integrations/llms/openai/#openai-agents), [AgentOps](https://docs.agentops.ai/v1/integrations/agentssdk), [Braintrust](https://braintrust.dev/docs/guides/traces/integrations#openai-agents-sdk) and [Opik](https://www.comet.com/docs/opik/tracing/integrations/openai_agents). For more details about how to customize or disable tracing, see [Tracing](http://openai.github.io/openai-agents-python/tracing).
143+
The Agents SDK automatically traces your agent runs, making it easy to track and debug the behavior of your agents. Tracing is extensible by design, supporting custom spans and a wide variety of external destinations, including:
144+
- [AgentOps](https://docs.agentops.ai/v1/integrations/agentssdk)
145+
- [Braintrust](https://braintrust.dev/docs/guides/traces/integrations#openai-agents-sdk)
146+
- [Comet Opik](https://www.comet.com/docs/opik/tracing/integrations/openai_agents)
147+
- [Keywords AI](https://docs.keywordsai.co/integration/development-frameworks/openai-agent)
148+
- [Logfire](https://logfire.pydantic.dev/docs/integrations/llms/openai/#openai-agents)
149+
150+
For more details about how to customize or disable tracing, see [Tracing](http://openai.github.io/openai-agents-python/tracing).
144151

145152
## Development (only needed if you need to edit the SDK/examples)
146153

docs/agents.md

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,14 +13,15 @@ The most common properties of an agent you'll configure are:
1313
```python
1414
from agents import Agent, ModelSettings, function_tool
1515

16+
@function_tool
1617
def get_weather(city: str) -> str:
1718
return f"The weather in {city} is sunny"
1819

1920
agent = Agent(
2021
name="Haiku agent",
2122
instructions="Always respond in haiku form",
2223
model="o3-mini",
23-
tools=[function_tool(get_weather)],
24+
tools=[get_weather],
2425
)
2526
```
2627

docs/context.md

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,7 @@ class UserInfo: # (1)!
3636
name: str
3737
uid: int
3838

39+
@function_tool
3940
async def fetch_user_age(wrapper: RunContextWrapper[UserInfo]) -> str: # (2)!
4041
return f"User {wrapper.context.name} is 47 years old"
4142

@@ -44,7 +45,7 @@ async def main():
4445

4546
agent = Agent[UserInfo]( # (4)!
4647
name="Assistant",
47-
tools=[function_tool(fetch_user_age)],
48+
tools=[fetch_user_age],
4849
)
4950

5051
result = await Runner.run(

docs/running_agents.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -78,7 +78,7 @@ async def main():
7878
# San Francisco
7979

8080
# Second turn
81-
new_input = output.to_input_list() + [{"role": "user", "content": "What state is it in?"}]
81+
new_input = result.to_input_list() + [{"role": "user", "content": "What state is it in?"}]
8282
result = await Runner.run(agent, new_input)
8383
print(result.final_output)
8484
# California

docs/tracing.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -94,3 +94,4 @@ External trace processors include:
9494
- [Braintrust](https://braintrust.dev/docs/guides/traces/integrations#openai-agents-sdk)
9595
- [Pydantic Logfire](https://logfire.pydantic.dev/docs/integrations/llms/openai/#openai-agents)
9696
- [AgentOps](https://docs.agentops.ai/v1/integrations/agentssdk)
97+
- [Keywords AI](https://docs.keywordsai.co/integration/development-frameworks/openai-agent)

examples/agent_patterns/input_guardrails.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -53,7 +53,7 @@ async def math_guardrail(
5353

5454
return GuardrailFunctionOutput(
5555
output_info=final_output,
56-
tripwire_triggered=not final_output.is_math_homework,
56+
tripwire_triggered=final_output.is_math_homework,
5757
)
5858

5959

src/agents/guardrail.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -86,7 +86,7 @@ class InputGuardrail(Generic[TContext]):
8686
[RunContextWrapper[TContext], Agent[Any], str | list[TResponseInputItem]],
8787
MaybeAwaitable[GuardrailFunctionOutput],
8888
]
89-
"""A function that receives the the agent input and the context, and returns a
89+
"""A function that receives the agent input and the context, and returns a
9090
`GuardrailResult`. The result marks whether the tripwire was triggered, and can optionally
9191
include information about the guardrail's output.
9292
"""

src/agents/model_settings.py

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,15 +10,34 @@ class ModelSettings:
1010
1111
This class holds optional model configuration parameters (e.g. temperature,
1212
top_p, penalties, truncation, etc.).
13+
14+
Not all models/providers support all of these parameters, so please check the API documentation
15+
for the specific model and provider you are using.
1316
"""
1417

1518
temperature: float | None = None
19+
"""The temperature to use when calling the model."""
20+
1621
top_p: float | None = None
22+
"""The top_p to use when calling the model."""
23+
1724
frequency_penalty: float | None = None
25+
"""The frequency penalty to use when calling the model."""
26+
1827
presence_penalty: float | None = None
28+
"""The presence penalty to use when calling the model."""
29+
1930
tool_choice: Literal["auto", "required", "none"] | str | None = None
31+
"""The tool choice to use when calling the model."""
32+
2033
parallel_tool_calls: bool | None = False
34+
"""Whether to use parallel tool calls when calling the model."""
35+
2136
truncation: Literal["auto", "disabled"] | None = None
37+
"""The truncation strategy to use when calling the model."""
38+
39+
max_tokens: int | None = None
40+
"""The maximum number of output tokens to generate."""
2241

2342
def resolve(self, override: ModelSettings | None) -> ModelSettings:
2443
"""Produce a new ModelSettings by overlaying any non-None values from the
@@ -33,4 +52,5 @@ def resolve(self, override: ModelSettings | None) -> ModelSettings:
3352
tool_choice=override.tool_choice or self.tool_choice,
3453
parallel_tool_calls=override.parallel_tool_calls or self.parallel_tool_calls,
3554
truncation=override.truncation or self.truncation,
55+
max_tokens=override.max_tokens or self.max_tokens,
3656
)

src/agents/models/openai_chatcompletions.py

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -503,6 +503,7 @@ async def _fetch_response(
503503
top_p=self._non_null_or_not_given(model_settings.top_p),
504504
frequency_penalty=self._non_null_or_not_given(model_settings.frequency_penalty),
505505
presence_penalty=self._non_null_or_not_given(model_settings.presence_penalty),
506+
max_tokens=self._non_null_or_not_given(model_settings.max_tokens),
506507
tool_choice=tool_choice,
507508
response_format=response_format,
508509
parallel_tool_calls=parallel_tool_calls,
@@ -808,6 +809,13 @@ def ensure_assistant_message() -> ChatCompletionAssistantMessageParam:
808809
"content": cls.extract_text_content(content),
809810
}
810811
result.append(msg_developer)
812+
elif role == "assistant":
813+
flush_assistant_message()
814+
msg_assistant: ChatCompletionAssistantMessageParam = {
815+
"role": "assistant",
816+
"content": cls.extract_text_content(content),
817+
}
818+
result.append(msg_assistant)
811819
else:
812820
raise UserError(f"Unexpected role in easy_input_message: {role}")
813821

src/agents/models/openai_responses.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -235,6 +235,7 @@ async def _fetch_response(
235235
temperature=self._non_null_or_not_given(model_settings.temperature),
236236
top_p=self._non_null_or_not_given(model_settings.top_p),
237237
truncation=self._non_null_or_not_given(model_settings.truncation),
238+
max_output_tokens=self._non_null_or_not_given(model_settings.max_tokens),
238239
tool_choice=tool_choice,
239240
parallel_tool_calls=parallel_tool_calls,
240241
stream=stream,

0 commit comments

Comments (0)