Skip to content

Commit 428f38a

Browse files
Update examples to use gpt-5-mini instead of gpt-4o-mini
1 parent 114a102 commit 428f38a

File tree

27 files changed: +68 additions, −22 deletions

atomic-examples/basic-multimodal/basic_multimodal/main.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
```diff
@@ -62,7 +62,8 @@ class NutritionAnalysisOutput(BaseIOSchema):
 nutrition_analyzer = AtomicAgent[NutritionAnalysisInput, NutritionAnalysisOutput](
     config=AgentConfig(
         client=instructor.from_openai(openai.OpenAI(api_key=API_KEY)),
-        model="gpt-4o-mini",
+        model="gpt-5-mini",
+        model_api_parameters={"reasoning_effort": "low"},
         system_prompt_generator=SystemPromptGenerator(
             background=[
                 "You are a specialized nutrition label analyzer.",
```

atomic-examples/deep-research/deep_research/agents/choice_agent.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
```diff
@@ -25,6 +25,7 @@ class ChoiceAgentOutputSchema(BaseIOSchema):
     AgentConfig(
         client=instructor.from_openai(openai.OpenAI(api_key=ChatConfig.api_key)),
         model=ChatConfig.model,
+        model_api_parameters={"reasoning_effort": ChatConfig.reasoning_effort, "temperature": 0.1},
         system_prompt_generator=SystemPromptGenerator(
             background=[
                 "You are a decision-making agent that determines whether a new web search is needed to answer the user's question.",
@@ -45,7 +46,6 @@ class ChoiceAgentOutputSchema(BaseIOSchema):
                 "Your decision must match your reasoning - don't contradict yourself",
             ],
         ),
-        model_api_parameters={"temperature": 0.1},
     )
 )
```

atomic-examples/deep-research/deep_research/agents/qa_agent.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
```diff
@@ -31,6 +31,7 @@ class QuestionAnsweringAgentOutputSchema(BaseIOSchema):
     AgentConfig(
         client=instructor.from_openai(openai.OpenAI(api_key=ChatConfig.api_key)),
         model=ChatConfig.model,
+        model_api_parameters={"reasoning_effort": ChatConfig.reasoning_effort},
         system_prompt_generator=SystemPromptGenerator(
             background=[
                 "You are an expert question answering agent focused on providing factual information and encouraging deeper topic exploration.",
```

atomic-examples/deep-research/deep_research/agents/query_agent.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
```diff
@@ -19,6 +19,7 @@ class QueryAgentInputSchema(BaseIOSchema):
     AgentConfig(
         client=instructor.from_openai(openai.OpenAI(api_key=ChatConfig.api_key)),
         model=ChatConfig.model,
+        model_api_parameters={"reasoning_effort": ChatConfig.reasoning_effort},
         system_prompt_generator=SystemPromptGenerator(
             background=[
                 (
```

atomic-examples/deep-research/deep_research/config.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
```diff
@@ -28,7 +28,8 @@ class ChatConfig:
     """Configuration for the chat application"""

     api_key: str = get_api_key()  # This becomes a class variable
-    model: str = "gpt-4o-mini"
+    model: str = "gpt-5-mini"
+    reasoning_effort: str = "low"
     exit_commands: Set[str] = frozenset({"/exit", "/quit"})
     searxng_base_url: str = get_searxng_base_url()
     searxng_api_key: str = get_searxng_api_key()
```

atomic-examples/mcp-agent/example-client/example_client/main_fastapi.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
```diff
@@ -22,6 +22,7 @@ class MCPConfig:
     mcp_server_url: str = "http://localhost:6969"
     openai_model: str = "gpt-5-mini"
     openai_api_key: str = os.getenv("OPENAI_API_KEY") or ""
+    reasoning_effort: str = "low"

     def __post_init__(self):
         if not self.openai_api_key:
@@ -152,6 +153,7 @@ class OrchestratorOutputSchema(BaseIOSchema):
     AgentConfig(
         client=globals()["client"],
         model=config.openai_model,
+        model_api_parameters={"reasoning_effort": config.reasoning_effort},
         history=ChatHistory(),
         system_prompt_generator=SystemPromptGenerator(
             background=[
```

atomic-examples/mcp-agent/example-client/example_client/main_http.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
```diff
@@ -25,6 +25,7 @@ class MCPConfig:
     mcp_server_url: str = "http://localhost:6969"
     openai_model: str = "gpt-5-mini"
     openai_api_key: str = os.getenv("OPENAI_API_KEY")
+    reasoning_effort: str = "low"

     def __post_init__(self):
         if not self.openai_api_key:
@@ -82,6 +83,7 @@ class OrchestratorOutputSchema(BaseIOSchema):
     AgentConfig(
         client=client,
         model=config.openai_model,
+        model_api_parameters={"reasoning_effort": config.reasoning_effort},
         history=history,
         system_prompt_generator=SystemPromptGenerator(
             background=[
```

atomic-examples/mcp-agent/example-client/example_client/main_sse.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
```diff
@@ -27,6 +27,7 @@ class MCPConfig:
     # clarify tools even more and introduce more constraints.
     openai_model: str = "gpt-5-mini"
     openai_api_key: str = os.getenv("OPENAI_API_KEY")
+    reasoning_effort: str = "low"

     def __post_init__(self):
         if not self.openai_api_key:
@@ -144,6 +145,7 @@ def main():
     AgentConfig(
         client=client,
         model=config.openai_model,
+        model_api_parameters={"reasoning_effort": config.reasoning_effort},
         history=history,
         system_prompt_generator=SystemPromptGenerator(
             background=[
```

atomic-examples/mcp-agent/example-client/example_client/main_stdio.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
```diff
@@ -28,6 +28,7 @@ class MCPConfig:
     # clarify tools even more and introduce more constraints.
     openai_model: str = "gpt-5-mini"
     openai_api_key: str = os.getenv("OPENAI_API_KEY")
+    reasoning_effort: str = "low"

     # Command to run the STDIO server.
     # In practice, this could be something like "pipx some-other-persons-server or npx some-other-persons-server
@@ -137,6 +138,7 @@ def main():
     AgentConfig(
         client=client,
         model=config.openai_model,
+        model_api_parameters={"reasoning_effort": config.reasoning_effort},
         history=history,
         system_prompt_generator=SystemPromptGenerator(
             background=[
```

atomic-examples/mcp-agent/example-client/example_client/main_stdio_async.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
```diff
@@ -28,6 +28,7 @@ class MCPConfig:
     # clarify tools even more and introduce more constraints.
     openai_model: str = "gpt-5-mini"
     openai_api_key: str = os.getenv("OPENAI_API_KEY")
+    reasoning_effort: str = "low"

     # Command to run the STDIO server.
     # In practice, this could be something like "pipx some-other-persons-server or npx some-other-persons-server
@@ -121,6 +122,7 @@ class OrchestratorOutputSchema(BaseIOSchema):
     AgentConfig(
         client=client,
         model=config.openai_model,
+        model_api_parameters={"reasoning_effort": config.reasoning_effort},
         history=history,
         system_prompt_generator=SystemPromptGenerator(
             background=[
```

0 comments