
Commit 7a3397c: Add openai-agents examples
Parent: 04d061f
8 files changed: +168 additions, -96 deletions

examples/openai_agents_basic.py (20 additions, 11 deletions)
@@ -2,11 +2,10 @@
 import logging
 import os
 
-import openai
 from agents import Agent, OpenAIChatCompletionsModel, Runner, set_tracing_disabled
-from azure.identity import DefaultAzureCredential
-from azure.identity.aio import get_bearer_token_provider
+from azure.identity.aio import DefaultAzureCredential, get_bearer_token_provider
 from dotenv import load_dotenv
+from openai import AsyncOpenAI
 
 logging.basicConfig(level=logging.WARNING)
 # Disable tracing since we're not connected to a supported tracing provider
@@ -15,19 +14,26 @@
 # Setup the OpenAI client to use either Azure OpenAI or GitHub Models
 load_dotenv(override=True)
 API_HOST = os.getenv("API_HOST", "github")
-if API_HOST == "github":
-    client = openai.AsyncOpenAI(base_url="https://models.inference.ai.azure.com", api_key=os.environ["GITHUB_TOKEN"])
-    MODEL_NAME = os.getenv("GITHUB_MODEL", "gpt-4o")
-elif API_HOST == "azure":
-    token_provider = get_bearer_token_provider(DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default")
-    client = openai.AsyncOpenAI(
+
+async_credential = None
+if API_HOST == "azure":
+    async_credential = DefaultAzureCredential()
+    token_provider = get_bearer_token_provider(async_credential, "https://cognitiveservices.azure.com/.default")
+    client = AsyncOpenAI(
         base_url=os.environ["AZURE_OPENAI_ENDPOINT"] + "/openai/v1",
         api_key=token_provider,
     )
     MODEL_NAME = os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"]
+elif API_HOST == "github":
+    client = AsyncOpenAI(api_key=os.environ["GITHUB_TOKEN"], base_url="https://models.inference.ai.azure.com")
+    MODEL_NAME = os.getenv("GITHUB_MODEL", "gpt-4o")
 elif API_HOST == "ollama":
-    client = openai.AsyncOpenAI(base_url="http://localhost:11434/v1", api_key="none")
-    MODEL_NAME = "llama3.1:latest"
+    client = AsyncOpenAI(base_url=os.environ.get("OLLAMA_ENDPOINT", "http://localhost:11434/v1"), api_key="none")
+    MODEL_NAME = os.environ["OLLAMA_MODEL"]
+else:
+    client = AsyncOpenAI(api_key=os.environ["OPENAI_API_KEY"])
+    MODEL_NAME = os.environ.get("OPENAI_MODEL", "gpt-4o")
+
 
 agent = Agent(
     name="Spanish tutor",
@@ -40,6 +46,9 @@ async def main():
     result = await Runner.run(agent, input="hi how are you?")
     print(result.final_output)
 
+    if async_credential:
+        await async_credential.close()
+
 
 if __name__ == "__main__":
     asyncio.run(main())
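
Every file in this commit applies the same refactor: the sync azure.identity DefaultAzureCredential is swapped for the async azure.identity.aio variant, the client is built from AsyncOpenAI imported directly, and the credential is closed explicitly at the end of main(). A minimal standalone sketch of that lifecycle, assembled from the lines added above; the direct chat.completions.create call and the try/finally are illustrative and not part of the commit, which runs the agent through Runner instead:

import asyncio
import os

from azure.identity.aio import DefaultAzureCredential, get_bearer_token_provider
from openai import AsyncOpenAI


async def main():
    # The async credential must be closed explicitly to release its underlying transport.
    credential = DefaultAzureCredential()
    token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default")
    client = AsyncOpenAI(
        base_url=os.environ["AZURE_OPENAI_ENDPOINT"] + "/openai/v1",
        api_key=token_provider,  # callable token provider, as in the diff above
    )
    try:
        response = await client.chat.completions.create(
            model=os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"],
            messages=[{"role": "user", "content": "hi how are you?"}],
        )
        print(response.choices[0].message.content)
    finally:
        await credential.close()


if __name__ == "__main__":
    asyncio.run(main())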

examples/openai_agents_handoffs.py (18 additions, 11 deletions)
@@ -1,11 +1,10 @@
 import asyncio
 import os
 
-import openai
 from agents import Agent, OpenAIChatCompletionsModel, Runner, function_tool, set_tracing_disabled
-from azure.identity import DefaultAzureCredential
-from azure.identity.aio import get_bearer_token_provider
+from azure.identity.aio import DefaultAzureCredential, get_bearer_token_provider
 from dotenv import load_dotenv
+from openai import AsyncOpenAI
 
 # Disable tracing since we're not using OpenAI.com models
 set_tracing_disabled(disabled=True)
@@ -14,19 +13,24 @@
 load_dotenv(override=True)
 API_HOST = os.getenv("API_HOST", "github")
 
-if API_HOST == "github":
-    client = openai.AsyncOpenAI(base_url="https://models.inference.ai.azure.com", api_key=os.environ["GITHUB_TOKEN"])
-    MODEL_NAME = os.getenv("GITHUB_MODEL", "gpt-4o")
-elif API_HOST == "azure":
-    token_provider = get_bearer_token_provider(DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default")
-    client = openai.AsyncOpenAI(
+async_credential = None
+if API_HOST == "azure":
+    async_credential = DefaultAzureCredential()
+    token_provider = get_bearer_token_provider(async_credential, "https://cognitiveservices.azure.com/.default")
+    client = AsyncOpenAI(
         base_url=os.environ["AZURE_OPENAI_ENDPOINT"] + "/openai/v1",
        api_key=token_provider,
     )
     MODEL_NAME = os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"]
+elif API_HOST == "github":
+    client = AsyncOpenAI(api_key=os.environ["GITHUB_TOKEN"], base_url="https://models.inference.ai.azure.com")
+    MODEL_NAME = os.getenv("GITHUB_MODEL", "gpt-4o")
 elif API_HOST == "ollama":
-    client = openai.AsyncOpenAI(base_url="http://localhost:11434/v1", api_key="none")
-    MODEL_NAME = "llama3.1:latest"
+    client = AsyncOpenAI(base_url=os.environ.get("OLLAMA_ENDPOINT", "http://localhost:11434/v1"), api_key="none")
+    MODEL_NAME = os.environ["OLLAMA_MODEL"]
+else:
+    client = AsyncOpenAI(api_key=os.environ["OPENAI_API_KEY"])
+    MODEL_NAME = os.environ.get("OPENAI_MODEL", "gpt-4o")
 
 
 @function_tool
@@ -70,6 +74,9 @@ async def main():
     result = await Runner.run(triage_agent, input="Hola, ¿cómo estás? ¿Puedes darme el clima para San Francisco CA?")
     print(result.final_output)
 
+    if async_credential:
+        await async_credential.close()
+
 
 if __name__ == "__main__":
     asyncio.run(main())
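
The hunks above only touch imports and client setup; the tool and handoff wiring in this example is unchanged and therefore not shown. For orientation, a triage setup in this SDK typically looks like the sketch below. The agent names, instructions, and the get_weather body are illustrative assumptions rather than lines from this commit, while client and MODEL_NAME refer to the setup block in the diff:

from agents import Agent, OpenAIChatCompletionsModel, Runner, function_tool


@function_tool
def get_weather(city: str) -> str:
    # Hypothetical stub; the committed example defines its own weather tool body.
    return f"The weather in {city} is sunny."


spanish_agent = Agent(
    name="Spanish agent",
    instructions="You only respond in Spanish.",
    model=OpenAIChatCompletionsModel(model=MODEL_NAME, openai_client=client),
)

triage_agent = Agent(
    name="Triage agent",
    instructions="Answer weather questions with the tool and hand off Spanish conversation to the Spanish agent.",
    tools=[get_weather],
    handoffs=[spanish_agent],
    model=OpenAIChatCompletionsModel(model=MODEL_NAME, openai_client=client),
)

# Inside main(), as in the diff:
# result = await Runner.run(triage_agent, input="Hola, ¿cómo estás? ¿Puedes darme el clima para San Francisco CA?")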

examples/openai_agents_mcp_http.py (21 additions, 12 deletions)
@@ -9,13 +9,12 @@
 import logging
 import os
 
-import openai
 from agents import Agent, OpenAIChatCompletionsModel, Runner, set_tracing_disabled
 from agents.mcp.server import MCPServerStreamableHttp
 from agents.model_settings import ModelSettings
-from azure.identity import DefaultAzureCredential
-from azure.identity.aio import get_bearer_token_provider
+from azure.identity.aio import DefaultAzureCredential, get_bearer_token_provider
 from dotenv import load_dotenv
+from openai import AsyncOpenAI
 
 logging.basicConfig(level=logging.WARNING)
 # Disable tracing since we're not connected to a supported tracing provider
@@ -24,19 +23,25 @@
 # Setup the OpenAI client to use either Azure OpenAI or GitHub Models
 load_dotenv(override=True)
 API_HOST = os.getenv("API_HOST", "github")
-if API_HOST == "github":
-    client = openai.AsyncOpenAI(base_url="https://models.inference.ai.azure.com", api_key=os.environ["GITHUB_TOKEN"])
-    MODEL_NAME = os.getenv("GITHUB_MODEL", "gpt-4o")
-elif API_HOST == "azure":
-    token_provider = get_bearer_token_provider(DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default")
-    client = openai.AsyncOpenAI(
+
+async_credential = None
+if API_HOST == "azure":
+    async_credential = DefaultAzureCredential()
+    token_provider = get_bearer_token_provider(async_credential, "https://cognitiveservices.azure.com/.default")
+    client = AsyncOpenAI(
         base_url=os.environ["AZURE_OPENAI_ENDPOINT"] + "/openai/v1",
         api_key=token_provider,
     )
     MODEL_NAME = os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"]
+elif API_HOST == "github":
+    client = AsyncOpenAI(api_key=os.environ["GITHUB_TOKEN"], base_url="https://models.inference.ai.azure.com")
+    MODEL_NAME = os.getenv("GITHUB_MODEL", "gpt-4o")
 elif API_HOST == "ollama":
-    client = openai.AsyncOpenAI(base_url="http://localhost:11434/v1", api_key="none")
-    MODEL_NAME = "llama3.1:latest"
+    client = AsyncOpenAI(base_url=os.environ.get("OLLAMA_ENDPOINT", "http://localhost:11434/v1"), api_key="none")
+    MODEL_NAME = os.environ["OLLAMA_MODEL"]
+else:
+    client = AsyncOpenAI(api_key=os.environ["OPENAI_API_KEY"])
+    MODEL_NAME = os.environ.get("OPENAI_MODEL", "gpt-4o")
 
 
 mcp_server = MCPServerStreamableHttp(name="weather", params={"url": "http://localhost:8000/mcp/"})
@@ -52,11 +57,15 @@
 
 async def main():
     await mcp_server.connect()
-    message = "Find me a hotel in San Francisco for 2 nights starting from 2024-01-01. I need a hotel with free WiFi and a pool."
+    message = "Find me a hotel in San Francisco for 2 nights starting from 2024-01-01. I need free WiFi and a pool."
     result = await Runner.run(starting_agent=agent, input=message)
     print(result.final_output)
+
     await mcp_server.cleanup()
 
+    if async_credential:
+        await async_credential.close()
+
 
 if __name__ == "__main__":
     asyncio.run(main())
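
One thing this diff does not change: mcp_server.cleanup() and async_credential.close() only run when Runner.run succeeds. A hedged alternative, not what the commit does, is to move the cleanup into a try/finally so both always run:

async def main():
    await mcp_server.connect()
    try:
        message = "Find me a hotel in San Francisco for 2 nights starting from 2024-01-01. I need free WiFi and a pool."
        result = await Runner.run(starting_agent=agent, input=message)
        print(result.final_output)
    finally:
        # Always release the MCP session and the Azure credential, even if the run raises.
        await mcp_server.cleanup()
        if async_credential:
            await async_credential.close()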

examples/openai_agents_tools.py (23 additions, 11 deletions)
@@ -4,11 +4,10 @@
 import random
 from datetime import datetime
 
-import openai
 from agents import Agent, OpenAIChatCompletionsModel, Runner, function_tool, set_tracing_disabled
-from azure.identity import DefaultAzureCredential
-from azure.identity.aio import get_bearer_token_provider
+from azure.identity.aio import DefaultAzureCredential, get_bearer_token_provider
 from dotenv import load_dotenv
+from openai import AsyncOpenAI
 from rich.logging import RichHandler
 
 # Setup logging with rich
@@ -21,19 +20,25 @@
 # Setup the OpenAI client to use either Azure OpenAI or GitHub Models
 load_dotenv(override=True)
 API_HOST = os.getenv("API_HOST", "github")
-if API_HOST == "github":
-    client = openai.AsyncOpenAI(base_url="https://models.inference.ai.azure.com", api_key=os.environ["GITHUB_TOKEN"])
-    MODEL_NAME = os.getenv("GITHUB_MODEL", "gpt-4o")
-elif API_HOST == "azure":
-    token_provider = get_bearer_token_provider(DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default")
-    client = openai.AsyncOpenAI(
+
+async_credential = None
+if API_HOST == "azure":
+    async_credential = DefaultAzureCredential()
+    token_provider = get_bearer_token_provider(async_credential, "https://cognitiveservices.azure.com/.default")
+    client = AsyncOpenAI(
         base_url=os.environ["AZURE_OPENAI_ENDPOINT"] + "/openai/v1",
         api_key=token_provider,
     )
     MODEL_NAME = os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"]
+elif API_HOST == "github":
+    client = AsyncOpenAI(api_key=os.environ["GITHUB_TOKEN"], base_url="https://models.inference.ai.azure.com")
+    MODEL_NAME = os.getenv("GITHUB_MODEL", "gpt-4o")
 elif API_HOST == "ollama":
-    client = openai.AsyncOpenAI(base_url=os.environ.get("OLLAMA_ENDPOINT", "http://localhost:11434/v1"), api_key="none")
+    client = AsyncOpenAI(base_url=os.environ.get("OLLAMA_ENDPOINT", "http://localhost:11434/v1"), api_key="none")
     MODEL_NAME = os.environ["OLLAMA_MODEL"]
+else:
+    client = AsyncOpenAI(api_key=os.environ["OPENAI_API_KEY"])
+    MODEL_NAME = os.environ.get("OPENAI_MODEL", "gpt-4o")
 
 
 @function_tool
@@ -72,7 +77,11 @@ def get_current_date() -> str:
 
 agent = Agent(
     name="Weekend Planner",
-    instructions="You help users plan their weekends and choose the best activities for the given weather. If an activity would be unpleasant in the weather, don't suggest it. Include the date of the weekend in your response.",
+    instructions=(
+        "You help users plan their weekends and choose the best activities for the given weather."
+        "If an activity would be unpleasant in the weather, don't suggest it."
+        "Include the date of the weekend in your response."
+    ),
     tools=[get_weather, get_activities, get_current_date],
     model=OpenAIChatCompletionsModel(model=MODEL_NAME, openai_client=client),
 )
@@ -82,6 +91,9 @@ async def main():
     result = await Runner.run(agent, input="hii what can I do this weekend in Seattle?")
     print(result.final_output)
 
+    if async_credential:
+        await async_credential.close()
+
 
 if __name__ == "__main__":
     logger.setLevel(logging.INFO)
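
The tools themselves are untouched by this commit, so only their names appear in the hunks above. As a sketch of what a tool like get_current_date looks like with the function_tool decorator, assuming a body the diff does not show:

from datetime import datetime

from agents import function_tool


@function_tool
def get_current_date() -> str:
    """Return today's date so the agent can work out the upcoming weekend."""
    # Illustrative body: the committed file defines this tool, but its body is outside the diff context.
    return datetime.now().strftime("%Y-%m-%d")
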
(20 additions, 9 deletions)
@@ -1,28 +1,36 @@
 import asyncio
 import os
 
-import openai
 from agents import Agent, OpenAIChatCompletionsModel, Runner, set_tracing_disabled
-from azure.identity import DefaultAzureCredential
-from azure.identity.aio import get_bearer_token_provider
+from azure.identity.aio import DefaultAzureCredential, get_bearer_token_provider
 from dotenv import load_dotenv
+from openai import AsyncOpenAI
 
 # Disable tracing since we're not connected to a supported tracing provider
 set_tracing_disabled(disabled=True)
 
 # Setup the OpenAI client to use either Azure OpenAI or GitHub Models
 load_dotenv(override=True)
 API_HOST = os.getenv("API_HOST", "github")
-if API_HOST == "github":
-    client = openai.AsyncOpenAI(base_url="https://models.inference.ai.azure.com", api_key=os.environ["GITHUB_TOKEN"])
-    MODEL_NAME = os.getenv("GITHUB_MODEL", "gpt-4o")
-elif API_HOST == "azure":
-    token_provider = get_bearer_token_provider(DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default")
-    client = openai.AsyncOpenAI(
+
+async_credential = None
+if API_HOST == "azure":
+    async_credential = DefaultAzureCredential()
+    token_provider = get_bearer_token_provider(async_credential, "https://cognitiveservices.azure.com/.default")
+    client = AsyncOpenAI(
         base_url=os.environ["AZURE_OPENAI_ENDPOINT"] + "/openai/v1",
         api_key=token_provider,
     )
     MODEL_NAME = os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"]
+elif API_HOST == "github":
+    client = AsyncOpenAI(api_key=os.environ["GITHUB_TOKEN"], base_url="https://models.inference.ai.azure.com")
+    MODEL_NAME = os.getenv("GITHUB_MODEL", "gpt-4o")
+elif API_HOST == "ollama":
+    client = AsyncOpenAI(base_url=os.environ.get("OLLAMA_ENDPOINT", "http://localhost:11434/v1"), api_key="none")
+    MODEL_NAME = os.environ["OLLAMA_MODEL"]
+else:
+    client = AsyncOpenAI(api_key=os.environ["OPENAI_API_KEY"])
+    MODEL_NAME = os.environ.get("OPENAI_MODEL", "gpt-4o")
 
 
 agent = Agent(
@@ -36,6 +44,9 @@ async def main():
     result = await Runner.run(agent, input="hola hola, como estas?")
     print(result.final_output)
 
+    if async_credential:
+        await async_credential.close()
+
 
 if __name__ == "__main__":
     asyncio.run(main())
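
Taken together, the examples read all of their configuration from a .env file via load_dotenv(override=True). A sample covering every variable referenced in these diffs, with placeholder values (only the variables for your chosen API_HOST are required):

# Selects the branch above: azure, github, ollama, or anything else for OpenAI.com
API_HOST=azure
AZURE_OPENAI_ENDPOINT=https://<your-resource>.openai.azure.com
AZURE_OPENAI_CHAT_DEPLOYMENT=<your-chat-deployment>
GITHUB_TOKEN=<your-github-token>
GITHUB_MODEL=gpt-4o
OLLAMA_ENDPOINT=http://localhost:11434/v1
OLLAMA_MODEL=<your-local-model>
OPENAI_API_KEY=<your-openai-api-key>
OPENAI_MODEL=gpt-4o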
