
Commit 5ab5786

Merge pull request #21 from Azure-Samples/pydanticai-upgrade
Upgrade samples to use Pydantic AI v1
2 parents a33975f + a37eddc commit 5ab5786

16 files changed: +275 -94 lines changed
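The upgrade boils down to a handful of renames in the Pydantic AI API surface that the samples use. A minimal before/after sketch, based on the file diffs below rather than on any single file in this commit (it assumes OPENAI_API_KEY instead of the samples' explicit providers):

# Renames applied across the samples in this commit:
#   OpenAIModel                  -> OpenAIChatModel
#   Agent(..., result_type=...)  -> Agent(..., output_type=...)
#   result.data                  -> result.output
#   from pydantic_ai.format_as_xml import format_as_xml -> from pydantic_ai import format_as_xml
import asyncio

from pydantic_ai import Agent
from pydantic_ai.models.openai import OpenAIChatModel

agent = Agent(OpenAIChatModel("gpt-4o"), output_type=str)

async def main() -> None:
    result = await agent.run("hola")
    print(result.output)

if __name__ == "__main__":
    asyncio.run(main())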

AGENTS.md

Lines changed: 9 additions & 0 deletions
@@ -0,0 +1,9 @@
+# Python AI Agents Demos
+
+This repository contains examples of several popular Python AI agent frameworks,
+in both English and Spanish.
+
+All examples should be functionally the same across English and Spanish,
+with the prompts and data in the respective language.
+
+All examples should support Azure OpenAI, GitHub Models, and Ollama.
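Every diff below repeats the same host-selection block. A hedged skeleton of that shared pattern follows (the environment variable names come from the diffs; the "github" default and the load_dotenv call are assumptions, and the framework-specific model wiring is left out):

import os

from dotenv import load_dotenv

load_dotenv(override=True)  # assumption: the samples load settings from a .env file
API_HOST = os.getenv("API_HOST", "github")  # the "github" default is an assumption

if API_HOST == "github":
    # GitHub Models: OpenAI-compatible endpoint, authenticated with GITHUB_TOKEN
    base_url, api_key, model_name = "https://models.inference.ai.azure.com", os.environ["GITHUB_TOKEN"], os.getenv("GITHUB_MODEL", "gpt-4o")
elif API_HOST == "azure":
    # Azure OpenAI: keyless auth via azure.identity, deployment from AZURE_OPENAI_CHAT_DEPLOYMENT
    base_url, api_key, model_name = os.environ["AZURE_OPENAI_ENDPOINT"], None, os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"]
elif API_HOST == "ollama":
    # Ollama: local OpenAI-compatible server, dummy key
    base_url, api_key, model_name = os.environ.get("OLLAMA_ENDPOINT", "http://localhost:11434/v1"), "none", os.environ["OLLAMA_MODEL"]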

README.md

Lines changed: 5 additions & 1 deletion
@@ -98,7 +98,11 @@ You can run the examples in this repository by executing the scripts in the `exa
| openai_agents_basic.py | Uses the OpenAI Agents framework to build a single agent. |
| openai_agents.py | Uses the OpenAI Agents framework to handoff between several agents with tools. |
| openai_functioncalling.py | Uses OpenAI Function Calling to call functions based on LLM output. |
-| pydanticai.py | Uses PydanticAI to build a two-agent sequential workflow for flight planning. |
+| pydanticai_basic.py | Uses PydanticAI to build a basic single agent (Spanish tutor). |
+| pydanticai_multiagent.py | Uses PydanticAI to build a two-agent sequential workflow (flight + seat selection). |
+| pydanticai_graph.py | Uses PydanticAI with pydantic-graph to build a small question/answer evaluation graph. |
+| pydanticai_tools.py | Uses PydanticAI with multiple Python tools for weekend activity planning. |
+| pydanticai_mcp_http.py | Uses PydanticAI with an MCP HTTP server toolset for travel planning (hotel search). |
| semantickernel.py | Uses Semantic Kernel to build a writer/editor two-agent workflow. |
| smolagents_codeagent.py | Uses SmolAgents to build a question-answering agent that can search the web and run code. |

examples/langgraph_agent.py

Lines changed: 3 additions & 4 deletions
@@ -1,5 +1,4 @@
# https://github.com/JRAlexander/IntroToAgents1-Oxford/blob/main/intro-langgraph/time-travel.ipynb
-
import os

import azure.identity
@@ -41,13 +40,13 @@ def play_song_on_apple(song: str):
        openai_api_version=os.environ["AZURE_OPENAI_VERSION"],
        azure_ad_token_provider=token_provider,
    )
-else:
+elif API_HOST == "github":
    model = ChatOpenAI(model=os.getenv("GITHUB_MODEL", "gpt-4o"), base_url="https://models.inference.ai.azure.com", api_key=os.environ["GITHUB_TOKEN"])
+elif API_HOST == "ollama":
+    model = ChatOpenAI(model=os.environ["OLLAMA_MODEL"], base_url=os.environ.get("OLLAMA_ENDPOINT", "http://localhost:11434/v1"), api_key="none")

model = model.bind_tools(tools, parallel_tool_calls=False)

-# Define nodes and conditional edges
-

# Define the function that determines whether to continue or not
def should_continue(state):
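For orientation, `should_continue` in LangGraph samples of this shape is usually the conditional edge that keeps routing to the tool node while the model emits tool calls. A typical body, sketched here as the common pattern rather than copied from this file:

def should_continue(state):
    # Inspect the most recent model message in the graph state.
    last_message = state["messages"][-1]
    if last_message.tool_calls:
        return "continue"  # route to the tool-execution node
    return "end"           # route to END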

examples/openai_agents_tools.py

Lines changed: 9 additions & 4 deletions
@@ -6,7 +6,7 @@

import azure.identity
import openai
-from agents import Agent, Runner, function_tool, set_tracing_disabled
+from agents import Agent, OpenAIChatCompletionsModel, Runner, function_tool, set_tracing_disabled
from dotenv import load_dotenv
from rich.logging import RichHandler

@@ -32,8 +32,8 @@
    )
    MODEL_NAME = os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"]
elif API_HOST == "ollama":
-    client = openai.AsyncOpenAI(base_url="http://localhost:11434/v1", api_key="none")
-    MODEL_NAME = "llama3.1:latest"
+    client = openai.AsyncOpenAI(base_url=os.environ.get("OLLAMA_ENDPOINT", "http://localhost:11434/v1"), api_key="none")
+    MODEL_NAME = os.environ["OLLAMA_MODEL"]


@function_tool
@@ -70,7 +70,12 @@ def get_current_date() -> str:
    return datetime.now().strftime("%Y-%m-%d")


-agent = Agent(name="Weekend Planner", instructions="You help users plan their weekends and choose the best activities for the given weather. If an activity would be unpleasant in the weather, don't suggest it. Include the date of the weekend in your response.", tools=[get_weather, get_activities, get_current_date])
+agent = Agent(
+    name="Weekend Planner",
+    instructions="You help users plan their weekends and choose the best activities for the given weather. If an activity would be unpleasant in the weather, don't suggest it. Include the date of the weekend in your response.",
+    tools=[get_weather, get_activities, get_current_date],
+    model=OpenAIChatCompletionsModel(model=MODEL_NAME, openai_client=client),
+)


async def main():
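With `model=OpenAIChatCompletionsModel(model=MODEL_NAME, openai_client=client)` the agent runs against whichever client the host block configured instead of the library default. A plausible `main` for this sample (a sketch: the prompt text is invented, and `asyncio` is assumed to be imported at the top of the file):

async def main():
    # Runner.run drives the agent loop, including calls to the @function_tool functions above.
    result = await Runner.run(agent, "Help me plan something fun for this weekend in Seattle.")
    print(result.final_output)

if __name__ == "__main__":
    asyncio.run(main())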

examples/pydanticai_basic.py

Lines changed: 9 additions & 10 deletions
@@ -5,7 +5,7 @@
from dotenv import load_dotenv
from openai import AsyncAzureOpenAI, AsyncOpenAI
from pydantic_ai import Agent
-from pydantic_ai.models.openai import OpenAIModel
+from pydantic_ai.models.openai import OpenAIChatModel
from pydantic_ai.providers.openai import OpenAIProvider

# Setup the OpenAI client to use either Azure OpenAI or GitHub Models
@@ -14,26 +14,25 @@

if API_HOST == "github":
    client = AsyncOpenAI(api_key=os.environ["GITHUB_TOKEN"], base_url="https://models.inference.ai.azure.com")
-    model = OpenAIModel(os.getenv("GITHUB_MODEL", "gpt-4o"), provider=OpenAIProvider(openai_client=client))
+    model = OpenAIChatModel(os.getenv("GITHUB_MODEL", "gpt-4o"), provider=OpenAIProvider(openai_client=client))
elif API_HOST == "azure":
    token_provider = azure.identity.get_bearer_token_provider(azure.identity.DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default")
-    client = AsyncAzureOpenAI(
-        api_version=os.environ["AZURE_OPENAI_VERSION"],
-        azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],
-        azure_ad_token_provider=token_provider,
-    )
-    model = OpenAIModel(os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"], provider=OpenAIProvider(openai_client=client))
+    client = AsyncAzureOpenAI(api_version=os.environ["AZURE_OPENAI_VERSION"], azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"], azure_ad_token_provider=token_provider)
+    model = OpenAIChatModel(os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"], provider=OpenAIProvider(openai_client=client))
+elif API_HOST == "ollama":
+    client = AsyncOpenAI(base_url=os.environ.get("OLLAMA_ENDPOINT", "http://localhost:11434/v1"), api_key="none")
+    model = OpenAIChatModel(os.environ["OLLAMA_MODEL"], provider=OpenAIProvider(openai_client=client))

agent: Agent[None, str] = Agent(
    model,
    system_prompt="You are a Spanish tutor. Help the user learn Spanish. ONLY respond in Spanish.",
-    result_type=str,
+    output_type=str,
)


async def main():
    result = await agent.run("oh hey how are you?")
-    print(result.data)
+    print(result.output)


if __name__ == "__main__":
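Pulled out of the diff, the whole post-upgrade pattern for the basic sample is short. A self-contained sketch using only the GitHub Models branch:

import asyncio
import os

from openai import AsyncOpenAI
from pydantic_ai import Agent
from pydantic_ai.models.openai import OpenAIChatModel
from pydantic_ai.providers.openai import OpenAIProvider

client = AsyncOpenAI(api_key=os.environ["GITHUB_TOKEN"], base_url="https://models.inference.ai.azure.com")
model = OpenAIChatModel(os.getenv("GITHUB_MODEL", "gpt-4o"), provider=OpenAIProvider(openai_client=client))
agent: Agent[None, str] = Agent(model, system_prompt="You are a Spanish tutor. Help the user learn Spanish. ONLY respond in Spanish.", output_type=str)

async def main():
    result = await agent.run("oh hey how are you?")
    print(result.output)  # v1: .output replaces .data

if __name__ == "__main__":
    asyncio.run(main())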

examples/pydanticai_graph.py

Lines changed: 13 additions & 14 deletions
@@ -8,10 +8,9 @@
from dotenv import load_dotenv
from groq import BaseModel
from openai import AsyncAzureOpenAI, AsyncOpenAI
-from pydantic_ai import Agent
-from pydantic_ai.format_as_xml import format_as_xml
+from pydantic_ai import Agent, format_as_xml
from pydantic_ai.messages import ModelMessage
-from pydantic_ai.models.openai import OpenAIModel
+from pydantic_ai.models.openai import OpenAIChatModel
from pydantic_ai.providers.openai import OpenAIProvider
from pydantic_graph import (
    BaseNode,
@@ -26,24 +25,24 @@

if API_HOST == "github":
    client = AsyncOpenAI(api_key=os.environ["GITHUB_TOKEN"], base_url="https://models.inference.ai.azure.com")
-    model = OpenAIModel(os.getenv("GITHUB_MODEL", "gpt-4o"), provider=OpenAIProvider(openai_client=client))
+    model = OpenAIChatModel(os.getenv("GITHUB_MODEL", "gpt-4o"), provider=OpenAIProvider(openai_client=client))
elif API_HOST == "azure":
    token_provider = azure.identity.get_bearer_token_provider(azure.identity.DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default")
    client = AsyncAzureOpenAI(
        api_version=os.environ["AZURE_OPENAI_VERSION"],
        azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],
        azure_ad_token_provider=token_provider,
    )
-    model = OpenAIModel(os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"], provider=OpenAIProvider(openai_client=client))
+    model = OpenAIChatModel(os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"], provider=OpenAIProvider(openai_client=client))
elif API_HOST == "ollama":
-    client = AsyncOpenAI(base_url="http://localhost:11434/v1", api_key="none")
-    model = OpenAIModel("llama3.1:latest", provider=OpenAIProvider(openai_client=client))
+    client = AsyncOpenAI(base_url=os.environ.get("OLLAMA_ENDPOINT", "http://localhost:11434/v1"), api_key="none")
+    model = OpenAIChatModel(os.environ["OLLAMA_MODEL"], provider=OpenAIProvider(openai_client=client))

"""
Agent definitions
"""

-ask_agent = Agent(model, result_type=str, instrument=True)
+ask_agent = Agent(model, output_type=str)


class EvaluationResult(BaseModel, use_attribute_docstrings=True):
@@ -55,7 +54,7 @@ class EvaluationResult(BaseModel, use_attribute_docstrings=True):

evaluate_agent = Agent(
    model,
-    result_type=EvaluationResult,
+    output_type=EvaluationResult,
    system_prompt="Given a question and answer, evaluate if the answer is correct.",
)

@@ -79,8 +78,8 @@ async def run(self, ctx: GraphRunContext[QuestionState]) -> Answer:
            message_history=ctx.state.ask_agent_messages,
        )
        ctx.state.ask_agent_messages += result.all_messages()
-        ctx.state.question = result.data
-        return Answer(result.data)
+        ctx.state.question = result.output
+        return Answer(result.output)


@dataclass
@@ -106,10 +105,10 @@ async def run(
            message_history=ctx.state.evaluate_agent_messages,
        )
        ctx.state.evaluate_agent_messages += result.all_messages()
-        if result.data.correct:
-            return End(result.data.comment)
+        if result.output.correct:
+            return End(result.output.comment)
        else:
-            return Reprimand(result.data.comment)
+            return Reprimand(result.output.comment)


@dataclass
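The graph plumbing itself is untouched by this commit; only the agent results read inside the nodes move from `.data` to `.output`. For readers new to pydantic-graph, a toy graph with the same node shape (a sketch assuming the pydantic_graph API implied by the imports above; not part of this commit):

from __future__ import annotations

from dataclasses import dataclass

from pydantic_graph import BaseNode, End, Graph, GraphRunContext

@dataclass
class CounterState:
    count: int = 0

@dataclass
class Increment(BaseNode[CounterState]):
    async def run(self, ctx: GraphRunContext[CounterState]) -> Check:
        ctx.state.count += 1
        return Check()  # next node to run

@dataclass
class Check(BaseNode[CounterState, None, int]):
    async def run(self, ctx: GraphRunContext[CounterState]) -> Increment | End[int]:
        if ctx.state.count >= 3:
            return End(ctx.state.count)  # End(...) terminates the run with an output
        return Increment()

graph = Graph(nodes=(Increment, Check))
result = graph.run_sync(Increment(), state=CounterState())
print(result.output)  # 3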

examples/pydanticai_mcp_http.py

Lines changed: 8 additions & 7 deletions
@@ -7,7 +7,7 @@
from openai import AsyncAzureOpenAI, AsyncOpenAI
from pydantic_ai import Agent
from pydantic_ai.mcp import MCPServerStreamableHTTP
-from pydantic_ai.models.openai import OpenAIModel
+from pydantic_ai.models.openai import OpenAIChatModel
from pydantic_ai.providers.openai import OpenAIProvider

# Setup the OpenAI client to use either Azure OpenAI or GitHub Models
@@ -16,19 +16,20 @@

if API_HOST == "github":
    client = AsyncOpenAI(api_key=os.environ["GITHUB_TOKEN"], base_url="https://models.inference.ai.azure.com")
-    model = OpenAIModel(os.getenv("GITHUB_MODEL", "gpt-4o"), provider=OpenAIProvider(openai_client=client))
+    model = OpenAIChatModel(os.getenv("GITHUB_MODEL", "gpt-4o"), provider=OpenAIProvider(openai_client=client))
elif API_HOST == "azure":
    token_provider = azure.identity.get_bearer_token_provider(azure.identity.DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default")
    client = AsyncAzureOpenAI(
        api_version=os.environ["AZURE_OPENAI_VERSION"],
        azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],
        azure_ad_token_provider=token_provider,
    )
-    model = OpenAIModel(os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"], provider=OpenAIProvider(openai_client=client))
-else:
-    raise ValueError(f"Unknown API_HOST: {API_HOST}. Set it to either 'github' or 'azure'.")
+    model = OpenAIChatModel(os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"], provider=OpenAIProvider(openai_client=client))
+elif API_HOST == "ollama":
+    client = AsyncOpenAI(base_url=os.environ.get("OLLAMA_ENDPOINT", "http://localhost:11434/v1"), api_key="none")
+    model = OpenAIChatModel(os.environ["OLLAMA_MODEL"], provider=OpenAIProvider(openai_client=client))

-server = MCPServerStreamableHTTP("http://localhost:8000/mcp")
+server = MCPServerStreamableHTTP(url="http://localhost:8000/mcp")

agent: Agent[None, str] = Agent(model, system_prompt="You are a travel planning agent. You can help users find hotels.", output_type=str, toolsets=[server])

@@ -39,5 +40,5 @@ async def main():


if __name__ == "__main__":
-    logging.basicConfig(level=logging.DEBUG)
+    logging.basicConfig(level=logging.WARNING)
    asyncio.run(main())
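The sample now expects an MCP server speaking streamable HTTP at http://localhost:8000/mcp. One way to stand up a compatible toy server for local testing, assuming the official mcp Python SDK's FastMCP helper (the tool name and hotel data here are invented for illustration):

from mcp.server.fastmcp import FastMCP

mcp = FastMCP("hotels")

@mcp.tool()
def search_hotels(city: str) -> list[dict]:
    """Return a few fake hotels for the given city."""
    return [
        {"name": "Hotel Central", "city": city, "price_per_night": 120},
        {"name": "Casa Azul", "city": city, "price_per_night": 95},
    ]

if __name__ == "__main__":
    # Streamable HTTP transport serves on http://127.0.0.1:8000/mcp by default.
    mcp.run(transport="streamable-http")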

examples/pydanticai_multiagent.py

Lines changed: 13 additions & 10 deletions
@@ -8,7 +8,7 @@
from pydantic import BaseModel, Field
from pydantic_ai import Agent, RunContext
from pydantic_ai.messages import ModelMessage
-from pydantic_ai.models.openai import OpenAIModel
+from pydantic_ai.models.openai import OpenAIChatModel
from pydantic_ai.providers.openai import OpenAIProvider
from rich.prompt import Prompt

@@ -18,15 +18,18 @@

if API_HOST == "github":
    client = AsyncOpenAI(api_key=os.environ["GITHUB_TOKEN"], base_url="https://models.inference.ai.azure.com")
-    model = OpenAIModel(os.getenv("GITHUB_MODEL", "gpt-4o"), provider=OpenAIProvider(openai_client=client))
+    model = OpenAIChatModel(os.getenv("GITHUB_MODEL", "gpt-4o"), provider=OpenAIProvider(openai_client=client))
elif API_HOST == "azure":
    token_provider = azure.identity.get_bearer_token_provider(azure.identity.DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default")
    client = AsyncAzureOpenAI(
        api_version=os.environ["AZURE_OPENAI_VERSION"],
        azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],
        azure_ad_token_provider=token_provider,
    )
-    model = OpenAIModel(os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"], provider=OpenAIProvider(openai_client=client))
+    model = OpenAIChatModel(os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"], provider=OpenAIProvider(openai_client=client))
+elif API_HOST == "ollama":
+    client = AsyncOpenAI(base_url=os.environ.get("OLLAMA_ENDPOINT", "http://localhost:11434/v1"), api_key="none")
+    model = OpenAIChatModel(os.environ["OLLAMA_MODEL"], provider=OpenAIProvider(openai_client=client))


class Flight(BaseModel):
@@ -39,7 +42,7 @@ class Failed(BaseModel):

flight_search_agent = Agent(
    model,
-    result_type=Flight | Failed,
+    output_type=Flight | Failed,
    system_prompt=('Use the "flight_search" tool to find a flight from the given origin to the given destination.'),
)

@@ -58,10 +61,10 @@ async def find_flight() -> Flight | None:
            "Where would you like to fly from and to?",
        )
        result = await flight_search_agent.run(prompt, message_history=message_history)
-        if isinstance(result.data, Flight):
-            return result.data
+        if isinstance(result.output, Flight):
+            return result.output
        else:
-            message_history = result.all_messages(result_tool_return_content="Please try again.")
+            message_history = result.all_messages()


class Seat(BaseModel):
@@ -72,7 +75,7 @@ class Seat(BaseModel):

# This agent is responsible for extracting the user's seat selection
seat_preference_agent = Agent(
    model,
-    result_type=Seat | Failed,
+    output_type=Seat | Failed,
    system_prompt=("Extract the user's seat preference. " "Seats A and F are window seats. " "Row 1 is the front row and has extra leg room. " "Rows 14, and 20 also have extra leg room. "),
)

@@ -83,8 +86,8 @@ async def find_seat() -> Seat:
        answer = Prompt.ask("What seat would you like?")

        result = await seat_preference_agent.run(answer, message_history=message_history)
-        if isinstance(result.data, Seat):
-            return result.data
+        if isinstance(result.output, Seat):
+            return result.output
        else:
            print("Could not understand seat preference. Please try again.")
            message_history = result.all_messages()
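The two loops are presumably chained by a `main` that finds a flight first and only then asks for a seat; a sketch of that glue consistent with the signatures above (the print wording is assumed, and `asyncio` is assumed to be imported in the file):

async def main():
    flight = await find_flight()
    if flight is None:
        print("Sorry, no flight could be found.")
        return
    print(f"Found flight: {flight}")
    seat = await find_seat()
    print(f"Selected seat: {seat}")

if __name__ == "__main__":
    asyncio.run(main())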

examples/pydanticai_tools.py

Lines changed: 9 additions & 4 deletions
@@ -8,7 +8,7 @@
from dotenv import load_dotenv
from openai import AsyncAzureOpenAI, AsyncOpenAI
from pydantic_ai import Agent
-from pydantic_ai.models.openai import OpenAIModel
+from pydantic_ai.models.openai import OpenAIChatModel
from pydantic_ai.providers.openai import OpenAIProvider
from rich.logging import RichHandler

@@ -23,18 +23,21 @@

if API_HOST == "github":
    client = AsyncOpenAI(api_key=os.environ["GITHUB_TOKEN"], base_url="https://models.inference.ai.azure.com")
-    model = OpenAIModel(os.getenv("GITHUB_MODEL", "gpt-4o"), provider=OpenAIProvider(openai_client=client))
+    model = OpenAIChatModel(os.getenv("GITHUB_MODEL", "gpt-4o"), provider=OpenAIProvider(openai_client=client))
elif API_HOST == "azure":
    token_provider = azure.identity.get_bearer_token_provider(azure.identity.DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default")
    client = AsyncAzureOpenAI(
        api_version=os.environ["AZURE_OPENAI_VERSION"],
        azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],
        azure_ad_token_provider=token_provider,
    )
-    model = OpenAIModel(os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"], provider=OpenAIProvider(openai_client=client))
+    model = OpenAIChatModel(os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"], provider=OpenAIProvider(openai_client=client))
+elif API_HOST == "ollama":
+    client = AsyncOpenAI(base_url=os.environ.get("OLLAMA_ENDPOINT", "http://localhost:11434/v1"), api_key="none")
+    model = OpenAIChatModel(os.environ["OLLAMA_MODEL"], provider=OpenAIProvider(openai_client=client))


-def get_weather(city: str) -> str:
+def get_weather(city: str) -> dict:
    logger.info(f"Getting weather for {city}")
    if random.random() < 0.05:
        return {
@@ -58,6 +61,7 @@ def get_activities(city: str, date: str) -> list:
        {"name": "Museum", "location": city},
    ]

+
def get_current_date() -> str:
    logger.info("Getting current date")
    return datetime.now().strftime("%Y-%m-%d")
@@ -69,6 +73,7 @@ def get_current_date() -> str:
    tools=[get_weather, get_activities, get_current_date],
)

+
async def main():
    result = await agent.run("what can I do for funzies this weekend in Seattle?")
    print(result.output)
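Pydantic AI registers plain Python functions passed in `tools=[...]`, using their signatures and docstrings as the tool schema, which is all this sample relies on. A tiny self-contained sketch of the same mechanism (the tool, prompt, and GitHub Models wiring here are illustrative, not from this commit):

import os

from openai import AsyncOpenAI
from pydantic_ai import Agent
from pydantic_ai.models.openai import OpenAIChatModel
from pydantic_ai.providers.openai import OpenAIProvider

def get_fake_forecast(city: str) -> dict:
    """Toy tool: the type hints and this docstring become the tool schema."""
    return {"city": city, "forecast": "sunny", "temperature_c": 21}

client = AsyncOpenAI(api_key=os.environ["GITHUB_TOKEN"], base_url="https://models.inference.ai.azure.com")
model = OpenAIChatModel(os.getenv("GITHUB_MODEL", "gpt-4o"), provider=OpenAIProvider(openai_client=client))
agent = Agent(model, system_prompt="Answer using the forecast tool.", tools=[get_fake_forecast])

result = agent.run_sync("Is Saturday a good day for a picnic in Seattle?")
print(result.output)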

examples/spanish/README.md

Lines changed: 6 additions & 4 deletions
@@ -102,9 +102,11 @@ Podés ejecutar los ejemplos en este repositorio ejecutando los scripts en el di
| openai_agents_tools.py | Usa el framework de Agentes de OpenAI para crear un planificador de fin de semana. |
| openai_functioncalling.py | Usa OpenAI Function Calling para llamar funciones basadas en la salida del LLM. |
| openai_githubmodels.py | Configuración básica para usar modelos de GitHub con la API de OpenAI. |
-| pydanticai_basic.py | Implementación básica usando PydanticAI para crear un agente estructurado. |
-| pydanticai_graph.py | Usa PydanticAI para construir un grafo de agentes para hacer preguntas y evaluar respuestas. |
-| pydanticai_multiagent.py | Usa PydanticAI para construir un flujo de trabajo secuencial de dos agentes para planificación de vuelos. |
+| pydanticai_basic.py | Usa PydanticAI para construir un agente básico (tutor de español). |
+| pydanticai_multiagent.py | Usa PydanticAI para un flujo secuencial de dos agentes (vuelo + selección de asiento). |
+| pydanticai_graph.py | Usa PydanticAI con pydantic-graph para un pequeño grafo de evaluación pregunta/respuesta. |
+| pydanticai_tools.py | Usa PydanticAI con varias herramientas de Python para planificar actividades de fin de semana. |
+| pydanticai_mcp_http.py | Usa PydanticAI con un servidor MCP HTTP como conjunto de herramientas para planificación de viajes (búsqueda de hoteles). |
| semantickernel_basic.py | Usa Semantic Kernel para construir un agente simple que enseña español. |
| semantickernel_groupchat.py | Usa Semantic Kernel para construir un flujo de trabajo de dos agentes escritor/editor. |
| smolagents_codeagent.py | Usa SmolAgents para construir un agente de respuesta a preguntas que puede buscar en la web y ejecutar código. |
@@ -173,4 +175,4 @@ Este proyecto incluye infraestructura como código (IaC) para provisionar despli
* [Documentación de OpenAI Function Calling](https://platform.openai.com/docs/guides/function-calling?api-mode=chat)
* [Documentación de PydanticAI](https://ai.pydantic.dev/multi-agent-applications/)
* [Documentación de Semantic Kernel](https://learn.microsoft.com/semantic-kernel/overview/)
-* [Documentación de SmolAgents](https://huggingface.co/docs/smolagents/index)
+* [Documentación de SmolAgents](https://huggingface.co/docs/smolagents/index)
