
Commit ed636ef

feat: Add Bedrock models support

1 parent 71f882f

File tree

5 files changed: +100 −5 lines changed

.env.example

Lines changed: 3 additions & 1 deletion

@@ -1,3 +1,5 @@
 GEMINI_API_KEY=
 OPENAI_API_KEY=
-LOGFIRE_TOKEN=
+LOGFIRE_TOKEN=
+AWS_REGION=
+AWS_PROFILE=
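Note: a minimal sketch of how these variables are typically picked up at runtime, assuming python-dotenv (already a declared dependency) and the fallbacks used in the agent code below; the loader shown here is illustrative, not the project's own:

import os

from dotenv import load_dotenv

# Read the local .env file (GEMINI_API_KEY, OPENAI_API_KEY, LOGFIRE_TOKEN, AWS_*)
# into the process environment.
load_dotenv()

# The Bedrock path in this commit falls back to a default region when AWS_REGION is unset.
aws_region = os.getenv("AWS_REGION", "us-east-1")
aws_profile = os.getenv("AWS_PROFILE")  # unset means boto3 resolves credentials its usual way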

agents_mcp_usage/multi_mcp/eval_multi_mcp/evals_pydantic_mcp.py

Lines changed: 24 additions & 0 deletions

@@ -209,6 +209,30 @@ def create_agent(
     if model_settings is None:
         model_settings = {}
 
+    # Handle Bedrock models specifically
+    if model.startswith("bedrock:"):
+        from pydantic_ai.models.bedrock import BedrockConverseModel
+        from pydantic_ai.providers.bedrock import BedrockProvider
+
+        # Extract the model name (remove "bedrock:" prefix)
+        model_name = model.replace("bedrock:", "")
+
+        # Create BedrockConverseModel with proper region and profile configuration
+        bedrock_model = BedrockConverseModel(
+            model_name,
+            provider=BedrockProvider(
+                region_name=os.getenv("AWS_REGION", "us-east-1"),
+                profile_name=os.getenv("AWS_PROFILE", "my-aws-profile"),
+            ),
+        )
+
+        return Agent(
+            bedrock_model,
+            mcp_servers=get_mcp_servers(),
+            model_settings=model_settings,
+        )
+
+    # For non-Bedrock models, use the original approach
     return Agent(
         model,
         mcp_servers=get_mcp_servers(),
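Note: a hedged usage sketch of the new branch. The "bedrock:" prefix and the create_agent entry point come from the hunk above; the prompt, the run_mcp_servers() context manager, and result.output assume pydantic-ai's standard Agent API and are illustrative rather than taken from this repository:

import asyncio

from agents_mcp_usage.multi_mcp.eval_multi_mcp.evals_pydantic_mcp import create_agent


async def demo() -> None:
    # Routed through BedrockConverseModel + BedrockProvider by the new branch.
    agent = create_agent("bedrock:us.anthropic.claude-3-5-sonnet-20240620-v1:0")
    async with agent.run_mcp_servers():  # start the MCP servers configured by get_mcp_servers()
        result = await agent.run("Use the available tools to say hello.")
    print(result.output)


asyncio.run(demo())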

agents_mcp_usage/multi_mcp/eval_multi_mcp/run_multi_evals.py

Lines changed: 12 additions & 1 deletion

@@ -45,8 +45,19 @@
 
 DEFAULT_MODELS = [
     # "gemini-2.5-pro-preview-06-05",
+    # "gemini-2.5-pro-preview-05-06",
+    # "gemini-2.5-pro-preview-03-25",
     "gemini-2.0-flash",
     "gemini-2.5-flash-preview-04-17",
+    # "openai:o4-mini",
+    # "openai:gpt-4.1",
+    # "openai:gpt-4.1-mini",
+    # "openai:gpt-4.1-nano",
+    # "bedrock:us.anthropic.claude-sonnet-4-20250514-v1:0",
+    # "bedrock:us.anthropic.claude-opus-4-20250514-v1:0",
+    # "bedrock:us.anthropic.claude-3-7-sonnet-20250219-v1:0",
+    # "bedrock:us.anthropic.claude-3-5-sonnet-20240620-v1:0",
+    # "bedrock:us.anthropic.claude-3-5-haiku-20241022-v1:0",
 ]
 
 logfire.configure(

@@ -507,7 +518,7 @@ async def main() -> None:
     parser.add_argument(
         "--parallel",
         action="store_true",
-        default=True,
+        default=False,
         help="Run evaluations in parallel",
     )
     parser.add_argument(
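Note: --parallel is now opt-in rather than on by default. Below is a rough sketch of the dispatch pattern such a flag usually selects; it is illustrative only, evaluate_model is a hypothetical stand-in, and the script's real evaluation loop is not part of this diff:

import asyncio


async def evaluate_model(model: str) -> None:
    ...  # hypothetical stand-in for the script's per-model evaluation


async def run_all(models: list[str], parallel: bool) -> None:
    if parallel:
        # Fan out one evaluation task per model and await them together.
        await asyncio.gather(*(evaluate_model(m) for m in models))
    else:
        # New default: evaluate models sequentially, one at a time.
        for m in models:
            await evaluate_model(m)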

pyproject.toml

Lines changed: 1 addition & 1 deletion

@@ -18,7 +18,7 @@ dependencies = [
     "openai-agents>=0.0.12",
     "pandas>=2.3.0",
     "plotly>=6.1.2",
-    "pydantic-ai-slim[mcp]>=0.2.15",
+    "pydantic-ai-slim[bedrock,mcp]>=0.2.15",
     "pydantic-evals[logfire]>=0.2.15",
     "python-dotenv>=1.1.0",
     "ruff>=0.11.10",

uv.lock

Lines changed: 60 additions & 2 deletions

(generated lockfile; diff not rendered)
