
Commit 05aa551

adding bedrock as model provider (#37)
* adding bedrock as model provider
* bump version
1 parent 0a76ec1 commit 05aa551

10 files changed, +105 −1 lines changed

Lines changed: 2 additions & 0 deletions
```yaml
label: "Bedrock"
position: 5
```

docs/docs/models/bedrock/index.mdx

Lines changed: 25 additions & 0 deletions
# Bedrock

Jupyter AI Agents supports [Amazon Bedrock](https://aws.amazon.com/bedrock/) models.

To use Bedrock models, you'll need to define the following environment variables:

```bash
AWS_ACCESS_KEY_ID     - The access key for your AWS account.
AWS_SECRET_ACCESS_KEY - The secret key for your AWS account.
AWS_SESSION_TOKEN     - The session token for your AWS account. This is only needed when you are using temporary credentials.
```

Example usage with direct Bedrock integration:

```bash
# Prompt agent example with Bedrock
jupyter-ai-agents prompt \
  --url http://localhost:8888 \
  --token MY_TOKEN \
  --model-provider bedrock \
  --model-name anthropic.claude-3-sonnet-20240229-v1:0 \
  --path test.ipynb \
  --input "Create a matplotlib example"
```
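
As a quick sanity check before running the example above, you can confirm that the documented AWS variables are visible to the process. This is a minimal sketch, not part of the commit; depending on your AWS setup, a region (for example via `AWS_DEFAULT_REGION`) may also be required for Bedrock calls.

```python
# Minimal sketch (not from this commit): verify the Bedrock-related AWS
# variables documented above are present in the environment.
import os

required = ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"]
optional = ["AWS_SESSION_TOKEN", "AWS_DEFAULT_REGION"]  # session token only needed for temporary credentials

missing = [name for name in required if not os.environ.get(name)]
if missing:
    raise SystemExit(f"Missing AWS credentials: {', '.join(missing)}")

for name in optional:
    print(f"{name}: {'set' if os.environ.get(name) else 'not set'}")
```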

jupyter_ai_agents/__version__.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -4,4 +4,4 @@

 """Jupyter AI Agents."""

-__version__ = "0.10.1"
+__version__ = "0.10.2"
```

jupyter_ai_agents/agents/utils.py

Lines changed: 3 additions & 0 deletions
```diff
@@ -7,6 +7,7 @@
 from jupyter_ai_agents.providers.anthropic import create_anthropic_agent
 from jupyter_ai_agents.providers.azure_openai import create_azure_openai_agent
 from jupyter_ai_agents.providers.github_copilot import create_github_copilot_agent
+from jupyter_ai_agents.providers.bedrock import create_bedrock_agent
 from jupyter_ai_agents.providers.openai import create_openai_agent


@@ -22,6 +23,8 @@ def create_ai_agent(
         agent = create_openai_agent(model_name, system_prompt_final, tools)
     elif model_provider == "anthropic":
         agent = create_anthropic_agent(model_name, system_prompt_final, tools)
+    elif model_provider == "bedrock":
+        agent = create_bedrock_agent(model_name, system_prompt_final, tools)
     else:
         raise ValueError(f"Model provider {model_provider} is not supported.")
     return agent
```

jupyter_ai_agents/providers/anthropic.py

Lines changed: 4 additions & 0 deletions
```diff
@@ -8,6 +8,8 @@
 from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
 from langchain.agents.tool_calling_agent.base import create_tool_calling_agent
 from langchain_core.tools import BaseTool
+from dotenv import load_dotenv
+

 def create_anthropic_agent(model_name: str, system_prompt: str, tools: List[BaseTool]) -> AgentExecutor:
     """Create an agent from a set of tools using Anthropic's Claude API.
@@ -20,6 +22,8 @@ def create_anthropic_agent(model_name: str, system_prompt: str, tools: List[Base
     Returns:
         An agent executor that can use tools via Claude
     """
+
+    load_dotenv()

     # Create the Anthropic LLM
     llm = ChatAnthropic(model=model_name)
```
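
The same `load_dotenv()` call is added to the Azure OpenAI, GitHub Copilot, OpenAI, and new Bedrock providers below, so provider credentials can live in a local `.env` file instead of being exported in the shell. A small illustration of the effect (not part of this commit) follows; it only assumes the `python-dotenv` package already imported by these modules.

```python
# Illustration only: python-dotenv reads KEY=value pairs from a .env file in
# the current working directory into os.environ (already-set variables win).
import os
from dotenv import load_dotenv

load_dotenv()  # silently does nothing if no .env file is found
print("ANTHROPIC_API_KEY set:", "ANTHROPIC_API_KEY" in os.environ)
```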

jupyter_ai_agents/providers/azure_openai.py

Lines changed: 3 additions & 0 deletions
```diff
@@ -5,10 +5,13 @@
 from langchain.agents import AgentExecutor, create_openai_tools_agent
 from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
 from langchain_openai import AzureChatOpenAI
+from dotenv import load_dotenv


 def create_azure_openai_agent(model_name: str, system_prompt: str, tools: list) -> AgentExecutor:
     """Create an agent from a set of tools and an Azure deployment"""
+
+    load_dotenv()

     llm = AzureChatOpenAI(azure_deployment=model_name)
```

jupyter_ai_agents/providers/bedrock.py

Lines changed: 60 additions & 0 deletions

```python
# Copyright (c) 2023-2024 Datalayer, Inc.
#
# BSD 3-Clause License

from typing import List
from langchain.agents.agent import AgentExecutor
from langchain_aws import ChatBedrockConverse
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.agents.tool_calling_agent.base import create_tool_calling_agent
from langchain_core.tools import BaseTool
from dotenv import load_dotenv

def create_bedrock_agent(model_name: str, system_prompt: str, tools: List[BaseTool]) -> AgentExecutor:
    """Create an agent from a set of tools using Amazon Bedrock.

    Args:
        model_name: The Bedrock model ID to use (e.g., "anthropic.claude-3-sonnet-20240229-v1:0")
        system_prompt: The system prompt to use for the agent
        tools: A list of tools for the agent to use

    Returns:
        An agent executor that can use tools via Bedrock
    """

    load_dotenv()

    # Create the Bedrock LLM
    llm = ChatBedrockConverse(model_id=model_name)

    # Create a prompt template for the agent with enhanced instructions
    enhanced_system_prompt = f"""
{system_prompt}

When you use tools, please include the results in your response to the user.
Be sure to always provide a text response, even if it's just to acknowledge the tool's output.
After using a tool, explain what the result means in a clear and helpful way.
"""

    # Create prompt template
    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", enhanced_system_prompt),
            ("user", "{input}"),
            MessagesPlaceholder(variable_name="agent_scratchpad"),
        ])

    # Create a tool-calling agent
    agent = create_tool_calling_agent(llm, tools, prompt)

    # Create an agent executor with output parsing
    agent_executor = AgentExecutor(
        name="BedrockToolAgent",
        agent=agent,
        tools=tools,
        verbose=True,
        handle_parsing_errors=True,
        return_intermediate_steps=True  # Include intermediate steps in the output
    )

    return agent_executor
```
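
A hypothetical usage sketch (not part of this commit) showing how the new factory can be exercised outside the CLI, assuming AWS credentials and a Bedrock-enabled region are already configured; the toy `add` tool exists only for illustration.

```python
# Hypothetical example: build a Bedrock agent with one toy tool and run it.
from langchain_core.tools import tool
from jupyter_ai_agents.providers.bedrock import create_bedrock_agent


@tool
def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b


agent_executor = create_bedrock_agent(
    model_name="anthropic.claude-3-sonnet-20240229-v1:0",
    system_prompt="You are a helpful assistant.",
    tools=[add],
)

# AgentExecutor exposes the standard LangChain invoke() interface.
result = agent_executor.invoke({"input": "What is 2 + 3? Use the add tool."})
print(result["output"])
```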

jupyter_ai_agents/providers/github_copilot.py

Lines changed: 3 additions & 0 deletions
```diff
@@ -5,10 +5,13 @@
 from langchain.agents import AgentExecutor, create_openai_tools_agent
 from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
 from langchain_github_copilot import ChatGitHubCopilot
+from dotenv import load_dotenv


 def create_github_copilot_agent(model_name: str, system_prompt: str, tools: list) -> AgentExecutor:
     """Create an agent from a set of tools and a Github Copilot model"""
+
+    load_dotenv()

     llm = ChatGitHubCopilot(model_name=model_name)
```

jupyter_ai_agents/providers/openai.py

Lines changed: 3 additions & 0 deletions
```diff
@@ -6,9 +6,12 @@
 from langchain_openai import ChatOpenAI
 from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
 from langchain.agents import create_openai_tools_agent
+from dotenv import load_dotenv

 def create_openai_agent(model_name: str, system_prompt: str, tools: list) -> AgentExecutor:
     """Create an agent from a set of tools using OpenAI's API."""
+
+    load_dotenv()

     llm = ChatOpenAI(model_name=model_name)
```

pyproject.toml

Lines changed: 1 addition & 0 deletions
```diff
@@ -32,6 +32,7 @@ dependencies = [
     "langchain-openai",
     "langchain-github-copilot>=0.4.0",
     "langchain-anthropic",
+    "langchain-aws",
     "fastapi",
     "jupyter_kernel_client==0.6.0",
     "jupyter_nbmodel_client==0.11.3",
```
