17 commits
485ba5f  feat: add E2B code interpreter (jamesmurdza, Sep 23, 2024)
93cc7e9  feat: add arguments (jamesmurdza, Sep 24, 2024)
d84f5ff  chore: improve docs (jamesmurdza, Sep 24, 2024)
e4bb036  fix: allow API key passed as an argument instead of an environment va… (jamesmurdza, Sep 25, 2024)
864d635  fix: use correct pip install notation (jamesmurdza, Oct 5, 2024)
48ca109  fix: add imports (jamesmurdza, Oct 5, 2024)
32af13f  fix: remove unsupported 'envs' argument from E2BCodeInterpreterTool, … (devin-ai-integration[bot], Dec 29, 2024)
eaf8ea3  chore: update environment config (convert to uv, update pre-commit py… (devin-ai-integration[bot], Dec 29, 2024)
06f00be  style: apply remaining code formatting from pre-commit hooks (devin-ai-integration[bot], Dec 29, 2024)
8fd7972  fix: update test_code_interpreter_tool.py to use correct List[str] ty… (devin-ai-integration[bot], Dec 29, 2024)
52e6b33  style: apply formatting from pre-commit hooks (devin-ai-integration[bot], Dec 29, 2024)
de9760e  feat: add e2b_code_interpreter as optional dependency (devin-ai-integration[bot], Dec 29, 2024)
85a6a43  refactor: update E2B Code Interpreter Tool to use new Sandbox API (devin-ai-integration[bot], Dec 29, 2024)
63dd967  fix: use relative import for BaseTool to avoid circular imports (devin-ai-integration[bot], Dec 29, 2024)
6d81ddd  ci: add GitHub Actions workflow for tests, type checking, and pre-commit (devin-ai-integration[bot], Dec 29, 2024)
439cde1  style: apply final formatting changes (devin-ai-integration[bot], Dec 29, 2024)
78cb715  style: fix import ordering in __init__.py (devin-ai-integration[bot], Dec 29, 2024)
47 changes: 47 additions & 0 deletions .github/workflows/ci.yml
@@ -0,0 +1,47 @@
name: CI

on:
push:
branches: [ main ]
pull_request:
branches: [ main ]

jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4

- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: '3.12'

- name: Install uv
run: |
curl -LsSf https://astral.sh/uv/install.sh | sh
echo "$HOME/.cargo/bin" >> $GITHUB_PATH

- name: Install dependencies
run: |
uv venv
source .venv/bin/activate
uv sync --all-extras

- name: Install pre-commit
run: |
pip install pre-commit
pre-commit install

- name: Run pre-commit hooks
run: pre-commit run --all-files

- name: Run type checking
run: |
source .venv/bin/activate
uv run pyright

- name: Run tests
run: |
source .venv/bin/activate
uv run pytest tests -v
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -4,7 +4,7 @@ repos:
rev: 23.12.1
hooks:
- id: black
language_version: python3.11
language_version: python3.12
files: \.(py)$

- repo: https://github.com/pycqa/isort
68 changes: 63 additions & 5 deletions crewai_tools/__init__.py
@@ -1,3 +1,5 @@
from crewai.tools import BaseTool, tool

from .tools import (
BraveSearchTool,
BrowserbaseLoadTool,
@@ -9,6 +11,7 @@
DirectoryReadTool,
DirectorySearchTool,
DOCXSearchTool,
E2BCodeInterpreterTool,
EXASearchTool,
FileReadTool,
FileWriterTool,
@@ -27,11 +30,13 @@
PGSearchTool,
RagTool,
ScrapeElementFromWebsiteTool,
ScrapeGraphScrapeTool,
ScrapegraphScrapeToolSchema
ScrapeGraphScrapeTool,
ScrapegraphScrapeToolSchema,
ScrapeWebsiteTool,
ScrapflyScrapeWebsiteTool,
SeleniumScrapingTool,
SerpApiGoogleSearchTool,
SerpApiGoogleShoppingTool,
SerperDevTool,
SerplyJobSearchTool,
SerplyNewsSearchTool,
@@ -41,11 +46,64 @@
SpiderTool,
TXTSearchTool,
VisionTool,
WeaviateVectorSearchTool,
WebsiteSearchTool,
XMLSearchTool,
YoutubeChannelSearchTool,
YoutubeVideoSearchTool,
WeaviateVectorSearchTool,
SerpApiGoogleSearchTool,
SerpApiGoogleShoppingTool,
)

__all__ = [
"BaseTool",
"tool",
"BraveSearchTool",
"BrowserbaseLoadTool",
"CodeDocsSearchTool",
"CodeInterpreterTool",
"ComposioTool",
"CSVSearchTool",
"DallETool",
"DirectoryReadTool",
"DirectorySearchTool",
"DOCXSearchTool",
"E2BCodeInterpreterTool",
"EXASearchTool",
"FileReadTool",
"FileWriterTool",
"FirecrawlCrawlWebsiteTool",
"FirecrawlScrapeWebsiteTool",
"FirecrawlSearchTool",
"GithubSearchTool",
"JSONSearchTool",
"LinkupSearchTool",
"LlamaIndexTool",
"MDXSearchTool",
"MultiOnTool",
"MySQLSearchTool",
"NL2SQLTool",
"PDFSearchTool",
"PGSearchTool",
"RagTool",
"ScrapeElementFromWebsiteTool",
"ScrapeGraphScrapeTool",
"ScrapegraphScrapeToolSchema",
"ScrapeWebsiteTool",
"ScrapflyScrapeWebsiteTool",
"SeleniumScrapingTool",
"SerpApiGoogleSearchTool",
"SerpApiGoogleShoppingTool",
"SerperDevTool",
"SerplyJobSearchTool",
"SerplyNewsSearchTool",
"SerplyScholarSearchTool",
"SerplyWebpageToMarkdownTool",
"SerplyWebSearchTool",
"SpiderTool",
"TXTSearchTool",
"VisionTool",
"WeaviateVectorSearchTool",
"WebsiteSearchTool",
"XMLSearchTool",
"YoutubeChannelSearchTool",
"YoutubeVideoSearchTool",
]
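With these exports in place, the new tool resolves from the package root. A quick, illustrative check (it assumes the optional `e2b_code_interpreter` dependency added by this PR is installed):

```python
# Illustrative check only; assumes the optional e2b_code_interpreter
# dependency is installed so the import does not fail.
from crewai_tools import BaseTool, E2BCodeInterpreterTool

# The new tool is an ordinary CrewAI tool subclass.
assert issubclass(E2BCodeInterpreterTool, BaseTool)
```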
12 changes: 8 additions & 4 deletions crewai_tools/tools/__init__.py
@@ -8,6 +8,7 @@
from .directory_read_tool.directory_read_tool import DirectoryReadTool
from .directory_search_tool.directory_search_tool import DirectorySearchTool
from .docx_search_tool.docx_search_tool import DOCXSearchTool
from .e2b_code_interpreter_tool.code_interpreter_tool import E2BCodeInterpreterTool
from .exa_tools.exa_search_tool import EXASearchTool
from .file_read_tool.file_read_tool import FileReadTool
from .file_writer_tool.file_writer_tool import FileWriterTool
@@ -32,12 +33,17 @@
from .scrape_element_from_website.scrape_element_from_website import (
ScrapeElementFromWebsiteTool,
)
from .scrapegraph_scrape_tool.scrapegraph_scrape_tool import ScrapeGraphScrapeTool, ScrapegraphScrapeToolSchema
from .scrape_website_tool.scrape_website_tool import ScrapeWebsiteTool
from .scrapegraph_scrape_tool.scrapegraph_scrape_tool import (
ScrapeGraphScrapeTool,
ScrapegraphScrapeToolSchema,
)
from .scrapfly_scrape_website_tool.scrapfly_scrape_website_tool import (
ScrapflyScrapeWebsiteTool,
)
from .selenium_scraping_tool.selenium_scraping_tool import SeleniumScrapingTool
from .serpapi_tool.serpapi_google_search_tool import SerpApiGoogleSearchTool
from .serpapi_tool.serpapi_google_shopping_tool import SerpApiGoogleShoppingTool
from .serper_dev_tool.serper_dev_tool import SerperDevTool
from .serply_api_tool.serply_job_search_tool import SerplyJobSearchTool
from .serply_api_tool.serply_news_search_tool import SerplyNewsSearchTool
@@ -47,12 +53,10 @@
from .spider_tool.spider_tool import SpiderTool
from .txt_search_tool.txt_search_tool import TXTSearchTool
from .vision_tool.vision_tool import VisionTool
from .weaviate_tool.vector_search import WeaviateVectorSearchTool
from .website_search.website_search_tool import WebsiteSearchTool
from .xml_search_tool.xml_search_tool import XMLSearchTool
from .youtube_channel_search_tool.youtube_channel_search_tool import (
YoutubeChannelSearchTool,
)
from .youtube_video_search_tool.youtube_video_search_tool import YoutubeVideoSearchTool
from .weaviate_tool.vector_search import WeaviateVectorSearchTool
from .serpapi_tool.serpapi_google_search_tool import SerpApiGoogleSearchTool
from .serpapi_tool.serpapi_google_shopping_tool import SerpApiGoogleShoppingTool
crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py
@@ -10,9 +10,7 @@ class BrowserbaseLoadToolSchema(BaseModel):

class BrowserbaseLoadTool(BaseTool):
name: str = "Browserbase web load tool"
description: str = (
"Load webpages url in a headless browser using Browserbase and return the contents"
)
description: str = "Load webpages url in a headless browser using Browserbase and return the contents"
args_schema: Type[BaseModel] = BrowserbaseLoadToolSchema
api_key: Optional[str] = None
project_id: Optional[str] = None
22 changes: 14 additions & 8 deletions crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py
@@ -2,10 +2,10 @@
import os
from typing import List, Optional, Type

from crewai.tools import BaseTool
from docker import from_env as docker_from_env
from docker.models.containers import Container
from docker.errors import ImageNotFound, NotFound
from crewai.tools import BaseTool
from docker.models.containers import Container
from pydantic import BaseModel, Field


@@ -30,20 +30,28 @@ class CodeInterpreterTool(BaseTool):
default_image_tag: str = "code-interpreter:latest"
code: Optional[str] = None
user_dockerfile_path: Optional[str] = None
user_docker_base_url: Optional[str] = None
user_docker_base_url: Optional[str] = None
unsafe_mode: bool = False

@staticmethod
def _get_installed_package_path():
def _get_installed_package_path() -> str:
spec = importlib.util.find_spec("crewai_tools")
if spec is None or spec.origin is None:
raise ImportError("Could not find crewai_tools package")
return os.path.dirname(spec.origin)

def _verify_docker_image(self) -> None:
"""
Verify if the Docker image is available. Optionally use a user-provided Dockerfile.
"""

client = docker_from_env() if self.user_docker_base_url == None else docker.DockerClient(base_url=self.user_docker_base_url)
from docker import DockerClient

client = (
docker_from_env()
if self.user_docker_base_url == None
else DockerClient(base_url=self.user_docker_base_url)
)

try:
client.images.get(self.default_image_tag)
@@ -76,9 +84,7 @@ def _run(self, **kwargs) -> str:
else:
return self.run_code_in_docker(code, libraries_used)

def _install_libraries(
self, container: Container, libraries: List[str]
) -> None:
def _install_libraries(self, container: Container, libraries: List[str]) -> None:
"""
Install missing libraries in the Docker container
"""
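For illustration, a minimal sketch of the `user_docker_base_url` field handled above: when set, the tool builds a `DockerClient` against that daemon instead of calling `docker_from_env()`. The URL below is a placeholder.

```python
from crewai_tools import CodeInterpreterTool

# Placeholder daemon address; any base_url accepted by docker.DockerClient
# (e.g. a tcp:// or ssh:// URL) should work here.
interpreter = CodeInterpreterTool(
    user_docker_base_url="tcp://127.0.0.1:2375",
)

# Leaving user_docker_base_url as None keeps the default behaviour:
# the client is created from the local environment via docker_from_env().
```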
2 changes: 0 additions & 2 deletions crewai_tools/tools/directory_read_tool/directory_read_tool.py
@@ -8,8 +8,6 @@
class FixedDirectoryReadToolSchema(BaseModel):
"""Input for DirectoryReadTool."""

pass


class DirectoryReadToolSchema(FixedDirectoryReadToolSchema):
"""Input for DirectoryReadTool."""
50 changes: 50 additions & 0 deletions crewai_tools/tools/e2b_code_interpreter_tool/README.md
@@ -0,0 +1,50 @@
# E2BCodeInterpreterTool

## Description
This tool gives an agent the ability to run arbitrary Python code. The code is executed in a secure cloud sandbox via E2B. After the code runs, the agent receives its result along with any errors that occurred during execution, which gives the agent debugging ability.

## Installation

- Get an API key from [e2b.dev](https://e2b.dev) and set it in the `E2B_API_KEY` environment variable.
- Install the [Code Interpreter Beta SDK](https://e2b.dev/docs/guide/beta-migration) along with the `crewai[tools]` package:

```
pip install e2b-code-interpreter==0.0.11b32 'crewai[tools]'
```

## Example

Utilize the code interpreter tool to allow your agent to run Python code:

```python
from crewai import Agent
from crewai_tools import E2BCodeInterpreterTool

# The cloud sandbox will shut down after 300 seconds, or whatever value is passed to the timeout argument.
code_interpreter = E2BCodeInterpreterTool()

Agent(
...
tools=[code_interpreter],
)

# ... Use the agent ...

# To shut down the sandbox immediately, use:
code_interpreter.close()
```

If the `close()` method is not used, the sandbox will continue to exist until it times out, consuming additional cloud credits.
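One way to guarantee the sandbox is released even if a run fails is a `try`/`finally` block; a minimal sketch (crew and task setup elided):

```python
code_interpreter = E2BCodeInterpreterTool()
try:
    # ... build the Agent/Crew with tools=[code_interpreter] and run it ...
    pass
finally:
    code_interpreter.close()  # always release the cloud sandbox
```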

Further examples are provided in the [E2B Cookbook](https://github.com/e2b-dev/e2b-cookbook).

## Arguments

All of the arguments below are optional; a usage sketch follows the list:

- `api_key`: E2B [API key](https://e2b.dev/docs/getting-started/api-key) used for authentication. Defaults to `E2B_API_KEY` environment variable.
- `template`: A pre-defined template to spawn a [custom sandbox](https://e2b.dev/docs/sandbox/custom). Defaults to the standard sandbox.
- `timeout`: Timeout in seconds for the sandbox to open or execute code; the sandbox shuts down after this interval. Defaults to 300s.
- `request_timeout`: Timeout for the creation of the sandbox itself. Defaults to 30s.
- `metadata`: Optional [metadata](https://e2b.dev/docs/sandbox/api/metadata) to be associated with the sandbox.
- `envs`: A dictionary containing [environment variables](https://e2b.dev/docs/sandbox/api/envs) to be passed to the sandbox.
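For illustration, a minimal sketch that passes only the constructor arguments visible in `code_interpreter_tool.py` below (`api_key`, `template`, `timeout`, `request_timeout`); every value is a placeholder:

```python
from crewai_tools import E2BCodeInterpreterTool

# Placeholder values for illustration only.
code_interpreter = E2BCodeInterpreterTool(
    api_key="e2b_...",        # instead of the E2B_API_KEY environment variable
    template="my-template",   # ID of a custom sandbox template
    timeout=120,              # sandbox shuts down after 120 seconds
    request_timeout=30.0,     # timeout for creating the sandbox itself
)
```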
69 changes: 69 additions & 0 deletions crewai_tools/tools/e2b_code_interpreter_tool/code_interpreter_tool.py
@@ -0,0 +1,69 @@
import json
from typing import Optional, Type

from crewai.tools import BaseTool
from e2b_code_interpreter import Sandbox
from pydantic import BaseModel, Field


class E2BCodeInterpreterSchema(BaseModel):
"""Input schema for the CodeInterpreterTool, used by the agent."""

code: str = Field(
...,
description="Python3 code used to run in the Jupyter notebook cell. Non-standard packages are installed by appending !pip install [packagenames] and the Python code in one single code block.",
)


class E2BCodeInterpreterTool(BaseTool):
"""
This is a tool that runs arbitrary code in a Python Jupyter notebook.
It uses E2B to run the notebook in a secure cloud sandbox.
It requires an E2B_API_KEY to create a sandbox.
"""

name: str = "code_interpreter"
description: str = "Execute Python code in a Jupyter notebook cell and return any rich data (eg charts), stdout, stderr, and errors."
args_schema: Type[BaseModel] = E2BCodeInterpreterSchema
_sandbox: Sandbox | None = None

def __init__(
self,
template: Optional[str] = None,
timeout: Optional[int] = None,
api_key: Optional[str] = None,
request_timeout: Optional[float] = None,
**kwargs,
):
# Call the superclass's init method
super().__init__(**kwargs)

# Initialize the sandbox
self._sandbox = Sandbox(
template=template,
timeout=timeout,
api_key=api_key,
request_timeout=request_timeout,
)

def _run(self, code: str) -> str:
# Execute the code using the sandbox
execution = self._sandbox.run_code(code)

# Extract relevant execution details
result = {
"results": [str(execution.text)],
"stdout": execution.stdout or "",
"stderr": execution.stderr or "",
"error": str(execution.error or ""),
}

# Convert the result dictionary to a JSON string since CrewAI expects a string output
content = json.dumps(result, indent=2)

return content

def close(self):
# Close the sandbox when done
if self._sandbox:
self._sandbox.close()
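As a quick check of the JSON payload that `_run` returns, a minimal sketch of exercising the tool directly outside of an agent (assumes `E2B_API_KEY` is set in the environment):

```python
import json

from crewai_tools import E2BCodeInterpreterTool

tool = E2BCodeInterpreterTool()
try:
    # _run is the method CrewAI invokes when the agent calls the tool.
    output = tool._run(code="print(21 * 2)")
    result = json.loads(output)
    print(result["stdout"])  # stdout captured by the sandbox
    print(result["error"])   # empty string when execution succeeded
finally:
    tool.close()  # shut the sandbox down immediately
```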