Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
23 changes: 23 additions & 0 deletions Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
# Base image: slim Python 3.12 keeps the final image small.
FROM python:3.12-slim

WORKDIR /app

# System packages: git for repository operations, TeX Live for compiling the
# generated LaTeX report. --no-install-recommends prevents apt from pulling in
# the (very large) optional TeX recommends; the apt cache is removed in the
# same layer so it never bloats the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
    git \
    texlive-latex-base \
    texlive-fonts-recommended \
    texlive-fonts-extra \
    texlive-latex-extra \
    texlive-science \
    && rm -rf /var/lib/apt/lists/*

# Install Python dependencies before copying the source so this layer is
# cached unless requirements.txt itself changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy the application source last to maximize layer-cache reuse.
COPY . .

# Unbuffered stdout/stderr so logs appear immediately in `docker logs`.
ENV PYTHONUNBUFFERED=1

# Output directory; intended to be bind-mounted by the user at run time.
RUN mkdir -p /app/research_dir

# `docker run ... agentlab <args>` appends <args> to this entrypoint.
ENTRYPOINT ["python3", "ai_lab_repo.py"]
14 changes: 14 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -87,6 +87,20 @@ To run Agent Laboratory in copilot mode, simply set the copilot-mode flag to `"t
`python ai_lab_repo.py --api-key "API_KEY_HERE" --llm-backend "o1-mini" --research-topic "YOUR RESEARCH IDEA" --copilot-mode "true"`

-----

## Docker Option

1. Build the Docker image

```docker build -t agentlab .```

2. Run the container (replace `API_KEY` with your actual API key)

```docker run -it -v "$(pwd)/research_dir:/app/research_dir" agentlab --research-topic "your research topic" --llm-backend "deepseek-chat" --api-key "API_KEY"```

Note: `research_dir` is mounted from your local machine, allowing you to access the generated files outside the container.


## Tips for better research outcomes


Expand Down
10 changes: 5 additions & 5 deletions ai_lab_repo.py
Original file line number Diff line number Diff line change
Expand Up @@ -86,15 +86,15 @@ def __init__(self, research_topic, openai_api_key, max_steps=100, num_papers_lit
self.ml_engineer = MLEngineerAgent(model=self.model_backbone, notes=self.notes, max_steps=self.max_steps, openai_api_key=self.openai_api_key)
self.sw_engineer = SWEngineerAgent(model=self.model_backbone, notes=self.notes, max_steps=self.max_steps, openai_api_key=self.openai_api_key)

# remove previous files
# remove previous files
remove_figures()
remove_directory("research_dir")
# make src and research directory
if not os.path.exists("state_saves"):
os.mkdir(os.path.join(".", "state_saves"))
os.mkdir(os.path.join(".", "research_dir"))
os.mkdir(os.path.join("./research_dir", "src"))
os.mkdir(os.path.join("./research_dir", "tex"))
os.makedirs("state_saves", exist_ok=True)
os.makedirs("research_dir", exist_ok=True)
os.makedirs("research_dir/src", exist_ok=True)
os.makedirs("research_dir/tex", exist_ok=True)

def set_model(self, model):
self.set_agent_attr("model", model)
Expand Down
2 changes: 1 addition & 1 deletion inference.py
Original file line number Diff line number Diff line change
Expand Up @@ -163,7 +163,7 @@ def query_model(model_str, prompt, system_prompt, openai_api_key=None, anthropic
if model_str in ["o1-preview", "o1-mini", "claude-3.5-sonnet", "o1"]:
encoding = tiktoken.encoding_for_model("gpt-4o")
elif model_str in ["deepseek-chat"]:
encoding = tiktoken.encoding_for_model("cl100k_base")
encoding = tiktoken.get_encoding("cl100k_base")
else:
encoding = tiktoken.encoding_for_model(model_str)
if model_str not in TOKENS_IN:
Expand Down