11 changes: 11 additions & 0 deletions agents/deepagents_content_builder/Dockerfile
@@ -0,0 +1,11 @@
FROM python:3.13-slim
ARG RELEASE_VERSION="main"
COPY ./agents/deepagents_content_builder/ /app/agents/deepagents_content_builder
COPY ./apps/agentstack-sdk-py/ /app/apps/agentstack-sdk-py/
WORKDIR /app/agents/deepagents_content_builder
RUN --mount=type=cache,target=/tmp/.cache/uv \
--mount=type=bind,from=ghcr.io/astral-sh/uv:0.9.5,source=/uv,target=/bin/uv \
UV_COMPILE_BYTECODE=1 HOME=/tmp uv sync
ENV PRODUCTION_MODE=True \
RELEASE_VERSION=${RELEASE_VERSION}
CMD ["/app/agents/deepagents_content_builder/.venv/bin/server"]
26 changes: 26 additions & 0 deletions agents/deepagents_content_builder/README.md
@@ -0,0 +1,26 @@
# Content Builder Agent (DeepAgents)

This project is an adapted implementation of the Content Builder Agent originally published in the
[DeepAgents repository](https://github.com/langchain-ai/deepagents/tree/master/examples/content-builder-agent).

The original example was designed to run in a stateful environment. This implementation
extends and modifies it to run reliably within the Agent Stack, adding support for context persistence, parallel runs, and Agent Stack primitives such as the LLM Proxy Service, File Storage, and Environment Variables.

## Implementation Notes

- **Custom Backend implementation**
  - Introduces a custom `Backend` that stores generated files in the Agent Stack file storage rather
    than on the local filesystem (see the sketch after this list).
  - Ensures files persist across multiple conversation turns.
  - Safely supports parallel executions where context is not shared.

- **File handling updates**
- Updates the `generate_social_image` and `generate_cover` tools to store assets using the
`File.create(...)` API provided by the Agent Stack instead of writing to disk.

- **Dynamic model loading**
- Modifies the YAML loader to dynamically resolve and load the specified model using the
LLM Fulfillment mechanism.

- **Message conversion utilities**
  - Adds helper functions to convert A2A messages into LangChain-compatible message formats
    (see the sketch below).
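
For illustration, here is a minimal sketch of the storage-backed `Backend` idea, assuming a `File.create(...)` call as described above. The class name, method signature, and import path are hypothetical; the actual implementation lives in `src/content_builder/backend.py`:

```python
# Hypothetical sketch only -- the real AgentStackBackend may differ.
from agentstack_sdk.platform import File  # import path is an assumption


class StorageBackedBackend:
    """Writes agent-generated files to Agent Stack file storage."""

    async def awrite(self, path: str, content: bytes) -> None:
        # Persisting through the platform API (rather than the local disk)
        # keeps files available across turns and isolates parallel runs.
        await File.create(filename=path, content=content)
```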
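
And a sketch of the A2A-to-LangChain message conversion, assuming each history entry exposes a `role` and text parts. The helper name and the exact part handling are illustrative; the real helpers live in `src/content_builder/utils.py`:

```python
# Hypothetical sketch -- the real to_langchain_messages() may handle more
# part types (files, images) than plain text.
from a2a.types import Message, Role  # Role import path is an assumption
from a2a.utils import get_message_text
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage


def convert_history(history: list[Message]) -> list[BaseMessage]:
    converted: list[BaseMessage] = []
    for msg in history:
        text = get_message_text(msg)
        if not text:
            continue  # skip entries without text parts
        cls = HumanMessage if msg.role == Role.user else AIMessage
        converted.append(cls(content=text))
    return converted
```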
40 changes: 40 additions & 0 deletions agents/deepagents_content_builder/pyproject.toml
@@ -0,0 +1,40 @@
[project]
name = "content-builder"
version = "0.1.0"
description = "A content writer agent configured entirely through files on disk"
authors = [
{ name = "IBM Corp." },
]
requires-python = ">=3.11"
dependencies = [
"agentstack-sdk",
"cachetools>=6.2.5",
"deepagents>=0.3.5",
"google-genai>=1.0.0",
"langchain-openai>=1.1.7",
"pillow>=10.0.0",
"pyyaml>=6.0.0",
"rich>=13.0.0",
"tavily-python>=0.5.0",
"wcmatch>=10.1"
]

[dependency-groups]
dev = []

[tool.ruff]
line-length = 120

[tool.uv.sources]
agentstack-sdk = { path = "../../apps/agentstack-sdk-py", editable = true }

[project.scripts]
server = "content_builder.agent:serve"

[build-system]
requires = ["uv_build>=0.9.0,<0.10.0"]
build-backend = "uv_build"

[tool.pyright]
venvPath = "."
venv = ".venv"
@@ -0,0 +1,2 @@
./blogs
.venv/
@@ -0,0 +1,3 @@
# Copyright 2025 © BeeAI a Series of LF Projects, LLC
# SPDX-License-Identifier: Apache-2.0

172 changes: 172 additions & 0 deletions agents/deepagents_content_builder/src/content_builder/agent.py
@@ -0,0 +1,172 @@
# Copyright 2025 © BeeAI a Series of LF Projects, LLC
# SPDX-License-Identifier: Apache-2.0


import json
import os
from collections import defaultdict
from pathlib import Path
from typing import Annotated
from datetime import datetime, timezone
from a2a.utils import get_message_text
from deepagents.backends import CompositeBackend, FilesystemBackend
from a2a.types import Message
from langchain_core.runnables import RunnableConfig

from agentstack_sdk.a2a.extensions import (
AgentDetail,
AgentDetailContributor,
LLMServiceExtensionServer,
LLMServiceExtensionSpec,
PlatformApiExtensionSpec,
PlatformApiExtensionServer,
LLMServiceExtensionParams,
LLMDemand,
TrajectoryExtensionServer,
TrajectoryExtensionSpec,
EnvVar,
)
from agentstack_sdk.a2a.types import AgentMessage
from agentstack_sdk.server import Server
from agentstack_sdk.server.context import RunContext
from langchain_core.messages import HumanMessage, AIMessageChunk, ToolMessage
from deepagents import create_deep_agent, SubAgent

from content_builder.backend import AgentStackBackend
from content_builder.tools import generate_cover, generate_social_image
from content_builder.utils import load_subagents, create_chat_model, to_langchain_messages
from content_builder.tools import web_search

DEFAULT_MODEL = "anthropic:claude-sonnet-4-5-20250929"
AVAILABLE_SUBAGENTS = load_subagents(config_path=Path("./subagents.yaml"), tools={"web_search": web_search})
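# One LLM demand per configured subagent (plus a default for the root agent),
# so the platform can fulfill each model independently.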
LLM_BY_AGENT = {
"default": LLMDemand(suggested=(DEFAULT_MODEL,), description="Default LLM for the root agent"),
**{
agent.name: LLMDemand(suggested=(agent.model,), description=f"LLM for subagent '{agent.name}'")
for agent in AVAILABLE_SUBAGENTS
if agent.model
},
}

server = Server()

CURRENT_DIRECTORY = Path(__file__).parent


@server.agent(
name="Content Creator Agent (Deepagents)",
documentation_url=f"https://github.com/i-am-bee/agentstack/blob/{os.getenv('RELEASE_VERSION', 'main')}/agents/deepagents_content_builder",
default_input_modes=["text/plain"],
default_output_modes=["text/plain", "image/jpeg", "image/png", "text/markdown"],
description="A content writer for a technology company that creates engaging, informative content that educates readers about AI, software development, and emerging technologies.",
detail=AgentDetail(
interaction_mode="multi-turn",
author=AgentDetailContributor(name="IBM"),
variables=[
EnvVar(name="TAVILY_API_KEY", description="API Key for Tavily to do web search", required=True),
EnvVar(name="GOOGLE_API_KEY", description="API Key for Google Image models", required=True),
],
),
)
async def content_builder_agent(
message: Message,
context: RunContext,
llm: Annotated[
LLMServiceExtensionServer,
LLMServiceExtensionSpec(params=LLMServiceExtensionParams(llm_demands=LLM_BY_AGENT)),
],
trajectory: Annotated[TrajectoryExtensionServer, TrajectoryExtensionSpec()],
_: Annotated[PlatformApiExtensionServer, PlatformApiExtensionSpec()],
):
default_llm_config = llm.data.llm_fulfillments.get("default")
if not default_llm_config:
yield "No LLM configured!"
return

user_message = get_message_text(message).strip()
if not user_message:
yield "Please provide a topic or instruction."
return

started_at = datetime.now(timezone.utc)
await context.store(data=message)

subagents: list[SubAgent] = []
for sub_agent in AVAILABLE_SUBAGENTS:
llm_config = llm.data.llm_fulfillments.get(sub_agent.name) or default_llm_config
sub_agent = sub_agent.to_deepagent_subagent(model=create_chat_model(llm_config))
subagents.append(sub_agent)

agent_stack_backend = AgentStackBackend()
fs_backend = FilesystemBackend(virtual_mode=True, root_dir=CURRENT_DIRECTORY)

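    # Route memory/ and skills/ paths to the bundled local files; everything
    # else (the generated content) defaults to Agent Stack file storage.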
agent = create_deep_agent(
model=create_chat_model(default_llm_config),
memory=[f"{CURRENT_DIRECTORY}/memory/AGENTS.md"],
skills=[f"{CURRENT_DIRECTORY}/skills/"],
tools=[generate_cover, generate_social_image],
subagents=subagents,
backend=CompositeBackend(
default=agent_stack_backend,
routes={f"{CURRENT_DIRECTORY}/memory/": fs_backend, f"{CURRENT_DIRECTORY}/skills/": fs_backend},
),
)

thread_id = f"session-{context.task_id}"
history = [message async for message in context.load_history() if isinstance(message, Message) and message.parts]
lc_messages = [*to_langchain_messages(history), HumanMessage(content=user_message)]
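    # Accumulate streamed tool-call fragments by call id; flushed to the
    # trajectory once the model reports finish_reason == "tool_calls".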
tool_calls = defaultdict(lambda: {"name": "", "args": ""})

async for chunk in agent.astream(
input={"messages": lc_messages},
config=RunnableConfig(configurable={"thread_id": thread_id}),
stream_mode=["messages"],
):
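        # With a stream_mode list, astream yields (mode, payload) pairs; for
        # "messages", payload is a (message_chunk, metadata) tuple, and the
        # isinstance checks below pick out the message chunk.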
        mode, payload = chunk
        if mode != "messages" or not payload:
            continue

        for last_msg in payload:
if isinstance(last_msg, AIMessageChunk):
if (
"finish_reason" in last_msg.response_metadata
and last_msg.response_metadata["finish_reason"] == "tool_calls"
):
for _, data in tool_calls.items():
tool_call_metadata = trajectory.trajectory_metadata(
title=data["name"], content=json.dumps(obj=data["args"])
)
yield tool_call_metadata
await context.store(data=AgentMessage(metadata=tool_call_metadata))
tool_calls.clear()

elif last_msg.tool_call_chunks:
for tc in last_msg.tool_call_chunks:
tc_id: str | None = tc.get("id")
if tc_id:
tool_calls[tc_id]["name"] += tc.get("name") or ""
tool_calls[tc_id]["args"] += tc.get("args") or ""
elif last_msg.text:
yield AgentMessage(text=last_msg.text)
await context.store(AgentMessage(text=last_msg.text))

elif isinstance(last_msg, ToolMessage) and last_msg.name and last_msg.text:
tool_message_metadata = trajectory.trajectory_metadata(title=last_msg.name, content=last_msg.text)
yield tool_message_metadata
await context.store(data=AgentMessage(metadata=tool_message_metadata))

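    # Emit any files this run created in Agent Stack storage as A2A file parts.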
updated_files = await agent_stack_backend.alist(order_by="created_at", order="asc", created_after=started_at)
for updated_file in updated_files:
yield updated_file.to_file_part()


def serve():
try:
        server.run(host=os.getenv("HOST", "127.0.0.1"), port=int(os.getenv("PORT", "10003")), configure_telemetry=True)
except KeyboardInterrupt:
pass


if __name__ == "__main__":
serve()