31 changes: 18 additions & 13 deletions typescript-sdk/apps/dojo/scripts/generate-content-json.ts
@@ -140,57 +140,60 @@ async function getFeatureFrontendFiles(featureId: string) {
}

const integrationsFolderPath = '../../../integrations'
const agentFilesMapper: Record<string, (agentKeys: string[]) => Record<string, string>> = {
const agentFilesMapper: Record<string, (agentKeys: string[]) => Record<string, string[]>> = {
'middleware-starter': () => ({
agentic_chat: path.join(__dirname, integrationsFolderPath, `/middleware-starter/src/index.ts`)
agentic_chat: [path.join(__dirname, integrationsFolderPath, `/middleware-starter/src/index.ts`)]
}),
'pydantic-ai': (agentKeys: string[]) => {
return agentKeys.reduce((acc, agentId) => ({
...acc,
[agentId]: `https://github.com/pydantic/pydantic-ai/blob/main/examples/pydantic_ai_examples/ag_ui/api/${agentId}.py`
[agentId]: [`https://github.com/pydantic/pydantic-ai/blob/main/examples/pydantic_ai_examples/ag_ui/api/${agentId}.py`]
}), {})
},
'server-starter': () => ({
agentic_chat: path.join(__dirname, integrationsFolderPath, `/server-starter/server/python/example_server/__init__.py`)
agentic_chat: [path.join(__dirname, integrationsFolderPath, `/server-starter/server/python/example_server/__init__.py`)]
}),
'server-starter-all-features': (agentKeys: string[]) => {
return agentKeys.reduce((acc, agentId) => ({
...acc,
[agentId]: path.join(__dirname, integrationsFolderPath, `/server-starter/server/python/example_server/${agentId}.py`)
[agentId]: [path.join(__dirname, integrationsFolderPath, `/server-starter-all-features/server/python/example_server/${agentId}.py`)]
}), {})
},
'mastra': () => ({
agentic_chat: path.join(__dirname, integrationsFolderPath, `/mastra/example/src/mastra/agents/weather-agent.ts`)
agentic_chat: [path.join(__dirname, integrationsFolderPath, `/mastra/example/src/mastra/agents/weather-agent.ts`)]
}),
'mastra-agent-lock': () => ({
agentic_chat: path.join(__dirname, integrationsFolderPath, `/mastra/example/src/mastra/agents/weather-agent.ts`)
agentic_chat: [path.join(__dirname, integrationsFolderPath, `/mastra/example/src/mastra/agents/weather-agent.ts`)]
}),
'vercel-ai-sdk': () => ({
agentic_chat: path.join(__dirname, integrationsFolderPath, `/vercel-ai-sdk/src/index.ts`)
agentic_chat: [path.join(__dirname, integrationsFolderPath, `/vercel-ai-sdk/src/index.ts`)]
}),
'langgraph': (agentKeys: string[]) => {
return agentKeys.reduce((acc, agentId) => ({
...acc,
[agentId]: path.join(__dirname, integrationsFolderPath, `/langgraph/examples/agents/${agentId}/agent.py`)
[agentId]: [
path.join(__dirname, integrationsFolderPath, `/langgraph/examples/python/agents/${agentId}/agent.py`),
path.join(__dirname, integrationsFolderPath, `/langgraph/examples/typescript/src/agents/${agentId}/agent.ts`)
]
}), {})
},
'langgraph-fastapi': (agentKeys: string[]) => {
return agentKeys.reduce((acc, agentId) => ({
...acc,
[agentId]: path.join(__dirname, integrationsFolderPath, `/langgraph/python/ag_ui_langgraph/examples/agents/${agentId}.py`)
[agentId]: [path.join(__dirname, integrationsFolderPath, `/langgraph/examples/python/agents/${agentId}/agent.py`)]
}), {})
},
'agno': () => ({}),
'llama-index': (agentKeys: string[]) => {
return agentKeys.reduce((acc, agentId) => ({
...acc,
[agentId]: path.join(__dirname, integrationsFolderPath, `/llamaindex/server-py/server/routers/${agentId}.py`)
[agentId]: [path.join(__dirname, integrationsFolderPath, `/llamaindex/server-py/server/routers/${agentId}.py`)]
}), {})
},
'crewai': (agentKeys: string[]) => {
return agentKeys.reduce((acc, agentId) => ({
...acc,
[agentId]: path.join(__dirname, integrationsFolderPath, `/crewai/python/ag_ui_crewai/examples/${agentId}.py`)
[agentId]: [path.join(__dirname, integrationsFolderPath, `/crewai/python/ag_ui_crewai/examples/${agentId}.py`)]
}), {})
}
}
@@ -202,14 +205,16 @@ async function runGenerateContent() {
const agentsPerFeatures = agentConfig.agentKeys

const agentFilePaths = agentFilesMapper[agentConfig.id](agentConfig.agentKeys)

// Per feature, assign all the frontend files like page.tsx as well as all agent files
for (const featureId of agentsPerFeatures) {
const agentFilePathsForFeature = agentFilePaths[featureId] ?? []
// @ts-expect-error -- redundant error about indexing of a new object.
result[`${agentConfig.id}::${featureId}`] = [
// Get all frontend files for the feature
...(await getFeatureFrontendFiles(featureId)),
// Get the agent (python/TS) file
await getFile(agentFilePaths[featureId])
...(await Promise.all(agentFilePathsForFeature.map(async f => await getFile(f))))
]
}
}
@@ -51,7 +51,7 @@ export default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIP
agent="tool_based_generative_ui"
>
<div
className={`${isMobile ? 'h-screen' : 'min-h-full'} w-full relative overflow-hidden`}
className={`${isMobile ? 'h-screen' : 'min-h-full flex'} w-full relative overflow-hidden`}
style={
{
// "--copilot-kit-primary-color": "#222",
@@ -336,7 +336,7 @@ function Haiku() {
<div className="flex h-full w-full">
{/* Thumbnail List */}
{Boolean(generatedHaikus.length) && !isMobile && (
<div className="w-40 p-4 border-r border-gray-200 overflow-y-auto overflow-x-hidden max-w-1">
<div className="w-40 p-4 border-r border-gray-200 overflow-y-auto overflow-x-hidden">
{generatedHaikus.map((haiku, index) => (
<div
key={index}
127 changes: 96 additions & 31 deletions typescript-sdk/apps/dojo/src/files.json

Large diffs are not rendered by default.

@@ -2,7 +2,8 @@
A simple agentic chat flow using LangGraph instead of CrewAI.
"""

from typing import Dict, List, Any, Optional
from typing import List, Any
import os

# Updated imports for LangGraph
from langchain_core.runnables import RunnableConfig
@@ -76,5 +77,16 @@ async def chat_node(state: AgentState, config: RunnableConfig):
workflow.add_edge(START, "chat_node")
workflow.add_edge("chat_node", END)

# Conditionally use a checkpointer based on the environment
# Check for multiple indicators that we're running in LangGraph dev/API mode
is_fast_api = os.environ.get("LANGGRAPH_FAST_API", "false").lower() == "true"

# Compile the graph
agentic_chat_graph = workflow.compile()
if is_fast_api:
# For CopilotKit and other contexts, use MemorySaver
from langgraph.checkpoint.memory import MemorySaver
memory = MemorySaver()
graph = workflow.compile(checkpointer=memory)
else:
# When running in LangGraph API/dev, don't use a custom checkpointer
graph = workflow.compile()
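
For context, every LangGraph agent touched by this PR now follows the same env-gated compile pattern shown above: when `LANGGRAPH_FAST_API` is `"true"` (set by the FastAPI example server before it imports the agent modules), the graph is compiled with an in-memory `MemorySaver` checkpointer; under `langgraph dev` / the LangGraph API it is compiled without one so the platform can manage persistence itself. A minimal, self-contained sketch of that pattern follows; the echo node and graph are placeholders, not code from this repository.

```python
# Sketch of the env-gated checkpointer pattern used across the dojo agents.
# Assumes langgraph is installed; the echo node below is a placeholder.
import os

from langgraph.graph import StateGraph, MessagesState, START, END


def echo_node(state: MessagesState):
    # Placeholder node: the real agents call an LLM here.
    return {"messages": state["messages"]}


workflow = StateGraph(MessagesState)
workflow.add_node("echo_node", echo_node)
workflow.add_edge(START, "echo_node")
workflow.add_edge("echo_node", END)

# The FastAPI example server sets LANGGRAPH_FAST_API=true before importing
# the agent modules, so only that code path gets a MemorySaver.
if os.environ.get("LANGGRAPH_FAST_API", "false").lower() == "true":
    from langgraph.checkpoint.memory import MemorySaver

    graph = workflow.compile(checkpointer=MemorySaver())
else:
    # Under `langgraph dev` / LangGraph API, persistence is provided by the
    # platform, so no custom checkpointer is passed.
    graph = workflow.compile()
```

Compiling both variants to the same exported name `graph` keeps a single entry point per module, which is what the updated `langgraph.json` entries at the bottom of this diff point at.
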
@@ -2,9 +2,10 @@
An example demonstrating agentic generative UI using LangGraph.
"""

import json
import asyncio
from typing import Dict, List, Any, Optional, Literal
from typing import List, Any
import os

# LangGraph imports
from langchain_core.runnables import RunnableConfig
from langgraph.graph import StateGraph, END, START
@@ -189,5 +190,16 @@ async def chat_node(state: AgentState, config: RunnableConfig):
workflow.add_edge("start_flow", "chat_node")
workflow.add_edge("chat_node", END)

# Conditionally use a checkpointer based on the environment
# Check for multiple indicators that we're running in LangGraph dev/API mode
is_fast_api = os.environ.get("LANGGRAPH_FAST_API", "false").lower() == "true"

# Compile the graph
graph = workflow.compile()
if is_fast_api:
# For CopilotKit and other contexts, use MemorySaver
from langgraph.checkpoint.memory import MemorySaver
memory = MemorySaver()
graph = workflow.compile(checkpointer=memory)
else:
# When running in LangGraph API/dev, don't use a custom checkpointer
graph = workflow.compile()
@@ -5,13 +5,15 @@
from dotenv import load_dotenv
load_dotenv()

os.environ["LANGGRAPH_FAST_API"] = "true"

from ag_ui_langgraph import LangGraphAgent, add_langgraph_fastapi_endpoint
from .human_in_the_loop import human_in_the_loop_graph
from .predictive_state_updates import predictive_state_updates_graph
from .shared_state import shared_state_graph
from .tool_based_generative_ui import tool_based_generative_ui_graph
from .agentic_chat import agentic_chat_graph
from .agentic_generative_ui import graph
from .human_in_the_loop.agent import graph as human_in_the_loop_graph
from .predictive_state_updates.agent import graph as predictive_state_updates_graph
from .shared_state.agent import graph as shared_state_graph
from .tool_based_generative_ui.agent import graph as tool_based_generative_ui_graph
from .agentic_chat.agent import graph as agentic_chat_graph
from .agentic_generative_ui.agent import graph as agentic_generative_ui_graph

app = FastAPI(title="LangGraph Dojo Example Server")

@@ -30,7 +32,7 @@
"agentic_generative_ui": LangGraphAgent(
name="agentic_generative_ui",
description="An example for an agentic generative UI flow.",
graph=graph,
graph=agentic_generative_ui_graph,
),
"human_in_the_loop": LangGraphAgent(
name="human_in_the_loop",
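
On the server side, `__init__.py` now sets `LANGGRAPH_FAST_API=true` before importing the agent modules, so every imported `graph` is the MemorySaver-backed variant, and each graph is wrapped in a `LangGraphAgent` and exposed through the FastAPI app. A hedged sketch of that wiring follows; the import path, route, and the exact `add_langgraph_fastapi_endpoint` signature are assumptions rather than verified SDK details.

```python
# Sketch of the server-side wiring; module path and route are illustrative.
import os

from fastapi import FastAPI

# Must be set before the agent modules are imported, since they read it at
# import time to decide whether to attach a MemorySaver checkpointer.
os.environ["LANGGRAPH_FAST_API"] = "true"

from ag_ui_langgraph import LangGraphAgent, add_langgraph_fastapi_endpoint
# Hypothetical absolute import; the real server uses relative imports
# such as `from .agentic_generative_ui.agent import graph as ...`.
from example_server.agentic_generative_ui.agent import graph as agentic_generative_ui_graph

app = FastAPI(title="LangGraph Dojo Example Server")

agent = LangGraphAgent(
    name="agentic_generative_ui",
    description="An example for an agentic generative UI flow.",
    graph=agentic_generative_ui_graph,
)

# Assumed signature: (app, agent, path); check ag_ui_langgraph for the exact API.
add_langgraph_fastapi_endpoint(app, agent, "/agentic_generative_ui")
```
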
@@ -4,15 +4,14 @@

import json
from typing import Dict, List, Any
import os

# LangGraph imports
from langchain_core.runnables import RunnableConfig
from langgraph.graph import StateGraph, END, START
from langgraph.types import Command, interrupt
from langgraph.graph import MessagesState

from copilotkit.langgraph import copilotkit_emit_state, copilotkit_interrupt

# LLM imports
from langchain_openai import ChatOpenAI
from langchain_core.messages import SystemMessage
@@ -181,7 +180,7 @@ async def chat_node(state: Dict[str, Any], config: RunnableConfig):
"steps": state["steps"],
}
)

# If no tool calls or not plan_execution_steps, return to END with the updated messages
return Command(
goto=END,
@@ -207,7 +206,7 @@ async def process_steps_node(state: Dict[str, Any], config: RunnableConfig):
user_response = interrupt({"steps": state["steps"]})
# Store the user response in state for when the node restarts
state["user_response"] = user_response

# Generate the creative completion response
final_prompt = """
Provide a textual description of how you are performing the task.
@@ -224,11 +223,11 @@ async def process_steps_node(state: Dict[str, Any], config: RunnableConfig):

# Add the final response to messages
messages = state["messages"] + [final_response]

# Clear the user_response from state to prepare for future interactions
if "user_response" in state:
state.pop("user_response")

# Return to END with the updated messages
return Command(
goto=END,
@@ -271,5 +270,16 @@ def should_continue(command: Command):
},
)

# Conditionally use a checkpointer based on the environment
# Check for multiple indicators that we're running in LangGraph dev/API mode
is_fast_api = os.environ.get("LANGGRAPH_FAST_API", "false").lower() == "true"

# Compile the graph
human_in_the_loop_graph = workflow.compile()
if is_fast_api:
# For CopilotKit and other contexts, use MemorySaver
from langgraph.checkpoint.memory import MemorySaver
memory = MemorySaver()
graph = workflow.compile(checkpointer=memory)
else:
# When running in LangGraph API/dev, don't use a custom checkpointer
graph = workflow.compile()
@@ -2,9 +2,9 @@
A demo of predictive state updates using LangGraph.
"""

import json
import uuid
from typing import Dict, List, Any, Optional
from typing import List, Any, Optional
import os

# LangGraph imports
from langchain_core.runnables import RunnableConfig
@@ -174,5 +174,17 @@ async def chat_node(state: AgentState, config: RunnableConfig):
workflow.add_edge("start_flow", "chat_node")
workflow.add_edge("chat_node", END)

# Conditionally use a checkpointer based on the environment
# Check for multiple indicators that we're running in LangGraph dev/API mode
is_fast_api = os.environ.get("LANGGRAPH_FAST_API", "false").lower() == "true"

# Compile the graph
predictive_state_updates_graph = workflow.compile()
if is_fast_api:
# For CopilotKit and other contexts, use MemorySaver
from langgraph.checkpoint.memory import MemorySaver
memory = MemorySaver()
graph = workflow.compile(checkpointer=memory)
else:
# When running in LangGraph API/dev, don't use a custom checkpointer
graph = workflow.compile()

@@ -5,6 +5,7 @@
import json
from enum import Enum
from typing import Dict, List, Any, Optional
import os

# LangGraph imports
from langchain_core.runnables import RunnableConfig
@@ -297,5 +298,16 @@ async def chat_node(state: Dict[str, Any], config: RunnableConfig):
workflow.add_edge("start_flow", "chat_node")
workflow.add_edge("chat_node", END)

# Conditionally use a checkpointer based on the environment
# Check for multiple indicators that we're running in LangGraph dev/API mode
is_fast_api = os.environ.get("LANGGRAPH_FAST_API", "false").lower() == "true"

# Compile the graph
shared_state_graph = workflow.compile()
if is_fast_api:
# For CopilotKit and other contexts, use MemorySaver
from langgraph.checkpoint.memory import MemorySaver
memory = MemorySaver()
graph = workflow.compile(checkpointer=memory)
else:
# When running in LangGraph API/dev, don't use a custom checkpointer
graph = workflow.compile()
@@ -2,7 +2,8 @@
An example demonstrating tool-based generative UI using LangGraph.
"""

from typing import Dict, List, Any, Optional
from typing import List, Any
import os

# LangGraph imports
from langchain_core.runnables import RunnableConfig
@@ -124,6 +125,18 @@ async def chat_node(state: AgentState, config: RunnableConfig):
workflow.add_edge(START, "chat_node")
workflow.add_edge("chat_node", END)

# Conditionally use a checkpointer based on the environment
# Check for multiple indicators that we're running in LangGraph dev/API mode
is_fast_api = os.environ.get("LANGGRAPH_FAST_API", "false").lower() == "true"

# Compile the graph
tool_based_generative_ui_graph = workflow.compile()
if is_fast_api:
# For CopilotKit and other contexts, use MemorySaver
from langgraph.checkpoint.memory import MemorySaver
memory = MemorySaver()
graph = workflow.compile(checkpointer=memory)
else:
# When running in LangGraph API/dev, don't use a custom checkpointer
graph = workflow.compile()


@@ -3,12 +3,12 @@
"dockerfile_lines": [],
"dependencies": ["."],
"graphs": {
"agentic_chat": "./agents/agentic_chat/agent.py:agentic_chat_graph",
"agentic_chat": "./agents/agentic_chat/agent.py:graph",
"agentic_generative_ui": "./agents/agentic_generative_ui/agent.py:graph",
"human_in_the_loop": "./agents/human_in_the_loop/agent.py:human_in_the_loop_graph",
"predictive_state_updates": "./agents/predictive_state_updates/agent.py:predictive_state_updates_graph",
"shared_state": "./agents/shared_state/agent.py:shared_state_graph",
"tool_based_generative_ui": "./agents/tool_based_generative_ui/agent.py:tool_based_generative_ui_graph"
"human_in_the_loop": "./agents/human_in_the_loop/agent.py:graph",
"predictive_state_updates": "./agents/predictive_state_updates/agent.py:graph",
"shared_state": "./agents/shared_state/agent.py:graph",
"tool_based_generative_ui": "./agents/tool_based_generative_ui/agent.py:graph"
},
"env": ".env"
}