diff --git a/typescript-sdk/apps/dojo/e2e2/tests/langgraph-fastapi-agentic-chat.spec.ts b/typescript-sdk/apps/dojo/e2e2/tests/langgraph-fastapi-agentic-chat.spec.ts
new file mode 100644
index 000000000..45aae48d7
--- /dev/null
+++ b/typescript-sdk/apps/dojo/e2e2/tests/langgraph-fastapi-agentic-chat.spec.ts
@@ -0,0 +1,22 @@
+import { test, expect } from '@playwright/test';
+
+test('renders initial message', async ({ page }) => {
+  await page.goto('http://localhost:9999/langgraph-fastapi/feature/agentic_chat');
+
+  await expect(page.getByText('Hi, I\'m an agent. Want to chat?')).toBeVisible();
+});
+
+test('responds to user message', async ({ page }) => {
+  await page.goto('http://localhost:9999/langgraph-fastapi/feature/agentic_chat');
+
+  const textarea = page.getByPlaceholder('Type a message...');
+  await textarea.fill('How many sides are in a square? Please answer in one word. Do not use any punctuation, just the number in word form.');
+  await page.keyboard.press('Enter');
+
+  await page.locator('.copilotKitInputControls button.copilotKitInputControlButton').click();
+
+  await expect(page.locator('.copilotKitMessage')).toHaveCount(3);
+  await expect(page.locator('.copilotKitMessage.copilotKitAssistantMessage')).toHaveCount(2);
+  await expect(page.locator('.copilotKitMessage.copilotKitUserMessage')).toHaveCount(1);
+  await expect(page.locator('.copilotKitMessage.copilotKitAssistantMessage').last()).toHaveText('four', { ignoreCase: true });
+});
\ No newline at end of file
diff --git a/typescript-sdk/apps/dojo/e2e2/tests/langgraph-typescript-agentic-chat.spec.ts b/typescript-sdk/apps/dojo/e2e2/tests/langgraph-typescript-agentic-chat.spec.ts
new file mode 100644
index 000000000..011efd3ff
--- /dev/null
+++ b/typescript-sdk/apps/dojo/e2e2/tests/langgraph-typescript-agentic-chat.spec.ts
@@ -0,0 +1,22 @@
+import { test, expect } from '@playwright/test';
+
+test('renders initial message', async ({ page }) => {
+  await page.goto('http://localhost:9999/langgraph-typescript/feature/agentic_chat');
+
+  await expect(page.getByText('Hi, I\'m an agent. Want to chat?')).toBeVisible();
+});
+
+test('responds to user message', async ({ page }) => {
+  await page.goto('http://localhost:9999/langgraph-typescript/feature/agentic_chat');
+
+  const textarea = page.getByPlaceholder('Type a message...');
+  await textarea.fill('How many sides are in a square? Please answer in one word. Do not use any punctuation, just the number in word form.');
+  await page.keyboard.press('Enter');
+
+  await page.locator('.copilotKitInputControls button.copilotKitInputControlButton').click();
+
+  await expect(page.locator('.copilotKitMessage')).toHaveCount(3);
+  await expect(page.locator('.copilotKitMessage.copilotKitAssistantMessage')).toHaveCount(2);
+  await expect(page.locator('.copilotKitMessage.copilotKitUserMessage')).toHaveCount(1);
+  await expect(page.locator('.copilotKitMessage.copilotKitAssistantMessage').last()).toHaveText('four', { ignoreCase: true });
+});
\ No newline at end of file
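Note: the two specs above differ only in the integration slug, so they could be
generated from one shared factory. A minimal sketch (the helper name and file
are hypothetical, not part of this patch):

    // e2e2/tests/agentic-chat.shared.ts (hypothetical helper)
    import { test, expect } from '@playwright/test';

    export function defineAgenticChatSpecs(integration: string) {
      const url = `http://localhost:9999/${integration}/feature/agentic_chat`;

      test(`${integration}: renders initial message`, async ({ page }) => {
        await page.goto(url);
        await expect(page.getByText("Hi, I'm an agent. Want to chat?")).toBeVisible();
      });
    }

    // langgraph-fastapi-agentic-chat.spec.ts would then reduce to:
    // defineAgenticChatSpecs('langgraph-fastapi');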
diff --git a/typescript-sdk/apps/dojo/scripts/generate-content-json.ts b/typescript-sdk/apps/dojo/scripts/generate-content-json.ts
index d06693c70..b24119081 100644
--- a/typescript-sdk/apps/dojo/scripts/generate-content-json.ts
+++ b/typescript-sdk/apps/dojo/scripts/generate-content-json.ts
@@ -5,27 +5,27 @@ import path from "path";
 function parseAgentsFile(): Array<{id: string, agentKeys: string[]}> {
   const agentsFilePath = path.join(__dirname, '../src/agents.ts');
   const agentsContent = fs.readFileSync(agentsFilePath, 'utf8');
-  
+
   const agentConfigs: Array<{id: string, agentKeys: string[]}> = [];
-  
+
   // Split the content to process each agent configuration individually
   const agentBlocks = agentsContent.split(/(?=\s*{\s*id:\s*["'])/);
-  
+
   for (const block of agentBlocks) {
     // Extract the ID
     const idMatch = block.match(/id:\s*["']([^"']+)["']/);
     if (!idMatch) continue;
-    
+
     const id = idMatch[1];
-    
+
     // Find the return object by looking for the pattern and then manually parsing balanced braces
     const returnMatch = block.match(/agents:\s*async\s*\(\)\s*=>\s*{\s*return\s*{/);
     if (!returnMatch) continue;
-    
+
     const startIndex = returnMatch.index! + returnMatch[0].length;
     const returnObjectContent = extractBalancedBraces(block, startIndex);
-    
-    
+
+
     // Extract keys from the return object - only capture keys that are followed by a colon and then 'new'
     // This ensures we only get the top-level keys like "agentic_chat: new ..." not nested keys like "url: ..."
     const keyRegex = /^\s*(\w+):\s*new\s+\w+/gm;
@@ -34,10 +34,10 @@ function parseAgentsFile(): Array<{id: string, agentKeys: string[]}> {
     while ((keyMatch = keyRegex.exec(returnObjectContent)) !== null) {
       keys.push(keyMatch[1]);
     }
-    
+
     agentConfigs.push({ id, agentKeys: keys });
   }
-  
+
   return agentConfigs;
 }
@@ -45,7 +45,7 @@ function parseAgentsFile(): Array<{id: string, agentKeys: string[]}> {
 function extractBalancedBraces(text: string, startIndex: number): string {
   let braceCount = 0;
   let i = startIndex;
-  
+
   while (i < text.length) {
     if (text[i] === '{') {
       braceCount++;
@@ -58,7 +58,7 @@
     }
     i++;
   }
-  
+
   return '';
 }
@@ -71,15 +71,15 @@ async function getFile(_filePath: string | undefined, _fileName?: string) {
     console.warn(`File path is undefined, skipping.`);
     return {}
   }
-  
+
   const fileName = _fileName ?? _filePath.split('/').pop() ?? ''
   const filePath = _fileName ? path.join(_filePath, fileName) : _filePath;
-  
+
   // Check if it's a remote URL
   const isRemoteUrl = _filePath.startsWith('http://') || _filePath.startsWith('https://');
-  
+
   let content: string;
-  
+
   try {
     if (isRemoteUrl) {
       // Convert GitHub URLs to raw URLs for direct file access
@@ -87,7 +87,7 @@ async function getFile(_filePath: string | undefined, _fileName?: string) {
       if (_filePath.includes('github.com') && _filePath.includes('/blob/')) {
         fetchUrl = _filePath.replace('github.com', 'raw.githubusercontent.com').replace('/blob/', '/');
       }
-      
+
       // Fetch remote file content
       console.log(`Fetching remote file: ${fetchUrl}`);
       const response = await fetch(fetchUrl);
@@ -177,6 +177,15 @@ const agentFilesMapper: Record<string, (agentKeys: string[]) => Record<string, string[]>> = {
+  'langgraph': (agentKeys: string[]) => {
+    return agentKeys.reduce((acc, agentId) => ({
+      ...acc,
+      [agentId]: [
+        path.join(__dirname, integrationsFolderPath, `/langgraph/examples/python/agents/${agentId}/agent.py`),
+        path.join(__dirname, integrationsFolderPath, `/langgraph/examples/typescript/src/agents/${agentId}/agent.ts`)
+      ]
+    }), {})
+  },
   'langgraph-fastapi': (agentKeys: string[]) => {
     return agentKeys.reduce((acc, agentId) => ({
       ...acc,
@@ -228,6 +237,6 @@ async function runGenerateContent() {
     path.join(__dirname, "../src/files.json"),
     JSON.stringify(result, null, 2)
   );
-  
+
   console.log("Successfully generated src/files.json");
 })();
\ No newline at end of file
diff --git a/typescript-sdk/apps/dojo/scripts/prep-dojo-everything.js b/typescript-sdk/apps/dojo/scripts/prep-dojo-everything.js
index e9ce67861..bf4faf463 100755
--- a/typescript-sdk/apps/dojo/scripts/prep-dojo-everything.js
+++ b/typescript-sdk/apps/dojo/scripts/prep-dojo-everything.js
@@ -61,7 +61,10 @@ const crewai = {
 const langgraphFastapi = {
   command: 'poetry install',
   name: 'LG FastAPI',
-  cwd: path.join(integrationsRoot, 'langgraph/python/ag_ui_langgraph/examples'),
+  cwd: path.join(integrationsRoot, 'langgraph/examples/python'),
+  env: {
+    POETRY_VIRTUALENVS_IN_PROJECT: "false"
+  }
 }
 
 // Langgraph (Platform {typescript})
@@ -115,7 +118,7 @@ async function main() {
   serverStarterAllFeatures,
   agno,
   crewai,
-  // langgraphFastapi, // Disabled until build fixes
+  langgraphFastapi,
   langgraphPlatformTypescript,
   llamaIndex,
   mastra,
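The prep script above and the run script below both set
POETRY_VIRTUALENVS_IN_PROJECT: "false" so Poetry keeps its virtualenv out of
the example folder. A sketch of how a proc entry like langgraphFastapi might be
launched (the runner's internals are not part of this diff; the spawn options
here are illustrative assumptions):

    import { spawn } from 'node:child_process';

    type Proc = { command: string; name: string; cwd: string; env?: Record<string, string | number> };

    function launch(proc: Proc) {
      const [cmd, ...args] = proc.command.split(' ');
      // Stringify numeric values (e.g. PORT: 8004) and merge over the parent env.
      const extra = Object.fromEntries(
        Object.entries(proc.env ?? {}).map(([k, v]) => [k, String(v)]),
      );
      return spawn(cmd, args, { cwd: proc.cwd, env: { ...process.env, ...extra }, stdio: 'inherit' });
    }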
diff --git a/typescript-sdk/apps/dojo/scripts/run-dojo-everything.js b/typescript-sdk/apps/dojo/scripts/run-dojo-everything.js
index 4e148b8d2..ec945ec00 100755
--- a/typescript-sdk/apps/dojo/scripts/run-dojo-everything.js
+++ b/typescript-sdk/apps/dojo/scripts/run-dojo-everything.js
@@ -63,8 +63,11 @@ const crewai = {
 const langgraphFastapi = {
   command: 'poetry run dev',
   name: 'LG FastAPI',
-  cwd: path.join(integrationsRoot, 'langgraph/python/ag_ui_langgraph/examples'),
-  env: {PORT: 8004},
+  cwd: path.join(integrationsRoot, 'langgraph/examples/python'),
+  env: {
+    PORT: 8004,
+    POETRY_VIRTUALENVS_IN_PROJECT: "false"
+  },
 }
 
 // Langgraph (Platform {python})
@@ -119,10 +122,8 @@ const dojo = {
     AGNO_URL: 'http://localhost:8002',
     CREW_AI_URL: 'http://localhost:8003',
     LANGGRAPH_FAST_API_URL: 'http://localhost:8004',
-    // TODO: Move this to run 2 platforms for testing. 
-    LANGGRAPH_URL: 'http://localhost:8005',
-    // LANGGRAPH_PLATFORM_PYTHON_URL: 'http://localhost:8005',
-    // LANGGRAPH_PLATFORM_TYPESCRIPT_URL: 'http://localhost:8006',
+    LANGGRAPH_PYTHON_URL: 'http://localhost:8005',
+    LANGGRAPH_TYPESCRIPT_URL: 'http://localhost:8006',
     LLAMA_INDEX_URL: 'http://localhost:8007',
     MASTRA_URL: 'http://localhost:8008',
     PYDANTIC_AI_URL: 'http://localhost:8009',
@@ -135,9 +136,8 @@ const procs = [
   serverStarterAllFeatures,
   agno,
   crewai,
-  // langgraphFastapi, // Disabled until it runs
+  langgraphFastapi,
   langgraphPlatformPython,
-  // TODO: Also run the typescript version of langgraph.
   langgraphPlatformTypescript,
   llamaIndex,
   mastra,
diff --git a/typescript-sdk/apps/dojo/src/agents.ts b/typescript-sdk/apps/dojo/src/agents.ts
index d71e431ae..5e2e85044 100644
--- a/typescript-sdk/apps/dojo/src/agents.ts
+++ b/typescript-sdk/apps/dojo/src/agents.ts
@@ -115,29 +115,32 @@ export const agentsIntegrations: AgentIntegrationConfig[] = [
     agents: async () => {
       return {
         agentic_chat: new LangGraphAgent({
-          deploymentUrl: envVars.langgraphUrl,
+          deploymentUrl: envVars.langgraphPythonUrl,
           graphId: "agentic_chat",
         }),
         agentic_generative_ui: new LangGraphAgent({
-          deploymentUrl: envVars.langgraphUrl,
+          deploymentUrl: envVars.langgraphPythonUrl,
           graphId: "agentic_generative_ui",
         }),
         human_in_the_loop: new LangGraphAgent({
-          deploymentUrl: envVars.langgraphUrl,
+          deploymentUrl: envVars.langgraphPythonUrl,
           graphId: "human_in_the_loop",
         }),
         predictive_state_updates: new LangGraphAgent({
-          deploymentUrl: envVars.langgraphUrl,
+          deploymentUrl: envVars.langgraphPythonUrl,
           graphId: "predictive_state_updates",
         }),
         shared_state: new LangGraphAgent({
-          deploymentUrl: envVars.langgraphUrl,
+          deploymentUrl: envVars.langgraphPythonUrl,
           graphId: "shared_state",
         }),
         tool_based_generative_ui: new LangGraphAgent({
-          deploymentUrl: envVars.langgraphUrl,
+          deploymentUrl: envVars.langgraphPythonUrl,
           graphId: "tool_based_generative_ui",
         }),
+        agentic_chat_reasoning: new LangGraphHttpAgent({
+          url: `${envVars.langgraphPythonUrl}/agent/agentic_chat_reasoning`,
+        }),
       };
     },
   },
@@ -164,11 +167,42 @@ export const agentsIntegrations: AgentIntegrationConfig[] = [
           url: `${envVars.langgraphFastApiUrl}/agent/tool_based_generative_ui`,
         }),
         agentic_chat_reasoning: new LangGraphHttpAgent({
-          url: "http://localhost:8000/agent/agentic_chat_reasoning",
+          url: `${envVars.langgraphFastApiUrl}/agent/agentic_chat_reasoning`,
         }),
       };
     },
   },
+  {
+    id: "langgraph-typescript",
+    agents: async () => {
+      return {
+        agentic_chat: new LangGraphAgent({
+          deploymentUrl: envVars.langgraphTypescriptUrl,
+          graphId: "agentic_chat",
+        }),
+        agentic_generative_ui: new LangGraphAgent({
+          deploymentUrl: envVars.langgraphTypescriptUrl,
+          graphId: "agentic_generative_ui",
+        }),
+        human_in_the_loop: new LangGraphAgent({
+          deploymentUrl: envVars.langgraphTypescriptUrl,
+          graphId: "human_in_the_loop",
+        }),
+        predictive_state_updates: new LangGraphAgent({
+          deploymentUrl: envVars.langgraphTypescriptUrl,
+          graphId: "predictive_state_updates",
+        }),
+        shared_state: new LangGraphAgent({
+          deploymentUrl: envVars.langgraphTypescriptUrl,
+          graphId: "shared_state",
+        }),
+        tool_based_generative_ui: new LangGraphAgent({
+          deploymentUrl: envVars.langgraphTypescriptUrl,
+          graphId: "tool_based_generative_ui",
+        })
+      };
+    },
+  },
   {
     id: "agno",
     agents: async () => {
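The env.ts change below defines the two variables these agents read. A quick
consumption sketch (values shown are the defaults from this diff when the
corresponding LANGGRAPH_*_URL variables are unset; run-dojo-everything.js above
sets them to ports 8005 and 8006):

    import getEnvVars from './env';

    const envVars = getEnvVars();
    envVars.langgraphPythonUrl;     // LANGGRAPH_PYTHON_URL     || 'http://localhost:2024'
    envVars.langgraphTypescriptUrl; // LANGGRAPH_TYPESCRIPT_URL || 'http://localhost:8000'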
diff --git a/typescript-sdk/apps/dojo/src/env.ts b/typescript-sdk/apps/dojo/src/env.ts
index df4df2f37..2e9a6a66a 100644
--- a/typescript-sdk/apps/dojo/src/env.ts
+++ b/typescript-sdk/apps/dojo/src/env.ts
@@ -2,8 +2,9 @@ type envVars = {
   serverStarterUrl: string;
   serverStarterAllFeaturesUrl: string;
   mastraUrl: string;
-  langgraphUrl: string;
+  langgraphPythonUrl: string;
   langgraphFastApiUrl: string;
+  langgraphTypescriptUrl: string;
   agnoUrl: string;
   llamaIndexUrl: string;
   crewAiUrl: string;
@@ -24,8 +25,9 @@ export default function getEnvVars(): envVars {
     serverStarterUrl: process.env.SERVER_STARTER_URL || 'http://localhost:8000',
     serverStarterAllFeaturesUrl: process.env.SERVER_STARTER_ALL_FEATURES_URL || 'http://localhost:8000',
     mastraUrl: process.env.MASTRA_URL || 'http://localhost:4111',
-    langgraphUrl: process.env.LANGGRAPH_URL || 'http://localhost:2024',
+    langgraphPythonUrl: process.env.LANGGRAPH_PYTHON_URL || 'http://localhost:2024',
     langgraphFastApiUrl: process.env.LANGGRAPH_FAST_API_URL || 'http://localhost:8000',
+    langgraphTypescriptUrl: process.env.LANGGRAPH_TYPESCRIPT_URL || 'http://localhost:8000',
     agnoUrl: process.env.AGNO_URL || 'http://localhost:9001',
     llamaIndexUrl: process.env.LLAMA_INDEX_URL || 'http://localhost:9000',
     crewAiUrl: process.env.CREW_AI_URL || 'http://localhost:9002',
diff --git a/typescript-sdk/apps/dojo/src/files.json b/typescript-sdk/apps/dojo/src/files.json
index c426785ed..de4f8d15c 100644
--- a/typescript-sdk/apps/dojo/src/files.json
+++ b/typescript-sdk/apps/dojo/src/files.json
@@ -581,6 +581,33 @@
       "type": "file"
     }
   ],
+  "langgraph::agentic_chat_reasoning": [
+    {
+      "name": "page.tsx",
+      "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { ChevronDown } from \"lucide-react\";\nimport { Button } from \"@/components/ui/button\";\nimport {\n DropdownMenu,\n DropdownMenuContent,\n DropdownMenuItem,\n DropdownMenuLabel,\n DropdownMenuSeparator,\n DropdownMenuTrigger,\n} from \"@/components/ui/dropdown-menu\";\n\ninterface AgenticChatProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticChat: React.FC<AgenticChatProps> = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\ninterface AgentState {\n model: string;\n}\n\nconst Chat = () => {\n const [background, setBackground] = useState(\"--copilot-kit-background-color\");\n const { state: agentState, setState: setAgentState } = useCoAgent<AgentState>({\n name: \"agentic_chat_reasoning\",\n initialState: {\n model: \"OpenAI\",\n },\n });\n\n // Initialize model if not set\n const selectedModel = agentState?.model || \"OpenAI\";\n\n const handleModelChange = (model: string) => {\n setAgentState({ model });\n };\n\n useCopilotAction({\n name: \"change_background\",\n description:\n \"Change the background color of the chat. Can be anything that the CSS background attribute accepts. Regular colors, linear or radial gradients etc.\",\n parameters: [\n {\n name: \"background\",\n type: \"string\",\n description: \"The background. Prefer gradients.\",\n },\n ],\n handler: ({ background }) => {\n setBackground(background);\n },\n });\n\n return (\n
\n {/* Reasoning Model Dropdown */}\n
\n
\n
\n \n Reasoning Model:\n \n \n \n \n \n \n Select Model\n \n handleModelChange(\"OpenAI\")}>\n OpenAI\n \n handleModelChange(\"Anthropic\")}>\n Anthropic\n \n handleModelChange(\"Gemini\")}>\n Gemini\n \n \n \n
\n
\n
\n\n {/* Chat Container */}\n
\n
\n \n
\n
\n
\n );\n};\n\nexport default AgenticChat;\n", + "language": "typescript", + "type": "file" + }, + { + "name": "style.css", + "content": ".copilotKitInput {\n border-bottom-left-radius: 0.75rem;\n border-bottom-right-radius: 0.75rem;\n border-top-left-radius: 0.75rem;\n border-top-right-radius: 0.75rem;\n border: 1px solid var(--copilot-kit-separator-color) !important;\n}\n \n.copilotKitChat {\n background-color: #fff !important;\n}\n ", + "language": "css", + "type": "file" + }, + { + "name": "README.mdx", + "content": "# ๐Ÿค– Agentic Chat with Reasoning\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **agentic chat** capabilities with **frontend\ntool integration**:\n\n1. **Natural Conversation**: Chat with your Copilot in a familiar chat interface\n2. **Frontend Tool Execution**: The Copilot can directly interacts with your UI\n by calling frontend functions\n3. **Seamless Integration**: Tools defined in the frontend and automatically\n discovered and made available to the agent\n\n## How to Interact\n\nTry asking your Copilot to:\n\n- \"Can you change the background color to something more vibrant?\"\n- \"Make the background a blue to purple gradient\"\n- \"Set the background to a sunset-themed gradient\"\n- \"Change it back to a simple light color\"\n\nYou can also chat about other topics - the agent will respond conversationally\nwhile having the ability to use your UI tools when appropriate.\n\n## โœจ Frontend Tool Integration in Action\n\n**What's happening technically:**\n\n- The React component defines a frontend function using `useCopilotAction`\n- CopilotKit automatically exposes this function to the agent\n- When you make a request, the agent determines whether to use the tool\n- The agent calls the function with the appropriate parameters\n- The UI immediately updates in response\n\n**What you'll see in this demo:**\n\n- The Copilot understands requests to change the background\n- It generates CSS values for colors and gradients\n- When it calls the tool, the background changes instantly\n- The agent provides a conversational response about the changes it made\n\nThis technique of exposing frontend functions to your Copilot can be extended to\nany UI manipulation you want to enable, from theme changes to data filtering,\nnavigation, or complex UI state management!\n", + "language": "markdown", + "type": "file" + }, + { + "name": "agent.py", + "content": "\"\"\"\nA simple agentic chat flow using LangGraph instead of CrewAI.\n\"\"\"\n\nfrom typing import List, Any, Optional\nimport os\n\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_openai import ChatOpenAI\nfrom langchain_anthropic import ChatAnthropic\nfrom langchain_google_genai import ChatGoogleGenerativeAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.graph import MessagesState\nfrom langgraph.types import Command\nfrom langgraph.checkpoint.memory import MemorySaver\n\nclass AgentState(MessagesState):\n \"\"\"\n State of our graph.\n \"\"\"\n tools: List[Any]\n model: str\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node based on the ReAct design pattern. 
It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see:\n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n\n # 1. Define the model\n model = ChatOpenAI(model=\"o3\")\n if state[\"model\"] == \"Anthropic\":\n model = ChatAnthropic(\n model=\"claude-sonnet-4-20250514\",\n thinking={\"type\": \"enabled\", \"budget_tokens\": 2000}\n )\n elif state[\"model\"] == \"Gemini\":\n model = ChatGoogleGenerativeAI(model=\"gemini-2.5-pro\", thinking_budget=1024)\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # 2. Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n # your_tool_here\n ],\n )\n\n # 3. Define the system message by which the chat model will be run\n system_message = SystemMessage(\n content=\"You are a helpful assistant.\"\n )\n\n # 4. Run the model to generate a response\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n # 6. We've handled all tool calls, so we can end the graph.\n return Command(\n goto=END,\n update={\n \"messages\": response\n }\n )\n\n# Define a new graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"chat_node\")\n\n# Add explicit edges, matching the pattern in other examples\nworkflow.add_edge(START, \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()", + "language": "python", + "type": "file" + }, + {} + ], "langgraph-fastapi::agentic_chat": [ { "name": "page.tsx", @@ -763,6 +790,198 @@ "type": "file" } ], + "langgraph-typescript::agentic_chat": [ + { + "name": "page.tsx", + "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\n\ninterface AgenticChatProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticChat: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\nconst Chat = () => {\n const [background, setBackground] = useState(\"--copilot-kit-background-color\");\n\n useCopilotAction({\n name: \"change_background\",\n description:\n \"Change the background color of the chat. Can be anything that the CSS background attribute accepts. Regular colors, linear of radial gradients etc.\",\n parameters: [\n {\n name: \"background\",\n type: \"string\",\n description: \"The background. Prefer gradients.\",\n },\n ],\n handler: ({ background }) => {\n setBackground(background);\n return {\n status: \"success\",\n message: `Background changed to ${background}`,\n };\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\nexport default AgenticChat;\n", + "language": "typescript", + "type": "file" + }, + { + "name": "style.css", + "content": ".copilotKitInput {\n border-bottom-left-radius: 0.75rem;\n border-bottom-right-radius: 0.75rem;\n border-top-left-radius: 0.75rem;\n border-top-right-radius: 0.75rem;\n border: 1px solid var(--copilot-kit-separator-color) !important;\n}\n \n.copilotKitChat {\n background-color: #fff !important;\n}\n ", + "language": "css", + "type": "file" + }, + { + "name": "README.mdx", + "content": "# ๐Ÿค– Agentic Chat with Frontend Tools\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **agentic chat** capabilities with **frontend\ntool integration**:\n\n1. **Natural Conversation**: Chat with your Copilot in a familiar chat interface\n2. **Frontend Tool Execution**: The Copilot can directly interacts with your UI\n by calling frontend functions\n3. **Seamless Integration**: Tools defined in the frontend and automatically\n discovered and made available to the agent\n\n## How to Interact\n\nTry asking your Copilot to:\n\n- \"Can you change the background color to something more vibrant?\"\n- \"Make the background a blue to purple gradient\"\n- \"Set the background to a sunset-themed gradient\"\n- \"Change it back to a simple light color\"\n\nYou can also chat about other topics - the agent will respond conversationally\nwhile having the ability to use your UI tools when appropriate.\n\n## โœจ Frontend Tool Integration in Action\n\n**What's happening technically:**\n\n- The React component defines a frontend function using `useCopilotAction`\n- CopilotKit automatically exposes this function to the agent\n- When you make a request, the agent determines whether to use the tool\n- The agent calls the function with the appropriate parameters\n- The UI immediately updates in response\n\n**What you'll see in this demo:**\n\n- The Copilot understands requests to change the background\n- It generates CSS values for colors and gradients\n- When it calls the tool, the background changes instantly\n- The agent provides a conversational response about the changes it made\n\nThis technique of exposing frontend functions to your Copilot can be extended to\nany UI manipulation you want to enable, from theme changes to data filtering,\nnavigation, or complex UI state management!\n", + "language": "markdown", + "type": "file" + }, + { + "name": "agent.py", + "content": "\"\"\"\nA simple agentic chat flow using LangGraph instead of CrewAI.\n\"\"\"\n\nfrom typing import List, Any, Optional\nimport os\n\n# Updated imports for LangGraph\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.graph import MessagesState\nfrom langgraph.types import Command\n\nclass AgentState(MessagesState):\n \"\"\"\n State of our graph.\n \"\"\"\n tools: List[Any]\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see: \n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n # 1. 
Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # 2. Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n # your_tool_here\n ],\n\n # 2.1 Disable parallel tool calls to avoid race conditions,\n # enable this for faster performance if you want to manage\n # the complexity of running tool calls in parallel.\n parallel_tool_calls=False,\n )\n\n # 3. Define the system message by which the chat model will be run\n system_message = SystemMessage(\n content=\"You are a helpful assistant.\"\n )\n\n # 4. Run the model to generate a response\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n # 6. We've handled all tool calls, so we can end the graph.\n return Command(\n goto=END,\n update={\n \"messages\": response\n }\n )\n\n# Define a new graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"chat_node\")\n\n# Add explicit edges, matching the pattern in other examples\nworkflow.add_edge(START, \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", + "language": "python", + "type": "file" + }, + { + "name": "agent.ts", + "content": "/**\n * A simple agentic chat flow using LangGraph instead of CrewAI.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { Annotation, MessagesAnnotation, StateGraph, Command, START, END } from \"@langchain/langgraph\";\n\nconst AgentStateAnnotation = Annotation.Root({\n tools: Annotation({\n reducer: (x, y) => y ?? x,\n default: () => []\n }),\n ...MessagesAnnotation.spec,\n});\n\ntype AgentState = typeof AgentStateAnnotation.State;\n\nasync function chatNode(state: AgentState, config?: RunnableConfig) {\n /**\n * Standard chat node based on the ReAct design pattern. It handles:\n * - The model to use (and binds in CopilotKit actions and the tools defined above)\n * - The system prompt\n * - Getting a response from the model\n * - Handling tool calls\n *\n * For more about the ReAct design pattern, see: \n * https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n */\n \n // 1. Define the model\n const model = new ChatOpenAI({ model: \"gpt-4o\" });\n \n // Define config for the model\n if (!config) {\n config = { recursionLimit: 25 };\n }\n\n // 2. Bind the tools to the model\n const modelWithTools = model.bindTools(\n [\n ...state.tools,\n // your_tool_here\n ],\n {\n // 2.1 Disable parallel tool calls to avoid race conditions,\n // enable this for faster performance if you want to manage\n // the complexity of running tool calls in parallel.\n parallel_tool_calls: false,\n }\n );\n\n // 3. 
Define the system message by which the chat model will be run\n const systemMessage = new SystemMessage({\n content: \"You are a helpful assistant.\"\n });\n\n // 4. Run the model to generate a response\n const response = await modelWithTools.invoke([\n systemMessage,\n ...state.messages,\n ], config);\n\n // 6. We've handled all tool calls, so we can end the graph.\n return new Command({\n goto: END,\n update: {\n messages: [response]\n }\n })\n}\n\n// Define a new graph \nconst workflow = new StateGraph(AgentStateAnnotation)\n .addNode(\"chat_node\", chatNode)\n .addEdge(START, \"chat_node\");\n\n// Compile the graph\nexport const agenticChatGraph = workflow.compile();", + "language": "ts", + "type": "file" + } + ], + "langgraph-typescript::agentic_generative_ui": [ + { + "name": "page.tsx", + "content": "\"use client\";\nimport React from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgentStateRender } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\n\ninterface AgenticGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticGenerativeUI: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n return (\n \n \n \n );\n};\n\ninterface AgentState {\n steps: {\n description: string;\n status: \"pending\" | \"completed\";\n }[];\n}\n\nconst Chat = () => {\n const { theme } = useTheme();\n useCoAgentStateRender({\n name: \"agentic_generative_ui\",\n render: ({ state }) => {\n if (!state.steps || state.steps.length === 0) {\n return null;\n }\n\n const completedCount = state.steps.filter(step => step.status === \"completed\").length;\n const progressPercentage = (completedCount / state.steps.length) * 100;\n\n return (\n
\n
\n {/* Header */}\n
\n
\n

\n Task Progress\n

\n
\n {completedCount}/{state.steps.length} Complete\n
\n
\n \n {/* Progress Bar */}\n
\n
\n
\n
\n
\n\n {/* Steps */}\n
\n {state.steps.map((step, index) => {\n const isCompleted = step.status === \"completed\";\n const isCurrentPending = step.status === \"pending\" && \n index === state.steps.findIndex((s) => s.status === \"pending\");\n const isFuturePending = step.status === \"pending\" && !isCurrentPending;\n\n return (\n
\n {/* Connector Line */}\n {index < state.steps.length - 1 && (\n
\n )}\n\n {/* Status Icon */}\n
\n {isCompleted ? (\n \n ) : isCurrentPending ? (\n \n ) : (\n \n )}\n
\n\n {/* Step Content */}\n
\n
\n {step.description}\n
\n {isCurrentPending && (\n
\n Processing...\n
\n )}\n
\n\n {/* Animated Background for Current Step */}\n {isCurrentPending && (\n
\n )}\n
\n );\n })}\n
\n\n {/* Decorative Elements */}\n
\n
\n
\n
\n );\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\n// Enhanced Icons\nfunction CheckIcon() {\n return (\n \n \n \n );\n}\n\nfunction SpinnerIcon() {\n return (\n \n \n \n \n );\n}\n\nfunction ClockIcon({ theme }: { theme?: string }) {\n return (\n \n \n \n \n );\n}\n\nexport default AgenticGenerativeUI;\n", + "language": "typescript", + "type": "file" + }, + { + "name": "style.css", + "content": ".copilotKitInput {\n border-bottom-left-radius: 0.75rem;\n border-bottom-right-radius: 0.75rem;\n border-top-left-radius: 0.75rem;\n border-top-right-radius: 0.75rem;\n border: 1px solid var(--copilot-kit-separator-color) !important;\n}\n\n.copilotKitChat {\n background-color: #fff !important;\n}\n", + "language": "css", + "type": "file" + }, + { + "name": "README.mdx", + "content": "# ๐Ÿš€ Agentic Generative UI Task Executor\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **agentic generative UI** capabilities:\n\n1. **Real-time Status Updates**: The Copilot provides live feedback as it works\n through complex tasks\n2. **Long-running Task Execution**: See how agents can handle extended processes\n with continuous feedback\n3. **Dynamic UI Generation**: The interface updates in real-time to reflect the\n agent's progress\n\n## How to Interact\n\nSimply ask your Copilot to perform any moderately complex task:\n\n- \"Make me a sandwich\"\n- \"Plan a vacation to Japan\"\n- \"Create a weekly workout routine\"\n\nThe Copilot will break down the task into steps and begin \"executing\" them,\nproviding real-time status updates as it progresses.\n\n## โœจ Agentic Generative UI in Action\n\n**What's happening technically:**\n\n- The agent analyzes your request and creates a detailed execution plan\n- Each step is processed sequentially with realistic timing\n- Status updates are streamed to the frontend using CopilotKit's streaming\n capabilities\n- The UI dynamically renders these updates without page refreshes\n- The entire flow is managed by the agent, requiring no manual intervention\n\n**What you'll see in this demo:**\n\n- The Copilot breaks your task into logical steps\n- A status indicator shows the current progress\n- Each step is highlighted as it's being executed\n- Detailed status messages explain what's happening at each moment\n- Upon completion, you receive a summary of the task execution\n\nThis pattern of providing real-time progress for long-running tasks is perfect\nfor scenarios where users benefit from transparency into complex processes -\nfrom data analysis to content creation, system configurations, or multi-stage\nworkflows!\n", + "language": "markdown", + "type": "file" + }, + { + "name": "agent.py", + "content": "\"\"\"\nAn example demonstrating agentic generative UI using LangGraph.\n\"\"\"\n\nimport asyncio\nfrom typing import List, Any, Optional, Annotated\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.callbacks.manager import adispatch_custom_event\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom pydantic import BaseModel, Field\n\nclass Step(BaseModel):\n \"\"\"\n A step in a task.\n \"\"\"\n description: str = Field(description=\"The text of the step in gerund form\")\n status: str = Field(description=\"The status of the step, always 'pending'\")\n\n\n\n# This tool simulates performing a task on the server.\n# The tool 
call will be streamed to the frontend as it is being generated.\n@tool\ndef generate_task_steps_generative_ui(\n steps: Annotated[ # pylint: disable=unused-argument\n List[Step],\n \"An array of 10 step objects, each containing text and status\"\n ]\n):\n \"\"\"\n Make up 10 steps (only a couple of words per step) that are required for a task.\n The step should be in gerund form (i.e. Digging hole, opening door, ...).\n \"\"\"\n\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n steps: List[dict] = []\n tools: List[Any]\n\n\nasync def start_node(state: AgentState, config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n if \"steps\" not in state:\n state[\"steps\"] = []\n\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"steps\": state[\"steps\"]\n }\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node.\n \"\"\"\n system_prompt = \"\"\"\n You are a helpful assistant assisting with any task. \n When asked to do something, you MUST call the function `generate_task_steps_generative_ui`\n that was provided to you.\n If you called the function, you MUST NOT repeat the steps in your next response to the user.\n Just give a very brief summary (one sentence) of what you did with some emojis. \n Always say you actually did the steps, not merely generated them.\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"steps\",\n \"tool\": \"generate_task_steps_generative_ui\",\n \"tool_argument\": \"steps\",\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n generate_task_steps_generative_ui\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model to generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n messages = state[\"messages\"] + [response]\n\n # Extract any tool calls from the response\n if hasattr(response, \"tool_calls\") and response.tool_calls and len(response.tool_calls) > 0:\n # Handle dicts or object (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n if tool_call[\"name\"] == \"generate_task_steps_generative_ui\":\n steps = [\n {\"description\": step[\"description\"], \"status\": step[\"status\"]}\n for step in tool_call[\"args\"][\"steps\"]\n ]\n\n # Add the tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Steps executed.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n state[\"steps\"] = steps\n\n # Return Command to route to simulate_task_node\n for i, _ in enumerate(steps):\n # simulate executing the step\n await asyncio.sleep(1)\n steps[i][\"status\"] = \"completed\"\n # Update the state with the completed step using config\n await adispatch_custom_event(\n \"manually_emit_state\",\n state,\n config=config,\n )\n\n return Command(\n goto='start_node',\n update={\n \"messages\": messages,\n 
\"steps\": state[\"steps\"]\n }\n )\n\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"]\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\n\n# Add nodes\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\n\n# Add edges\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", + "language": "python", + "type": "file" + }, + { + "name": "agent.ts", + "content": "/**\n * An example demonstrating agentic generative UI using LangGraph.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { dispatchCustomEvent } from \"@langchain/core/callbacks/dispatch\";\nimport { Annotation, MessagesAnnotation, StateGraph } from \"@langchain/langgraph\";\n\n// This tool simulates performing a task on the server.\n// The tool call will be streamed to the frontend as it is being generated.\nconst PERFORM_TASK_TOOL = {\n type: \"function\",\n function: {\n name: \"generate_task_steps_generative_ui\",\n description: \"Make up 10 steps (only a couple of words per step) that are required for a task. The step should be in gerund form (i.e. Digging hole, opening door, ...)\",\n parameters: {\n type: \"object\",\n properties: {\n steps: {\n type: \"array\",\n items: {\n type: \"object\",\n properties: {\n description: {\n type: \"string\",\n description: \"The text of the step in gerund form\"\n },\n status: {\n type: \"string\",\n enum: [\"pending\"],\n description: \"The status of the step, always 'pending'\"\n }\n },\n required: [\"description\", \"status\"]\n },\n description: \"An array of 10 step objects, each containing text and status\"\n }\n },\n required: [\"steps\"]\n }\n }\n};\n\nconst AgentStateAnnotation = Annotation.Root({\n steps: Annotation>({\n reducer: (x, y) => y ?? x,\n default: () => []\n }),\n tools: Annotation({\n reducer: (x, y) => y ?? x,\n default: () => []\n }),\n ...MessagesAnnotation.spec,\n});\n\ntype AgentState = typeof AgentStateAnnotation.State;\n\nasync function startFlow(state: AgentState, config?: RunnableConfig) {\n /**\n * This is the entry point for the flow.\n */\n\n if (!state.steps) {\n state.steps = [];\n }\n\n return {\n steps: state.steps || []\n };\n}\n\nasync function chatNode(state: AgentState, config?: RunnableConfig) {\n /**\n * Standard chat node.\n */\n const systemPrompt = `\n You are a helpful assistant assisting with any task. \n When asked to do something, you MUST call the function \\`generate_task_steps_generative_ui\\`\n that was provided to you.\n If you called the function, you MUST NOT repeat the steps in your next response to the user.\n Just give a very brief summary (one sentence) of what you did with some emojis. 
\n Always say you actually did the steps, not merely generated them.\n `;\n\n // Define the model\n const model = new ChatOpenAI({ model: \"gpt-4o\" });\n \n // Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if (!config) {\n config = { recursionLimit: 25 };\n }\n\n // Use \"predict_state\" metadata to set up streaming for the write_document tool\n if (!config.metadata) config.metadata = {};\n config.metadata.predict_state = [{\n state_key: \"steps\",\n tool: \"generate_task_steps_generative_ui\",\n tool_argument: \"steps\",\n }];\n\n // Bind the tools to the model\n const modelWithTools = model.bindTools(\n [\n ...state.tools,\n PERFORM_TASK_TOOL\n ],\n {\n // Disable parallel tool calls to avoid race conditions\n parallel_tool_calls: false,\n }\n );\n\n // Run the model to generate a response\n const response = await modelWithTools.invoke([\n new SystemMessage({ content: systemPrompt }),\n ...state.messages,\n ], config);\n\n const messages = [...state.messages, response];\n\n // Extract any tool calls from the response\n if (response.tool_calls && response.tool_calls.length > 0) {\n const toolCall = response.tool_calls[0];\n \n if (toolCall.name === \"generate_task_steps_generative_ui\") {\n const steps = toolCall.args.steps.map((step: any) => ({\n description: step.description,\n status: step.status\n }));\n \n // Add the tool response to messages\n const toolResponse = {\n role: \"tool\" as const,\n content: \"Steps executed.\",\n tool_call_id: toolCall.id\n };\n\n const updatedMessages = [...messages, toolResponse];\n\n // Simulate executing the steps\n for (let i = 0; i < steps.length; i++) {\n // simulate executing the step\n await new Promise(resolve => setTimeout(resolve, 1000));\n steps[i].status = \"completed\";\n // Update the state with the completed step\n state.steps = steps;\n // Emit custom events to update the frontend\n await dispatchCustomEvent(\"manually_emit_state\", state, config);\n }\n \n return {\n messages: updatedMessages,\n steps: state.steps\n };\n }\n }\n\n return {\n messages: messages,\n steps: state.steps\n };\n}\n\n// Define the graph\nconst workflow = new StateGraph(AgentStateAnnotation)\n .addNode(\"start_flow\", startFlow)\n .addNode(\"chat_node\", chatNode)\n .addEdge(\"__start__\", \"start_flow\")\n .addEdge(\"start_flow\", \"chat_node\")\n .addEdge(\"chat_node\", \"__end__\");\n\n// Compile the graph\nexport const agenticGenerativeUiGraph = workflow.compile();", + "language": "ts", + "type": "file" + } + ], + "langgraph-typescript::human_in_the_loop": [ + { + "name": "page.tsx", + "content": "\"use client\";\nimport React, { useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCopilotAction, useLangGraphInterrupt } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\n\ninterface HumanInTheLoopProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst HumanInTheLoop: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\ninterface Step {\n description: string;\n status: \"disabled\" | \"enabled\" | \"executing\";\n}\n\n// Shared UI Components\nconst StepContainer = ({ theme, children }: { theme?: string; children: React.ReactNode }) => (\n
\n
\n {children}\n
\n
\n);\n\nconst StepHeader = ({ \n theme, \n enabledCount, \n totalCount, \n status, \n showStatus = false \n}: { \n theme?: string; \n enabledCount: number; \n totalCount: number; \n status?: string;\n showStatus?: boolean;\n}) => (\n
\n
\n

\n Select Steps\n

\n
\n
\n {enabledCount}/{totalCount} Selected\n
\n {showStatus && (\n
\n {status === \"executing\" ? \"Ready\" : \"Waiting\"}\n
\n )}\n
\n
\n \n
\n
0 ? (enabledCount / totalCount) * 100 : 0}%` }}\n />\n
\n
\n);\n\nconst StepItem = ({ \n step, \n theme, \n status, \n onToggle, \n disabled = false \n}: { \n step: { description: string; status: string }; \n theme?: string; \n status?: string;\n onToggle: () => void;\n disabled?: boolean;\n}) => (\n
\n \n
\n);\n\nconst ActionButton = ({ \n variant, \n theme, \n disabled, \n onClick, \n children \n}: { \n variant: \"primary\" | \"secondary\" | \"success\" | \"danger\";\n theme?: string;\n disabled?: boolean;\n onClick: () => void;\n children: React.ReactNode;\n}) => {\n const baseClasses = \"px-6 py-3 rounded-lg font-semibold transition-all duration-200\";\n const enabledClasses = \"hover:scale-105 shadow-md hover:shadow-lg\";\n const disabledClasses = \"opacity-50 cursor-not-allowed\";\n \n const variantClasses = {\n primary: \"bg-gradient-to-r from-purple-500 to-purple-700 hover:from-purple-600 hover:to-purple-800 text-white shadow-lg hover:shadow-xl\",\n secondary: theme === \"dark\"\n ? \"bg-slate-700 hover:bg-slate-600 text-white border border-slate-600 hover:border-slate-500\"\n : \"bg-gray-100 hover:bg-gray-200 text-gray-800 border border-gray-300 hover:border-gray-400\",\n success: \"bg-gradient-to-r from-green-500 to-emerald-600 hover:from-green-600 hover:to-emerald-700 text-white shadow-lg hover:shadow-xl\",\n danger: \"bg-gradient-to-r from-red-500 to-red-600 hover:from-red-600 hover:to-red-700 text-white shadow-lg hover:shadow-xl\"\n };\n\n return (\n \n {children}\n \n );\n};\n\nconst DecorativeElements = ({ \n theme, \n variant = \"default\" \n}: { \n theme?: string; \n variant?: \"default\" | \"success\" | \"danger\" \n}) => (\n <>\n
\n
\n \n);\nconst InterruptHumanInTheLoop: React.FC<{\n event: { value: { steps: Step[] } };\n resolve: (value: string) => void;\n}> = ({ event, resolve }) => {\n const { theme } = useTheme();\n \n // Parse and initialize steps data\n let initialSteps: Step[] = [];\n if (event.value && event.value.steps && Array.isArray(event.value.steps)) {\n initialSteps = event.value.steps.map((step: any) => ({\n description: typeof step === \"string\" ? step : step.description || \"\",\n status: typeof step === \"object\" && step.status ? step.status : \"enabled\",\n }));\n }\n\n const [localSteps, setLocalSteps] = useState(initialSteps);\n const enabledCount = localSteps.filter(step => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handlePerformSteps = () => {\n const selectedSteps = localSteps\n .filter((step) => step.status === \"enabled\")\n .map((step) => step.description);\n resolve(\"The user selected the following steps: \" + selectedSteps.join(\", \"));\n };\n\n return (\n \n \n \n
\n {localSteps.map((step, index) => (\n handleStepToggle(index)}\n />\n ))}\n
\n\n
\n \n โœจ\n Perform Steps\n \n {enabledCount}\n \n \n
\n\n \n
\n );\n};\n\nconst Chat = ({ integrationId }: { integrationId: string }) => {\n // Langgraph uses it's own hook to handle human-in-the-loop interactions via langgraph interrupts,\n // This hook won't do anything for other integrations.\n useLangGraphInterrupt({\n render: ({ event, resolve }) => ,\n });\n useCopilotAction({\n name: \"generate_task_steps\",\n description: \"Generates a list of steps for the user to perform\",\n parameters: [\n {\n name: \"steps\",\n type: \"object[]\",\n attributes: [\n {\n name: \"description\",\n type: \"string\",\n },\n {\n name: \"status\",\n type: \"string\",\n enum: [\"enabled\", \"disabled\", \"executing\"],\n },\n ],\n },\n ],\n // Langgraph uses it's own hook to handle human-in-the-loop interactions via langgraph interrupts,\n // so don't use this action for langgraph integration.\n available: ['langgraph', 'langgraph-fastapi'].includes(integrationId) ? 'disabled' : 'enabled',\n renderAndWaitForResponse: ({ args, respond, status }) => {\n return ;\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\nconst StepsFeedback = ({ args, respond, status }: { args: any; respond: any; status: any }) => {\n const { theme } = useTheme();\n const [localSteps, setLocalSteps] = useState([]);\n const [accepted, setAccepted] = useState(null);\n\n useEffect(() => {\n if (status === \"executing\" && localSteps.length === 0) {\n setLocalSteps(args.steps);\n }\n }, [status, args.steps, localSteps]);\n\n if (args.steps === undefined || args.steps.length === 0) {\n return <>;\n }\n\n const steps = localSteps.length > 0 ? localSteps : args.steps;\n const enabledCount = steps.filter((step: any) => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handleReject = () => {\n if (respond) {\n setAccepted(false);\n respond({ accepted: false });\n }\n };\n\n const handleConfirm = () => {\n if (respond) {\n setAccepted(true);\n respond({ accepted: true, steps: localSteps.filter(step => step.status === \"enabled\")});\n }\n };\n\n return (\n \n \n \n
\n {steps.map((step: any, index: any) => (\n handleStepToggle(index)}\n disabled={status !== \"executing\"}\n />\n ))}\n
\n\n {/* Action Buttons - Different logic from InterruptHumanInTheLoop */}\n {accepted === null && (\n
\n \n โœ—\n Reject\n \n \n โœ“\n Confirm\n \n {enabledCount}\n \n \n
\n )}\n\n {/* Result State - Unique to StepsFeedback */}\n {accepted !== null && (\n
\n
\n {accepted ? \"โœ“\" : \"โœ—\"}\n {accepted ? \"Accepted\" : \"Rejected\"}\n
\n
\n )}\n\n \n
\n );\n};\n\n\nexport default HumanInTheLoop;\n", + "language": "typescript", + "type": "file" + }, + { + "name": "style.css", + "content": ".copilotKitInput {\n border-bottom-left-radius: 0.75rem;\n border-bottom-right-radius: 0.75rem;\n border-top-left-radius: 0.75rem;\n border-top-right-radius: 0.75rem;\n border: 1px solid var(--copilot-kit-separator-color) !important;\n}\n\n.copilotKitChat {\n background-color: #fff !important;\n}\n", + "language": "css", + "type": "file" + }, + { + "name": "README.mdx", + "content": "# ๐Ÿค Human-in-the-Loop Task Planner\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **human-in-the-loop** capabilities:\n\n1. **Collaborative Planning**: The Copilot generates task steps and lets you\n decide which ones to perform\n2. **Interactive Decision Making**: Select or deselect steps to customize the\n execution plan\n3. **Adaptive Responses**: The Copilot adapts its execution based on your\n choices, even handling missing steps\n\n## How to Interact\n\nTry these steps to experience the demo:\n\n1. Ask your Copilot to help with a task, such as:\n\n - \"Make me a sandwich\"\n - \"Plan a weekend trip\"\n - \"Organize a birthday party\"\n - \"Start a garden\"\n\n2. Review the suggested steps provided by your Copilot\n\n3. Select or deselect steps using the checkboxes to customize the plan\n\n - Try removing essential steps to see how the Copilot adapts!\n\n4. Click \"Execute Plan\" to see the outcome based on your selections\n\n## โœจ Human-in-the-Loop Magic in Action\n\n**What's happening technically:**\n\n- The agent analyzes your request and breaks it down into logical steps\n- These steps are presented to you through a dynamic UI component\n- Your selections are captured as user input\n- The agent considers your choices when executing the plan\n- The agent adapts to missing steps with creative problem-solving\n\n**What you'll see in this demo:**\n\n- The Copilot provides a detailed, step-by-step plan for your task\n- You have complete control over which steps to include\n- If you remove essential steps, the Copilot provides entertaining and creative\n workarounds\n- The final execution reflects your choices, showing how human input shapes the\n outcome\n- Each response is tailored to your specific selections\n\nThis human-in-the-loop pattern creates a powerful collaborative experience where\nboth human judgment and AI capabilities work together to achieve better results\nthan either could alone!\n", + "language": "markdown", + "type": "file" + }, + { + "name": "agent.py", + "content": "\"\"\"\nA LangGraph implementation of the human-in-the-loop agent.\n\"\"\"\n\nfrom typing import Dict, List, Any, Annotated, Optional\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command, interrupt\nfrom langgraph.graph import MessagesState\nfrom langchain_openai import ChatOpenAI\nfrom pydantic import BaseModel, Field\n\nclass Step(BaseModel):\n \"\"\"\n A step in a task.\n \"\"\"\n description: str = Field(description=\"The text of the step in imperative form\")\n status: str = Field(description=\"The status of the step, always 'enabled'\")\n\n@tool\ndef plan_execution_steps(\n steps: Annotated[ # pylint: disable=unused-argument\n List[Step],\n \"An array of 10 step objects, each containing text and status\"\n ]\n):\n \"\"\"\n Make up 10 steps (only a 
couple of words per step) that are required for a task.\n The step should be in imperative form (i.e. Dig hole, Open door, ...).\n \"\"\"\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n steps: List[Dict[str, str]] = []\n tools: List[Any]\n\nasync def start_node(state: Dict[str, Any], config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n # Initialize steps list if not exists\n if \"steps\" not in state:\n state[\"steps\"] = []\n\n # Return command to route to chat_node\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"steps\": state[\"steps\"],\n }\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node where the agent processes messages and generates responses.\n If task steps are defined, the user can enable/disable them using interrupts.\n \"\"\"\n system_prompt = \"\"\"\n You are a helpful assistant that can perform any task.\n You MUST call the `plan_execution_steps` function when the user asks you to perform a task.\n Always make sure you will provide tasks based on the user query\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o-mini\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"steps\",\n \"tool\": \"plan_execution_steps\",\n \"tool_argument\": \"steps\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n plan_execution_steps\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model and generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages with the response\n messages = state[\"messages\"] + [response]\n\n # Handle tool calls\n if hasattr(response, \"tool_calls\") and response.tool_calls and len(response.tool_calls) > 0:\n # Handle dicts or object (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n if tool_call[\"name\"] == \"plan_execution_steps\":\n # Get the steps from the tool call\n steps_raw = tool_call[\"args\"][\"steps\"]\n\n # Set initial status to \"enabled\" for all steps\n steps_data = []\n\n # Handle different potential formats of steps data\n if isinstance(steps_raw, list):\n for step in steps_raw:\n if isinstance(step, dict) and \"description\" in step:\n steps_data.append({\n \"description\": step[\"description\"],\n \"status\": \"enabled\"\n })\n elif isinstance(step, str):\n steps_data.append({\n \"description\": step,\n \"status\": \"enabled\"\n })\n\n # If no steps were processed correctly, return to END with the updated messages\n if not steps_data:\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n # Update steps in state and emit to frontend\n state[\"steps\"] = steps_data\n\n # Add a tool response to satisfy OpenAI's requirements\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Task steps generated.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n\n # Move to the process_steps_node which will handle the interrupt and 
final response\n return Command(\n goto=\"process_steps_node\",\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n # If no tool calls or not plan_execution_steps, return to END with the updated messages\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n\nasync def process_steps_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n This node handles the user interrupt for step customization and generates the final response.\n \"\"\"\n\n # Check if we already have a user_response in the state\n # This happens when the node restarts after an interrupt\n if \"user_response\" in state and state[\"user_response\"]:\n user_response = state[\"user_response\"]\n else:\n # Use LangGraph interrupt to get user input on steps\n # This will pause execution and wait for user input in the frontend\n user_response = interrupt({\"steps\": state[\"steps\"]})\n # Store the user response in state for when the node restarts\n state[\"user_response\"] = user_response\n\n # Generate the creative completion response\n final_prompt = \"\"\"\n Provide a textual description of how you are performing the task.\n If the user has disabled a step, you are not allowed to perform that step.\n However, you should find a creative workaround to perform the task, and if an essential step is disabled, you can even use\n some humor in the description of how you are performing the task.\n Don't just repeat a list of steps, come up with a creative but short description (3 sentences max) of how you are performing the task.\n \"\"\"\n\n final_response = await ChatOpenAI(model=\"gpt-4o\").ainvoke([\n SystemMessage(content=final_prompt),\n {\"role\": \"user\", \"content\": user_response}\n ], config)\n\n # Add the final response to messages\n messages = state[\"messages\"] + [final_response]\n\n # Clear the user_response from state to prepare for future interactions\n if \"user_response\" in state:\n state.pop(\"user_response\")\n\n # Return to END with the updated messages\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\n\n# Add nodes\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.add_node(\"process_steps_node\", process_steps_node)\n\n# Add edges\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"process_steps_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", + "language": "python", + "type": "file" + }, + { + "name": "agent.ts", + "content": "/**\n * A LangGraph implementation of the human-in-the-loop agent.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { Command, interrupt, Annotation, MessagesAnnotation, 
StateGraph, END, START } from \"@langchain/langgraph\";\n\nconst DEFINE_TASK_TOOL = {\n type: \"function\",\n function: {\n name: \"plan_execution_steps\",\n description: \"Make up 10 steps (only a couple of words per step) that are required for a task. The step should be in imperative form (i.e. Dig hole, Open door, ...)\",\n parameters: {\n type: \"object\",\n properties: {\n steps: {\n type: \"array\",\n items: {\n type: \"object\",\n properties: {\n description: {\n type: \"string\",\n description: \"The text of the step in imperative form\"\n },\n status: {\n type: \"string\",\n enum: [\"enabled\"],\n description: \"The status of the step, always 'enabled'\"\n }\n },\n required: [\"description\", \"status\"]\n },\n description: \"An array of 10 step objects, each containing text and status\"\n }\n },\n required: [\"steps\"]\n }\n }\n};\n\nexport const AgentStateAnnotation = Annotation.Root({\n steps: Annotation<Array<{ description: string; status: string }>>({\n reducer: (x, y) => y ?? x,\n default: () => []\n }),\n tools: Annotation<any[]>(),\n user_response: Annotation<string | undefined>({\n reducer: (x, y) => y ?? x,\n default: () => undefined\n }),\n ...MessagesAnnotation.spec,\n});\nexport type AgentState = typeof AgentStateAnnotation.State;\n\nasync function startFlow(state: AgentState, config?: RunnableConfig): Promise<Command> {\n /**\n * This is the entry point for the flow.\n */\n\n // Initialize steps list if not exists\n if (!state.steps) {\n state.steps = [];\n }\n\n return new Command({\n goto: \"chat_node\",\n update: {\n messages: state.messages,\n steps: state.steps,\n }\n });\n}\n\nasync function chatNode(state: AgentState, config?: RunnableConfig): Promise<Command> {\n /**\n * Standard chat node where the agent processes messages and generates responses.\n * If task steps are defined, the user can enable/disable them using interrupts.\n */\n const systemPrompt = `\n You are a helpful assistant that can perform any task.\n You MUST call the \`plan_execution_steps\` function when the user asks you to perform a task.\n Always make sure the tasks you provide are based on the user's query.\n `;\n\n // Define the model\n const model = new ChatOpenAI({ model: \"gpt-4o-mini\" });\n \n // Define config for the model\n if (!config) {\n config = { recursionLimit: 25 };\n }\n\n // Use \"predict_state\" metadata to set up streaming for the plan_execution_steps tool\n if (!config.metadata) config.metadata = {};\n config.metadata.predict_state = [{\n state_key: \"steps\",\n tool: \"plan_execution_steps\",\n tool_argument: \"steps\"\n }];\n\n // Bind the tools to the model\n const modelWithTools = model.bindTools(\n [\n ...state.tools,\n DEFINE_TASK_TOOL\n ],\n {\n // Disable parallel tool calls to avoid race conditions\n parallel_tool_calls: false,\n }\n );\n\n // Run the model and generate a response\n const response = await modelWithTools.invoke([\n new SystemMessage({ content: systemPrompt }),\n ...state.messages,\n ], config);\n\n // Update messages with the response\n const messages = [...state.messages, response];
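\n \n // Parallel tool calls are disabled when the tools are bound above, so the\n // model returns at most one tool call and only tool_calls[0] is inspected.\n // Handle tool calls\n if (response.tool_calls && response.tool_calls.length > 0) {\n const toolCall = response.tool_calls[0];\n\n if (toolCall.name === \"plan_execution_steps\") {\n // Get the steps from the tool call\n const stepsRaw = toolCall.args.steps || [];\n \n // Set initial status to \"enabled\" for all steps\n const stepsData: Array<{ description: string; status: string }> = [];\n \n // Handle different potential formats of steps data\n if (Array.isArray(stepsRaw)) {\n for (const step of stepsRaw) {\n if (typeof step === 'object' && 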
step.description) {\n stepsData.push({\n description: step.description,\n status: \"enabled\"\n });\n } else if (typeof step === 'string') {\n stepsData.push({\n description: step,\n status: \"enabled\"\n });\n }\n }\n }\n \n // If no steps were processed correctly, return to END with the updated messages\n if (stepsData.length === 0) {\n return new Command({\n goto: END,\n update: {\n messages: messages,\n steps: state.steps,\n }\n });\n }\n\n // Update steps in state and emit to frontend\n state.steps = stepsData;\n \n // Add a tool response to satisfy OpenAI's requirements\n const toolResponse = {\n role: \"tool\" as const,\n content: \"Task steps generated.\",\n tool_call_id: toolCall.id\n };\n \n const updatedMessages = [...messages, toolResponse];\n\n // Move to the process_steps_node which will handle the interrupt and final response\n return new Command({\n goto: \"process_steps_node\",\n update: {\n messages: updatedMessages,\n steps: state.steps,\n }\n });\n }\n }\n \n // If no tool calls or not plan_execution_steps, return to END with the updated messages\n return new Command({\n goto: END,\n update: {\n messages: messages,\n steps: state.steps,\n }\n });\n}\n\nasync function processStepsNode(state: AgentState, config?: RunnableConfig): Promise<Command> {\n /**\n * This node handles the user interrupt for step customization and generates the final response.\n */\n\n let userResponse: string;\n\n // Check if we already have a user_response in the state\n // This happens when the node restarts after an interrupt\n if (state.user_response) {\n userResponse = state.user_response;\n } else {\n // Use LangGraph interrupt to get user input on steps\n // This will pause execution and wait for user input in the frontend\n userResponse = interrupt({ steps: state.steps });\n // Store the user response in state for when the node restarts\n state.user_response = userResponse;\n }\n \n // Generate the creative completion response\n const finalPrompt = `\n Provide a textual description of how you are performing the task.\n If the user has disabled a step, you are not allowed to perform that step.\n However, you should find a creative workaround to perform the task, and if an essential step is disabled, you can even use\n some humor in the description of how you are performing the task.\n Don't just repeat a list of steps, come up with a creative but short description (3 sentences max) of how you are performing the task.\n `;\n \n const finalResponse = await new ChatOpenAI({ model: \"gpt-4o\" }).invoke([\n new SystemMessage({ content: finalPrompt }),\n { role: \"user\", content: userResponse }\n ], config);\n\n // Add the final response to messages\n const messages = [...state.messages, finalResponse];\n \n // Clear the user_response from state to prepare for future interactions\n const newState = { ...state };\n delete newState.user_response;\n \n // Return to END with the updated messages\n return new Command({\n goto: END,\n update: {\n messages: messages,\n steps: state.steps,\n }\n });\n}\n\n// Define the graph\nconst workflow = new StateGraph(AgentStateAnnotation);\n\n// Add nodes\nworkflow.addNode(\"start_flow\", startFlow);\nworkflow.addNode(\"chat_node\", chatNode);\nworkflow.addNode(\"process_steps_node\", processStepsNode);\n\n// Add edges\nworkflow.setEntryPoint(\"start_flow\");\nworkflow.addEdge(START, \"start_flow\");\nworkflow.addEdge(\"start_flow\", \"chat_node\");\nworkflow.addEdge(\"process_steps_node\", END);\n\n// Add conditional edges from chat_node\nworkflow.addConditionalEdges(\n 
\"chat_node\",\n (state: AgentState) => {\n // This would be determined by the Command returned from chat_node\n // For now, we'll assume the logic is handled in the Command's goto property\n return \"continue\";\n },\n {\n \"process_steps_node\": \"process_steps_node\",\n \"continue\": END,\n }\n);\n\n// Compile the graph\nexport const humanInTheLoopGraph = workflow.compile();", + "language": "ts", + "type": "file" + } + ], + "langgraph-typescript::predictive_state_updates": [ + { + "name": "page.tsx", + "content": "\"use client\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\n\nimport MarkdownIt from \"markdown-it\";\nimport React from \"react\";\n\nimport { diffWords } from \"diff\";\nimport { useEditor, EditorContent } from \"@tiptap/react\";\nimport StarterKit from \"@tiptap/starter-kit\";\nimport { useEffect, useState } from \"react\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat, CopilotSidebar } from \"@copilotkit/react-ui\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\nconst extensions = [StarterKit];\n\ninterface PredictiveStateUpdatesProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nexport default function PredictiveStateUpdates({ params }: PredictiveStateUpdatesProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight)\n const chatTitle = 'AI Document Editor'\n const chatDescription = 'Ask me to create or edit a document'\n const initialLabel = 'Hi ๐Ÿ‘‹ How can I help with your document?'\n\n return (\n \n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n
\n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n
\n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n
\n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n ) : (\n \n )}\n \n
\n \n );\n}\n\ninterface AgentState {\n document: string;\n}\n\nconst DocumentEditor = () => {\n const editor = useEditor({\n extensions,\n immediatelyRender: false,\n editorProps: {\n attributes: { class: \"min-h-screen p-10\" },\n },\n });\n const [placeholderVisible, setPlaceholderVisible] = useState(false);\n const [currentDocument, setCurrentDocument] = useState(\"\");\n const { isLoading } = useCopilotChat();\n\n const {\n state: agentState,\n setState: setAgentState,\n nodeName,\n } = useCoAgent({\n name: \"predictive_state_updates\",\n initialState: {\n document: \"\",\n },\n });\n\n useEffect(() => {\n if (isLoading) {\n setCurrentDocument(editor?.getText() || \"\");\n }\n editor?.setEditable(!isLoading);\n }, [isLoading]);\n\n useEffect(() => {\n if (nodeName == \"end\") {\n // set the text one final time when loading is done\n if (currentDocument.trim().length > 0 && currentDocument !== agentState?.document) {\n const newDocument = agentState?.document || \"\";\n const diff = diffPartialText(currentDocument, newDocument, true);\n const markdown = fromMarkdown(diff);\n editor?.commands.setContent(markdown);\n }\n }\n }, [nodeName]);\n\n useEffect(() => {\n if (isLoading) {\n if (currentDocument.trim().length > 0) {\n const newDocument = agentState?.document || \"\";\n const diff = diffPartialText(currentDocument, newDocument);\n const markdown = fromMarkdown(diff);\n editor?.commands.setContent(markdown);\n } else {\n const markdown = fromMarkdown(agentState?.document || \"\");\n editor?.commands.setContent(markdown);\n }\n }\n }, [agentState?.document]);\n\n const text = editor?.getText() || \"\";\n\n useEffect(() => {\n setPlaceholderVisible(text.length === 0);\n\n if (!isLoading) {\n setCurrentDocument(text);\n setAgentState({\n document: text,\n });\n }\n }, [text]);\n\n // TODO(steve): Remove this when all agents have been updated to use write_document tool.\n useCopilotAction({\n name: \"confirm_changes\",\n renderAndWaitForResponse: ({ args, respond, status }) => (\n {\n editor?.commands.setContent(fromMarkdown(currentDocument));\n setAgentState({ document: currentDocument });\n }}\n onConfirm={() => {\n editor?.commands.setContent(fromMarkdown(agentState?.document || \"\"));\n setCurrentDocument(agentState?.document || \"\");\n setAgentState({ document: agentState?.document || \"\" });\n }}\n />\n ),\n });\n\n // Action to write the document.\n useCopilotAction({\n name: \"write_document\",\n description: `Present the proposed changes to the user for review`,\n parameters: [\n {\n name: \"document\",\n type: \"string\",\n description: \"The full updated document in markdown format\",\n },\n ],\n renderAndWaitForResponse({ args, status, respond }) {\n if (status === \"executing\") {\n return (\n {\n editor?.commands.setContent(fromMarkdown(currentDocument));\n setAgentState({ document: currentDocument });\n }}\n onConfirm={() => {\n editor?.commands.setContent(fromMarkdown(agentState?.document || \"\"));\n setCurrentDocument(agentState?.document || \"\");\n setAgentState({ document: agentState?.document || \"\" });\n }}\n />\n );\n }\n return <>;\n },\n });\n\n return (\n
\n {placeholderVisible && (\n
\n Write whatever you want here in Markdown format...\n
\n )}\n \n
\n );\n};\n\ninterface ConfirmChangesProps {\n args: any;\n respond: any;\n status: any;\n onReject: () => void;\n onConfirm: () => void;\n}\n\nfunction ConfirmChanges({ args, respond, status, onReject, onConfirm }: ConfirmChangesProps) {\n const [accepted, setAccepted] = useState(null);\n return (\n
\n

Confirm Changes

\n

Do you want to accept the changes?

\n {accepted === null && (\n
\n {\n if (respond) {\n setAccepted(false);\n onReject();\n respond({ accepted: false });\n }\n }}\n >\n Reject\n \n {\n if (respond) {\n setAccepted(true);\n onConfirm();\n respond({ accepted: true });\n }\n }}\n >\n Confirm\n \n
\n )}\n {accepted !== null && (\n
\n
\n {accepted ? \"โœ“ Accepted\" : \"โœ— Rejected\"}\n
\n
\n )}\n
\n );\n}\n\nfunction fromMarkdown(text: string) {\n const md = new MarkdownIt({\n typographer: true,\n html: true,\n });\n\n return md.render(text);\n}\n\nfunction diffPartialText(oldText: string, newText: string, isComplete: boolean = false) {\n let oldTextToCompare = oldText;\n if (oldText.length > newText.length && !isComplete) {\n // make oldText shorter\n oldTextToCompare = oldText.slice(0, newText.length);\n }\n\n const changes = diffWords(oldTextToCompare, newText);\n\n let result = \"\";\n changes.forEach((part) => {\n if (part.added) {\n result += `${part.value}`;\n } else if (part.removed) {\n result += `${part.value}`;\n } else {\n result += part.value;\n }\n });\n\n if (oldText.length > newText.length && !isComplete) {\n result += oldText.slice(newText.length);\n }\n\n return result;\n}\n\nfunction isAlpha(text: string) {\n return /[a-zA-Z\\u00C0-\\u017F]/.test(text.trim());\n}\n", + "language": "typescript", + "type": "file" + }, + { + "name": "style.css", + "content": "/* Basic editor styles */\n.tiptap-container {\n height: 100vh; /* Full viewport height */\n width: 100vw; /* Full viewport width */\n display: flex;\n flex-direction: column;\n}\n\n.tiptap {\n flex: 1; /* Take up remaining space */\n overflow: auto; /* Allow scrolling if content overflows */\n}\n\n.tiptap :first-child {\n margin-top: 0;\n}\n\n/* List styles */\n.tiptap ul,\n.tiptap ol {\n padding: 0 1rem;\n margin: 1.25rem 1rem 1.25rem 0.4rem;\n}\n\n.tiptap ul li p,\n.tiptap ol li p {\n margin-top: 0.25em;\n margin-bottom: 0.25em;\n}\n\n/* Heading styles */\n.tiptap h1,\n.tiptap h2,\n.tiptap h3,\n.tiptap h4,\n.tiptap h5,\n.tiptap h6 {\n line-height: 1.1;\n margin-top: 2.5rem;\n text-wrap: pretty;\n font-weight: bold;\n}\n\n.tiptap h1,\n.tiptap h2,\n.tiptap h3,\n.tiptap h4,\n.tiptap h5,\n.tiptap h6 {\n margin-top: 3.5rem;\n margin-bottom: 1.5rem;\n}\n\n.tiptap p {\n margin-bottom: 1rem;\n}\n\n.tiptap h1 {\n font-size: 1.4rem;\n}\n\n.tiptap h2 {\n font-size: 1.2rem;\n}\n\n.tiptap h3 {\n font-size: 1.1rem;\n}\n\n.tiptap h4,\n.tiptap h5,\n.tiptap h6 {\n font-size: 1rem;\n}\n\n/* Code and preformatted text styles */\n.tiptap code {\n background-color: var(--purple-light);\n border-radius: 0.4rem;\n color: var(--black);\n font-size: 0.85rem;\n padding: 0.25em 0.3em;\n}\n\n.tiptap pre {\n background: var(--black);\n border-radius: 0.5rem;\n color: var(--white);\n font-family: \"JetBrainsMono\", monospace;\n margin: 1.5rem 0;\n padding: 0.75rem 1rem;\n}\n\n.tiptap pre code {\n background: none;\n color: inherit;\n font-size: 0.8rem;\n padding: 0;\n}\n\n.tiptap blockquote {\n border-left: 3px solid var(--gray-3);\n margin: 1.5rem 0;\n padding-left: 1rem;\n}\n\n.tiptap hr {\n border: none;\n border-top: 1px solid var(--gray-2);\n margin: 2rem 0;\n}\n\n.tiptap s {\n background-color: #f9818150;\n padding: 2px;\n font-weight: bold;\n color: rgba(0, 0, 0, 0.7);\n}\n\n.tiptap em {\n background-color: #b2f2bb;\n padding: 2px;\n font-weight: bold;\n font-style: normal;\n}\n\n.copilotKitWindow {\n box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1);\n}\n\n", + "language": "css", + "type": "file" + }, + { + "name": "README.mdx", + "content": "# ๐Ÿ“ Predictive State Updates Document Editor\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **predictive state updates** for real-time\ndocument collaboration:\n\n1. **Live Document Editing**: Watch as your Copilot makes changes to a document\n in real-time\n2. **Diff Visualization**: See exactly what's being changed as it happens\n3. 
**Streaming Updates**: Changes are displayed character-by-character as the\n Copilot works\n\n## How to Interact\n\nTry these interactions with the collaborative document editor:\n\n- \"Fix the grammar and typos in this document\"\n- \"Make this text more professional\"\n- \"Add a section about [topic]\"\n- \"Summarize this content in bullet points\"\n- \"Change the tone to be more casual\"\n\nWatch as the Copilot processes your request and edits the document in real-time\nright before your eyes.\n\n## โœจ Predictive State Updates in Action\n\n**What's happening technically:**\n\n- The document state is shared between your UI and the Copilot\n- As the Copilot generates content, changes are streamed to the UI\n- Each modification is visualized with additions and deletions\n- The UI renders these changes progressively, without waiting for completion\n- All edits are tracked and displayed in a visually intuitive way\n\n**What you'll see in this demo:**\n\n- Text changes are highlighted in different colors (green for additions, red for\n deletions)\n- The document updates character-by-character, creating a typing-like effect\n- You can see the Copilot's thought process as it refines the content\n- The final document seamlessly incorporates all changes\n- The experience feels collaborative, as if someone is editing alongside you\n\nThis pattern of real-time collaborative editing with diff visualization is\nperfect for document editors, code review tools, content creation platforms, or\nany application where users benefit from seeing exactly how content is being\ntransformed!\n", + "language": "markdown", + "type": "file" + }, + { + "name": "agent.py", + "content": "\"\"\"\nA demo of predictive state updates using LangGraph.\n\"\"\"\n\nimport uuid\nfrom typing import List, Any, Optional\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.checkpoint.memory import MemorySaver\nfrom langchain_openai import ChatOpenAI\n\n@tool\ndef write_document_local(document: str): # pylint: disable=unused-argument\n \"\"\"\n Write a document. 
Use markdown formatting to format the document.\n It's good to format the document extensively so it's easy to read.\n You can use all kinds of markdown.\n However, do not use italic or strike-through formatting, it's reserved for another purpose.\n You MUST write the full document, even when changing only a few words.\n When making edits to the document, try to make them minimal - do not change every word.\n Keep stories SHORT!\n \"\"\"\n return document\n\nclass AgentState(MessagesState):\n \"\"\"\n The state of the agent.\n \"\"\"\n document: Optional[str] = None\n tools: List[Any]\n\n\nasync def start_node(state: AgentState, config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n return Command(\n goto=\"chat_node\"\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node.\n \"\"\"\n\n system_prompt = f\"\"\"\n You are a helpful assistant for writing documents.\n To write the document, you MUST use the write_document_local tool.\n You MUST write the full document, even when changing only a few words.\n When you wrote the document, DO NOT repeat it as a message.\n Just briefly summarize the changes you made. 2 sentences max.\n This is the current state of the document: ----\\n {state.get('document')}\\n-----\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document_local tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"document\",\n \"tool\": \"write_document_local\",\n \"tool_argument\": \"document\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n write_document_local\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model to generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages with the response\n messages = state[\"messages\"] + [response]\n\n # Extract any tool calls from the response\n if hasattr(response, \"tool_calls\") and response.tool_calls:\n tool_call = response.tool_calls[0]\n\n # Handle tool_call as a dictionary or an object\n if isinstance(tool_call, dict):\n tool_call_id = tool_call[\"id\"]\n tool_call_name = tool_call[\"name\"]\n tool_call_args = tool_call[\"args\"]\n else:\n # Handle as an object (backward compatibility)\n tool_call_id = tool_call.id\n tool_call_name = tool_call.name\n tool_call_args = tool_call.args\n\n if tool_call_name == \"write_document_local\":\n # Add the tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Document written.\",\n \"tool_call_id\": tool_call_id\n }\n\n # Add confirmation tool call\n confirm_tool_call = {\n \"role\": \"assistant\",\n \"content\": \"\",\n \"tool_calls\": [{\n \"id\": str(uuid.uuid4()),\n \"function\": {\n \"name\": \"confirm_changes\",\n \"arguments\": \"{}\"\n }\n }]\n }\n\n messages = messages + [tool_response, confirm_tool_call]\n\n # Return Command to route to end\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"document\": tool_call_args[\"document\"]\n }\n )\n\n # If no tool was called, go to end\n return Command(\n goto=END,\n 
update={\n \"messages\": messages\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n\n", + "language": "python", + "type": "file" + }, + { + "name": "agent.ts", + "content": "/**\n * A demo of predictive state updates using LangGraph.\n */\n\nimport { v4 as uuidv4 } from \"uuid\";\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { Command, Annotation, MessagesAnnotation, StateGraph, END, START } from \"@langchain/langgraph\";\n\nconst WRITE_DOCUMENT_TOOL = {\n type: \"function\",\n function: {\n name: \"write_document_local\",\n description: [\n \"Write a document. Use markdown formatting to format the document.\",\n \"It's good to format the document extensively so it's easy to read.\",\n \"You can use all kinds of markdown.\",\n \"However, do not use italic or strike-through formatting, it's reserved for another purpose.\",\n \"You MUST write the full document, even when changing only a few words.\",\n \"When making edits to the document, try to make them minimal - do not change every word.\",\n \"Keep stories SHORT!\"\n ].join(\" \"),\n parameters: {\n type: \"object\",\n properties: {\n document: {\n type: \"string\",\n description: \"The document to write\"\n },\n },\n }\n }\n};\n\nexport const AgentStateAnnotation = Annotation.Root({\n document: Annotation<string | undefined>({\n reducer: (x, y) => y ?? x,\n default: () => undefined\n }),\n tools: Annotation<any[]>(),\n ...MessagesAnnotation.spec,\n});\nexport type AgentState = typeof AgentStateAnnotation.State;\n\nasync function startFlow(state: AgentState, config?: RunnableConfig): Promise<Command> {\n /**\n * This is the entry point for the flow.\n */\n return new Command({\n goto: \"chat_node\"\n });\n}\n\nasync function chatNode(state: AgentState, config?: RunnableConfig): Promise<Command> {\n /**\n * Standard chat node.\n */
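\n\n // Note: the predict_state metadata set on the config below lets the\n // frontend stream the write_document_local tool's document argument into\n // shared state while the model is still generating it.\n const systemPrompt = `\n You are a helpful assistant for writing documents.\n To write the document, you MUST use the write_document_local tool.\n You MUST write the full document, even when changing only a few words.\n When you wrote the document, DO NOT repeat it as a message.\n Just briefly summarize the changes you made. 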
2 sentences max.\n This is the current state of the document: ----\\n ${state.document || ''}\\n-----\n `;\n\n // Define the model\n const model = new ChatOpenAI({ model: \"gpt-4o\" });\n\n // Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if (!config) {\n config = { recursionLimit: 25 };\n }\n\n // Use \"predict_state\" metadata to set up streaming for the write_document_local tool\n if (!config.metadata) config.metadata = {};\n config.metadata.predict_state = [{\n state_key: \"document\",\n tool: \"write_document_local\",\n tool_argument: \"document\"\n }];\n\n // Bind the tools to the model\n const modelWithTools = model.bindTools(\n [\n ...state.tools,\n WRITE_DOCUMENT_TOOL\n ],\n {\n // Disable parallel tool calls to avoid race conditions\n parallel_tool_calls: false,\n }\n );\n\n // Run the model to generate a response\n const response = await modelWithTools.invoke([\n new SystemMessage({ content: systemPrompt }),\n ...state.messages,\n ], config);\n\n // Update messages with the response\n const messages = [...state.messages, response];\n\n // Extract any tool calls from the response\n if (response.tool_calls && response.tool_calls.length > 0) {\n const toolCall = response.tool_calls[0];\n\n if (toolCall.name === \"write_document_local\") {\n // Add the tool response to messages\n const toolResponse = {\n role: \"tool\" as const,\n content: \"Document written.\",\n tool_call_id: toolCall.id\n };\n\n // Add confirmation tool call\n const confirmToolCall = {\n role: \"assistant\" as const,\n content: \"\",\n tool_calls: [{\n id: uuidv4(),\n type: \"function\" as const,\n function: {\n name: \"confirm_changes\",\n arguments: \"{}\"\n }\n }]\n };\n\n const updatedMessages = [...messages, toolResponse, confirmToolCall];\n\n // Return Command to route to end\n return new Command({\n goto: END,\n update: {\n messages: updatedMessages,\n document: toolCall.args.document\n }\n });\n }\n }\n\n // If no tool was called, go to end\n return new Command({\n goto: END,\n update: {\n messages: messages\n }\n });\n}\n\n// Define the graph\nconst workflow = new StateGraph(AgentStateAnnotation);\n\n// Add nodes\nworkflow.addNode(\"start_flow\", startFlow);\nworkflow.addNode(\"chat_node\", chatNode);\n\n// Add edges\nworkflow.setEntryPoint(\"start_flow\");\nworkflow.addEdge(START, \"start_flow\");\nworkflow.addEdge(\"start_flow\", \"chat_node\");\nworkflow.addEdge(\"chat_node\", END);\n\n// Compile the graph\nexport const predictiveStateUpdatesGraph = workflow.compile();", + "language": "ts", + "type": "file" + } + ], + "langgraph-typescript::shared_state": [ + { + "name": "page.tsx", + "content": "\"use client\";\nimport { CopilotKit, useCoAgent, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat, CopilotSidebar } from \"@copilotkit/react-ui\";\nimport React, { useState, useEffect, useRef } from \"react\";\nimport { Role, TextMessage } from \"@copilotkit/runtime-client-gql\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\ninterface SharedStateProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nexport default function SharedState({ params }: SharedStateProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n 
} = useMobileChat(defaultChatHeight)\n\n const chatTitle = 'AI Recipe Assistant'\n const chatDescription = 'Ask me to craft recipes'\n const initialLabel = 'Hi ๐Ÿ‘‹ How can I help with your recipe?'\n\n return (\n \n \n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n
\n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n
\n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n
\n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n ) : (\n \n )}\n
\n \n );\n}\n\nenum SkillLevel {\n BEGINNER = \"Beginner\",\n INTERMEDIATE = \"Intermediate\",\n ADVANCED = \"Advanced\",\n}\n\nenum CookingTime {\n FiveMin = \"5 min\",\n FifteenMin = \"15 min\",\n ThirtyMin = \"30 min\",\n FortyFiveMin = \"45 min\",\n SixtyPlusMin = \"60+ min\",\n}\n\nconst cookingTimeValues = [\n { label: CookingTime.FiveMin, value: 0 },\n { label: CookingTime.FifteenMin, value: 1 },\n { label: CookingTime.ThirtyMin, value: 2 },\n { label: CookingTime.FortyFiveMin, value: 3 },\n { label: CookingTime.SixtyPlusMin, value: 4 },\n];\n\nenum SpecialPreferences {\n HighProtein = \"High Protein\",\n LowCarb = \"Low Carb\",\n Spicy = \"Spicy\",\n BudgetFriendly = \"Budget-Friendly\",\n OnePotMeal = \"One-Pot Meal\",\n Vegetarian = \"Vegetarian\",\n Vegan = \"Vegan\",\n}\n\ninterface Ingredient {\n icon: string;\n name: string;\n amount: string;\n}\n\ninterface Recipe {\n title: string;\n skill_level: SkillLevel;\n cooking_time: CookingTime;\n special_preferences: string[];\n ingredients: Ingredient[];\n instructions: string[];\n}\n\ninterface RecipeAgentState {\n recipe: Recipe;\n}\n\nconst INITIAL_STATE: RecipeAgentState = {\n recipe: {\n title: \"Make Your Recipe\",\n skill_level: SkillLevel.INTERMEDIATE,\n cooking_time: CookingTime.FortyFiveMin,\n special_preferences: [],\n ingredients: [\n { icon: \"๐Ÿฅ•\", name: \"Carrots\", amount: \"3 large, grated\" },\n { icon: \"๐ŸŒพ\", name: \"All-Purpose Flour\", amount: \"2 cups\" },\n ],\n instructions: [\"Preheat oven to 350ยฐF (175ยฐC)\"],\n },\n};\n\nfunction Recipe() {\n const { state: agentState, setState: setAgentState } = useCoAgent({\n name: \"shared_state\",\n initialState: INITIAL_STATE,\n });\n\n const [recipe, setRecipe] = useState(INITIAL_STATE.recipe);\n const { appendMessage, isLoading } = useCopilotChat();\n const [editingInstructionIndex, setEditingInstructionIndex] = useState(null);\n const newInstructionRef = useRef(null);\n\n const updateRecipe = (partialRecipe: Partial) => {\n setAgentState({\n ...agentState,\n recipe: {\n ...recipe,\n ...partialRecipe,\n },\n });\n setRecipe({\n ...recipe,\n ...partialRecipe,\n });\n };\n\n const newRecipeState = { ...recipe };\n const newChangedKeys = [];\n const changedKeysRef = useRef([]);\n\n for (const key in recipe) {\n if (\n agentState &&\n agentState.recipe &&\n (agentState.recipe as any)[key] !== undefined &&\n (agentState.recipe as any)[key] !== null\n ) {\n let agentValue = (agentState.recipe as any)[key];\n const recipeValue = (recipe as any)[key];\n\n // Check if agentValue is a string and replace \\n with actual newlines\n if (typeof agentValue === \"string\") {\n agentValue = agentValue.replace(/\\\\n/g, \"\\n\");\n }\n\n if (JSON.stringify(agentValue) !== JSON.stringify(recipeValue)) {\n (newRecipeState as any)[key] = agentValue;\n newChangedKeys.push(key);\n }\n }\n }\n\n if (newChangedKeys.length > 0) {\n changedKeysRef.current = newChangedKeys;\n } else if (!isLoading) {\n changedKeysRef.current = [];\n }\n\n useEffect(() => {\n setRecipe(newRecipeState);\n }, [JSON.stringify(newRecipeState)]);\n\n const handleTitleChange = (event: React.ChangeEvent) => {\n updateRecipe({\n title: event.target.value,\n });\n };\n\n const handleSkillLevelChange = (event: React.ChangeEvent) => {\n updateRecipe({\n skill_level: event.target.value as SkillLevel,\n });\n };\n\n const handleDietaryChange = (preference: string, checked: boolean) => {\n if (checked) {\n updateRecipe({\n special_preferences: [...recipe.special_preferences, preference],\n });\n } else {\n 
updateRecipe({\n special_preferences: recipe.special_preferences.filter((p) => p !== preference),\n });\n }\n };\n\n const handleCookingTimeChange = (event: React.ChangeEvent) => {\n updateRecipe({\n cooking_time: cookingTimeValues[Number(event.target.value)].label,\n });\n };\n\n const addIngredient = () => {\n // Pick a random food emoji from our valid list\n updateRecipe({\n ingredients: [...recipe.ingredients, { icon: \"๐Ÿด\", name: \"\", amount: \"\" }],\n });\n };\n\n const updateIngredient = (index: number, field: keyof Ingredient, value: string) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients[index] = {\n ...updatedIngredients[index],\n [field]: value,\n };\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const removeIngredient = (index: number) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients.splice(index, 1);\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const addInstruction = () => {\n const newIndex = recipe.instructions.length;\n updateRecipe({\n instructions: [...recipe.instructions, \"\"],\n });\n // Set the new instruction as the editing one\n setEditingInstructionIndex(newIndex);\n\n // Focus the new instruction after render\n setTimeout(() => {\n const textareas = document.querySelectorAll(\".instructions-container textarea\");\n const newTextarea = textareas[textareas.length - 1] as HTMLTextAreaElement;\n if (newTextarea) {\n newTextarea.focus();\n }\n }, 50);\n };\n\n const updateInstruction = (index: number, value: string) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions[index] = value;\n updateRecipe({ instructions: updatedInstructions });\n };\n\n const removeInstruction = (index: number) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions.splice(index, 1);\n updateRecipe({ instructions: updatedInstructions });\n };\n\n // Simplified icon handler that defaults to a fork/knife for any problematic icons\n const getProperIcon = (icon: string | undefined): string => {\n // If icon is undefined return the default\n if (!icon) {\n return \"๐Ÿด\";\n }\n\n return icon;\n };\n\n return (\n
\n {/* Recipe Title */}\n
\n \n\n
\n
\n ๐Ÿ•’\n t.label === recipe.cooking_time)?.value || 3}\n onChange={handleCookingTimeChange}\n style={{\n backgroundImage:\n \"url(\\\"data:image/svg+xml;charset=UTF-8,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24' fill='none' stroke='%23555' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3e%3cpolyline points='6 9 12 15 18 9'%3e%3c/polyline%3e%3c/svg%3e\\\")\",\n backgroundRepeat: \"no-repeat\",\n backgroundPosition: \"right 0px center\",\n backgroundSize: \"12px\",\n appearance: \"none\",\n WebkitAppearance: \"none\",\n }}\n >\n {cookingTimeValues.map((time) => (\n \n ))}\n \n
\n\n
\n ๐Ÿ†\n \n {Object.values(SkillLevel).map((level) => (\n \n ))}\n \n
\n
\n
\n\n {/* Dietary Preferences */}\n
\n {changedKeysRef.current.includes(\"special_preferences\") && }\n

Dietary Preferences

\n
\n {Object.values(SpecialPreferences).map((option) => (\n \n ))}\n
\n
\n\n {/* Ingredients */}\n
\n {changedKeysRef.current.includes(\"ingredients\") && }\n
\n

Ingredients

\n \n + Add Ingredient\n \n
\n \n {recipe.ingredients.map((ingredient, index) => (\n
\n
{getProperIcon(ingredient.icon)}
\n
\n updateIngredient(index, \"name\", e.target.value)}\n placeholder=\"Ingredient name\"\n className=\"ingredient-name-input\"\n />\n updateIngredient(index, \"amount\", e.target.value)}\n placeholder=\"Amount\"\n className=\"ingredient-amount-input\"\n />\n
\n removeIngredient(index)}\n aria-label=\"Remove ingredient\"\n >\n ×\n \n 
\n ))}\n
\n
\n\n {/* Instructions */}\n
\n {changedKeysRef.current.includes(\"instructions\") && }\n
\n

Instructions

\n \n
\n
\n {recipe.instructions.map((instruction, index) => (\n
\n {/* Number Circle */}\n
{index + 1}
\n\n {/* Vertical Line */}\n {index < recipe.instructions.length - 1 &&
}\n\n {/* Instruction Content */}\n setEditingInstructionIndex(index)}\n >\n updateInstruction(index, e.target.value)}\n placeholder={!instruction ? \"Enter cooking instruction...\" : \"\"}\n onFocus={() => setEditingInstructionIndex(index)}\n onBlur={(e) => {\n // Only blur if clicking outside this instruction\n if (!e.relatedTarget || !e.currentTarget.contains(e.relatedTarget as Node)) {\n setEditingInstructionIndex(null);\n }\n }}\n />\n\n {/* Delete Button (only visible on hover) */}\n {\n e.stopPropagation(); // Prevent triggering parent onClick\n removeInstruction(index);\n }}\n aria-label=\"Remove instruction\"\n >\n ×\n \n 
\n
\n ))}\n
\n
\n\n {/* Improve with AI Button */}\n
\n {\n if (!isLoading) {\n appendMessage(\n new TextMessage({\n content: \"Improve the recipe\",\n role: Role.User,\n }),\n );\n }\n }}\n disabled={isLoading}\n >\n {isLoading ? \"Please Wait...\" : \"Improve with AI\"}\n \n
\n \n );\n}\n\nfunction Ping() {\n return (\n \n \n \n \n );\n}\n", + "language": "typescript", + "type": "file" + }, + { + "name": "style.css", + "content": ".copilotKitWindow {\n box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1);\n}\n\n.copilotKitHeader {\n border-top-left-radius: 5px !important;\n background-color: #fff;\n color: #000;\n border-bottom: 0px;\n}\n\n/* Recipe App Styles */\n.app-container {\n min-height: 100vh;\n width: 100%;\n display: flex;\n align-items: center;\n justify-content: center;\n background-size: cover;\n background-position: center;\n background-repeat: no-repeat;\n background-attachment: fixed;\n position: relative;\n overflow: auto;\n}\n\n.recipe-card {\n background-color: rgba(255, 255, 255, 0.97);\n border-radius: 16px;\n box-shadow: 0 15px 30px rgba(0, 0, 0, 0.25), 0 5px 15px rgba(0, 0, 0, 0.15);\n width: 100%;\n max-width: 750px;\n margin: 20px auto;\n padding: 14px 32px;\n position: relative;\n z-index: 1;\n backdrop-filter: blur(5px);\n border: 1px solid rgba(255, 255, 255, 0.3);\n transition: transform 0.2s ease, box-shadow 0.2s ease;\n animation: fadeIn 0.5s ease-out forwards;\n box-sizing: border-box;\n overflow: hidden;\n}\n\n.recipe-card:hover {\n transform: translateY(-5px);\n box-shadow: 0 20px 40px rgba(0, 0, 0, 0.3), 0 10px 20px rgba(0, 0, 0, 0.2);\n}\n\n/* Recipe Header */\n.recipe-header {\n margin-bottom: 24px;\n}\n\n.recipe-title-input {\n width: 100%;\n font-size: 24px;\n font-weight: bold;\n border: none;\n outline: none;\n padding: 8px 0;\n margin-bottom: 0px;\n}\n\n.recipe-meta {\n display: flex;\n align-items: center;\n gap: 20px;\n margin-top: 5px;\n margin-bottom: 14px;\n}\n\n.meta-item {\n display: flex;\n align-items: center;\n gap: 8px;\n color: #555;\n}\n\n.meta-icon {\n font-size: 20px;\n color: #777;\n}\n\n.meta-text {\n font-size: 15px;\n}\n\n/* Recipe Meta Selects */\n.meta-item select {\n border: none;\n background: transparent;\n font-size: 15px;\n color: #555;\n cursor: pointer;\n outline: none;\n padding-right: 18px;\n transition: color 0.2s, transform 0.1s;\n font-weight: 500;\n}\n\n.meta-item select:hover,\n.meta-item select:focus {\n color: #FF5722;\n}\n\n.meta-item select:active {\n transform: scale(0.98);\n}\n\n.meta-item select option {\n color: #333;\n background-color: white;\n font-weight: normal;\n padding: 8px;\n}\n\n/* Section Container */\n.section-container {\n margin-bottom: 20px;\n position: relative;\n width: 100%;\n}\n\n.section-title {\n font-size: 20px;\n font-weight: 700;\n margin-bottom: 20px;\n color: #333;\n position: relative;\n display: inline-block;\n}\n\n.section-title:after {\n content: \"\";\n position: absolute;\n bottom: -8px;\n left: 0;\n width: 40px;\n height: 3px;\n background-color: #ff7043;\n border-radius: 3px;\n}\n\n/* Dietary Preferences */\n.dietary-options {\n display: flex;\n flex-wrap: wrap;\n gap: 10px 16px;\n margin-bottom: 16px;\n width: 100%;\n}\n\n.dietary-option {\n display: flex;\n align-items: center;\n gap: 6px;\n font-size: 14px;\n cursor: pointer;\n margin-bottom: 4px;\n}\n\n.dietary-option input {\n cursor: pointer;\n}\n\n/* Ingredients */\n.ingredients-container {\n display: flex;\n flex-wrap: wrap;\n gap: 10px;\n margin-bottom: 15px;\n width: 100%;\n box-sizing: border-box;\n}\n\n.ingredient-card {\n display: flex;\n align-items: center;\n background-color: rgba(255, 255, 255, 0.9);\n border-radius: 12px;\n padding: 12px;\n margin-bottom: 10px;\n box-shadow: 0 4px 10px rgba(0, 0, 0, 0.08);\n position: relative;\n transition: all 0.2s ease;\n border: 1px solid rgba(240, 
240, 240, 0.8);\n width: calc(33.333% - 7px);\n box-sizing: border-box;\n}\n\n.ingredient-card:hover {\n transform: translateY(-2px);\n box-shadow: 0 6px 15px rgba(0, 0, 0, 0.12);\n}\n\n.ingredient-card .remove-button {\n position: absolute;\n right: 10px;\n top: 10px;\n background: none;\n border: none;\n color: #ccc;\n font-size: 16px;\n cursor: pointer;\n display: none;\n padding: 0;\n width: 24px;\n height: 24px;\n line-height: 1;\n}\n\n.ingredient-card:hover .remove-button {\n display: block;\n}\n\n.ingredient-icon {\n font-size: 24px;\n margin-right: 12px;\n display: flex;\n align-items: center;\n justify-content: center;\n width: 40px;\n height: 40px;\n background-color: #f7f7f7;\n border-radius: 50%;\n flex-shrink: 0;\n}\n\n.ingredient-content {\n flex: 1;\n display: flex;\n flex-direction: column;\n gap: 3px;\n min-width: 0;\n}\n\n.ingredient-name-input,\n.ingredient-amount-input {\n border: none;\n background: transparent;\n outline: none;\n width: 100%;\n padding: 0;\n text-overflow: ellipsis;\n overflow: hidden;\n white-space: nowrap;\n}\n\n.ingredient-name-input {\n font-weight: 500;\n font-size: 14px;\n}\n\n.ingredient-amount-input {\n font-size: 13px;\n color: #666;\n}\n\n.ingredient-name-input::placeholder,\n.ingredient-amount-input::placeholder {\n color: #aaa;\n}\n\n.remove-button {\n background: none;\n border: none;\n color: #999;\n font-size: 20px;\n cursor: pointer;\n padding: 0;\n width: 28px;\n height: 28px;\n display: flex;\n align-items: center;\n justify-content: center;\n margin-left: 10px;\n}\n\n.remove-button:hover {\n color: #FF5722;\n}\n\n/* Instructions */\n.instructions-container {\n display: flex;\n flex-direction: column;\n gap: 6px;\n position: relative;\n margin-bottom: 12px;\n width: 100%;\n}\n\n.instruction-item {\n position: relative;\n display: flex;\n width: 100%;\n box-sizing: border-box;\n margin-bottom: 8px;\n align-items: flex-start;\n}\n\n.instruction-number {\n display: flex;\n align-items: center;\n justify-content: center;\n min-width: 26px;\n height: 26px;\n background-color: #ff7043;\n color: white;\n border-radius: 50%;\n font-weight: 600;\n flex-shrink: 0;\n box-shadow: 0 2px 4px rgba(255, 112, 67, 0.3);\n z-index: 1;\n font-size: 13px;\n margin-top: 2px;\n}\n\n.instruction-line {\n position: absolute;\n left: 13px; /* Half of the number circle width */\n top: 22px;\n bottom: -18px;\n width: 2px;\n background: linear-gradient(to bottom, #ff7043 60%, rgba(255, 112, 67, 0.4));\n z-index: 0;\n}\n\n.instruction-content {\n background-color: white;\n border-radius: 10px;\n padding: 10px 14px;\n margin-left: 12px;\n flex-grow: 1;\n transition: all 0.2s ease;\n box-shadow: 0 2px 6px rgba(0, 0, 0, 0.08);\n border: 1px solid rgba(240, 240, 240, 0.8);\n position: relative;\n width: calc(100% - 38px);\n box-sizing: border-box;\n display: flex;\n align-items: center;\n}\n\n.instruction-content-editing {\n background-color: #fff9f6;\n box-shadow: 0 6px 16px rgba(0, 0, 0, 0.12), 0 0 0 2px rgba(255, 112, 67, 0.2);\n}\n\n.instruction-content:hover {\n transform: translateY(-2px);\n box-shadow: 0 6px 16px rgba(0, 0, 0, 0.12);\n}\n\n.instruction-textarea {\n width: 100%;\n background: transparent;\n border: none;\n resize: vertical;\n font-family: inherit;\n font-size: 14px;\n line-height: 1.4;\n min-height: 20px;\n outline: none;\n padding: 0;\n margin: 0;\n}\n\n.instruction-delete-btn {\n position: absolute;\n background: none;\n border: none;\n color: #ccc;\n font-size: 16px;\n cursor: pointer;\n display: none;\n padding: 0;\n width: 20px;\n height: 
20px;\n line-height: 1;\n top: 50%;\n transform: translateY(-50%);\n right: 8px;\n}\n\n.instruction-content:hover .instruction-delete-btn {\n display: flex;\n align-items: center;\n justify-content: center;\n}\n\n/* Action Button */\n.action-container {\n display: flex;\n justify-content: center;\n margin-top: 40px;\n padding-bottom: 20px;\n position: relative;\n}\n\n.improve-button {\n background-color: #ff7043;\n border: none;\n color: white;\n border-radius: 30px;\n font-size: 18px;\n font-weight: 600;\n padding: 14px 28px;\n cursor: pointer;\n transition: all 0.3s ease;\n box-shadow: 0 4px 15px rgba(255, 112, 67, 0.4);\n display: flex;\n align-items: center;\n justify-content: center;\n text-align: center;\n position: relative;\n min-width: 180px;\n}\n\n.improve-button:hover {\n background-color: #ff5722;\n transform: translateY(-2px);\n box-shadow: 0 8px 20px rgba(255, 112, 67, 0.5);\n}\n\n.improve-button.loading {\n background-color: #ff7043;\n opacity: 0.8;\n cursor: not-allowed;\n padding-left: 42px; /* Reduced padding to bring text closer to icon */\n padding-right: 22px; /* Balance the button */\n justify-content: flex-start; /* Left align text for better alignment with icon */\n}\n\n.improve-button.loading:after {\n content: \"\"; /* Add space between icon and text */\n display: inline-block;\n width: 8px; /* Width of the space */\n}\n\n.improve-button:before {\n content: \"\";\n background-image: url(\"data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='white' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3E%3Cpath d='M12 2v4M12 18v4M4.93 4.93l2.83 2.83M16.24 16.24l2.83 2.83M2 12h4M18 12h4M4.93 19.07l2.83-2.83M16.24 7.76l2.83-2.83'/%3E%3C/svg%3E\");\n width: 20px; /* Slightly smaller icon */\n height: 20px;\n background-repeat: no-repeat;\n background-size: contain;\n position: absolute;\n left: 16px; /* Slightly adjusted */\n top: 50%;\n transform: translateY(-50%);\n display: none;\n}\n\n.improve-button.loading:before {\n display: block;\n animation: spin 1.5s linear infinite;\n}\n\n@keyframes spin {\n 0% { transform: translateY(-50%) rotate(0deg); }\n 100% { transform: translateY(-50%) rotate(360deg); }\n}\n\n/* Ping Animation */\n.ping-animation {\n position: absolute;\n display: flex;\n width: 12px;\n height: 12px;\n top: 0;\n right: 0;\n}\n\n.ping-circle {\n position: absolute;\n display: inline-flex;\n width: 100%;\n height: 100%;\n border-radius: 50%;\n background-color: #38BDF8;\n opacity: 0.75;\n animation: ping 1.5s cubic-bezier(0, 0, 0.2, 1) infinite;\n}\n\n.ping-dot {\n position: relative;\n display: inline-flex;\n width: 12px;\n height: 12px;\n border-radius: 50%;\n background-color: #0EA5E9;\n}\n\n@keyframes ping {\n 75%, 100% {\n transform: scale(2);\n opacity: 0;\n }\n}\n\n/* Instruction hover effects */\n.instruction-item:hover .instruction-delete-btn {\n display: flex !important;\n}\n\n/* Add some subtle animations */\n@keyframes fadeIn {\n from { opacity: 0; transform: translateY(20px); }\n to { opacity: 1; transform: translateY(0); }\n}\n\n/* Better center alignment for the recipe card */\n.recipe-card-container {\n display: flex;\n justify-content: center;\n width: 100%;\n position: relative;\n z-index: 1;\n margin: 0 auto;\n box-sizing: border-box;\n}\n\n/* Add Buttons */\n.add-button {\n background-color: transparent;\n color: #FF5722;\n border: 1px dashed #FF5722;\n border-radius: 8px;\n padding: 10px 16px;\n cursor: pointer;\n font-weight: 500;\n display: 
inline-block;\n font-size: 14px;\n margin-bottom: 0;\n}\n\n.add-step-button {\n background-color: transparent;\n color: #FF5722;\n border: 1px dashed #FF5722;\n border-radius: 6px;\n padding: 6px 12px;\n cursor: pointer;\n font-weight: 500;\n font-size: 13px;\n}\n\n/* Section Headers */\n.section-header {\n display: flex;\n justify-content: space-between;\n align-items: center;\n margin-bottom: 12px;\n}", + "language": "css", + "type": "file" + }, + { + "name": "README.mdx", + "content": "# ๐Ÿณ Shared State Recipe Creator\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **shared state** functionality - a powerful\nfeature that enables bidirectional data flow between:\n\n1. **Frontend โ†’ Agent**: UI controls update the agent's context in real-time\n2. **Agent โ†’ Frontend**: The Copilot's recipe creations instantly update the UI\n components\n\nIt's like having a cooking buddy who not only listens to what you want but also\nupdates your recipe card as you chat - no refresh needed! โœจ\n\n## How to Interact\n\nMix and match any of these parameters (or none at all - it's up to you!):\n\n- **Skill Level**: Beginner to expert ๐Ÿ‘จโ€๐Ÿณ\n- **Cooking Time**: Quick meals or slow cooking โฑ๏ธ\n- **Special Preferences**: Dietary needs, flavor profiles, health goals ๐Ÿฅ—\n- **Ingredients**: Items you want to include ๐Ÿง…๐Ÿฅฉ๐Ÿ„\n- **Instructions**: Any specific steps\n\nThen chat with your Copilot chef with prompts like:\n\n- \"I'm a beginner cook. Can you make me a quick dinner?\"\n- \"I need something spicy with chicken that takes under 30 minutes!\"\n\n## โœจ Shared State Magic in Action\n\n**What's happening technically:**\n\n- The UI and Copilot agent share the same state object (**Agent State = UI\n State**)\n- Changes from either side automatically update the other\n- Neither side needs to manually request updates from the other\n\n**What you'll see in this demo:**\n\n- Set cooking time to 20 minutes in the UI and watch the Copilot immediately\n respect your time constraint\n- Add ingredients through the UI and see them appear in your recipe\n- When the Copilot suggests new ingredients, watch them automatically appear in\n the UI ingredients list\n- Change your skill level and see how the Copilot adapts its instructions in\n real-time\n\nThis synchronized state creates a seamless experience where the agent always has\nyour current preferences, and any updates to the recipe are instantly reflected\nin both places.\n\nThis shared state pattern can be applied to any application where you want your\nUI and Copilot to work together in perfect harmony!\n", + "language": "markdown", + "type": "file" + }, + { + "name": "agent.py", + "content": "\"\"\"\nA demo of shared state between the agent and CopilotKit using LangGraph.\n\"\"\"\n\nimport json\nfrom enum import Enum\nfrom typing import Dict, List, Any, Optional\nimport os\n\n# LangGraph imports\nfrom pydantic import BaseModel, Field\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.callbacks.manager import adispatch_custom_event\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.checkpoint.memory import MemorySaver\n\nclass SkillLevel(str, Enum):\n \"\"\"\n The level of skill required for the recipe.\n \"\"\"\n BEGINNER = \"Beginner\"\n INTERMEDIATE = \"Intermediate\"\n ADVANCED = 
\"Advanced\"\n\nclass SpecialPreferences(str, Enum):\n \"\"\"\n Special preferences for the recipe.\n \"\"\"\n HIGH_PROTEIN = \"High Protein\"\n LOW_CARB = \"Low Carb\"\n SPICY = \"Spicy\"\n BUDGET_FRIENDLY = \"Budget-Friendly\"\n ONE_POT_MEAL = \"One-Pot Meal\"\n VEGETARIAN = \"Vegetarian\"\n VEGAN = \"Vegan\"\n\nclass CookingTime(str, Enum):\n \"\"\"\n The cooking time of the recipe.\n \"\"\"\n FIVE_MIN = \"5 min\"\n FIFTEEN_MIN = \"15 min\"\n THIRTY_MIN = \"30 min\"\n FORTY_FIVE_MIN = \"45 min\"\n SIXTY_PLUS_MIN = \"60+ min\"\n\nclass Ingredient(BaseModel):\n \"\"\"\n An ingredient.\n \"\"\"\n icon: str = Field(\n description=\"Icon: the actual emoji like ๐Ÿฅ•\"\n )\n name: str = Field(description=\"The name of the ingredient\")\n amount: str = Field(description=\"The amount of the ingredient\")\n\nclass Recipe(BaseModel):\n \"\"\"\n A recipe.\n \"\"\"\n skill_level: SkillLevel = \\\n Field(description=\"The skill level required for the recipe\")\n special_preferences: List[SpecialPreferences] = \\\n Field(description=\"A list of special preferences for the recipe\")\n cooking_time: CookingTime = \\\n Field(description=\"The cooking time of the recipe\")\n ingredients: List[Ingredient] = \\\n Field(description=\n \"\"\"Entire list of ingredients for the recipe, including the new ingredients\n and the ones that are already in the recipe: Icon: the actual emoji like ๐Ÿฅ•,\n name and amount.\n Like so: ๐Ÿฅ• Carrots (250g)\"\"\"\n )\n instructions: List[str] = \\\n Field(description=\n \"\"\"Entire list of instructions for the recipe,\n including the new instructions and the ones that are already there\"\"\"\n )\n changes: str = \\\n Field(description=\"A description of the changes made to the recipe\")\n\nclass GenerateRecipeArgs(BaseModel): # pylint: disable=missing-class-docstring\n recipe: Recipe\n\n@tool(args_schema=GenerateRecipeArgs)\ndef generate_recipe(recipe: Recipe): # pylint: disable=unused-argument\n \"\"\"\n Using the existing (if any) ingredients and instructions, proceed with the recipe to finish it.\n Make sure the recipe is complete. 
ALWAYS provide the entire recipe, not just the changes.\n \"\"\"\n\nclass AgentState(MessagesState):\n \"\"\"\n The state of the recipe.\n \"\"\"\n recipe: Optional[Dict[str, Any]] = None\n tools: List[Any]\n\n\nasync def start_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n # Initialize recipe if not exists\n if \"recipe\" not in state or state[\"recipe\"] is None:\n state[\"recipe\"] = {\n \"skill_level\": SkillLevel.BEGINNER.value,\n \"special_preferences\": [],\n \"cooking_time\": CookingTime.FIFTEEN_MIN.value,\n \"ingredients\": [{\"icon\": \"๐Ÿด\", \"name\": \"Sample Ingredient\", \"amount\": \"1 unit\"}],\n \"instructions\": [\"First step instruction\"]\n }\n # Emit the initial state to ensure it's properly shared with the frontend\n await adispatch_custom_event(\n \"manually_emit_intermediate_state\",\n state,\n config=config,\n )\n\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"recipe\": state[\"recipe\"]\n }\n )\n\nasync def chat_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n Standard chat node.\n \"\"\"\n # Create a safer serialization of the recipe\n recipe_json = \"No recipe yet\"\n if \"recipe\" in state and state[\"recipe\"] is not None:\n try:\n recipe_json = json.dumps(state[\"recipe\"], indent=2)\n except Exception as e: # pylint: disable=broad-exception-caught\n recipe_json = f\"Error serializing recipe: {str(e)}\"\n\n system_prompt = f\"\"\"You are a helpful assistant for creating recipes. \n This is the current state of the recipe: {recipe_json}\n You can improve the recipe by calling the generate_recipe tool.\n \n IMPORTANT:\n 1. Create a recipe using the existing ingredients and instructions. Make sure the recipe is complete.\n 2. For ingredients, append new ingredients to the existing ones.\n 3. For instructions, append new steps to the existing ones.\n 4. 'ingredients' is always an array of objects with 'icon', 'name', and 'amount' fields\n 5. 'instructions' is always an array of strings\n\n If you have just created or modified the recipe, just answer in one sentence what you did. 
Don't describe the recipe, just say what you did.\n    \"\"\"\n\n    # Define the model\n    model = ChatOpenAI(model=\"gpt-4o-mini\")\n\n    # Define config for the model\n    if config is None:\n        config = RunnableConfig(recursion_limit=25)\n\n    # Use \"predict_state\" metadata to set up streaming for the generate_recipe tool\n    config[\"metadata\"][\"predict_state\"] = [{\n        \"state_key\": \"recipe\",\n        \"tool\": \"generate_recipe\",\n        \"tool_argument\": \"recipe\"\n    }]\n\n    # Bind the tools to the model\n    model_with_tools = model.bind_tools(\n        [\n            *state[\"tools\"],\n            generate_recipe\n        ],\n        # Disable parallel tool calls to avoid race conditions\n        parallel_tool_calls=False,\n    )\n\n    # Run the model and generate a response\n    response = await model_with_tools.ainvoke([\n        SystemMessage(content=system_prompt),\n        *state[\"messages\"],\n    ], config)\n\n    # Update messages with the response\n    messages = state[\"messages\"] + [response]\n\n    # Handle tool calls\n    if hasattr(response, \"tool_calls\") and response.tool_calls:\n        # Handle dict or object tool calls (backward compatibility)\n        tool_call = (response.tool_calls[0]\n                     if isinstance(response.tool_calls[0], dict)\n                     else vars(response.tool_calls[0]))\n\n        # Check if args is already a dict or needs to be parsed\n        tool_call_args = (tool_call[\"args\"]\n                          if isinstance(tool_call[\"args\"], dict)\n                          else json.loads(tool_call[\"args\"]))\n\n        if tool_call[\"name\"] == \"generate_recipe\":\n            # Update recipe state with tool_call_args\n            recipe_data = tool_call_args[\"recipe\"]\n\n            # If we have an existing recipe, update it\n            if \"recipe\" in state and state[\"recipe\"] is not None:\n                recipe = state[\"recipe\"]\n                for key, value in recipe_data.items():\n                    if value is not None: # Only update fields that were provided\n                        recipe[key] = value\n            else:\n                # Create a new recipe\n                recipe = {\n                    \"skill_level\": recipe_data.get(\"skill_level\", SkillLevel.BEGINNER.value),\n                    \"special_preferences\": recipe_data.get(\"special_preferences\", []),\n                    \"cooking_time\": recipe_data.get(\"cooking_time\", CookingTime.FIFTEEN_MIN.value),\n                    \"ingredients\": recipe_data.get(\"ingredients\", []),\n                    \"instructions\": recipe_data.get(\"instructions\", [])\n                }\n\n            # Add tool response to messages\n            tool_response = {\n                \"role\": \"tool\",\n                \"content\": \"Recipe generated.\",\n                \"tool_call_id\": tool_call[\"id\"]\n            }\n\n            messages = messages + [tool_response]\n\n            # Explicitly emit the updated state to ensure it's shared with frontend\n            state[\"recipe\"] = recipe\n            await adispatch_custom_event(\n                \"manually_emit_intermediate_state\",\n                state,\n                config=config,\n            )\n\n            # Return command with updated recipe\n            return Command(\n                goto=\"start_node\",\n                update={\n                    \"messages\": messages,\n                    \"recipe\": recipe\n                }\n            )\n\n    return Command(\n        goto=END,\n        update={\n            \"messages\": messages,\n            \"recipe\": state[\"recipe\"]\n        }\n    )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n    # For CopilotKit and other contexts, use MemorySaver (imported at the top of this module)\n    memory = 
MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", + "language": "python", + "type": "file" + }, + { + "name": "agent.ts", + "content": "/**\n * A demo of shared state between the agent and CopilotKit using LangGraph.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { dispatchCustomEvent } from \"@langchain/core/callbacks/dispatch\";\nimport { Command, Annotation, MessagesAnnotation, StateGraph, END, START } from \"@langchain/langgraph\";\n\nenum SkillLevel {\n BEGINNER = \"Beginner\",\n INTERMEDIATE = \"Intermediate\",\n ADVANCED = \"Advanced\"\n}\n\nenum SpecialPreferences {\n HIGH_PROTEIN = \"High Protein\",\n LOW_CARB = \"Low Carb\",\n SPICY = \"Spicy\",\n BUDGET_FRIENDLY = \"Budget-Friendly\",\n ONE_POT_MEAL = \"One-Pot Meal\",\n VEGETARIAN = \"Vegetarian\",\n VEGAN = \"Vegan\"\n}\n\nenum CookingTime {\n FIVE_MIN = \"5 min\",\n FIFTEEN_MIN = \"15 min\",\n THIRTY_MIN = \"30 min\",\n FORTY_FIVE_MIN = \"45 min\",\n SIXTY_PLUS_MIN = \"60+ min\"\n}\n\ninterface Ingredient {\n icon: string;\n name: string;\n amount: string;\n}\n\ninterface Recipe {\n skill_level: SkillLevel;\n special_preferences: SpecialPreferences[];\n cooking_time: CookingTime;\n ingredients: Ingredient[];\n instructions: string[];\n changes?: string;\n}\n\nconst GENERATE_RECIPE_TOOL = {\n type: \"function\",\n function: {\n name: \"generate_recipe\",\n description: \"Using the existing (if any) ingredients and instructions, proceed with the recipe to finish it. Make sure the recipe is complete. ALWAYS provide the entire recipe, not just the changes.\",\n parameters: {\n type: \"object\",\n properties: {\n recipe: {\n type: \"object\",\n properties: {\n skill_level: {\n type: \"string\",\n enum: Object.values(SkillLevel),\n description: \"The skill level required for the recipe\"\n },\n special_preferences: {\n type: \"array\",\n items: {\n type: \"string\",\n enum: Object.values(SpecialPreferences)\n },\n description: \"A list of special preferences for the recipe\"\n },\n cooking_time: {\n type: \"string\",\n enum: Object.values(CookingTime),\n description: \"The cooking time of the recipe\"\n },\n ingredients: {\n type: \"array\",\n items: {\n type: \"object\",\n properties: {\n icon: { type: \"string\", description: \"The icon emoji (not emoji code like '\\\\u1f35e', but the actual emoji like ๐Ÿฅ•) of the ingredient\" },\n name: { type: \"string\" },\n amount: { type: \"string\" }\n }\n },\n description: \"Entire list of ingredients for the recipe, including the new ingredients and the ones that are already in the recipe\"\n },\n instructions: {\n type: \"array\",\n items: { type: \"string\" },\n description: \"Entire list of instructions for the recipe, including the new instructions and the ones that are already there\"\n },\n changes: {\n type: \"string\",\n description: \"A description of the changes made to the recipe\"\n }\n },\n }\n },\n required: [\"recipe\"]\n }\n }\n};\n\nexport const AgentStateAnnotation = Annotation.Root({\n recipe: Annotation(),\n tools: Annotation(),\n ...MessagesAnnotation.spec,\n});\nexport type AgentState = typeof AgentStateAnnotation.State;\n\nasync function startFlow(state: AgentState, config?: RunnableConfig): Promise {\n /**\n * This is the entry point for the flow.\n */\n\n // Initialize recipe if not exists\n if 
(!state.recipe) {\n    state.recipe = {\n      skill_level: SkillLevel.BEGINNER,\n      special_preferences: [],\n      cooking_time: CookingTime.FIFTEEN_MIN,\n      ingredients: [{ icon: \"🍴\", name: \"Sample Ingredient\", amount: \"1 unit\" }],\n      instructions: [\"First step instruction\"]\n    };\n    // Emit the initial state to ensure it's properly shared with the frontend\n    await dispatchCustomEvent(\"manually_emit_intermediate_state\", state, config);\n  }\n  \n  return new Command({\n    goto: \"chat_node\",\n    update: {\n      messages: state.messages,\n      recipe: state.recipe\n    }\n  });\n}\n\nasync function chatNode(state: AgentState, config?: RunnableConfig): Promise<Command> {\n  /**\n   * Standard chat node.\n   */\n  // Create a safer serialization of the recipe\n  let recipeJson = \"No recipe yet\";\n  if (state.recipe) {\n    try {\n      recipeJson = JSON.stringify(state.recipe, null, 2);\n    } catch (e) {\n      recipeJson = `Error serializing recipe: ${e}`;\n    }\n  }\n\n  const systemPrompt = `You are a helpful assistant for creating recipes. \n  This is the current state of the recipe: ${recipeJson}\n  You can improve the recipe by calling the generate_recipe tool.\n  \n  IMPORTANT:\n  1. Create a recipe using the existing ingredients and instructions. Make sure the recipe is complete.\n  2. For ingredients, append new ingredients to the existing ones.\n  3. For instructions, append new steps to the existing ones.\n  4. 'ingredients' is always an array of objects with 'icon', 'name', and 'amount' fields\n  5. 'instructions' is always an array of strings\n\n  If you have just created or modified the recipe, just answer in one sentence what you did. Don't describe the recipe, just say what you did.\n  `;\n\n  // Define the model\n  const model = new ChatOpenAI({ model: \"gpt-4o-mini\" });\n  \n  // Define config for the model\n  if (!config) {\n    config = { recursionLimit: 25 };\n  }\n\n  // Use \"predict_state\" metadata to set up streaming for the generate_recipe tool\n  if (!config.metadata) config.metadata = {};\n  config.metadata.predict_state = [{\n    state_key: \"recipe\",\n    tool: \"generate_recipe\",\n    tool_argument: \"recipe\"\n  }];\n\n  // Bind the tools to the model\n  const modelWithTools = model.bindTools(\n    [\n      ...state.tools,\n      GENERATE_RECIPE_TOOL\n    ],\n    {\n      // Disable parallel tool calls to avoid race conditions\n      parallel_tool_calls: false,\n    }\n  );\n\n  // Run the model and generate a response\n  const response = await modelWithTools.invoke([\n    new SystemMessage({ content: systemPrompt }),\n    ...state.messages,\n  ], config);\n\n  // Update messages with the response\n  const messages = [...state.messages, response];\n  \n  // Handle tool calls\n  if (response.tool_calls && response.tool_calls.length > 0) {\n    const toolCall = response.tool_calls[0];\n    \n    if (toolCall.name === \"generate_recipe\") {\n      // Update recipe state with tool_call_args\n      const recipeData = toolCall.args.recipe;\n      let recipe: Recipe;\n      // If we have an existing recipe, update it\n      if (state.recipe) {\n        recipe = { ...state.recipe };\n        for (const [key, value] of Object.entries(recipeData)) {\n          if (value !== null && value !== undefined) { // Only update fields that were provided\n            (recipe as any)[key] = value;\n          }\n        }\n      } else {\n        // Create a new recipe\n        recipe = {\n          skill_level: recipeData.skill_level || SkillLevel.BEGINNER,\n          special_preferences: recipeData.special_preferences || [],\n          cooking_time: recipeData.cooking_time || CookingTime.FIFTEEN_MIN,\n          ingredients: recipeData.ingredients || [],\n          instructions: recipeData.instructions || []\n        };\n      }\n      \n      // Add tool response to messages\n      
const toolResponse = {\n role: \"tool\" as const,\n content: \"Recipe generated.\",\n tool_call_id: toolCall.id\n };\n \n const updatedMessages = [...messages, toolResponse];\n \n // Explicitly emit the updated state to ensure it's shared with frontend\n state.recipe = recipe;\n await dispatchCustomEvent(\"manually_emit_intermediate_state\", state, config);\n \n // Return command with updated recipe\n return new Command({\n goto: \"start_flow\",\n update: {\n messages: updatedMessages,\n recipe: recipe\n }\n });\n }\n }\n\n return new Command({\n goto: END,\n update: {\n messages: messages,\n recipe: state.recipe\n }\n });\n}\n\n// Define the graph\nconst workflow = new StateGraph(AgentStateAnnotation);\n\n// Add nodes\nworkflow.addNode(\"start_flow\", startFlow);\nworkflow.addNode(\"chat_node\", chatNode);\n\n// Add edges\nworkflow.setEntryPoint(\"start_flow\");\nworkflow.addEdge(START, \"start_flow\");\nworkflow.addEdge(\"start_flow\", \"chat_node\");\nworkflow.addEdge(\"chat_node\", END);\n\n// Compile the graph\nexport const sharedStateGraph = workflow.compile();", + "language": "ts", + "type": "file" + } + ], + "langgraph-typescript::tool_based_generative_ui": [ + { + "name": "page.tsx", + "content": "\"use client\";\nimport { CopilotKit, useCopilotAction } from \"@copilotkit/react-core\";\nimport { CopilotKitCSSProperties, CopilotSidebar, CopilotChat } from \"@copilotkit/react-ui\";\nimport { Dispatch, SetStateAction, useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport React, { useMemo } from \"react\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface GenerateHaiku{\n japanese : string[] | [],\n english : string[] | [],\n image_names : string[] | [],\n selectedImage : string | null,\n}\n\ninterface HaikuCardProps{\n generatedHaiku : GenerateHaiku | Partial\n setHaikus : Dispatch>\n haikus : GenerateHaiku[]\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight)\n\n const chatTitle = 'Haiku Generator'\n const chatDescription = 'Ask me to create haikus'\n const initialLabel = 'I\\'m a haiku generator ๐Ÿ‘‹. How can I help you?'\n\n return (\n \n \n \n\n {/* Desktop Sidebar */}\n {!isMobile && (\n \n )}\n\n {/* Mobile Pull-Up Chat */}\n {isMobile && (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n
\n
\n
\n \n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n )}\n \n \n );\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\"\n];\n\nfunction HaikuCard({generatedHaiku, setHaikus, haikus} : HaikuCardProps) {\n return (\n
\n
\n {generatedHaiku?.japanese?.map((line, index) => (\n
\n

{line}

\n

\n {generatedHaiku.english?.[index]}\n

\n
\n ))}\n {generatedHaiku?.japanese && generatedHaiku.japanese.length >= 2 && (\n
\n {(() => {\n const firstLine = generatedHaiku?.japanese?.[0];\n if (!firstLine) return null;\n const haikuIndex = haikus.findIndex((h: any) => h.japanese[0] === firstLine);\n const haiku = haikus[haikuIndex];\n if (!haiku?.image_names) return null;\n\n return haiku.image_names.map((imageName, imgIndex) => (\n {\n setHaikus(prevHaikus => {\n const newHaikus = prevHaikus.map((h, idx) => {\n if (idx === haikuIndex) {\n return {\n ...h,\n selectedImage: imageName\n };\n }\n return h;\n });\n return newHaikus;\n });\n }}\n />\n ));\n })()}\n
\n )}\n
\n
\n );\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_names: string[];\n selectedImage: string | null;\n}\n\nfunction Haiku() {\n const [haikus, setHaikus] = useState([{\n japanese: [\"ไปฎใฎๅฅใ‚ˆ\", \"ใพใฃใ•ใ‚‰ใชใŒใ‚‰\", \"่Šฑใ‚’ๅ‘ผใถ\"],\n english: [\n \"A placeholder verseโ€”\",\n \"even in a blank canvas,\",\n \"it beckons flowers.\",\n ],\n image_names: [],\n selectedImage: null,\n }])\n const [activeIndex, setActiveIndex] = useState(0);\n const [isJustApplied, setIsJustApplied] = useState(false);\n\n const validateAndCorrectImageNames = (rawNames: string[] | undefined): string[] | null => {\n if (!rawNames || rawNames.length !== 3) {\n return null;\n }\n\n const correctedNames: string[] = [];\n const usedValidNames = new Set();\n\n for (const name of rawNames) {\n if (VALID_IMAGE_NAMES.includes(name) && !usedValidNames.has(name)) {\n correctedNames.push(name);\n usedValidNames.add(name);\n if (correctedNames.length === 3) break;\n }\n }\n\n if (correctedNames.length < 3) {\n const availableFallbacks = VALID_IMAGE_NAMES.filter(name => !usedValidNames.has(name));\n for (let i = availableFallbacks.length - 1; i > 0; i--) {\n const j = Math.floor(Math.random() * (i + 1));\n [availableFallbacks[i], availableFallbacks[j]] = [availableFallbacks[j], availableFallbacks[i]];\n }\n\n while (correctedNames.length < 3 && availableFallbacks.length > 0) {\n const fallbackName = availableFallbacks.pop();\n if (fallbackName) {\n correctedNames.push(fallbackName);\n }\n }\n }\n\n while (correctedNames.length < 3 && VALID_IMAGE_NAMES.length > 0) {\n const fallbackName = VALID_IMAGE_NAMES[Math.floor(Math.random() * VALID_IMAGE_NAMES.length)];\n correctedNames.push(fallbackName);\n }\n\n return correctedNames.slice(0, 3);\n };\n\n useCopilotAction({\n name: \"generate_haiku\",\n available: \"frontend\",\n parameters: [\n {\n name: \"japanese\",\n type: \"string[]\",\n },\n {\n name: \"english\",\n type: \"string[]\",\n },\n {\n name: \"image_names\",\n type: \"string[]\",\n description: \"Names of 3 relevant images\",\n },\n ],\n followUp: false,\n handler: async ({ japanese, english, image_names }: { japanese: string[], english: string[], image_names: string[] }) => {\n const finalCorrectedImages = validateAndCorrectImageNames(image_names);\n const newHaiku = {\n japanese: japanese || [],\n english: english || [],\n image_names: finalCorrectedImages || [],\n selectedImage: finalCorrectedImages?.[0] || null,\n };\n setHaikus(prev => [...prev, newHaiku]);\n setActiveIndex(haikus.length - 1);\n setIsJustApplied(true);\n setTimeout(() => setIsJustApplied(false), 600);\n return \"Haiku generated.\";\n },\n render: ({ args: generatedHaiku }: { args: Partial }) => {\n return (\n \n );\n },\n }, [haikus]);\n\n const generatedHaikus = useMemo(() => (\n haikus.filter((haiku) => haiku.english[0] !== \"A placeholder verseโ€”\")\n ), [haikus]);\n\n const { isMobile } = useMobileView();\n\n return (\n
\n {/* Thumbnail List */}\n {Boolean(generatedHaikus.length) && !isMobile && (\n
\n {generatedHaikus.map((haiku, index) => (\n setActiveIndex(index)}\n >\n {haiku.japanese.map((line, lineIndex) => (\n \n

{line}

\n

{haiku.english?.[lineIndex]}

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n
\n {haiku.image_names.map((imageName, imgIndex) => (\n \n ))}\n
\n )}\n
\n ))}\n \n )}\n\n {/* Main Display */}\n
\n
\n {haikus.filter((_haiku: Haiku, index: number) => {\n if (haikus.length == 1) return true;\n else return index == activeIndex + 1;\n }).map((haiku, index) => (\n \n {haiku.japanese.map((line, lineIndex) => (\n \n

\n {line}\n

\n

\n {haiku.english?.[lineIndex]}\n

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n
\n {haiku.image_names.map((imageName, imgIndex) => (\n \n ))}\n
\n )}\n
\n ))}\n \n \n \n );\n}\n", + "language": "typescript", + "type": "file" + }, + { + "name": "style.css", + "content": ".copilotKitWindow {\n box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1);\n}\n\n.copilotKitHeader {\n border-top-left-radius: 5px !important;\n}\n\n.page-background {\n /* Darker gradient background */\n background: linear-gradient(170deg, #e9ecef 0%, #ced4da 100%);\n}\n\n@keyframes fade-scale-in {\n from {\n opacity: 0;\n transform: translateY(10px) scale(0.98);\n }\n to {\n opacity: 1;\n transform: translateY(0) scale(1);\n }\n}\n\n/* Updated card entry animation */\n@keyframes pop-in {\n 0% {\n opacity: 0;\n transform: translateY(15px) scale(0.95);\n }\n 70% {\n opacity: 1;\n transform: translateY(-2px) scale(1.02);\n }\n 100% {\n opacity: 1;\n transform: translateY(0) scale(1);\n }\n}\n\n/* Animation for subtle background gradient movement */\n@keyframes animated-gradient {\n 0% {\n background-position: 0% 50%;\n }\n 50% {\n background-position: 100% 50%;\n }\n 100% {\n background-position: 0% 50%;\n }\n}\n\n/* Animation for flash effect on apply */\n@keyframes flash-border-glow {\n 0% {\n /* Start slightly intensified */\n border-top-color: #ff5b4a !important;\n box-shadow: 0 10px 30px rgba(0, 0, 0, 0.07),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 25px rgba(255, 91, 74, 0.5);\n }\n 50% {\n /* Peak intensity */\n border-top-color: #ff4733 !important;\n box-shadow: 0 10px 30px rgba(0, 0, 0, 0.08),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 35px rgba(255, 71, 51, 0.7);\n }\n 100% {\n /* Return to default state appearance */\n border-top-color: #ff6f61 !important;\n box-shadow: 0 10px 30px rgba(0, 0, 0, 0.07),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 10px rgba(255, 111, 97, 0.15);\n }\n}\n\n/* Existing animation for haiku lines */\n@keyframes fade-slide-in {\n from {\n opacity: 0;\n transform: translateX(-15px);\n }\n to {\n opacity: 1;\n transform: translateX(0);\n }\n}\n\n.animated-fade-in {\n /* Use the new pop-in animation */\n animation: pop-in 0.6s ease-out forwards;\n}\n\n.haiku-card {\n /* Subtle animated gradient background */\n background: linear-gradient(120deg, #ffffff 0%, #fdfdfd 50%, #ffffff 100%);\n background-size: 200% 200%;\n animation: animated-gradient 10s ease infinite;\n\n /* === Explicit Border Override Attempt === */\n /* 1. Set the default grey border for all sides */\n border: 1px solid #dee2e6;\n\n /* 2. 
Explicitly override the top border immediately after */\n border-top: 10px solid #ff6f61 !important; /* Orange top - Added !important */\n /* === End Explicit Border Override Attempt === */\n\n padding: 2.5rem 3rem;\n border-radius: 20px;\n\n /* Default glow intensity */\n box-shadow: 0 10px 30px rgba(0, 0, 0, 0.07),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 15px rgba(255, 111, 97, 0.25);\n text-align: left;\n max-width: 745px;\n margin: 3rem auto;\n min-width: 600px;\n\n /* Transition */\n transition: transform 0.35s ease, box-shadow 0.35s ease, border-top-width 0.35s ease, border-top-color 0.35s ease;\n}\n\n.haiku-card:hover {\n transform: translateY(-8px) scale(1.03);\n /* Enhanced shadow + Glow */\n box-shadow: 0 15px 35px rgba(0, 0, 0, 0.1),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 25px rgba(255, 91, 74, 0.5);\n /* Modify only top border properties */\n border-top-width: 14px !important; /* Added !important */\n border-top-color: #ff5b4a !important; /* Added !important */\n}\n\n.haiku-card .flex {\n margin-bottom: 1.5rem;\n}\n\n.haiku-card .flex.haiku-line { /* Target the lines specifically */\n margin-bottom: 1.5rem;\n opacity: 0; /* Start hidden for animation */\n animation: fade-slide-in 0.5s ease-out forwards;\n /* animation-delay is set inline in page.tsx */\n}\n\n/* Remove previous explicit color overrides - rely on Tailwind */\n/* .haiku-card p.text-4xl {\n color: #212529;\n}\n\n.haiku-card p.text-base {\n color: #495057;\n} */\n\n.haiku-card.applied-flash {\n /* Apply the flash animation once */\n /* Note: animation itself has !important on border-top-color */\n animation: flash-border-glow 0.6s ease-out forwards;\n}\n\n/* Styling for images within the main haiku card */\n.haiku-card-image {\n width: 9.5rem; /* Increased size (approx w-48) */\n height: 9.5rem; /* Increased size (approx h-48) */\n object-fit: cover;\n border-radius: 1.5rem; /* rounded-xl */\n border: 1px solid #e5e7eb;\n /* Enhanced shadow with subtle orange hint */\n box-shadow: 0 8px 15px rgba(0, 0, 0, 0.1),\n 0 3px 6px rgba(0, 0, 0, 0.08),\n 0 0 10px rgba(255, 111, 97, 0.2);\n /* Inherit animation delay from inline style */\n animation-name: fadeIn;\n animation-duration: 0.5s;\n animation-fill-mode: both;\n}\n\n/* Styling for images within the suggestion card */\n.suggestion-card-image {\n width: 6.5rem; /* Increased slightly (w-20) */\n height: 6.5rem; /* Increased slightly (h-20) */\n object-fit: cover;\n border-radius: 1rem; /* Equivalent to rounded-md */\n border: 1px solid #d1d5db; /* Equivalent to border (using Tailwind gray-300) */\n margin-top: 0.5rem;\n /* Added shadow for suggestion images */\n box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1),\n 0 2px 4px rgba(0, 0, 0, 0.06);\n transition: all 0.2s ease-in-out; /* Added for smooth deselection */\n}\n\n/* Styling for the focused suggestion card image */\n.suggestion-card-image-focus {\n width: 6.5rem;\n height: 6.5rem;\n object-fit: cover;\n border-radius: 1rem;\n margin-top: 0.5rem;\n /* Highlight styles */\n border: 2px solid #ff6f61; /* Thicker, themed border */\n box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1), /* Base shadow for depth */\n 0 0 12px rgba(255, 111, 97, 0.6); /* Orange glow */\n transform: scale(1.05); /* Slightly scale up */\n transition: all 0.2s ease-in-out; /* Smooth transition for focus */\n}\n\n/* Styling for the suggestion card container in the sidebar */\n.suggestion-card {\n border: 1px solid #dee2e6; /* Same default border as haiku-card */\n border-top: 10px solid #ff6f61; /* Same orange top border */\n border-radius: 0.375rem; 
/* Default rounded-md */\n /* Note: background-color is set by Tailwind bg-gray-100 */\n /* Other styles like padding, margin, flex are handled by Tailwind */\n}\n\n.suggestion-image-container {\n display: flex;\n gap: 1rem;\n justify-content: space-between;\n width: 100%;\n height: 6.5rem;\n}\n\n/* Mobile responsive styles - matches useMobileView hook breakpoint */\n@media (max-width: 767px) {\n .haiku-card {\n padding: 1rem 1.5rem; /* Reduced from 2.5rem 3rem */\n min-width: auto; /* Remove min-width constraint */\n max-width: 100%; /* Full width on mobile */\n margin: 1rem auto; /* Reduced margin */\n }\n\n .haiku-card-image {\n width: 5.625rem; /* 90px - smaller on mobile */\n height: 5.625rem; /* 90px - smaller on mobile */\n }\n\n .suggestion-card-image {\n width: 5rem; /* Slightly smaller on mobile */\n height: 5rem; /* Slightly smaller on mobile */\n }\n\n .suggestion-card-image-focus {\n width: 5rem; /* Slightly smaller on mobile */\n height: 5rem; /* Slightly smaller on mobile */\n }\n}\n", + "language": "css", + "type": "file" + }, + { + "name": "README.mdx", + "content": "# ๐Ÿชถ Tool-Based Generative UI Haiku Creator\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **tool-based generative UI** capabilities:\n\n1. **Frontend Rendering of Tool Calls**: Backend tool calls are automatically\n rendered in the UI\n2. **Dynamic UI Generation**: The UI updates in real-time as the agent generates\n content\n3. **Elegant Content Presentation**: Complex structured data (haikus) are\n beautifully displayed\n\n## How to Interact\n\nChat with your Copilot and ask for haikus about different topics:\n\n- \"Create a haiku about nature\"\n- \"Write a haiku about technology\"\n- \"Generate a haiku about the changing seasons\"\n- \"Make a humorous haiku about programming\"\n\nEach request will trigger the agent to generate a haiku and display it in a\nvisually appealing card format in the UI.\n\n## โœจ Tool-Based Generative UI in Action\n\n**What's happening technically:**\n\n- The agent processes your request and determines it should create a haiku\n- It calls a backend tool that returns structured haiku data\n- CopilotKit automatically renders this tool call in the frontend\n- The rendering is handled by the registered tool component in your React app\n- No manual state management is required to display the results\n\n**What you'll see in this demo:**\n\n- As you request a haiku, a beautifully formatted card appears in the UI\n- The haiku follows the traditional 5-7-5 syllable structure\n- Each haiku is presented with consistent styling\n- Multiple haikus can be generated in sequence\n- The UI adapts to display each new piece of content\n\nThis pattern of tool-based generative UI can be extended to create any kind of\ndynamic content - from data visualizations to interactive components, all driven\nby your Copilot's tool calls!\n", + "language": "markdown", + "type": "file" + }, + { + "name": "agent.py", + "content": "\"\"\"\nAn example demonstrating tool-based generative UI using LangGraph.\n\"\"\"\n\nfrom typing import List, Any, Optional, Annotated\nimport os\n\n# LangGraph imports\nfrom langchain_openai import ChatOpenAI\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.prebuilt import ToolNode\n\n@tool\ndef generate_haiku(\n japanese: 
Annotated[ # pylint: disable=unused-argument\n        List[str],\n        \"An array of three lines of the haiku in Japanese\"\n    ],\n    english: Annotated[ # pylint: disable=unused-argument\n        List[str],\n        \"An array of three lines of the haiku in English\"\n    ]\n):\n    \"\"\"\n    Generate a haiku in Japanese and its English translation.\n    Also select exactly 3 relevant images from the provided list based on the haiku's theme.\n    \"\"\"\n\nclass AgentState(MessagesState):\n    \"\"\"\n    State of the agent.\n    \"\"\"\n    tools: List[Any]\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n    \"\"\"\n    The main function handling chat and tool calls.\n    \"\"\"\n\n    system_prompt = \"\"\"\n    You assist the user in generating a haiku.\n    When asked for a haiku, generate it using the 'generate_haiku' tool.\n    \"\"\"\n\n    # Define the model\n    model = ChatOpenAI(model=\"gpt-4o\")\n\n    # Define config for the model\n    if config is None:\n        config = RunnableConfig(recursion_limit=25)\n\n    # Bind the tools to the model\n    model_with_tools = model.bind_tools(\n        [generate_haiku],\n        # Disable parallel tool calls to avoid race conditions\n        parallel_tool_calls=False,\n    )\n\n    # Run the model to generate a response\n    response = await model_with_tools.ainvoke([\n        SystemMessage(content=system_prompt),\n        *state[\"messages\"],\n    ], config)\n\n    if response.tool_calls:\n        return Command(\n            goto=\"tool_node\",\n            update={\n                \"messages\": state[\"messages\"] + [response]\n            }\n        )\n    # Return Command to end with updated messages\n    return Command(\n        goto=END,\n        update={\n            \"messages\": state[\"messages\"] + [response]\n        }\n    )\n\n# Define the graph\nworkflow = StateGraph(AgentState)\n\n# Add nodes\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.add_node(\"tool_node\", ToolNode([generate_haiku]))\n\n# Add edges\nworkflow.set_entry_point(\"chat_node\")\nworkflow.add_edge(START, \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\nworkflow.add_edge(\"tool_node\", END)\n\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n    # For CopilotKit and other contexts, use MemorySaver\n    from langgraph.checkpoint.memory import MemorySaver\n    memory = MemorySaver()\n    graph = workflow.compile(checkpointer=memory)\nelse:\n    # When running in LangGraph API/dev, don't use a custom checkpointer\n    graph = workflow.compile()\n",
+      "language": "python",
+      "type": "file"
+    },
+    {
+      "name": "agent.ts",
+      "content": "/**\n * An example demonstrating tool-based generative UI using LangGraph.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { Command, Annotation, MessagesAnnotation, StateGraph, END, START } from \"@langchain/langgraph\";\n\n// List of available images (modify path if needed)\nconst IMAGE_LIST = [\n  \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n  \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n  \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n  \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n  \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n  \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n  \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n  
\"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\"\n];\n\n// This tool generates a haiku on the server.\n// The tool call will be streamed to the frontend as it is being generated.\nconst GENERATE_HAIKU_TOOL = {\n type: \"function\",\n function: {\n name: \"generate_haiku\",\n description: \"Generate a haiku in Japanese and its English translation. Also select exactly 3 relevant images from the provided list based on the haiku's theme.\",\n parameters: {\n type: \"object\",\n properties: {\n japanese: {\n type: \"array\",\n items: {\n type: \"string\"\n },\n description: \"An array of three lines of the haiku in Japanese\"\n },\n english: {\n type: \"array\",\n items: {\n type: \"string\"\n },\n description: \"An array of three lines of the haiku in English\"\n },\n image_names: {\n type: \"array\",\n items: {\n type: \"string\"\n },\n description: \"An array of EXACTLY THREE image filenames from the provided list that are most relevant to the haiku.\"\n }\n },\n required: [\"japanese\", \"english\", \"image_names\"]\n }\n }\n};\n\nexport const AgentStateAnnotation = Annotation.Root({\n tools: Annotation(),\n ...MessagesAnnotation.spec,\n});\nexport type AgentState = typeof AgentStateAnnotation.State;\n\nasync function chatNode(state: AgentState, config?: RunnableConfig): Promise {\n /**\n * The main function handling chat and tool calls.\n */\n // Prepare the image list string for the prompt\n const imageListStr = IMAGE_LIST.map(img => `- ${img}`).join(\"\\n\");\n\n const systemPrompt = `\n You assist the user in generating a haiku.\n When generating a haiku using the 'generate_haiku' tool, you MUST also select exactly 3 image filenames from the following list that are most relevant to the haiku's content or theme. 
Return the filenames in the 'image_names' parameter.\n    \n    Available images:\n    ${imageListStr}\n    \n    Don't provide the relevant image names in your final response to the user.\n  `;\n\n  // Define the model\n  const model = new ChatOpenAI({ model: \"gpt-4o\" });\n  \n  // Define config for the model\n  if (!config) {\n    config = { recursionLimit: 25 };\n  }\n\n  // Bind the tools to the model\n  const modelWithTools = model.bindTools(\n    [GENERATE_HAIKU_TOOL],\n    {\n      // Disable parallel tool calls to avoid race conditions\n      parallel_tool_calls: false,\n    }\n  );\n\n  // Run the model to generate a response\n  const response = await modelWithTools.invoke([\n    new SystemMessage({ content: systemPrompt }),\n    ...state.messages,\n  ], config);\n\n  // Return Command to end with updated messages\n  return new Command({\n    goto: END,\n    update: {\n      messages: [...state.messages, response]\n    }\n  });\n}\n\n// Define the graph\nconst workflow = new StateGraph(AgentStateAnnotation);\n\n// Add nodes\nworkflow.addNode(\"chat_node\", chatNode);\n\n// Add edges\nworkflow.setEntryPoint(\"chat_node\");\nworkflow.addEdge(START, \"chat_node\");\nworkflow.addEdge(\"chat_node\", END);\n\n// Compile the graph\nexport const toolBasedGenerativeUiGraph = workflow.compile();",
+      "language": "ts",
+      "type": "file"
+    }
+  ],
   "agno::agentic_chat": [
     {
       "name": "page.tsx",
diff --git a/typescript-sdk/integrations/langgraph/examples/python/README.md b/typescript-sdk/integrations/langgraph/examples/python/README.md
index 7a64aee56..d825ecb37 100644
--- a/typescript-sdk/integrations/langgraph/examples/python/README.md
+++ b/typescript-sdk/integrations/langgraph/examples/python/README.md
@@ -4,5 +4,30 @@
 
 First, make sure to create a new .env file from the .env.example and include the required keys.
 
-For python, run:
-`pnpx @langchain/langgraph-cli@latest dev`
+To run the Python examples for the LangGraph platform, run:
+```
+cd typescript-sdk/integrations/langgraph/examples/python
+pnpx @langchain/langgraph-cli@latest dev
+```
+
+To run the Python examples using FastAPI, run:
+```
+cd typescript-sdk/integrations/langgraph/examples/python
+poetry install
+poetry run dev
+```
+
+Note that when running them both concurrently, Poetry and the langgraph-cli will step on each other's toes and install/uninstall each other's dependencies.
+You can fix this by running the Poetry commands with `virtualenvs.in-project` set to false. You can set this permanently for the project using:
+`poetry config virtualenvs.in-project false --local`, globally using `poetry config virtualenvs.in-project false`, or temporarily using an environment variable:
+
+```
+export POETRY_VIRTUALENVS_IN_PROJECT=false
+poetry install
+poetry run dev
+```
+or
+```
+POETRY_VIRTUALENVS_IN_PROJECT=false poetry install
+POETRY_VIRTUALENVS_IN_PROJECT=false poetry run dev
+```
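+
+As a quick sanity check that both servers are up, you can curl them. This is only a sketch with assumed ports: `langgraph dev` typically listens on port 2024, while the FastAPI example's port depends on this project's `dev` script, so adjust both to your setup.
+
+```
+# LangGraph dev server (2024 is the langgraph-cli default; /ok is its health route)
+curl http://localhost:2024/ok
+# FastAPI server: port 8000 is an assumption, use whatever `poetry run dev` prints
+curl http://localhost:8000
+```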